/*
* DRBG based on NIST SP800-90A
*
* Copyright Stephan Mueller <smueller@chronox.de>, 2014
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#ifndef _DRBG_H
#define _DRBG_H
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/workqueue.h>
/*
* Concatenation Helper and string operation helper
*
* SP800-90A requires the concatenation of different data. To avoid copying
* buffers around or allocate additional memory, the following data structure
* is used to point to the original memory with its size. In addition, it
* is used to build a linked list. The linked list defines the concatenation
* of individual buffers. The order of memory block referenced in that
* linked list determines the order of concatenation.
*/
struct drbg_string {
const unsigned char *buf;
size_t len;
struct list_head list;
};
static inline void drbg_string_fill(struct drbg_string *string,
const unsigned char *buf, size_t len)
{
string->buf = buf;
string->len = len;
INIT_LIST_HEAD(&string->list);
}
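/*
 * Example (illustrative sketch): to concatenate two buffers for a DRBG
 * operation, wrap each one in a drbg_string and chain them onto a list head;
 * the list order defines the concatenation order. The buffer names below are
 * hypothetical.
 *
 *	LIST_HEAD(seedlist);
 *	struct drbg_string entropy_str, nonce_str;
 *
 *	drbg_string_fill(&entropy_str, entropy_buf, entropy_len);
 *	drbg_string_fill(&nonce_str, nonce_buf, nonce_len);
 *	list_add_tail(&entropy_str.list, &seedlist);
 *	list_add_tail(&nonce_str.list, &seedlist);
 *
 * seedlist now represents the concatenation entropy_buf || nonce_buf without
 * copying either buffer.
 */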
struct drbg_state;
typedef uint32_t drbg_flag_t;
struct drbg_core {
drbg_flag_t flags; /* flags for the cipher */
__u8 statelen; /* maximum state length */
__u8 blocklen_bytes; /* block size of output in bytes */
char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
/* kernel crypto API backend cipher name */
char backend_cra_name[CRYPTO_MAX_ALG_NAME];
};
struct drbg_state_ops {
int (*update)(struct drbg_state *drbg, struct list_head *seed,
int reseed);
int (*generate)(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct list_head *addtl);
int (*crypto_init)(struct drbg_state *drbg);
int (*crypto_fini)(struct drbg_state *drbg);
};
struct drbg_test_data {
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
};
enum drbg_seed_state {
DRBG_SEED_STATE_UNSEEDED,
DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
DRBG_SEED_STATE_FULL,
};
struct drbg_state {
struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
unsigned char *Vbuf;
/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
unsigned char *C;
unsigned char *Cbuf;
/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
size_t reseed_ctr;
size_t reseed_threshold;
/* some memory the DRBG can use for its operation */
unsigned char *scratchpad;
unsigned char *scratchpadbuf;
void *priv_data; /* Cipher handle */
struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */
struct skcipher_request *ctr_req; /* CTR mode request handle */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
enum drbg_seed_state seeded; /* DRBG fully seeded? */
unsigned long last_seed_time;
bool pr; /* Prediction resistance enabled? */
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
struct drbg_string test_data;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
{
	if (drbg && drbg->core)
		return drbg->core->statelen;
return 0;
}
static inline __u8 drbg_blocklen(struct drbg_state *drbg)
{
	if (drbg && drbg->core)
		return drbg->core->blocklen_bytes;
return 0;
}
static inline __u8 drbg_keylen(struct drbg_state *drbg)
{
if (drbg && drbg->core)
return (drbg->core->statelen - drbg->core->blocklen_bytes);
return 0;
}
static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
{
/* SP800-90A requires the limit 2**19 bits, but we return bytes */
return (1 << 16);
}
static inline size_t drbg_max_addtl(struct drbg_state *drbg)
{
/* SP800-90A requires 2**35 bytes additional info str / pers str */
#if (__BITS_PER_LONG == 32)
/*
* SP800-90A allows smaller maximum numbers to be returned -- we
* return SIZE_MAX - 1 to allow the verification of the enforcement
* of this value in drbg_healthcheck_sanity.
*/
return (SIZE_MAX - 1);
#else
return (1UL<<35);
#endif
}
static inline size_t drbg_max_requests(struct drbg_state *drbg)
{
/* SP800-90A requires 2**48 maximum requests before reseeding */
return (1<<20);
}
/*
* This is a wrapper to the kernel crypto API function of
* crypto_rng_generate() to allow the caller to provide additional data.
*
* @drng DRBG handle -- see crypto_rng_get_bytes
* @outbuf output buffer -- see crypto_rng_get_bytes
* @outlen length of output buffer -- see crypto_rng_get_bytes
 * @addtl additional information string wrapped in a struct drbg_string
*
* return
* see crypto_rng_get_bytes
*/
static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
unsigned char *outbuf, unsigned int outlen,
struct drbg_string *addtl)
{
return crypto_rng_generate(drng, addtl->buf, addtl->len,
outbuf, outlen);
}
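/*
 * Example (illustrative sketch, error handling omitted): request random bytes
 * with an additional input string. The algorithm name must be one of the DRBG
 * instances registered with the kernel crypto API; "drbg_nopr_hmac_sha256" is
 * used here as an assumed example.
 *
 *	struct crypto_rng *drng;
 *	struct drbg_string addtl;
 *	unsigned char out[64];
 *	int ret;
 *
 *	drng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
 *	drbg_string_fill(&addtl, addtl_buf, addtl_len);
 *	ret = crypto_drbg_get_bytes_addtl(drng, out, sizeof(out), &addtl);
 *	crypto_free_rng(drng);
 */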
/*
* TEST code
*
* This is a wrapper to the kernel crypto API function of
* crypto_rng_generate() to allow the caller to provide additional data and
* allow furnishing of test_data
*
* @drng DRBG handle -- see crypto_rng_get_bytes
* @outbuf output buffer -- see crypto_rng_get_bytes
* @outlen length of output buffer -- see crypto_rng_get_bytes
 * @addtl additional information string wrapped in a struct drbg_string
* @test_data filled test data
*
* return
* see crypto_rng_get_bytes
*/
static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
unsigned char *outbuf, unsigned int outlen,
struct drbg_string *addtl,
struct drbg_test_data *test_data)
{
crypto_rng_set_entropy(drng, test_data->testentropy->buf,
test_data->testentropy->len);
return crypto_rng_generate(drng, addtl->buf, addtl->len,
outbuf, outlen);
}
/*
* TEST code
*
* This is a wrapper to the kernel crypto API function of
* crypto_rng_reset() to allow the caller to provide test_data
*
* @drng DRBG handle -- see crypto_rng_reset
 * @pers personalization string wrapped in a struct drbg_string
* @test_data filled test data
*
* return
* see crypto_rng_reset
*/
static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
struct drbg_string *pers,
struct drbg_test_data *test_data)
{
crypto_rng_set_entropy(drng, test_data->testentropy->buf,
test_data->testentropy->len);
return crypto_rng_reset(drng, pers->buf, pers->len);
}
/* DRBG type flags */
#define DRBG_CTR ((drbg_flag_t)1<<0)
#define DRBG_HMAC ((drbg_flag_t)1<<1)
#define DRBG_HASH ((drbg_flag_t)1<<2)
#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH)
/* DRBG strength flags */
#define DRBG_STRENGTH128 ((drbg_flag_t)1<<3)
#define DRBG_STRENGTH192 ((drbg_flag_t)1<<4)
#define DRBG_STRENGTH256 ((drbg_flag_t)1<<5)
#define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \
DRBG_STRENGTH256)
enum drbg_prefixes {
DRBG_PREFIX0 = 0x00,
DRBG_PREFIX1,
DRBG_PREFIX2,
DRBG_PREFIX3
};
#endif /* _DRBG_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file provides wrappers with sanitizer instrumentation for non-atomic
* bit operations.
*
* To use this functionality, an arch's bitops.h file needs to define each of
* the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
* arch___set_bit(), etc.).
*/
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#include <linux/instrumented.h>
/**
* ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic. If it is called on the same
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
static __always_inline void
___set_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
/**
* ___clear_bit - Clears a bit in memory
* @nr: the bit to clear
* @addr: the address to start counting from
*
* Unlike clear_bit(), this function is non-atomic. If it is called on the same
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
static __always_inline void
___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
/**
* ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic. If it is called on the same
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
static __always_inline void
___change_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
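/*
 * Example (illustrative sketch): the non-atomic variants are only safe when
 * the bitmap cannot be modified concurrently, e.g. on stack-local data or
 * under a lock the caller already holds. Callers normally reach these
 * wrappers through __set_bit()/__clear_bit()/__change_bit().
 *
 *	DECLARE_BITMAP(map, 64);
 *
 *	bitmap_zero(map, 64);
 *	__set_bit(3, map);
 *	__change_bit(3, map);
 *	__clear_bit(5, map);
 */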
static __always_inline void
__instrument_read_write_bitop(long nr, volatile unsigned long *addr)
{
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
/*
* We treat non-atomic read-write bitops a little more special.
* Given the operations here only modify a single bit, assuming
* non-atomicity of the writer is sufficient may be reasonable
* for certain usage (and follows the permissible nature of the
* assume-plain-writes-atomic rule):
* 1. report read-modify-write races -> check read;
* 2. do not report races with marked readers, but do report
* races with unmarked readers -> check "atomic" write.
*/
kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
/*
* Use generic write instrumentation, in case other sanitizers
* or tools are enabled alongside KCSAN.
*/
instrument_write(addr + BIT_WORD(nr), sizeof(long));
} else {
instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
}
}
/**
* ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
static __always_inline bool
___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
/**
* ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
static __always_inline bool
___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
/**
* ___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
static __always_inline bool
___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
/**
* _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static __always_inline bool
_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_bit(nr, addr);
}
/**
* _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static __always_inline bool
_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit_acquire(nr, addr);
}
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
*
* Based on the original implementation which is:
* Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright 2003 Andi Kleen, SuSE Labs.
*
* Parts of the original code have been moved to arch/x86/vdso/vma.c
*
* This file implements vsyscall emulation. vsyscalls are a legacy ABI:
* Userspace can request certain kernel services by calling fixed
* addresses. This concept is problematic:
*
* - It interferes with ASLR.
* - It's awkward to write code that lives in kernel addresses but is
* callable by userspace at fixed addresses.
* - The whole concept is impossible for 32-bit compat userspace.
* - UML cannot easily virtualize a vsyscall.
*
* As of mid-2014, I believe that there is no new userspace code that
* will use a vsyscall if the vDSO is present. I hope that there will
* soon be no new userspace code that will ever use a vsyscall.
*
* The code in this file emulates vsyscalls when notified of a page
* fault to a vsyscall address.
*/
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>
#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#include <asm/paravirt.h>
#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
static enum { EMULATE, XONLY, NONE } vsyscall_mode __ro_after_init =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
NONE;
#elif defined(CONFIG_LEGACY_VSYSCALL_XONLY)
XONLY;
#else
#error VSYSCALL config is broken
#endif
static int __init vsyscall_setup(char *str)
{
if (str) {
if (!strcmp("emulate", str))
vsyscall_mode = EMULATE;
else if (!strcmp("xonly", str))
vsyscall_mode = XONLY;
else if (!strcmp("none", str))
vsyscall_mode = NONE;
else
return -EINVAL;
return 0;
}
return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
const char *message)
{
if (!show_unhandled_signals)
return;
printk_ratelimited("%s%s[%d] %s ip:%lx cs:%x sp:%lx ax:%lx si:%lx di:%lx\n",
level, current->comm, task_pid_nr(current),
message, regs->ip, regs->cs,
regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
{
int nr;
if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
return -EINVAL;
nr = (addr & 0xC00UL) >> 10;
if (nr >= 3)
return -EINVAL;
return nr;
}
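/*
 * Illustration (assuming the traditional layout, where VSYSCALL_ADDR is
 * 0xffffffffff600000 and the entry points are 1024 bytes apart):
 *
 *	addr_to_vsyscall_nr(0xffffffffff600000) == 0        (gettimeofday)
 *	addr_to_vsyscall_nr(0xffffffffff600400) == 1        (time)
 *	addr_to_vsyscall_nr(0xffffffffff600800) == 2        (getcpu)
 *	addr_to_vsyscall_nr(0xffffffffff600c00) == -EINVAL  (only 3 vsyscalls)
 *	addr_to_vsyscall_nr(0xffffffffff600123) == -EINVAL  (misaligned)
 */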
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
if (!access_ok((void __user *)ptr, size)) {
struct thread_struct *thread = &current->thread;
thread->error_code = X86_PF_USER | X86_PF_WRITE;
thread->cr2 = ptr;
thread->trap_nr = X86_TRAP_PF;
force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr);
return false;
} else {
return true;
}
}
bool emulate_vsyscall(unsigned long error_code,
struct pt_regs *regs, unsigned long address)
{
unsigned long caller;
int vsyscall_nr, syscall_nr, tmp;
long ret;
unsigned long orig_dx;
/* Write faults or kernel-privilege faults never get fixed up. */
if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
return false;
/*
* Assume that faults at regs->ip are because of an
* instruction fetch. Return early and avoid
* emulation for faults during data accesses:
*/
if (address != regs->ip) {
/* Failed vsyscall read */
if (vsyscall_mode == EMULATE)
return false;
/*
* User code tried and failed to read the vsyscall page.
*/
warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
return false;
}
/*
* X86_PF_INSTR is only set when NX is supported. When
* available, use it to double-check that the emulation code
* is only being used for instruction fetches:
*/
if (cpu_feature_enabled(X86_FEATURE_NX))
WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
/*
* No point in checking CS -- the only way to get here is a user mode
* trap to a high address, which means that we're in 64-bit user code.
*/
if (vsyscall_mode == NONE) {
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall attempted with vsyscall=none");
return false;
}
vsyscall_nr = addr_to_vsyscall_nr(address);
trace_emulate_vsyscall(vsyscall_nr);
if (vsyscall_nr < 0) {
warn_bad_vsyscall(KERN_WARNING, regs,
"misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
goto sigsegv;
}
if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
warn_bad_vsyscall(KERN_WARNING, regs,
"vsyscall with bad stack (exploit attempt?)");
goto sigsegv;
}
/*
* Check for access_ok violations and find the syscall nr.
*
* NULL is a valid user pointer (in the access_ok sense) on 32-bit and
* 64-bit, so we don't need to special-case it here. For all the
* vsyscalls, NULL means "don't write anything" not "write it at
* address 0".
*/
switch (vsyscall_nr) {
case 0:
if (!write_ok_or_segv(regs->di, sizeof(struct __kernel_old_timeval)) ||
!write_ok_or_segv(regs->si, sizeof(struct timezone))) {
ret = -EFAULT;
goto check_fault;
}
syscall_nr = __NR_gettimeofday;
break;
case 1:
if (!write_ok_or_segv(regs->di, sizeof(__kernel_old_time_t))) {
ret = -EFAULT;
goto check_fault;
}
syscall_nr = __NR_time;
break;
case 2:
if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
!write_ok_or_segv(regs->si, sizeof(unsigned))) {
ret = -EFAULT;
goto check_fault;
}
syscall_nr = __NR_getcpu;
break;
}
/*
* Handle seccomp. regs->ip must be the original value.
* See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst.
*
* We could optimize the seccomp disabled case, but performance
* here doesn't matter.
*/
regs->orig_ax = syscall_nr;
regs->ax = -ENOSYS;
tmp = secure_computing();
if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
warn_bad_vsyscall(KERN_DEBUG, regs,
"seccomp tried to change syscall nr or ip");
force_exit_sig(SIGSYS);
return true;
}
regs->orig_ax = -1;
if (tmp)
goto do_ret; /* skip requested */
/*
* With a real vsyscall, page faults cause SIGSEGV.
*/
ret = -EFAULT;
switch (vsyscall_nr) {
case 0:
/* this decodes regs->di and regs->si on its own */
ret = __x64_sys_gettimeofday(regs);
break;
case 1:
/* this decodes regs->di on its own */
ret = __x64_sys_time(regs);
break;
case 2:
/* while we could clobber regs->dx, we didn't in the past... */
orig_dx = regs->dx;
regs->dx = 0;
/* this decodes regs->di, regs->si and regs->dx on its own */
ret = __x64_sys_getcpu(regs);
regs->dx = orig_dx;
break;
}
check_fault:
if (ret == -EFAULT) {
/* Bad news -- userspace fed a bad pointer to a vsyscall. */
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall fault (exploit attempt?)");
goto sigsegv;
}
regs->ax = ret;
do_ret:
/* Emulate a ret instruction. */
regs->ip = caller;
regs->sp += 8;
return true;
sigsegv:
force_sig(SIGSEGV);
return true;
}
/*
* A pseudo VMA to allow ptrace access for the vsyscall page. This only
* covers the 64bit vsyscall page now. 32bit has a real VMA now and does
* not need special handling anymore:
*/
static const char *gate_vma_name(struct vm_area_struct *vma)
{
return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
.name = gate_vma_name,
};
static struct vm_area_struct gate_vma __ro_after_init = {
.vm_start = VSYSCALL_ADDR,
.vm_end = VSYSCALL_ADDR + PAGE_SIZE,
.vm_page_prot = PAGE_READONLY_EXEC,
.vm_flags = VM_READ | VM_EXEC,
.vm_ops = &gate_vma_ops,
};
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
if (!mm || !test_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags))
return NULL;
#endif
if (vsyscall_mode == NONE)
return NULL;
return &gate_vma;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma = get_gate_vma(mm);
if (!vma)
return 0;
return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
/*
* Use this when you have no reliable mm, typically from interrupt
* context. It is less reliable than using a task's mm and may give
* false positives.
*/
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
/*
* The VSYSCALL page is the only user-accessible page in the kernel address
* range. Normally, the kernel page tables can have _PAGE_USER clear, but
* the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
* are enabled.
*
* Some day we may create a "minimal" vsyscall mode in which we emulate
* vsyscalls but leave the page not present. If so, we skip calling
* this.
*/
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
p4d = p4d_offset(pgd, VSYSCALL_ADDR);
set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
pud = pud_offset(p4d, VSYSCALL_ADDR);
set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
pmd = pmd_offset(pud, VSYSCALL_ADDR);
set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}
void __init map_vsyscall(void)
{
extern char __vsyscall_page;
unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
/*
* For full emulation, the page needs to exist for real. In
* execute-only mode, there is no PTE at all backing the vsyscall
* page.
*/
if (vsyscall_mode == EMULATE) {
__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
PAGE_KERNEL_VVAR);
set_vsyscall_pgtable_user_bits(swapper_pg_dir);
}
if (vsyscall_mode == XONLY)
vm_flags_init(&gate_vma, VM_EXEC);
BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
(unsigned long)VSYSCALL_ADDR);
}
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef LLIST_H
#define LLIST_H
/*
* Lock-less NULL terminated single linked list
*
* Cases where locking is not needed:
* If there are multiple producers and multiple consumers, llist_add can be
* used in producers and llist_del_all can be used in consumers simultaneously
* without locking. Also a single consumer can use llist_del_first while
* multiple producers simultaneously use llist_add, without any locking.
*
* Cases where locking is needed:
* If we have multiple consumers with llist_del_first used in one consumer, and
* llist_del_first or llist_del_all used in other consumers, then a lock is
 * needed. This is because llist_del_first depends on list->first->next not
 * changing, but without lock protection there is no way to be sure of that:
 * if the deleting task is preempted in the middle of the operation and, when
 * it resumes, list->first happens to be the same as before, the cmpxchg in
 * llist_del_first will succeed even though ->next changed in the meantime
 * (the ABA problem). For example, while a llist_del_first operation
* is in progress in one consumer, then a llist_del_first, llist_add,
* llist_add (or llist_del_all, llist_add, llist_add) sequence in another
* consumer may cause violations.
*
* This can be summarized as follows:
*
 *           |   add    | del_first |  del_all
 * add       |    -     |     -     |     -
 * del_first |          |     L     |     L
 * del_all   |          |           |     -
*
 * Where a particular row's operation can happen concurrently with a column's
 * operation; "-" means no lock is needed, while "L" means a lock is needed.
*
* The list entries deleted via llist_del_all can be traversed with
* traversing function such as llist_for_each etc. But the list
* entries can not be traversed safely before deleted from the list.
* The order of deleted entries is from the newest to the oldest added
* one. If you want to traverse from the oldest to the newest, you
* must reverse the order by yourself before traversing.
*
* The basic atomic operation of this list is cmpxchg on long. On
* architectures that don't have NMI-safe cmpxchg implementation, the
* list can NOT be used in NMI handlers. So code that uses the list in
* an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*/
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/stddef.h>
#include <linux/types.h>
struct llist_head {
struct llist_node *first;
};
struct llist_node {
struct llist_node *next;
};
#define LLIST_HEAD_INIT(name) { NULL }
#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
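/*
 * Example (illustrative sketch; struct work_item and process() are
 * hypothetical): multiple producers push entries with llist_add() without a
 * lock, a single consumer grabs the whole batch with llist_del_all() and,
 * because the batch comes back newest-first, reverses it before processing.
 *
 *	struct work_item {
 *		struct llist_node node;
 *		int payload;
 *	};
 *
 *	static LLIST_HEAD(pending);
 *
 *	// producer context (may run concurrently, even from IRQ)
 *	llist_add(&item->node, &pending);
 *
 *	// consumer context
 *	struct llist_node *batch = llist_del_all(&pending);
 *	struct work_item *pos, *n;
 *
 *	batch = llist_reverse_order(batch);	// oldest-first
 *	llist_for_each_entry_safe(pos, n, batch, node)
 *		process(pos);
 */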
/**
* init_llist_head - initialize lock-less list head
* @head: the head for your lock-less list
*/
static inline void init_llist_head(struct llist_head *list)
{
list->first = NULL;
}
/**
* init_llist_node - initialize lock-less list node
* @node: the node to be initialised
*
* In cases where there is a need to test if a node is on
* a list or not, this initialises the node to clearly
* not be on any list.
*/
static inline void init_llist_node(struct llist_node *node)
{
WRITE_ONCE(node->next, node);
}
/**
 * llist_on_list - test if a lock-less list node is on a list
* @node: the node to test
*
* When a node is on a list the ->next pointer will be NULL or
* some other node. It can never point to itself. We use that
* in init_llist_node() to record that a node is not on any list,
* and here to test whether it is on any list.
*/
static inline bool llist_on_list(const struct llist_node *node)
{
return READ_ONCE(node->next) != node;
}
/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the llist_node within the struct.
*/
#define llist_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
* member_address_is_nonnull - check whether the member address is not NULL
* @ptr: the object pointer (struct type * that contains the llist_node)
* @member: the name of the llist_node within the struct.
*
* This macro is conceptually the same as
* &ptr->member != NULL
* but it works around the fact that compilers can decide that taking a member
* address is never a NULL pointer.
*
* Real objects that start at a high address and have a member at NULL are
* unlikely to exist, but such pointers may be returned e.g. by the
* container_of() macro.
*/
#define member_address_is_nonnull(ptr, member) \
((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
/**
* llist_for_each - iterate over some deleted entries of a lock-less list
* @pos: the &struct llist_node to use as a loop cursor
* @node: the first entry of deleted list entries
*
* In general, some entries of the lock-less list can be traversed
* safely only after being deleted from list, so start with an entry
* instead of list head.
*
* If being used on entries deleted from lock-less list directly, the
* traverse order is from the newest to the oldest added entry. If
* you want to traverse from the oldest to the newest, you must
* reverse the order by yourself before traversing.
*/
#define llist_for_each(pos, node) \
for ((pos) = (node); pos; (pos) = (pos)->next)
/**
* llist_for_each_safe - iterate over some deleted entries of a lock-less list
* safe against removal of list entry
* @pos: the &struct llist_node to use as a loop cursor
* @n: another &struct llist_node to use as temporary storage
* @node: the first entry of deleted list entries
*
* In general, some entries of the lock-less list can be traversed
* safely only after being deleted from list, so start with an entry
* instead of list head.
*
* If being used on entries deleted from lock-less list directly, the
* traverse order is from the newest to the oldest added entry. If
* you want to traverse from the oldest to the newest, you must
* reverse the order by yourself before traversing.
*/
#define llist_for_each_safe(pos, n, node) \
for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
/**
* llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
* @pos: the type * to use as a loop cursor.
 * @node: the first entry of deleted list entries.
 * @member: the name of the llist_node within the struct.
*
* In general, some entries of the lock-less list can be traversed
* safely only after being removed from list, so start with an entry
* instead of list head.
*
* If being used on entries deleted from lock-less list directly, the
* traverse order is from the newest to the oldest added entry. If
* you want to traverse from the oldest to the newest, you must
* reverse the order by yourself before traversing.
*/
#define llist_for_each_entry(pos, node, member) \
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
member_address_is_nonnull(pos, member); \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
* llist_for_each_entry_safe - iterate over some deleted entries of lock-less list of given type
* safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @node: the first entry of deleted list entries.
 * @member: the name of the llist_node within the struct.
*
* In general, some entries of the lock-less list can be traversed
* safely only after being removed from list, so start with an entry
* instead of list head.
*
* If being used on entries deleted from lock-less list directly, the
* traverse order is from the newest to the oldest added entry. If
* you want to traverse from the oldest to the newest, you must
* reverse the order by yourself before traversing.
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
member_address_is_nonnull(pos, member) && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
/**
* llist_empty - tests whether a lock-less list is empty
* @head: the list to test
*
* Not guaranteed to be accurate or up to date. Just a quick way to
* test whether the list is empty without deleting something from the
* list.
*/
static inline bool llist_empty(const struct llist_head *head)
{
return READ_ONCE(head->first) == NULL;
}
static inline struct llist_node *llist_next(struct llist_node *node)
{
return READ_ONCE(node->next);
}
/**
* llist_add_batch - add several linked entries in batch
* @new_first: first entry in batch to be added
* @new_last: last entry in batch to be added
* @head: the head for your lock-less list
*
* Return whether list is empty before adding.
*/
static inline bool llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head)
{
struct llist_node *first = READ_ONCE(head->first);
do {
		new_last->next = first;
	} while (!try_cmpxchg(&head->first, &first, new_first));
return !first;
}
static inline bool __llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head)
{
new_last->next = head->first;
head->first = new_first;
return new_last->next == NULL;
}
/**
* llist_add - add a new entry
* @new: new entry to be added
* @head: the head for your lock-less list
*
* Returns true if the list was empty prior to adding this entry.
*/
static inline bool llist_add(struct llist_node *new, struct llist_head *head)
{
return llist_add_batch(new, new, head);
}
static inline bool __llist_add(struct llist_node *new, struct llist_head *head)
{
return __llist_add_batch(new, new, head);
}
/**
* llist_del_all - delete all entries from lock-less list
* @head: the head of lock-less list to delete all entries
*
* If list is empty, return NULL, otherwise, delete all entries and
* return the pointer to the first entry. The order of entries
* deleted is from the newest to the oldest added one.
*/
static inline struct llist_node *llist_del_all(struct llist_head *head)
{
return xchg(&head->first, NULL);
}
static inline struct llist_node *__llist_del_all(struct llist_head *head)
{
struct llist_node *first = head->first;
head->first = NULL;
return first;
}
extern struct llist_node *llist_del_first(struct llist_head *head);
/**
 * llist_del_first_init - delete the first entry from a lock-less list and mark it as being off-list
* @head: the head of lock-less list to delete from.
*
 * This behaves the same as llist_del_first() except that init_llist_node() is
 * called on the returned node so that llist_on_list() will report false for it.
*/
static inline struct llist_node *llist_del_first_init(struct llist_head *head)
{
struct llist_node *n = llist_del_first(head);
if (n)
init_llist_node(n);
return n;
}
extern bool llist_del_first_this(struct llist_head *head,
struct llist_node *this);
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. NET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the Ethernet handlers.
*
* Version: @(#)eth.h 1.0.4 05/13/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Relocated to include/linux where it belongs by Alan Cox
* <gw4pts@gw4pts.ampr.org>
*/
#ifndef _LINUX_ETHERDEVICE_H
#define _LINUX_ETHERDEVICE_H
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/unaligned.h>
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
struct device;
struct fwnode_handle;
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
int device_get_mac_address(struct device *dev, char *addr);
int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
const unsigned char *haddr);
__be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
int eth_validate_addr(struct net_device *dev);
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int txqs,
unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
#define eth_stp_addr eth_reserved_addr_base
static const u8 eth_ipv4_mcast_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) =
{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
/**
* is_link_local_ether_addr - Determine if given Ethernet address is link-local
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return: true if address is link local reserved addr (01:80:c2:00:00:0X) per
* IEEE 802.1Q 8.6.3 Frame filtering.
*
* Please note: addr must be aligned to u16.
*/
static inline bool is_link_local_ether_addr(const u8 *addr)
{
__be16 *a = (__be16 *)addr;
static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
static const __be16 m = cpu_to_be16(0xfff0);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
(__force int)((a[2] ^ b[2]) & m)) == 0;
#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
}
/**
 * is_zero_ether_addr - Determine if the given Ethernet address is all zeros.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return: true if the address is all zeroes.
*
* Please note: addr must be aligned to u16.
*/
static inline bool is_zero_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
#else
return (*(const u16 *)(addr + 0) |
*(const u16 *)(addr + 2) |
*(const u16 *)(addr + 4)) == 0;
#endif
}
/**
* is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return: true if the address is a multicast address.
* By definition the broadcast address is also a multicast address.
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
u32 a = *(const u32 *)addr;
#else
u16 a = *(const u16 *)addr;
#endif
#ifdef __BIG_ENDIAN
return 0x01 & (a >> ((sizeof(a) * 8) - 8));
#else
return 0x01 & a;
#endif
}
static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
return 0x01 & ((*(const u64 *)addr) >> 56);
#else
return 0x01 & (*(const u64 *)addr);
#endif
#else
return is_multicast_ether_addr(addr);
#endif
}
/**
 * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802).
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return: true if the address is a local address.
*/
static inline bool is_local_ether_addr(const u8 *addr)
{
return 0x02 & addr[0];
}
/**
* is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return: true if the address is the broadcast address.
*
* Please note: addr must be aligned to u16.
*/
static inline bool is_broadcast_ether_addr(const u8 *addr)
{
return (*(const u16 *)(addr + 0) &
*(const u16 *)(addr + 2) &
*(const u16 *)(addr + 4)) == 0xffff;
}
/**
* is_unicast_ether_addr - Determine if the Ethernet address is unicast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return: true if the address is a unicast address.
*/
static inline bool is_unicast_ether_addr(const u8 *addr)
{
return !is_multicast_ether_addr(addr);
}
/**
* is_valid_ether_addr - Determine if the given Ethernet address is valid
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
* a multicast address, and is not FF:FF:FF:FF:FF:FF.
*
* Return: true if the address is valid.
*
* Please note: addr must be aligned to u16.
*/
static inline bool is_valid_ether_addr(const u8 *addr)
{
/* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to
* explicitly check for it here. */
return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
}
/**
* eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
* @proto: Ethertype/length value to be tested
*
* Check that the value from the Ethertype/length field is a valid Ethertype.
*
 * Return: true if the value is a valid 802.3 supported Ethertype.
*/
static inline bool eth_proto_is_802_3(__be16 proto)
{
#ifndef __BIG_ENDIAN
/* if CPU is little endian mask off bits representing LSB */
proto &= htons(0xFF00);
#endif
/* cast both to u16 and compare since LSB can be ignored */
return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}
/**
* eth_random_addr - Generate software assigned random Ethernet address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Generate a random Ethernet address (MAC) that is not multicast
* and has the local assigned bit set.
*/
static inline void eth_random_addr(u8 *addr)
{
get_random_bytes(addr, ETH_ALEN);
addr[0] &= 0xfe; /* clear multicast bit */
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Assign the broadcast address to the given address array.
*/
static inline void eth_broadcast_addr(u8 *addr)
{
memset(addr, 0xff, ETH_ALEN);
}
/**
* eth_zero_addr - Assign zero address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Assign the zero address to the given address array.
*/
static inline void eth_zero_addr(u8 *addr)
{
memset(addr, 0x00, ETH_ALEN);
}
/**
* eth_hw_addr_random - Generate software assigned random Ethernet and
* set device flag
* @dev: pointer to net_device structure
*
* Generate a random Ethernet address (MAC) to be used by a net device
* and set addr_assign_type so the state can be read by sysfs and be
* used by userspace.
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
u8 addr[ETH_ALEN];
eth_random_addr(addr);
__dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
}
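/*
 * Example (illustrative sketch; foo_probe() and struct foo_priv are
 * hypothetical): a driver that cannot obtain a MAC address from firmware or
 * platform data may fall back to a random, locally administered address.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct net_device *ndev;
 *
 *		ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ndev)
 *			return -ENOMEM;
 *		if (platform_get_ethdev_address(&pdev->dev, ndev))
 *			eth_hw_addr_random(ndev);
 *		...
 *	}
 */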
/**
* eth_hw_addr_crc - Calculate CRC from netdev_hw_addr
* @ha: pointer to hardware address
*
* Calculate CRC from a hardware address as basis for filter hashes.
*/
static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
{
return ether_crc(ETH_ALEN, ha->addr);
}
/**
* ether_addr_copy - Copy an Ethernet address
* @dst: Pointer to a six-byte array Ethernet address destination
* @src: Pointer to a six-byte array Ethernet address source
*
* Please note: dst & src must both be aligned to u16.
*/
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
u16 *a = (u16 *)dst;
const u16 *b = (const u16 *)src;
a[0] = b[0];
a[1] = b[1];
a[2] = b[2];
#endif
}
/**
* eth_hw_addr_set - Assign Ethernet address to a net_device
* @dev: pointer to net_device structure
* @addr: address to assign
*
* Assign given address to the net_device, addr_assign_type is not changed.
*/
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
__dev_addr_set(dev, addr, ETH_ALEN);
}
/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
* @src: pointer to net_device to copy dev_addr from
*
* Copy the Ethernet address from one net_device to another along with
* the address attributes (addr_assign_type).
*/
static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
eth_hw_addr_set(dst, src->dev_addr);
}
/**
* ether_addr_equal - Compare two Ethernet addresses
* @addr1: Pointer to a six-byte array containing the Ethernet address
* @addr2: Pointer other six-byte array containing the Ethernet address
*
* Compare two Ethernet addresses, returns true if equal
*
* Please note: addr1 & addr2 must both be aligned to u16.
*/
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));
return fold == 0;
#else
const u16 *a = (const u16 *)addr1;
const u16 *b = (const u16 *)addr2;
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
/**
* ether_addr_equal_64bits - Compare two Ethernet addresses
* @addr1: Pointer to an array of 8 bytes
* @addr2: Pointer to an other array of 8 bytes
*
* Compare two Ethernet addresses, returns true if equal, false otherwise.
*
* The function doesn't need any conditional branches and possibly uses
* word memory accesses on CPU allowing cheap unaligned memory reads.
* arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
*
 * Please note that the alignment of addr1 & addr2 is only guaranteed to be 16 bits.
*/
static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
#ifdef __BIG_ENDIAN
return (fold >> 16) == 0;
#else
return (fold << 16) == 0;
#endif
#else
return ether_addr_equal(addr1, addr2);
#endif
}
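/*
 * Example (illustrative sketch): callers must guarantee that at least two
 * readable padding bytes follow each six-byte address, e.g. by sizing the
 * backing arrays accordingly; the padding content is ignored by the compare.
 *
 *	u8 a[ETH_ALEN + 2] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	u8 b[ETH_ALEN + 2] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	if (ether_addr_equal_64bits(a, b))
 *		...;
 */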
/**
* ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
* @addr1: Pointer to a six-byte array containing the Ethernet address
* @addr2: Pointer other six-byte array containing the Ethernet address
*
* Compare two Ethernet addresses, returns true if equal
*
* Please note: Use only when any Ethernet address may not be u16 aligned.
*/
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return ether_addr_equal(addr1, addr2);
#else
return memcmp(addr1, addr2, ETH_ALEN) == 0;
#endif
}
/**
* ether_addr_equal_masked - Compare two Ethernet addresses with a mask
* @addr1: Pointer to a six-byte array containing the 1st Ethernet address
* @addr2: Pointer to a six-byte array containing the 2nd Ethernet address
* @mask: Pointer to a six-byte array containing the Ethernet address bitmask
*
* Compare two Ethernet addresses with a mask, returns true if for every bit
* set in the bitmask the equivalent bits in the ethernet addresses are equal.
* Using a mask with all bits set is a slower ether_addr_equal.
*/
static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
const u8 *mask)
{
int i;
for (i = 0; i < ETH_ALEN; i++) {
if ((addr1[i] ^ addr2[i]) & mask[i])
return false;
}
return true;
}
static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
{
u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
return ether_addr_equal_masked(addr, eth_ipv4_mcast_addr_base, mask);
}
static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
{
u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
return ether_addr_equal_masked(addr, eth_ipv6_mcast_addr_base, mask);
}
static inline bool ether_addr_is_ip_mcast(const u8 *addr)
{
return ether_addr_is_ipv4_mcast(addr) ||
ether_addr_is_ipv6_mcast(addr);
}
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return: a u64 value of the address
*/
static inline u64 ether_addr_to_u64(const u8 *addr)
{
u64 u = 0;
int i;
for (i = 0; i < ETH_ALEN; i++)
u = u << 8 | addr[i];
return u;
}
/**
* u64_to_ether_addr - Convert a u64 to an Ethernet address.
* @u: u64 to convert to an Ethernet MAC address
* @addr: Pointer to a six-byte array to contain the Ethernet address
*/
static inline void u64_to_ether_addr(u64 u, u8 *addr)
{
int i;
for (i = ETH_ALEN - 1; i >= 0; i--) {
addr[i] = u & 0xff;
u = u >> 8;
}
}
/**
* eth_addr_dec - Decrement the given MAC address
*
* @addr: Pointer to a six-byte array containing Ethernet address to decrement
*/
static inline void eth_addr_dec(u8 *addr)
{
u64 u = ether_addr_to_u64(addr);
u--;
u64_to_ether_addr(u, addr);
}
/**
* eth_addr_inc() - Increment the given MAC address.
* @addr: Pointer to a six-byte array containing Ethernet address to increment.
*/
static inline void eth_addr_inc(u8 *addr)
{
u64 u = ether_addr_to_u64(addr);
u++;
u64_to_ether_addr(u, addr);
}
/**
* eth_addr_add() - Add (or subtract) an offset to/from the given MAC address.
*
* @offset: Offset to add.
* @addr: Pointer to a six-byte array containing Ethernet address to increment.
*/
static inline void eth_addr_add(u8 *addr, long offset)
{
u64 u = ether_addr_to_u64(addr);
u += offset;
u64_to_ether_addr(u, addr);
}
/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Compare passed address with all addresses of the device. Return true if the
 * address is one of the device addresses.
*
* Note that this function calls ether_addr_equal_64bits() so take care of
* the right padding.
*/
static inline bool is_etherdev_addr(const struct net_device *dev,
const u8 addr[6 + 2])
{
struct netdev_hw_addr *ha;
bool res = false;
rcu_read_lock();
for_each_dev_addr(dev, ha) {
res = ether_addr_equal_64bits(addr, ha->addr);
if (res)
break;
}
rcu_read_unlock();
return res;
}
#endif /* __KERNEL__ */
/**
* compare_ether_header - Compare two Ethernet headers
* @a: Pointer to Ethernet header
* @b: Pointer to Ethernet header
*
* Compare two Ethernet headers, returns 0 if equal.
* This assumes that the network header (i.e., IP header) is 4-byte
* aligned OR the platform can handle unaligned access. This is the
* case for all packets coming into netif_receive_skb or similar
* entry points.
*/
static inline unsigned long compare_ether_header(const void *a, const void *b)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
unsigned long fold;
/*
* We want to compare 14 bytes:
* [a0 ... a13] ^ [b0 ... b13]
* Use two long XOR, ORed together, with an overlap of two bytes.
* [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
* [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
* This means the [a6 a7] ^ [b6 b7] part is done two times.
*/
fold = *(unsigned long *)a ^ *(unsigned long *)b;
fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
return fold;
#else
u32 *a32 = (u32 *)((u8 *)a + 2);
u32 *b32 = (u32 *)((u8 *)b + 2);
return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
(a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
#endif
}
/**
* eth_hw_addr_gen - Generate and assign Ethernet address to a port
* @dev: pointer to port's net_device structure
* @base_addr: base Ethernet address
* @id: offset to add to the base address
*
* Generate a MAC address using a base address and an offset and assign it
* to a net_device. Commonly used by switch drivers which need to compute
* addresses for all their ports. addr_assign_type is not changed.
*/
static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
unsigned int id)
{
u64 u = ether_addr_to_u64(base_addr);
u8 addr[ETH_ALEN];
u += id;
u64_to_ether_addr(u, addr);
eth_hw_addr_set(dev, addr);
}
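/*
 * Example (illustrative sketch; the switch structures are hypothetical): a
 * switch driver deriving one address per port from a single base address.
 *
 *	for (i = 0; i < sw->num_ports; i++)
 *		eth_hw_addr_gen(sw->ports[i].ndev, sw->base_mac, i + 1);
 */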
/**
* eth_skb_pkt_type - Assign packet type if destination address does not match
* @skb: Assigned a packet type if address does not match @dev address
* @dev: Network device used to compare packet address against
*
* If the destination MAC address of the packet does not match the network
* device address, assign an appropriate packet type.
*/
static inline void eth_skb_pkt_type(struct sk_buff *skb,
const struct net_device *dev)
{
const struct ethhdr *eth = eth_hdr(skb);
if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
} else {
skb->pkt_type = PACKET_OTHERHOST;
}
}
}
static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)skb->data;
skb_pull_inline(skb, ETH_HLEN);
return eth;
}
/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
* An Ethernet frame should have a minimum size of 60 bytes. This function
* takes short frames and pads them with zeros up to the 60 byte limit.
*/
static inline int eth_skb_pad(struct sk_buff *skb)
{
return skb_put_padto(skb, ETH_ZLEN);
}
#endif /* _LINUX_ETHERDEVICE_H */
/**
* css_get - obtain a reference on the specified css
* @css: target css
*
* The caller must already have a reference.
*/
CGROUP_REF_FN_ATTRS
void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}
CGROUP_REF_EXPORT(css_get)
/**
* css_get_many - obtain references on the specified css
* @css: target css
* @n: number of references to get
*
* The caller must already have a reference.
*/
CGROUP_REF_FN_ATTRS
void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
if (!(css->flags & CSS_NO_REF))
percpu_ref_get_many(&css->refcnt, n);
}
CGROUP_REF_EXPORT(css_get_many)
/**
* css_tryget - try to obtain a reference on the specified css
* @css: target css
*
* Obtain a reference on @css unless it already has reached zero and is
* being released. This function doesn't care whether @css is on or
* offline. The caller naturally needs to ensure that @css is accessible
* but doesn't have to be holding a reference on it - IOW, RCU protected
* access is good enough for this function. Returns %true if a reference
* count was successfully obtained; %false otherwise.
*/
CGROUP_REF_FN_ATTRS
bool css_tryget(struct cgroup_subsys_state *css)
{
if (!(css->flags & CSS_NO_REF))
return percpu_ref_tryget(&css->refcnt);
return true;
}
CGROUP_REF_EXPORT(css_tryget)
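/*
 * Example (illustrative sketch; the lookup helper is hypothetical): taking a
 * reference on a css found under RCU protection. css_tryget() fails once the
 * reference count has reached zero, so a failed tryget must be treated as
 * "object not found".
 *
 *	rcu_read_lock();
 *	css = lookup_css_somehow(...);
 *	if (css && !css_tryget(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		// use css
 *		css_put(css);
 *	}
 */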
/**
* css_tryget_online - try to obtain a reference on the specified css if online
* @css: target css
*
* Obtain a reference on @css if it's online. The caller naturally needs
* to ensure that @css is accessible but doesn't have to be holding a
* reference on it - IOW, RCU protected access is good enough for this
* function. Returns %true if a reference count was successfully obtained;
* %false otherwise.
*/
CGROUP_REF_FN_ATTRS
bool css_tryget_online(struct cgroup_subsys_state *css)
{
if (!(css->flags & CSS_NO_REF))
return percpu_ref_tryget_live(&css->refcnt);
return true;
}
CGROUP_REF_EXPORT(css_tryget_online)
/**
* css_put - put a css reference
* @css: target css
*
* Put a reference obtained via css_get() and css_tryget_online().
*/
CGROUP_REF_FN_ATTRS
void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}
CGROUP_REF_EXPORT(css_put)
/**
* css_put_many - put css references
* @css: target css
* @n: number of references to put
*
* Put references obtained via css_get() and css_tryget_online().
*/
CGROUP_REF_FN_ATTRS
void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
if (!(css->flags & CSS_NO_REF))
percpu_ref_put_many(&css->refcnt, n);
}
CGROUP_REF_EXPORT(css_put_many)
// SPDX-License-Identifier: GPL-2.0-only
/*
* Network interface table.
*
* Network interfaces (devices) do not have a security field, so we
* maintain a table associating each interface with a SID.
*
* Author: James Morris <jmorris@redhat.com>
*
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include "security.h"
#include "objsec.h"
#include "netif.h"
#define SEL_NETIF_HASH_SIZE 64
#define SEL_NETIF_HASH_MAX 1024
struct sel_netif {
struct list_head list;
struct netif_security_struct nsec;
struct rcu_head rcu_head;
};
static u32 sel_netif_total;
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
/**
* sel_netif_hashfn - Hashing function for the interface table
* @ns: the network namespace
* @ifindex: the network interface
*
* Description:
* This is the hashing function for the network interface table, it returns the
* bucket number for the given interface.
*
*/
static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
{
return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
}
/**
* sel_netif_find - Search for an interface record
* @ns: the network namespace
* @ifindex: the network interface
*
* Description:
* Search the network interface table and return the record matching @ifindex.
 * If an entry cannot be found in the table, return NULL.
*
*/
static inline struct sel_netif *sel_netif_find(const struct net *ns,
int ifindex)
{
u32 idx = sel_netif_hashfn(ns, ifindex);
struct sel_netif *netif;
	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
		if (net_eq(netif->nsec.ns, ns) &&
		    netif->nsec.ifindex == ifindex)
			return netif;
return NULL;
}
/**
* sel_netif_insert - Insert a new interface into the table
* @netif: the new interface record
*
* Description:
* Add a new interface record to the network interface hash table. Returns
* zero on success, negative values on failure.
*
*/
static int sel_netif_insert(struct sel_netif *netif)
{
u32 idx;
if (sel_netif_total >= SEL_NETIF_HASH_MAX)
return -ENOSPC;
idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
list_add_rcu(&netif->list, &sel_netif_hash[idx]);
sel_netif_total++;
return 0;
}
/**
* sel_netif_destroy - Remove an interface record from the table
* @netif: the existing interface record
*
* Description:
* Remove an existing interface record from the network interface table.
*
*/
static void sel_netif_destroy(struct sel_netif *netif)
{
list_del_rcu(&netif->list);
sel_netif_total--;
kfree_rcu(netif, rcu_head);
}
/**
* sel_netif_sid_slow - Lookup the SID of a network interface using the policy
* @ns: the network namespace
* @ifindex: the network interface
* @sid: interface SID
*
* Description:
* This function determines the SID of a network interface by querying the
* security policy. The result is added to the network interface table to
 * speed up future queries. Returns zero on success, negative values on
* failure.
*
*/
static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
int ret = 0;
struct sel_netif *netif;
struct sel_netif *new;
struct net_device *dev;
	/* Look the device up in the caller's network namespace. */
dev = dev_get_by_index(ns, ifindex);
if (unlikely(dev == NULL)) {
pr_warn("SELinux: failure in %s(), invalid network interface (%d)\n",
__func__, ifindex);
return -ENOENT;
}
spin_lock_bh(&sel_netif_lock);
netif = sel_netif_find(ns, ifindex);
if (netif != NULL) {
*sid = netif->nsec.sid;
goto out;
}
ret = security_netif_sid(dev->name, sid);
if (ret != 0)
goto out;
	/* If this memory allocation fails, still return 0. The SID
* is valid, it just won't be added to the cache.
*/
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (new) {
new->nsec.ns = ns;
new->nsec.ifindex = ifindex;
new->nsec.sid = *sid;
if (sel_netif_insert(new))
kfree(new);
}
out:
spin_unlock_bh(&sel_netif_lock);
dev_put(dev);
if (unlikely(ret))
pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
__func__, ifindex);
return ret;
}
/**
* sel_netif_sid - Lookup the SID of a network interface
* @ns: the network namespace
* @ifindex: the network interface
* @sid: interface SID
*
* Description:
* This function determines the SID of a network interface using the fastest
* method possible. First the interface table is queried, but if an entry
* can't be found then the policy is queried and the result is added to the
 * table to speed up future queries. Returns zero on success, negative values
* on failure.
*
*/
int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
{
struct sel_netif *netif;
rcu_read_lock();
netif = sel_netif_find(ns, ifindex);
if (likely(netif != NULL)) {
*sid = netif->nsec.sid;
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
return sel_netif_sid_slow(ns, ifindex, sid);
}
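/*
 * Illustrative usage sketch (not part of the original source; the function
 * name is hypothetical): a hook that needs an interface SID simply calls
 * sel_netif_sid(); the RCU-protected cache handles the common case and a
 * miss falls back to the policy via sel_netif_sid_slow().
 */
#if 0
static int example_check_ifsid(struct net *ns, int ifindex)
{
	u32 if_sid;
	int err;

	err = sel_netif_sid(ns, ifindex, &if_sid);
	if (err)
		return err;
	/* ... pass if_sid to an access check such as avc_has_perm() ... */
	return 0;
}
#endif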
/**
* sel_netif_kill - Remove an entry from the network interface table
* @ns: the network namespace
* @ifindex: the network interface
*
* Description:
* This function removes the entry matching @ifindex from the network interface
* table if it exists.
*
*/
static void sel_netif_kill(const struct net *ns, int ifindex)
{
struct sel_netif *netif;
	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ns, ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}
/**
* sel_netif_flush - Flush the entire network interface table
*
* Description:
* Remove all entries from the network interface table.
*
*/
void sel_netif_flush(void)
{
int idx;
struct sel_netif *netif;
spin_lock_bh(&sel_netif_lock);
for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
list_for_each_entry(netif, &sel_netif_hash[idx], list)
sel_netif_destroy(netif);
spin_unlock_bh(&sel_netif_lock);
}
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev_net(dev), dev->ifindex);
	return NOTIFY_DONE;
}
static struct notifier_block sel_netif_netdev_notifier = {
.notifier_call = sel_netif_netdev_notifier_handler,
};
static __init int sel_netif_init(void)
{
int i;
if (!selinux_enabled_boot)
return 0;
for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
INIT_LIST_HEAD(&sel_netif_hash[i]);
register_netdevice_notifier(&sel_netif_netdev_notifier);
return 0;
}
__initcall(sel_netif_init);
// SPDX-License-Identifier: GPL-2.0
/*
* fs/sysfs/dir.c - sysfs core and dir operation implementation
*
* Copyright (c) 2001-3 Patrick Mochel
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
*
* Please see Documentation/filesystems/sysfs.rst for more information.
*/
#define pr_fmt(fmt) "sysfs: " fmt
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include "sysfs.h"
DEFINE_SPINLOCK(sysfs_symlink_target_lock);
void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
{
char *buf;
buf = kzalloc(PATH_MAX, GFP_KERNEL);
if (buf)
kernfs_path(parent, buf, PATH_MAX);
pr_warn("cannot create duplicate filename '%s/%s'\n", buf, name);
dump_stack();
kfree(buf);
}
/**
* sysfs_create_dir_ns - create a directory for an object with a namespace tag
* @kobj: object we're creating directory for
* @ns: the namespace tag to use
*/
int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
struct kernfs_node *parent, *kn;
kuid_t uid;
kgid_t gid;
if (WARN_ON(!kobj))
return -EINVAL;
if (kobj->parent)
parent = kobj->parent->sd;
else
		parent = sysfs_root_kn;
	if (!parent)
return -ENOENT;
kobject_get_ownership(kobj, &uid, &gid);
kn = kernfs_create_dir_ns(parent, kobject_name(kobj), 0755, uid, gid,
kobj, ns);
if (IS_ERR(kn)) {
		if (PTR_ERR(kn) == -EEXIST)
			sysfs_warn_dup(parent, kobject_name(kobj));
		return PTR_ERR(kn);
}
	kobj->sd = kn;
	return 0;
}
/**
* sysfs_remove_dir - remove an object's directory.
* @kobj: object.
*
* The only thing special about this is that we remove any files in
* the directory before we remove the directory, and we've inlined
* what used to be sysfs_rmdir() below, instead of calling separately.
*/
void sysfs_remove_dir(struct kobject *kobj)
{
struct kernfs_node *kn = kobj->sd;
/*
* In general, kobject owner is responsible for ensuring removal
* doesn't race with other operations and sysfs doesn't provide any
* protection; however, when @kobj is used as a symlink target, the
* symlinking entity usually doesn't own @kobj and thus has no
* control over removal. @kobj->sd may be removed anytime
* and symlink code may end up dereferencing an already freed node.
*
* sysfs_symlink_target_lock synchronizes @kobj->sd
* disassociation against symlink operations so that symlink code
* can safely dereference @kobj->sd.
*/
spin_lock(&sysfs_symlink_target_lock);
kobj->sd = NULL;
spin_unlock(&sysfs_symlink_target_lock);
if (kn) {
WARN_ON_ONCE(kernfs_type(kn) != KERNFS_DIR);
kernfs_remove(kn);
}
}
int sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
const void *new_ns)
{
struct kernfs_node *parent;
int ret;
parent = kernfs_get_parent(kobj->sd);
ret = kernfs_rename_ns(kobj->sd, parent, new_name, new_ns);
kernfs_put(parent);
return ret;
}
int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj,
const void *new_ns)
{
struct kernfs_node *kn = kobj->sd;
struct kernfs_node *new_parent;
new_parent = new_parent_kobj && new_parent_kobj->sd ?
new_parent_kobj->sd : sysfs_root_kn;
return kernfs_rename_ns(kn, new_parent, NULL, new_ns);
}
/**
* sysfs_create_mount_point - create an always empty directory
* @parent_kobj: kobject that will contain this always empty directory
* @name: The name of the always empty directory to add
*/
int sysfs_create_mount_point(struct kobject *parent_kobj, const char *name)
{
struct kernfs_node *kn, *parent = parent_kobj->sd;
kn = kernfs_create_empty_dir(parent, name);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(parent, name);
return PTR_ERR(kn);
}
return 0;
}
EXPORT_SYMBOL_GPL(sysfs_create_mount_point);
/**
* sysfs_remove_mount_point - remove an always empty directory.
* @parent_kobj: kobject that will contain this always empty directory
* @name: The name of the always empty directory to remove
*
*/
void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name)
{
struct kernfs_node *parent = parent_kobj->sd;
kernfs_remove_by_name_ns(parent, name, NULL);
}
EXPORT_SYMBOL_GPL(sysfs_remove_mount_point);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ratelimit.c - Generic helpers to rate limit repeated actions.
*
* Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
*
* 2008-05-01 rewrite the function and use a ratelimit_state data struct as
* parameter. Now every user can use their own standalone ratelimit_state.
*/
#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/export.h>
/*
* __ratelimit - rate limiting
* @rs: ratelimit_state data
* @func: name of calling function
*
* This enforces a rate limit: not more than @rs->burst callbacks
* in every @rs->interval
*
* RETURNS:
* 0 means callbacks will be suppressed.
* 1 means go ahead and do it.
*/
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
/* Paired with WRITE_ONCE() in .proc_handler().
 * Changing two values separately could be inconsistent
* and some message could be lost. (See: net_ratelimit_state).
*/
int interval = READ_ONCE(rs->interval);
int burst = READ_ONCE(rs->burst);
unsigned long flags;
int ret = 0;
/*
* Zero interval says never limit, otherwise, non-positive burst
* says always limit.
*/
	if (interval <= 0 || burst <= 0) {
		WARN_ONCE(interval < 0 || burst < 0,
			  "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n",
			  interval, burst);
		ret = interval == 0 || burst > 0;
		if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) ||
		    (!interval && !burst) ||
		    !raw_spin_trylock_irqsave(&rs->lock, flags))
goto nolock_ret;
/* Force re-initialization once re-enabled. */
rs->flags &= ~RATELIMIT_INITIALIZED;
goto unlock_ret;
}
/*
* If we contend on this state's lock then just check if
* the current burst is used or not. It might cause
* false positive when we are past the interval and
* the current lock owner is just about to reset it.
*/
	if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
		if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED &&
		    atomic_read(&rs->rs_n_left) > 0 &&
		    atomic_dec_return(&rs->rs_n_left) >= 0)
ret = 1;
goto nolock_ret;
}
if (!(rs->flags & RATELIMIT_INITIALIZED)) {
rs->begin = jiffies;
rs->flags |= RATELIMIT_INITIALIZED;
atomic_set(&rs->rs_n_left, rs->burst);
}
if (time_is_before_jiffies(rs->begin + interval)) {
int m;
/*
* Reset rs_n_left ASAP to reduce false positives
* in parallel calls, see above.
*/
atomic_set(&rs->rs_n_left, rs->burst);
rs->begin = jiffies;
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
m = ratelimit_state_reset_miss(rs);
if (m) {
printk_deferred(KERN_WARNING
"%s: %d callbacks suppressed\n", func, m);
}
}
}
/* Note that the burst might be taken by a parallel call. */
if (atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
ret = 1;
unlock_ret:
raw_spin_unlock_irqrestore(&rs->lock, flags);
nolock_ret:
	if (!ret)
		ratelimit_state_inc_miss(rs);

	return ret;
}
EXPORT_SYMBOL(___ratelimit);
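/*
 * Illustrative usage sketch (not part of the original source; names are
 * hypothetical): a caller declares its own ratelimit_state and gates noisy
 * messages through the __ratelimit() wrapper, here allowing at most three
 * messages every five seconds.
 */
#if 0
static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 3);

static void example_report_error(int err)
{
	if (__ratelimit(&example_rs))
		pr_warn("example: operation failed with error %d\n", err);
}
#endif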
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGTABLE_H
#define _LINUX_PGTABLE_H
#include <linux/pfn.h>
#include <asm/pgtable.h>
#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>
#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif
/*
* On almost all architectures and configurations, 0 can be used as the
* upper ceiling to free_pgtables(): on many architectures it has the same
* effect as using TASK_SIZE. However, there is one configuration which
* must impose a more careful limit, to avoid freeing kernel pgtables.
*/
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING 0UL
#endif
/*
* This defines the first usable user address. Platforms
* can override its value with custom FIRST_USER_ADDRESS
* defined in their respective <asm/pgtable.h>.
*/
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS 0UL
#endif
/*
* This defines the generic helper for accessing PMD page
* table page. Although platforms can still override this
* via their respective <asm/pgtable.h>.
*/
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
#define pmd_folio(pmd) page_folio(pmd_page(pmd))
/*
 * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
*
* The pXx_index() functions return the index of the entry in the page
* table page which would control the given virtual address
*
* As these functions may be used by the same code for different levels of
* the page table folding, they are always available, regardless of
* CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
* because in such cases PTRS_PER_PxD equals 1.
*/
static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif
#ifndef pud_index
static inline unsigned long pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pud_index pud_index
#endif
#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif
#ifndef kernel_pte_init
static inline void kernel_pte_init(void *addr)
{
}
#define kernel_pte_init kernel_pte_init
#endif
#ifndef pmd_init
static inline void pmd_init(void *addr)
{
}
#define pmd_init pmd_init
#endif
#ifndef pud_init
static inline void pud_init(void *addr)
{
}
#define pud_init pud_init
#endif
#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif
#ifdef CONFIG_HIGHPTE
#define __pte_map(pmd, address) \
((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
#define pte_unmap(pte) do { \
kunmap_local((pte)); \
rcu_read_unlock(); \
} while (0)
#else
static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
{
return pte_offset_kernel(pmd, address);
}
static inline void pte_unmap(pte_t *pte)
{
	rcu_read_unlock();
}
#endif
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif
#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif
static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
return (pgd + pgd_index(address));
};
/*
* a shortcut to get a pgd_t in a given mm
*/
#ifndef pgd_offset
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
#endif
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
/*
* In many cases it is known that a virtual address is mapped at PMD or PTE
* level, so instead of traversing all the page table levels, we can get a
* pointer to the PMD entry in user or kernel page table or translate a virtual
* address to the pointer in the PTE in the kernel page tables with simple
* helpers.
*/
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}
static inline pmd_t *pmd_off_k(unsigned long va)
{
return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
pmd_t *pmd = pmd_off_k(vaddr);
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
#ifndef pmd_young
static inline int pmd_young(pmd_t pmd)
{
return 0;
}
#endif
#ifndef pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
return 0;
}
#endif
/*
* A facility to provide lazy MMU batching. This allows PTE updates and
* page invalidations to be delayed until a call to leave lazy MMU mode
* is issued. Some architectures may benefit from doing this, and it is
* beneficial for both shadow and direct mode hypervisors, which may batch
* the PTE updates which happen during this window. Note that using this
* interface requires that read hazards be removed from the code. A read
* hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
* a raw PTE pointer after it has been modified are not guaranteed to be
* up to date.
*
* In the general case, no lock is guaranteed to be held between entry and exit
* of the lazy mode. So the implementation must assume preemption may be enabled
* and cpu migration is possible; it must take steps to be robust against this.
* (In practice, for user PTE updates, the appropriate page table lock(s) are
* held, but for kernel PTE updates, no lock is held). Nesting is not permitted
* and the mode cannot be used in interrupt context.
*/
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void) {}
static inline void arch_leave_lazy_mmu_mode(void) {}
static inline void arch_flush_lazy_mmu_mode(void) {}
#endif
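/*
 * Illustrative usage sketch (not part of the original source; the function
 * name is hypothetical): code that updates many PTEs under the page table
 * lock brackets the loop with the lazy MMU hooks so that architectures which
 * implement them can batch the updates.
 */
#if 0
static void example_set_pte_range(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE)
		set_pte_at(mm, addr, ptep, pte_advance_pfn(pte, i));
	arch_leave_lazy_mmu_mode();
}
#endif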
#ifndef pte_batch_hint
/**
* pte_batch_hint - Number of pages that can be added to batch without scanning.
* @ptep: Page table pointer for the entry.
* @pte: Page table entry.
*
* Some architectures know that a set of contiguous ptes all map the same
* contiguous memory with the same permissions. In this case, it can provide a
* hint to aid pte batching without the core code needing to scan every pte.
*
* An architecture implementation may ignore the PTE accessed state. Further,
* the dirty state must apply atomically to all the PTEs described by the hint.
*
* May be overridden by the architecture, else pte_batch_hint is always 1.
*/
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
return 1;
}
#endif
#ifndef pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
}
#endif
#define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
#ifndef set_ptes
/**
* set_ptes - Map consecutive pages to a contiguous range of addresses.
* @mm: Address space to map the pages into.
* @addr: Address to map the first page at.
* @ptep: Page table pointer for the first entry.
* @pte: Page table entry for the first page.
* @nr: Number of pages to map.
*
* When nr==1, initial state of pte may be present or not present, and new state
* may be present or not present. When nr>1, initial state of all ptes must be
* not present, and new state must be present.
*
* May be overridden by the architecture, or the architecture can define
* set_pte() and PFN_PTE_SHIFT.
*
* Context: The caller holds the page table lock. The pages all belong
* to the same folio. The PTEs are all in the same PMD.
*/
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
page_table_check_ptes_set(mm, ptep, pte, nr);
for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
}
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
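/*
 * Illustrative usage sketch (not part of the original source; the function
 * name is hypothetical): mapping the first @nr pages of a folio with
 * set_ptes(). Only the PTE for the first page is constructed; set_ptes()
 * advances the PFN for the following entries.
 */
#if 0
static void example_map_folio_range(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    struct folio *folio, unsigned int nr)
{
	pte_t pte = mk_pte(folio_page(folio, 0), vma->vm_page_prot);

	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
}
#endif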
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
#endif
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
BUILD_BUG();
return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
pud_t entry, int dirty)
{
BUILD_BUG();
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
#endif
#ifndef pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
return READ_ONCE(*pmdp);
}
#endif
#ifndef pudp_get
static inline pud_t pudp_get(pud_t *pudp)
{
return READ_ONCE(*pudp);
}
#endif
#ifndef p4dp_get
static inline p4d_t p4dp_get(p4d_t *p4dp)
{
return READ_ONCE(*p4dp);
}
#endif
#ifndef pgdp_get
static inline pgd_t pgdp_get(pgd_t *pgdp)
{
return READ_ONCE(*pgdp);
}
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
int r = 1;
if (!pte_young(pte))
r = 0;
else
set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
return r;
}
#endif
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
int r = 1;
if (!pmd_young(pmd))
r = 0;
else
set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
{
BUILD_BUG();
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
#endif
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#else
/*
 * Although relevant only to THP, this API is called from generic rmap code
 * under PageTransHuge(), hence it needs a dummy implementation for !THP
*/
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
BUILD_BUG();
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef arch_has_hw_nonleaf_pmd_young
/*
* Return whether the accessed bit in non-leaf PMD entries is supported on the
* local CPU.
*/
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
}
#endif
#ifndef arch_has_hw_pte_young
/*
* Return whether the accessed bit is supported on the local CPU.
*
* This stub assumes accessing through an old PTE triggers a page fault.
 * Architectures whose hardware sets the accessed bit automatically should override it.
*/
static inline bool arch_has_hw_pte_young(void)
{
return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif
#ifndef exec_folio_order
/*
 * Returns the preferred minimum folio order for executable file-backed memory.
 * Must be in the range [0, PMD_ORDER). Defaults to order-0.
*/
static inline unsigned int exec_folio_order(void)
{
return 0;
}
#endif
#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
pte_t pte)
{
}
#endif
#ifndef arch_check_zapped_pmd
static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
pmd_t pmd)
{
}
#endif
#ifndef arch_check_zapped_pud
static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
{
}
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
page_table_check_pte_clear(mm, pte);
return pte;
}
#endif
#ifndef clear_young_dirty_ptes
/**
* clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
* same folio as old/clean.
 * @vma: The virtual memory area the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries to mark old/clean.
* @flags: Flags to modify the PTE batch semantics.
*
* May be overridden by the architecture; otherwise, implemented by
* get_and_clear/modify/set for each pte in the range.
*
* Note that PTE bits in the PTE range besides the PFN can differ. For example,
* some PTEs might be write-protected.
*
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. The PTEs are all in the same PMD.
*/
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
unsigned int nr, cydp_t flags)
{
pte_t pte;
for (;;) {
if (flags == CYDP_CLEAR_YOUNG)
ptep_test_and_clear_young(vma, addr, ptep);
else {
pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
if (flags & CYDP_CLEAR_YOUNG)
pte = pte_mkold(pte);
if (flags & CYDP_CLEAR_DIRTY)
pte = pte_mkclean(pte);
set_pte_at(vma->vm_mm, addr, ptep, pte);
}
if (--nr == 0)
break;
ptep++;
addr += PAGE_SIZE;
}
}
#endif
static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
pte_clear(mm, addr, ptep);
/*
* No need for ptep_get_and_clear(): page table check doesn't care about
* any bits that could have been set by HW concurrently.
*/
page_table_check_pte_clear(mm, pte);
}
#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
/*
* For walking the pagetables without holding any locks. Some architectures
* (eg x86-32 PAE) cannot load the entries atomically without using expensive
* instructions. We are guaranteed that a PTE will only either go from not
* present to present, or present to not present -- it will not switch to a
 * completely different present page without a TLB flush in between, which we
* are blocking by holding interrupts off.
*
* Setting ptes from not present to present goes:
*
* ptep->pte_high = h;
* smp_wmb();
* ptep->pte_low = l;
*
* And present to not present goes:
*
* ptep->pte_low = 0;
* smp_wmb();
* ptep->pte_high = 0;
*
* We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
* We load pte_high *after* loading pte_low, which ensures we don't see an older
* value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
* picked up a changed pte high. We might have gotten rubbish values from
* pte_low and pte_high, but we are guaranteed that pte_low will not have the
* present bit set *unless* it is 'l'. Because get_user_pages_fast() only
* operates on present ptes we're safe.
*/
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
pte_t pte;
do {
pte.pte_low = ptep->pte_low;
smp_rmb();
pte.pte_high = ptep->pte_high;
smp_rmb();
} while (unlikely(pte.pte_low != ptep->pte_low));
return pte;
}
#define ptep_get_lockless ptep_get_lockless
#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
pmd_t pmd;
do {
pmd.pmd_low = pmdp->pmd_low;
smp_rmb();
pmd.pmd_high = pmdp->pmd_high;
smp_rmb();
} while (unlikely(pmd.pmd_low != pmdp->pmd_low));
return pmd;
}
#define pmdp_get_lockless pmdp_get_lockless
#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
/*
* We require that the PTE can be read atomically.
*/
#ifndef ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
return ptep_get(ptep);
}
#endif
#ifndef pmdp_get_lockless
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
return pmdp_get(pmdp);
}
static inline void pmdp_get_lockless_sync(void)
{
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address,
pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
page_table_check_pmd_clear(mm, pmd);
return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address,
pud_t *pudp)
{
pud_t pud = *pudp;
pud_clear(pudp);
page_table_check_pud_clear(mm, pud);
return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
int full)
{
return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#endif
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
int full)
{
return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pte_t *ptep,
int full)
{
return ptep_get_and_clear(mm, address, ptep);
}
#endif
#ifndef get_and_clear_full_ptes
/**
* get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
* the same folio, collecting dirty/accessed bits.
* @mm: Address space the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries to clear.
* @full: Whether we are clearing a full mm.
*
* May be overridden by the architecture; otherwise, implemented as a simple
* loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
* returned PTE.
*
* Note that PTE bits in the PTE range besides the PFN can differ. For example,
* some PTEs might be write-protected.
*
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. The PTEs are all in the same PMD.
*/
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
pte_t pte, tmp_pte;
pte = ptep_get_and_clear_full(mm, addr, ptep, full);
while (--nr) {
ptep++;
addr += PAGE_SIZE;
tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
if (pte_young(tmp_pte))
pte = pte_mkyoung(pte);
}
return pte;
}
#endif
/**
* get_and_clear_ptes - Clear present PTEs that map consecutive pages of
* the same folio, collecting dirty/accessed bits.
* @mm: Address space the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries to clear.
*
* Use this instead of get_and_clear_full_ptes() if it is known that we don't
* need to clear the full mm, which is mostly the case.
*
* Note that PTE bits in the PTE range besides the PFN can differ. For example,
* some PTEs might be write-protected.
*
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. The PTEs are all in the same PMD.
*/
static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr)
{
return get_and_clear_full_ptes(mm, addr, ptep, nr, 0);
}
#ifndef clear_full_ptes
/**
* clear_full_ptes - Clear present PTEs that map consecutive pages of the same
* folio.
* @mm: Address space the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries to clear.
* @full: Whether we are clearing a full mm.
*
* May be overridden by the architecture; otherwise, implemented as a simple
* loop over ptep_get_and_clear_full().
*
* Note that PTE bits in the PTE range besides the PFN can differ. For example,
* some PTEs might be write-protected.
*
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. The PTEs are all in the same PMD.
*/
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr, int full)
{
for (;;) {
ptep_get_and_clear_full(mm, addr, ptep, full);
if (--nr == 0)
break;
ptep++;
addr += PAGE_SIZE;
}
}
#endif
/**
* clear_ptes - Clear present PTEs that map consecutive pages of the same folio.
* @mm: Address space the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries to clear.
*
* Use this instead of clear_full_ptes() if it is known that we don't need to
* clear the full mm, which is mostly the case.
*
* Note that PTE bits in the PTE range besides the PFN can differ. For example,
* some PTEs might be write-protected.
*
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. The PTEs are all in the same PMD.
*/
static inline void clear_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr)
{
clear_full_ptes(mm, addr, ptep, nr, 0);
}
/*
* If two threads concurrently fault at the same page, the thread that
* won the race updates the PTE and its local TLB/Cache. The other thread
 * gives up, simply does nothing, and continues; on architectures where
 * software can update the TLB, the local TLB can be updated here to avoid
 * taking the fault again. This function updates the TLB only and does
 * nothing with the cache or anything else, which is what distinguishes it
 * from update_mmu_cache().
*/
#ifndef update_mmu_tlb_range
static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned int nr)
{
}
#endif
static inline void update_mmu_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
update_mmu_tlb_range(vma, address, ptep, 1);
}
/*
* Some architectures may be able to avoid expensive synchronization
* primitives when modifications are made to PTE's which are already
* not present, or in the process of an address space destruction.
*/
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
unsigned long address,
pte_t *ptep,
int full)
{
pte_clear(mm, address, ptep);
}
#endif
#ifndef clear_not_present_full_ptes
/**
* clear_not_present_full_ptes - Clear multiple not present PTEs which are
* consecutive in the pgtable.
* @mm: Address space the ptes represent.
* @addr: Address of the first pte.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries to clear.
* @full: Whether we are clearing a full mm.
*
* May be overridden by the architecture; otherwise, implemented as a simple
* loop over pte_clear_not_present_full().
*
* Context: The caller holds the page table lock. The PTEs are all not present.
* The PTEs are all in the same PMD.
*/
static inline void clear_not_present_full_ptes(struct mm_struct *mm,
unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
for (;;) {
pte_clear_not_present_full(mm, addr, ptep, full);
if (--nr == 0)
break;
ptep++;
addr += PAGE_SIZE;
}
}
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep);
#endif
#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pud_t *pudp);
#endif
#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return pte_mkwrite_novma(pte);
}
#endif
#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
return pmd_mkwrite_novma(pmd);
}
#endif
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
pte_t old_pte = ptep_get(ptep);
set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
#ifndef wrprotect_ptes
/**
* wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
* folio.
* @mm: Address space the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries to write-protect.
*
* May be overridden by the architecture; otherwise, implemented as a simple
* loop over ptep_set_wrprotect().
*
* Note that PTE bits in the PTE range besides the PFN can differ. For example,
* some PTEs might be write-protected.
*
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. The PTEs are all in the same PMD.
*/
static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr)
{
for (;;) {
ptep_set_wrprotect(mm, addr, ptep);
if (--nr == 0)
break;
ptep++;
addr += PAGE_SIZE;
}
}
#endif
/*
 * On some architectures the hardware does not set the page accessed bit when
 * a page is accessed; it is the responsibility of software to set it, which
 * adds an extra page-fault penalty for tracking the bit. As an optimization,
 * the accessed bit can be set during every page-fault path on these
 * architectures. To differentiate it from the pte_mkyoung() macro, this macro
 * is used only on platforms where software maintains the page accessed bit.
*/
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
return pte;
}
#define pte_sw_mkyoung pte_sw_mkyoung
#endif
#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
pmd_t old_pmd = *pmdp;
set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
pud_t old_pud = *pudp;
set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif
#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
{
BUILD_BUG();
return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
#endif
#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* This is an implementation of pmdp_establish() that is only suitable for an
* architecture that doesn't have hardware dirty/accessed bits. In this case we
 * can't race with the CPU setting these bits, so a non-atomic approach is fine.
*/
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
pmd_t old_pmd = *pmdp;
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
return old_pmd;
}
#endif
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
/*
* pmdp_invalidate_ad() invalidates the PMD while changing a transparent
* hugepage mapping in the page tables. This function is similar to
* pmdp_invalidate(), but should only be used if the access and dirty bits would
 * not be cleared by the software in the new PMD value. The function ensures
 * that hardware updates of the access and dirty bits are not lost.
 *
 * On certain architectures this allows a TLB flush to be avoided in most
 * cases. Another TLB flush might still be necessary later if the PMD update
 * itself requires one (e.g., if protection was made stricter). Even when a
 * flush is needed because of the update, the caller may be able to batch
 * these TLB flushing operations, so fewer TLB flushes are needed overall.
*/
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
return pte_val(pte_a) == pte_val(pte_b);
}
#endif
#ifndef __HAVE_ARCH_PTE_UNUSED
/*
* Some architectures provide facilities to virtualization guests
* so that they can flag allocated pages as unused. This allows the
* host to transparently reclaim unused pages. This function returns
* whether the pte's page is unused.
*/
static inline int pte_unused(pte_t pte)
{
return 0;
}
#endif
#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
(pte_present(pte) && (!(write) || pte_write(pte)))
#endif
#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif
#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
(pud_present(pud) && (!(write) || pud_write(pud)))
#endif
#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif
#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif
#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#endif
#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
#define pud_same pud_same
#endif
#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif
#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
pte_t pte, pte_t oldpte,
int nr)
{
}
#else
/*
* Some architectures support metadata associated with a page. When a
* page is being swapped out, this metadata must be saved so it can be
* restored when the page is swapped back in. SPARC M7 and newer
* processors support an ADI (Application Data Integrity) tag for the
* page as metadata for the page. arch_do_swap_page() can restore this
* metadata when a page is swapped back in.
*/
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
pte_t pte, pte_t oldpte,
int nr)
{
for (int i = 0; i < nr; i++) {
arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
pte_advance_pfn(pte, i),
pte_advance_pfn(oldpte, i));
}
}
#endif
#ifndef __HAVE_ARCH_UNMAP_ONE
/*
* Some architectures support metadata associated with a page. When a
* page is being swapped out, this metadata must be saved so it can be
* restored when the page is swapped back in. SPARC M7 and newer
* processors support an ADI (Application Data Integrity) tag for the
* page as metadata for the page. arch_unmap_one() can save this
* metadata on a swap-out of a page.
*/
static inline int arch_unmap_one(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
pte_t orig_pte)
{
return 0;
}
#endif
/*
* Allow architectures to preserve additional metadata associated with
* swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
* prototypes must be defined in the arch-specific asm/pgtable.h file.
*/
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct folio *folio)
{
return 0;
}
#endif
#ifndef __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
}
static inline void arch_swap_invalidate_area(int type)
{
}
#endif
#ifndef __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
}
#endif
#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, old_addr, new_addr) (pte)
#endif
#ifndef pte_accessible
# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif
#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
#endif
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
* vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
*/
#define pgd_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#ifndef p4d_addr_end
#define p4d_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif
#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif
#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif
/*
* When walking page tables, we usually want to skip any p?d_none entries;
* and any p?d_bad entries - reporting the error before resetting to none.
* Do the tests inline, but report and clear the bad entry in mm/memory.c.
*/
void pgd_clear_bad(pgd_t *);
#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
#else
#define p4d_clear_bad(p4d) do { } while (0)
#endif
#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud) do { } while (0)
#endif
void pmd_clear_bad(pmd_t *);
static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
if (pgd_none(*pgd))
return 1;
if (unlikely(pgd_bad(*pgd))) {
pgd_clear_bad(pgd);
return 1;
}
return 0;
}
static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
if (p4d_none(*p4d))
return 1;
if (unlikely(p4d_bad(*p4d))) {
p4d_clear_bad(p4d);
return 1;
}
return 0;
}
static inline int pud_none_or_clear_bad(pud_t *pud)
{
if (pud_none(*pud))
return 1;
if (unlikely(pud_bad(*pud))) {
pud_clear_bad(pud);
return 1;
}
return 0;
}
static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
if (pmd_none(*pmd))
return 1;
if (unlikely(pmd_bad(*pmd))) {
pmd_clear_bad(pmd);
return 1;
}
return 0;
}
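/*
 * Illustrative usage sketch (not part of the original source; the function
 * name is hypothetical): the canonical walk pattern that pgd_addr_end() and
 * pgd_none_or_clear_bad() are designed for, shown for the top level only.
 */
#if 0
static void example_walk_pgd_range(struct mm_struct *mm, unsigned long addr,
				   unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* ... descend via p4d_offset()/pud_offset()/pmd_offset() ... */
	} while (pgd++, addr = next, addr != end);
}
#endif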
static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{
/*
* Get the current pte state, but zero it out to make it
* non-present, preventing the hardware from asynchronously
* updating it.
*/
return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
/*
* The pte is non-present, so there's no hardware state to
* preserve.
*/
set_pte_at(vma->vm_mm, addr, ptep, pte);
}
#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
* Start a pte protection read-modify-write transaction, which
* protects against asynchronous hardware modifications to the pte.
* The intention is not to prevent the hardware from making pte
* updates, but to prevent any updates it may make from being lost.
*
* This does not protect against other software modifications of the
* pte; the appropriate pte lock must be held over the transaction.
*
* Note that this interface is intended to be batchable, meaning that
* ptep_modify_prot_commit may not actually update the pte, but merely
* queue the update to be done at some later time. The update must be
* actually committed before the pte lock is released, however.
*/
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{
return __ptep_modify_prot_start(vma, addr, ptep);
}
/*
* Commit an update to a pte, leaving any hardware-controlled bits in
* the PTE unmodified. The pte returned from ptep_modify_prot_start() may
* additionally have young and/or dirty bits set where previously they were not,
* so the updated pte may have these additional changes.
*/
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t old_pte, pte_t pte)
{
__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
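/*
 * Illustrative usage sketch (not part of the original source; the function
 * name is hypothetical): write-protecting a single PTE with the protection
 * read-modify-write transaction, so accessed/dirty bits set by hardware
 * during the update are not lost. The caller holds the PTE lock.
 */
#if 0
static void example_wrprotect_pte(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	pte_t old_pte, new_pte;

	old_pte = ptep_modify_prot_start(vma, addr, ptep);
	new_pte = pte_wrprotect(old_pte);
	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
}
#endif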
/**
* modify_prot_start_ptes - Start a pte protection read-modify-write transaction
* over a batch of ptes, which protects against asynchronous hardware
* modifications to the ptes. The intention is not to prevent the hardware from
* making pte updates, but to prevent any updates it may make from being lost.
* Please see the comment above ptep_modify_prot_start() for full description.
*
* @vma: The virtual memory area the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @nr: Number of entries.
*
* May be overridden by the architecture; otherwise, implemented as a simple
* loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
* in the batch.
*
* Note that PTE bits in the PTE batch besides the PFN can differ.
*
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. All other PTE bits must be identical for
* all PTEs in the batch except for young and dirty bits. The PTEs are all in
* the same PMD.
*/
#ifndef modify_prot_start_ptes
static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, unsigned int nr)
{
pte_t pte, tmp_pte;
pte = ptep_modify_prot_start(vma, addr, ptep);
while (--nr) {
ptep++;
addr += PAGE_SIZE;
tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
if (pte_young(tmp_pte))
pte = pte_mkyoung(pte);
}
return pte;
}
#endif
/**
* modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
* hardware-controlled bits in the PTE unmodified.
*
* @vma: The virtual memory area the pages are mapped into.
* @addr: Address the first page is mapped at.
* @ptep: Page table pointer for the first entry.
* @old_pte: Old page table entry (for the first entry) which is now cleared.
* @pte: New page table entry to be set.
* @nr: Number of entries.
*
* May be overridden by the architecture; otherwise, implemented as a simple
* loop over ptep_modify_prot_commit().
*
* Context: The caller holds the page table lock. The PTEs are all in the same
* PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
* ptep_modify_prot_start() may additionally have young and/or dirty bits set
* where previously they were not, so the updated ptes may have these
* additional changes.
*/
#ifndef modify_prot_commit_ptes
static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
{
int i;
for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
/* Advance PFN only, set same prot */
old_pte = pte_next_pfn(old_pte);
pte = pte_next_pfn(pte);
}
}
#endif
/*
* Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
* and let generic vmalloc, ioremap and page table update code know when
* arch_sync_kernel_mappings() needs to be called.
*/
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif
/*
 * There is no default implementation for arch_sync_kernel_mappings(); the
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
*/
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
#endif /* CONFIG_MMU */
/*
* No-op macros that just return the current protection value. Defined here
* because these macros can be used even if CONFIG_MMU is not defined.
*/
#ifndef pgprot_nx
#define pgprot_nx(prot) (prot)
#endif
#ifndef pgprot_noncached
#define pgprot_noncached(prot) (prot)
#endif
#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif
#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif
#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif
#ifndef pgprot_mhp
#define pgprot_mhp(prot) (prot)
#endif
#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
newprot = pgprot_noncached(newprot);
if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
newprot = pgprot_writecombine(newprot);
if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
newprot = pgprot_device(newprot);
return newprot;
}
#endif
#endif /* CONFIG_MMU */
#ifndef pgprot_encrypted
#define pgprot_encrypted(prot) (prot)
#endif
#ifndef pgprot_decrypted
#define pgprot_decrypted(prot) (prot)
#endif
/*
* A facility to provide batching of the reload of page tables and
* other process state with the actual context switch code for
* paravirtualized guests. By convention, only one of the batched
* update (lazy) modes (CPU, MMU) should be active at any given time,
* entry should never be nested, and entry and exits should always be
* paired. This is for sanity of maintaining and reasoning about the
* kernel code. In this case, the exit (end of the context switch) is
* in architecture-specific code, and so doesn't need a generic
* definition.
*/
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev) do {} while (0)
#endif
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
return pmd;
}
static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
return 0;
}
static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
return 0;
}
static inline int pmd_soft_dirty(pmd_t pmd)
{
return 0;
}
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
return pte;
}
static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
return pmd;
}
static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
return pte;
}
static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
return pmd;
}
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
return pte;
}
static inline int pte_swp_soft_dirty(pte_t pte)
{
return 0;
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return pte;
}
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
return pmd;
}
static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
return 0;
}
static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
return pmd;
}
#endif
#ifndef __HAVE_PFNMAP_TRACKING
/*
* Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified via remap_pfn_range() and
 * vmf_insert_pfn().
*/
static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
pgprot_t *prot)
{
return 0;
}
static inline int pfnmap_track(unsigned long pfn, unsigned long size,
pgprot_t *prot)
{
return 0;
}
static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
{
}
#else
/**
* pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
* @pfn: the start of the pfn range
* @size: the size of the pfn range in bytes
* @prot: the pgprot to modify
*
* Lookup the cachemode for the pfn range starting at @pfn with the size
* @size and store it in @prot, leaving other data in @prot unchanged.
*
* This allows for a hardware implementation to have fine-grained control of
* memory cache behavior at page level granularity. Without a hardware
* implementation, this function does nothing.
*
* Currently there is only one implementation for this - x86 Page Attribute
* Table (PAT). See Documentation/arch/x86/pat.rst for more details.
*
* This function can fail if the pfn range spans pfns that require differing
* cachemodes. If the pfn range was previously verified to have a single
* cachemode, it is sufficient to query only a single pfn. The assumption is
* that this is the case for drivers using the vmf_insert_pfn*() interface.
*
* Returns 0 on success and -EINVAL on error.
*/
int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
pgprot_t *prot);
/**
* pfnmap_track - track a pfn range
* @pfn: the start of the pfn range
* @size: the size of the pfn range in bytes
* @prot: the pgprot to track
*
* Request the pfn range to be 'tracked' by a hardware implementation and
* set up the cachemode in @prot, similar to pfnmap_setup_cachemode().
*
* This allows for fine-grained control of memory cache behaviour at page
* level granularity. Tracking memory this way is persisted across VMA splits
* (VMA merging does not apply for VM_PFNMAP).
*
* Currently, there is only one implementation for this - x86 Page Attribute
* Table (PAT). See Documentation/arch/x86/pat.rst for more details.
*
* Returns 0 on success and -EINVAL on error.
*/
int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
/**
* pfnmap_untrack - untrack a pfn range
* @pfn: the start of the pfn range
* @size: the size of the pfn range in bytes
*
* Untrack a pfn range previously tracked through pfnmap_track().
*/
void pfnmap_untrack(unsigned long pfn, unsigned long size);
#endif
/**
* pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn
* @pfn: the pfn
* @prot: the pgprot to modify
*
* Lookup the cachemode for @pfn and store it in @prot, leaving other
* data in @prot unchanged.
*
* See pfnmap_setup_cachemode() for details.
*/
static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
{
pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
}
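/*
 * Illustrative sketch (not part of this header): a driver that later inserts
 * pfns with vmf_insert_pfn() might, at mmap time, adopt the tracked
 * cachemode for its (single-cachemode) range roughly as below. The
 * my_drv_mmap() and my_drv_base_pfn names are hypothetical.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		pgprot_t prot = vma->vm_page_prot;
 *
 *		// only the cachemode in prot is rewritten; other bits stay
 *		pfnmap_setup_cachemode_pfn(my_drv_base_pfn, &prot);
 *		vma->vm_page_prot = prot;
 *		return 0;
 *	}
 */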
#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
unsigned long offset_from_zero_pfn = pfn - zero_pfn;
return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}
#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
#else
static inline int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
return pfn == zero_pfn;
}
static inline unsigned long my_zero_pfn(unsigned long addr)
{
extern unsigned long zero_pfn;
return zero_pfn;
}
#endif
#else
static inline int is_zero_pfn(unsigned long pfn)
{
return 0;
}
static inline unsigned long my_zero_pfn(unsigned long addr)
{
return 0;
}
#endif /* CONFIG_MMU */
#ifdef CONFIG_MMU
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
BUG();
return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifndef pud_write
static inline int pud_write(pud_t pud)
{
BUG();
return 0;
}
#endif /* pud_write */
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
return 0;
}
#endif
static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudval = READ_ONCE(*pud);
if (pud_none(pudval) || pud_trans_huge(pudval))
return 1;
if (unlikely(pud_bad(pudval))) {
pud_clear_bad(pud);
return 1;
}
#endif
return 0;
}
#ifndef CONFIG_NUMA_BALANCING
/*
* In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
* perfectly valid to indicate "no" in that case, which is why our default
* implementation defaults to "always no".
*
* In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
* page protection due to NUMA hinting. NUMA hinting faults only apply in
* accessible VMAs.
*
* So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
* looking at the VMA accessibility is sufficient.
*/
static inline int pte_protnone(pte_t pte)
{
return 0;
}
static inline int pmd_protnone(pmd_t pmd)
{
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_MMU */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
void p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
#endif /* !__PAGETABLE_P4D_FOLDED */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
static inline int pud_clear_huge(pud_t *pud)
{
return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* ARCHes with special requirements for evicting THP backing TLB entries can
* implement this. Even otherwise, it can help optimize the normal TLB flush
* in the THP regime: stock flush_tlb_range() typically has an optimization
* to nuke the entire TLB if the flush span exceeds a threshold, which will
* likely be true for a single huge page. Thus a single THP flush would
* invalidate the entire TLB, which is not desirable.
* e.g. see arch/arc: flush_pmd_tlb_range
*/
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
#endif
#endif
struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot);
#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif
extern void __init pgtable_cache_init(void);
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
return true;
}
static inline bool arch_has_pfn_modify_check(void)
{
return false;
}
#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
/*
* Architecture PAGE_KERNEL_* fallbacks
*
* Some architectures don't define certain PAGE_KERNEL_* flags. This is either
* because they really don't support them, or the port needs to be updated to
* reflect the required functionality. Below is a set of relatively safe,
* best-effort fallbacks that can be relied on until those architectures
* define the flags themselves.
*/
#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
/*
* Page Table Modification bits for pgtbl_mod_mask.
*
* These are used by the p?d_alloc_track*() and p*d_populate_kernel()
* functions in the generic vmalloc, ioremap and page table update code
* to track at which page-table levels entries have been modified.
* Based on that the code can better decide when page table changes need
* to be synchronized to other page-tables in the system.
*/
#define __PGTBL_PGD_MODIFIED 0
#define __PGTBL_P4D_MODIFIED 1
#define __PGTBL_PUD_MODIFIED 2
#define __PGTBL_PMD_MODIFIED 3
#define __PGTBL_PTE_MODIFIED 4
#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;
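/*
 * Illustrative sketch (not part of this header): callers accumulate the
 * PGTBL_*_MODIFIED bits in a local mask while populating tables (e.g. via
 * the p?d_alloc_track() helpers) and only synchronize other page-tables
 * when an upper level was actually touched. my_sync_kernel_mappings() is a
 * hypothetical stand-in for an arch-specific sync routine.
 *
 *	pgtbl_mod_mask mask = 0;
 *
 *	// ... populate entries; helpers OR their level's bit into mask ...
 *
 *	if (mask & (PGTBL_PGD_MODIFIED | PGTBL_P4D_MODIFIED))
 *		my_sync_kernel_mappings(start, end);
 */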
enum pgtable_level {
PGTABLE_LEVEL_PTE = 0,
PGTABLE_LEVEL_PMD,
PGTABLE_LEVEL_PUD,
PGTABLE_LEVEL_P4D,
PGTABLE_LEVEL_PGD,
};
static inline const char *pgtable_level_to_str(enum pgtable_level level)
{
switch (level) {
case PGTABLE_LEVEL_PTE:
return "pte";
case PGTABLE_LEVEL_PMD:
return "pmd";
case PGTABLE_LEVEL_PUD:
return "pud";
case PGTABLE_LEVEL_P4D:
return "p4d";
case PGTABLE_LEVEL_PGD:
return "pgd";
default:
return "unknown";
}
}
#endif /* !__ASSEMBLY__ */
#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/*
* ZSMALLOC needs to know the highest PFN on 32-bit architectures
* with physical address space extension, but falls back to
* BITS_PER_LONG otherwise.
*/
#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#endif
#ifndef has_transparent_hugepage
#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif
#ifndef has_transparent_pud_hugepage
#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
#endif
/*
* On some architectures it depends on the mm if the p4d/pud or pmd
* layer of the page table hierarchy is folded or not.
*/
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
#endif
#ifndef mm_pud_folded
#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
#endif
#ifndef mm_pmd_folded
#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
#endif
#ifndef p4d_offset_lockless
#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
#endif
#ifndef pud_offset_lockless
#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
#endif
#ifndef pmd_offset_lockless
#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
#endif
/*
* pXd_leaf() is the API to check whether a pgtable entry is a huge page
* mapping. It should work globally across all archs, without any
* dependency on CONFIG_* options. For architectures that do not support
* huge mappings on specific levels, below fallbacks will be used.
*
* A leaf pgtable entry should always imply the following:
*
* - It is a "present" entry. IOW, before using this API, please check it
* with pXd_present() first. NOTE: it may not always mean the "present
* bit" is set. For example, PROT_NONE entries are always "present".
*
* - It should _never_ be a swap entry of any type. Above "present" check
* should have guarded this, but let's be crystal clear on this.
*
* - It should contain a huge PFN, which points to a huge page larger than
* PAGE_SIZE of the platform. The PFN format isn't important here.
*
* - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
* or hugetlb mappings).
*/
#ifndef pgd_leaf
#define pgd_leaf(x) false
#endif
#ifndef p4d_leaf
#define p4d_leaf(x) false
#endif
#ifndef pud_leaf
#define pud_leaf(x) false
#endif
#ifndef pmd_leaf
#define pmd_leaf(x) false
#endif
#ifndef pgd_leaf_size
#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
#endif
#ifndef p4d_leaf_size
#define p4d_leaf_size(x) P4D_SIZE
#endif
#ifndef pud_leaf_size
#define pud_leaf_size(x) PUD_SIZE
#endif
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
#ifndef __pte_leaf_size
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif
#define __pte_leaf_size(x,y) pte_leaf_size(y)
#endif
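/*
 * Illustrative sketch (not part of this header): a generic page-table walker
 * checks pXd_present() before pXd_leaf() and can use pXd_leaf_size() for the
 * covered range. my_handle_huge() is a hypothetical callback.
 *
 *	pud_t pud = READ_ONCE(*pudp);
 *
 *	if (pud_present(pud) && pud_leaf(pud)) {
 *		// the entry maps pud_leaf_size(pud) bytes at this level
 *		return my_handle_huge(pudp);
 *	}
 */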
/*
* We always define pmd_pfn for all archs as it's used in lots of generic
* code. Now it happens too for pud_pfn (and can happen for larger
* mappings too in the future; we're not there yet). Instead of defining
* it for all archs (like pmd_pfn), provide a fallback.
*
* Note that returning 0 here means any arch that didn't define this can
* go severely wrong when it hits a real pud leaf. It's the arch's
* responsibility to properly define it when a huge pud is possible.
*/
#ifndef pud_pfn
#define pud_pfn(x) 0
#endif
/*
* Some architectures have MMUs that are configurable or selectable at boot
* time. These lead to variable PTRS_PER_x. For statically allocated arrays it
* helps to have a static maximum value.
*/
#ifndef MAX_PTRS_PER_PTE
#define MAX_PTRS_PER_PTE PTRS_PER_PTE
#endif
#ifndef MAX_PTRS_PER_PMD
#define MAX_PTRS_PER_PMD PTRS_PER_PMD
#endif
#ifndef MAX_PTRS_PER_PUD
#define MAX_PTRS_PER_PUD PTRS_PER_PUD
#endif
#ifndef MAX_PTRS_PER_P4D
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
#ifndef pte_pgprot
#define pte_pgprot(x) ((pgprot_t) {0})
#endif
#ifndef pmd_pgprot
#define pmd_pgprot(x) ((pgprot_t) {0})
#endif
#ifndef pud_pgprot
#define pud_pgprot(x) ((pgprot_t) {0})
#endif
/* Description of the effects of mapping type and prot in the current
 * implementation. This is due to the limited x86 page protection hardware.
 * The expected behavior is in parens:
*
* map_type prot
* PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
* MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
* w: (no) no w: (no) no w: (yes) yes w: (no) no
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
*
* MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
* w: (no) no w: (no) no w: (copy) copy w: (no) no
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
*
* On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
* MAP_PRIVATE (with Enhanced PAN supported):
* r: (no) no
* w: (no) no
* x: (yes) yes
*/
#define DECLARE_VM_GET_PAGE_PROT \
pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \
{ \
return protection_map[vm_flags & \
(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
} \
EXPORT_SYMBOL(vm_get_page_prot);
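/*
 * Illustrative sketch (not part of this header): an architecture without a
 * custom vm_get_page_prot() typically provides protection_map[] per the
 * table above and instantiates the generic helper; the PAGE_* values are
 * arch-specific and only shown partially here.
 *
 *	pgprot_t protection_map[16] __ro_after_init = {
 *		[VM_NONE]			= PAGE_NONE,
 *		[VM_READ]			= PAGE_READONLY,
 *		// ... remaining VM_WRITE/VM_EXEC/VM_SHARED combinations ...
 *	};
 *	DECLARE_VM_GET_PAGE_PROT
 */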
#endif /* _LINUX_PGTABLE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/cgroup-defs.h - basic definitions for cgroup
*
* This file provides basic type and interface. Include this file directly
* only if necessary to avoid cyclic dependencies.
*/
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;
#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME 64
/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
/* bits in struct cgroup_subsys_state flags field */
enum {
CSS_NO_REF = (1 << 0), /* no reference counting for this css */
CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
CSS_VISIBLE = (1 << 3), /* css is visible to userland */
CSS_DYING = (1 << 4), /* css is dying */
};
/* bits in struct cgroup flags field */
enum {
/* Control Group requires release notifications to userspace */
CGRP_NOTIFY_ON_RELEASE,
/*
* Clone the parent's configuration when creating a new child
* cpuset cgroup. For historical reasons, this option can be
* specified at mount time and thus is implemented here.
*/
CGRP_CPUSET_CLONE_CHILDREN,
/* Control group has to be frozen. */
CGRP_FREEZE,
/* Cgroup is frozen. */
CGRP_FROZEN,
};
/* cgroup_root->flags */
enum {
CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
/*
* Consider namespaces as delegation boundaries. If this flag is
* set, controller specific interface files in a namespace root
* aren't writeable from inside the namespace.
*/
CGRP_ROOT_NS_DELEGATE = (1 << 3),
/*
* Reduce latencies on dynamic cgroup modifications such as task
* migrations and controller on/offs by disabling percpu operation on
* cgroup_threadgroup_rwsem. This pushes hot path operations such as
* forks and exits into the slow path and makes them more expensive.
*
* Alleviate the contention between fork, exec, exit operations and
* writing to cgroup.procs by taking a per threadgroup rwsem instead of
* the global cgroup_threadgroup_rwsem. Fork and other operations
* from threads in different thread groups no longer contend with
* writing to cgroup.procs.
*
* The static usage pattern of creating a cgroup, enabling controllers,
* and then seeding it with CLONE_INTO_CGROUP doesn't require write
* locking cgroup_threadgroup_rwsem and thus doesn't benefit from
* favordynmod.
*/
CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),
/*
* Enable cpuset controller in v1 cgroup to use v2 behavior.
*/
CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),
/*
* Enable legacy local memory.events.
*/
CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),
/*
* Enable recursive subtree protection
*/
CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
/*
* Enable hugetlb accounting for the memory controller.
*/
CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
/*
* Enable legacy local pids.events.
*/
CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};
/* cftype->flags */
enum {
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
__CFTYPE_ADDED = (1 << 18),
};
enum cgroup_attach_lock_mode {
/* Default */
CGRP_ATTACH_LOCK_GLOBAL,
/* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
CGRP_ATTACH_LOCK_NONE,
/* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
CGRP_ATTACH_LOCK_PER_THREADGROUP,
};
/*
* cgroup_file is the handle for a file instance created in a cgroup which
* is used, for example, to generate file changed notifications. This can
* be obtained by setting cftype->file_offset.
*/
struct cgroup_file {
/* do not access any fields from outside cgroup core */
struct kernfs_node *kn;
unsigned long notified_at;
struct timer_list notify_timer;
};
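/*
 * Illustrative sketch (not part of this header): a controller has cgroup
 * core record the handle by setting ->file_offset in its cftype, and later
 * wakes up pollers/readers with cgroup_file_notify(). The my_css structure,
 * "events" knob and my_events_show() are hypothetical.
 *
 *	struct my_css {
 *		struct cgroup_subsys_state css;
 *		struct cgroup_file events_file;
 *	};
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name = "events",
 *			.file_offset = offsetof(struct my_css, events_file),
 *			.seq_show = my_events_show,
 *		},
 *		{ }	// terminating entry
 *	};
 *
 *	// on a state change:
 *	//	cgroup_file_notify(&my_css_ptr->events_file);
 */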
/*
* Per-subsystem/per-cgroup state maintained by the system. This is the
* fundamental structural building block that controllers deal with.
*
* Fields marked with "PI:" are public and immutable and may be accessed
* directly without synchronization.
*/
struct cgroup_subsys_state {
/* PI: the cgroup that this css is attached to */
struct cgroup *cgroup;
/* PI: the cgroup subsystem that this css is attached to */
struct cgroup_subsys *ss;
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
/*
* Depending on the context, this field is initialized
* via css_rstat_init() at different places:
*
* when css is associated with cgroup::self
* when css->cgroup is the root cgroup
* performed in cgroup_init()
* when css->cgroup is not the root cgroup
* performed in cgroup_create()
* when css is associated with a subsystem
* when css->cgroup is the root cgroup
* performed in cgroup_init_subsys() in the non-early path
* when css->cgroup is not the root cgroup
* performed in css_create()
*/
struct css_rstat_cpu __percpu *rstat_cpu;
/*
* siblings list anchored at the parent's ->children
*
* linkage is protected by cgroup_mutex or RCU
*/
struct list_head sibling;
struct list_head children;
/*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id().
*/
int id;
unsigned int flags;
/*
* Monotonically increasing unique serial number which defines a
* uniform order among all csses. It's guaranteed that all
* ->children lists are in the ascending order of ->serial_nr and
* used to allow interrupting and resuming iterations.
*/
u64 serial_nr;
/*
* Incremented by online self and children. Used to guarantee that
* parents are not offlined before their children.
*/
atomic_t online_cnt;
/* percpu_ref killing and RCU release */
struct work_struct destroy_work;
struct rcu_work destroy_rwork;
/*
* PI: the parent css. Placed here for cache proximity to following
* fields of the containing structure.
*/
struct cgroup_subsys_state *parent;
/*
* Keep track of total numbers of visible descendant CSSes.
* The total number of dying CSSes is tracked in
* css->cgroup->nr_dying_subsys[ssid].
* Protected by cgroup_mutex.
*/
int nr_descendants;
/*
* A singly-linked list of css structures to be rstat flushed.
* This is a scratch field to be used exclusively by
* css_rstat_flush().
*
* Protected by rstat_base_lock when css is cgroup::self.
* Protected by css->ss->rstat_ss_lock otherwise.
*/
struct cgroup_subsys_state *rstat_flush_next;
};
/*
* A css_set is a structure holding pointers to a set of
* cgroup_subsys_state objects. This saves space in the task struct
* object and speeds up fork()/exit(), since a single inc/dec and a
* list_add()/del() can bump the reference count on the entire cgroup
* set for a task.
*/
struct css_set {
/*
* Set of subsystem states, one for each subsystem. This array is
* immutable after creation apart from the init_css_set during
* subsystem registration (at boot time).
*/
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
/* reference count */
refcount_t refcount;
/*
* For a domain cgroup, the following points to self. If threaded,
* to the matching cset of the nearest domain ancestor. The
* dom_cset provides access to the domain cgroup and its csses to
* which domain level resource consumptions should be charged.
*/
struct css_set *dom_cset;
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
/* internal task count, protected by css_set_lock */
int nr_tasks;
/*
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
* process of being migrated out or in. Protected by
* css_set_lock, but, during migration, once tasks are moved to
* mg_tasks, it can be read safely while holding cgroup_mutex.
*/
struct list_head tasks;
struct list_head mg_tasks;
struct list_head dying_tasks;
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
/*
* On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
* iterate through all css's attached to a given cgroup.
*/
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
/* all threaded csets whose ->dom_cset points to this cset */
struct list_head threaded_csets;
struct list_head threaded_csets_node;
/*
* List running through all cgroup groups in the same hash
* slot. Protected by css_set_lock
*/
struct hlist_node hlist;
/*
* List of cgrp_cset_links pointing at cgroups referenced from this
* css_set. Protected by css_set_lock.
*/
struct list_head cgrp_links;
/*
* List of csets participating in the on-going migration either as
* source or destination. Protected by cgroup_mutex.
*/
struct list_head mg_src_preload_node;
struct list_head mg_dst_preload_node;
struct list_head mg_node;
/*
* If this cset is acting as the source of migration the following
* two fields are set. mg_src_cgrp and mg_dst_cgrp are
* respectively the source and destination cgroups of the on-going
* migration. mg_dst_cset is the destination cset the target tasks
* on this cset should be migrated to. Protected by cgroup_mutex.
*/
struct cgroup *mg_src_cgrp;
struct cgroup *mg_dst_cgrp;
struct css_set *mg_dst_cset;
/* dead and being drained, ignore for migration */
bool dead;
/* For RCU-protected deletion */
struct rcu_head rcu_head;
};
struct cgroup_base_stat {
struct task_cputime cputime;
#ifdef CONFIG_SCHED_CORE
u64 forceidle_sum;
#endif
u64 ntime;
};
/*
* rstat - cgroup scalable recursive statistics. Accounting is done
* per-cpu in css_rstat_cpu which is then lazily propagated up the
* hierarchy on reads.
*
* When a stat gets updated, the css_rstat_cpu and its ancestors are
* linked into the updated tree. On the following read, propagation only
* considers and consumes the updated tree. This makes reading O(the
* number of descendants which have been active since last read) instead of
* O(the total number of descendants).
*
* This is important because there can be a lot of (draining) cgroups which
* aren't active and stat may be read frequently. The combination can
* become very expensive. By propagating selectively, increasing reading
* frequency decreases the cost of each read.
*
* This struct hosts both the fields which implement the above -
* updated_children and updated_next.
*/
struct css_rstat_cpu {
/*
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next. updated_children is terminated by its container css.
*/
struct cgroup_subsys_state *updated_children;
struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
struct llist_node lnode; /* lockless list for update */
struct cgroup_subsys_state *owner; /* back pointer */
};
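/*
 * Illustrative sketch (not part of this header): a controller bumps its
 * per-cpu counters in the hot path and marks the css as updated, so that a
 * later flush only visits cpus/csses with pending deltas. The helper names
 * follow the css_rstat_* convention used here but are assumptions, and
 * my_pcpu is a hypothetical per-cpu state.
 *
 *	// hot path, on the local cpu
 *	this_cpu_inc(my_pcpu->nr_events);
 *	css_rstat_updated(css, smp_processor_id());
 *
 *	// reader side: propagate pending per-cpu deltas up the hierarchy,
 *	// invoking ->css_rstat_flush() for each updated cpu
 *	css_rstat_flush(css);
 */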
/*
* This struct hosts the fields which track basic resource statistics on
* top of it - bsync, bstat and last_bstat.
*/
struct cgroup_rstat_base_cpu {
/*
* ->bsync protects ->bstat. These are the only fields which get
* updated in the hot path.
*/
struct u64_stats_sync bsync;
struct cgroup_base_stat bstat;
/*
* Snapshots at the last reading. These are used to calculate the
* deltas to propagate to the global counters.
*/
struct cgroup_base_stat last_bstat;
/*
* This field is used to record the cumulative per-cpu time of
* the cgroup and its descendants. Currently it can be read via
* eBPF/drgn etc, and we are still trying to determine how to
* expose it in the cgroupfs interface.
*/
struct cgroup_base_stat subtree_bstat;
/*
* Snapshots at the last reading. These are used to calculate the
* deltas to propagate to the per-cpu subtree_bstat.
*/
struct cgroup_base_stat last_subtree_bstat;
};
struct cgroup_freezer_state {
/* Should the cgroup and its descendants be frozen. */
bool freeze;
/* Should the cgroup actually be frozen? */
bool e_freeze;
/* Fields below are protected by css_set_lock */
/* Number of frozen descendant cgroups */
int nr_frozen_descendants;
/*
* Number of tasks, which are counted as frozen:
* frozen, SIGSTOPped, and PTRACEd.
*/
int nr_frozen_tasks;
/* Freeze time data consistency protection */
seqcount_spinlock_t freeze_seq;
/*
* Most recent time the cgroup was requested to freeze.
* Accesses guarded by freeze_seq counter. Writes serialized
* by css_set_lock.
*/
u64 freeze_start_nsec;
/*
* Total duration the cgroup has spent freezing.
* Accesses guarded by freeze_seq counter. Writes serialized
* by css_set_lock.
*/
u64 frozen_nsec;
};
struct cgroup {
/* self css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state self;
unsigned long flags; /* "unsigned long" so bitops work */
/*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
* ancestors[] can determine whether a given cgroup is a
* descendant of another without traversing the hierarchy.
*/
int level;
/* Maximum allowed descent tree depth */
int max_depth;
/*
* Keep track of total numbers of visible and dying descent cgroups.
* Dying cgroups are cgroups which were deleted by a user,
* but are still existing because someone else is holding a reference.
* max_descendants is a maximum allowed number of descent cgroups.
*
* nr_descendants and nr_dying_descendants are protected
* by cgroup_mutex and css_set_lock. It's fine to read them holding
* any of cgroup_mutex and css_set_lock; for writing both locks
* should be held.
*/
int nr_descendants;
int nr_dying_descendants;
int max_descendants;
/*
* Each non-empty css_set associated with this cgroup contributes
* one to nr_populated_csets. The counter is zero iff this cgroup
* doesn't have any tasks.
*
* All children which have non-zero nr_populated_csets and/or
* nr_populated_children of their own contribute one to either
* nr_populated_domain_children or nr_populated_threaded_children
* depending on their type. Each counter is zero iff all cgroups
* of the type in the subtree proper don't have any tasks.
*/
int nr_populated_csets;
int nr_populated_domain_children;
int nr_populated_threaded_children;
int nr_threaded_children; /* # of live threaded child cgroups */
/* sequence number for cgroup.kill, serialized by css_set_lock. */
unsigned int kill_seq;
struct kernfs_node *kn; /* cgroup kernfs entry */
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
/* handles for "{cpu,memory,io,irq}.pressure" */
struct cgroup_file psi_files[NR_PSI_RESOURCES];
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
* "cgroup.subtree_control" while ->subtree_ss_mask is the effective
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
u16 subtree_control;
u16 subtree_ss_mask;
u16 old_subtree_control;
u16 old_subtree_ss_mask;
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
/*
* Keep track of total number of dying CSSes at and below this cgroup.
* Protected by cgroup_mutex.
*/
int nr_dying_subsys[CGROUP_SUBSYS_COUNT];
struct cgroup_root *root;
/*
* List of cgrp_cset_links pointing at css_sets with tasks in this
* cgroup. Protected by css_set_lock.
*/
struct list_head cset_links;
/*
* On the default hierarchy, a css_set for a cgroup with some
* subsys disabled will point to css's which are associated with
* the closest ancestor which has the subsys enabled. The
* following lists all css_sets which point to this cgroup's css
* for the given subsystem.
*/
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
/*
* If !threaded, self. If threaded, it points to the nearest
* domain ancestor. Inside a threaded subtree, cgroups are exempt
* from process granularity and no-internal-task constraint.
* Domain level resource consumptions which aren't tied to a
* specific task are charged to the dom_cgrp.
*/
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp; /* used while enabling threaded */
/*
* Depending on the context, this field is initialized via
* css_rstat_init() at different places:
*
* when cgroup is the root cgroup
* performed in cgroup_setup_root()
* otherwise
* performed in cgroup_create()
*/
struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;
/*
* Add padding to keep the read mostly rstat per-cpu pointer on a
* different cacheline than the following *bstat fields which can have
* frequent updates.
*/
CACHELINE_PADDING(_pad_);
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
struct prev_cputime prev_cputime; /* for printing out cputime */
/*
* list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand.
*/
struct list_head pidlists;
struct mutex pidlist_mutex;
/* used to wait for offlining of csses */
wait_queue_head_t offline_waitq;
/* used to schedule release agent */
struct work_struct release_agent_work;
/* used to track pressure stalls */
struct psi_group *psi;
/* used to store eBPF programs */
struct cgroup_bpf bpf;
/* Used to store internal freezer state */
struct cgroup_freezer_state freezer;
#ifdef CONFIG_BPF_SYSCALL
struct bpf_local_storage __rcu *bpf_cgrp_storage;
#endif
/* All ancestors including self */
struct cgroup *ancestors[];
};
/*
* A cgroup_root represents the root of a cgroup hierarchy, and may be
* associated with a kernfs_root to form an active hierarchy. This is
* internal to cgroup core. Don't access directly from controllers.
*/
struct cgroup_root {
struct kernfs_root *kf_root;
/* The bitmask of subsystems attached to this hierarchy */
unsigned int subsys_mask;
/* Unique id for this hierarchy. */
int hierarchy_id;
/* A list running through the active hierarchies */
struct list_head root_list;
struct rcu_head rcu; /* Must be near the top */
/*
* The root cgroup. The containing cgroup_root will be destroyed on its
* release. cgrp->ancestors[0] will be used overflowing into the
* following field. cgrp_ancestor_storage must immediately follow.
*/
struct cgroup cgrp;
/* must follow cgrp for cgrp->ancestors[0], see above */
struct cgroup *cgrp_ancestor_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
/* Hierarchy-specific flags */
unsigned int flags;
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
/* The name for this hierarchy - may be empty */
char name[MAX_CGROUP_ROOT_NAMELEN];
};
/*
* struct cftype: handler definitions for cgroup control files
*
* When reading/writing to a file:
* - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
* - the 'cftype' of the file is file->f_path.dentry->d_fsdata
*/
struct cftype {
/*
* Name of the subsystem is prepended in cgroup_file_name().
* Zero length string indicates end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
unsigned long private;
/*
* The maximum length of string, excluding trailing nul, that can
* be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
*/
size_t max_write_len;
/* CFTYPE_* flags */
unsigned int flags;
/*
* If non-zero, should contain the offset from the start of css to
* a struct cgroup_file field. cgroup will record the handle of
* the created file into it. The recorded handle can be used as
* long as the containing css remains accessible.
*/
unsigned int file_offset;
/*
* Fields used for internal bookkeeping. Initialized automatically
* during registration.
*/
struct cgroup_subsys *ss; /* NULL for cgroup core files */
struct list_head node; /* anchored at ss->cfts */
struct kernfs_ops *kf_ops;
int (*open)(struct kernfs_open_file *of);
void (*release)(struct kernfs_open_file *of);
/*
* read_u64() is a shortcut for the common case of returning a
* single integer. Use it in place of read()
*/
u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
/*
* read_s64() is a signed version of read_u64()
*/
s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
/* generic seq_file read interface */
int (*seq_show)(struct seq_file *sf, void *v);
/* optional ops, implement all or none */
void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
void (*seq_stop)(struct seq_file *sf, void *v);
/*
* write_u64() is a shortcut for the common case of accepting
* a single integer (as parsed by simple_strtoull) from
* userspace. Use in place of write(); return 0 or error.
*/
int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
u64 val);
/*
* write_s64() is a signed version of write_u64()
*/
int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
s64 val);
/*
* write() is the generic write callback which maps directly to
* kernfs write operation and overrides all other operations.
* Maximum write size is determined by ->max_write_len. Use
* of_css/cft() to access the associated css and cft.
*/
ssize_t (*write)(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
__poll_t (*poll)(struct kernfs_open_file *of,
struct poll_table_struct *pt);
struct lock_class_key lockdep_key;
};
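/*
 * Illustrative sketch (not part of this header): the read_u64()/write_u64()
 * shortcut pair for a single-integer knob. The "weight" knob, its range
 * check and the my_css() accessor are hypothetical.
 *
 *	static u64 my_weight_read(struct cgroup_subsys_state *css,
 *				  struct cftype *cft)
 *	{
 *		return my_css(css)->weight;
 *	}
 *
 *	static int my_weight_write(struct cgroup_subsys_state *css,
 *				   struct cftype *cft, u64 val)
 *	{
 *		if (val < 1 || val > 10000)
 *			return -ERANGE;
 *		my_css(css)->weight = val;
 *		return 0;
 *	}
 */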
/*
* Control Group subsystem type.
* See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
int (*css_online)(struct cgroup_subsys_state *css);
void (*css_offline)(struct cgroup_subsys_state *css);
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
void (*css_killed)(struct cgroup_subsys_state *css);
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
int (*css_local_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
int (*can_fork)(struct task_struct *task,
struct css_set *cset);
void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
void (*fork)(struct task_struct *task);
void (*exit)(struct task_struct *task);
void (*release)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
bool early_init:1;
/*
* If %true, the controller, on the default hierarchy, doesn't show
* up in "cgroup.controllers" or "cgroup.subtree_control", is
* implicitly enabled on all cgroups on the default hierarchy, and
* bypasses the "no internal process" constraint. This is for
* utility type controllers which are transparent to userland.
*
* An implicit controller can be stolen from the default hierarchy
* anytime and thus must be okay with offline csses from previous
* hierarchies coexisting with csses for the current one.
*/
bool implicit_on_dfl:1;
/*
* If %true, the controller supports threaded mode on the default
* hierarchy. In a threaded subtree, both the process granularity and
* the no-internal-process constraint are ignored and a threaded
* controller should be able to handle that.
*
* Note that as an implicit controller is automatically enabled on
* all cgroups on the default hierarchy, it should also be
* threaded. implicit && !threaded is not supported.
*/
bool threaded:1;
/* the following two fields are initialized automatically during boot */
int id;
const char *name;
/* optional, initialized automatically during boot if not set */
const char *legacy_name;
/* link to parent, protected by cgroup_lock() */
struct cgroup_root *root;
/* idr for css->id */
struct idr css_idr;
/*
* List of cftypes. Each entry is the first entry of an array
* terminated by zero length name.
*/
struct list_head cfts;
/*
* Base cftypes which are automatically registered. The two can
* point to the same array.
*/
struct cftype *dfl_cftypes; /* for the default hierarchy */
struct cftype *legacy_cftypes; /* for the legacy hierarchies */
/*
* A subsystem may depend on other subsystems. When such subsystem
* is enabled on a cgroup, the depended-upon subsystems are enabled
* together if available. Subsystems enabled due to dependency are
* not visible to userland until explicitly enabled. The following
* specifies the mask of subsystems that this one depends on.
*/
unsigned int depends_on;
spinlock_t rstat_ss_lock;
struct llist_head __percpu *lhead; /* lockless update list head */
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
extern bool cgroup_enable_per_threadgroup_rwsem;
struct cgroup_of_peak {
unsigned long value;
struct list_head list;
};
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
*
* Allows cgroup operations to synchronize against threadgroup changes
* using a global percpu_rw_semaphore and a per threadgroup rw_semaphore when
* favordynmods is on. See the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);

if (cgroup_enable_per_threadgroup_rwsem)
	down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}
/**
* cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
* @tsk: target task
*
* Counterpart of cgroup_threadgroup_change_begin().
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
if (cgroup_enable_per_threadgroup_rwsem)
	up_read(&tsk->signal->cgroup_threadgroup_rwsem);

percpu_up_read(&cgroup_threadgroup_rwsem);
}
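/*
 * Illustrative sketch (not part of this header): the two helpers above are
 * always used as a matched pair around operations that must not race with
 * threadgroup changes of the task:
 *
 *	cgroup_threadgroup_change_begin(task);
 *	// ... migrate the task / walk its thread group safely ...
 *	cgroup_threadgroup_change_end(task);
 */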
#else /* CONFIG_CGROUPS */
#define CGROUP_SUBSYS_COUNT 0
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
might_sleep();
}
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
#endif /* CONFIG_CGROUPS */
#ifdef CONFIG_SOCK_CGROUP_DATA
/*
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
* On legacy hierarchies, net_prio and net_cls controllers directly
* set attributes on each sock which can then be tested by the network
* layer. On the default hierarchy, each sock is associated with the
* cgroup it was created in and the networking layer can match the
* cgroup directly.
*/
struct sock_cgroup_data {
struct cgroup *cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
u32 classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
u16 prioidx; /* v1 */
#endif
};
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_PRIO
return READ_ONCE(skcd->prioidx);
#else
return 1;
#endif
}
#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
return READ_ONCE(skcd->classid);
}
#endif
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
#ifdef CONFIG_CGROUP_NET_PRIO
WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}
#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
WRITE_ONCE(skcd->classid, classid);
}
#endif
#else /* CONFIG_SOCK_CGROUP_DATA */
struct sock_cgroup_data {
};
#endif /* CONFIG_SOCK_CGROUP_DATA */
#endif /* _LINUX_CGROUP_DEFS_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
#include <linux/bitops.h>
#include <linux/wordpart.h>
struct word_at_a_time {
const unsigned long one_bits, high_bits;
};
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
*bits = mask;
return mask;
}
static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
return bits;
}
#ifdef CONFIG_64BIT
/* Keep the initial has_zero() value for both bitmask and size calc */
#define create_zero_mask(bits) (bits)
static inline unsigned long zero_bytemask(unsigned long bits)
{
bits = (bits - 1) & ~bits;
return bits >> 7;
}
#define find_zero(bits) (__ffs(bits) >> 3)
#else
/* Create the final mask for both bytemask and size */
static inline unsigned long create_zero_mask(unsigned long bits)
{
bits = (bits - 1) & ~bits;
return bits >> 7;
}
/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)
/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline unsigned long find_zero(unsigned long mask)
{
/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
long a = (0x0ff0001+mask) >> 23;
/* Fix the 1 for 00 case */
return a & mask;
}
#endif
/*
* Load an unaligned word from kernel space.
*
* In the (very unlikely) case of the word being a page-crosser
* and the next page not being mapped, take the exception and
* return zeroes in the non-existing part.
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret;
asm volatile(
"1: mov %[mem], %[ret]\n"
"2:\n"
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_ZEROPAD)
: [ret] "=r" (ret)
: [mem] "m" (*(unsigned long *)addr));
return ret;
}
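/*
 * Illustrative sketch (not part of this header): the canonical consumer
 * pattern (cf. strscpy() and the VFS name hashing code) scans a word at a
 * time and then locates the first NUL byte; "name" is an assumed
 * nul-terminated string pointer.
 *
 *	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *	unsigned long a, mask;
 *	size_t len = 0;
 *
 *	for (;;) {
 *		a = load_unaligned_zeropad(name + len);
 *		if (has_zero(a, &mask, &constants))
 *			break;
 *		len += sizeof(unsigned long);
 *	}
 *	mask = prep_zero_mask(a, mask, &constants);
 *	mask = create_zero_mask(mask);
 *	len += find_zero(mask);		// byte index of the first NUL
 */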
#endif /* _ASM_WORD_AT_A_TIME_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOCONTEXT_H
#define IOCONTEXT_H
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
enum {
ICQ_EXITED = 1 << 2,
ICQ_DESTROYED = 1 << 3,
};
/*
* An io_cq (icq) is an association between an io_context (ioc) and a
* request_queue (q). This is used by elevators which need to track
* information per ioc - q pair.
*
* Elevator can request use of icq by setting elevator_type->icq_size and
* ->icq_align. Both size and align must be larger than that of struct
* io_cq and elevator can use the tail area for private information. The
* recommended way to do this is defining a struct which contains io_cq as
* the first member followed by private members and using its size and
* align. For example,
*
* struct snail_io_cq {
* struct io_cq icq;
* int poke_snail;
* int feed_snail;
* };
*
* struct elevator_type snail_elv_type {
* .ops = { ... },
* .icq_size = sizeof(struct snail_io_cq),
* .icq_align = __alignof__(struct snail_io_cq),
* ...
* };
*
* If icq_size is set, block core will manage icq's. All requests will
* have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
* is called and be holding a reference to the associated io_context.
*
* Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
* called and, on destruction, ->elevator_exit_icq_fn(). Both functions
* are called with both the associated io_context and queue locks held.
*
* An elevator is allowed to look up an icq using ioc_lookup_icq() while
* holding the queue lock, but the returned icq is valid only until the
* queue lock is released. Elevators cannot and should not try to create
* or destroy icq's.
*
* As icq's are linked from both ioc and q, the locking rules are a bit
* complex.
*
* - ioc lock nests inside q lock.
*
* - ioc->icq_list and icq->ioc_node are protected by ioc lock.
* q->icq_list and icq->q_node by q lock.
*
* - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
* itself is protected by q lock. However, both the indexes and icq
* itself are also RCU managed and lookup can be performed holding only
* the q lock.
*
* - icq's are not reference counted. They are destroyed when either the
* ioc or q goes away. Each request with icq set holds an extra
* reference to ioc to ensure it stays until the request is completed.
*
* - Linking and unlinking icq's are performed while holding both ioc and q
* locks. Due to the lock ordering, q exit is simple but ioc exit
* requires reverse-order double lock dance.
*/
struct io_cq {
struct request_queue *q;
struct io_context *ioc;
/*
* q_node and ioc_node link io_cq through icq_list of q and ioc
* respectively. Both fields are unused once ioc_exit_icq() is
* called and shared with __rcu_icq_cache and __rcu_head which are
* used for RCU free of io_cq.
*/
union {
struct list_head q_node;
struct kmem_cache *__rcu_icq_cache;
};
union {
struct hlist_node ioc_node;
struct rcu_head __rcu_head;
};
unsigned int flags;
};
/*
* I/O subsystem state of the associated processes. It is refcounted
* and kmalloc'ed. These could be shared between processes.
*/
struct io_context {
atomic_long_t refcount;
atomic_t active_ref;
unsigned short ioprio;
#ifdef CONFIG_BLK_ICQ
/* all the fields below are protected by this lock */
spinlock_t lock;
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
#endif /* CONFIG_BLK_ICQ */
};
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
int __copy_io(u64 clone_flags, struct task_struct *tsk);
static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
{
if (!current->io_context)
return 0;
return __copy_io(clone_flags, tsk);
}
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
{
return 0;
}
#endif /* CONFIG_BLOCK */
#endif /* IOCONTEXT_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
* seqcount_t / seqlock_t - a reader-writer consistency mechanism with
* lockless readers (read-only retry loops), and no writer starvation.
*
* See Documentation/locking/seqlock.rst
*
* Copyrights:
* - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
* - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
*/
#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/seqlock_types.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
/*
* The seqlock seqcount_t interface does not prescribe a precise sequence of
* read begin/retry/end. For readers, typically there is a call to
* read_seqcount_begin() and read_seqcount_retry(), however, there are more
* esoteric cases which do not follow this pattern.
*
* As a consequence, we take the following best-effort approach for raw usage
* via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
* pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
* atomics; if there is a matching read_seqcount_retry() call, no following
* memory operations are considered atomic. Usage of the seqlock_t interface
* is not affected.
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
static inline void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{
/*
* Make sure we are not reinitializing a held lock:
*/
lockdep_init_map(&s->dep_map, name, key, 0);
s->sequence = 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
.dep_map = { .name = #lockname }
/**
* seqcount_init() - runtime initializer for seqcount_t
* @s: Pointer to the seqcount_t instance
*/
# define seqcount_init(s) \
do { \
static struct lock_class_key __key; \
__seqcount_init((s), #s, &__key); \
} while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
seqcount_t *l = (seqcount_t *)s;
unsigned long flags;
local_irq_save(flags);
seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
seqcount_release(&l->dep_map, _RET_IP_);
local_irq_restore(flags);
}
#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif
/**
* SEQCNT_ZERO() - static initializer for seqcount_t
* @name: Name of the seqcount_t instance
*/
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
/*
* Sequence counters with associated locks (seqcount_LOCKNAME_t)
*
* A sequence counter which associates the lock used for writer
* serialization at initialization time. This enables lockdep to validate
* that the write side critical section is properly serialized.
*
* For associated locks which do not implicitly disable preemption,
* preemption protection is enforced in the write side function.
*
* Lockdep is never used for any of the raw write variants.
*
* See Documentation/locking/seqlock.rst
*/
/*
* typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
* @lock: Pointer to the associated lock
*
* A plain sequence counter with external writer synchronization by
* LOCKNAME @lock. The lock is associated to the sequence counter in the
* static initializer or init function. This enables lockdep to validate
* that the write side critical section is properly serialized.
*
* LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
*/
/*
* seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
* @s: Pointer to the seqcount_LOCKNAME_t instance
* @lock: Pointer to the associated lock
*/
#define seqcount_LOCKNAME_init(s, _lock, lockname) \
do { \
seqcount_##lockname##_t *____s = (s); \
seqcount_init(&____s->seqcount); \
__SEQ_LOCK(____s->lock = (_lock)); \
} while (0)
#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
* seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
*
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above locktype
* @lockbase: prefix for associated lock/unlock
*/
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
static __always_inline seqcount_t * \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
return &s->seqcount; \
} \
\
static __always_inline const seqcount_t * \
__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
{ \
return &s->seqcount; \
} \
\
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
unsigned seq = smp_load_acquire(&s->seqcount.sequence); \
\
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return seq; \
\
if (preemptible && unlikely(seq & 1)) { \
__SEQ_LOCK(lockbase##_lock(s->lock)); \
__SEQ_LOCK(lockbase##_unlock(s->lock)); \
\
/* \
* Re-read the sequence counter since the (possibly \
* preempted) writer made progress. \
*/ \
seq = smp_load_acquire(&s->seqcount.sequence); \
} \
\
return seq; \
} \
\
static __always_inline bool \
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
{ \
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return preemptible; \
\
/* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
return false; \
} \
\
static __always_inline void \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
__SEQ_LOCK(lockdep_assert_held(s->lock)); \
}
/*
* __seqprop() for seqcount_t
*/
static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
return s;
}
static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
{
return s;
}
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
return smp_load_acquire(&s->sequence);
}
static inline bool __seqprop_preemptible(const seqcount_t *s)
{
return false;
}
static inline void __seqprop_assert(const seqcount_t *s)
{
lockdep_assert_preemption_disabled();
}
#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#undef SEQCOUNT_LOCKNAME
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
* @name: Name of the seqcount_LOCKNAME_t instance
* @lock: Pointer to the associated LOCKNAME
*/
#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
.seqcount = SEQCNT_ZERO(seq_name.seqcount), \
__SEQ_LOCK(.lock = (assoc_lock)) \
}
#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
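/*
 * Illustrative sketch (not part of this header): declaring a spinlock-
 * associated sequence counter, either statically or at runtime. The
 * my_lock and my_seq names are hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static seqcount_spinlock_t my_seq = SEQCNT_SPINLOCK_ZERO(my_seq, &my_lock);
 *
 *	// or, at runtime:
 *	//	seqcount_spinlock_init(&my_seq, &my_lock);
 *
 * Writers then serialize on my_lock, and lockdep can verify that the lock
 * is held across write side critical sections.
 */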
#define __seqprop_case(s, lockname, prop) \
seqcount_##lockname##_t: __seqprop_##lockname##_##prop
#define __seqprop(s, prop) _Generic(*(s), \
seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
__seqprop_case((s), mutex, prop))
#define seqprop_ptr(s) __seqprop(s, ptr)(s)
#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
#define seqprop_sequence(s) __seqprop(s, sequence)(s)
#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
* __read_seqcount_begin() - begin a seqcount_t read section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
({ \
unsigned __seq; \
\
while (unlikely((__seq = seqprop_sequence(s)) & 1)) \
cpu_relax(); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
#define read_seqcount_begin(s) \
({ \
seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \
raw_read_seqcount_begin(s); \
})
/**
* raw_read_seqcount() - read the raw seqcount_t counter value
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_read_seqcount opens a read critical section of the given
* seqcount_t, without any lockdep checking, and without checking or
* masking the sequence counter LSB. Calling code is responsible for
* handling that.
*
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount(s) \
({ \
unsigned __seq = seqprop_sequence(s); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
/**
* raw_seqcount_try_begin() - begin a seqcount_t read critical section
* w/o lockdep and w/o counter stabilization
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count to be passed to read_seqcount_retry()
*
* Similar to raw_seqcount_begin(), except it enables eliding the critical
* section entirely if odd, instead of doing the speculation knowing it will
* fail.
*
* Useful when counter stabilization is more or less equivalent to taking
* the lock and there is a slowpath that does that.
*
* If true, start will be set to the (even) sequence count read.
*
* Return: true when a read critical section is started.
*/
#define raw_seqcount_try_begin(s, start) \
({ \
start = raw_read_seqcount(s); \
!(start & 1); \
})
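/*
 * A minimal usage sketch for raw_seqcount_try_begin() (illustrative only;
 * "obj", its "seq"/"value" members and "foo_lock" are hypothetical):
 *
 *	unsigned seq;
 *	int val;
 *
 *	if (raw_seqcount_try_begin(&obj->seq, seq)) {
 *		val = obj->value;
 *		if (!read_seqcount_retry(&obj->seq, seq))
 *			return val;
 *	}
 *	// Writer active or speculation failed: fall back to the slowpath
 *	// that stabilizes the data, e.g. by taking the writer's lock.
 *	spin_lock(&foo_lock);
 *	val = obj->value;
 *	spin_unlock(&foo_lock);
 *	return val;
 */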
/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* raw_seqcount_begin opens a read critical section of the given
* seqcount_t. Unlike read_seqcount_begin(), this function will not wait
* for the count to stabilize. If a writer is active when it begins, it
* will fail the read_seqcount_retry() at the end of the read critical
* section instead of stabilizing at the beginning of it.
*
* Use this only in special kernel hot paths where the read section is
* small and has a high probability of success through other external
* means. It will save a single branching instruction.
*
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_seqcount_begin(s) \
({ \
/* \
* If the counter is odd, let read_seqcount_retry() fail \
* by decrementing the counter. \
*/ \
raw_read_seqcount(s) & ~1; \
})
/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
* provided before actually loading any of the variables that are to be
* protected in this critical section.
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
*
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
do___read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
kcsan_atomic_next(0);
return unlikely(READ_ONCE(s->sequence) != start);
}
/**
* read_seqcount_retry() - end a seqcount_t read critical section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @start: count, from read_seqcount_begin()
*
* read_seqcount_retry closes the read critical section of given
* seqcount_t. If the critical section was invalid, it must be ignored
* (and typically retried).
*
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
do_read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
return do___read_seqcount_retry(s, start);
}
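/*
 * A minimal lockless reader sketch combining read_seqcount_begin() and
 * read_seqcount_retry() (illustrative only; "obj" and its members are
 * hypothetical):
 *
 *	unsigned seq;
 *	u64 a, b;
 *
 *	do {
 *		seq = read_seqcount_begin(&obj->seq);
 *		a = obj->a;
 *		b = obj->b;
 *	} while (read_seqcount_retry(&obj->seq, seq));
 *
 * The loop body must be free of side effects: it can run multiple times
 * and may observe inconsistent intermediate values that the retry check
 * subsequently discards.
 */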
/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Context: check write_seqcount_begin()
*/
#define raw_write_seqcount_begin(s) \
do { \
if (seqprop_preemptible(s)) \
preempt_disable(); \
\
do_raw_write_seqcount_begin(seqprop_ptr(s)); \
} while (0)
static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
/**
* raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Context: check write_seqcount_end()
*/
#define raw_write_seqcount_end(s) \
do { \
do_raw_write_seqcount_end(seqprop_ptr(s)); \
\
if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
kcsan_nestable_atomic_end();
}
/**
* write_seqcount_begin_nested() - start a seqcount_t write section with
* custom lockdep nesting level
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
* @subclass: lockdep nesting level
*
* See Documentation/locking/lockdep-design.rst
* Context: check write_seqcount_begin()
*/
#define write_seqcount_begin_nested(s, subclass) \
do { \
seqprop_assert(s); \
\
if (seqprop_preemptible(s)) \
preempt_disable(); \
\
do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
} while (0)
static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
do_raw_write_seqcount_begin(s);
}
/**
* write_seqcount_begin() - start a seqcount_t write side critical section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Context: sequence counter write side sections must be serialized and
* non-preemptible. Preemption will be automatically disabled if and
* only if the seqcount write serialization lock is associated, and
* preemptible. If readers can be invoked from hardirq or softirq
* context, interrupts or bottom halves must be respectively disabled.
*/
#define write_seqcount_begin(s) \
do { \
seqprop_assert(s); \
\
if (seqprop_preemptible(s)) \
preempt_disable(); \
\
do_write_seqcount_begin(seqprop_ptr(s)); \
} while (0)
static inline void do_write_seqcount_begin(seqcount_t *s)
{
do_write_seqcount_begin_nested(s, 0);
}
/**
* write_seqcount_end() - end a seqcount_t write side critical section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* Context: Preemption will be automatically re-enabled if and only if
* the seqcount write serialization lock is associated, and preemptible.
*/
#define write_seqcount_end(s) \
do { \
do_write_seqcount_end(seqprop_ptr(s)); \
\
if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
static inline void do_write_seqcount_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, _RET_IP_);
do_raw_write_seqcount_end(s);
}
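/*
 * A minimal writer sketch pairing write_seqcount_begin() and
 * write_seqcount_end() (illustrative only; "obj", its lock and its members
 * are hypothetical). The external lock provides the required write
 * serialization; the seqcount makes the update appear atomic to the
 * lockless readers shown above:
 *
 *	spin_lock(&obj->lock);
 *	write_seqcount_begin(&obj->seq);
 *	obj->a = new_a;
 *	obj->b = new_b;
 *	write_seqcount_end(&obj->seq);
 *	spin_unlock(&obj->lock);
 */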
/**
* raw_write_seqcount_barrier() - do a seqcount_t write barrier
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* This can be used to provide an ordering guarantee instead of the usual
* consistency guarantee. It is one wmb cheaper, because it can collapse
* the two back-to-back wmb()s.
*
* Note that writes surrounding the barrier should be declared atomic (e.g.
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
* neither writes before nor after the barrier are enclosed in a seq-writer
* critical section that would ensure readers are aware of ongoing writes::
*
* seqcount_t seq;
* bool X = true, Y = false;
*
* void read(void)
* {
* bool x, y;
*
* do {
* int s = read_seqcount_begin(&seq);
*
* x = X; y = Y;
*
* } while (read_seqcount_retry(&seq, s));
*
* BUG_ON(!x && !y);
* }
*
* void write(void)
* {
* WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
* WRITE_ONCE(X, false);
* }
*/
#define raw_write_seqcount_barrier(s) \
do_raw_write_seqcount_barrier(seqprop_ptr(s))
static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
kcsan_nestable_atomic_end();
}
/**
* write_seqcount_invalidate() - invalidate in-progress seqcount_t read
* side operations
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* After write_seqcount_invalidate, no seqcount_t read side operations
* will complete successfully and see data older than this.
*/
#define write_seqcount_invalidate(s) \
do_write_seqcount_invalidate(seqprop_ptr(s))
static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
kcsan_nestable_atomic_begin();
	s->sequence += 2;
kcsan_nestable_atomic_end();
}
/*
* Latch sequence counters (seqcount_latch_t)
*
* A sequence counter variant where the counter even/odd value is used to
* switch between two copies of protected data. This allows the read path,
* typically NMIs, to safely interrupt the write side critical section.
*
* As the write sections are fully preemptible, no special handling for
* PREEMPT_RT is needed.
*/
typedef struct {
seqcount_t seqcount;
} seqcount_latch_t;
/**
* SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
* @seq_name: Name of the seqcount_latch_t instance
*/
#define SEQCNT_LATCH_ZERO(seq_name) { \
.seqcount = SEQCNT_ZERO(seq_name.seqcount), \
}
/**
* seqcount_latch_init() - runtime initializer for seqcount_latch_t
* @s: Pointer to the seqcount_latch_t instance
*/
#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
/**
* raw_read_seqcount_latch() - pick even/odd latch data copy
* @s: Pointer to seqcount_latch_t
*
* See raw_write_seqcount_latch() for details and a full reader/writer
* usage example.
*
* Return: sequence counter raw value. Use the lowest bit as an index for
* picking which data copy to read. The full counter must then be checked
* with raw_read_seqcount_latch_retry().
*/
static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
/*
* Pairs with the first smp_wmb() in raw_write_seqcount_latch().
* Due to the dependent load, a full smp_rmb() is not needed.
*/
return READ_ONCE(s->seqcount.sequence);
}
/**
* read_seqcount_latch() - pick even/odd latch data copy
* @s: Pointer to seqcount_latch_t
*
* See write_seqcount_latch() for details and a full reader/writer usage
* example.
*
* Return: sequence counter raw value. Use the lowest bit as an index for
* picking which data copy to read. The full counter must then be checked
* with read_seqcount_latch_retry().
*/
static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
{
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return raw_read_seqcount_latch(s);
}
/**
* raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from raw_read_seqcount_latch()
*
* Return: true if a read section retry is required, else false
*/
static __always_inline int
raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
smp_rmb();
return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}
/**
* read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from read_seqcount_latch()
*
* Return: true if a read section retry is required, else false
*/
static __always_inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
kcsan_atomic_next(0);
return raw_read_seqcount_latch_retry(s, start);
}
/**
* raw_write_seqcount_latch() - redirect latch readers to even/odd copy
* @s: Pointer to seqcount_latch_t
*/
static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
smp_wmb(); /* prior stores before incrementing "sequence" */
s->seqcount.sequence++;
smp_wmb(); /* increment "sequence" before following stores */
}
/**
* write_seqcount_latch_begin() - redirect latch readers to odd copy
* @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
* interrupt the modification -- e.g. the concurrency is strictly between CPUs
* -- you most likely do not need this.
*
* Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state,
 * the latch allows the same for non-atomic updates. The trade-off is doubling the
* cost of storage; we have to maintain two copies of the entire data
* structure.
*
* Very simply put: we first modify one copy and then the other. This ensures
* there is always one copy in a stable state, ready to give us an answer.
*
* The basic form is a data structure like::
*
* struct latch_struct {
* seqcount_latch_t seq;
* struct data_struct data[2];
* };
*
* Where a modification, which is assumed to be externally serialized, does the
* following::
*
* void latch_modify(struct latch_struct *latch, ...)
* {
* write_seqcount_latch_begin(&latch->seq);
* modify(latch->data[0], ...);
* write_seqcount_latch(&latch->seq);
* modify(latch->data[1], ...);
* write_seqcount_latch_end(&latch->seq);
* }
*
* The query will have a form like::
*
* struct entry *latch_query(struct latch_struct *latch, ...)
* {
* struct entry *entry;
* unsigned seq, idx;
*
* do {
* seq = read_seqcount_latch(&latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
* // This includes needed smp_rmb()
* } while (read_seqcount_latch_retry(&latch->seq, seq));
*
* return entry;
* }
*
* So during the modification, queries are first redirected to data[1]. Then we
* modify data[0]. When that is complete, we redirect queries back to data[0]
* and we can modify data[1].
*
* NOTE:
*
* The non-requirement for atomic modifications does _NOT_ include
* the publishing of new entries in the case where data is a dynamic
* data structure.
*
* An iteration might start in data[0] and get suspended long enough
* to miss an entire modification sequence, once it resumes it might
* observe the new entry.
*
* NOTE2:
*
 * When data is a dynamic data structure, one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
{
kcsan_nestable_atomic_begin();
raw_write_seqcount_latch(s);
}
/**
* write_seqcount_latch() - redirect latch readers to even copy
* @s: Pointer to seqcount_latch_t
*/
static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
{
raw_write_seqcount_latch(s);
}
/**
* write_seqcount_latch_end() - end a seqcount_latch_t write section
* @s: Pointer to seqcount_latch_t
*
* Marks the end of a seqcount_latch_t writer section, after all copies of the
* latch-protected data have been updated.
*/
static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
{
kcsan_nestable_atomic_end();
}
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
/**
* seqlock_init() - dynamic initializer for seqlock_t
* @sl: Pointer to the seqlock_t instance
*/
#define seqlock_init(sl) \
do { \
spin_lock_init(&(sl)->lock); \
seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
} while (0)
/**
* DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
* @sl: Name of the seqlock_t instance
*/
#define DEFINE_SEQLOCK(sl) \
seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
/**
* read_seqbegin() - start a seqlock_t read side critical section
* @sl: Pointer to seqlock_t
*
* Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
/**
* read_seqretry() - end a seqlock_t read side section
* @sl: Pointer to seqlock_t
* @start: count, from read_seqbegin()
*
* read_seqretry closes the read side critical section of given seqlock_t.
* If the critical section was invalid, it must be ignored (and typically
* retried).
*
* Return: true if a read section retry is required, else false
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
return read_seqcount_retry(&sl->seqcount, start);
}
/*
* For all seqlock_t write side functions, use the internal
* do_write_seqcount_begin() instead of generic write_seqcount_begin().
* This way, no redundant lockdep_assert_held() checks are added.
*/
/**
* write_seqlock() - start a seqlock_t write side critical section
* @sl: Pointer to seqlock_t
*
* write_seqlock opens a write side critical section for the given
* seqlock_t. It also implicitly acquires the spinlock_t embedded inside
* that sequential lock. All seqlock_t write side sections are thus
* automatically serialized and non-preemptible.
*
* Context: if the seqlock_t read section, or other write side critical
* sections, can be invoked from hardirq or softirq contexts, use the
* _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
* write_sequnlock() - end a seqlock_t write side critical section
* @sl: Pointer to seqlock_t
*
* write_sequnlock closes the (serialized and non-preemptible) write side
* critical section of given seqlock_t.
*/
static inline void write_sequnlock(seqlock_t *sl)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}
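/*
 * A minimal seqlock_t usage sketch (illustrative only; "foo_lock" and the
 * foo_a/foo_b variables are hypothetical). The writer is serialized by the
 * embedded spinlock, while the lockless reader retries on concurrent
 * updates:
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 *	// writer
 *	write_seqlock(&foo_lock);
 *	foo_a = new_a;
 *	foo_b = new_b;
 *	write_sequnlock(&foo_lock);
 *
 *	// reader
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin(&foo_lock);
 *		a = foo_a;
 *		b = foo_b;
 *	} while (read_seqretry(&foo_lock, seq));
 */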
/**
* write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
* @sl: Pointer to seqlock_t
*
* _bh variant of write_seqlock(). Use only if the read side section, or
* other write side sections, can be invoked from softirq contexts.
*/
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
* write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
* @sl: Pointer to seqlock_t
*
* write_sequnlock_bh closes the serialized, non-preemptible, and
* softirqs-disabled, seqlock_t write side critical section opened with
* write_seqlock_bh().
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}
/**
* write_seqlock_irq() - start a non-interruptible seqlock_t write section
* @sl: Pointer to seqlock_t
*
* _irq variant of write_seqlock(). Use only if the read side section, or
* other write sections, can be invoked from hardirq contexts.
*/
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
}
/**
* write_sequnlock_irq() - end a non-interruptible seqlock_t write section
* @sl: Pointer to seqlock_t
*
* write_sequnlock_irq closes the serialized and non-interruptible
* seqlock_t write side section opened with write_seqlock_irq().
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
do_write_seqcount_begin(&sl->seqcount.seqcount);
return flags;
}
/**
* write_seqlock_irqsave() - start a non-interruptible seqlock_t write
* section
* @lock: Pointer to seqlock_t
* @flags: Stack-allocated storage for saving caller's local interrupt
* state, to be passed to write_sequnlock_irqrestore().
*
* _irqsave variant of write_seqlock(). Use it only if the read side
* section, or other write sections, can be invoked from hardirq context.
*/
#define write_seqlock_irqsave(lock, flags) \
do { flags = __write_seqlock_irqsave(lock); } while (0)
/**
* write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
* section
* @sl: Pointer to seqlock_t
* @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
*
* write_sequnlock_irqrestore closes the serialized and non-interruptible
* seqlock_t write section previously opened with write_seqlock_irqsave().
*/
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
/**
* read_seqlock_excl() - begin a seqlock_t locking reader section
* @sl: Pointer to seqlock_t
*
* read_seqlock_excl opens a seqlock_t locking reader critical section. A
* locking reader exclusively locks out *both* other writers *and* other
* locking readers, but it does not update the embedded sequence number.
*
* Locking readers act like a normal spin_lock()/spin_unlock().
*
* Context: if the seqlock_t write section, *or other read sections*, can
* be invoked from hardirq or softirq contexts, use the _irqsave or _bh
* variant of this function instead.
*
* The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
{
spin_lock(&sl->lock);
}
/**
* read_sequnlock_excl() - end a seqlock_t locking reader critical section
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
/**
* read_seqlock_excl_bh() - start a seqlock_t locking reader section with
* softirqs disabled
* @sl: Pointer to seqlock_t
*
* _bh variant of read_seqlock_excl(). Use this variant only if the
* seqlock_t write side section, *or other read sections*, can be invoked
* from softirq contexts.
*/
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
}
/**
* read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
* reader section
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
spin_unlock_bh(&sl->lock);
}
/**
* read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
* reader section
* @sl: Pointer to seqlock_t
*
* _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
* write side section, *or other read sections*, can be invoked from a
* hardirq context.
*/
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
}
/**
* read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
* locking reader section
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
return flags;
}
/**
* read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
* locking reader section
* @lock: Pointer to seqlock_t
* @flags: Stack-allocated storage for saving caller's local interrupt
* state, to be passed to read_sequnlock_excl_irqrestore().
*
* _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
* write side section, *or other read sections*, can be invoked from a
* hardirq context.
*/
#define read_seqlock_excl_irqsave(lock, flags) \
do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
/**
* read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
* locking reader section
* @sl: Pointer to seqlock_t
* @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
*/
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
spin_unlock_irqrestore(&sl->lock, flags);
}
/**
* read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
* @lock: Pointer to seqlock_t
* @seq : Marker and return parameter. If the passed value is even, the
* reader will become a *lockless* seqlock_t reader as in read_seqbegin().
* If the passed value is odd, the reader will become a *locking* reader
* as in read_seqlock_excl(). In the first call to this function, the
* caller *must* initialize and pass an even value to @seq; this way, a
* lockless read can be optimistically tried first.
*
* read_seqbegin_or_lock is an API designed to optimistically try a normal
* lockless seqlock_t read section first. If an odd counter is found, the
* lockless read trial has failed, and the next read iteration transforms
* itself into a full seqlock_t locking reader.
*
 * This is typically used to avoid seqlock_t lockless reader starvation
 * (too many retry loops) in the case of a sharp spike in write side
* activity.
*
* Context: if the seqlock_t write section, *or other read sections*, can
* be invoked from hardirq or softirq contexts, use the _irqsave or _bh
* variant of this function instead.
*
* Check Documentation/locking/seqlock.rst for template example code.
*
* Return: the encountered sequence counter value, through the @seq
* parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs to
* be retried, this returned value must also be passed as the @seq
* parameter of the next read_seqbegin_or_lock() iteration.
*/
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
if (!(*seq & 1)) /* Even */
*seq = read_seqbegin(lock);
else /* Odd */
read_seqlock_excl(lock);
}
/**
* need_seqretry() - validate seqlock_t "locking or lockless" read section
* @lock: Pointer to seqlock_t
* @seq: sequence count, from read_seqbegin_or_lock()
*
* Return: true if a read section retry is required, false otherwise
*/
static inline int need_seqretry(seqlock_t *lock, int seq)
{
return !(seq & 1) && read_seqretry(lock, seq);
}
/**
* done_seqretry() - end seqlock_t "locking or lockless" reader section
* @lock: Pointer to seqlock_t
* @seq: count, from read_seqbegin_or_lock()
*
* done_seqretry finishes the seqlock_t read side critical section started
* with read_seqbegin_or_lock() and validated by need_seqretry().
*/
static inline void done_seqretry(seqlock_t *lock, int seq)
{
if (seq & 1)
read_sequnlock_excl(lock);
}
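/*
 * A minimal sketch of the "lockless first, then locking" reader pattern
 * built from read_seqbegin_or_lock(), need_seqretry() and done_seqretry()
 * (illustrative only; "foo_lock" and the protected data are hypothetical):
 *
 *	int seq = 0;	// even: the first pass is a lockless read
 *
 *	do {
 *		read_seqbegin_or_lock(&foo_lock, &seq);
 *		// ... read the protected data ...
 *		if (!need_seqretry(&foo_lock, seq))
 *			break;
 *		seq = 1;	// odd: retry as a locking reader
 *	} while (1);
 *	done_seqretry(&foo_lock, seq);
 *
 * See Documentation/locking/seqlock.rst for the full template.
 */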
/**
* read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
* a non-interruptible locking reader
* @lock: Pointer to seqlock_t
* @seq: Marker and return parameter. Check read_seqbegin_or_lock().
*
* This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
* the seqlock_t write section, *or other read sections*, can be invoked
* from hardirq context.
*
* Note: Interrupts will be disabled only for "locking reader" mode.
*
* Return:
*
* 1. The saved local interrupts state in case of a locking reader, to
* be passed to done_seqretry_irqrestore().
*
* 2. The encountered sequence counter value, returned through @seq
* overloaded as a return parameter. Check read_seqbegin_or_lock().
*/
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
unsigned long flags = 0;
if (!(*seq & 1)) /* Even */
*seq = read_seqbegin(lock);
else /* Odd */
read_seqlock_excl_irqsave(lock, flags);
return flags;
}
/**
* done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
* non-interruptible locking reader section
* @lock: Pointer to seqlock_t
* @seq: Count, from read_seqbegin_or_lock_irqsave()
* @flags: Caller's saved local interrupt state in case of a locking
* reader, also from read_seqbegin_or_lock_irqsave()
*
* This is the _irqrestore variant of done_seqretry(). The read section
* must've been opened with read_seqbegin_or_lock_irqsave(), and validated
* by need_seqretry().
*/
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI device specific properties support.
*
* Copyright (C) 2014 - 2023, Intel Corporation
* All rights reserved.
*
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Darren Hart <dvhart@linux.intel.com>
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
* Sakari Ailus <sakari.ailus@linux.intel.com>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
#include "internal.h"
static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj);
/*
* The GUIDs here are made equivalent to each other in order to avoid extra
 * complexity in the properties handling code, with the caveat that the
 * kernel will accept, without warning, certain combinations of GUID and
 * properties that are not defined. For instance, if properties belonging
 * to one GUID appear in the property list of another, they will be
 * accepted by the kernel. Firmware validation tools should catch these.
*
* References:
*
* [1] UEFI DSD Guide.
* https://github.com/UEFI/DSD-Guide/blob/main/src/dsd-guide.adoc
*/
static const guid_t prp_guids[] = {
/* ACPI _DSD device properties GUID [1]: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01),
/* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
GUID_INIT(0x6211e2c0, 0x58a3, 0x4af3,
0x90, 0xe1, 0x92, 0x7a, 0x4e, 0x0c, 0x55, 0xa4),
/* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */
GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3,
0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
/* Thunderbolt GUID for IMR_VALID: c44d002f-69f9-4e7d-a904-a7baabdf43f7 */
GUID_INIT(0xc44d002f, 0x69f9, 0x4e7d,
0xa9, 0x04, 0xa7, 0xba, 0xab, 0xdf, 0x43, 0xf7),
/* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */
GUID_INIT(0x6c501103, 0xc189, 0x4296,
0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d),
/* Storage device needs D3 GUID: 5025030f-842f-4ab4-a561-99a5189762d0 */
GUID_INIT(0x5025030f, 0x842f, 0x4ab4,
0xa5, 0x61, 0x99, 0xa5, 0x18, 0x97, 0x62, 0xd0),
};
/* ACPI _DSD data subnodes GUID [1]: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
/* ACPI _DSD data buffer GUID [1]: edb12dd0-363d-4085-a3d2-49522ca160c4 */
static const guid_t buffer_prop_guid =
GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent);
static bool acpi_extract_properties(acpi_handle handle,
union acpi_object *desc,
struct acpi_device_data *data);
static bool acpi_nondev_subnode_extract(union acpi_object *desc,
acpi_handle handle,
const union acpi_object *link,
struct list_head *list,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
acpi_handle scope = NULL;
bool result;
if (acpi_graph_ignore_port(handle))
return false;
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
return false;
dn->name = link->package.elements[0].string.pointer;
fwnode_init(&dn->fwnode, &acpi_data_fwnode_ops);
dn->parent = parent;
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
/*
* The scope for the completion of relative pathname segments and
* subnode object lookup is the one of the namespace node (device)
* containing the object that has returned the package. That is, it's
* the scope of that object's parent device.
*/
if (handle)
acpi_get_parent(handle, &scope);
/*
* Extract properties from the _DSD-equivalent package pointed to by
* desc and use scope (if not NULL) for the completion of relative
* pathname segments.
*
* The extracted properties will be held in the new data node dn.
*/
result = acpi_extract_properties(scope, desc, &dn->data);
/*
* Look for subnodes in the _DSD-equivalent package pointed to by desc
* and create child nodes of dn if there are any.
*/
if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
result = true;
if (!result) {
kfree(dn);
acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
}
/*
* This will be NULL if the desc package is embedded in an outer
* _DSD-equivalent package and its scope cannot be determined.
*/
dn->handle = handle;
dn->data.pointer = desc;
list_add_tail(&dn->sibling, list);
return true;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
const union acpi_object *link,
struct list_head *list,
struct fwnode_handle *parent)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_handle handle;
acpi_status status;
/*
* If the scope is unknown, the _DSD-equivalent package being parsed
* was embedded in an outer _DSD-equivalent package as a result of
* direct evaluation of an object pointed to by a reference. In that
* case, using a pathname as the target object pointer is invalid.
*/
if (!scope)
return false;
status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
&handle);
if (ACPI_FAILURE(status))
return false;
status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status))
return false;
if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
parent))
return true;
ACPI_FREE(buf.pointer);
return false;
}
static bool acpi_add_nondev_subnodes(acpi_handle scope,
union acpi_object *links,
struct list_head *list,
struct fwnode_handle *parent)
{
bool ret = false;
int i;
/*
* Every element in the links package is expected to represent a link
* to a non-device node in a tree containing device-specific data.
*/
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
bool result;
link = &links->package.elements[i];
/* Only two elements allowed. */
if (link->package.count != 2)
continue;
/* The first one (the key) must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
/* The second one (the target) may be a string or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
/*
* The string is expected to be a full pathname or a
* pathname segment relative to the given scope. That
* pathname is expected to point to an object returning
* a package that contains _DSD-equivalent information.
*/
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
case ACPI_TYPE_PACKAGE:
/*
* This happens when a reference is used in AML to
* point to the target. Since the target is expected
* to be a named object, a reference to it will cause it
 * to be evaluated in place and its return package will
* be embedded in the links package at the location of
* the reference.
*
* The target package is expected to contain _DSD-
* equivalent information, but the scope in which it
* is located in the original AML is unknown. Thus
* it cannot contain pathname segments represented as
* strings because there is no way to build full
* pathnames out of them.
*/
acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
link->package.elements[0].string.pointer);
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/*
* It is not expected to see any local references in
* the links package because referencing a named object
* should cause it to be evaluated in place.
*/
acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
link->package.elements[0].string.pointer);
fallthrough;
default:
result = false;
break;
}
ret = ret || result;
}
return ret;
}
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent)
{
int i;
/* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
const union acpi_object *guid;
union acpi_object *links;
guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
/*
* The first element must be a GUID and the second one must be
* a package.
*/
if (guid->type != ACPI_TYPE_BUFFER ||
guid->buffer.length != 16 ||
links->type != ACPI_TYPE_PACKAGE)
break;
if (!guid_equal((guid_t *)guid->buffer.pointer, &ads_guid))
continue;
return acpi_add_nondev_subnodes(scope, links, &data->subnodes,
parent);
}
return false;
}
static bool acpi_property_value_ok(const union acpi_object *value)
{
int j;
/*
* The value must be an integer, a string, a reference, or a package
* whose every element must be an integer, a string, or a reference.
*/
switch (value->type) {
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_LOCAL_REFERENCE:
return true;
case ACPI_TYPE_PACKAGE:
for (j = 0; j < value->package.count; j++)
switch (value->package.elements[j].type) {
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_LOCAL_REFERENCE:
continue;
default:
return false;
}
return true;
}
return false;
}
static bool acpi_properties_format_valid(const union acpi_object *properties)
{
int i;
for (i = 0; i < properties->package.count; i++) {
const union acpi_object *property;
property = &properties->package.elements[i];
/*
* Only two elements allowed, the first one must be a string and
* the second one has to satisfy certain conditions.
*/
if (property->package.count != 2
|| property->package.elements[0].type != ACPI_TYPE_STRING
|| !acpi_property_value_ok(&property->package.elements[1]))
return false;
}
return true;
}
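/*
 * For reference, the layout validated above corresponds to a _DSD package
 * of the following form (ASL shown for illustration only; the property
 * names and values are hypothetical):
 *
 *	Name (_DSD, Package () {
 *		ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () {
 *			Package () { "compatible", "acme,frobnicator" },
 *			Package () { "line-count", 4 },
 *		}
 *	})
 *
 * Each property entry is a two-element package: a name string followed by
 * a value that satisfies acpi_property_value_ok().
 */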
static void acpi_init_of_compatible(struct acpi_device *adev)
{
const union acpi_object *of_compatible;
int ret;
ret = acpi_data_get_property_array(&adev->data, "compatible",
ACPI_TYPE_STRING, &of_compatible);
if (ret) {
ret = acpi_dev_get_property(adev, "compatible",
ACPI_TYPE_STRING, &of_compatible);
if (ret) {
struct acpi_device *parent;
parent = acpi_dev_parent(adev);
if (parent && parent->flags.of_compatible_ok)
goto out;
return;
}
}
adev->data.of_compatible = of_compatible;
out:
adev->flags.of_compatible_ok = 1;
}
static bool acpi_is_property_guid(const guid_t *guid)
{
int i;
for (i = 0; i < ARRAY_SIZE(prp_guids); i++) {
if (guid_equal(guid, &prp_guids[i]))
return true;
}
return false;
}
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
union acpi_object *properties)
{
struct acpi_device_properties *props;
props = kzalloc(sizeof(*props), GFP_KERNEL);
if (props) {
INIT_LIST_HEAD(&props->list);
props->guid = guid;
props->properties = properties;
list_add_tail(&props->list, &data->properties);
}
return props;
}
static void acpi_nondev_subnode_tag(acpi_handle handle, void *context)
{
}
static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
{
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
if (!dn->handle)
continue;
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
}
}
static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
{
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
acpi_status status;
bool ret;
if (!dn->handle)
continue;
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
return false;
}
ret = acpi_tie_nondev_subnodes(&dn->data);
if (!ret)
return ret;
}
return true;
}
static void acpi_data_add_buffer_props(acpi_handle handle,
struct acpi_device_data *data,
union acpi_object *properties)
{
struct acpi_device_properties *props;
union acpi_object *package;
size_t alloc_size;
unsigned int i;
u32 *count;
if (check_mul_overflow((size_t)properties->package.count,
sizeof(*package) + sizeof(void *),
&alloc_size) ||
check_add_overflow(sizeof(*props) + sizeof(*package), alloc_size,
&alloc_size)) {
acpi_handle_warn(handle,
"can't allocate memory for %u buffer props",
properties->package.count);
return;
}
props = kvzalloc(alloc_size, GFP_KERNEL);
if (!props)
return;
props->guid = &buffer_prop_guid;
props->bufs = (void *)(props + 1);
props->properties = (void *)(props->bufs + properties->package.count);
/* Outer package */
package = props->properties;
package->type = ACPI_TYPE_PACKAGE;
package->package.elements = package + 1;
count = &package->package.count;
*count = 0;
/* Inner packages */
package++;
for (i = 0; i < properties->package.count; i++) {
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
union acpi_object *property = &properties->package.elements[i];
union acpi_object *prop, *obj, *buf_obj;
acpi_status status;
if (property->type != ACPI_TYPE_PACKAGE ||
property->package.count != 2) {
acpi_handle_warn(handle,
"buffer property %u has %u entries\n",
i, property->package.count);
continue;
}
prop = &property->package.elements[0];
obj = &property->package.elements[1];
if (prop->type != ACPI_TYPE_STRING ||
obj->type != ACPI_TYPE_STRING) {
acpi_handle_warn(handle,
"wrong object types %u and %u\n",
prop->type, obj->type);
continue;
}
status = acpi_evaluate_object_typed(handle, obj->string.pointer,
NULL, &buf,
ACPI_TYPE_BUFFER);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(handle,
"can't evaluate \"%*pE\" as buffer\n",
obj->string.length,
obj->string.pointer);
continue;
}
package->type = ACPI_TYPE_PACKAGE;
package->package.elements = prop;
package->package.count = 2;
buf_obj = buf.pointer;
/* Replace the string object with a buffer object */
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = buf_obj->buffer.length;
obj->buffer.pointer = buf_obj->buffer.pointer;
props->bufs[i] = buf.pointer;
package++;
(*count)++;
}
if (*count)
list_add(&props->list, &data->properties);
else
kvfree(props);
}
static bool acpi_extract_properties(acpi_handle scope, union acpi_object *desc,
struct acpi_device_data *data)
{
int i;
if (desc->package.count % 2)
return false;
/* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
const union acpi_object *guid;
union acpi_object *properties;
guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
/*
* The first element must be a GUID and the second one must be
* a package.
*/
if (guid->type != ACPI_TYPE_BUFFER ||
guid->buffer.length != 16 ||
properties->type != ACPI_TYPE_PACKAGE)
break;
if (guid_equal((guid_t *)guid->buffer.pointer,
&buffer_prop_guid)) {
acpi_data_add_buffer_props(scope, data, properties);
continue;
}
if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
/*
* We found the matching GUID. Now validate the format of the
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
continue;
acpi_data_add_props(data, (const guid_t *)guid->buffer.pointer,
properties);
}
return !list_empty(&data->properties);
}
void acpi_init_properties(struct acpi_device *adev)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
struct acpi_hardware_id *hwid;
acpi_status status;
bool acpi_of = false;
INIT_LIST_HEAD(&adev->data.properties);
INIT_LIST_HEAD(&adev->data.subnodes);
if (!adev->handle)
return;
/*
	 * Check if ACPI_DT_NAMESPACE_HID is present and in that case fill in
* Device Tree compatible properties for this device.
*/
list_for_each_entry(hwid, &adev->pnp.ids, list) {
if (!strcmp(hwid->id, ACPI_DT_NAMESPACE_HID)) {
acpi_of = true;
break;
}
}
status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL, &buf,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status))
goto out;
if (acpi_extract_properties(adev->handle, buf.pointer, &adev->data)) {
adev->data.pointer = buf.pointer;
if (acpi_of)
acpi_init_of_compatible(adev);
}
if (acpi_enumerate_nondev_subnodes(adev->handle, buf.pointer,
&adev->data, acpi_fwnode_handle(adev)))
adev->data.pointer = buf.pointer;
if (!adev->data.pointer) {
acpi_handle_debug(adev->handle, "Invalid _DSD data, skipping\n");
ACPI_FREE(buf.pointer);
} else {
if (!acpi_tie_nondev_subnodes(&adev->data))
acpi_untie_nondev_subnodes(&adev->data);
}
out:
if (acpi_of && !adev->flags.of_compatible_ok)
acpi_handle_info(adev->handle,
ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n");
if (!adev->data.pointer)
acpi_extract_apple_properties(adev);
}
static void acpi_free_device_properties(struct list_head *list)
{
struct acpi_device_properties *props, *tmp;
list_for_each_entry_safe(props, tmp, list, list) {
u32 i;
list_del(&props->list);
/* Buffer data properties were separately allocated */
if (props->bufs)
for (i = 0; i < props->properties->package.count; i++)
ACPI_FREE(props->bufs[i]);
kvfree(props);
}
}
static void acpi_destroy_nondev_subnodes(struct list_head *list)
{
struct acpi_data_node *dn, *next;
if (list_empty(list))
return;
list_for_each_entry_safe_reverse(dn, next, list, sibling) {
acpi_destroy_nondev_subnodes(&dn->data.subnodes);
wait_for_completion(&dn->kobj_done);
list_del(&dn->sibling);
ACPI_FREE((void *)dn->data.pointer);
acpi_free_device_properties(&dn->data.properties);
kfree(dn);
}
}
void acpi_free_properties(struct acpi_device *adev)
{
acpi_untie_nondev_subnodes(&adev->data);
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
acpi_free_device_properties(&adev->data.properties);
}
/**
* acpi_data_get_property - return an ACPI property with given name
 * @data: ACPI device data object to get the property from
* @name: Name of the property
* @type: Expected property type
* @obj: Location to store the property value (if not %NULL)
*
* Look up a property with @name and store a pointer to the resulting ACPI
* object at the location pointed to by @obj if found.
*
* Callers must not attempt to free the returned objects. These objects will be
* freed by the ACPI core automatically during the removal of @data.
*
* Return: %0 if property with @name has been found (success),
* %-EINVAL if the arguments are invalid,
* %-EINVAL if the property doesn't exist,
* %-EPROTO if the property value type doesn't match @type.
*/
static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
const struct acpi_device_properties *props;
if (!data || !name)
return -EINVAL;
if (!data->pointer || list_empty(&data->properties))
return -EINVAL;
list_for_each_entry(props, &data->properties, list) {
const union acpi_object *properties;
unsigned int i;
properties = props->properties;
for (i = 0; i < properties->package.count; i++) {
const union acpi_object *propname, *propvalue;
const union acpi_object *property;
property = &properties->package.elements[i];
propname = &property->package.elements[0];
propvalue = &property->package.elements[1];
if (!strcmp(name, propname->string.pointer)) {
if (type != ACPI_TYPE_ANY &&
propvalue->type != type)
return -EPROTO;
if (obj)
*obj = propvalue;
return 0;
}
}
}
return -EINVAL;
}
/**
* acpi_dev_get_property - return an ACPI property with given name.
* @adev: ACPI device to get the property from.
* @name: Name of the property.
* @type: Expected property type.
* @obj: Location to store the property value (if not %NULL).
*/
int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj)
{
return adev ? acpi_data_get_property(&adev->data, name, type, obj) : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_property);
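/*
 * A minimal usage sketch for acpi_dev_get_property() (illustrative only;
 * the device, property name and surrounding code are hypothetical):
 *
 *	const union acpi_object *obj;
 *	u64 freq;
 *	int ret;
 *
 *	ret = acpi_dev_get_property(adev, "clock-frequency",
 *				    ACPI_TYPE_INTEGER, &obj);
 *	if (ret)
 *		return ret;
 *	freq = obj->integer.value;
 *
 * The returned object is owned by the ACPI core and must not be freed by
 * the caller; see acpi_data_get_property() above.
 */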
static const struct acpi_device_data *
acpi_device_data_of_node(const struct fwnode_handle *fwnode)
{
if (is_acpi_device_node(fwnode)) {
const struct acpi_device *adev = to_acpi_device_node(fwnode);
return &adev->data;
}
if (is_acpi_data_node(fwnode)) {
const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return &dn->data;
}
return NULL;
}
/**
* acpi_node_prop_get - return an ACPI property with given name.
* @fwnode: Firmware node to get the property from.
* @propname: Name of the property.
* @valptr: Location to store a pointer to the property value (if not %NULL).
*/
int acpi_node_prop_get(const struct fwnode_handle *fwnode,
const char *propname, void **valptr)
{
return acpi_data_get_property(acpi_device_data_of_node(fwnode),
propname, ACPI_TYPE_ANY,
(const union acpi_object **)valptr);
}
/**
* acpi_data_get_property_array - return an ACPI array property with given name
* @data: ACPI data object to get the property from
* @name: Name of the property
* @type: Expected type of array elements
* @obj: Location to store a pointer to the property value (if not NULL)
*
* Look up an array property with @name and store a pointer to the resulting
* ACPI object at the location pointed to by @obj if found.
*
* Callers must not attempt to free the returned objects. Those objects will be
* freed by the ACPI core automatically during the removal of @data.
*
* Return: %0 if array property (package) with @name has been found (success),
* %-EINVAL if the arguments are invalid,
* %-EINVAL if the property doesn't exist,
* %-EPROTO if the property is not a package or the type of its elements
* doesn't match @type.
*/
static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj)
{
const union acpi_object *prop;
int ret, i;
ret = acpi_data_get_property(data, name, ACPI_TYPE_PACKAGE, &prop);
if (ret)
return ret;
if (type != ACPI_TYPE_ANY) {
/* Check that all elements are of correct type. */
for (i = 0; i < prop->package.count; i++)
if (prop->package.elements[i].type != type)
return -EPROTO;
}
if (obj)
*obj = prop;
return 0;
}
static struct fwnode_handle *
acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
struct fwnode_handle *child;
fwnode_for_each_child_node(fwnode, child) {
if (is_acpi_data_node(child)) {
if (acpi_data_node_match(child, childname))
return child;
continue;
}
if (!strncmp(acpi_device_bid(to_acpi_device_node(child)),
childname, ACPI_NAMESEG_SIZE))
return child;
}
return NULL;
}
static unsigned int acpi_fwnode_get_args_count(struct fwnode_handle *fwnode,
const char *nargs_prop)
{
const struct acpi_device_data *data;
const union acpi_object *obj;
int ret;
data = acpi_device_data_of_node(fwnode);
if (!data)
return 0;
ret = acpi_data_get_property(data, nargs_prop, ACPI_TYPE_INTEGER, &obj);
if (ret)
return 0;
return obj->integer.value;
}
static int acpi_get_ref_args(struct fwnode_reference_args *args,
struct fwnode_handle *ref_fwnode,
const char *nargs_prop,
const union acpi_object **element,
const union acpi_object *end, size_t num_args)
{
u32 nargs = 0, i;
if (nargs_prop)
num_args = acpi_fwnode_get_args_count(ref_fwnode, nargs_prop);
/*
	 * Assume the following integer elements are all args. Stop counting on
	 * the first reference (possibly represented as a string) or at the end
	 * of the package arguments. If an element is neither a reference nor an
	 * integer, return an error; we cannot parse it.
*/
for (i = 0; (*element) + i < end && i < num_args; i++) {
acpi_object_type type = (*element)[i].type;
if (type == ACPI_TYPE_LOCAL_REFERENCE || type == ACPI_TYPE_STRING)
break;
if (type == ACPI_TYPE_INTEGER)
nargs++;
else
return -EINVAL;
}
if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;
if (args) {
args->fwnode = ref_fwnode;
args->nargs = nargs;
for (i = 0; i < nargs; i++)
args->args[i] = (*element)[i].integer.value;
}
(*element) += nargs;
return 0;
}
static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *fwnode,
const char *refstring)
{
acpi_handle scope, handle;
struct acpi_data_node *dn;
struct acpi_device *device;
acpi_status status;
if (is_acpi_device_node(fwnode)) {
scope = to_acpi_device_node(fwnode)->handle;
} else if (is_acpi_data_node(fwnode)) {
scope = to_acpi_data_node(fwnode)->handle;
} else {
pr_debug("Bad node type for node %pfw\n", fwnode);
return NULL;
}
status = acpi_get_handle(scope, refstring, &handle);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(scope, "Unable to get an ACPI handle for %s\n",
refstring);
return NULL;
}
device = acpi_fetch_acpi_dev(handle);
if (device)
return acpi_fwnode_handle(device);
status = acpi_get_data_full(handle, acpi_nondev_subnode_tag,
(void **)&dn, NULL);
if (ACPI_FAILURE(status) || !dn) {
acpi_handle_debug(handle, "Subnode not found\n");
return NULL;
}
return &dn->fwnode;
}
static int acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
const char *propname, const char *nargs_prop,
unsigned int args_count, unsigned int index,
struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
const struct acpi_device_data *data;
struct fwnode_handle *ref_fwnode;
struct acpi_device *device;
int ret, idx = 0;
data = acpi_device_data_of_node(fwnode);
if (!data)
return -ENOENT;
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
return ret == -EINVAL ? -ENOENT : -EINVAL;
switch (obj->type) {
case ACPI_TYPE_LOCAL_REFERENCE:
/* Plain single reference without arguments. */
if (index)
return -ENOENT;
device = acpi_fetch_acpi_dev(obj->reference.handle);
if (!device)
return -EINVAL;
if (!args)
return 0;
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
case ACPI_TYPE_STRING:
if (index)
return -ENOENT;
ref_fwnode = acpi_parse_string_ref(fwnode, obj->string.pointer);
if (!ref_fwnode)
return -EINVAL;
args->fwnode = ref_fwnode;
args->nargs = 0;
return 0;
case ACPI_TYPE_PACKAGE:
/*
* If it is not a single reference, then it is a package of
		 * references, each followed by a number of integer arguments:
*
* Package () { REF, INT, REF, INT, INT }
*
* Here, REF may be either a local reference or a string. The
* index argument is then used to determine which reference the
* caller wants (along with the arguments).
*/
break;
default:
return -EINVAL;
}
if (index >= obj->package.count)
return -ENOENT;
element = obj->package.elements;
end = element + obj->package.count;
while (element < end) {
switch (element->type) {
case ACPI_TYPE_LOCAL_REFERENCE:
device = acpi_fetch_acpi_dev(element->reference.handle);
if (!device)
return -EINVAL;
element++;
ret = acpi_get_ref_args(idx == index ? args : NULL,
acpi_fwnode_handle(device),
nargs_prop, &element, end,
args_count);
if (ret < 0)
return ret;
if (idx == index)
return 0;
break;
case ACPI_TYPE_STRING:
ref_fwnode = acpi_parse_string_ref(fwnode,
element->string.pointer);
if (!ref_fwnode)
return -EINVAL;
element++;
ret = acpi_get_ref_args(idx == index ? args : NULL,
ref_fwnode, nargs_prop, &element, end,
args_count);
if (ret < 0)
return ret;
if (idx == index)
return 0;
break;
case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
element++;
break;
default:
return -EINVAL;
}
idx++;
}
return -ENOENT;
}
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
* @propname: Name of the property
* @index: Index of the reference to return
* @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
* (may be NULL)
*
 * Find property with @propname, verify that it is a package containing at
 * least one object reference and if so, store a pointer to the target
 * object's fwnode in @args->fwnode. If the reference includes arguments,
 * store them in the @args->args[] array.
*
* If there's more than one reference in the property value package, @index is
* used to select the one to return.
*
* It is possible to leave holes in the property value set like in the
* example below:
*
* Package () {
* "cs-gpios",
* Package () {
* ^GPIO, 19, 0, 0,
* ^GPIO, 20, 0, 0,
* 0,
* ^GPIO, 21, 0, 0,
* }
* }
*
 * Calling this function with index %2 or index %3 returns %-ENOENT. If the
 * property does not contain any more values, %-ENOENT is returned. The NULL
 * entry must be a single integer and preferably contain the value %0.
*
* Return: %0 on success, negative error code on failure.
*/
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index,
size_t num_args,
struct fwnode_reference_args *args)
{
return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
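/*
 * A minimal usage sketch for __acpi_node_get_property_reference(), based
 * on the "cs-gpios" package in the comment above (illustrative only; the
 * fwnode and the use of the result are hypothetical):
 *
 *	struct fwnode_reference_args args;
 *	int ret;
 *
 *	ret = __acpi_node_get_property_reference(fwnode, "cs-gpios", 1, 3,
 *						 &args);
 *	if (ret)
 *		return ret;
 *	// args.fwnode refers to the second ^GPIO entry and
 *	// args.args[0..2] hold 20, 0 and 0 from the example package.
 */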
static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype, void *val)
{
const union acpi_object *obj;
int ret = 0;
if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
else if (proptype == DEV_PROP_STRING)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
if (ret)
return ret;
switch (proptype) {
case DEV_PROP_U8:
if (obj->integer.value > U8_MAX)
return -EOVERFLOW;
if (val)
*(u8 *)val = obj->integer.value;
break;
case DEV_PROP_U16:
if (obj->integer.value > U16_MAX)
return -EOVERFLOW;
if (val)
*(u16 *)val = obj->integer.value;
break;
case DEV_PROP_U32:
if (obj->integer.value > U32_MAX)
return -EOVERFLOW;
if (val)
*(u32 *)val = obj->integer.value;
break;
case DEV_PROP_U64:
if (val)
*(u64 *)val = obj->integer.value;
break;
case DEV_PROP_STRING:
if (val)
*(char **)val = obj->string.pointer;
return 1;
default:
return -EINVAL;
}
	/* When no storage is provided, return the number of available values */
return val ? 0 : 1;
}
#define acpi_copy_property_array_uint(items, val, nval) \
({ \
typeof(items) __items = items; \
typeof(val) __val = val; \
typeof(nval) __nval = nval; \
size_t i; \
int ret = 0; \
\
for (i = 0; i < __nval; i++) { \
if (__items->type == ACPI_TYPE_BUFFER) { \
__val[i] = __items->buffer.pointer[i]; \
continue; \
} \
if (__items[i].type != ACPI_TYPE_INTEGER) { \
ret = -EPROTO; \
break; \
} \
if (__items[i].integer.value > _Generic(__val, \
u8 *: U8_MAX, \
u16 *: U16_MAX, \
u32 *: U32_MAX, \
u64 *: U64_MAX)) { \
ret = -EOVERFLOW; \
break; \
} \
\
__val[i] = __items[i].integer.value; \
} \
ret; \
})
static int acpi_copy_property_array_string(const union acpi_object *items,
char **val, size_t nval)
{
int i;
for (i = 0; i < nval; i++) {
if (items[i].type != ACPI_TYPE_STRING)
return -EPROTO;
val[i] = items[i].string.pointer;
}
return nval;
}
static int acpi_data_prop_read(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
{
const union acpi_object *obj;
const union acpi_object *items;
int ret;
if (nval == 1 || !val) {
ret = acpi_data_prop_read_single(data, propname, proptype, val);
/*
* The overflow error means that the property is there and it is
* single-value, but its type does not match, so return.
*/
if (ret >= 0 || ret == -EOVERFLOW)
return ret;
/*
* Reading this property as a single-value one failed, but its
* value may still be represented as one-element array, so
* continue.
*/
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
if (ret && proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_BUFFER,
&obj);
if (ret)
return ret;
if (!val) {
if (obj->type == ACPI_TYPE_BUFFER)
return obj->buffer.length;
return obj->package.count;
}
switch (proptype) {
case DEV_PROP_STRING:
break;
default:
if (obj->type == ACPI_TYPE_BUFFER) {
if (nval > obj->buffer.length)
return -EOVERFLOW;
} else {
if (nval > obj->package.count)
return -EOVERFLOW;
}
break;
}
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
return -EPROTO;
items = obj;
} else {
items = obj->package.elements;
}
switch (proptype) {
case DEV_PROP_U8:
ret = acpi_copy_property_array_uint(items, (u8 *)val, nval);
break;
case DEV_PROP_U16:
ret = acpi_copy_property_array_uint(items, (u16 *)val, nval);
break;
case DEV_PROP_U32:
ret = acpi_copy_property_array_uint(items, (u32 *)val, nval);
break;
case DEV_PROP_U64:
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
nval = min_t(u32, nval, obj->package.count);
if (nval == 0)
return -ENODATA;
ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/**
* acpi_node_prop_read - retrieve the value of an ACPI property with given name.
* @fwnode: Firmware node to get the property from.
* @propname: Name of the property.
* @proptype: Expected property type.
* @val: Location to store the property value (if not %NULL).
* @nval: Size of the array pointed to by @val.
*
* If @val is %NULL, return the number of array elements comprising the value
* of the property. Otherwise, read at most @nval values to the array at the
* location pointed to by @val.
*/
static int acpi_node_prop_read(const struct fwnode_handle *fwnode,
const char *propname, enum dev_prop_type proptype,
void *val, size_t nval)
{
return acpi_data_prop_read(acpi_device_data_of_node(fwnode),
propname, proptype, val, nval);
}
static int stop_on_next(struct acpi_device *adev, void *data)
{
struct acpi_device **ret_p = data;
if (!*ret_p) {
*ret_p = adev;
return 1;
}
/* Skip until the "previous" object is found. */
if (*ret_p == adev)
*ret_p = NULL;
return 0;
}
/**
* acpi_get_next_subnode - Return the next child node handle for a fwnode
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
if ((!child || is_acpi_device_node(child)) && adev) {
struct acpi_device *child_adev = to_acpi_device_node(child);
acpi_dev_for_each_child(adev, stop_on_next, &child_adev);
if (child_adev)
return acpi_fwnode_handle(child_adev);
child = NULL;
}
if (!child || is_acpi_data_node(child)) {
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
const struct list_head *head;
struct list_head *next;
struct acpi_data_node *dn;
/*
* We can have a combination of device and data nodes, e.g. with
* hierarchical _DSD properties. Make sure the adev pointer is
* restored before going through data nodes, otherwise we will
* be looking for data_nodes below the last device found instead
* of the common fwnode shared by device_nodes and data_nodes.
*/
adev = to_acpi_device_node(fwnode);
if (adev)
head = &adev->data.subnodes;
else if (data)
head = &data->data.subnodes;
else
return NULL;
if (list_empty(head))
return NULL;
if (child) {
dn = to_acpi_data_node(child);
next = dn->sibling.next;
if (next == head)
return NULL;
dn = list_entry(next, struct acpi_data_node, sibling);
} else {
dn = list_first_entry(head, struct acpi_data_node, sibling);
}
return &dn->fwnode;
}
return NULL;
}
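/*
 * Illustrative sketch (not part of the original source): callers normally do
 * not use acpi_get_next_subnode() directly but iterate through the generic
 * fwnode accessors, which dispatch here via the fwnode_operations declared at
 * the end of this file.  The example_ prefix marks made-up names.
 */
static void __maybe_unused example_walk_children(const struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	fwnode_for_each_child_node(fwnode, child) {
		/* Both ACPI device nodes and _DSD data nodes show up here. */
		pr_debug("child node: %s\n", fwnode_get_name(child));
	}
}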
/*
* acpi_get_next_present_subnode - Return the next present child node handle
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*
* Like acpi_get_next_subnode(), but the device nodes returned by
* acpi_get_next_present_subnode() are guaranteed to be present.
*
* Returns: The fwnode handle of the next present sub-node.
*/
static struct fwnode_handle *
acpi_get_next_present_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
do {
child = acpi_get_next_subnode(fwnode, child);
} while (is_acpi_device_node(child) &&
!acpi_device_is_present(to_acpi_device_node(child)));
return child;
}
/**
* acpi_node_get_parent - Return parent fwnode of this fwnode
* @fwnode: Firmware node whose parent to get
*
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
static struct fwnode_handle *
acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
return to_acpi_data_node(fwnode)->parent;
}
if (is_acpi_device_node(fwnode)) {
struct acpi_device *parent;
parent = acpi_dev_parent(to_acpi_device_node(fwnode));
if (parent)
return acpi_fwnode_handle(parent);
}
return NULL;
}
/*
* Return true if the node is an ACPI graph node. Called on either ports
* or endpoints.
*/
static bool is_acpi_graph_node(struct fwnode_handle *fwnode,
const char *str)
{
unsigned int len = strlen(str);
const char *name;
if (!len || !is_acpi_data_node(fwnode))
return false;
name = to_acpi_data_node(fwnode)->name;
return (fwnode_property_present(fwnode, "reg") &&
!strncmp(name, str, len) && name[len] == '@') ||
fwnode_property_present(fwnode, str);
}
/**
* acpi_graph_get_next_endpoint - Get next endpoint ACPI firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
* Looks up next endpoint ACPI firmware node below a given @fwnode. Returns
* %NULL if there is no next endpoint or in case of error. In case of success
* the next endpoint is returned.
*/
static struct fwnode_handle *acpi_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
struct fwnode_handle *endpoint;
if (!prev) {
do {
port = fwnode_get_next_child_node(fwnode, port);
/*
* The names of the port nodes begin with "port@"
* followed by the number of the port node and they also
* have a "reg" property that also has the number of the
* port node. For compatibility reasons a node is also
* recognised as a port node from the "port" property.
*/
if (is_acpi_graph_node(port, "port"))
break;
} while (port);
} else {
port = fwnode_get_parent(prev);
}
if (!port)
return NULL;
endpoint = fwnode_get_next_child_node(port, prev);
while (!endpoint) {
port = fwnode_get_next_child_node(fwnode, port);
if (!port)
break;
if (is_acpi_graph_node(port, "port"))
endpoint = fwnode_get_next_child_node(port, NULL);
}
/*
* The names of the endpoint nodes begin with "endpoint@" followed by
* the number of the endpoint node and they also have a "reg" property
* that also has the number of the endpoint node. For compatibility
* reasons a node is also recognised as an endpoint node from the
* "endpoint" property.
*/
if (!is_acpi_graph_node(endpoint, "endpoint"))
return NULL;
return endpoint;
}
/**
* acpi_graph_get_child_prop_value - Return a child with a given property value
* @fwnode: device fwnode
* @prop_name: The name of the property to look for
* @val: the desired property value
*
 * Return the first child node whose property @prop_name has the value @val
 * (e.g. the port node corresponding to a given port number). Returns the
 * child node on success, NULL otherwise.
*/
static struct fwnode_handle *acpi_graph_get_child_prop_value(
const struct fwnode_handle *fwnode, const char *prop_name,
unsigned int val)
{
struct fwnode_handle *child;
fwnode_for_each_child_node(fwnode, child) {
u32 nr;
if (fwnode_property_read_u32(child, prop_name, &nr))
continue;
if (val == nr)
return child;
}
return NULL;
}
/**
* acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
* @__fwnode: Endpoint firmware node pointing to a remote device
*
* Returns the remote endpoint corresponding to @__fwnode. NULL on error.
*/
static struct fwnode_handle *
acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
{
struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
struct fwnode_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
return NULL;
/* Direct endpoint reference? */
if (!is_acpi_device_node(args.fwnode))
return args.nargs ? NULL : args.fwnode;
/*
* Always require two arguments with the reference: port and
* endpoint indices.
*/
if (args.nargs != 2)
return NULL;
fwnode = args.fwnode;
port_nr = args.args[0];
endpoint_nr = args.args[1];
fwnode = acpi_graph_get_child_prop_value(fwnode, "port", port_nr);
return acpi_graph_get_child_prop_value(fwnode, "endpoint", endpoint_nr);
}
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
return true;
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
static const void *
acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
const struct device *dev)
{
return acpi_device_get_match_data(dev);
}
static bool acpi_fwnode_device_dma_supported(const struct fwnode_handle *fwnode)
{
return acpi_dma_supported(to_acpi_device_node(fwnode));
}
static enum dev_dma_attr
acpi_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode)
{
return acpi_get_dma_attr(to_acpi_device_node(fwnode));
}
static bool acpi_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !acpi_node_prop_get(fwnode, propname, NULL);
}
static int
acpi_fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
enum dev_prop_type type;
switch (elem_size) {
case sizeof(u8):
type = DEV_PROP_U8;
break;
case sizeof(u16):
type = DEV_PROP_U16;
break;
case sizeof(u32):
type = DEV_PROP_U32;
break;
case sizeof(u64):
type = DEV_PROP_U64;
break;
default:
return -ENXIO;
}
return acpi_node_prop_read(fwnode, propname, type, val, nval);
}
static int
acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
val, nval);
}
static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
{
const struct acpi_device *adev;
struct fwnode_handle *parent;
/* Is this the root node? */
parent = fwnode_get_parent(fwnode);
if (!parent)
return "\\";
fwnode_handle_put(parent);
if (is_acpi_data_node(fwnode)) {
const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return dn->name;
}
adev = to_acpi_device_node(fwnode);
if (WARN_ON(!adev))
return NULL;
return acpi_device_bid(adev);
}
static const char *
acpi_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
/* Is this the root node? */
parent = fwnode_get_parent(fwnode);
if (!parent)
return "";
/* Is this 2nd node from the root? */
parent = fwnode_get_next_parent(parent);
if (!parent)
return "";
fwnode_handle_put(parent);
/* ACPI device or data node. */
return ".";
}
static struct fwnode_handle *
acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
{
return acpi_node_get_parent(fwnode);
}
static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode);
endpoint->local_fwnode = fwnode;
if (fwnode_property_read_u32(port_fwnode, "reg", &endpoint->port))
fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
return 0;
}
static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
unsigned int index)
{
struct resource res;
int ret;
ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
if (ret)
return ret;
return res.start;
}
#define DECLARE_ACPI_FWNODE_OPS(ops) \
const struct fwnode_operations ops = { \
.device_is_available = acpi_fwnode_device_is_available, \
.device_get_match_data = acpi_fwnode_device_get_match_data, \
.device_dma_supported = \
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
.property_read_bool = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
acpi_fwnode_property_read_string_array, \
.get_parent = acpi_node_get_parent, \
.get_next_child_node = acpi_get_next_present_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_name = acpi_fwnode_get_name, \
.get_name_prefix = acpi_fwnode_get_name_prefix, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
acpi_graph_get_next_endpoint, \
.graph_get_remote_endpoint = \
acpi_graph_get_remote_endpoint, \
.graph_get_port_parent = acpi_fwnode_get_parent, \
.graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
.irq_get = acpi_fwnode_irq_get, \
}; \
EXPORT_SYMBOL_GPL(ops)
DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
const struct fwnode_operations acpi_static_fwnode_ops;
bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) &&
fwnode->ops == &acpi_device_fwnode_ops;
}
EXPORT_SYMBOL(is_acpi_device_node);
bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
}
EXPORT_SYMBOL(is_acpi_data_node);
// SPDX-License-Identifier: GPL-2.0-only
/*
* umh - the kernel usermode helper
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fs_struct.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <linux/initrd.h>
#include <linux/freezer.h>
#include <trace/events/module.h>
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);
static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
if (info->cleanup)
(*info->cleanup)(info);
	kfree(info);
}
static void umh_complete(struct subprocess_info *sub_info)
{
struct completion *comp = xchg(&sub_info->complete, NULL);
/*
* See call_usermodehelper_exec(). If xchg() returns NULL
* we own sub_info, the UMH_KILLABLE caller has gone away
* or the caller used UMH_NO_WAIT.
*/
if (comp)
complete(comp);
else
call_usermodehelper_freeinfo(sub_info);
}
/*
* This is the task which runs the usermode application
*/
static int call_usermodehelper_exec_async(void *data)
{
struct subprocess_info *sub_info = data;
struct cred *new;
int retval;
	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);
/*
 * Initial kernel threads share their FS with init, in order to
* get the init root directory. But we've now created a new
* thread that is going to execve a user process and has its own
* 'struct fs_struct'. Reset umask to the default.
*/
current->fs->umask = 0022;
/*
* Our parent (unbound workqueue) runs with elevated scheduling
* priority. Avoid propagating that into the userspace child.
*/
set_user_nice(current, 0);
retval = -ENOMEM;
new = prepare_kernel_cred(current);
if (!new)
goto out;
spin_lock(&umh_sysctl_lock);
new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
new->cap_inheritable);
spin_unlock(&umh_sysctl_lock);
if (sub_info->init) {
retval = sub_info->init(sub_info, new);
if (retval) {
abort_creds(new);
goto out;
}
}
commit_creds(new);
wait_for_initramfs();
retval = kernel_execve(sub_info->path,
(const char *const *)sub_info->argv,
(const char *const *)sub_info->envp);
out:
sub_info->retval = retval;
/*
* call_usermodehelper_exec_sync() will call umh_complete
 * if UMH_WAIT_PROC.
*/
if (!(sub_info->wait & UMH_WAIT_PROC))
umh_complete(sub_info);
if (!retval)
return 0;
do_exit(0);
}
/* Handles UMH_WAIT_PROC. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
pid_t pid;
/* If SIGCLD is ignored do_wait won't populate the status. */
kernel_sigaction(SIGCHLD, SIG_DFL);
pid = user_mode_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
if (pid < 0)
sub_info->retval = pid;
else
kernel_wait(pid, &sub_info->retval);
/* Restore default kernel sig handler */
kernel_sigaction(SIGCHLD, SIG_IGN);
umh_complete(sub_info);
}
/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones) such that it
 * inherits the widest possible affinity irrespective of call_usermodehelper()
 * callers with possibly reduced affinity (eg: per-cpu workqueues). We don't
 * want usermodehelper targets to contend for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
*
*/
static void call_usermodehelper_exec_work(struct work_struct *work)
{
struct subprocess_info *sub_info =
container_of(work, struct subprocess_info, work);
if (sub_info->wait & UMH_WAIT_PROC) {
call_usermodehelper_exec_sync(sub_info);
} else {
pid_t pid;
/*
* Use CLONE_PARENT to reparent it to kthreadd; we do not
* want to pollute current->children, and we need a parent
* that always ignores SIGCHLD to ensure auto-reaping.
*/
pid = user_mode_thread(call_usermodehelper_exec_async, sub_info,
CLONE_PARENT | SIGCHLD);
if (pid < 0) {
sub_info->retval = pid;
umh_complete(sub_info);
}
}
}
/*
* If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
* (used for preventing user land processes from being created after the user
* land has been frozen during a system-wide hibernation or suspend operation).
* Should always be manipulated under umhelper_sem acquired for write.
*/
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);
/*
* Wait queue head used by usermodehelper_disable() to wait for all running
* helpers to finish.
*/
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
/*
* Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
* to become 'false'.
*/
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
/*
* Time to wait for running_helpers to become zero before the setting of
* usermodehelper_disabled in usermodehelper_disable() fails
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
int usermodehelper_read_trylock(void)
{
DEFINE_WAIT(wait);
int ret = 0;
down_read(&umhelper_sem);
for (;;) {
prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
TASK_INTERRUPTIBLE);
if (!usermodehelper_disabled)
break;
if (usermodehelper_disabled == UMH_DISABLED)
ret = -EAGAIN;
up_read(&umhelper_sem);
if (ret)
break;
schedule();
try_to_freeze();
down_read(&umhelper_sem);
}
finish_wait(&usermodehelper_disabled_waitq, &wait);
return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
long usermodehelper_read_lock_wait(long timeout)
{
DEFINE_WAIT(wait);
if (timeout < 0)
return -EINVAL;
down_read(&umhelper_sem);
for (;;) {
prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
TASK_UNINTERRUPTIBLE);
if (!usermodehelper_disabled)
break;
up_read(&umhelper_sem);
timeout = schedule_timeout(timeout);
if (!timeout)
break;
down_read(&umhelper_sem);
}
finish_wait(&usermodehelper_disabled_waitq, &wait);
return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
void usermodehelper_read_unlock(void)
{
up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
/**
* __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
* @depth: New value to assign to usermodehelper_disabled.
*
* Change the value of usermodehelper_disabled (under umhelper_sem locked for
* writing) and wakeup tasks waiting for it to change.
*/
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
down_write(&umhelper_sem);
usermodehelper_disabled = depth;
wake_up(&usermodehelper_disabled_waitq);
up_write(&umhelper_sem);
}
/**
* __usermodehelper_disable - Prevent new helpers from being started.
* @depth: New value to assign to usermodehelper_disabled.
*
* Set usermodehelper_disabled to @depth and wait for running helpers to exit.
*/
int __usermodehelper_disable(enum umh_disable_depth depth)
{
long retval;
if (!depth)
return -EINVAL;
down_write(&umhelper_sem);
usermodehelper_disabled = depth;
up_write(&umhelper_sem);
/*
* From now on call_usermodehelper_exec() won't start any new
* helpers, so it is sufficient if running_helpers turns out to
* be zero at one point (it may be increased later, but that
* doesn't matter).
*/
retval = wait_event_timeout(running_helpers_waitq,
atomic_read(&running_helpers) == 0,
RUNNING_HELPERS_TIMEOUT);
if (retval)
return 0;
__usermodehelper_set_disable_depth(UMH_ENABLED);
return -EAGAIN;
}
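/*
 * Illustrative sketch (not part of the original source): a possible pairing of
 * the two primitives above around a window where no new usermode helpers may
 * be started (the real callers live in the suspend/hibernation code).
 */
static int __maybe_unused example_helper_quiesce(void)
{
	int error;

	/* Refuse new helpers and wait for running ones to drain. */
	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* ... critical window with no new usermode helpers ... */

	/* Let call_usermodehelper_exec() start helpers again. */
	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return 0;
}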
static void helper_lock(void)
{
atomic_inc(&running_helpers);
smp_mb__after_atomic();
}
static void helper_unlock(void)
{
if (atomic_dec_and_test(&running_helpers))
wake_up(&running_helpers_waitq);
}
/**
* call_usermodehelper_setup - prepare to call a usermode helper
* @path: path to usermode executable
* @argv: arg vector for process
* @envp: environment for process
* @gfp_mask: gfp mask for memory allocation
* @init: an init function
* @cleanup: a cleanup function
* @data: arbitrary context sensitive data
*
* Returns either %NULL on allocation failure, or a subprocess_info
* structure. This should be passed to call_usermodehelper_exec to
* exec the process and free the structure.
*
* The init function is used to customize the helper process prior to
* exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
*
 * The cleanup function is called just before the subprocess_info is about to
 * be freed. This can be used for freeing the argv and envp. The
 * function must be runnable in either a process context or the
* context in which call_usermodehelper_exec is called.
*/
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
char **envp, gfp_t gfp_mask,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *info),
void *data)
{
struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;
INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
#ifdef CONFIG_STATIC_USERMODEHELPER
sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
sub_info->path = path;
#endif
sub_info->argv = argv;
sub_info->envp = envp;
sub_info->cleanup = cleanup;
sub_info->init = init;
sub_info->data = data;
out:
return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
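/*
 * Illustrative sketch (not part of the original source): pairing
 * call_usermodehelper_setup() with call_usermodehelper_exec() when an init
 * callback is needed.  The helper path, arguments and everything prefixed
 * example_ are made up.
 */
static int __maybe_unused example_helper_init(struct subprocess_info *info,
					      struct cred *new)
{
	/* Runs in the child before exec; may adjust the new credentials. */
	return 0;
}

static int __maybe_unused example_run_helper(void)
{
	char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	struct subprocess_info *info;

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
					 example_helper_init, NULL, NULL);
	if (!info)
		return -ENOMEM;

	/* Wait for the exec to happen, but not for the program to exit. */
	return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
}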
/**
* call_usermodehelper_exec - start a usermode application
* @sub_info: information about the subprocess
* @wait: wait for the application to finish and return status.
* when UMH_NO_WAIT don't wait at all, but you get no useful error back
* when the program couldn't be exec'ed. This makes it safe to call
* from interrupt context.
*
* Runs a user-space application. The application is started
* asynchronously if wait is not set, and runs as a child of system workqueues.
 * (i.e. it runs with full root capabilities and optimized affinity).
*
* Note: successful return value does not guarantee the helper was called at
* all. You can't rely on sub_info->{init,cleanup} being called even for
* UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
* into a successful no-op.
*/
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
unsigned int state = TASK_UNINTERRUPTIBLE;
DECLARE_COMPLETION_ONSTACK(done);
int retval = 0;
if (!sub_info->path) {
call_usermodehelper_freeinfo(sub_info);
return -EINVAL;
}
helper_lock();
if (usermodehelper_disabled) {
retval = -EBUSY;
goto out;
}
/*
* If there is no binary for us to call, then just return and get out of
* here. This allows us to set STATIC_USERMODEHELPER_PATH to "" and
* disable all call_usermodehelper() calls.
*/
if (strlen(sub_info->path) == 0)
goto out;
/*
* Set the completion pointer only if there is a waiter.
* This makes it possible to use umh_complete to free
* the data structure in case of UMH_NO_WAIT.
*/
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_FREEZABLE)
		state |= TASK_FREEZABLE;

	if (wait & UMH_KILLABLE) {
retval = wait_for_completion_state(&done, state | TASK_KILLABLE);
if (!retval)
goto wait_done;
/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
/*
* fallthrough; in case of -ERESTARTSYS now do uninterruptible
* wait_for_completion_state(). Since umh_complete() shall call
* complete() in a moment if xchg() above returned NULL, this
* uninterruptible wait_for_completion_state() will not block
* SIGKILL'ed processes for long.
*/
}
wait_for_completion_state(&done, state);
wait_done:
retval = sub_info->retval;
out:
call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
/**
* call_usermodehelper() - prepare and start a usermode application
* @path: path to usermode executable
* @argv: arg vector for process
* @envp: environment for process
* @wait: wait for the application to finish and return status.
* when UMH_NO_WAIT don't wait at all, but you get no useful error back
* when the program couldn't be exec'ed. This makes it safe to call
* from interrupt context.
*
 * This function is the equivalent of calling call_usermodehelper_setup() and
 * call_usermodehelper_exec() back to back.
*/
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
struct subprocess_info *info;
gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
NULL, NULL, NULL);
if (info == NULL)
return -ENOMEM;
return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
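/*
 * Illustrative sketch (not part of the original source): the common one-shot
 * pattern built on the wrapper above.  Path and arguments are made-up
 * examples.
 */
static int __maybe_unused example_notify_userspace(char *event)
{
	char *argv[] = { "/sbin/example-notify", event, NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	/* UMH_WAIT_PROC also collects the helper's exit status. */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}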
#if defined(CONFIG_SYSCTL)
static int proc_cap_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
unsigned long cap_array[2];
kernel_cap_t new_cap, *cap;
int err;
if (write && (!capable(CAP_SETPCAP) ||
!capable(CAP_SYS_MODULE)))
return -EPERM;
/*
* convert from the global kernel_cap_t to the ulong array to print to
* userspace if this is a read.
*
* Legacy format: capabilities are exposed as two 32-bit values
*/
cap = table->data;
spin_lock(&umh_sysctl_lock);
cap_array[0] = (u32) cap->val;
cap_array[1] = cap->val >> 32;
spin_unlock(&umh_sysctl_lock);
t = *table;
t.data = &cap_array;
/*
 * Actually read or write an array of ulongs from userspace. Remember that
 * these are least significant 32 bits first.
*/
err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
if (err < 0)
return err;
new_cap.val = (u32)cap_array[0];
new_cap.val += (u64)cap_array[1] << 32;
/*
* Drop everything not in the new_cap (but don't add things)
*/
if (write) {
spin_lock(&umh_sysctl_lock);
*cap = cap_intersect(*cap, new_cap);
spin_unlock(&umh_sysctl_lock);
}
return 0;
}
static const struct ctl_table usermodehelper_table[] = {
{
.procname = "bset",
.data = &usermodehelper_bset,
.maxlen = 2 * sizeof(unsigned long),
.mode = 0600,
.proc_handler = proc_cap_handler,
},
{
.procname = "inheritable",
.data = &usermodehelper_inheritable,
.maxlen = 2 * sizeof(unsigned long),
.mode = 0600,
.proc_handler = proc_cap_handler,
},
};
static int __init init_umh_sysctls(void)
{
register_sysctl_init("kernel/usermodehelper", usermodehelper_table);
return 0;
}
early_initcall(init_umh_sysctls);
#endif /* CONFIG_SYSCTL */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/drivers/char/serial_core.h
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
#ifndef LINUX_SERIAL_CORE_H
#define LINUX_SERIAL_CORE_H
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/mutex.h>
#include <linux/sysrq.h>
#include <uapi/linux/serial_core.h>
#ifdef CONFIG_SERIAL_CORE_CONSOLE
#define uart_console(port) \
((port)->cons && (port)->cons->index == (port)->line)
#else
#define uart_console(port) ({ (void)port; 0; })
#endif
struct uart_port;
struct serial_struct;
struct serial_port_device;
struct device;
struct gpio_desc;
/**
* struct uart_ops -- interface between serial_core and the driver
*
* This structure describes all the operations that can be done on the
* physical hardware.
*
* @tx_empty: ``unsigned int ()(struct uart_port *port)``
*
* This function tests whether the transmitter fifo and shifter for the
* @port is empty. If it is empty, this function should return
* %TIOCSER_TEMT, otherwise return 0. If the port does not support this
* operation, then it should return %TIOCSER_TEMT.
*
* Locking: none.
* Interrupts: caller dependent.
* This call must not sleep
*
* @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)``
*
* This function sets the modem control lines for @port to the state
* described by @mctrl. The relevant bits of @mctrl are:
*
* - %TIOCM_RTS RTS signal.
* - %TIOCM_DTR DTR signal.
* - %TIOCM_OUT1 OUT1 signal.
* - %TIOCM_OUT2 OUT2 signal.
* - %TIOCM_LOOP Set the port into loopback mode.
*
* If the appropriate bit is set, the signal should be driven
* active. If the bit is clear, the signal should be driven
* inactive.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @get_mctrl: ``unsigned int ()(struct uart_port *port)``
*
* Returns the current state of modem control inputs of @port. The state
* of the outputs should not be returned, since the core keeps track of
* their state. The state information should include:
*
* - %TIOCM_CAR state of DCD signal
* - %TIOCM_CTS state of CTS signal
* - %TIOCM_DSR state of DSR signal
* - %TIOCM_RI state of RI signal
*
* The bit is set if the signal is currently driven active. If
* the port does not support CTS, DCD or DSR, the driver should
* indicate that the signal is permanently active. If RI is
* not available, the signal should not be indicated as active.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @stop_tx: ``void ()(struct uart_port *port)``
*
* Stop transmitting characters. This might be due to the CTS line
* becoming inactive or the tty layer indicating we want to stop
* transmission due to an %XOFF character.
*
* The driver should stop transmitting characters as soon as possible.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @start_tx: ``void ()(struct uart_port *port)``
*
* Start transmitting characters.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @throttle: ``void ()(struct uart_port *port)``
*
* Notify the serial driver that input buffers for the line discipline are
* close to full, and it should somehow signal that no more characters
* should be sent to the serial port.
* This will be called only if hardware assisted flow control is enabled.
*
* Locking: serialized with @unthrottle() and termios modification by the
* tty layer.
*
* @unthrottle: ``void ()(struct uart_port *port)``
*
* Notify the serial driver that characters can now be sent to the serial
* port without fear of overrunning the input buffers of the line
* disciplines.
*
* This will be called only if hardware assisted flow control is enabled.
*
* Locking: serialized with @throttle() and termios modification by the
* tty layer.
*
* @send_xchar: ``void ()(struct uart_port *port, char ch)``
*
* Transmit a high priority character, even if the port is stopped. This
* is used to implement XON/XOFF flow control and tcflow(). If the serial
* driver does not implement this function, the tty core will append the
* character to the circular buffer and then call start_tx() / stop_tx()
* to flush the data out.
*
* Do not transmit if @ch == '\0' (%__DISABLED_CHAR).
*
* Locking: none.
* Interrupts: caller dependent.
*
* @start_rx: ``void ()(struct uart_port *port)``
*
* Start receiving characters.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @stop_rx: ``void ()(struct uart_port *port)``
*
* Stop receiving characters; the @port is in the process of being closed.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @enable_ms: ``void ()(struct uart_port *port)``
*
* Enable the modem status interrupts.
*
* This method may be called multiple times. Modem status interrupts
* should be disabled when the @shutdown() method is called.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @break_ctl: ``void ()(struct uart_port *port, int ctl)``
*
* Control the transmission of a break signal. If @ctl is nonzero, the
* break signal should be transmitted. The signal should be terminated
* when another call is made with a zero @ctl.
*
* Locking: caller holds tty_port->mutex
*
* @startup: ``int ()(struct uart_port *port)``
*
* Grab any interrupt resources and initialise any low level driver state.
* Enable the port for reception. It should not activate RTS nor DTR;
* this will be done via a separate call to @set_mctrl().
*
* This method will only be called when the port is initially opened.
*
* Locking: port_sem taken.
* Interrupts: globally disabled.
*
* @shutdown: ``void ()(struct uart_port *port)``
*
* Disable the @port, disable any break condition that may be in effect,
* and free any interrupt resources. It should not disable RTS nor DTR;
* this will have already been done via a separate call to @set_mctrl().
*
* Drivers must not access @port->state once this call has completed.
*
* This method will only be called when there are no more users of this
* @port.
*
* Locking: port_sem taken.
* Interrupts: caller dependent.
*
* @flush_buffer: ``void ()(struct uart_port *port)``
*
* Flush any write buffers, reset any DMA state and stop any ongoing DMA
* transfers.
*
* This will be called whenever the @port->state->xmit circular buffer is
* cleared.
*
* Locking: @port->lock taken.
* Interrupts: locally disabled.
* This call must not sleep
*
* @set_termios: ``void ()(struct uart_port *port, struct ktermios *new,
* struct ktermios *old)``
*
* Change the @port parameters, including word length, parity, stop bits.
* Update @port->read_status_mask and @port->ignore_status_mask to
* indicate the types of events we are interested in receiving. Relevant
* ktermios::c_cflag bits are:
*
* - %CSIZE - word size
* - %CSTOPB - 2 stop bits
* - %PARENB - parity enable
* - %PARODD - odd parity (when %PARENB is in force)
* - %ADDRB - address bit (changed through uart_port::rs485_config()).
* - %CREAD - enable reception of characters (if not set, still receive
* characters from the port, but throw them away).
* - %CRTSCTS - if set, enable CTS status change reporting.
* - %CLOCAL - if not set, enable modem status change reporting.
*
* Relevant ktermios::c_iflag bits are:
*
* - %INPCK - enable frame and parity error events to be passed to the TTY
* layer.
* - %BRKINT / %PARMRK - both of these enable break events to be passed to
* the TTY layer.
* - %IGNPAR - ignore parity and framing errors.
* - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun
* errors as well.
*
* The interaction of the ktermios::c_iflag bits is as follows (parity
* error given as an example):
*
* ============ ======= ======= =========================================
* Parity error INPCK IGNPAR
* ============ ======= ======= =========================================
* n/a 0 n/a character received, marked as %TTY_NORMAL
* None 1 n/a character received, marked as %TTY_NORMAL
* Yes 1 0 character received, marked as %TTY_PARITY
* Yes 1 1 character discarded
* ============ ======= ======= =========================================
*
* Other flags may be used (eg, xon/xoff characters) if your hardware
* supports hardware "soft" flow control.
*
* Locking: caller holds tty_port->mutex
* Interrupts: caller dependent.
* This call must not sleep
*
* @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)``
*
* Notifier for discipline change. See
* Documentation/driver-api/tty/tty_ldisc.rst.
*
* Locking: caller holds tty_port->mutex
*
* @pm: ``void ()(struct uart_port *port, unsigned int state,
* unsigned int oldstate)``
*
* Perform any power management related activities on the specified @port.
* @state indicates the new state (defined by enum uart_pm_state),
* @oldstate indicates the previous state.
*
* This function should not be used to grab any resources.
*
* This will be called when the @port is initially opened and finally
* closed, except when the @port is also the system console. This will
* occur even if %CONFIG_PM is not set.
*
* Locking: none.
* Interrupts: caller dependent.
*
* @type: ``const char *()(struct uart_port *port)``
*
* Return a pointer to a string constant describing the specified @port,
* or return %NULL, in which case the string 'unknown' is substituted.
*
* Locking: none.
* Interrupts: caller dependent.
*
* @release_port: ``void ()(struct uart_port *port)``
*
* Release any memory and IO region resources currently in use by the
* @port.
*
* Locking: none.
* Interrupts: caller dependent.
*
* @request_port: ``int ()(struct uart_port *port)``
*
* Request any memory and IO region resources required by the port. If any
* fail, no resources should be registered when this function returns, and
* it should return -%EBUSY on failure.
*
* Locking: none.
* Interrupts: caller dependent.
*
* @config_port: ``void ()(struct uart_port *port, int type)``
*
* Perform any autoconfiguration steps required for the @port. @type
* contains a bit mask of the required configuration. %UART_CONFIG_TYPE
* indicates that the port requires detection and identification.
* @port->type should be set to the type found, or %PORT_UNKNOWN if no
* port was detected.
*
* %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal,
* which should be probed using standard kernel autoprobing techniques.
* This is not necessary on platforms where ports have interrupts
* internally hard wired (eg, system on a chip implementations).
*
* Locking: none.
* Interrupts: caller dependent.
*
* @verify_port: ``int ()(struct uart_port *port,
* struct serial_struct *serinfo)``
*
* Verify the new serial port information contained within @serinfo is
* suitable for this port type.
*
* Locking: none.
* Interrupts: caller dependent.
*
* @ioctl: ``int ()(struct uart_port *port, unsigned int cmd,
* unsigned long arg)``
*
* Perform any port specific IOCTLs. IOCTL commands must be defined using
* the standard numbering system found in <asm/ioctl.h>.
*
* Locking: none.
* Interrupts: caller dependent.
*
* @poll_init: ``int ()(struct uart_port *port)``
*
* Called by kgdb to perform the minimal hardware initialization needed to
* support @poll_put_char() and @poll_get_char(). Unlike @startup(), this
* should not request interrupts.
*
* Locking: %tty_mutex and tty_port->mutex taken.
* Interrupts: n/a.
*
* @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)``
*
* Called by kgdb to write a single character @ch directly to the serial
* @port. It can and should block until there is space in the TX FIFO.
*
* Locking: none.
* Interrupts: caller dependent.
* This call must not sleep
*
* @poll_get_char: ``int ()(struct uart_port *port)``
*
* Called by kgdb to read a single character directly from the serial
* port. If data is available, it should be returned; otherwise the
* function should return %NO_POLL_CHAR immediately.
*
* Locking: none.
* Interrupts: caller dependent.
* This call must not sleep
*/
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
void (*set_mctrl)(struct uart_port *, unsigned int mctrl);
unsigned int (*get_mctrl)(struct uart_port *);
void (*stop_tx)(struct uart_port *);
void (*start_tx)(struct uart_port *);
void (*throttle)(struct uart_port *);
void (*unthrottle)(struct uart_port *);
void (*send_xchar)(struct uart_port *, char ch);
void (*stop_rx)(struct uart_port *);
void (*start_rx)(struct uart_port *);
void (*enable_ms)(struct uart_port *);
void (*break_ctl)(struct uart_port *, int ctl);
int (*startup)(struct uart_port *);
void (*shutdown)(struct uart_port *);
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
const struct ktermios *old);
void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
const char *(*type)(struct uart_port *);
void (*release_port)(struct uart_port *);
int (*request_port)(struct uart_port *);
void (*config_port)(struct uart_port *, int);
int (*verify_port)(struct uart_port *, struct serial_struct *);
int (*ioctl)(struct uart_port *, unsigned int, unsigned long);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct uart_port *);
void (*poll_put_char)(struct uart_port *, unsigned char);
int (*poll_get_char)(struct uart_port *);
#endif
};
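/*
 * Illustrative sketch (assumption, not taken from the original source) of the
 * shape of a minimal uart_ops table; the example_* callbacks are placeholders
 * a real driver would implement against its hardware:
 *
 *	static const struct uart_ops example_uart_ops = {
 *		.tx_empty	= example_tx_empty,
 *		.set_mctrl	= example_set_mctrl,
 *		.get_mctrl	= example_get_mctrl,
 *		.stop_tx	= example_stop_tx,
 *		.start_tx	= example_start_tx,
 *		.stop_rx	= example_stop_rx,
 *		.startup	= example_startup,
 *		.shutdown	= example_shutdown,
 *		.set_termios	= example_set_termios,
 *		.type		= example_type,
 *		.release_port	= example_release_port,
 *		.request_port	= example_request_port,
 *		.config_port	= example_config_port,
 *	};
 */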
#define NO_POLL_CHAR 0x00ff0000
#define UART_CONFIG_TYPE (1 << 0)
#define UART_CONFIG_IRQ (1 << 1)
struct uart_icount {
__u32 cts;
__u32 dsr;
__u32 rng;
__u32 dcd;
__u32 rx;
__u32 tx;
__u32 frame;
__u32 overrun;
__u32 parity;
__u32 brk;
__u32 buf_overrun;
};
typedef u64 __bitwise upf_t;
typedef unsigned int __bitwise upstat_t;
enum uart_iotype {
UPIO_UNKNOWN = -1,
UPIO_PORT = SERIAL_IO_PORT, /* 8b I/O port access */
UPIO_HUB6 = SERIAL_IO_HUB6, /* Hub6 ISA card */
UPIO_MEM = SERIAL_IO_MEM, /* driver-specific */
UPIO_MEM32 = SERIAL_IO_MEM32, /* 32b little endian */
UPIO_AU = SERIAL_IO_AU, /* Au1x00 and RT288x type IO */
UPIO_TSI = SERIAL_IO_TSI, /* Tsi108/109 type IO */
UPIO_MEM32BE = SERIAL_IO_MEM32BE, /* 32b big endian */
UPIO_MEM16 = SERIAL_IO_MEM16, /* 16b little endian */
};
struct uart_port {
spinlock_t lock; /* port lock */
unsigned long iobase; /* in/out[bwl] */
unsigned char __iomem *membase; /* read/write[bwl] */
u32 (*serial_in)(struct uart_port *, unsigned int offset);
void (*serial_out)(struct uart_port *, unsigned int offset, u32 val);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
void (*set_mctrl)(struct uart_port *, unsigned int);
unsigned int (*get_divisor)(struct uart_port *,
unsigned int baud,
unsigned int *frac);
void (*set_divisor)(struct uart_port *,
unsigned int baud,
unsigned int quot,
unsigned int quot_frac);
int (*startup)(struct uart_port *port);
void (*shutdown)(struct uart_port *port);
void (*throttle)(struct uart_port *port);
void (*unthrottle)(struct uart_port *port);
int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int old);
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
struct ktermios *termios,
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
unsigned int ctrl_id; /* optional serial core controller id */
unsigned int port_id; /* optional serial core port id */
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
unsigned int fifosize; /* tx fifo size */
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
unsigned char quirks; /* internal quirks */
/* internal quirks must be updated while holding port mutex */
#define UPQ_NO_TXEN_TEST BIT(0)
enum uart_iotype iotype; /* io access style */
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
struct uart_state *state; /* pointer to parent state */
struct uart_icount icount; /* statistics */
struct console *cons; /* struct console, if any */
/* flags must be updated while holding port mutex */
upf_t flags;
/*
* These flags must be equivalent to the flags defined in
* include/uapi/linux/tty_flags.h which are the userspace definitions
* assigned from the serial_struct flags in uart_set_info()
* [for bit definitions in the UPF_CHANGE_MASK]
*
* Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
#ifdef CONFIG_HAS_IOPORT
#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
#else
#define UPF_FOURPORT 0
#endif
#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
#define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ )
#define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ )
#define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ )
#define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ )
#define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ )
#define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ )
#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
#define UPF_NO_THRE_TEST ((__force upf_t) BIT_ULL(19))
/* Port has hardware-assisted h/w flow control */
#define UPF_AUTO_CTS ((__force upf_t) BIT_ULL(20))
#define UPF_AUTO_RTS ((__force upf_t) BIT_ULL(21))
#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
#define UPF_SOFT_FLOW ((__force upf_t) BIT_ULL(22))
#define UPF_CONS_FLOW ((__force upf_t) BIT_ULL(23))
#define UPF_SHARE_IRQ ((__force upf_t) BIT_ULL(24))
#define UPF_EXAR_EFR ((__force upf_t) BIT_ULL(25))
#define UPF_BUG_THRE ((__force upf_t) BIT_ULL(26))
/* The exact UART type is known and should not be probed. */
#define UPF_FIXED_TYPE ((__force upf_t) BIT_ULL(27))
#define UPF_BOOT_AUTOCONF ((__force upf_t) BIT_ULL(28))
#define UPF_FIXED_PORT ((__force upf_t) BIT_ULL(29))
#define UPF_DEAD ((__force upf_t) BIT_ULL(30))
#define UPF_IOREMAP ((__force upf_t) BIT_ULL(31))
#define UPF_FULL_PROBE ((__force upf_t) BIT_ULL(32))
#define __UPF_CHANGE_MASK 0x17fff
#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY))
#if __UPF_CHANGE_MASK > ASYNC_FLAGS
#error Change mask not equivalent to userspace-visible bit defines
#endif
/*
* Must hold termios_rwsem, port mutex and port lock to change;
* can hold any one lock to read.
*/
upstat_t status;
#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2))
#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3))
#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
#define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5))
bool hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
unsigned int frame_time; /* frame timing in ns */
unsigned int type; /* port type */
const struct uart_ops *ops;
unsigned int custom_divisor;
unsigned int line; /* port index */
unsigned int minor;
resource_size_t mapbase; /* for ioremap */
resource_size_t mapsize;
struct device *dev; /* serial port physical parent device */
struct serial_port_device *port_dev; /* serial core port device */
unsigned long sysrq; /* sysrq timeout */
u8 sysrq_ch; /* char for sysrq */
unsigned char has_sysrq;
unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
unsigned char console_reinit;
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */
struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
struct gpio_desc *rs485_rx_during_tx_gpio; /* Output GPIO that sets the state of RS485 RX during TX */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
/*
* Only for console->device_lock()/_unlock() callbacks and internal
* port lock wrapper synchronization.
*/
static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
}
/*
* Only for console->device_lock()/_unlock() callbacks and internal
* port lock wrapper synchronization.
*/
static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
spin_unlock_irqrestore(&up->lock, flags);
}
/**
* uart_port_set_cons - Safely set the @cons field for a uart
* @up: The uart port to set
* @con: The new console to set to
*
* This function must be used to set @up->cons. It uses the port lock to
* synchronize with the port lock wrappers in order to ensure that the console
* cannot change or disappear while another context is holding the port lock.
*/
static inline void uart_port_set_cons(struct uart_port *up, struct console *con)
{
unsigned long flags;
__uart_port_lock_irqsave(up, &flags);
up->cons = con;
__uart_port_unlock_irqrestore(up, flags);
}
/* Only for internal port lock wrapper usage. */
static inline bool __uart_port_using_nbcon(struct uart_port *up)
{
	lockdep_assert_held_once(&up->lock);

	if (likely(!uart_console(up)))
		return false;
/*
* @up->cons is only modified under the port lock. Therefore it is
* certain that it cannot disappear here.
*
* @up->cons->node is added/removed from the console list under the
* port lock. Therefore it is certain that the registration status
* cannot change here, thus @up->cons->flags can be read directly.
*/
if (hlist_unhashed_lockless(&up->cons->node) || !(up->cons->flags & CON_NBCON) ||
!up->cons->write_atomic) {
return false;
}
return true;
}
/* Only for internal port lock wrapper usage. */
static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up)
{
if (!__uart_port_using_nbcon(up))
return true;
return nbcon_device_try_acquire(up->cons);
}
/* Only for internal port lock wrapper usage. */
static inline void __uart_port_nbcon_acquire(struct uart_port *up)
{
	if (!__uart_port_using_nbcon(up))
		return;
while (!nbcon_device_try_acquire(up->cons))
cpu_relax();
}
/* Only for internal port lock wrapper usage. */
static inline void __uart_port_nbcon_release(struct uart_port *up)
{
	if (!__uart_port_using_nbcon(up))
		return;

	nbcon_device_release(up->cons);
}
/**
* uart_port_lock - Lock the UART port
* @up: Pointer to UART port structure
*/
static inline void uart_port_lock(struct uart_port *up)
{
spin_lock(&up->lock);
__uart_port_nbcon_acquire(up);
}
/**
* uart_port_lock_irq - Lock the UART port and disable interrupts
* @up: Pointer to UART port structure
*/
static inline void uart_port_lock_irq(struct uart_port *up)
{
spin_lock_irq(&up->lock);
__uart_port_nbcon_acquire(up);
}
/**
* uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
* @up: Pointer to UART port structure
* @flags: Pointer to interrupt flags storage
*/
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
__uart_port_nbcon_acquire(up);
}
/**
* uart_port_trylock - Try to lock the UART port
* @up: Pointer to UART port structure
*
* Returns: True if lock was acquired, false otherwise
*/
static inline bool uart_port_trylock(struct uart_port *up)
{
if (!spin_trylock(&up->lock))
return false;
if (!__uart_port_nbcon_try_acquire(up)) {
spin_unlock(&up->lock);
return false;
}
return true;
}
/**
* uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
* @up: Pointer to UART port structure
* @flags: Pointer to interrupt flags storage
*
* Returns: True if lock was acquired, false otherwise
*/
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
	if (!spin_trylock_irqsave(&up->lock, *flags))
		return false;

	if (!__uart_port_nbcon_try_acquire(up)) {
spin_unlock_irqrestore(&up->lock, *flags);
return false;
}
return true;
}
/**
* uart_port_unlock - Unlock the UART port
* @up: Pointer to UART port structure
*/
static inline void uart_port_unlock(struct uart_port *up)
{
__uart_port_nbcon_release(up);
spin_unlock(&up->lock);
}
/**
* uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
* @up: Pointer to UART port structure
*/
static inline void uart_port_unlock_irq(struct uart_port *up)
{
__uart_port_nbcon_release(up);
spin_unlock_irq(&up->lock);
}
/**
* uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
* @up: Pointer to UART port structure
* @flags: The saved interrupt flags for restore
*/
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
__uart_port_nbcon_release(up);
spin_unlock_irqrestore(&up->lock, flags);
}
DEFINE_GUARD(uart_port_lock, struct uart_port *, uart_port_lock(_T), uart_port_unlock(_T));
DEFINE_GUARD_COND(uart_port_lock, _try, uart_port_trylock(_T));
DEFINE_GUARD(uart_port_lock_irq, struct uart_port *, uart_port_lock_irq(_T),
uart_port_unlock_irq(_T));
DEFINE_LOCK_GUARD_1(uart_port_lock_irqsave, struct uart_port,
uart_port_lock_irqsave(_T->lock, &_T->flags),
uart_port_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags);
DEFINE_LOCK_GUARD_1_COND(uart_port_lock_irqsave, _try,
uart_port_trylock_irqsave(_T->lock, &_T->flags));
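/*
 * Illustrative sketch (assumption, not taken from the original source): a
 * driver helper using the wrappers above instead of touching port->lock
 * directly, so that the nbcon console acquire/release handling stays in one
 * place.
 */
static inline void example_update_mctrl(struct uart_port *port, unsigned int mctrl)
{
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	port->mctrl = mctrl;
	/* ... program the modem control lines while holding the port lock ... */
	uart_port_unlock_irqrestore(port, flags);
}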
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
}
static inline void serial_port_out(struct uart_port *up, int offset, int value)
{
up->serial_out(up, offset, value);
}
/**
* enum uart_pm_state - power states for UARTs
* @UART_PM_STATE_ON: UART is powered, up and operational
* @UART_PM_STATE_OFF: UART is powered off
* @UART_PM_STATE_UNDEFINED: sentinel
*/
enum uart_pm_state {
UART_PM_STATE_ON = 0,
UART_PM_STATE_OFF = 3, /* number taken from ACPI */
UART_PM_STATE_UNDEFINED,
};
/*
* This is the state information which is persistent across opens.
*/
struct uart_state {
struct tty_port port;
enum uart_pm_state pm_state;
atomic_t refcount;
wait_queue_head_t remove_wait;
struct uart_port *uart_port;
};
#define UART_XMIT_SIZE PAGE_SIZE
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
/**
* uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
* @up: uart_port structure describing the port
* @chars: number of characters sent
*
* This function advances the tail of circular xmit buffer by the number of
* @chars transmitted and handles accounting of transmitted bytes (into
* @up's icount.tx).
*/
static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
{
struct tty_port *tport = &up->state->port;
kfifo_skip_count(&tport->xmit_fifo, chars);
up->icount.tx += chars;
}
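/*
 * Illustrative sketch (hypothetical foo_* driver): a DMA-completion handler
 * can account for the bytes the controller just transmitted with
 * uart_xmit_advance() and then ask the line discipline for more data once
 * the xmit buffer drops below WAKEUP_CHARS.
 *
 *	static void foo_uart_dma_tx_done(struct uart_port *port, unsigned int sent)
 *	{
 *		uart_xmit_advance(port, sent);
 *		if (kfifo_len(&port->state->port.xmit_fifo) < WAKEUP_CHARS)
 *			uart_write_wakeup(port);
 *	}
 */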
static inline unsigned int uart_fifo_out(struct uart_port *up,
unsigned char *buf, unsigned int chars)
{
struct tty_port *tport = &up->state->port;
chars = kfifo_out(&tport->xmit_fifo, buf, chars);
up->icount.tx += chars;
return chars;
}
static inline unsigned int uart_fifo_get(struct uart_port *up,
unsigned char *ch)
{
struct tty_port *tport = &up->state->port;
unsigned int chars;
chars = kfifo_get(&tport->xmit_fifo, ch);
up->icount.tx += chars;
return chars;
}
struct module;
struct tty_driver;
struct uart_driver {
struct module *owner;
const char *driver_name;
const char *dev_name;
int major;
int minor;
int nr;
struct console *cons;
/*
* these are private; the low level driver should not
* touch these; they should be initialised to NULL
*/
struct uart_state *state;
struct tty_driver *tty_driver;
};
void uart_write_wakeup(struct uart_port *port);
/**
* enum UART_TX_FLAGS -- flags for uart_port_tx_flags()
*
* @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer
*/
enum UART_TX_FLAGS {
UART_TX_NOSTOP = BIT(0),
};
#define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \
for_test, for_post) \
({ \
struct uart_port *__port = (uport); \
struct tty_port *__tport = &__port->state->port; \
unsigned int pending; \
\
for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \
if (__port->x_char) { \
(ch) = __port->x_char; \
(put_char); \
__port->x_char = 0; \
continue; \
} \
\
if (uart_tx_stopped(__port)) \
break; \
\
if (!kfifo_get(&__tport->xmit_fifo, &(ch))) \
break; \
\
(put_char); \
} \
\
(tx_done); \
\
pending = kfifo_len(&__tport->xmit_fifo); \
if (pending < WAKEUP_CHARS) { \
uart_write_wakeup(__port); \
\
if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
__port->ops->stop_tx(__port); \
} \
\
pending; \
})
/**
* uart_port_tx_limited -- transmit helper for uart_port with count limiting
* @port: uart port
* @ch: variable to store a character to be written to the HW
* @count: a limit of characters to send
* @tx_ready: can HW accept more data function
* @put_char: function to write a character
* @tx_done: function to call after the loop is done
*
* This helper transmits characters from the xmit buffer to the hardware using
* @put_char(). It does so until @count characters are sent and while @tx_ready
* evaluates to true.
*
* Returns: the number of characters in the xmit buffer when done.
*
* The expressions passed as the macro parameters shall be designed as follows (see the sketch after these helpers):
* * **tx_ready:** should evaluate to true if the HW can accept more data to
* be sent. This parameter can be %true, which means the HW is always ready.
* * **put_char:** shall write @ch to the device of @port.
* * **tx_done:** when the write loop is done, this can perform arbitrary
* action before potential invocation of ops->stop_tx() happens. If the
* driver does not need to do anything, use e.g. ({}).
*
* For all of them, @port->lock is held, interrupts are locally disabled and
* the expressions must not sleep.
*/
#define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \
unsigned int __count = (count); \
__uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \
__count--); \
})
/**
* uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting and flags
* @port: uart port
* @ch: variable to store a character to be written to the HW
* @flags: %UART_TX_NOSTOP or similar
* @count: a limit of characters to send
* @tx_ready: can HW accept more data function
* @put_char: function to write a character
* @tx_done: function to call after the loop is done
*
* See uart_port_tx_limited() for more details.
*/
#define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \
unsigned int __count = (count); \
__uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count, \
__count--); \
})
/**
* uart_port_tx -- transmit helper for uart_port
* @port: uart port
* @ch: variable to store a character to be written to the HW
* @tx_ready: can HW accept more data function
* @put_char: function to write a character
*
* See uart_port_tx_limited() for more details.
*/
#define uart_port_tx(port, ch, tx_ready, put_char) \
__uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({}))
/**
* uart_port_tx_flags -- transmit helper for uart_port with flags
* @port: uart port
* @ch: variable to store a character to be written to the HW
* @flags: %UART_TX_NOSTOP or similar
* @tx_ready: can HW accept more data function
* @put_char: function to write a character
*
* See uart_port_tx_limited() for more details.
*/
#define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \
__uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({}))
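/*
 * Illustrative sketch (hypothetical foo_* registers): a minimal TX interrupt
 * path built on uart_port_tx(). The tx_ready expression tests a FIFO-empty
 * bit and put_char writes one byte to the data register; the macro takes care
 * of x_char, flow control, kfifo consumption and the stop_tx()/wakeup
 * bookkeeping described above.
 *
 *	static void foo_uart_tx_chars(struct uart_port *port)
 *	{
 *		u8 ch;
 *
 *		uart_port_tx(port, ch,
 *			readb(port->membase + FOO_STAT) & FOO_STAT_TX_EMPTY,
 *			writeb(ch, port->membase + FOO_TX));
 *	}
 */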
/*
* Baud rate helpers.
*/
void uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud);
unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old, unsigned int min,
unsigned int max);
unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
/*
* Calculates FIFO drain time.
*/
static inline unsigned long uart_fifo_timeout(struct uart_port *port)
{
u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize;
/* Add .02 seconds of slop */
fifo_timeout += 20 * NSEC_PER_MSEC;
return max(nsecs_to_jiffies(fifo_timeout), 1UL);
}
/* Base timer interval for polling */
static inline unsigned long uart_poll_timeout(struct uart_port *port)
{
unsigned long timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
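/*
 * Illustrative sketch: a driver that polls the hardware instead of using
 * interrupts can rearm its timer with uart_poll_timeout(), so the polling
 * rate follows the configured baud rate (foo is a hypothetical driver
 * structure holding the timer and the port):
 *
 *	mod_timer(&foo->timer, jiffies + uart_poll_timeout(&foo->port));
 */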
/*
* Console helpers.
*/
struct earlycon_device {
struct console *con;
struct uart_port port;
char options[32]; /* e.g., 115200n8 */
unsigned int baud;
};
struct earlycon_id {
char name[15];
char name_term; /* In case compiler didn't '\0' term name */
char compatible[128];
int (*setup)(struct earlycon_device *, const char *options);
};
extern const struct earlycon_id __earlycon_table[];
extern const struct earlycon_id __earlycon_table_end[];
#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used
#else
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif
#define OF_EARLYCON_DECLARE(_name, compat, fn) \
static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \
__aligned(__alignof__(struct earlycon_id)) \
= { .name = __stringify(_name), \
.compatible = compat, \
.setup = fn }
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
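/*
 * Illustrative sketch (hypothetical "acme,foo-uart" binding and foo_* helpers):
 * an early console supplies a setup hook that validates the mapped registers
 * and installs a write callback; OF_EARLYCON_DECLARE() then places the entry
 * in the __earlycon_table section so it can be matched from the device tree
 * or an earlycon= command line parameter.
 *
 *	static void foo_early_write(struct console *con, const char *s,
 *				    unsigned int n)
 *	{
 *		struct earlycon_device *dev = con->data;
 *
 *		uart_console_write(&dev->port, s, n, foo_early_putc);
 *	}
 *
 *	static int __init foo_early_console_setup(struct earlycon_device *device,
 *						  const char *opt)
 *	{
 *		if (!device->port.membase)
 *			return -ENODEV;
 *		device->con->write = foo_early_write;
 *		return 0;
 *	}
 *	OF_EARLYCON_DECLARE(foo_uart, "acme,foo-uart", foo_early_console_setup);
 */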
int of_setup_earlycon(const struct earlycon_id *match, unsigned long node,
const char *options);
#ifdef CONFIG_SERIAL_EARLYCON
extern bool earlycon_acpi_spcr_enable __initdata;
int setup_earlycon(char *buf);
#else
static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
/* Variant of uart_console_registered() when the console_list_lock is held. */
static inline bool uart_console_registered_locked(struct uart_port *port)
{
return uart_console(port) && console_is_registered_locked(port->cons);
}
static inline bool uart_console_registered(struct uart_port *port)
{
return uart_console(port) && console_is_registered(port->cons);
}
int uart_parse_earlycon(char *p, enum uart_iotype *iotype,
resource_size_t *addr, char **options);
void uart_parse_options(const char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
int parity, int bits, int flow);
struct tty_driver *uart_console_device(struct console *co, int *index);
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
void (*putchar)(struct uart_port *, unsigned char));
/*
* Port/driver registration/removal
*/
int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
int uart_read_port_properties(struct uart_port *port);
int uart_read_and_validate_port_properties(struct uart_port *port);
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2);
/*
* Power Management
*/
int uart_suspend_port(struct uart_driver *reg, struct uart_port *port);
int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
if ((tty && tty->flow.stopped) || port->hw_stopped)
return 1;
return 0;
}
static inline bool uart_cts_enabled(struct uart_port *uport)
{
return !!(uport->status & UPSTAT_CTS_ENABLE);
}
static inline bool uart_softcts_mode(struct uart_port *uport)
{
upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS;
return ((uport->status & mask) == UPSTAT_CTS_ENABLE);
}
/*
* The following are helper functions for the low level drivers.
*/
void uart_handle_dcd_change(struct uart_port *uport, bool active);
void uart_handle_cts_change(struct uart_port *uport, bool active);
void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, u8 ch, u8 flag);
void uart_xchar_out(struct uart_port *uport, int offset);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
#define SYSRQ_TIMEOUT (HZ * 5)
bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch);
static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
if (ch && time_before(jiffies, port->sysrq)) {
if (sysrq_mask()) {
handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
if (uart_try_toggle_sysrq(port, ch))
return 1;
}
port->sysrq = 0;
return 0;
}
static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
if (ch && time_before(jiffies, port->sysrq)) {
if (sysrq_mask()) {
port->sysrq_ch = ch;
port->sysrq = 0;
return 1;
}
if (uart_try_toggle_sysrq(port, ch))
return 1;
}
port->sysrq = 0;
return 0;
}
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
u8 sysrq_ch;
if (!port->has_sysrq) {
uart_port_unlock(port);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
uart_port_unlock(port);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
}
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
u8 sysrq_ch;
if (!port->has_sysrq) {
uart_port_unlock_irqrestore(port, flags);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
uart_port_unlock_irqrestore(port, flags);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
}
#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
uart_port_unlock(port);
}
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
/*
* We do the SysRQ and SAK checking like this...
*/
static inline int uart_handle_break(struct uart_port *port)
{
struct uart_state *state = port->state;
if (port->handle_break)
port->handle_break(port);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
if (port->has_sysrq && uart_console(port)) {
if (!port->sysrq) {
port->sysrq = jiffies + SYSRQ_TIMEOUT;
return 1;
}
port->sysrq = 0;
}
#endif
if (port->flags & UPF_SAK)
do_SAK(state->port.tty);
return 0;
}
/*
* UART_ENABLE_MS - determine if port should enable modem status irqs
*/
#define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
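/*
 * Illustrative sketch: drivers typically consult UART_ENABLE_MS() in their
 * set_termios() callback to decide whether modem-status interrupts should be
 * enabled for the new line settings (the foo_* helpers are hypothetical):
 *
 *	if (UART_ENABLE_MS(port, termios->c_cflag))
 *		foo_uart_enable_ms_irq(port);
 *	else
 *		foo_uart_disable_ms_irq(port);
 */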
int uart_get_rs485_mode(struct uart_port *port);
#endif /* LINUX_SERIAL_CORE_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/* auditsc.c -- System-call auditing support
* Handles all system-call specific auditing features.
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* Copyright 2005 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2005, 2006 IBM Corporation
* All Rights Reserved.
*
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
*
* Many of the ideas implemented here are from Stephen C. Tweedie,
* especially the idea of avoiding a copy by using getname.
*
* The method for actual interception of syscall entry and exit (not in
* this file -- see entry.S) is based on a GPL'd patch written by
* okir@suse.de and Copyright 2003 SuSE Linux AG.
*
* POSIX message queue support added by George Wilson <ltcgcw@us.ibm.com>,
* 2006.
*
* The support of additional filter rules compares (>, <, >=, <=) was
* added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005.
*
* Modified by Amy Griffis <amy.griffis@hp.com> to collect additional
* filesystem information.
*
* Subject and object context labeling support added by <danjones@us.ibm.com>
* and <dustin.kirkland@us.ibm.com> for LSPP certification compliance.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <asm/types.h>
#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/socket.h>
#include <linux/mqueue.h>
#include <linux/audit.h>
#include <linux/personality.h>
#include <linux/time.h>
#include <linux/netlink.h>
#include <linux/compiler.h>
#include <asm/unistd.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <asm/syscall.h>
#include <linux/capability.h>
#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/fsnotify_backend.h>
#include <uapi/linux/limits.h>
#include <uapi/linux/netfilter/nf_tables.h>
#include <uapi/linux/openat2.h> // struct open_how
#include <uapi/linux/fanotify.h>
#include "audit.h"
/* flags stating the success for a syscall */
#define AUDITSC_INVALID 0
#define AUDITSC_SUCCESS 1
#define AUDITSC_FAILURE 2
/* no execve audit message should be longer than this (userspace limits),
* see the note near the top of audit_log_execve_info() about this value */
#define MAX_EXECVE_AUDIT_LEN 7500
/* max length to print of cmdline/proctitle value during audit */
#define MAX_PROCTITLE_AUDIT_LEN 128
/* number of audit rules */
int audit_n_rules;
/* determines whether we collect data for signals sent */
int audit_signals;
struct audit_aux_data {
struct audit_aux_data *next;
int type;
};
/* Number of target pids per aux struct. */
#define AUDIT_AUX_PIDS 16
struct audit_aux_data_pids {
struct audit_aux_data d;
pid_t target_pid[AUDIT_AUX_PIDS];
kuid_t target_auid[AUDIT_AUX_PIDS];
kuid_t target_uid[AUDIT_AUX_PIDS];
unsigned int target_sessionid[AUDIT_AUX_PIDS];
struct lsm_prop target_ref[AUDIT_AUX_PIDS];
char target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN];
int pid_count;
};
struct audit_aux_data_bprm_fcaps {
struct audit_aux_data d;
struct audit_cap_data fcap;
unsigned int fcap_ver;
struct audit_cap_data old_pcap;
struct audit_cap_data new_pcap;
};
struct audit_tree_refs {
struct audit_tree_refs *next;
struct audit_chunk *c[31];
};
struct audit_nfcfgop_tab {
enum audit_nfcfgop op;
const char *s;
};
static const struct audit_nfcfgop_tab audit_nfcfgs[] = {
{ AUDIT_XT_OP_REGISTER, "xt_register" },
{ AUDIT_XT_OP_REPLACE, "xt_replace" },
{ AUDIT_XT_OP_UNREGISTER, "xt_unregister" },
{ AUDIT_NFT_OP_TABLE_REGISTER, "nft_register_table" },
{ AUDIT_NFT_OP_TABLE_UNREGISTER, "nft_unregister_table" },
{ AUDIT_NFT_OP_CHAIN_REGISTER, "nft_register_chain" },
{ AUDIT_NFT_OP_CHAIN_UNREGISTER, "nft_unregister_chain" },
{ AUDIT_NFT_OP_RULE_REGISTER, "nft_register_rule" },
{ AUDIT_NFT_OP_RULE_UNREGISTER, "nft_unregister_rule" },
{ AUDIT_NFT_OP_SET_REGISTER, "nft_register_set" },
{ AUDIT_NFT_OP_SET_UNREGISTER, "nft_unregister_set" },
{ AUDIT_NFT_OP_SETELEM_REGISTER, "nft_register_setelem" },
{ AUDIT_NFT_OP_SETELEM_UNREGISTER, "nft_unregister_setelem" },
{ AUDIT_NFT_OP_GEN_REGISTER, "nft_register_gen" },
{ AUDIT_NFT_OP_OBJ_REGISTER, "nft_register_obj" },
{ AUDIT_NFT_OP_OBJ_UNREGISTER, "nft_unregister_obj" },
{ AUDIT_NFT_OP_OBJ_RESET, "nft_reset_obj" },
{ AUDIT_NFT_OP_FLOWTABLE_REGISTER, "nft_register_flowtable" },
{ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, "nft_unregister_flowtable" },
{ AUDIT_NFT_OP_SETELEM_RESET, "nft_reset_setelem" },
{ AUDIT_NFT_OP_RULE_RESET, "nft_reset_rule" },
{ AUDIT_NFT_OP_INVALID, "nft_invalid" },
};
static int audit_match_perm(struct audit_context *ctx, int mask)
{
unsigned n;
if (unlikely(!ctx))
return 0;
n = ctx->major;
switch (audit_classify_syscall(ctx->arch, n)) {
case AUDITSC_NATIVE:
if ((mask & AUDIT_PERM_WRITE) &&
audit_match_class(AUDIT_CLASS_WRITE, n))
return 1;
if ((mask & AUDIT_PERM_READ) &&
audit_match_class(AUDIT_CLASS_READ, n))
return 1;
if ((mask & AUDIT_PERM_ATTR) &&
audit_match_class(AUDIT_CLASS_CHATTR, n))
return 1;
return 0;
case AUDITSC_COMPAT: /* 32bit on biarch */
if ((mask & AUDIT_PERM_WRITE) &&
audit_match_class(AUDIT_CLASS_WRITE_32, n))
return 1;
if ((mask & AUDIT_PERM_READ) &&
audit_match_class(AUDIT_CLASS_READ_32, n))
return 1;
if ((mask & AUDIT_PERM_ATTR) &&
audit_match_class(AUDIT_CLASS_CHATTR_32, n))
return 1;
return 0;
case AUDITSC_OPEN:
return mask & ACC_MODE(ctx->argv[1]);
case AUDITSC_OPENAT:
return mask & ACC_MODE(ctx->argv[2]);
case AUDITSC_SOCKETCALL:
return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND);
case AUDITSC_EXECVE:
return mask & AUDIT_PERM_EXEC;
case AUDITSC_OPENAT2:
return mask & ACC_MODE((u32)ctx->openat2.flags);
default:
return 0;
}
}
static int audit_match_filetype(struct audit_context *ctx, int val)
{
struct audit_names *n;
umode_t mode = (umode_t)val;
if (unlikely(!ctx))
return 0;
list_for_each_entry(n, &ctx->names_list, list) {
if ((n->ino != AUDIT_INO_UNSET) &&
((n->mode & S_IFMT) == mode))
return 1;
}
return 0;
}
/*
* We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *;
* ->first_trees points to its beginning, ->trees - to the current end of data.
* ->tree_count is the number of free entries in array pointed to by ->trees.
* Original condition is (NULL, NULL, 0); as soon as it grows we never revert to NULL,
* "empty" becomes (p, p, 31) afterwards. We don't shrink the list (and seriously,
* it's going to remain 1-element for almost any setup) until we free context itself.
* References in it _are_ dropped - at the same time we free/drop aux stuff.
*/
static void audit_set_auditable(struct audit_context *ctx)
{
if (!ctx->prio) {
ctx->prio = 1;
ctx->current_state = AUDIT_STATE_RECORD;
}
}
static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
{
struct audit_tree_refs *p = ctx->trees;
int left = ctx->tree_count;
if (likely(left)) {
p->c[--left] = chunk;
ctx->tree_count = left;
return 1;
}
if (!p)
return 0;
p = p->next;
if (p) {
p->c[30] = chunk;
ctx->trees = p;
ctx->tree_count = 30;
return 1;
}
return 0;
}
static int grow_tree_refs(struct audit_context *ctx)
{
struct audit_tree_refs *p = ctx->trees;
ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL);
if (!ctx->trees) {
ctx->trees = p;
return 0;
}
if (p)
p->next = ctx->trees;
else
ctx->first_trees = ctx->trees;
ctx->tree_count = 31;
return 1;
}
static void unroll_tree_refs(struct audit_context *ctx,
struct audit_tree_refs *p, int count)
{
struct audit_tree_refs *q;
int n;
if (!p) {
/* we started with empty chain */
p = ctx->first_trees;
count = 31;
/* if the very first allocation has failed, nothing to do */
if (!p)
return;
}
n = count;
for (q = p; q != ctx->trees; q = q->next, n = 31) {
while (n--) {
audit_put_chunk(q->c[n]);
q->c[n] = NULL;
}
}
while (n-- > ctx->tree_count) {
audit_put_chunk(q->c[n]);
q->c[n] = NULL;
}
ctx->trees = p;
ctx->tree_count = count;
}
static void free_tree_refs(struct audit_context *ctx)
{
struct audit_tree_refs *p, *q;
for (p = ctx->first_trees; p; p = q) {
q = p->next;
kfree(p);
}
}
static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
{
struct audit_tree_refs *p;
int n;
if (!tree)
return 0;
/* full ones */
for (p = ctx->first_trees; p != ctx->trees; p = p->next) {
for (n = 0; n < 31; n++)
if (audit_tree_match(p->c[n], tree))
return 1;
}
/* partial */
if (p) {
for (n = ctx->tree_count; n < 31; n++)
if (audit_tree_match(p->c[n], tree))
return 1;
}
return 0;
}
static int audit_compare_uid(kuid_t uid,
struct audit_names *name,
struct audit_field *f,
struct audit_context *ctx)
{
struct audit_names *n;
int rc;
if (name) {
rc = audit_uid_comparator(uid, f->op, name->uid);
if (rc)
return rc;
}
if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
rc = audit_uid_comparator(uid, f->op, n->uid);
if (rc)
return rc;
}
}
return 0;
}
static int audit_compare_gid(kgid_t gid,
struct audit_names *name,
struct audit_field *f,
struct audit_context *ctx)
{
struct audit_names *n;
int rc;
if (name) {
rc = audit_gid_comparator(gid, f->op, name->gid);
if (rc)
return rc;
}
if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
rc = audit_gid_comparator(gid, f->op, n->gid);
if (rc)
return rc;
}
}
return 0;
}
static int audit_field_compare(struct task_struct *tsk,
const struct cred *cred,
struct audit_field *f,
struct audit_context *ctx,
struct audit_names *name)
{
switch (f->val) {
/* process to file object comparisons */
case AUDIT_COMPARE_UID_TO_OBJ_UID:
return audit_compare_uid(cred->uid, name, f, ctx);
case AUDIT_COMPARE_GID_TO_OBJ_GID:
return audit_compare_gid(cred->gid, name, f, ctx);
case AUDIT_COMPARE_EUID_TO_OBJ_UID:
return audit_compare_uid(cred->euid, name, f, ctx);
case AUDIT_COMPARE_EGID_TO_OBJ_GID:
return audit_compare_gid(cred->egid, name, f, ctx);
case AUDIT_COMPARE_AUID_TO_OBJ_UID:
return audit_compare_uid(audit_get_loginuid(tsk), name, f, ctx);
case AUDIT_COMPARE_SUID_TO_OBJ_UID:
return audit_compare_uid(cred->suid, name, f, ctx);
case AUDIT_COMPARE_SGID_TO_OBJ_GID:
return audit_compare_gid(cred->sgid, name, f, ctx);
case AUDIT_COMPARE_FSUID_TO_OBJ_UID:
return audit_compare_uid(cred->fsuid, name, f, ctx);
case AUDIT_COMPARE_FSGID_TO_OBJ_GID:
return audit_compare_gid(cred->fsgid, name, f, ctx);
/* uid comparisons */
case AUDIT_COMPARE_UID_TO_AUID:
return audit_uid_comparator(cred->uid, f->op,
audit_get_loginuid(tsk));
case AUDIT_COMPARE_UID_TO_EUID:
return audit_uid_comparator(cred->uid, f->op, cred->euid);
case AUDIT_COMPARE_UID_TO_SUID:
return audit_uid_comparator(cred->uid, f->op, cred->suid);
case AUDIT_COMPARE_UID_TO_FSUID:
return audit_uid_comparator(cred->uid, f->op, cred->fsuid);
/* auid comparisons */
case AUDIT_COMPARE_AUID_TO_EUID:
return audit_uid_comparator(audit_get_loginuid(tsk), f->op,
cred->euid);
case AUDIT_COMPARE_AUID_TO_SUID:
return audit_uid_comparator(audit_get_loginuid(tsk), f->op,
cred->suid);
case AUDIT_COMPARE_AUID_TO_FSUID:
return audit_uid_comparator(audit_get_loginuid(tsk), f->op,
cred->fsuid);
/* euid comparisons */
case AUDIT_COMPARE_EUID_TO_SUID:
return audit_uid_comparator(cred->euid, f->op, cred->suid);
case AUDIT_COMPARE_EUID_TO_FSUID:
return audit_uid_comparator(cred->euid, f->op, cred->fsuid);
/* suid comparisons */
case AUDIT_COMPARE_SUID_TO_FSUID:
return audit_uid_comparator(cred->suid, f->op, cred->fsuid);
/* gid comparisons */
case AUDIT_COMPARE_GID_TO_EGID:
return audit_gid_comparator(cred->gid, f->op, cred->egid);
case AUDIT_COMPARE_GID_TO_SGID:
return audit_gid_comparator(cred->gid, f->op, cred->sgid);
case AUDIT_COMPARE_GID_TO_FSGID:
return audit_gid_comparator(cred->gid, f->op, cred->fsgid);
/* egid comparisons */
case AUDIT_COMPARE_EGID_TO_SGID:
return audit_gid_comparator(cred->egid, f->op, cred->sgid);
case AUDIT_COMPARE_EGID_TO_FSGID:
return audit_gid_comparator(cred->egid, f->op, cred->fsgid);
/* sgid comparison */
case AUDIT_COMPARE_SGID_TO_FSGID:
return audit_gid_comparator(cred->sgid, f->op, cred->fsgid);
default:
WARN(1, "Missing AUDIT_COMPARE define. Report as a bug\n");
return 0;
}
return 0;
}
/* Determine if any context name data matches a rule's watch data */
/* Compare a task_struct with an audit_rule. Return 1 on match, 0
* otherwise.
*
* If task_creation is true, this is an explicit indication that we are
* filtering a task rule at task creation time. This and tsk == current are
* the only situations where tsk->cred may be accessed without an rcu read lock.
*/
static int audit_filter_rules(struct task_struct *tsk,
struct audit_krule *rule,
struct audit_context *ctx,
struct audit_names *name,
enum audit_state *state,
bool task_creation)
{
const struct cred *cred;
int i, need_sid = 1;
struct lsm_prop prop = { };
unsigned int sessionid;
if (ctx && rule->prio <= ctx->prio)
return 0;
cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);
for (i = 0; i < rule->field_count; i++) {
struct audit_field *f = &rule->fields[i];
struct audit_names *n;
int result = 0;
pid_t pid;
switch (f->type) {
case AUDIT_PID:
pid = task_tgid_nr(tsk);
result = audit_comparator(pid, f->op, f->val);
break;
case AUDIT_PPID:
if (ctx) {
if (!ctx->ppid)
ctx->ppid = task_ppid_nr(tsk);
result = audit_comparator(ctx->ppid, f->op, f->val);
}
break;
case AUDIT_EXE:
result = audit_exe_compare(tsk, rule->exe);
if (f->op == Audit_not_equal)
result = !result;
break;
case AUDIT_UID:
result = audit_uid_comparator(cred->uid, f->op, f->uid);
break;
case AUDIT_EUID:
result = audit_uid_comparator(cred->euid, f->op, f->uid);
break;
case AUDIT_SUID:
result = audit_uid_comparator(cred->suid, f->op, f->uid);
break;
case AUDIT_FSUID:
result = audit_uid_comparator(cred->fsuid, f->op, f->uid);
break;
case AUDIT_GID:
result = audit_gid_comparator(cred->gid, f->op, f->gid);
if (f->op == Audit_equal) {
if (!result)
result = groups_search(cred->group_info, f->gid);
} else if (f->op == Audit_not_equal) {
if (result)
result = !groups_search(cred->group_info, f->gid);
}
break;
case AUDIT_EGID:
result = audit_gid_comparator(cred->egid, f->op, f->gid);
if (f->op == Audit_equal) {
if (!result)
result = groups_search(cred->group_info, f->gid);
} else if (f->op == Audit_not_equal) {
if (result)
result = !groups_search(cred->group_info, f->gid);
}
break;
case AUDIT_SGID:
result = audit_gid_comparator(cred->sgid, f->op, f->gid);
break;
case AUDIT_FSGID:
result = audit_gid_comparator(cred->fsgid, f->op, f->gid);
break;
case AUDIT_SESSIONID:
sessionid = audit_get_sessionid(tsk);
result = audit_comparator(sessionid, f->op, f->val);
break;
case AUDIT_PERS:
result = audit_comparator(tsk->personality, f->op, f->val);
break;
case AUDIT_ARCH:
if (ctx)
result = audit_comparator(ctx->arch, f->op, f->val);
break;
case AUDIT_EXIT:
if (ctx && ctx->return_valid != AUDITSC_INVALID)
result = audit_comparator(ctx->return_code, f->op, f->val);
break;
case AUDIT_SUCCESS:
if (ctx && ctx->return_valid != AUDITSC_INVALID) {
if (f->val)
result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS);
else
result = audit_comparator(ctx->return_valid, f->op, AUDITSC_FAILURE);
}
break;
case AUDIT_DEVMAJOR:
if (name) {
if (audit_comparator(MAJOR(name->dev), f->op, f->val) ||
audit_comparator(MAJOR(name->rdev), f->op, f->val))
++result;
} else if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
if (audit_comparator(MAJOR(n->dev), f->op, f->val) ||
audit_comparator(MAJOR(n->rdev), f->op, f->val)) {
++result;
break;
}
}
}
break;
case AUDIT_DEVMINOR:
if (name) {
if (audit_comparator(MINOR(name->dev), f->op, f->val) ||
audit_comparator(MINOR(name->rdev), f->op, f->val))
++result;
} else if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
if (audit_comparator(MINOR(n->dev), f->op, f->val) ||
audit_comparator(MINOR(n->rdev), f->op, f->val)) {
++result;
break;
}
}
}
break;
case AUDIT_INODE:
if (name)
result = audit_comparator(name->ino, f->op, f->val);
else if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
if (audit_comparator(n->ino, f->op, f->val)) {
++result;
break;
}
}
}
break;
case AUDIT_OBJ_UID:
if (name) {
result = audit_uid_comparator(name->uid, f->op, f->uid);
} else if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
if (audit_uid_comparator(n->uid, f->op, f->uid)) {
++result;
break;
}
}
}
break;
case AUDIT_OBJ_GID:
if (name) {
result = audit_gid_comparator(name->gid, f->op, f->gid);
} else if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
if (audit_gid_comparator(n->gid, f->op, f->gid)) {
++result;
break;
}
}
}
break;
case AUDIT_WATCH:
if (name) {
result = audit_watch_compare(rule->watch,
name->ino,
name->dev);
if (f->op == Audit_not_equal)
result = !result;
}
break;
case AUDIT_DIR:
if (ctx) {
result = match_tree_refs(ctx, rule->tree);
if (f->op == Audit_not_equal)
result = !result;
}
break;
case AUDIT_LOGINUID:
result = audit_uid_comparator(audit_get_loginuid(tsk),
f->op, f->uid);
break;
case AUDIT_LOGINUID_SET:
result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
break;
case AUDIT_SADDR_FAM:
if (ctx && ctx->sockaddr)
result = audit_comparator(ctx->sockaddr->ss_family,
f->op, f->val);
break;
case AUDIT_SUBJ_USER:
case AUDIT_SUBJ_ROLE:
case AUDIT_SUBJ_TYPE:
case AUDIT_SUBJ_SEN:
case AUDIT_SUBJ_CLR:
/* NOTE: this may return negative values indicating
a temporary error. We simply treat this as a
match for now to avoid losing information that
may be wanted. An error message will also be
logged upon error */
if (f->lsm_rule) {
if (need_sid) {
/* @tsk should always be equal to
* @current with the exception of
* fork()/copy_process() in which case
* the new @tsk creds are still a dup
* of @current's creds so we can still
* use
* security_current_getlsmprop_subj()
* here even though it always refs
* @current's creds
*/
security_current_getlsmprop_subj(&prop);
need_sid = 0;
}
result = security_audit_rule_match(&prop,
f->type,
f->op,
f->lsm_rule);
}
break;
case AUDIT_OBJ_USER:
case AUDIT_OBJ_ROLE:
case AUDIT_OBJ_TYPE:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
/* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR
also applies here */
if (f->lsm_rule) {
/* Find files that match */
if (name) {
result = security_audit_rule_match(
&name->oprop,
f->type,
f->op,
f->lsm_rule);
} else if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
if (security_audit_rule_match(
&n->oprop,
f->type,
f->op,
f->lsm_rule)) {
++result;
break;
}
}
}
/* Find ipc objects that match */
if (!ctx || ctx->type != AUDIT_IPC)
break;
if (security_audit_rule_match(&ctx->ipc.oprop,
f->type, f->op,
f->lsm_rule))
++result;
}
break;
case AUDIT_ARG0:
case AUDIT_ARG1:
case AUDIT_ARG2:
case AUDIT_ARG3:
if (ctx)
result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val);
break;
case AUDIT_FILTERKEY:
/* ignore this field for filtering */
result = 1;
break;
case AUDIT_PERM:
result = audit_match_perm(ctx, f->val);
if (f->op == Audit_not_equal)
result = !result;
break;
case AUDIT_FILETYPE:
result = audit_match_filetype(ctx, f->val);
if (f->op == Audit_not_equal)
result = !result;
break;
case AUDIT_FIELD_COMPARE:
result = audit_field_compare(tsk, cred, f, ctx, name);
break;
}
if (!result)
return 0;
}
if (ctx) {
if (rule->filterkey) {
kfree(ctx->filterkey);
ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
}
ctx->prio = rule->prio;
}
switch (rule->action) {
case AUDIT_NEVER:
*state = AUDIT_STATE_DISABLED;
break;
case AUDIT_ALWAYS:
*state = AUDIT_STATE_RECORD;
break;
}
return 1;
}
/* At process creation time, we can determine if system-call auditing is
* completely disabled for this task. Since we only have the task
* structure at this point, we can only check uid and gid.
*/
static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
{
struct audit_entry *e;
enum audit_state state;
rcu_read_lock();
list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
	if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
&state, true)) {
if (state == AUDIT_STATE_RECORD)
*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
rcu_read_unlock();
return state;
}
}
rcu_read_unlock();
return AUDIT_STATE_BUILD;
}
static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
{
int word, bit;
if (val > 0xffffffff)
return false;
word = AUDIT_WORD(val);
if (word >= AUDIT_BITMASK_SIZE)
return false;
bit = AUDIT_BIT(val);
return rule->mask[word] & bit;
}
/**
* __audit_filter_op - common filter helper for operations (syscall/uring/etc)
* @tsk: associated task
* @ctx: audit context
* @list: audit filter list
* @name: audit_name (can be NULL)
* @op: current syscall/uring_op
*
* Run the audit filters specified in @list against @tsk using @ctx,
* @name, and @op, as necessary; the caller is responsible for ensuring
* that the call is made while the RCU read lock is held. The @name
* parameter can be NULL, but all others must be specified.
* Returns 1/true if the filter finds a match, 0/false if none are found.
*/
static int __audit_filter_op(struct task_struct *tsk,
struct audit_context *ctx,
struct list_head *list,
struct audit_names *name,
unsigned long op)
{
struct audit_entry *e;
enum audit_state state;
list_for_each_entry_rcu(e, list, list) {
if (audit_in_mask(&e->rule, op) &&
audit_filter_rules(tsk, &e->rule, ctx, name,
&state, false)) {
ctx->current_state = state;
return 1;
}
}
return 0;
}
/**
* audit_filter_uring - apply filters to an io_uring operation
* @tsk: associated task
* @ctx: audit context
*/
static void audit_filter_uring(struct task_struct *tsk,
struct audit_context *ctx)
{
if (auditd_test_task(tsk))
return;
rcu_read_lock();
__audit_filter_op(tsk, ctx, &audit_filter_list[AUDIT_FILTER_URING_EXIT],
NULL, ctx->uring_op);
rcu_read_unlock();
}
/* At syscall exit time, this filter is called if the audit_state is
* not low enough that auditing cannot take place, but is also not
* high enough that we already know we have to write an audit record
* (i.e., the state is AUDIT_STATE_BUILD).
*/
static void audit_filter_syscall(struct task_struct *tsk,
struct audit_context *ctx)
{
if (auditd_test_task(tsk))
return;
rcu_read_lock();
__audit_filter_op(tsk, ctx, &audit_filter_list[AUDIT_FILTER_EXIT],
NULL, ctx->major);
rcu_read_unlock();
}
/*
* Given an audit_name check the inode hash table to see if they match.
* Called holding the rcu read lock to protect the use of audit_inode_hash
*/
static int audit_filter_inode_name(struct task_struct *tsk,
struct audit_names *n,
struct audit_context *ctx)
{
int h = audit_hash_ino((u32)n->ino);
struct list_head *list = &audit_inode_hash[h];
return __audit_filter_op(tsk, ctx, list, n, ctx->major);
}
/* At syscall exit time, this filter is called if any audit_names have been
* collected during syscall processing. We only check rules in sublists at hash
* buckets applicable to the inode numbers in audit_names.
* Regarding audit_state, same rules apply as for audit_filter_syscall().
*/
void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
{
struct audit_names *n;
if (auditd_test_task(tsk))
return;
rcu_read_lock();
list_for_each_entry(n, &ctx->names_list, list) {
if (audit_filter_inode_name(tsk, n, ctx))
break;
}
rcu_read_unlock();
}
static inline void audit_proctitle_free(struct audit_context *context)
{
kfree(context->proctitle.value);
context->proctitle.value = NULL;
context->proctitle.len = 0;
}
static inline void audit_free_module(struct audit_context *context)
{
if (context->type == AUDIT_KERN_MODULE) {
kfree(context->module.name);
context->module.name = NULL;
}
}
static inline void audit_free_names(struct audit_context *context)
{
struct audit_names *n, *next;
list_for_each_entry_safe(n, next, &context->names_list, list) {
list_del(&n->list);
if (n->name)
putname(n->name);
if (n->should_free)
kfree(n);
}
context->name_count = 0;
path_put(&context->pwd);
context->pwd.dentry = NULL;
context->pwd.mnt = NULL;
}
static inline void audit_free_aux(struct audit_context *context)
{
struct audit_aux_data *aux;
while ((aux = context->aux)) {
context->aux = aux->next;
kfree(aux);
}
context->aux = NULL;
while ((aux = context->aux_pids)) {
context->aux_pids = aux->next;
kfree(aux);
}
context->aux_pids = NULL;
}
/**
* audit_reset_context - reset an audit_context structure
* @ctx: the audit_context to reset
*
* All fields in the audit_context will be reset to an initial state, all
* references held by fields will be dropped, and private memory will be
* released. When this function returns the audit_context will be suitable
* for reuse, so long as the passed context is not NULL or a dummy context.
*/
static void audit_reset_context(struct audit_context *ctx)
{
if (!ctx)
return;
/* if ctx is non-null, reset the "ctx->context" regardless */
ctx->context = AUDIT_CTX_UNUSED;
if (ctx->dummy)
return;
/*
* NOTE: It shouldn't matter in what order we release the fields, so
* release them in the order in which they appear in the struct;
* this gives us some hope of quickly making sure we are
* resetting the audit_context properly.
*
* Other things worth mentioning:
* - we don't reset "dummy"
* - we don't reset "state", we do reset "current_state"
* - we preserve "filterkey" if "state" is AUDIT_STATE_RECORD
* - much of this is likely overkill, but play it safe for now
* - we really need to work on improving the audit_context struct
*/
ctx->current_state = ctx->state;
ctx->stamp.serial = 0;
ctx->stamp.ctime = (struct timespec64){ .tv_sec = 0, .tv_nsec = 0 };
ctx->major = 0;
ctx->uring_op = 0;
memset(ctx->argv, 0, sizeof(ctx->argv));
ctx->return_code = 0;
ctx->prio = (ctx->state == AUDIT_STATE_RECORD ? ~0ULL : 0);
ctx->return_valid = AUDITSC_INVALID;
audit_free_names(ctx);
if (ctx->state != AUDIT_STATE_RECORD) {
kfree(ctx->filterkey);
ctx->filterkey = NULL;
}
audit_free_aux(ctx);
kfree(ctx->sockaddr);
ctx->sockaddr = NULL;
ctx->sockaddr_len = 0;
ctx->ppid = 0;
ctx->uid = ctx->euid = ctx->suid = ctx->fsuid = KUIDT_INIT(0);
ctx->gid = ctx->egid = ctx->sgid = ctx->fsgid = KGIDT_INIT(0);
ctx->personality = 0;
ctx->arch = 0;
ctx->target_pid = 0;
ctx->target_auid = ctx->target_uid = KUIDT_INIT(0);
ctx->target_sessionid = 0;
lsmprop_init(&ctx->target_ref);
ctx->target_comm[0] = '\0';
unroll_tree_refs(ctx, NULL, 0);
WARN_ON(!list_empty(&ctx->killed_trees));
audit_free_module(ctx);
ctx->fds[0] = -1;
ctx->type = 0; /* reset last for audit_free_*() */
}
static inline struct audit_context *audit_alloc_context(enum audit_state state)
{
struct audit_context *context;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return NULL;
context->context = AUDIT_CTX_UNUSED;
context->state = state;
context->prio = state == AUDIT_STATE_RECORD ? ~0ULL : 0;
INIT_LIST_HEAD(&context->killed_trees);
INIT_LIST_HEAD(&context->names_list);
context->fds[0] = -1;
context->return_valid = AUDITSC_INVALID;
return context;
}
/**
* audit_alloc - allocate an audit context block for a task
* @tsk: task
*
* Filter on the task information and allocate a per-task audit context
* if necessary. Doing so turns on system call auditing for the
* specified task. This is called from copy_process, so no lock is
* needed.
*/
int audit_alloc(struct task_struct *tsk)
{
struct audit_context *context;
enum audit_state state;
char *key = NULL;
if (likely(!audit_ever_enabled))
	return 0;
state = audit_filter_task(tsk, &key);
if (state == AUDIT_STATE_DISABLED) {
clear_task_syscall_work(tsk, SYSCALL_AUDIT);
return 0;
}
context = audit_alloc_context(state);
if (!context) {
kfree(key);
audit_log_lost("out of memory in audit_alloc");
return -ENOMEM;
}
context->filterkey = key;
audit_set_context(tsk, context);
set_task_syscall_work(tsk, SYSCALL_AUDIT);
return 0;
}
static inline void audit_free_context(struct audit_context *context)
{
/* resetting is extra work, but it is likely just noise */
audit_reset_context(context);
audit_proctitle_free(context);
free_tree_refs(context);
kfree(context->filterkey);
kfree(context);
}
static int audit_log_pid_context(struct audit_context *context, pid_t pid,
kuid_t auid, kuid_t uid,
unsigned int sessionid, struct lsm_prop *prop,
char *comm)
{
struct audit_buffer *ab;
int rc = 0;
ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID);
if (!ab)
return rc;
audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
from_kuid(&init_user_ns, auid),
from_kuid(&init_user_ns, uid), sessionid);
if (lsmprop_is_set(prop) && audit_log_obj_ctx(ab, prop))
rc = 1;
audit_log_format(ab, " ocomm=");
audit_log_untrustedstring(ab, comm);
audit_log_end(ab);
return rc;
}
static void audit_log_execve_info(struct audit_context *context,
struct audit_buffer **ab)
{
long len_max;
long len_rem;
long len_full;
long len_buf;
long len_abuf = 0;
long len_tmp;
bool require_data;
bool encode;
unsigned int iter;
unsigned int arg;
char *buf_head;
char *buf;
const char __user *p = (const char __user *)current->mm->arg_start;
/* NOTE: this buffer needs to be large enough to hold all the non-arg
* data we put in the audit record for this argument (see the
* code below) ... at this point in time 96 is plenty */
char abuf[96];
/* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the
* current value of 7500 is not as important as the fact that it
* is less than 8k, a setting of 7500 gives us plenty of wiggle
* room if we go over a little bit in the logging below */
WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);
len_max = MAX_EXECVE_AUDIT_LEN;
/* scratch buffer to hold the userspace args */
buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
if (!buf_head) {
audit_panic("out of memory for argv string");
return;
}
buf = buf_head;
audit_log_format(*ab, "argc=%d", context->execve.argc);
len_rem = len_max;
len_buf = 0;
len_full = 0;
require_data = true;
encode = false;
iter = 0;
arg = 0;
do {
/* NOTE: we don't ever want to trust this value for anything
* serious, but the audit record format insists we
* provide an argument length for really long arguments,
* e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but
* to use strncpy_from_user() to obtain this value for
* recording in the log, although we don't use it
* anywhere here to avoid a double-fetch problem */
if (len_full == 0)
len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;
/* read more data from userspace */
if (require_data) {
/* can we make more room in the buffer? */
if (buf != buf_head) {
memmove(buf_head, buf, len_buf);
buf = buf_head;
}
/* fetch as much as we can of the argument */
len_tmp = strncpy_from_user(&buf_head[len_buf], p,
len_max - len_buf);
if (len_tmp == -EFAULT) {
/* unable to copy from userspace */
send_sig(SIGKILL, current, 0);
goto out;
} else if (len_tmp == (len_max - len_buf)) {
/* buffer is not large enough */
require_data = true;
/* NOTE: if we are going to span multiple
* buffers force the encoding so we stand
* a chance at a sane len_full value and
* consistent record encoding */
encode = true;
len_full = len_full * 2;
p += len_tmp;
} else {
require_data = false;
if (!encode)
encode = audit_string_contains_control(
buf, len_tmp);
/* try to use a trusted value for len_full */
if (len_full < len_max)
len_full = (encode ?
len_tmp * 2 : len_tmp);
p += len_tmp + 1;
}
len_buf += len_tmp;
buf_head[len_buf] = '\0';
/* length of the buffer in the audit record? */
len_abuf = (encode ? len_buf * 2 : len_buf + 2);
}
/* write as much as we can to the audit log */
if (len_buf >= 0) {
/* NOTE: some magic numbers here - basically if we
* can't fit a reasonable amount of data into the
* existing audit buffer, flush it and start with
* a new buffer */
if ((sizeof(abuf) + 8) > len_rem) {
len_rem = len_max;
audit_log_end(*ab);
*ab = audit_log_start(context,
GFP_KERNEL, AUDIT_EXECVE);
if (!*ab)
goto out;
}
/* create the non-arg portion of the arg record */
len_tmp = 0;
if (require_data || (iter > 0) ||
((len_abuf + sizeof(abuf)) > len_rem)) {
if (iter == 0) {
len_tmp += snprintf(&abuf[len_tmp],
sizeof(abuf) - len_tmp,
" a%d_len=%lu",
arg, len_full);
}
len_tmp += snprintf(&abuf[len_tmp],
sizeof(abuf) - len_tmp,
" a%d[%d]=", arg, iter++);
} else
len_tmp += snprintf(&abuf[len_tmp],
sizeof(abuf) - len_tmp,
" a%d=", arg);
WARN_ON(len_tmp >= sizeof(abuf));
abuf[sizeof(abuf) - 1] = '\0';
/* log the arg in the audit record */
audit_log_format(*ab, "%s", abuf);
len_rem -= len_tmp;
len_tmp = len_buf;
if (encode) {
if (len_abuf > len_rem)
len_tmp = len_rem / 2; /* encoding */
audit_log_n_hex(*ab, buf, len_tmp);
len_rem -= len_tmp * 2;
len_abuf -= len_tmp * 2;
} else {
if (len_abuf > len_rem)
len_tmp = len_rem - 2; /* quotes */
audit_log_n_string(*ab, buf, len_tmp);
len_rem -= len_tmp + 2;
/* don't subtract the "2" because we still need
* to add quotes to the remaining string */
len_abuf -= len_tmp;
}
len_buf -= len_tmp;
buf += len_tmp;
}
/* ready to move to the next argument? */
if ((len_buf == 0) && !require_data) {
arg++;
iter = 0;
len_full = 0;
require_data = true;
encode = false;
}
} while (arg < context->execve.argc);
/* NOTE: the caller handles the final audit_log_end() call */
out:
kfree(buf_head);
}
static void audit_log_cap(struct audit_buffer *ab, char *prefix,
kernel_cap_t *cap)
{
if (cap_isclear(*cap)) {
audit_log_format(ab, " %s=0", prefix);
return;
}
audit_log_format(ab, " %s=%016llx", prefix, cap->val);
}
static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
{
if (name->fcap_ver == -1) {
audit_log_format(ab, " cap_fe=? cap_fver=? cap_fp=? cap_fi=?");
return;
}
audit_log_cap(ab, "cap_fp", &name->fcap.permitted);
audit_log_cap(ab, "cap_fi", &name->fcap.inheritable);
audit_log_format(ab, " cap_fe=%d cap_fver=%x cap_frootid=%d",
name->fcap.fE, name->fcap_ver,
from_kuid(&init_user_ns, name->fcap.rootid));
}
static void audit_log_time(struct audit_context *context, struct audit_buffer **ab)
{
const struct audit_ntp_data *ntp = &context->time.ntp_data;
const struct timespec64 *tk = &context->time.tk_injoffset;
static const char * const ntp_name[] = {
"offset",
"freq",
"status",
"tai",
"tick",
"adjust",
};
int type;
if (context->type == AUDIT_TIME_ADJNTPVAL) {
for (type = 0; type < AUDIT_NTP_NVALS; type++) {
if (ntp->vals[type].newval != ntp->vals[type].oldval) {
if (!*ab) {
*ab = audit_log_start(context,
GFP_KERNEL,
AUDIT_TIME_ADJNTPVAL);
if (!*ab)
return;
}
audit_log_format(*ab, "op=%s old=%lli new=%lli",
ntp_name[type],
ntp->vals[type].oldval,
ntp->vals[type].newval);
audit_log_end(*ab);
*ab = NULL;
}
}
}
if (tk->tv_sec != 0 || tk->tv_nsec != 0) {
if (!*ab) {
*ab = audit_log_start(context, GFP_KERNEL,
AUDIT_TIME_INJOFFSET);
if (!*ab)
return;
}
audit_log_format(*ab, "sec=%lli nsec=%li",
(long long)tk->tv_sec, tk->tv_nsec);
audit_log_end(*ab);
*ab = NULL;
}
}
static void show_special(struct audit_context *context, int *call_panic)
{
struct audit_buffer *ab;
int i;
ab = audit_log_start(context, GFP_KERNEL, context->type);
if (!ab)
return;
switch (context->type) {
case AUDIT_SOCKETCALL: {
int nargs = context->socketcall.nargs;
audit_log_format(ab, "nargs=%d", nargs);
for (i = 0; i < nargs; i++)
audit_log_format(ab, " a%d=%lx", i,
context->socketcall.args[i]);
break; }
case AUDIT_IPC:
audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho",
from_kuid(&init_user_ns, context->ipc.uid),
from_kgid(&init_user_ns, context->ipc.gid),
context->ipc.mode);
if (lsmprop_is_set(&context->ipc.oprop)) {
if (audit_log_obj_ctx(ab, &context->ipc.oprop))
*call_panic = 1;
}
if (context->ipc.has_perm) {
audit_log_end(ab);
ab = audit_log_start(context, GFP_KERNEL,
AUDIT_IPC_SET_PERM);
if (unlikely(!ab))
return;
audit_log_format(ab,
"qbytes=%lx ouid=%u ogid=%u mode=%#ho",
context->ipc.qbytes,
context->ipc.perm_uid,
context->ipc.perm_gid,
context->ipc.perm_mode);
}
break;
case AUDIT_MQ_OPEN:
audit_log_format(ab,
"oflag=0x%x mode=%#ho mq_flags=0x%lx mq_maxmsg=%ld "
"mq_msgsize=%ld mq_curmsgs=%ld",
context->mq_open.oflag, context->mq_open.mode,
context->mq_open.attr.mq_flags,
context->mq_open.attr.mq_maxmsg,
context->mq_open.attr.mq_msgsize,
context->mq_open.attr.mq_curmsgs);
break;
case AUDIT_MQ_SENDRECV:
audit_log_format(ab,
"mqdes=%d msg_len=%zd msg_prio=%u "
"abs_timeout_sec=%lld abs_timeout_nsec=%ld",
context->mq_sendrecv.mqdes,
context->mq_sendrecv.msg_len,
context->mq_sendrecv.msg_prio,
(long long) context->mq_sendrecv.abs_timeout.tv_sec,
context->mq_sendrecv.abs_timeout.tv_nsec);
break;
case AUDIT_MQ_NOTIFY:
audit_log_format(ab, "mqdes=%d sigev_signo=%d",
context->mq_notify.mqdes,
context->mq_notify.sigev_signo);
break;
case AUDIT_MQ_GETSETATTR: {
struct mq_attr *attr = &context->mq_getsetattr.mqstat;
audit_log_format(ab,
"mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld "
"mq_curmsgs=%ld ",
context->mq_getsetattr.mqdes,
attr->mq_flags, attr->mq_maxmsg,
attr->mq_msgsize, attr->mq_curmsgs);
break; }
case AUDIT_CAPSET:
audit_log_format(ab, "pid=%d", context->capset.pid);
audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable);
audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted);
audit_log_cap(ab, "cap_pe", &context->capset.cap.effective);
audit_log_cap(ab, "cap_pa", &context->capset.cap.ambient);
break;
case AUDIT_MMAP:
audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd,
context->mmap.flags);
break;
case AUDIT_OPENAT2:
audit_log_format(ab, "oflag=0%llo mode=0%llo resolve=0x%llx",
context->openat2.flags,
context->openat2.mode,
context->openat2.resolve);
break;
case AUDIT_EXECVE:
audit_log_execve_info(context, &ab);
break;
case AUDIT_KERN_MODULE:
audit_log_format(ab, "name=");
if (context->module.name) {
audit_log_untrustedstring(ab, context->module.name);
} else
audit_log_format(ab, "(null)");
break;
case AUDIT_TIME_ADJNTPVAL:
case AUDIT_TIME_INJOFFSET:
/* this call deviates from the rest, eating the buffer */
audit_log_time(context, &ab);
break;
}
audit_log_end(ab);
}
static inline int audit_proctitle_rtrim(char *proctitle, int len)
{
char *end = proctitle + len - 1;
while (end > proctitle && !isprint(*end))
end--;
/* catch the case where proctitle is only 1 non-print character */
len = end - proctitle + 1;
len -= isprint(proctitle[len-1]) == 0;
return len;
}
/*
* audit_log_name - produce AUDIT_PATH record from struct audit_names
* @context: audit_context for the task
* @n: audit_names structure with reportable details
* @path: optional path to report instead of audit_names->name
* @record_num: record number to report when handling a list of names
* @call_panic: optional pointer to int that will be updated if secid fails
*/
static void audit_log_name(struct audit_context *context, struct audit_names *n,
const struct path *path, int record_num, int *call_panic)
{
struct audit_buffer *ab;
ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
if (!ab)
return;
audit_log_format(ab, "item=%d", record_num);
if (path)
audit_log_d_path(ab, " name=", path);
else if (n->name) {
switch (n->name_len) {
case AUDIT_NAME_FULL:
/* log the full path */
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, n->name->name);
break;
case 0:
/* name was specified as a relative path and the
* directory component is the cwd
*/
if (context->pwd.dentry && context->pwd.mnt)
audit_log_d_path(ab, " name=", &context->pwd);
else
audit_log_format(ab, " name=(null)");
break;
default:
/* log the name's directory component */
audit_log_format(ab, " name=");
audit_log_n_untrustedstring(ab, n->name->name,
n->name_len);
}
} else
audit_log_format(ab, " name=(null)");
if (n->ino != AUDIT_INO_UNSET)
audit_log_format(ab, " inode=%lu dev=%02x:%02x mode=%#ho ouid=%u ogid=%u rdev=%02x:%02x",
n->ino,
MAJOR(n->dev),
MINOR(n->dev),
n->mode,
from_kuid(&init_user_ns, n->uid),
from_kgid(&init_user_ns, n->gid),
MAJOR(n->rdev),
MINOR(n->rdev));
if (lsmprop_is_set(&n->oprop) &&
audit_log_obj_ctx(ab, &n->oprop))
*call_panic = 2;
/* log the audit_names record type */
switch (n->type) {
case AUDIT_TYPE_NORMAL:
audit_log_format(ab, " nametype=NORMAL");
break;
case AUDIT_TYPE_PARENT:
audit_log_format(ab, " nametype=PARENT");
break;
case AUDIT_TYPE_CHILD_DELETE:
audit_log_format(ab, " nametype=DELETE");
break;
case AUDIT_TYPE_CHILD_CREATE:
audit_log_format(ab, " nametype=CREATE");
break;
default:
audit_log_format(ab, " nametype=UNKNOWN");
break;
}
audit_log_fcaps(ab, n);
audit_log_end(ab);
}
static void audit_log_proctitle(void)
{
int res;
char *buf;
char *msg = "(null)";
int len = strlen(msg);
struct audit_context *context = audit_context();
struct audit_buffer *ab;
ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
if (!ab)
return; /* audit_panic or being filtered */
audit_log_format(ab, "proctitle=");
/* Not cached */
if (!context->proctitle.value) {
buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL);
if (!buf)
goto out;
/* Historically called this from procfs naming */
res = get_cmdline(current, buf, MAX_PROCTITLE_AUDIT_LEN);
if (res == 0) {
kfree(buf);
goto out;
}
res = audit_proctitle_rtrim(buf, res);
if (res == 0) {
kfree(buf);
goto out;
}
context->proctitle.value = buf;
context->proctitle.len = res;
}
msg = context->proctitle.value;
len = context->proctitle.len;
out:
audit_log_n_untrustedstring(ab, msg, len);
audit_log_end(ab);
}
/**
* audit_log_uring - generate an AUDIT_URINGOP record
* @ctx: the audit context
*/
static void audit_log_uring(struct audit_context *ctx)
{
struct audit_buffer *ab;
const struct cred *cred;
ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_URINGOP);
if (!ab)
return;
cred = current_cred();
audit_log_format(ab, "uring_op=%d", ctx->uring_op);
if (ctx->return_valid != AUDITSC_INVALID)
audit_log_format(ab, " success=%s exit=%ld",
str_yes_no(ctx->return_valid ==
AUDITSC_SUCCESS),
ctx->return_code);
audit_log_format(ab,
" items=%d"
" ppid=%d pid=%d uid=%u gid=%u euid=%u suid=%u"
" fsuid=%u egid=%u sgid=%u fsgid=%u",
ctx->name_count,
task_ppid_nr(current), task_tgid_nr(current),
from_kuid(&init_user_ns, cred->uid),
from_kgid(&init_user_ns, cred->gid),
from_kuid(&init_user_ns, cred->euid),
from_kuid(&init_user_ns, cred->suid),
from_kuid(&init_user_ns, cred->fsuid),
from_kgid(&init_user_ns, cred->egid),
from_kgid(&init_user_ns, cred->sgid),
from_kgid(&init_user_ns, cred->fsgid));
audit_log_task_context(ab);
audit_log_key(ab, ctx->filterkey);
audit_log_end(ab);
}
static void audit_log_exit(void)
{
int i, call_panic = 0;
struct audit_context *context = audit_context();
struct audit_buffer *ab;
struct audit_aux_data *aux;
struct audit_names *n;
context->personality = current->personality;
switch (context->context) {
case AUDIT_CTX_SYSCALL:
ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
if (!ab)
return;
audit_log_format(ab, "arch=%x syscall=%d",
context->arch, context->major);
if (context->personality != PER_LINUX)
audit_log_format(ab, " per=%lx", context->personality);
if (context->return_valid != AUDITSC_INVALID)
audit_log_format(ab, " success=%s exit=%ld",
str_yes_no(context->return_valid ==
AUDITSC_SUCCESS),
context->return_code);
audit_log_format(ab,
" a0=%lx a1=%lx a2=%lx a3=%lx items=%d",
context->argv[0],
context->argv[1],
context->argv[2],
context->argv[3],
context->name_count);
audit_log_task_info(ab);
audit_log_key(ab, context->filterkey);
audit_log_end(ab);
break;
case AUDIT_CTX_URING:
audit_log_uring(context);
break;
default:
BUG();
break;
}
for (aux = context->aux; aux; aux = aux->next) {
ab = audit_log_start(context, GFP_KERNEL, aux->type);
if (!ab)
continue; /* audit_panic has been called */
switch (aux->type) {
case AUDIT_BPRM_FCAPS: {
struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
audit_log_format(ab, "fver=%x", axs->fcap_ver);
audit_log_cap(ab, "fp", &axs->fcap.permitted);
audit_log_cap(ab, "fi", &axs->fcap.inheritable);
audit_log_format(ab, " fe=%d", axs->fcap.fE);
audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted);
audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable);
audit_log_cap(ab, "old_pe", &axs->old_pcap.effective);
audit_log_cap(ab, "old_pa", &axs->old_pcap.ambient);
audit_log_cap(ab, "pp", &axs->new_pcap.permitted);
audit_log_cap(ab, "pi", &axs->new_pcap.inheritable);
audit_log_cap(ab, "pe", &axs->new_pcap.effective);
audit_log_cap(ab, "pa", &axs->new_pcap.ambient);
audit_log_format(ab, " frootid=%d",
from_kuid(&init_user_ns,
axs->fcap.rootid));
break; }
}
audit_log_end(ab);
}
if (context->type)
show_special(context, &call_panic);
if (context->fds[0] >= 0) {
ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR);
if (ab) {
audit_log_format(ab, "fd0=%d fd1=%d",
context->fds[0], context->fds[1]);
audit_log_end(ab);
}
}
if (context->sockaddr_len) {
ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR);
if (ab) {
audit_log_format(ab, "saddr=");
audit_log_n_hex(ab, (void *)context->sockaddr,
context->sockaddr_len);
audit_log_end(ab);
}
}
for (aux = context->aux_pids; aux; aux = aux->next) {
struct audit_aux_data_pids *axs = (void *)aux;
for (i = 0; i < axs->pid_count; i++)
if (audit_log_pid_context(context, axs->target_pid[i],
axs->target_auid[i],
axs->target_uid[i],
axs->target_sessionid[i],
&axs->target_ref[i],
axs->target_comm[i]))
call_panic = 1;
}
if (context->target_pid &&
audit_log_pid_context(context, context->target_pid,
context->target_auid, context->target_uid,
context->target_sessionid,
&context->target_ref,
context->target_comm))
call_panic = 1;
if (context->pwd.dentry && context->pwd.mnt) {
ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD);
if (ab) {
audit_log_d_path(ab, "cwd=", &context->pwd);
audit_log_end(ab);
}
}
i = 0;
list_for_each_entry(n, &context->names_list, list) {
if (n->hidden)
continue;
audit_log_name(context, n, NULL, i++, &call_panic);
}
if (context->context == AUDIT_CTX_SYSCALL)
audit_log_proctitle();
/* Send end of event record to help user space know we are finished */
ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
if (ab)
audit_log_end(ab);
if (call_panic)
audit_panic("error in audit_log_exit()");
}
/**
* __audit_free - free a per-task audit context
* @tsk: task whose audit context block to free
*
* Called from copy_process, do_exit, and the io_uring code
*/
void __audit_free(struct task_struct *tsk)
{
struct audit_context *context = tsk->audit_context;
if (!context)
return;
/* this may generate CONFIG_CHANGE records */
if (!list_empty(&context->killed_trees))
audit_kill_trees(context);
/* We are called either by do_exit() or the fork() error handling code;
* in the former case tsk == current and in the latter tsk is a
* random task_struct that doesn't have any meaningful data we
* need to log via audit_log_exit().
*/
if (tsk == current && !context->dummy) {
context->return_valid = AUDITSC_INVALID;
context->return_code = 0;
if (context->context == AUDIT_CTX_SYSCALL) {
audit_filter_syscall(tsk, context);
audit_filter_inodes(tsk, context);
if (context->current_state == AUDIT_STATE_RECORD)
audit_log_exit();
} else if (context->context == AUDIT_CTX_URING) {
/* TODO: verify this case is real and valid */
audit_filter_uring(tsk, context);
audit_filter_inodes(tsk, context);
if (context->current_state == AUDIT_STATE_RECORD)
audit_log_uring(context);
}
}
audit_set_context(tsk, NULL);
audit_free_context(context);
}
/**
* audit_return_fixup - fixup the return codes in the audit_context
* @ctx: the audit_context
* @success: true/false value to indicate if the operation succeeded or not
* @code: operation return code
*
* We need to fixup the return code in the audit logs if the actual return
* codes are later going to be fixed by the arch specific signal handlers.
*/
static void audit_return_fixup(struct audit_context *ctx,
int success, long code)
{
/*
* This is actually a test for:
* (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
* (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
*
* but is faster than a bunch of ||
*/
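	/*
	 * The errno values are consecutive (ERESTARTSYS=512, ERESTARTNOINTR=513,
	 * ERESTARTNOHAND=514, ENOIOCTLCMD=515, ERESTART_RESTARTBLOCK=516), so
	 * the range check below covers exactly the four restart codes.
	 */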
if (unlikely(code <= -ERESTARTSYS) &&
(code >= -ERESTART_RESTARTBLOCK) &&
(code != -ENOIOCTLCMD))
ctx->return_code = -EINTR;
else
ctx->return_code = code;
ctx->return_valid = (success ? AUDITSC_SUCCESS : AUDITSC_FAILURE);
}
/**
* __audit_uring_entry - prepare the kernel task's audit context for io_uring
* @op: the io_uring opcode
*
* This is similar to audit_syscall_entry() but is intended for use by io_uring
* operations. This function should only ever be called from
* audit_uring_entry() as we rely on the audit context checking present in that
* function.
*/
void __audit_uring_entry(u8 op)
{
struct audit_context *ctx = audit_context();
if (ctx->state == AUDIT_STATE_DISABLED)
return;
/*
* NOTE: It's possible that we can be called from the process' context
* before it returns to userspace, and before audit_syscall_exit()
* is called. In this case there is not much to do, just record
* the io_uring details and return.
*/
ctx->uring_op = op;
if (ctx->context == AUDIT_CTX_SYSCALL)
return;
ctx->dummy = !audit_n_rules;
if (!ctx->dummy && ctx->state == AUDIT_STATE_BUILD)
ctx->prio = 0;
ctx->context = AUDIT_CTX_URING;
ctx->current_state = ctx->state;
ktime_get_coarse_real_ts64(&ctx->stamp.ctime);
}
/**
* __audit_uring_exit - wrap up the kernel task's audit context after io_uring
* @success: true/false value to indicate if the operation succeeded or not
* @code: operation return code
*
* This is similar to audit_syscall_exit() but is intended for use by io_uring
* operations. This function should only ever be called from
* audit_uring_exit() as we rely on the audit context checking present in that
* function.
*/
void __audit_uring_exit(int success, long code)
{
struct audit_context *ctx = audit_context();
if (ctx->dummy) {
if (ctx->context != AUDIT_CTX_URING)
return;
goto out;
}
audit_return_fixup(ctx, success, code);
if (ctx->context == AUDIT_CTX_SYSCALL) {
/*
* NOTE: See the note in __audit_uring_entry() about the case
* where we may be called from process context before we
* return to userspace via audit_syscall_exit(). In this
* case we simply emit a URINGOP record and bail, the
* normal syscall exit handling will take care of
* everything else.
* It is also worth mentioning that when we are called,
* the current process creds may differ from the creds
* used during the normal syscall processing; keep that
* in mind if/when we move the record generation code.
*/
/*
* We need to filter on the syscall info here to decide if we
* should emit a URINGOP record. I know it seems odd but this
* solves the problem where users have a filter to block *all*
* syscall records in the "exit" filter; we want to preserve
* the behavior here.
*/
audit_filter_syscall(current, ctx);
if (ctx->current_state != AUDIT_STATE_RECORD)
audit_filter_uring(current, ctx);
audit_filter_inodes(current, ctx);
if (ctx->current_state != AUDIT_STATE_RECORD)
return;
audit_log_uring(ctx);
return;
}
/* this may generate CONFIG_CHANGE records */
if (!list_empty(&ctx->killed_trees))
audit_kill_trees(ctx);
/* run through both filters to ensure we set the filterkey properly */
audit_filter_uring(current, ctx);
audit_filter_inodes(current, ctx);
if (ctx->current_state != AUDIT_STATE_RECORD)
goto out;
audit_log_exit();
out:
audit_reset_context(ctx);
}
/**
* __audit_syscall_entry - fill in an audit record at syscall entry
* @major: major syscall type (function)
* @a1: additional syscall register 1
* @a2: additional syscall register 2
* @a3: additional syscall register 3
* @a4: additional syscall register 4
*
* Fill in audit context at syscall entry. This only happens if the
* audit context was created when the task was created and the state or
* filters demand the audit context be built. If the state from the
* per-task filter or from the per-syscall filter is AUDIT_STATE_RECORD,
* then the record will be written at syscall exit time (otherwise, it
* will only be written if another part of the kernel requests that it
* be written).
*/
void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4)
{
struct audit_context *context = audit_context();
enum audit_state state;
if (!audit_enabled || !context)
return;
WARN_ON(context->context != AUDIT_CTX_UNUSED);
WARN_ON(context->name_count);
if (context->context != AUDIT_CTX_UNUSED || context->name_count) {
audit_panic("unrecoverable error in audit_syscall_entry()");
return;
}
state = context->state;
if (state == AUDIT_STATE_DISABLED)
return;
context->dummy = !audit_n_rules;
if (!context->dummy && state == AUDIT_STATE_BUILD) {
context->prio = 0;
if (auditd_test_task(current))
return;
}
context->arch = syscall_get_arch(current);
context->major = major;
context->argv[0] = a1;
context->argv[1] = a2;
context->argv[2] = a3;
context->argv[3] = a4;
context->context = AUDIT_CTX_SYSCALL;
context->current_state = state;
ktime_get_coarse_real_ts64(&context->stamp.ctime);
}
/**
* __audit_syscall_exit - deallocate audit context after a system call
* @success: success value of the syscall
* @return_code: return value of the syscall
*
* Tear down after system call. If the audit context has been marked as
* auditable (either because of the AUDIT_STATE_RECORD state from
* filtering, or because some other part of the kernel wrote an audit
* message), then write out the syscall information. In all cases,
* free the names stored from getname().
*/
void __audit_syscall_exit(int success, long return_code)
{
struct audit_context *context = audit_context();
if (!context || context->dummy ||
context->context != AUDIT_CTX_SYSCALL)
goto out;
/* this may generate CONFIG_CHANGE records */
if (!list_empty(&context->killed_trees))
audit_kill_trees(context);
audit_return_fixup(context, success, return_code);
/* run through both filters to ensure we set the filterkey properly */
audit_filter_syscall(current, context);
audit_filter_inodes(current, context);
if (context->current_state != AUDIT_STATE_RECORD)
goto out;
audit_log_exit();
out:
audit_reset_context(context);
}
static inline void handle_one(const struct inode *inode)
{
struct audit_context *context;
struct audit_tree_refs *p;
struct audit_chunk *chunk;
int count;
if (likely(!inode->i_fsnotify_marks))
return;
context = audit_context();
p = context->trees;
count = context->tree_count;
rcu_read_lock();
chunk = audit_tree_lookup(inode);
rcu_read_unlock();
if (!chunk)
return;
if (likely(put_tree_ref(context, chunk)))
return;
if (unlikely(!grow_tree_refs(context))) {
pr_warn("out of memory, audit has lost a tree reference\n");
audit_set_auditable(context);
audit_put_chunk(chunk);
unroll_tree_refs(context, p, count);
return;
}
put_tree_ref(context, chunk);
}
static void handle_path(const struct dentry *dentry)
{
struct audit_context *context;
struct audit_tree_refs *p;
const struct dentry *d, *parent;
struct audit_chunk *drop;
unsigned long seq;
int count;
context = audit_context();
p = context->trees;
count = context->tree_count;
retry:
drop = NULL;
d = dentry;
rcu_read_lock();
seq = read_seqbegin(&rename_lock);
for (;;) {
struct inode *inode = d_backing_inode(d);
if (inode && unlikely(inode->i_fsnotify_marks)) {
struct audit_chunk *chunk;
chunk = audit_tree_lookup(inode);
if (chunk) {
if (unlikely(!put_tree_ref(context, chunk))) {
drop = chunk;
break;
}
}
}
parent = d->d_parent;
if (parent == d)
break;
d = parent;
}
if (unlikely(read_seqretry(&rename_lock, seq) || drop)) { /* in this order */
rcu_read_unlock();
if (!drop) {
/* just a race with rename */
unroll_tree_refs(context, p, count);
goto retry;
}
audit_put_chunk(drop);
if (grow_tree_refs(context)) {
/* OK, got more space */
unroll_tree_refs(context, p, count);
goto retry;
}
/* too bad */
pr_warn("out of memory, audit has lost a tree reference\n");
unroll_tree_refs(context, p, count);
audit_set_auditable(context);
return;
}
rcu_read_unlock();
}
static struct audit_names *audit_alloc_name(struct audit_context *context,
unsigned char type)
{
struct audit_names *aname;
if (context->name_count < AUDIT_NAMES) {
aname = &context->preallocated_names[context->name_count];
memset(aname, 0, sizeof(*aname));
} else {
aname = kzalloc(sizeof(*aname), GFP_NOFS);
if (!aname)
return NULL;
aname->should_free = true;
}
aname->ino = AUDIT_INO_UNSET;
aname->type = type;
list_add_tail(&aname->list, &context->names_list);
context->name_count++;
if (!context->pwd.dentry)
get_fs_pwd(current->fs, &context->pwd);
return aname;
}
/**
* __audit_reusename - fill out filename with info from existing entry
* @uptr: userland ptr to pathname
*
* Search the audit_names list for the current audit context. If there is an
* existing entry with a matching "uptr" then return the filename
* associated with that audit_name. If not, return NULL.
*/
struct filename *
__audit_reusename(const __user char *uptr)
{
struct audit_context *context = audit_context();
struct audit_names *n;
list_for_each_entry(n, &context->names_list, list) {
if (!n->name)
continue;
if (n->name->uptr == uptr)
return refname(n->name);
}
return NULL;
}
/**
* __audit_getname - add a name to the list
* @name: name to add
*
* Add a name to the list of audit names for this context.
* Called from fs/namei.c:getname().
*/
void __audit_getname(struct filename *name)
{
struct audit_context *context = audit_context();
struct audit_names *n;
if (context->context == AUDIT_CTX_UNUSED)
return;
n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
if (!n)
return;
n->name = name;
n->name_len = AUDIT_NAME_FULL;
name->aname = n;
refname(name);
}
static inline int audit_copy_fcaps(struct audit_names *name,
const struct dentry *dentry)
{
struct cpu_vfs_cap_data caps;
int rc;
if (!dentry)
return 0;
rc = get_vfs_caps_from_disk(&nop_mnt_idmap, dentry, &caps);
if (rc)
return rc;
name->fcap.permitted = caps.permitted;
name->fcap.inheritable = caps.inheritable;
name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
name->fcap.rootid = caps.rootid;
name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >>
VFS_CAP_REVISION_SHIFT;
return 0;
}
/* Copy inode data into an audit_names. */
static void audit_copy_inode(struct audit_names *name,
const struct dentry *dentry,
struct inode *inode, unsigned int flags)
{
name->ino = inode->i_ino;
name->dev = inode->i_sb->s_dev;
name->mode = inode->i_mode;
name->uid = inode->i_uid;
name->gid = inode->i_gid;
name->rdev = inode->i_rdev;
security_inode_getlsmprop(inode, &name->oprop);
if (flags & AUDIT_INODE_NOEVAL) {
name->fcap_ver = -1;
return;
}
audit_copy_fcaps(name, dentry);
}
/**
* __audit_inode - store the inode and device from a lookup
* @name: name being audited
* @dentry: dentry being audited
* @flags: attributes for this particular entry
*/
void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags)
{
struct audit_context *context = audit_context();
struct inode *inode = d_backing_inode(dentry);
struct audit_names *n;
bool parent = flags & AUDIT_INODE_PARENT;
struct audit_entry *e;
struct list_head *list = &audit_filter_list[AUDIT_FILTER_FS];
int i;
if (context->context == AUDIT_CTX_UNUSED)
return;
rcu_read_lock();
list_for_each_entry_rcu(e, list, list) {
for (i = 0; i < e->rule.field_count; i++) {
struct audit_field *f = &e->rule.fields[i];
if (f->type == AUDIT_FSTYPE
&& audit_comparator(inode->i_sb->s_magic,
f->op, f->val)
&& e->rule.action == AUDIT_NEVER) {
rcu_read_unlock();
return;
}
}
}
rcu_read_unlock();
if (!name)
goto out_alloc;
/*
* If we have a pointer to an audit_names entry already, then we can
* just use it directly if the type is correct.
*/
n = name->aname;
if (n) {
if (parent) {
if (n->type == AUDIT_TYPE_PARENT ||
n->type == AUDIT_TYPE_UNKNOWN)
goto out;
} else {
if (n->type != AUDIT_TYPE_PARENT)
goto out;
}
}
list_for_each_entry_reverse(n, &context->names_list, list) {
if (n->ino) {
/* valid inode number, use that for the comparison */
if (n->ino != inode->i_ino ||
n->dev != inode->i_sb->s_dev)
continue;
} else if (n->name) {
/* inode number has not been set, check the name */
if (strcmp(n->name->name, name->name))
continue;
} else
/* no inode and no name (?!) ... this is odd ... */
continue;
/* match the correct record type */
if (parent) {
if (n->type == AUDIT_TYPE_PARENT ||
n->type == AUDIT_TYPE_UNKNOWN)
goto out;
} else {
if (n->type != AUDIT_TYPE_PARENT)
goto out;
}
}
out_alloc:
/* unable to find an entry with both a matching name and type */
n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
if (!n)
return;
if (name) {
n->name = name;
refname(name);
}
out:
if (parent) {
n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
n->type = AUDIT_TYPE_PARENT;
if (flags & AUDIT_INODE_HIDDEN)
n->hidden = true;
} else {
n->name_len = AUDIT_NAME_FULL;
n->type = AUDIT_TYPE_NORMAL;
}
handle_path(dentry);
audit_copy_inode(n, dentry, inode, flags & AUDIT_INODE_NOEVAL);
}
void __audit_file(const struct file *file)
{
__audit_inode(NULL, file->f_path.dentry, 0);
}
/**
* __audit_inode_child - collect inode info for created/removed objects
* @parent: inode of dentry parent
* @dentry: dentry being audited
* @type: AUDIT_TYPE_* value that we're looking for
*
* For syscalls that create or remove filesystem objects, audit_inode
* can only collect information for the filesystem object's parent.
* This call updates the audit context with the child's information.
* Syscalls that create a new filesystem object must be hooked after
* the object is created. Syscalls that remove a filesystem object
* must be hooked prior, in order to capture the target inode during
* unsuccessful attempts.
*/
void __audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type)
{
struct audit_context *context = audit_context();
struct inode *inode = d_backing_inode(dentry);
const struct qstr *dname = &dentry->d_name;
struct audit_names *n, *found_parent = NULL, *found_child = NULL;
struct audit_entry *e;
struct list_head *list = &audit_filter_list[AUDIT_FILTER_FS];
int i;
if (context->context == AUDIT_CTX_UNUSED)
return;
rcu_read_lock();
list_for_each_entry_rcu(e, list, list) {
for (i = 0; i < e->rule.field_count; i++) {
struct audit_field *f = &e->rule.fields[i];
if (f->type == AUDIT_FSTYPE
&& audit_comparator(parent->i_sb->s_magic,
f->op, f->val)
&& e->rule.action == AUDIT_NEVER) {
rcu_read_unlock();
return;
}
}
}
rcu_read_unlock();
if (inode)
handle_one(inode);
/* look for a parent entry first */
list_for_each_entry(n, &context->names_list, list) {
if (!n->name ||
(n->type != AUDIT_TYPE_PARENT &&
n->type != AUDIT_TYPE_UNKNOWN))
continue;
if (n->ino == parent->i_ino && n->dev == parent->i_sb->s_dev &&
!audit_compare_dname_path(dname,
n->name->name, n->name_len)) {
if (n->type == AUDIT_TYPE_UNKNOWN)
n->type = AUDIT_TYPE_PARENT;
found_parent = n;
break;
}
}
cond_resched();
/* is there a matching child entry? */
list_for_each_entry(n, &context->names_list, list) {
/* can only match entries that have a name */
if (!n->name ||
(n->type != type && n->type != AUDIT_TYPE_UNKNOWN))
continue;
if (!strcmp(dname->name, n->name->name) ||
!audit_compare_dname_path(dname, n->name->name,
found_parent ?
found_parent->name_len :
AUDIT_NAME_FULL)) {
if (n->type == AUDIT_TYPE_UNKNOWN)
n->type = type;
found_child = n;
break;
}
}
if (!found_parent) {
/* create a new, "anonymous" parent record */
n = audit_alloc_name(context, AUDIT_TYPE_PARENT);
if (!n)
return;
audit_copy_inode(n, NULL, parent, 0);
}
if (!found_child) {
found_child = audit_alloc_name(context, type);
if (!found_child)
return;
/* Re-use the name belonging to the slot for a matching parent
* directory. All names for this context are relinquished in
* audit_free_names() */
if (found_parent) {
found_child->name = found_parent->name;
found_child->name_len = AUDIT_NAME_FULL;
refname(found_child->name);
}
}
if (inode)
audit_copy_inode(found_child, dentry, inode, 0);
else
found_child->ino = AUDIT_INO_UNSET;
}
EXPORT_SYMBOL_GPL(__audit_inode_child);
/**
* auditsc_get_stamp - get local copies of audit_context values
* @ctx: audit_context for the task
* @stamp: timestamp to record
*
* Also sets the context as auditable.
*/
int auditsc_get_stamp(struct audit_context *ctx, struct audit_stamp *stamp)
{
if (ctx->context == AUDIT_CTX_UNUSED)
return 0;
if (!ctx->stamp.serial)
ctx->stamp.serial = audit_serial();
*stamp = ctx->stamp;
if (!ctx->prio) {
ctx->prio = 1;
ctx->current_state = AUDIT_STATE_RECORD;
}
return 1;
}
/**
* __audit_mq_open - record audit data for a POSIX MQ open
* @oflag: open flag
* @mode: mode bits
* @attr: queue attributes
*
*/
void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{
struct audit_context *context = audit_context();
if (attr)
memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr));
else
memset(&context->mq_open.attr, 0, sizeof(struct mq_attr));
context->mq_open.oflag = oflag;
context->mq_open.mode = mode;
context->type = AUDIT_MQ_OPEN;
}
/**
* __audit_mq_sendrecv - record audit data for a POSIX MQ timed send/receive
* @mqdes: MQ descriptor
* @msg_len: Message length
* @msg_prio: Message priority
* @abs_timeout: Message timeout in absolute time
*
*/
void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
const struct timespec64 *abs_timeout)
{
struct audit_context *context = audit_context();
struct timespec64 *p = &context->mq_sendrecv.abs_timeout;
if (abs_timeout)
memcpy(p, abs_timeout, sizeof(*p));
else
memset(p, 0, sizeof(*p));
context->mq_sendrecv.mqdes = mqdes;
context->mq_sendrecv.msg_len = msg_len;
context->mq_sendrecv.msg_prio = msg_prio;
context->type = AUDIT_MQ_SENDRECV;
}
/**
* __audit_mq_notify - record audit data for a POSIX MQ notify
* @mqdes: MQ descriptor
* @notification: Notification event
*
*/
void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
struct audit_context *context = audit_context();
if (notification)
context->mq_notify.sigev_signo = notification->sigev_signo;
else
context->mq_notify.sigev_signo = 0;
context->mq_notify.mqdes = mqdes;
context->type = AUDIT_MQ_NOTIFY;
}
/**
* __audit_mq_getsetattr - record audit data for a POSIX MQ get/set attribute
* @mqdes: MQ descriptor
* @mqstat: MQ flags
*
*/
void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{
struct audit_context *context = audit_context();
context->mq_getsetattr.mqdes = mqdes;
context->mq_getsetattr.mqstat = *mqstat;
context->type = AUDIT_MQ_GETSETATTR;
}
/**
* __audit_ipc_obj - record audit data for ipc object
* @ipcp: ipc permissions
*
*/
void __audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
struct audit_context *context = audit_context();
context->ipc.uid = ipcp->uid;
context->ipc.gid = ipcp->gid;
context->ipc.mode = ipcp->mode;
context->ipc.has_perm = 0;
security_ipc_getlsmprop(ipcp, &context->ipc.oprop);
context->type = AUDIT_IPC;
}
/**
* __audit_ipc_set_perm - record audit data for new ipc permissions
* @qbytes: msgq bytes
* @uid: msgq user id
* @gid: msgq group id
* @mode: msgq mode (permissions)
*
* Called only after audit_ipc_obj().
*/
void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode)
{
struct audit_context *context = audit_context();
context->ipc.qbytes = qbytes;
context->ipc.perm_uid = uid;
context->ipc.perm_gid = gid;
context->ipc.perm_mode = mode;
context->ipc.has_perm = 1;
}
void __audit_bprm(struct linux_binprm *bprm)
{
struct audit_context *context = audit_context();
context->type = AUDIT_EXECVE;
context->execve.argc = bprm->argc;
}
/**
* __audit_socketcall - record audit data for sys_socketcall
* @nargs: number of args, which should not be more than AUDITSC_ARGS.
* @args: args array
*
*/
int __audit_socketcall(int nargs, unsigned long *args)
{
struct audit_context *context = audit_context();
if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
return -EINVAL;
context->type = AUDIT_SOCKETCALL;
context->socketcall.nargs = nargs;
memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
return 0;
}
/**
* __audit_fd_pair - record audit data for pipe and socketpair
* @fd1: the first file descriptor
* @fd2: the second file descriptor
*
*/
void __audit_fd_pair(int fd1, int fd2)
{
struct audit_context *context = audit_context();
context->fds[0] = fd1;
context->fds[1] = fd2;
}
/**
* __audit_sockaddr - record audit data for sys_bind, sys_connect, sys_sendto
* @len: data length in user space
* @a: data address in kernel space
*
* Returns 0 for success or NULL context or < 0 on error.
*/
int __audit_sockaddr(int len, void *a)
{
struct audit_context *context = audit_context();
if (!context->sockaddr) {
void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
if (!p)
return -ENOMEM;
context->sockaddr = p;
}
context->sockaddr_len = len;
memcpy(context->sockaddr, a, len);
return 0;
}
void __audit_ptrace(struct task_struct *t)
{
struct audit_context *context = audit_context();
context->target_pid = task_tgid_nr(t);
context->target_auid = audit_get_loginuid(t);
context->target_uid = task_uid(t);
context->target_sessionid = audit_get_sessionid(t);
strscpy(context->target_comm, t->comm);
security_task_getlsmprop_obj(t, &context->target_ref);
}
/**
* audit_signal_info_syscall - record signal info for syscalls
* @t: task being signaled
*
* If the audit subsystem is being terminated, record the task (pid)
* and uid that is doing that.
*/
int audit_signal_info_syscall(struct task_struct *t)
{
struct audit_aux_data_pids *axp;
struct audit_context *ctx = audit_context();
kuid_t t_uid = task_uid(t);
if (!audit_signals || audit_dummy_context())
return 0;
/* optimize the common case by putting first signal recipient directly
* in audit_context */
if (!ctx->target_pid) {
ctx->target_pid = task_tgid_nr(t);
ctx->target_auid = audit_get_loginuid(t);
ctx->target_uid = t_uid;
ctx->target_sessionid = audit_get_sessionid(t);
strscpy(ctx->target_comm, t->comm);
security_task_getlsmprop_obj(t, &ctx->target_ref);
return 0;
}
axp = (void *)ctx->aux_pids;
if (!axp || axp->pid_count == AUDIT_AUX_PIDS) {
axp = kzalloc(sizeof(*axp), GFP_ATOMIC);
if (!axp)
return -ENOMEM;
axp->d.type = AUDIT_OBJ_PID;
axp->d.next = ctx->aux_pids;
ctx->aux_pids = (void *)axp;
}
BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);
axp->target_pid[axp->pid_count] = task_tgid_nr(t);
axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
axp->target_uid[axp->pid_count] = t_uid;
axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
security_task_getlsmprop_obj(t, &axp->target_ref[axp->pid_count]);
strscpy(axp->target_comm[axp->pid_count], t->comm);
axp->pid_count++;
return 0;
}
/**
* __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps
* @bprm: pointer to the bprm being processed
* @new: the proposed new credentials
* @old: the old credentials
*
* Simply check if the proc already has the caps given by the file and if not
* store the priv escalation info for later auditing at the end of the syscall
*
* -Eric
*/
int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *new, const struct cred *old)
{
struct audit_aux_data_bprm_fcaps *ax;
struct audit_context *context = audit_context();
struct cpu_vfs_cap_data vcaps;
ax = kmalloc(sizeof(*ax), GFP_KERNEL);
if (!ax)
return -ENOMEM;
ax->d.type = AUDIT_BPRM_FCAPS;
ax->d.next = context->aux;
context->aux = (void *)ax;
get_vfs_caps_from_disk(&nop_mnt_idmap,
bprm->file->f_path.dentry, &vcaps);
ax->fcap.permitted = vcaps.permitted;
ax->fcap.inheritable = vcaps.inheritable;
ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
ax->fcap.rootid = vcaps.rootid;
ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
ax->old_pcap.permitted = old->cap_permitted;
ax->old_pcap.inheritable = old->cap_inheritable;
ax->old_pcap.effective = old->cap_effective;
ax->old_pcap.ambient = old->cap_ambient;
ax->new_pcap.permitted = new->cap_permitted;
ax->new_pcap.inheritable = new->cap_inheritable;
ax->new_pcap.effective = new->cap_effective;
ax->new_pcap.ambient = new->cap_ambient;
return 0;
}
/**
* __audit_log_capset - store information about the arguments to the capset syscall
* @new: the new credentials
* @old: the old (current) credentials
*
* Record the arguments userspace sent to sys_capset for later printing by the
* audit system if applicable
*/
void __audit_log_capset(const struct cred *new, const struct cred *old)
{
struct audit_context *context = audit_context();
context->capset.pid = task_tgid_nr(current);
context->capset.cap.effective = new->cap_effective;
context->capset.cap.inheritable = new->cap_inheritable;
context->capset.cap.permitted = new->cap_permitted;
context->capset.cap.ambient = new->cap_ambient;
context->type = AUDIT_CAPSET;
}
void __audit_mmap_fd(int fd, int flags)
{
struct audit_context *context = audit_context();
context->mmap.fd = fd;
context->mmap.flags = flags;
context->type = AUDIT_MMAP;
}
void __audit_openat2_how(struct open_how *how)
{
struct audit_context *context = audit_context();
context->openat2.flags = how->flags;
context->openat2.mode = how->mode;
context->openat2.resolve = how->resolve;
context->type = AUDIT_OPENAT2;
}
void __audit_log_kern_module(const char *name)
{
struct audit_context *context = audit_context();
context->module.name = kstrdup(name, GFP_KERNEL);
if (!context->module.name)
audit_log_lost("out of memory in __audit_log_kern_module");
context->type = AUDIT_KERN_MODULE;
}
void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{
/* {subj,obj}_trust values are {0,1,2}: no,yes,unknown */
switch (friar->hdr.type) {
case FAN_RESPONSE_INFO_NONE:
audit_log(audit_context(), GFP_KERNEL, AUDIT_FANOTIFY,
"resp=%u fan_type=%u fan_info=0 subj_trust=2 obj_trust=2",
response, FAN_RESPONSE_INFO_NONE);
break;
case FAN_RESPONSE_INFO_AUDIT_RULE:
audit_log(audit_context(), GFP_KERNEL, AUDIT_FANOTIFY,
"resp=%u fan_type=%u fan_info=%X subj_trust=%u obj_trust=%u",
response, friar->hdr.type, friar->rule_number,
friar->subj_trust, friar->obj_trust);
}
}
void __audit_tk_injoffset(struct timespec64 offset)
{
struct audit_context *context = audit_context();
/* only set type if not already set by NTP */
if (!context->type)
context->type = AUDIT_TIME_INJOFFSET;
memcpy(&context->time.tk_injoffset, &offset, sizeof(offset));
}
void __audit_ntp_log(const struct audit_ntp_data *ad)
{
struct audit_context *context = audit_context();
int type;
for (type = 0; type < AUDIT_NTP_NVALS; type++)
if (ad->vals[type].newval != ad->vals[type].oldval) {
/* unconditionally set type, overwriting TK */
context->type = AUDIT_TIME_ADJNTPVAL;
memcpy(&context->time.ntp_data, ad, sizeof(*ad));
break;
}
}
void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
enum audit_nfcfgop op, gfp_t gfp)
{
struct audit_buffer *ab;
char comm[sizeof(current->comm)];
ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG);
if (!ab)
return;
audit_log_format(ab, "table=%s family=%u entries=%u op=%s",
name, af, nentries, audit_nfcfgs[op].s);
audit_log_format(ab, " pid=%u", task_tgid_nr(current));
audit_log_task_context(ab); /* subj= */
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, get_task_comm(comm, current));
audit_log_end(ab);
}
EXPORT_SYMBOL_GPL(__audit_log_nfcfg);
static void audit_log_task(struct audit_buffer *ab)
{
kuid_t auid, uid;
kgid_t gid;
unsigned int sessionid;
char comm[sizeof(current->comm)];
auid = audit_get_loginuid(current);
sessionid = audit_get_sessionid(current);
current_uid_gid(&uid, &gid);
audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
from_kuid(&init_user_ns, auid),
from_kuid(&init_user_ns, uid),
from_kgid(&init_user_ns, gid),
sessionid);
audit_log_task_context(ab);
audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
audit_log_untrustedstring(ab, get_task_comm(comm, current));
audit_log_d_path_exe(ab, current->mm);
}
/**
* audit_core_dumps - record information about processes that end abnormally
* @signr: signal value
*
* If a process ends with a core dump, something fishy is going on and we
* should record the event for investigation.
*/
void audit_core_dumps(long signr)
{
struct audit_buffer *ab;
if (!audit_enabled)
return;
if (signr == SIGQUIT) /* don't care for those */
return;
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_ANOM_ABEND);
if (unlikely(!ab))
return;
audit_log_task(ab);
audit_log_format(ab, " sig=%ld res=1", signr);
audit_log_end(ab);
}
/**
* audit_seccomp - record information about a seccomp action
* @syscall: syscall number
* @signr: signal value
* @code: the seccomp action
*
* Record the information associated with a seccomp action. Event filtering for
* seccomp actions that are not to be logged is done in seccomp_log().
* Therefore, this function forces auditing independent of the audit_enabled
* and dummy context state because seccomp actions should be logged even when
* audit is not in use.
*/
void audit_seccomp(unsigned long syscall, long signr, int code)
{
struct audit_buffer *ab;
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_SECCOMP);
if (unlikely(!ab))
return;
audit_log_task(ab);
audit_log_format(ab, " sig=%ld arch=%x syscall=%ld compat=%d ip=0x%lx code=0x%x",
signr, syscall_get_arch(current), syscall,
in_compat_syscall(), KSTK_EIP(current), code);
audit_log_end(ab);
}
void audit_seccomp_actions_logged(const char *names, const char *old_names,
int res)
{
struct audit_buffer *ab;
if (!audit_enabled)
return;
ab = audit_log_start(audit_context(), GFP_KERNEL,
AUDIT_CONFIG_CHANGE);
if (unlikely(!ab))
return;
audit_log_format(ab,
"op=seccomp-logging actions=%s old-actions=%s res=%d",
names, old_names, res);
audit_log_end(ab);
}
struct list_head *audit_killed_trees(void)
{
struct audit_context *ctx = audit_context();
if (likely(!ctx || ctx->context == AUDIT_CTX_UNUSED))
return NULL;
return &ctx->killed_trees;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Fast batching percpu counters.
*/
#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
static const struct debug_obj_descr percpu_counter_debug_descr;
static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
struct percpu_counter *fbc = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
percpu_counter_destroy(fbc);
debug_object_free(fbc, &percpu_counter_debug_descr);
return true;
default:
return false;
}
}
static const struct debug_obj_descr percpu_counter_debug_descr = {
.name = "percpu_counter",
.fixup_free = percpu_counter_fixup_free,
};
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
debug_object_init(fbc, &percpu_counter_debug_descr);
debug_object_activate(fbc, &percpu_counter_debug_descr);
}
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
debug_object_deactivate(fbc, &percpu_counter_debug_descr);
debug_object_free(fbc, &percpu_counter_debug_descr);
}
#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
/*
* Add to a counter while respecting batch size.
*
* There are 2 implementations, both dealing with the following problem:
*
* The decision slow path/fast path and the actual update must be atomic.
* Otherwise a call in process context could check the current values and
* decide that the fast path can be used. If now an interrupt occurs before
* the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
* then the this_cpu_add() that is executed after the interrupt has completed
* can produce values larger than "batch" or even overflows.
*/
#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
* Safety against interrupts is achieved in 2 ways:
* 1. the fast path uses local cmpxchg (note: no lock prefix)
* 2. the slow path operates with interrupts disabled
*/
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
unsigned long flags;
count = this_cpu_read(*fbc->counters);
do {
if (unlikely(abs(count + amount) >= batch)) {
raw_spin_lock_irqsave(&fbc->lock, flags);
/*
* Note: by now we might have migrated to another CPU
* or the value might have changed.
*/
count = __this_cpu_read(*fbc->counters);
fbc->count += count + amount;
__this_cpu_sub(*fbc->counters, count);
raw_spin_unlock_irqrestore(&fbc->lock, flags);
return;
}
	} while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount));
}
#else
/*
* local_irq_save() is used to make the function irq safe:
* - The slow path would be ok as protected by an irq-safe spinlock.
* - this_cpu_add would be ok as it is irq-safe by definition.
*/
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
unsigned long flags;
local_irq_save(flags);
count = __this_cpu_read(*fbc->counters) + amount;
if (abs(count) >= batch) {
raw_spin_lock(&fbc->lock);
fbc->count += count;
__this_cpu_sub(*fbc->counters, count - amount);
raw_spin_unlock(&fbc->lock);
} else {
this_cpu_add(*fbc->counters, amount);
}
local_irq_restore(flags);
}
#endif
EXPORT_SYMBOL(percpu_counter_add_batch);
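/*
 * A minimal usage sketch (hypothetical example_* names, not part of this
 * file): initialise once, do cheap per-cpu updates from hot paths with an
 * explicit batch, and pay for a precise sum only when reporting.
 */
static struct percpu_counter example_nr_items;

static int __maybe_unused example_counter_setup(void)
{
	/* May sleep, so GFP_KERNEL is fine here. */
	return percpu_counter_init(&example_nr_items, 0, GFP_KERNEL);
}

static void __maybe_unused example_counter_inc(void)
{
	/* Stays per-cpu until the local delta reaches 64, then folds in. */
	percpu_counter_add_batch(&example_nr_items, 1, 64);
}

static s64 __maybe_unused example_counter_total(void)
{
	/* Slow but precise: adds every per-cpu delta to the global count. */
	return percpu_counter_sum(&example_nr_items);
}

static void __maybe_unused example_counter_teardown(void)
{
	percpu_counter_destroy(&example_nr_items);
}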
/*
* For a percpu_counter with a big batch, the deviation of its count can
* be large, and there are cases where that deviation must be reduced, for
* example when the counter's batch is decreased at runtime to get better
* accuracy; this can be achieved by running this sync function on each CPU.
*/
void percpu_counter_sync(struct percpu_counter *fbc)
{
unsigned long flags;
s64 count;
raw_spin_lock_irqsave(&fbc->lock, flags);
count = __this_cpu_read(*fbc->counters);
fbc->count += count;
__this_cpu_sub(*fbc->counters, count);
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
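/*
 * A minimal usage sketch (hypothetical example_* names): fold the per-cpu
 * deltas into fbc->count on every CPU, e.g. before shrinking the batch,
 * by running percpu_counter_sync() through on_each_cpu().
 */
static void example_sync_one(void *fbc)
{
	percpu_counter_sync(fbc);
}

static void __maybe_unused example_sync_all(struct percpu_counter *fbc)
{
	/* Run the sync on each online CPU and wait for completion. */
	on_each_cpu(example_sync_one, fbc, 1);
}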
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive().
*
* We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
* from CPUs that are in the process of being taken offline. Dying cpus have
* been removed from the online mask, but may not have had the hotplug dead
* notifier called to fold the percpu count back into the global counter sum.
* By including dying CPUs in the iteration mask, we avoid this race condition
* so __percpu_counter_sum() just does the right thing when CPUs are being taken
* offline.
*/
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
gfp_t gfp, u32 nr_counters,
struct lock_class_key *key)
{
unsigned long flags __maybe_unused;
size_t counter_size;
s32 __percpu *counters;
u32 i;
counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
counters = __alloc_percpu_gfp(nr_counters * counter_size,
__alignof__(*counters), gfp);
if (!counters) {
fbc[0].counters = NULL;
return -ENOMEM;
}
for (i = 0; i < nr_counters; i++) {
raw_spin_lock_init(&fbc[i].lock);
lockdep_set_class(&fbc[i].lock, key);
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc[i].list);
#endif
fbc[i].count = amount;
fbc[i].counters = (void __percpu *)counters + i * counter_size;
debug_percpu_counter_activate(&fbc[i]);
}
#ifdef CONFIG_HOTPLUG_CPU
spin_lock_irqsave(&percpu_counters_lock, flags);
for (i = 0; i < nr_counters; i++)
		list_add(&fbc[i].list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init_many);
void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
{
unsigned long flags __maybe_unused;
u32 i;
if (WARN_ON_ONCE(!fbc))
return;
if (!fbc[0].counters)
return;
for (i = 0; i < nr_counters; i++)
debug_percpu_counter_deactivate(&fbc[i]);
#ifdef CONFIG_HOTPLUG_CPU
spin_lock_irqsave(&percpu_counters_lock, flags);
for (i = 0; i < nr_counters; i++)
list_del(&fbc[i].list);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
free_percpu(fbc[0].counters);
for (i = 0; i < nr_counters; i++)
fbc[i].counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy_many);
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
static int compute_batch_value(unsigned int cpu)
{
int nr = num_online_cpus();
percpu_counter_batch = max(32, nr*2);
return 0;
}
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
struct percpu_counter *fbc;
compute_batch_value(cpu);
spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
raw_spin_lock(&fbc->lock);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
raw_spin_unlock(&fbc->lock);
}
spin_unlock_irq(&percpu_counters_lock);
#endif
return 0;
}
/*
* Compare counter against given value.
* Return 1 if greater, 0 if equal and -1 if less
*/
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
s64 count;
count = percpu_counter_read(fbc);
/* Check to see if rough count will be sufficient for comparison */
if (abs(count - rhs) > (batch * num_online_cpus())) {
if (count > rhs)
return 1;
else
return -1;
}
/* Need to use precise count */
count = percpu_counter_sum(fbc);
if (count > rhs)
return 1;
else if (count < rhs)
return -1;
else
return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
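/*
 * A minimal usage sketch (hypothetical name): a limit check built on the
 * comparison helper above via percpu_counter_compare(), which only falls
 * back to a precise percpu_counter_sum() when the count is near the limit.
 */
static bool __maybe_unused example_under_limit(struct percpu_counter *fbc,
					       s64 limit)
{
	/* Returns <0 if below the limit, 0 if equal, >0 if above. */
	return percpu_counter_compare(fbc, limit) < 0;
}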
/*
* Compare counter, and add amount if total is: less than or equal to limit if
* amount is positive, or greater than or equal to limit if amount is negative.
* Return true if amount is added, or false if total would be beyond the limit.
*
* Negative limit is allowed, but unusual.
* When negative amounts (subs) are given to percpu_counter_limited_add(),
* the limit would most naturally be 0 - but other limits are also allowed.
*
* Overflow beyond S64_MAX is not handled: counter, limit and amount
* are all assumed to be sane (far from S64_MIN and S64_MAX).
*/
bool __percpu_counter_limited_add(struct percpu_counter *fbc,
s64 limit, s64 amount, s32 batch)
{
s64 count;
s64 unknown;
unsigned long flags;
bool good = false;
if (amount == 0)
return true;
local_irq_save(flags);
unknown = batch * num_online_cpus();
count = __this_cpu_read(*fbc->counters);
/* Skip taking the lock when safe */
if (abs(count + amount) <= batch &&
((amount > 0 && fbc->count + unknown <= limit) ||
(amount < 0 && fbc->count - unknown >= limit))) {
this_cpu_add(*fbc->counters, amount);
local_irq_restore(flags);
return true;
}
raw_spin_lock(&fbc->lock);
count = fbc->count + amount;
/* Skip percpu_counter_sum() when safe */
if (amount > 0) {
if (count - unknown > limit)
goto out;
if (count + unknown <= limit)
good = true;
} else {
if (count + unknown < limit)
goto out;
if (count - unknown >= limit)
good = true;
}
if (!good) {
s32 *pcount;
int cpu;
for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
pcount = per_cpu_ptr(fbc->counters, cpu);
count += *pcount;
}
if (amount > 0) {
if (count > limit)
goto out;
} else {
if (count < limit)
goto out;
}
good = true;
}
count = __this_cpu_read(*fbc->counters);
fbc->count += count + amount;
__this_cpu_sub(*fbc->counters, count);
out:
raw_spin_unlock(&fbc->lock);
local_irq_restore(flags);
return good;
}
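/*
 * A minimal usage sketch (hypothetical name), assuming the
 * percpu_counter_limited_add() wrapper from <linux/percpu_counter.h>:
 * charge blocks against a quota, failing instead of overshooting it.
 */
static int __maybe_unused example_charge_blocks(struct percpu_counter *used,
						s64 limit, s64 nr_blocks)
{
	/* Adds nr_blocks only if the resulting total stays within limit. */
	if (!percpu_counter_limited_add(used, limit, nr_blocks))
		return -ENOSPC;
	return 0;
}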
static int __init percpu_counter_startup(void)
{
int ret;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
compute_batch_value, NULL);
WARN_ON(ret < 0);
ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
"lib/percpu_cnt:dead", NULL,
percpu_counter_cpu_dead);
WARN_ON(ret < 0);
return 0;
}
module_init(percpu_counter_startup);
// SPDX-License-Identifier: GPL-2.0
/*
* Out-of-line refcount functions.
*/
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")
void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
refcount_set(r, REFCOUNT_SATURATED);
switch (t) {
case REFCOUNT_ADD_NOT_ZERO_OVF:
REFCOUNT_WARN("saturated; leaking memory");
break;
case REFCOUNT_ADD_OVF:
REFCOUNT_WARN("saturated; leaking memory");
break;
case REFCOUNT_ADD_UAF:
REFCOUNT_WARN("addition on 0; use-after-free");
break;
case REFCOUNT_SUB_UAF:
REFCOUNT_WARN("underflow; use-after-free");
break;
case REFCOUNT_DEC_LEAK:
REFCOUNT_WARN("decrement hit 0; leaking memory");
break;
default:
REFCOUNT_WARN("unknown saturation event!?");
}
}
EXPORT_SYMBOL(refcount_warn_saturate);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
* @r: the refcount
*
* No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
* success thereof.
*
* Like all decrement operations, it provides release memory order and provides
* a control dependency.
*
* It can be used like a try-delete operator; this explicit case is provided
* rather than a generic cmpxchg, because that would allow implementing unsafe
* operations.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_if_one(refcount_t *r)
{
int val = 1;
return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
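/*
 * A minimal usage sketch (hypothetical example_* names, not part of this
 * file) of the try-delete pattern described above.
 */
struct example_obj {
	refcount_t ref;
	/* ... payload ... */
};

static bool __maybe_unused example_try_delete(struct example_obj *obj)
{
	/* Succeeds only for the 1 -> 0 transition, i.e. the last reference. */
	if (!refcount_dec_if_one(&obj->ref))
		return false;
	/* We dropped the last reference; the caller may now free obj. */
	return true;
}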
/**
* refcount_dec_not_one - decrement a refcount if it is not 1
* @r: the refcount
*
* No atomic_t counterpart, it decrements unless the value is 1, in which case
* it will return false.
*
* Was often done like: atomic_add_unless(&var, -1, 1)
*
* Return: true if the decrement operation was successful, false otherwise
*/
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}
	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
/**
* refcount_dec_and_mutex_lock - return holding mutex if able to decrement
* refcount to 0
* @r: the refcount
* @lock: the mutex to be locked
*
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
* to decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
*
* Return: true and hold mutex if able to decrement refcount to 0, false
* otherwise
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
if (refcount_dec_not_one(r))
return false;
mutex_lock(lock);
if (!refcount_dec_and_test(r)) {
mutex_unlock(lock);
return false;
}
return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
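/*
 * A minimal usage sketch (hypothetical names, reusing struct example_obj
 * from the sketch above): the usual put path, where dropping the final
 * reference must also unlink the object under a mutex.
 */
static DEFINE_MUTEX(example_table_lock);

static void __maybe_unused example_put(struct example_obj *obj)
{
	/* Fast path: not the last reference, the mutex is never taken. */
	if (!refcount_dec_and_mutex_lock(&obj->ref, &example_table_lock))
		return;
	/* Count hit 0 and example_table_lock is held: unlink, then free. */
	/* ... remove obj from its lookup structure and free it ... */
	mutex_unlock(&example_table_lock);
}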
/**
* refcount_dec_and_lock - return holding spinlock if able to decrement
* refcount to 0
* @r: the refcount
* @lock: the spinlock to be locked
*
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
* decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;
spin_lock(lock);
if (!refcount_dec_and_test(r)) {
spin_unlock(lock);
return false;
}
return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
/**
* refcount_dec_and_lock_irqsave - return holding spinlock with disabled
* interrupts if able to decrement refcount to 0
* @r: the refcount
* @lock: the spinlock to be locked
* @flags: saved IRQ-flags if the lock is acquired
*
* Same as refcount_dec_and_lock() above except that the spinlock is acquired
* with disabled interrupts.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
*/
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;
spin_lock_irqsave(lock, *flags);
if (!refcount_dec_and_test(r)) {
spin_unlock_irqrestore(lock, *flags);
return false;
}
return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/ucopysize.h>
#include <asm/uaccess.h>
/*
* Architectures that support memory tagging (assigning tags to memory regions,
* embedding these tags into addresses that point to these memory regions, and
* checking that the memory and the pointer tags match on memory accesses)
* redefine this macro to strip tags from pointers.
*
* Passing down mm_struct allows to define untagging rules on per-process
* basis.
*
* It's defined as noop for architectures that don't support memory tagging.
*/
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif
#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr) ({ \
mmap_assert_locked(mm); \
untagged_addr(addr); \
})
#endif
#ifdef masked_user_access_begin
#define can_do_masked_user_access() 1
#else
#define can_do_masked_user_access() 0
#define masked_user_access_begin(src) NULL
#define mask_user_address(src) (src)
#endif
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
* __copy_{to,from}_user{,_inatomic}().
*
* raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
* return the amount left to copy. They should assume that access_ok() has
* already been checked (and succeeded); they should *not* zero-pad anything.
* No KASAN or object size checks either - those belong here.
*
* Both of these functions should attempt to copy size bytes starting at from
* into the area starting at to. They must not fetch or store anything
* outside of those areas. Return value must be between 0 (everything
* copied successfully) and size (nothing copied).
*
* If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
* at to must become equal to the bytes fetched from the corresponding area
* starting at from. All data past to + size - N must be left unmodified.
*
* If copying succeeds, the return value must be 0. If some data cannot be
* fetched, it is permitted to copy less than had been fetched; the only
* hard requirement is that not storing anything at all (i.e. returning size)
* should happen only when nothing could be copied. In other words, you don't
* have to squeeze as much as possible - it is allowed, but not necessary.
*
* For raw_copy_from_user(), the destination (to) always points to kernel
* memory and no faults on store should happen. Interpretation of from is
* affected by set_fs(). For raw_copy_to_user() it's the other way round.
*
* Both can be inlined - it's up to architectures whether it wants to bother
* with that. They should not be used directly; they are used to implement
* the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic())
* that are used instead. Out of those, __... ones are inlined. Plain
* copy_{to,from}_user() might or might not be inlined. If you want them
* inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
*
* NOTE: only copy_from_user() zero-pads the destination in case of short copy.
* Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
* at all; their callers absolutely must check the return value.
*
* Biarch ones should also provide raw_copy_in_user() - similar to the above,
* but both source and destination are __user pointers (affected by set_fs()
* as usual) and both source and destination can trigger faults.
*/
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
unsigned long res;
instrument_copy_from_user_before(to, from, n);
check_object_size(to, n, false);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
return res;
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res;
might_fault();
instrument_copy_from_user_before(to, from, n);
if (should_fail_usercopy())
return n;
check_object_size(to, n, false);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
return res;
}
/**
* __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
* The caller should also make sure the user space address is pinned,
* so that we don't take a page fault and sleep.
*/
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
if (should_fail_usercopy())
return n;
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (should_fail_usercopy())
return n;
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
/*
* Architectures that #define INLINE_COPY_TO_USER use this function
* directly in the normal copy_to/from_user(), the other ones go
* through an extern _copy_to/from_user(), which expands the same code
* here.
*
* Rust code always uses the extern definition.
*/
static inline __must_check unsigned long
_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
if (should_fail_usercopy())
goto fail;
if (can_do_masked_user_access())
from = mask_user_address(from);
else {
if (!access_ok(from, n))
goto fail;
/*
* Ensure that bad access_ok() speculation will not
* lead to nasty side effects *after* the copy is
* finished:
*/
barrier_nospec();
}
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
if (likely(!res))
return 0;
fail:
	memset(to + (n - res), 0, res);
	return res;
}
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
static inline __must_check unsigned long
_inline_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (should_fail_usercopy())
return n;
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
}
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (!check_copy_size(to, n, false))
return n;
#ifdef INLINE_COPY_FROM_USER
return _inline_copy_from_user(to, from, n);
#else
return _copy_from_user(to, from, n);
#endif
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (!check_copy_size(from, n, true))
return n;
#ifdef INLINE_COPY_TO_USER
return _inline_copy_to_user(to, from, n);
#else
return _copy_to_user(to, from, n);
#endif
}
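/*
 * A minimal usage sketch (hypothetical example_* helpers, not part of this
 * header) of the "bytes not copied" return convention described above.
 */
static inline int example_get_u32(u32 *dst, const void __user *src)
{
	/* copy_from_user() returns the number of bytes left uncopied. */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static inline int example_put_u32(void __user *dst, u32 val)
{
	if (copy_to_user(dst, &val, sizeof(val)))
		return -EFAULT;
	return 0;
}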
#ifndef copy_mc_to_kernel
/*
* Without arch opt-in this generic copy_mc_to_kernel() will not handle
* #MC (or arch equivalent) during source read.
*/
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
return 0;
}
#endif
static __always_inline void pagefault_disabled_inc(void)
{
current->pagefault_disabled++;
}
static __always_inline void pagefault_disabled_dec(void)
{
current->pagefault_disabled--;
}
/*
* These routines enable/disable the pagefault handler. If disabled, it will
* not take any locks and go straight to the fixup table.
*
* User access methods will not sleep when called from a pagefault_disabled()
* environment.
*/
static inline void pagefault_disable(void)
{
pagefault_disabled_inc();
/*
* make sure to have issued the store before a pagefault
* can hit.
*/
barrier();
}
static inline void pagefault_enable(void)
{
/*
* make sure to issue those last loads/stores before enabling
* the pagefault handler again.
*/
barrier();
pagefault_disabled_dec();
}
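/*
 * A minimal usage sketch (hypothetical helper, not part of this header):
 * probing a user address from a context that must not sleep.
 */
static inline bool example_probe_user_byte(const void __user *uaddr, u8 *val)
{
	unsigned long left;

	if (!access_ok(uaddr, sizeof(*val)))
		return false;
	/* With page faults disabled, a fault fails fast instead of sleeping. */
	pagefault_disable();
	left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return left == 0;
}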
/*
* Is the pagefault handler disabled? If so, user access methods will not sleep.
*/
static inline bool pagefault_disabled(void)
{
return current->pagefault_disabled != 0;
}
/*
* The pagefault handler is in general disabled by pagefault_disable() or
* when in irq context (via in_atomic()).
*
* This function should only be used by the fault handlers. Other users should
* stick to pagefault_disabled().
* Please NEVER use preempt_disable() to disable the fault handler. With
* !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
* in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
DEFINE_LOCK_GUARD_0(pagefault, pagefault_disable(), pagefault_enable())
#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
/**
* probe_subpage_writeable: probe the user range for write faults at sub-page
* granularity (e.g. arm64 MTE)
* @uaddr: start of address range
* @size: size of address range
*
* Returns 0 on success, the number of bytes not probed on fault.
*
* It is expected that the caller checked for the write permission of each
* page in the range either by put_user() or GUP. The architecture port can
* implement a more efficient get_user() probing if the same sub-page faults
* are triggered by either a read or a write.
*/
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
return 0;
}
#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
#endif /* ARCH_HAS_NOCACHE_UACCESS */
extern __must_check int check_zeroed_user(const void __user *from, size_t size);
/**
* copy_struct_from_user: copy a struct from userspace
* @dst: Destination address, in kernel space. This buffer must be @ksize
* bytes long.
* @ksize: Size of @dst struct.
* @src: Source address, in userspace.
* @usize: (Alleged) size of @src struct.
*
* Copies a struct from userspace to kernel space, in a way that guarantees
* backwards-compatibility for struct syscall arguments (as long as future
* struct extensions are made such that all new fields are *appended* to the
* old struct, and zeroed-out new fields have the same meaning as the old
* struct).
*
* @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
* The recommended usage is something like the following:
*
* SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
* {
* int err;
* struct foo karg = {};
*
* if (usize > PAGE_SIZE)
* return -E2BIG;
* if (usize < FOO_SIZE_VER0)
* return -EINVAL;
*
* err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
* if (err)
* return err;
*
* // ...
* }
*
* There are three cases to consider:
* * If @usize == @ksize, then it's copied verbatim.
* * If @usize < @ksize, then the userspace has passed an old struct to a
* newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
* are to be zero-filled.
* * If @usize > @ksize, then the userspace has passed a new struct to an
* older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
* are checked to ensure they are zeroed, otherwise -E2BIG is returned.
*
* Returns (in all cases, some data may have been copied):
* * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src.
* * -EFAULT: access to userspace failed.
*/
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
size_t usize)
{
size_t size = min(ksize, usize);
size_t rest = max(ksize, usize) - size;
/* Double check if ksize is larger than a known object size. */
if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
return -E2BIG;
/* Deal with trailing bytes. */
if (usize < ksize) {
memset(dst + size, 0, rest);
} else if (usize > ksize) {
int ret = check_zeroed_user(src + size, rest);
if (ret <= 0)
return ret ?: -E2BIG;
}
/* Copy the interoperable parts of the struct. */
if (copy_from_user(dst, src, size))
return -EFAULT;
return 0;
}
/**
* copy_struct_to_user: copy a struct to userspace
* @dst: Destination address, in userspace. This buffer must be @ksize
* bytes long.
* @usize: (Alleged) size of @dst struct.
* @src: Source address, in kernel space.
* @ksize: Size of @src struct.
* @ignored_trailing: Set to %true if there was a non-zero byte in @src that
* userspace cannot see because they are using a smaller struct.
*
* Copies a struct from kernel space to userspace, in a way that guarantees
* backwards-compatibility for struct syscall arguments (as long as future
* struct extensions are made such that all new fields are *appended* to the
* old struct, and zeroed-out new fields have the same meaning as the old
* struct).
*
* Some syscalls may wish to make sure that userspace knows about everything in
* the struct, and if there is a non-zero value that userspace doesn't know
* about, they want to return an error (such as -EMSGSIZE) or have some other
* fallback (such as adding a "you're missing some information" flag). If
* @ignored_trailing is non-%NULL, it will be set to %true if there was a
* non-zero byte that could not be copied to userspace (i.e. was past @usize).
*
* While unconditionally returning an error in this case is the simplest
* solution, for maximum backward compatibility you should try to only return
* -EMSGSIZE if the user explicitly requested the data that couldn't be copied.
* Note that structure sizes can change due to header changes and simple
* recompilations without code changes(!), so if you care about
* @ignored_trailing you probably want to make sure that any new field data is
* associated with a flag. Otherwise you might assume that a program knows
* about data it does not.
*
* @ksize is just sizeof(*src), and @usize should've been passed by userspace.
* The recommended usage is something like the following:
*
* SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize)
* {
* int err;
* bool ignored_trailing;
* struct foo karg = {};
*
* if (usize > PAGE_SIZE)
* return -E2BIG;
* if (usize < FOO_SIZE_VER0)
* return -EINVAL;
*
* // ... modify karg somehow ...
*
* err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg),
* &ignored_trailing);
* if (err)
* return err;
* if (ignored_trailing)
* return -EMSGSIZE;
*
* // ...
* }
*
* There are three cases to consider:
* * If @usize == @ksize, then it's copied verbatim.
* * If @usize < @ksize, then the kernel is trying to pass userspace a newer
* struct than it supports. Thus we only copy the interoperable portions
* (@usize) and ignore the rest (but @ignored_trailing is set to %true if
* any of the trailing (@ksize - @usize) bytes are non-zero).
* * If @usize > @ksize, then the kernel is trying to pass userspace an older
* struct than userspace supports. In order to make sure the
* unknown-to-the-kernel fields don't contain garbage values, we zero the
* trailing (@usize - @ksize) bytes.
*
* Returns (in all cases, some data may have been copied):
* * -EFAULT: access to userspace failed.
*/
static __always_inline __must_check int
copy_struct_to_user(void __user *dst, size_t usize, const void *src,
size_t ksize, bool *ignored_trailing)
{
size_t size = min(ksize, usize);
size_t rest = max(ksize, usize) - size;
/* Double check if ksize is larger than a known object size. */
if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1)))
return -E2BIG;
/* Deal with trailing bytes. */
if (usize > ksize) {
if (clear_user(dst + size, rest))
return -EFAULT;
}
if (ignored_trailing)
*ignored_trailing = ksize < usize &&
memchr_inv(src + size, 0, rest) != NULL;
/* Copy the interoperable parts of the struct. */
if (copy_to_user(dst, src, size))
return -EFAULT;
return 0;
}
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
size_t size);
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
long count);
long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label) \
do { \
type __user *p = (type __force __user *)(src); \
type data; \
if (__get_user(data, p)) \
goto label; \
*(type *)dst = data; \
} while (0)
#define __put_kernel_nofault(dst, src, type, label) \
do { \
type __user *p = (type __force __user *)(dst); \
type data = *(type *)src; \
if (__put_user(data, p)) \
goto label; \
} while (0)
#endif
/**
* get_kernel_nofault(): safely attempt to read from a location
* @val: read into this variable
* @ptr: address to read from
*
* Returns 0 on success, or -EFAULT.
*/
#define get_kernel_nofault(val, ptr) ({ \
const typeof(val) *__gk_ptr = (ptr); \
copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
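/*
 * Usage sketch (illustrative only), e.g. from a debugging or unwinding path
 * that must tolerate unmapped kernel addresses:
 *
 * unsigned long word;
 *
 * if (get_kernel_nofault(word, (unsigned long *)addr))
 * return -EFAULT;
 */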
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
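/*
 * The begin/end primitives bracket a run of unsafe_*_user() accesses so that
 * the access_ok() and hardware-toggling cost is paid once. A minimal sketch of
 * the expected pattern (uptr and val are illustrative):
 *
 * if (!user_access_begin(uptr, sizeof(*uptr)))
 * return -EFAULT;
 * unsafe_get_user(val, uptr, Efault);
 * unsafe_put_user(val + 1, uptr, Efault);
 * user_access_end();
 * return 0;
 * Efault:
 * user_access_end();
 * return -EFAULT;
 */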
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
unsigned long len);
#endif
#endif /* __LINUX_UACCESS_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/exec.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* #!-checking implemented by tytso.
*/
/*
* Demand-loading implemented 01.12.91 - no need to read anything but
* the header into memory. The inode of the executable is put into
* "current->executable", and page faults do the actual loading. Clean.
*
* Once more I can proudly say that linux stood up to being changed: it
* was less than 2 hours work to get demand-loading completely implemented.
*
* Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
* current->executable is only used by the procfs. This allows a dispatch
* table to check for several different types of binary formats. We keep
* trying until we recognize the file or we run out of supported binary
* formats.
*/
#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/coredump.h>
#include <linux/time_namespace.h>
#include <linux/user_events.h>
#include <linux/rseq.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <trace/events/task.h>
#include "internal.h"
#include <trace/events/sched.h>
/* For vma exec functions. */
#include "../mm/internal.h"
static int bprm_creds_from_file(struct linux_binprm *bprm);
int suid_dumpable = 0;
static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
write_lock(&binfmt_lock);
insert ? list_add(&fmt->lh, &formats) :
list_add_tail(&fmt->lh, &formats);
write_unlock(&binfmt_lock);
}
EXPORT_SYMBOL(__register_binfmt);
void unregister_binfmt(struct linux_binfmt * fmt)
{
write_lock(&binfmt_lock);
list_del(&fmt->lh);
write_unlock(&binfmt_lock);
}
EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
module_put(fmt->module);
}
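/*
 * Binary format handlers normally register through the register_binfmt() or
 * insert_binfmt() wrappers in <linux/binfmts.h>, which call __register_binfmt()
 * above. A minimal sketch of a handler module (example_load_binary is a
 * hypothetical loader callback):
 *
 * static struct linux_binfmt example_format = {
 * .module = THIS_MODULE,
 * .load_binary = example_load_binary,
 * };
 *
 * static int __init example_binfmt_init(void)
 * {
 * register_binfmt(&example_format);
 * return 0;
 * }
 * core_initcall(example_binfmt_init);
 */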
bool path_noexec(const struct path *path)
{
/* If it's an anonymous inode make sure that we catch any shenanigans. */
VFS_WARN_ON_ONCE(IS_ANON_FILE(d_inode(path->dentry)) &&
!(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC));
return (path->mnt->mnt_flags & MNT_NOEXEC) ||
(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
#ifdef CONFIG_MMU
/*
* The nascent bprm->mm is not visible until exec_mmap() but it can
* use a lot of memory, so account these pages in current->mm temporarily
* for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
* change the counter back via acct_arg_size(0).
*/
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
struct mm_struct *mm = current->mm;
long diff = (long)(pages - bprm->vma_pages);
if (!mm || !diff)
return;
bprm->vma_pages = pages;
add_mm_counter(mm, MM_ANONPAGES, diff);
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
struct vm_area_struct *vma = bprm->vma;
struct mm_struct *mm = bprm->mm;
int ret;
/*
* Avoid relying on expanding the stack down in GUP (which
* does not work for STACK_GROWSUP anyway), and just do it
* ahead of time.
*/
if (!mmap_read_lock_maybe_expand(mm, vma, pos, write))
return NULL;
/*
* We are doing an exec(). 'current' is the process
* doing the exec and 'mm' is the new process's mm.
*/
ret = get_user_pages_remote(mm, pos, 1,
write ? FOLL_WRITE : 0,
&page, NULL);
mmap_read_unlock(mm);
if (ret <= 0)
return NULL;
if (write)
acct_arg_size(bprm, vma_pages(vma));
return page;
}
static void put_arg_page(struct page *page)
{
put_page(page);
}
static void free_arg_pages(struct linux_binprm *bprm)
{
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
struct page *page)
{
flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= MAX_ARG_STRLEN;
}
#else
static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
page = bprm->page[pos / PAGE_SIZE];
if (!page && write) {
page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
if (!page)
return NULL;
bprm->page[pos / PAGE_SIZE] = page;
}
return page;
}
static void put_arg_page(struct page *page)
{
}
static void free_arg_page(struct linux_binprm *bprm, int i)
{
if (bprm->page[i]) {
__free_page(bprm->page[i]);
bprm->page[i] = NULL;
}
}
static void free_arg_pages(struct linux_binprm *bprm)
{
int i;
for (i = 0; i < MAX_ARG_PAGES; i++)
free_arg_page(bprm, i);
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
struct page *page)
{
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
return len <= bprm->p;
}
#endif /* CONFIG_MMU */
/*
* Create a new mm_struct and populate it with a temporary stack
* vm_area_struct. We don't have enough context at this point to set the stack
* flags, permissions, and offset, so we use temporary values. We'll update
* them later in setup_arg_pages().
*/
static int bprm_mm_init(struct linux_binprm *bprm)
{
int err;
struct mm_struct *mm = NULL;
bprm->mm = mm = mm_alloc();
err = -ENOMEM;
if (!mm)
goto err;
/* Save current stack limit for all calculations made during exec. */
task_lock(current->group_leader);
bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
task_unlock(current->group_leader);
#ifndef CONFIG_MMU
bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
#else
err = create_init_stack_vma(bprm->mm, &bprm->vma, &bprm->p);
if (err)
goto err;
#endif
return 0;
err:
if (mm) {
bprm->mm = NULL;
mmdrop(mm);
}
return err;
}
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
bool is_compat;
#endif
union {
const char __user *const __user *native;
#ifdef CONFIG_COMPAT
const compat_uptr_t __user *compat;
#endif
} ptr;
};
static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
const char __user *native;
#ifdef CONFIG_COMPAT
if (unlikely(argv.is_compat)) {
compat_uptr_t compat;
if (get_user(compat, argv.ptr.compat + nr))
return ERR_PTR(-EFAULT);
return compat_ptr(compat);
}
#endif
if (get_user(native, argv.ptr.native + nr))
return ERR_PTR(-EFAULT);
return native;
}
/*
* count() counts the number of strings in array ARGV.
*/
static int count(struct user_arg_ptr argv, int max)
{
int i = 0;
if (argv.ptr.native != NULL) {
for (;;) {
const char __user *p = get_user_arg_ptr(argv, i);
if (!p)
break;
if (IS_ERR(p))
return -EFAULT;
if (i >= max)
return -E2BIG;
++i;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
cond_resched();
}
}
return i;
}
static int count_strings_kernel(const char *const *argv)
{
int i;
if (!argv)
return 0;
for (i = 0; argv[i]; ++i) {
if (i >= MAX_ARG_STRINGS)
return -E2BIG;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
cond_resched();
}
return i;
}
static inline int bprm_set_stack_limit(struct linux_binprm *bprm,
unsigned long limit)
{
#ifdef CONFIG_MMU
/* Avoid a pathological bprm->p. */
if (bprm->p < limit)
return -E2BIG;
bprm->argmin = bprm->p - limit;
#endif
return 0;
}
static inline bool bprm_hit_stack_limit(struct linux_binprm *bprm)
{
#ifdef CONFIG_MMU
return bprm->p < bprm->argmin;
#else
return false;
#endif
}
/*
* Calculate bprm->argmin from:
* - _STK_LIM
* - ARG_MAX
* - bprm->rlim_stack.rlim_cur
* - bprm->argc
* - bprm->envc
* - bprm->p
*/
static int bprm_stack_limits(struct linux_binprm *bprm)
{
unsigned long limit, ptr_size;
/*
* Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
* (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
limit = _STK_LIM / 4 * 3;
limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
/*
* We've historically supported up to 32 pages (ARG_MAX)
* of argument strings even with small stacks
*/
limit = max_t(unsigned long, limit, ARG_MAX);
/* Reject totally pathological counts. */
if (bprm->argc < 0 || bprm->envc < 0)
return -E2BIG;
/*
* We must account for the size of all the argv and envp pointers to
* the argv and envp strings, since they will also take up space in
* the stack. They aren't stored until much later when we can't
* signal to the parent that the child has run out of stack space.
* Instead, calculate it here so it's possible to fail gracefully.
*
* In the case of argc = 0, make sure there is space for adding an
* empty string (which will bump argc to 1), to ensure confused
* userspace programs don't start processing from argv[1], thinking
* argc can never be 0, to keep them from walking envp by accident.
* See do_execveat_common().
*/
if (check_add_overflow(max(bprm->argc, 1), bprm->envc, &ptr_size) ||
check_mul_overflow(ptr_size, sizeof(void *), &ptr_size))
return -E2BIG;
if (limit <= ptr_size)
return -E2BIG;
limit -= ptr_size;
return bprm_set_stack_limit(bprm, limit);
}
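/*
 * Worked example (assuming the common _STK_LIM of 8 MiB, ARG_MAX of 128 KiB
 * and a default RLIMIT_STACK of 8 MiB): limit starts at 6 MiB (3/4 of
 * _STK_LIM), is capped to 2 MiB (rlim_cur / 4) and stays at 2 MiB after the
 * ARG_MAX floor. With argc = 2 and envc = 30, ptr_size is 32 * sizeof(void *)
 * = 256 bytes on 64-bit, leaving roughly 2 MiB minus 256 bytes of room for
 * the argv/env strings before -E2BIG is reported.
 */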
/*
* 'copy_strings()' copies argument/environment strings from the old
* process's memory to the new process's stack. The call to get_user_pages()
* ensures the destination page is created and not swapped out.
*/
static int copy_strings(int argc, struct user_arg_ptr argv,
struct linux_binprm *bprm)
{
struct page *kmapped_page = NULL;
char *kaddr = NULL;
unsigned long kpos = 0;
int ret;
while (argc-- > 0) {
const char __user *str;
int len;
unsigned long pos;
ret = -EFAULT;
str = get_user_arg_ptr(argv, argc);
if (IS_ERR(str))
goto out;
len = strnlen_user(str, MAX_ARG_STRLEN);
if (!len)
goto out;
ret = -E2BIG;
if (!valid_arg_len(bprm, len))
goto out;
/* We're going to work our way backwards. */
pos = bprm->p;
str += len;
bprm->p -= len;
if (bprm_hit_stack_limit(bprm))
goto out;
while (len > 0) {
int offset, bytes_to_copy;
if (fatal_signal_pending(current)) {
ret = -ERESTARTNOHAND;
goto out;
}
cond_resched();
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
bytes_to_copy = offset;
if (bytes_to_copy > len)
bytes_to_copy = len;
offset -= bytes_to_copy;
pos -= bytes_to_copy;
str -= bytes_to_copy;
len -= bytes_to_copy;
if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
struct page *page;
page = get_arg_page(bprm, pos, 1);
if (!page) {
ret = -E2BIG;
goto out;
}
if (kmapped_page) {
flush_dcache_page(kmapped_page);
kunmap_local(kaddr);
put_arg_page(kmapped_page);
}
kmapped_page = page;
kaddr = kmap_local_page(kmapped_page);
kpos = pos & PAGE_MASK;
flush_arg_page(bprm, kpos, kmapped_page);
}
if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
ret = -EFAULT;
goto out;
}
}
}
ret = 0;
out:
if (kmapped_page) {
flush_dcache_page(kmapped_page);
kunmap_local(kaddr);
put_arg_page(kmapped_page);
}
return ret;
}
/*
* Copy an argument/environment string from the kernel to the new process's stack.
*/
int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
unsigned long pos = bprm->p;
if (len == 0)
return -EFAULT;
if (!valid_arg_len(bprm, len))
return -E2BIG;
/* We're going to work our way backwards. */
arg += len;
bprm->p -= len;
if (bprm_hit_stack_limit(bprm))
return -E2BIG;
while (len > 0) {
unsigned int bytes_to_copy = min_t(unsigned int, len,
min_not_zero(offset_in_page(pos), PAGE_SIZE));
struct page *page;
pos -= bytes_to_copy;
arg -= bytes_to_copy;
len -= bytes_to_copy;
page = get_arg_page(bprm, pos, 1);
if (!page)
return -E2BIG;
flush_arg_page(bprm, pos & PAGE_MASK, page);
memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy);
put_arg_page(page);
}
return 0;
}
EXPORT_SYMBOL(copy_string_kernel);
static int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm)
{
while (argc-- > 0) {
int ret = copy_string_kernel(argv[argc], bprm);
if (ret < 0)
return ret;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
cond_resched();
}
return 0;
}
#ifdef CONFIG_MMU
/*
* Finalizes the stack vm_area_struct. The flags and permissions are updated,
* the stack is optionally relocated, and some extra space is added.
*/
int setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top,
int executable_stack)
{
int ret;
unsigned long stack_shift;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = bprm->vma;
struct vm_area_struct *prev = NULL;
vm_flags_t vm_flags;
unsigned long stack_base;
unsigned long stack_size;
unsigned long stack_expand;
unsigned long rlim_stack;
struct mmu_gather tlb;
struct vma_iterator vmi;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size */
stack_base = bprm->rlim_stack.rlim_max;
stack_base = calc_max_stack_size(stack_base);
/* Add space for stack randomization. */
if (current->flags & PF_RANDOMIZE)
stack_base += (STACK_RND_MASK << PAGE_SHIFT);
/* Make sure we didn't let the argument array grow too large. */
if (vma->vm_end - vma->vm_start > stack_base)
return -ENOMEM;
stack_base = PAGE_ALIGN(stack_top - stack_base);
stack_shift = vma->vm_start - stack_base;
mm->arg_start = bprm->p - stack_shift;
bprm->p = vma->vm_end - stack_shift;
#else
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
if (unlikely(stack_top < mmap_min_addr) ||
unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
return -ENOMEM;
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
mm->arg_start = bprm->p;
#endif
bprm->exec -= stack_shift;
if (mmap_write_lock_killable(mm))
return -EINTR;
vm_flags = VM_STACK_FLAGS;
/*
* Adjust stack execute permissions; explicitly enable for
* EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
* (arch default) otherwise.
*/
if (unlikely(executable_stack == EXSTACK_ENABLE_X))
vm_flags |= VM_EXEC;
else if (executable_stack == EXSTACK_DISABLE_X)
vm_flags &= ~VM_EXEC;
vm_flags |= mm->def_flags;
vm_flags |= VM_STACK_INCOMPLETE_SETUP;
vma_iter_init(&vmi, mm, vma->vm_start);
tlb_gather_mmu(&tlb, mm);
ret = mprotect_fixup(&vmi, &tlb, vma, &prev, vma->vm_start, vma->vm_end,
vm_flags);
tlb_finish_mmu(&tlb);
if (ret)
goto out_unlock;
BUG_ON(prev != vma);
if (unlikely(vm_flags & VM_EXEC)) {
pr_warn_once("process '%pD4' started with executable stack\n",
bprm->file);
}
/* Move stack pages down in memory. */
if (stack_shift) {
/*
* During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
* the binfmt code determines where the new stack should reside, we shift it to
* its final location.
*/
ret = relocate_vma_down(vma, stack_shift);
if (ret)
goto out_unlock;
}
/* mprotect_fixup is overkill to remove the temporary stack flags */
vm_flags_clear(vma, VM_STACK_INCOMPLETE_SETUP);
stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
stack_size = vma->vm_end - vma->vm_start;
/*
* Align this down to a page boundary as expand_stack
* will align it up.
*/
rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
stack_expand = min(rlim_stack, stack_size + stack_expand);
#ifdef CONFIG_STACK_GROWSUP
stack_base = vma->vm_start + stack_expand;
#else
stack_base = vma->vm_end - stack_expand;
#endif
current->mm->start_stack = bprm->p;
ret = expand_stack_locked(vma, stack_base);
if (ret)
ret = -EFAULT;
out_unlock:
mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
#else
/*
* Transfer the program arguments and environment from the holding pages
* onto the stack. The provided stack pointer is adjusted accordingly.
*/
int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location)
{
unsigned long index, stop, sp;
int ret = 0;
stop = bprm->p >> PAGE_SHIFT;
sp = *sp_location;
for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
char *src = kmap_local_page(bprm->page[index]) + offset;
sp -= PAGE_SIZE - offset;
if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
ret = -EFAULT;
kunmap_local(src);
if (ret)
goto out;
}
bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
*sp_location = sp;
out:
return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);
#endif /* CONFIG_MMU */
/*
* On success, caller must call do_close_execat() on the returned
* struct file to close it.
*/
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
int err;
struct file *file __free(fput) = NULL;
struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC,
.intent = LOOKUP_OPEN,
.lookup_flags = LOOKUP_FOLLOW,
};
if ((flags &
~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH | AT_EXECVE_CHECK)) != 0)
return ERR_PTR(-EINVAL);
if (flags & AT_SYMLINK_NOFOLLOW)
open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
if (flags & AT_EMPTY_PATH)
open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
file = do_filp_open(fd, name, &open_exec_flags);
if (IS_ERR(file))
return file;
if (path_noexec(&file->f_path))
return ERR_PTR(-EACCES);
/*
* In the past the regular type check was here. It moved to may_open() in
* 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
* an invariant that all non-regular files error out before we get here.
*/
if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)))
return ERR_PTR(-EACCES);
err = exe_file_deny_write_access(file);
if (err)
return ERR_PTR(err);
return no_free_ptr(file);
}
/**
* open_exec - Open a path name for execution
*
* @name: path name to open with the intent of executing it.
*
* Returns ERR_PTR on failure or allocated struct file on success.
*
* As this is a wrapper for the internal do_open_execat(), callers
* must call exe_file_allow_write_access() before fput() on release. Also see
* do_close_execat().
*/
struct file *open_exec(const char *name)
{
struct filename *filename = getname_kernel(name);
struct file *f = ERR_CAST(filename);
if (!IS_ERR(filename)) {
f = do_open_execat(AT_FDCWD, filename, 0);
putname(filename);
}
return f;
}
EXPORT_SYMBOL(open_exec);
#if defined(CONFIG_BINFMT_FLAT) || defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
if (res > 0)
flush_icache_user_range(addr, addr + len);
return res;
}
EXPORT_SYMBOL(read_code);
#endif
/*
* Maps the mm_struct mm into the current task struct.
* On success, this function returns with exec_update_lock
* held for writing.
*/
static int exec_mmap(struct mm_struct *mm)
{
struct task_struct *tsk;
struct mm_struct *old_mm, *active_mm;
int ret;
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
ret = down_write_killable(&tsk->signal->exec_update_lock);
if (ret)
return ret;
if (old_mm) {
/*
* If there is a pending fatal signal, perhaps a signal
* whose default action is to create a coredump, get
* out and die instead of going through with the exec.
*/
ret = mmap_read_lock_killable(old_mm);
if (ret) {
up_write(&tsk->signal->exec_update_lock);
return ret;
}
}
task_lock(tsk);
membarrier_exec_mmap(mm);
local_irq_disable();
active_mm = tsk->active_mm;
tsk->active_mm = mm;
tsk->mm = mm;
mm_init_cid(mm, tsk);
/*
* This prevents preemption while active_mm is being loaded and
* it and mm are being updated, which could cause problems for
* lazy tlb mm refcounting when these are updated by context
* switches. Not all architectures can handle irqs off over
* activate_mm yet.
*/
if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
activate_mm(active_mm, mm);
if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
lru_gen_add_mm(mm);
task_unlock(tsk);
lru_gen_use_mm(mm);
if (old_mm) {
mmap_read_unlock(old_mm);
BUG_ON(active_mm != old_mm);
setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
mm_update_next_owner(old_mm);
mmput(old_mm);
return 0;
}
mmdrop_lazy_tlb(active_mm);
return 0;
}
static int de_thread(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
struct sighand_struct *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
if (thread_group_empty(tsk))
goto no_thread_group;
/*
* Kill all other threads in the thread group.
*/
spin_lock_irq(lock);
if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) {
/*
* Another group action in progress, just
* return so that the signal is processed.
*/
spin_unlock_irq(lock);
return -EAGAIN;
}
sig->group_exec_task = tsk;
sig->notify_count = zap_other_threads(tsk);
if (!thread_group_leader(tsk))
sig->notify_count--;
while (sig->notify_count) {
__set_current_state(TASK_KILLABLE);
spin_unlock_irq(lock);
schedule();
if (__fatal_signal_pending(tsk))
goto killed;
spin_lock_irq(lock);
}
spin_unlock_irq(lock);
/*
* At this point all other threads have exited, all we have to
* do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
if (!thread_group_leader(tsk)) {
struct task_struct *leader = tsk->group_leader;
for (;;) {
cgroup_threadgroup_change_begin(tsk);
write_lock_irq(&tasklist_lock);
/*
* Do this under tasklist_lock to ensure that
* exit_notify() can't miss ->group_exec_task
*/
sig->notify_count = -1;
if (likely(leader->exit_state))
break;
__set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
cgroup_threadgroup_change_end(tsk);
schedule();
if (__fatal_signal_pending(tsk))
goto killed;
}
/*
* The only record we have of the real-time age of a
* process, regardless of execs it's done, is start_time.
* All the past CPU time is accumulated in signal_struct
* from sister threads now dead. But in this non-leader
* exec, nothing survives from the original leader thread,
* whose birth marks the true age of this process now.
* When we take on its identity by switching to its PID, we
* also take its birthdate (always earlier than our own).
*/
tsk->start_time = leader->start_time;
tsk->start_boottime = leader->start_boottime;
BUG_ON(!same_thread_group(leader, tsk));
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
* two threads with a switched PID, and release
* the former thread group leader:
*/
/* Become a process group leader with the old leader's pid.
* The old leader becomes a thread of this thread group.
*/
exchange_tids(tsk, leader);
transfer_pid(leader, tsk, PIDTYPE_TGID);
transfer_pid(leader, tsk, PIDTYPE_PGID);
transfer_pid(leader, tsk, PIDTYPE_SID);
list_replace_rcu(&leader->tasks, &tsk->tasks);
list_replace_init(&leader->sibling, &tsk->sibling);
tsk->group_leader = tsk;
leader->group_leader = tsk;
tsk->exit_signal = SIGCHLD;
leader->exit_signal = -1;
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
/*
* We are going to release_task()->ptrace_unlink() silently,
* the tracer can sleep in do_wait(). EXIT_DEAD guarantees
* the tracer won't block again waiting for this thread.
*/
if (unlikely(leader->ptrace))
__wake_up_parent(leader, leader->parent);
write_unlock_irq(&tasklist_lock);
cgroup_threadgroup_change_end(tsk);
release_task(leader);
}
sig->group_exec_task = NULL;
sig->notify_count = 0;
no_thread_group:
/* we have changed execution domain */
tsk->exit_signal = SIGCHLD;
BUG_ON(!thread_group_leader(tsk));
return 0;
killed:
/* protects against exit_notify() and __exit_signal() */
read_lock(&tasklist_lock);
sig->group_exec_task = NULL;
sig->notify_count = 0;
read_unlock(&tasklist_lock);
return -EAGAIN;
}
/*
* This function makes sure the current process has its own signal table,
* so that flush_signal_handlers can later reset the handlers without
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().)
*/
static int unshare_sighand(struct task_struct *me)
{
struct sighand_struct *oldsighand = me->sighand;
if (refcount_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
/*
* This ->sighand is shared with the CLONE_SIGHAND
* but not CLONE_THREAD task, switch to the new one.
*/
newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
if (!newsighand)
return -ENOMEM;
refcount_set(&newsighand->count, 1);
write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
rcu_assign_pointer(me->sighand, newsighand);
spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock);
__cleanup_sighand(oldsighand);
}
return 0;
}
/*
* This is unlocked -- the string will always be NUL-terminated, but
* may show overlapping contents if racing concurrent reads.
*/
void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
size_t len = min(strlen(buf), sizeof(tsk->comm) - 1);
trace_task_rename(tsk, buf);
memcpy(tsk->comm, buf, len);
memset(&tsk->comm[len], 0, sizeof(tsk->comm) - len);
perf_event_comm(tsk, exec);
}
/*
* Calling this is the point of no return. None of the failures will be
* seen by userspace since either the process is already taking a fatal
* signal (via de_thread() or coredump), or will have SEGV raised
* (after exec_mmap()) by search_binary_handler (see below).
*/
int begin_new_exec(struct linux_binprm * bprm)
{
struct task_struct *me = current;
int retval;
/* Once we are committed compute the creds */
retval = bprm_creds_from_file(bprm);
if (retval)
return retval;
/*
* This tracepoint marks the point before flushing the old exec where
* the current task is still unchanged, but errors are fatal (point of
* no return). The later "sched_process_exec" tracepoint is called after
* the current task has successfully switched to the new exec.
*/
trace_sched_prepare_exec(current, bprm);
/*
* Ensure all future errors are fatal.
*/
bprm->point_of_no_return = true;
/* Make this the only thread in the thread group */
retval = de_thread(me);
if (retval)
goto out;
/* see the comment in check_unsafe_exec() */
current->fs->in_exec = 0;
/*
* Cancel any io_uring activity across execve
*/
io_uring_task_cancel();
/* Ensure the files table is not shared. */
retval = unshare_files();
if (retval)
goto out;
/*
* Must be called _before_ exec_mmap() as bprm->mm is
* not visible until then. Doing it here also ensures
* we don't race against replace_mm_exe_file().
*/
retval = set_mm_exe_file(bprm->mm, bprm->file);
if (retval)
goto out;
/* If the binary is not readable then enforce mm->dumpable=0 */
would_dump(bprm, bprm->file);
if (bprm->have_execfd)
would_dump(bprm, bprm->executable);
/*
* Release all of the old mmap stuff
*/
acct_arg_size(bprm, 0);
retval = exec_mmap(bprm->mm);
if (retval)
goto out;
bprm->mm = NULL;
retval = exec_task_namespaces();
if (retval)
goto out_unlock;
#ifdef CONFIG_POSIX_TIMERS
spin_lock_irq(&me->sighand->siglock);
posix_cpu_timers_exit(me);
spin_unlock_irq(&me->sighand->siglock);
exit_itimers(me);
flush_itimer_signals();
#endif
/*
* Make the signal table private.
*/
retval = unshare_sighand(me);
if (retval)
goto out_unlock;
me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC |
PF_NOFREEZE | PF_NO_SETAFFINITY);
flush_thread();
me->personality &= ~bprm->per_clear;
clear_syscall_work_syscall_user_dispatch(me);
/*
* We have to apply CLOEXEC before we change whether the process is
* dumpable (in setup_new_exec) to avoid a race with a process in userspace
* trying to access the should-be-closed file descriptors of a process
* undergoing exec(2).
*/
do_close_on_exec(me->files);
if (bprm->secureexec) {
/* Make sure parent cannot signal privileged process. */
me->pdeath_signal = 0;
/*
* For secureexec, reset the stack limit to sane default to
* avoid bad behavior from the prior rlimits. This has to
* happen before arch_pick_mmap_layout(), which examines
* RLIMIT_STACK, but after the point of no return to avoid
* needing to clean up the change on failure.
*/
if (bprm->rlim_stack.rlim_cur > _STK_LIM)
bprm->rlim_stack.rlim_cur = _STK_LIM;
}
me->sas_ss_sp = me->sas_ss_size = 0;
/*
* Figure out dumpability. Note that checking only current here
* is wrong, but userspace depends on it. This should be testing
* bprm->secureexec instead.
*/
if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
!(uid_eq(current_euid(), current_uid()) &&
gid_eq(current_egid(), current_gid())))
set_dumpable(current->mm, suid_dumpable);
else
set_dumpable(current->mm, SUID_DUMP_USER);
perf_event_exec();
/*
* If the original filename was empty, alloc_bprm() made up a path
* that will probably not be useful to admins running ps or similar.
* Let's fix it up to be something reasonable.
*/
if (bprm->comm_from_dentry) {
/*
* Hold RCU lock to keep the name from being freed behind our back.
* Use acquire semantics to make sure the terminating NUL from
* __d_alloc() is seen.
*
* Note, we're deliberately sloppy here. We don't need to care about
* detecting a concurrent rename and just want a terminated name.
*/
rcu_read_lock();
__set_task_comm(me, smp_load_acquire(&bprm->file->f_path.dentry->d_name.name),
true);
rcu_read_unlock();
} else {
__set_task_comm(me, kbasename(bprm->filename), true);
}
/* An exec changes our domain. We are no longer part of the thread
group */
WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
flush_signal_handlers(me, 0);
retval = set_cred_ucounts(bprm->cred);
if (retval < 0)
goto out_unlock;
/*
* install the new credentials for this executable
*/
security_bprm_committing_creds(bprm);
commit_creds(bprm->cred);
bprm->cred = NULL;
/*
* Disable monitoring for regular users
* when executing setuid binaries. Must
* wait until new credentials are committed
* by commit_creds() above
*/
if (get_dumpable(me->mm) != SUID_DUMP_USER)
perf_event_exit_task(me);
/*
* cred_guard_mutex must be held at least to this point to prevent
* ptrace_attach() from altering our determination of the task's
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
/* Pass the opened binary to the interpreter. */
if (bprm->have_execfd) {
retval = get_unused_fd_flags(0);
if (retval < 0)
goto out_unlock;
fd_install(retval, bprm->executable);
bprm->executable = NULL;
bprm->execfd = retval;
}
return 0;
out_unlock:
up_write(&me->signal->exec_update_lock);
if (!bprm->cred)
mutex_unlock(&me->signal->cred_guard_mutex);
out:
return retval;
}
EXPORT_SYMBOL(begin_new_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
struct inode *inode = file_inode(file);
struct mnt_idmap *idmap = file_mnt_idmap(file);
if (inode_permission(idmap, inode, MAY_READ) < 0) {
struct user_namespace *old, *user_ns;
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
/* Ensure mm->user_ns contains the executable */
user_ns = old = bprm->mm->user_ns;
while ((user_ns != &init_user_ns) &&
!privileged_wrt_inode_uidgid(user_ns, idmap, inode))
user_ns = user_ns->parent;
if (old != user_ns) {
bprm->mm->user_ns = get_user_ns(user_ns);
put_user_ns(old);
}
}
}
EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
/* Setup things that can depend upon the personality */
struct task_struct *me = current;
arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
arch_setup_new_exec();
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
*/
me->mm->task_size = TASK_SIZE;
up_write(&me->signal->exec_update_lock);
mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);
/* Runs immediately before start_thread() takes over. */
void finalize_exec(struct linux_binprm *bprm)
{
/* Store any stack rlimit changes before starting thread. */
task_lock(current->group_leader);
current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
task_unlock(current->group_leader);
}
EXPORT_SYMBOL(finalize_exec);
/*
* Prepare credentials and lock ->cred_guard_mutex.
* setup_new_exec() commits the new creds and drops the lock.
* Or, if exec fails before, free_bprm() should release ->cred
* and unlock.
*/
static int prepare_bprm_creds(struct linux_binprm *bprm)
{
if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
return -ERESTARTNOINTR;
bprm->cred = prepare_exec_creds();
if (likely(bprm->cred))
return 0;
mutex_unlock(&current->signal->cred_guard_mutex);
return -ENOMEM;
}
/* Matches do_open_execat() */
static void do_close_execat(struct file *file)
{
if (!file)
return;
exe_file_allow_write_access(file);
fput(file);
}
static void free_bprm(struct linux_binprm *bprm)
{
if (bprm->mm) {
acct_arg_size(bprm, 0);
mmput(bprm->mm);
}
free_arg_pages(bprm);
if (bprm->cred) {
/* in case exec fails before de_thread() succeeds */
current->fs->in_exec = 0;
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
do_close_execat(bprm->file);
if (bprm->executable)
fput(bprm->executable);
/* If a binfmt changed the interp, free it. */
if (bprm->interp != bprm->filename)
kfree(bprm->interp);
kfree(bprm->fdpath);
kfree(bprm);
}
static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int flags)
{
struct linux_binprm *bprm;
struct file *file;
int retval = -ENOMEM;
file = do_open_execat(fd, filename, flags);
if (IS_ERR(file))
return ERR_CAST(file);
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm) {
do_close_execat(file);
return ERR_PTR(-ENOMEM);
}
bprm->file = file;
if (fd == AT_FDCWD || filename->name[0] == '/') {
bprm->filename = filename->name;
} else {
if (filename->name[0] == '\0') {
bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
bprm->comm_from_dentry = 1;
} else {
bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
fd, filename->name);
}
if (!bprm->fdpath)
goto out_free;
/*
* Record that a name derived from an O_CLOEXEC fd will be
* inaccessible after exec. This allows the code in exec to
* choose to fail when the executable is not mmapped into the
* interpreter and an open file descriptor is not passed to
* the interpreter. This makes for a better user experience
* than having the interpreter start and then immediately fail
* when it finds the executable is inaccessible.
*/
if (get_close_on_exec(fd))
bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
bprm->filename = bprm->fdpath;
}
bprm->interp = bprm->filename;
/*
* At this point, security_file_open() has already been called (with
* __FMODE_EXEC) and access control checks for AT_EXECVE_CHECK will
* stop just after the security_bprm_creds_for_exec() call in
* bprm_execve(). Indeed, the kernel should not try to parse the
* content of the file with exec_binprm() nor change the calling
* thread, which means that the following security functions will not
* be called:
* - security_bprm_check()
* - security_bprm_creds_from_file()
* - security_bprm_committing_creds()
* - security_bprm_committed_creds()
*/
bprm->is_check = !!(flags & AT_EXECVE_CHECK);
retval = bprm_mm_init(bprm);
if (!retval)
return bprm;
out_free:
free_bprm(bprm);
return ERR_PTR(retval);
}
int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
/* If a binfmt changed the interp, free it first. */
if (bprm->interp != bprm->filename)
kfree(bprm->interp);
bprm->interp = kstrdup(interp, GFP_KERNEL);
if (!bprm->interp)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
/*
* determine how safe it is to execute the proposed program
* - the caller must hold ->cred_guard_mutex to protect against
* PTRACE_ATTACH or seccomp thread-sync
*/
static void check_unsafe_exec(struct linux_binprm *bprm)
{
struct task_struct *p = current, *t;
unsigned n_fs;
if (p->ptrace)
bprm->unsafe |= LSM_UNSAFE_PTRACE;
/*
* This isn't strictly necessary, but it makes it harder for LSMs to
* mess up.
*/
if (task_no_new_privs(current))
bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
/*
* If another task is sharing our fs, we cannot safely
* suid exec because the differently privileged task
* will be able to manipulate the current directory, etc.
* It would be nice to force an unshare instead...
*
* Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS)
* from another sub-thread until de_thread() succeeds; this
* state is protected by the cred_guard_mutex we hold.
*/
n_fs = 1;
read_seqlock_excl(&p->fs->seq);
rcu_read_lock();
for_other_threads(p, t) {
if (t->fs == p->fs)
n_fs++;
}
rcu_read_unlock();
/* "users" and "in_exec" locked for copy_fs() */
if (p->fs->users > n_fs)
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
p->fs->in_exec = 1;
read_sequnlock_excl(&p->fs->seq);
}
static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
/* Handle suid and sgid on files */
struct mnt_idmap *idmap;
struct inode *inode = file_inode(file);
unsigned int mode;
vfsuid_t vfsuid;
vfsgid_t vfsgid;
int err;
if (!mnt_may_suid(file->f_path.mnt))
return;
if (task_no_new_privs(current))
return;
mode = READ_ONCE(inode->i_mode);
if (!(mode & (S_ISUID|S_ISGID)))
return;
idmap = file_mnt_idmap(file);
/* Be careful if suid/sgid is set */
inode_lock(inode);
/* Atomically reload and check mode/uid/gid now that lock held. */
mode = inode->i_mode;
vfsuid = i_uid_into_vfsuid(idmap, inode);
vfsgid = i_gid_into_vfsgid(idmap, inode);
err = inode_permission(idmap, inode, MAY_EXEC);
inode_unlock(inode);
/* Did the exec bit vanish out from under us? Give up. */
if (err)
return;
/* We ignore suid/sgid if there are no mappings for them in the ns */
if (!vfsuid_has_mapping(bprm->cred->user_ns, vfsuid) ||
!vfsgid_has_mapping(bprm->cred->user_ns, vfsgid))
return;
if (mode & S_ISUID) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->euid = vfsuid_into_kuid(vfsuid);
}
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->egid = vfsgid_into_kgid(vfsgid);
}
}
/*
* Compute bprm->cred based upon the final binary.
*/
static int bprm_creds_from_file(struct linux_binprm *bprm)
{
/* Compute creds based on which file? */
struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
bprm_fill_uid(bprm, file);
return security_bprm_creds_from_file(bprm, file);
}
/*
* Fill the binprm structure from the inode.
* Read the first BINPRM_BUF_SIZE bytes
*
* This may be called multiple times for binary chains (scripts for example).
*/
static int prepare_binprm(struct linux_binprm *bprm)
{
loff_t pos = 0;
memset(bprm->buf, 0, BINPRM_BUF_SIZE);
return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}
/*
* Arguments are '\0' separated strings found at the location bprm->p
* points to; chop off the first by relocating bprm->p to right after
* the first '\0' encountered.
*/
int remove_arg_zero(struct linux_binprm *bprm)
{
unsigned long offset;
char *kaddr;
struct page *page;
if (!bprm->argc)
return 0;
do {
offset = bprm->p & ~PAGE_MASK;
page = get_arg_page(bprm, bprm->p, 0);
if (!page)
return -EFAULT;
kaddr = kmap_local_page(page);
for (; offset < PAGE_SIZE && kaddr[offset];
offset++, bprm->p++)
;
kunmap_local(kaddr);
put_arg_page(page);
} while (offset == PAGE_SIZE);
bprm->p++;
bprm->argc--;
return 0;
}
EXPORT_SYMBOL(remove_arg_zero);
/*
* cycle through the list of binary format handlers until one recognizes the image
*/
static int search_binary_handler(struct linux_binprm *bprm)
{
struct linux_binfmt *fmt;
int retval;
retval = prepare_binprm(bprm);
if (retval < 0)
return retval;
retval = security_bprm_check(bprm);
if (retval)
return retval;
read_lock(&binfmt_lock);
list_for_each_entry(fmt, &formats, lh) {
if (!try_module_get(fmt->module))
continue;
read_unlock(&binfmt_lock);
retval = fmt->load_binary(bprm);
read_lock(&binfmt_lock);
put_binfmt(fmt);
if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
read_unlock(&binfmt_lock);
return retval;
}
}
read_unlock(&binfmt_lock);
return -ENOEXEC;
}
/* binfmt handlers will call back into begin_new_exec() on success. */
static int exec_binprm(struct linux_binprm *bprm)
{
pid_t old_pid, old_vpid;
int ret, depth;
/* Need to fetch pid before load_binary changes it */
old_pid = current->pid;
rcu_read_lock();
old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
rcu_read_unlock();
/* This allows 4 levels of binfmt rewrites before failing hard. */
for (depth = 0;; depth++) {
struct file *exec;
if (depth > 5)
return -ELOOP;
ret = search_binary_handler(bprm);
if (ret < 0)
return ret;
if (!bprm->interpreter)
break;
exec = bprm->file;
bprm->file = bprm->interpreter;
bprm->interpreter = NULL;
exe_file_allow_write_access(exec);
if (unlikely(bprm->have_execfd)) {
if (bprm->executable) {
fput(exec);
return -ENOEXEC;
}
bprm->executable = exec;
} else
fput(exec);
}
audit_bprm(bprm);
trace_sched_process_exec(current, old_pid, bprm);
ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
proc_exec_connector(current);
return 0;
}
static int bprm_execve(struct linux_binprm *bprm)
{
int retval;
retval = prepare_bprm_creds(bprm);
if (retval)
return retval;
/*
* Check for unsafe execution states before exec_binprm(), which
* will call back into begin_new_exec(), into bprm_creds_from_file(),
* where setuid-ness is evaluated.
*/
check_unsafe_exec(bprm);
current->in_execve = 1;
sched_mm_cid_before_execve(current);
sched_exec();
/* Set the unchanging part of bprm->cred */
retval = security_bprm_creds_for_exec(bprm);
if (retval || bprm->is_check)
goto out;
retval = exec_binprm(bprm);
if (retval < 0)
goto out;
sched_mm_cid_after_execve(current);
rseq_execve(current);
/* execve succeeded */
current->in_execve = 0;
user_events_execve(current);
acct_update_integrals(current);
task_numa_free(current, false);
return retval;
out:
/*
* If past the point of no return ensure the code never
* returns to the userspace process. Use an existing fatal
* signal if present otherwise terminate the process with
* SIGSEGV.
*/
if (bprm->point_of_no_return && !fatal_signal_pending(current))
force_fatal_sig(SIGSEGV);
sched_mm_cid_after_execve(current);
rseq_set_notify_resume(current);
current->in_execve = 0;
return retval;
}
static int do_execveat_common(int fd, struct filename *filename,
struct user_arg_ptr argv,
struct user_arg_ptr envp,
int flags)
{
struct linux_binprm *bprm;
int retval;
if (IS_ERR(filename))
return PTR_ERR(filename);
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
* don't check setuid() return code. Here we additionally recheck
* whether NPROC limit is still exceeded.
*/
if ((current->flags & PF_NPROC_EXCEEDED) &&
is_rlimit_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
retval = -EAGAIN;
goto out_ret;
}
/* We're below the limit (still or again), so we don't want to make
* further execve() calls fail. */
current->flags &= ~PF_NPROC_EXCEEDED;
bprm = alloc_bprm(fd, filename, flags);
if (IS_ERR(bprm)) {
retval = PTR_ERR(bprm);
goto out_ret;
}
retval = count(argv, MAX_ARG_STRINGS);
if (retval < 0)
goto out_free;
bprm->argc = retval;
retval = count(envp, MAX_ARG_STRINGS);
if (retval < 0)
goto out_free;
bprm->envc = retval;
retval = bprm_stack_limits(bprm);
if (retval < 0)
goto out_free;
retval = copy_string_kernel(bprm->filename, bprm);
if (retval < 0)
goto out_free;
bprm->exec = bprm->p;
retval = copy_strings(bprm->envc, envp, bprm);
if (retval < 0)
goto out_free;
retval = copy_strings(bprm->argc, argv, bprm);
if (retval < 0)
goto out_free;
/*
* When argv is empty, add an empty string ("") as argv[0] to
* ensure confused userspace programs that start processing
* from argv[1] won't end up walking envp. See also
* bprm_stack_limits().
*/
if (bprm->argc == 0) {
retval = copy_string_kernel("", bprm);
if (retval < 0)
goto out_free;
bprm->argc = 1;
pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
current->comm, bprm->filename);
}
retval = bprm_execve(bprm);
out_free:
free_bprm(bprm);
out_ret:
putname(filename);
return retval;
}
int kernel_execve(const char *kernel_filename,
const char *const *argv, const char *const *envp)
{
struct filename *filename;
struct linux_binprm *bprm;
int fd = AT_FDCWD;
int retval;
/* It makes no sense for kernel threads to call execve */
if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
return -EINVAL;
filename = getname_kernel(kernel_filename);
if (IS_ERR(filename))
return PTR_ERR(filename);
bprm = alloc_bprm(fd, filename, 0);
if (IS_ERR(bprm)) {
retval = PTR_ERR(bprm);
goto out_ret;
}
retval = count_strings_kernel(argv);
if (WARN_ON_ONCE(retval == 0))
retval = -EINVAL;
if (retval < 0)
goto out_free;
bprm->argc = retval;
retval = count_strings_kernel(envp);
if (retval < 0)
goto out_free;
bprm->envc = retval;
retval = bprm_stack_limits(bprm);
if (retval < 0)
goto out_free;
retval = copy_string_kernel(bprm->filename, bprm);
if (retval < 0)
goto out_free;
bprm->exec = bprm->p;
retval = copy_strings_kernel(bprm->envc, envp, bprm);
if (retval < 0)
goto out_free;
retval = copy_strings_kernel(bprm->argc, argv, bprm);
if (retval < 0)
goto out_free;
retval = bprm_execve(bprm);
out_free:
free_bprm(bprm);
out_ret:
putname(filename);
return retval;
}
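/*
 * Example caller (a sketch in the style of run_init_process() in init/main.c;
 * the argv/envp contents are illustrative):
 *
 * static const char *argv_init[] = { "/sbin/init", NULL };
 * static const char *envp_init[] = { "HOME=/", "TERM=linux", NULL };
 *
 * ret = kernel_execve("/sbin/init", argv_init, envp_init);
 */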
static int do_execve(struct filename *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
struct user_arg_ptr argv = { .ptr.native = __argv };
struct user_arg_ptr envp = { .ptr.native = __envp };
return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}
static int do_execveat(int fd, struct filename *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp,
int flags)
{
struct user_arg_ptr argv = { .ptr.native = __argv };
struct user_arg_ptr envp = { .ptr.native = __envp };
return do_execveat_common(fd, filename, argv, envp, flags);
}
#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
const compat_uptr_t __user *__argv,
const compat_uptr_t __user *__envp)
{
struct user_arg_ptr argv = {
.is_compat = true,
.ptr.compat = __argv,
};
struct user_arg_ptr envp = {
.is_compat = true,
.ptr.compat = __envp,
};
return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}
static int compat_do_execveat(int fd, struct filename *filename,
const compat_uptr_t __user *__argv,
const compat_uptr_t __user *__envp,
int flags)
{
struct user_arg_ptr argv = {
.is_compat = true,
.ptr.compat = __argv,
};
struct user_arg_ptr envp = {
.is_compat = true,
.ptr.compat = __envp,
};
return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif
void set_binfmt(struct linux_binfmt *new)
{
struct mm_struct *mm = current->mm;
if (mm->binfmt)
module_put(mm->binfmt->module);
mm->binfmt = new;
if (new)
__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
/*
* set_dumpable stores three-value SUID_DUMP_* into mm->flags.
*/
void set_dumpable(struct mm_struct *mm, int value)
{
if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
return;
__mm_flags_set_mask_dumpable(mm, value);
}
SYSCALL_DEFINE3(execve,
const char __user *, filename,
const char __user *const __user *, argv,
const char __user *const __user *, envp)
{
return do_execve(getname(filename), argv, envp);
}
SYSCALL_DEFINE5(execveat,
int, fd, const char __user *, filename,
const char __user *const __user *, argv,
const char __user *const __user *, envp,
int, flags)
{
return do_execveat(fd,
getname_uflags(filename, flags),
argv, envp, flags);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
const compat_uptr_t __user *, argv,
const compat_uptr_t __user *, envp)
{
return compat_do_execve(getname(filename), argv, envp);
}
COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
const char __user *, filename,
const compat_uptr_t __user *, argv,
const compat_uptr_t __user *, envp,
int, flags)
{
return compat_do_execveat(fd,
getname_uflags(filename, flags),
argv, envp, flags);
}
#endif
#ifdef CONFIG_SYSCTL
static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!error && write)
validate_coredump_safety();
return error;
}
static const struct ctl_table fs_exec_sysctls[] = {
{
.procname = "suid_dumpable",
.data = &suid_dumpable,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax_coredump,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
};
static int __init init_fs_exec_sysctls(void)
{
register_sysctl_init("fs", fs_exec_sysctls);
return 0;
}
fs_initcall(init_fs_exec_sysctls);
#endif /* CONFIG_SYSCTL */
#ifdef CONFIG_EXEC_KUNIT_TEST
#include "tests/exec_kunit.c"
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
*
* here's the role of the various spinlock/rwlock related include files:
*
* on SMP builds:
*
* asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types_raw:
* The raw types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
*
* linux/spinlock_api_smp.h:
* contains the prototypes for the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*
* on UP builds:
*
* linux/spinlock_type_up.h:
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
* linux/spinlock_types_raw:
* The raw RT types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* linux/spinlock_up.h:
* contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
* (included on UP-non-debug builds:)
*
* linux/spinlock_api_up.h:
* builds the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*/
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
/*
* Must define these before including other files, inline functions need them
*/
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
extra \
".ifndef " LOCK_SECTION_NAME "\n\t" \
LOCK_SECTION_NAME ":\n\t" \
".endif\n"
#define LOCK_SECTION_END \
".previous\n\t"
#define __lockfunc __section(".spinlock.text")
/*
* Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
* Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key, short inner);
# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
} while (0)
#else
# define raw_spin_lock_init(lock) \
do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock) (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
/*
* smp_mb__after_spinlock() provides the equivalent of a full memory barrier
* between program-order earlier lock acquisitions and program-order later
* memory accesses.
*
* This guarantees that the following two properties hold:
*
* 1) Given the snippet:
*
* { X = 0; Y = 0; }
*
* CPU0 CPU1
*
* WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1);
* spin_lock(S); smp_mb();
* smp_mb__after_spinlock(); r1 = READ_ONCE(X);
* r0 = READ_ONCE(Y);
* spin_unlock(S);
*
* it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
* and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
* preceding the call to smp_mb__after_spinlock() in __schedule() and in
* try_to_wake_up().
*
* 2) Given the snippet:
*
* { X = 0; Y = 0; }
*
* CPU0 CPU1 CPU2
*
* spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y);
* WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb();
* spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X);
* WRITE_ONCE(Y, 1);
* spin_unlock(S);
*
* it is forbidden that CPU0's critical section executes before CPU1's
* critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
* and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
* preceding the calls to smp_rmb() in try_to_wake_up() for similar
* snippets but "projected" onto two CPUs.
*
* Property (2) upgrades the lock to an RCsc lock.
*
* Since most load-store architectures implement ACQUIRE with an smp_mb() after
* the LL/SC loop, they need no further barriers. Similarly all our TSO
* architectures imply an smp_mb() for each atomic instruction and equally don't
* need more.
*
* Architectures that can implement ACQUIRE better need to take care.
*/
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock() kcsan_mb()
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
__acquire(lock);
arch_spin_lock(&lock->raw_lock);
mmiowb_spin_lock();
}
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&(lock)->raw_lock);
if (ret)
mmiowb_spin_lock();
return ret;
}
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
mmiowb_spin_unlock();
arch_spin_unlock(&lock->raw_lock);
__release(lock);
}
#endif
/*
* Define the various spin_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
* various methods are defined as nops in the case they are not
* required.
*/
#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
/*
* Always evaluate the 'subclass' argument to avoid that the compiler
* warns about set-but-not-used variables when building with
* CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
*/
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_lock_irqsave(lock, flags); \
} while (0)
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
raw_spin_lock_irqsave(lock, flags)
#endif
#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
#define raw_spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
#define raw_spin_trylock_bh(lock) \
__cond_lock(lock, _raw_spin_trylock_bh(lock))
#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
*/
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif
/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
#ifdef CONFIG_DEBUG_SPINLOCK
# define spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__raw_spin_lock_init(spinlock_check(lock), \
#lock, &__key, LD_WAIT_CONFIG); \
} while (0)
#else
# define spin_lock_init(_lock) \
do { \
spinlock_check(_lock); \
*(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
} while (0)
#endif
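/*
 * Illustrative sketch (hypothetical struct and field names): a file-scope
 * lock can be defined statically with DEFINE_SPINLOCK(), while a lock
 * embedded in a dynamically allocated object must be initialized with
 * spin_lock_init() before first use so that, under CONFIG_DEBUG_SPINLOCK,
 * it gets its own lockdep class key.
 *
 *	static DEFINE_SPINLOCK(my_static_lock);
 *
 *	struct my_obj {
 *		spinlock_t lock;
 *		int counter;
 *	};
 *
 *	struct my_obj *my_obj_alloc(gfp_t gfp)
 *	{
 *		struct my_obj *obj = kzalloc(sizeof(*obj), gfp);
 *
 *		if (obj)
 *			spin_lock_init(&obj->lock);
 *		return obj;
 *	}
 */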
static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
#define spin_lock_nested(lock, subclass) \
do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
#define spin_trylock_irqsave(lock, flags) \
({ \
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
/**
* spin_is_locked() - Check whether a spinlock is locked.
* @lock: Pointer to the spinlock.
*
* This function is NOT required to provide any memory ordering
* guarantees; it could be used for debugging purposes or, when
* additional synchronization is needed, accompanied with other
* constructs (memory barriers) enforcing the synchronization.
*
* Returns: 1 if @lock is locked, 0 otherwise.
*
* Note that the function only tells you that the spinlock is
* seen to be locked, not that it is locked on your CPU.
*
* Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
* the return value is always 0 (see include/linux/spinlock_up.h).
* Therefore you should not rely heavily on the return value.
*/
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
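/*
 * Illustrative note: because spin_is_locked() provides no ordering
 * guarantees and always returns 0 on UP non-debug builds, it is only
 * suitable for opportunistic checks; a helper that requires its caller to
 * hold the lock should assert that instead, e.g. (hypothetical names):
 *
 *	static void my_helper(struct my_obj *obj)
 *	{
 *		assert_spin_locked(&obj->lock);
 *		// or, with lockdep: lockdep_assert_held(&obj->lock);
 *		obj->counter++;
 *	}
 */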
#else /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPTION,
* but a general need for low latency)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
if (!preempt_model_preemptible())
return 0;
return spin_is_contended(lock);
}
/*
* Check if a rwlock is contended.
* Returns non-zero if there is another task waiting on the rwlock.
* Returns zero if the lock is not contended or the system / underlying
* rwlock implementation does not support contention detection.
* Technically does not depend on CONFIG_PREEMPTION, but a general need
* for low latency.
*/
static inline int rwlock_needbreak(rwlock_t *lock)
{
if (!preempt_model_preemptible())
return 0;
return rwlock_is_contended(lock);
}
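/*
 * Illustrative sketch (hypothetical list and lock names): a long critical
 * section can use spin_needbreak() together with need_resched() to
 * cooperatively drop and re-take the lock when another task is waiting:
 *
 *	spin_lock(&ctx->lock);
 *	list_for_each_entry(item, &ctx->items, node) {
 *		process(item);
 *		if (spin_needbreak(&ctx->lock) || need_resched()) {
 *			spin_unlock(&ctx->lock);
 *			cond_resched();
 *			spin_lock(&ctx->lock);
 *			// real code must revalidate the iteration cursor here
 *		}
 *	}
 *	spin_unlock(&ctx->lock);
 */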
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
*/
#include <linux/atomic.h>
/**
* atomic_dec_and_lock - lock on reaching reference count zero
* @atomic: the atomic counter
* @lock: the spinlock in question
*
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
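/*
 * Illustrative sketch (hypothetical object, list and lock names): the
 * typical use of atomic_dec_and_lock() is dropping the final reference to
 * an object that is also reachable through a locked list, so that the
 * refcount reaching zero and the unlink happen atomically w.r.t. lookups:
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_lock(&obj->refcnt, &my_obj_list_lock))
 *			return;			// not the last reference
 *		list_del(&obj->node);		// last reference: unlink
 *		spin_unlock(&my_obj_list_lock);
 *		kfree(obj);
 *	}
 */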
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
#define atomic_dec_and_raw_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
unsigned long *flags);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp, const char *name,
struct lock_class_key *key);
#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \
({ \
static struct lock_class_key key; \
int ret; \
\
ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \
cpu_mult, gfp, #locks, &key); \
ret; \
})
void free_bucket_spinlocks(spinlock_t *locks);
DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
raw_spin_lock_bh(_T->lock),
raw_spin_unlock_bh(_T->lock))
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
raw_spin_trylock_irqsave(_T->lock, _T->flags))
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
spin_lock_bh(_T->lock),
spin_unlock_bh(_T->lock))
DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
spin_trylock_bh(_T->lock))
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
spin_trylock_irqsave(_T->lock, _T->flags))
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))
DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
read_lock_irq(_T->lock),
read_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
read_lock_irqsave(_T->lock, _T->flags),
read_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
write_lock(_T->lock),
write_unlock(_T->lock))
DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
write_lock_irq(_T->lock),
write_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
write_lock_irqsave(_T->lock, _T->flags),
write_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
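/*
 * Illustrative sketch (hypothetical data): the guard classes defined above
 * pair with the cleanup.h helpers, so the unlock is emitted automatically
 * when the scope is left, including on early returns:
 *
 *	static int update_counter(struct my_obj *obj, int delta)
 *	{
 *		guard(spinlock_irqsave)(&obj->lock);
 *
 *		if (obj->dead)
 *			return -ENODEV;		// lock dropped automatically
 *		obj->counter += delta;
 *		return 0;
 *	}
 *
 * or, to limit the critical section to an inner scope:
 *
 *	scoped_guard(spinlock, &obj->lock)
 *		obj->counter++;
 */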
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
/*
* Generic process-grouping system.
*
* Based originally on the cpuset system, extracted by Paul Menage
* Copyright (C) 2006 Google, Inc
*
* Notifications support
* Copyright (C) 2009 Nokia Corporation
* Author: Kirill A. Shutemov
*
* Copyright notices from the original cpuset code:
* --------------------------------------------------
* Copyright (C) 2003 BULL SA.
* Copyright (C) 2004-2006 Silicon Graphics, Inc.
*
* Portions derived from Patrick Mochel's sysfs code.
* sysfs is Copyright (c) 2001-3 Patrick Mochel
*
* 2003-10-10 Written by Simon Derr.
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
* ---------------------------------------------------
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "cgroup-internal.h"
#include <linux/bpf-cgroup.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/hashtable.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/psi.h>
#include <linux/nstree.h>
#include <net/sock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>
#define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
MAX_CFTYPE_NAME + 2)
/* let's not notify more than 100 times per second */
#define CGROUP_FILE_NOTIFY_MIN_INTV DIV_ROUND_UP(HZ, 100)
/*
* To avoid confusing the compiler (and generating warnings) with code
* that attempts to access what would be a 0-element array (i.e. sized
* to a potentially empty array when CGROUP_SUBSYS_COUNT == 0), this
* constant expression can be added.
*/
#define CGROUP_HAS_SUBSYS_CONFIG (CGROUP_SUBSYS_COUNT > 0)
/*
* cgroup_mutex is the master lock. Any modification to cgroup or its
* hierarchy must be performed while holding it.
*
* css_set_lock protects task->cgroups pointer, the list of css_set
* objects, and the chain of tasks off each css_set.
*
* These locks are exported if CONFIG_PROVE_RCU so that accessors in
* cgroup.h can use them for lockdep annotations.
*/
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);
#if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP)
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif
struct blocking_notifier_head cgroup_lifetime_notifier =
BLOCKING_NOTIFIER_INIT(cgroup_lifetime_notifier);
DEFINE_SPINLOCK(trace_cgroup_path_lock);
char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
static bool cgroup_debug __read_mostly;
/*
* Protects cgroup_idr and css_idr so that IDs can be released without
* grabbing cgroup_mutex.
*/
static DEFINE_SPINLOCK(cgroup_idr_lock);
/*
* Protects cgroup_file->kn for !self csses. It synchronizes notifications
* against file removal/re-creation across css hiding.
*/
static DEFINE_SPINLOCK(cgroup_file_kn_lock);
DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
#define cgroup_assert_mutex_or_rcu_locked() \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&cgroup_mutex), \
"cgroup_mutex or RCU read lock required");
/*
* cgroup destruction makes heavy use of work items and there can be a lot
* of concurrent destructions. Use a separate workqueue so that cgroup
* destruction work items don't end up filling up max_active of system_percpu_wq
* which may lead to deadlock.
*
* A cgroup destruction should enqueue work sequentially to:
* cgroup_offline_wq: use for css offline work
* cgroup_release_wq: use for css release work
* cgroup_free_wq: use for free work
*
* Rationale for using separate workqueues:
* The cgroup root free work may depend on completion of other css offline
* operations. If all tasks were enqueued to a single workqueue, this could
* create a deadlock scenario where:
* - Free work waits for other css offline work to complete.
* - But other css offline work is queued after free work in the same queue.
*
* Example deadlock scenario with single workqueue (cgroup_destroy_wq):
* 1. umount net_prio
* 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
* 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
* 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
* 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
* which can never complete as it's behind in the same queue and
* workqueue's max_active is 1.
*/
static struct workqueue_struct *cgroup_offline_wq;
static struct workqueue_struct *cgroup_release_wq;
static struct workqueue_struct *cgroup_free_wq;
/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x) \
DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key); \
DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key); \
EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key); \
EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
static DEFINE_PER_CPU(struct css_rstat_cpu, root_rstat_cpu);
static DEFINE_PER_CPU(struct cgroup_rstat_base_cpu, root_rstat_base_cpu);
/* the default hierarchy */
struct cgroup_root cgrp_dfl_root = {
.cgrp.self.rstat_cpu = &root_rstat_cpu,
.cgrp.rstat_base_cpu = &root_rstat_base_cpu,
};
EXPORT_SYMBOL_GPL(cgrp_dfl_root);
/*
* The default hierarchy always exists but is hidden until mounted for the
* first time. This is for backward compatibility.
*/
bool cgrp_dfl_visible;
/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;
/* some controllers are implicitly enabled on the default hierarchy */
static u16 cgrp_dfl_implicit_ss_mask;
/* some controllers can be threaded on the default hierarchy */
static u16 cgrp_dfl_threaded_ss_mask;
/* The list of hierarchy roots */
LIST_HEAD(cgroup_roots);
static int cgroup_root_count;
/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);
/*
* Assign a monotonically increasing serial number to csses. It guarantees
* cgroups with bigger numbers are newer than those with smaller numbers.
* Also, as csses are always appended to the parent's ->children list, it
* guarantees that sibling csses are always sorted in the ascending serial
* number order on the list. Protected by cgroup_mutex.
*/
static u64 css_serial_nr_next = 1;
/*
* These bitmasks identify subsystems with specific features to avoid
* having to do iterative checks repeatedly.
*/
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
static u16 have_release_callback __read_mostly;
static u16 have_canfork_callback __read_mostly;
static bool have_favordynmods __ro_after_init = IS_ENABLED(CONFIG_CGROUP_FAVOR_DYNMODS);
/*
* Write protected by cgroup_mutex and write-lock of cgroup_threadgroup_rwsem,
* read protected by either.
*
* Can only be turned on, but not turned off.
*/
bool cgroup_enable_per_threadgroup_rwsem __read_mostly;
/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
.ns.__ns_ref = REFCOUNT_INIT(2),
.user_ns = &init_user_ns,
.ns.ops = &cgroupns_operations,
.ns.inum = ns_init_inum(&init_cgroup_ns),
.root_cset = &init_css_set,
.ns.ns_type = ns_common_type(&init_cgroup_ns),
};
static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_base_files[];
static struct cftype cgroup_psi_files[];
/* cgroup optional features */
enum cgroup_opt_features {
#ifdef CONFIG_PSI
OPT_FEATURE_PRESSURE,
#endif
OPT_FEATURE_COUNT
};
static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = {
#ifdef CONFIG_PSI
"pressure",
#endif
};
static u16 cgroup_feature_disable_mask __read_mostly;
static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_skip(struct css_task_iter *it,
struct task_struct *task);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
struct cgroup_subsys *ss);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
struct cgroup *cgrp, struct cftype cfts[],
bool is_add);
#ifdef CONFIG_DEBUG_CGROUP_REF
#define CGROUP_REF_FN_ATTRS noinline
#define CGROUP_REF_EXPORT(fn) EXPORT_SYMBOL_GPL(fn);
#include <linux/cgroup_refcnt.h>
#endif
/**
* cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
* @ssid: subsys ID of interest
*
* cgroup_subsys_enabled() can only be used with literal subsys names which
* is fine for individual subsystems but unsuitable for cgroup core. This
 * is a slower static_key_enabled()-based test indexed by @ssid.
*/
bool cgroup_ssid_enabled(int ssid)
{
if (!CGROUP_HAS_SUBSYS_CONFIG)
return false;
return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}
/**
* cgroup_on_dfl - test whether a cgroup is on the default hierarchy
* @cgrp: the cgroup of interest
*
* The default hierarchy is the v2 interface of cgroup and this function
* can be used to test whether a cgroup is on the default hierarchy for
* cases where a subsystem should behave differently depending on the
* interface version.
*
* List of changed behaviors:
*
* - Mount options "noprefix", "xattr", "clone_children", "release_agent"
* and "name" are disallowed.
*
* - When mounting an existing superblock, mount options should match.
*
* - rename(2) is disallowed.
*
* - "tasks" is removed. Everything should be at process granularity. Use
* "cgroup.procs" instead.
*
* - "cgroup.procs" is not sorted. pids will be unique unless they got
* recycled in-between reads.
*
* - "release_agent" and "notify_on_release" are removed. Replacement
* notification mechanism will be implemented.
*
* - "cgroup.clone_children" is removed.
*
* - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
* and its descendants contain no task; otherwise, 1. The file also
* generates kernfs notification which can be monitored through poll and
* [di]notify when the value of the file changes.
*
* - cpuset: tasks will be kept in empty cpusets when hotplug happens and
* take masks of ancestors with non-empty cpus/mems, instead of being
* moved to an ancestor.
*
* - cpuset: a task can be moved into an empty cpuset, and again it takes
* masks of ancestors.
*
* - blkcg: blk-throttle becomes properly hierarchical.
*/
bool cgroup_on_dfl(const struct cgroup *cgrp)
{
return cgrp->root == &cgrp_dfl_root;
}
/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
gfp_t gfp_mask)
{
int ret;
idr_preload(gfp_mask);
spin_lock_bh(&cgroup_idr_lock);
ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
spin_unlock_bh(&cgroup_idr_lock);
idr_preload_end();
return ret;
}
static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
void *ret;
spin_lock_bh(&cgroup_idr_lock);
ret = idr_replace(idr, ptr, id);
spin_unlock_bh(&cgroup_idr_lock);
return ret;
}
static void cgroup_idr_remove(struct idr *idr, int id)
{
spin_lock_bh(&cgroup_idr_lock);
idr_remove(idr, id);
spin_unlock_bh(&cgroup_idr_lock);
}
static bool cgroup_has_tasks(struct cgroup *cgrp)
{
return cgrp->nr_populated_csets;
}
static bool cgroup_is_threaded(struct cgroup *cgrp)
{
return cgrp->dom_cgrp != cgrp;
}
/* can @cgrp host both domain and threaded children? */
static bool cgroup_is_mixable(struct cgroup *cgrp)
{
/*
* Root isn't under domain level resource control exempting it from
* the no-internal-process constraint, so it can serve as a thread
* root and a parent of resource domains at the same time.
*/
return !cgroup_parent(cgrp);
}
/* can @cgrp become a thread root? Should always be true for a thread root */
static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
{
/* mixables don't care */
if (cgroup_is_mixable(cgrp))
return true;
/* domain roots can't be nested under threaded */
if (cgroup_is_threaded(cgrp))
return false;
/* can only have either domain or threaded children */
if (cgrp->nr_populated_domain_children)
return false;
/* and no domain controllers can be enabled */
if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
return false;
return true;
}
/* is @cgrp root of a threaded subtree? */
static bool cgroup_is_thread_root(struct cgroup *cgrp)
{
/* thread root should be a domain */
if (cgroup_is_threaded(cgrp))
return false;
/* a domain w/ threaded children is a thread root */
if (cgrp->nr_threaded_children)
return true;
/*
* A domain which has tasks and explicit threaded controllers
* enabled is a thread root.
*/
if (cgroup_has_tasks(cgrp) &&
(cgrp->subtree_control & cgrp_dfl_threaded_ss_mask))
return true;
return false;
}
/* a domain which isn't connected to the root w/o breakage can't be used */
static bool cgroup_is_valid_domain(struct cgroup *cgrp)
{
/* the cgroup itself can be a thread root */
if (cgroup_is_threaded(cgrp))
return false;
/* but the ancestors can't be unless mixable */
while ((cgrp = cgroup_parent(cgrp))) {
if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp))
return false;
if (cgroup_is_threaded(cgrp))
return false;
}
return true;
}
/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control(struct cgroup *cgrp)
{
struct cgroup *parent = cgroup_parent(cgrp);
u16 root_ss_mask = cgrp->root->subsys_mask;
if (parent) {
u16 ss_mask = parent->subtree_control;
/* threaded cgroups can only have threaded controllers */
if (cgroup_is_threaded(cgrp))
ss_mask &= cgrp_dfl_threaded_ss_mask;
return ss_mask;
}
if (cgroup_on_dfl(cgrp))
root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
cgrp_dfl_implicit_ss_mask);
return root_ss_mask;
}
/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask(struct cgroup *cgrp)
{
struct cgroup *parent = cgroup_parent(cgrp);
if (parent) {
u16 ss_mask = parent->subtree_ss_mask;
/* threaded cgroups can only have threaded controllers */
if (cgroup_is_threaded(cgrp))
ss_mask &= cgrp_dfl_threaded_ss_mask;
return ss_mask;
}
return cgrp->root->subsys_mask;
}
/**
* cgroup_css - obtain a cgroup's css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
*
* Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
* function must be called either under cgroup_mutex or rcu_read_lock() and
* the caller is responsible for pinning the returned css if it wants to
* keep accessing it outside the said locks. This function may return
* %NULL if @cgrp doesn't have @subsys_id enabled.
*/
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
struct cgroup_subsys *ss)
{
if (CGROUP_HAS_SUBSYS_CONFIG && ss)
return rcu_dereference_check(cgrp->subsys[ss->id],
lockdep_is_held(&cgroup_mutex));
else
return &cgrp->self;
}
/**
* cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
*
* Similar to cgroup_css() but returns the effective css, which is defined
* as the matching css of the nearest ancestor including self which has @ss
* enabled. If @ss is associated with the hierarchy @cgrp is on, this
* function is guaranteed to return non-NULL css.
*/
static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
struct cgroup_subsys *ss)
{
lockdep_assert_held(&cgroup_mutex);
if (!ss)
return &cgrp->self;
/*
* This function is used while updating css associations and thus
* can't test the csses directly. Test ss_mask.
*/
while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
cgrp = cgroup_parent(cgrp);
if (!cgrp)
return NULL;
}
return cgroup_css(cgrp, ss);
}
/**
* cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
*
* Find and get the effective css of @cgrp for @ss. The effective css is
* defined as the matching css of the nearest ancestor including self which
* has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
* the root css is returned, so this function always returns a valid css.
*
* The returned css is not guaranteed to be online, and therefore it is the
 * caller's responsibility to try to get a reference on it.
*/
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
struct cgroup_subsys *ss)
{
struct cgroup_subsys_state *css;
if (!CGROUP_HAS_SUBSYS_CONFIG)
return NULL;
do {
css = cgroup_css(cgrp, ss);
if (css)
return css;
cgrp = cgroup_parent(cgrp);
} while (cgrp);
return init_css_set.subsys[ss->id];
}
/**
* cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
*
* Find and get the effective css of @cgrp for @ss. The effective css is
* defined as the matching css of the nearest ancestor including self which
* has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
* the root css is returned, so this function always returns a valid css.
* The returned css must be put using css_put().
*/
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
struct cgroup_subsys *ss)
{
struct cgroup_subsys_state *css;
if (!CGROUP_HAS_SUBSYS_CONFIG)
return NULL;
rcu_read_lock();
do {
css = cgroup_css(cgrp, ss);
if (css && css_tryget_online(css))
goto out_unlock;
cgrp = cgroup_parent(cgrp);
} while (cgrp);
css = init_css_set.subsys[ss->id];
css_get(css);
out_unlock:
rcu_read_unlock();
return css;
}
EXPORT_SYMBOL_GPL(cgroup_get_e_css);
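/*
 * Illustrative sketch: a typical caller of cgroup_get_e_css() uses the
 * returned css and then drops the reference with css_put(). The memory
 * controller is used here purely as an example:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
 *	if (css) {
 *		// ... use css (e.g. container_of() to the controller state) ...
 *		css_put(css);
 *	}
 */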
static void cgroup_get_live(struct cgroup *cgrp)
{
WARN_ON_ONCE(cgroup_is_dead(cgrp));
cgroup_get(cgrp);
}
/**
* __cgroup_task_count - count the number of tasks in a cgroup. The caller
* is responsible for taking the css_set_lock.
* @cgrp: the cgroup in question
*/
int __cgroup_task_count(const struct cgroup *cgrp)
{
int count = 0;
struct cgrp_cset_link *link;
lockdep_assert_held(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
count += link->cset->nr_tasks;
return count;
}
/**
* cgroup_task_count - count the number of tasks in a cgroup.
* @cgrp: the cgroup in question
*/
int cgroup_task_count(const struct cgroup *cgrp)
{
int count;
spin_lock_irq(&css_set_lock);
count = __cgroup_task_count(cgrp);
spin_unlock_irq(&css_set_lock);
return count;
}
static struct cgroup *kn_priv(struct kernfs_node *kn)
{
struct kernfs_node *parent;
/*
 * The parent cannot be replaced due to KERNFS_ROOT_INVARIANT_PARENT.
 * Therefore it is always safe to dereference this pointer outside of an
 * RCU section.
*/
parent = rcu_dereference_check(kn->__parent,
kernfs_root_flags(kn) & KERNFS_ROOT_INVARIANT_PARENT);
return parent->priv;
}
struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
struct cgroup *cgrp = kn_priv(of->kn);
struct cftype *cft = of_cft(of);
/*
 * This is an open and unprotected implementation of cgroup_css().
* seq_css() is only called from a kernfs file operation which has
* an active reference on the file. Because all the subsystem
* files are drained before a css is disassociated with a cgroup,
* the matching css from the cgroup's subsys table is guaranteed to
* be and stay valid until the enclosing operation is complete.
*/
if (CGROUP_HAS_SUBSYS_CONFIG && cft->ss)
return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
else
return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);
/**
* for_each_css - iterate all css's of a cgroup
* @css: the iteration cursor
* @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
* @cgrp: the target cgroup to iterate css's of
*
* Should be called under cgroup_mutex.
*/
#define for_each_css(css, ssid, cgrp) \
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
if (!((css) = rcu_dereference_check( \
(cgrp)->subsys[(ssid)], \
lockdep_is_held(&cgroup_mutex)))) { } \
else
/**
* do_each_subsys_mask - filter for_each_subsys with a bitmask
* @ss: the iteration cursor
* @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
* @ss_mask: the bitmask
*
* The block will only run for cases where the ssid-th bit (1 << ssid) of
* @ss_mask is set.
*/
#define do_each_subsys_mask(ss, ssid, ss_mask) do { \
unsigned long __ss_mask = (ss_mask); \
if (!CGROUP_HAS_SUBSYS_CONFIG) { \
(ssid) = 0; \
break; \
} \
for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) { \
(ss) = cgroup_subsys[ssid]; \
{
#define while_each_subsys_mask() \
} \
} \
} while (false)
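/*
 * Illustrative sketch: walking only the subsystems whose bit is set in a
 * mask, for example the controllers visible on @cgrp, with the iterator
 * pair above (cgroup_mutex held by the caller):
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	do_each_subsys_mask(ss, ssid, cgroup_control(cgrp)) {
 *		pr_debug("controller %s is enabled\n", ss->name);
 *	} while_each_subsys_mask();
 */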
/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp) \
list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
if (({ lockdep_assert_held(&cgroup_mutex); \
cgroup_is_dead(child); })) \
; \
else
/* walk live descendants in pre order */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) \
css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL)) \
if (({ lockdep_assert_held(&cgroup_mutex); \
(dsct) = (d_css)->cgroup; \
cgroup_is_dead(dsct); })) \
; \
else
/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) \
css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
if (({ lockdep_assert_held(&cgroup_mutex); \
(dsct) = (d_css)->cgroup; \
cgroup_is_dead(dsct); })) \
; \
else
/*
* The default css_set - used by init and its children prior to any
* hierarchies being mounted. It contains a pointer to the root state
* for each subsystem. Also used to anchor the list of css_sets. Not
* reference-counted, to improve performance when child cgroups
* haven't been created.
*/
struct css_set init_css_set = {
.refcount = REFCOUNT_INIT(1),
.dom_cset = &init_css_set,
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
.mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
.dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
.task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
.threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
.mg_src_preload_node = LIST_HEAD_INIT(init_css_set.mg_src_preload_node),
.mg_dst_preload_node = LIST_HEAD_INIT(init_css_set.mg_dst_preload_node),
.mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
/*
* The following field is re-initialized when this cset gets linked
* in cgroup_init(). However, let's initialize the field
* statically too so that the default cgroup can be accessed safely
* early during boot.
*/
.dfl_cgrp = &cgrp_dfl_root.cgrp,
};
static int css_set_count = 1; /* 1 for init_css_set */
static bool css_set_threaded(struct css_set *cset)
{
return cset->dom_cset != cset;
}
/**
* css_set_populated - does a css_set contain any tasks?
* @cset: target css_set
*
* css_set_populated() should be the same as !!cset->nr_tasks at steady
* state. However, css_set_populated() can be called while a task is being
* added to or removed from the linked list before the nr_tasks is
* properly updated. Hence, we can't just look at ->nr_tasks here.
*/
static bool css_set_populated(struct css_set *cset)
{
lockdep_assert_held(&css_set_lock);
return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}
/**
* cgroup_update_populated - update the populated count of a cgroup
* @cgrp: the target cgroup
* @populated: inc or dec populated count
*
* One of the css_sets associated with @cgrp is either getting its first
* task or losing the last. Update @cgrp->nr_populated_* accordingly. The
* count is propagated towards root so that a given cgroup's
* nr_populated_children is zero iff none of its descendants contain any
* tasks.
*
* @cgrp's interface file "cgroup.populated" is zero if both
* @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and
* 1 otherwise. When the sum changes from or to zero, userland is notified
* that the content of the interface file has changed. This can be used to
* detect when @cgrp and its descendants become populated or empty.
*/
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
struct cgroup *child = NULL;
int adj = populated ? 1 : -1;
lockdep_assert_held(&css_set_lock);
do {
bool was_populated = cgroup_is_populated(cgrp);
if (!child) {
cgrp->nr_populated_csets += adj;
} else {
if (cgroup_is_threaded(child))
cgrp->nr_populated_threaded_children += adj;
else
cgrp->nr_populated_domain_children += adj;
}
if (was_populated == cgroup_is_populated(cgrp))
break;
cgroup1_check_for_release(cgrp);
TRACE_CGROUP_PATH(notify_populated, cgrp,
cgroup_is_populated(cgrp));
cgroup_file_notify(&cgrp->events_file);
child = cgrp;
cgrp = cgroup_parent(cgrp);
} while (cgrp);
}
/**
* css_set_update_populated - update populated state of a css_set
* @cset: target css_set
* @populated: whether @cset is populated or depopulated
*
* @cset is either getting the first task or losing the last. Update the
* populated counters of all associated cgroups accordingly.
*/
static void css_set_update_populated(struct css_set *cset, bool populated)
{
struct cgrp_cset_link *link;
lockdep_assert_held(&css_set_lock);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
cgroup_update_populated(link->cgrp, populated);
}
/*
* @task is leaving, advance task iterators which are pointing to it so
* that they can resume at the next position. Advancing an iterator might
* remove it from the list, use safe walk. See css_task_iter_skip() for
* details.
*/
static void css_set_skip_task_iters(struct css_set *cset,
struct task_struct *task)
{
struct css_task_iter *it, *pos;
list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
css_task_iter_skip(it, task);
}
/**
* css_set_move_task - move a task from one css_set to another
* @task: task being moved
* @from_cset: css_set @task currently belongs to (may be NULL)
* @to_cset: new css_set @task is being moved to (may be NULL)
* @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
*
* Move @task from @from_cset to @to_cset. If @task didn't belong to any
* css_set, @from_cset can be NULL. If @task is being disassociated
* instead of moved, @to_cset can be NULL.
*
* This function automatically handles populated counter updates and
* css_task_iter adjustments but the caller is responsible for managing
* @from_cset and @to_cset's reference counts.
*/
static void css_set_move_task(struct task_struct *task,
struct css_set *from_cset, struct css_set *to_cset,
bool use_mg_tasks)
{
lockdep_assert_held(&css_set_lock);
if (to_cset && !css_set_populated(to_cset))
css_set_update_populated(to_cset, true);
if (from_cset) {
WARN_ON_ONCE(list_empty(&task->cg_list));
css_set_skip_task_iters(from_cset, task);
list_del_init(&task->cg_list);
if (!css_set_populated(from_cset))
css_set_update_populated(from_cset, false);
} else {
WARN_ON_ONCE(!list_empty(&task->cg_list));
}
if (to_cset) {
/*
* We are synchronized through cgroup_threadgroup_rwsem
* against PF_EXITING setting such that we can't race
* against cgroup_exit()/cgroup_free() dropping the css_set.
*/
WARN_ON_ONCE(task->flags & PF_EXITING);
cgroup_move_task(task, to_cset);
list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
&to_cset->tasks);
}
}
/*
 * hash table for cgroup groups. This speeds up finding
* an existing css_set. This hash doesn't (currently) take into
* account cgroups in empty hierarchies.
*/
#define CSS_SET_HASH_BITS 7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
static unsigned long css_set_hash(struct cgroup_subsys_state **css)
{
unsigned long key = 0UL;
struct cgroup_subsys *ss;
int i;
for_each_subsys(ss, i)
key += (unsigned long)css[i];
key = (key >> 16) ^ key;
return key;
}
void put_css_set_locked(struct css_set *cset)
{
struct cgrp_cset_link *link, *tmp_link;
struct cgroup_subsys *ss;
int ssid;
lockdep_assert_held(&css_set_lock);
if (!refcount_dec_and_test(&cset->refcount))
return;
WARN_ON_ONCE(!list_empty(&cset->threaded_csets));
/* This css_set is dead. Unlink it and release cgroup and css refs */
for_each_subsys(ss, ssid) {
list_del(&cset->e_cset_node[ssid]);
css_put(cset->subsys[ssid]);
}
hash_del(&cset->hlist);
css_set_count--;
list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
list_del(&link->cset_link);
list_del(&link->cgrp_link);
if (cgroup_parent(link->cgrp))
cgroup_put(link->cgrp);
kfree(link);
}
if (css_set_threaded(cset)) {
list_del(&cset->threaded_csets_node);
put_css_set_locked(cset->dom_cset);
}
kfree_rcu(cset, rcu_head);
}
/**
* compare_css_sets - helper function for find_existing_css_set().
* @cset: candidate css_set being tested
* @old_cset: existing css_set for a task
* @new_cgrp: cgroup that's being entered by the task
* @template: desired set of css pointers in css_set (pre-calculated)
*
* Returns true if "cset" matches "old_cset" except for the hierarchy
* which "new_cgrp" belongs to, for which it should match "new_cgrp".
*/
static bool compare_css_sets(struct css_set *cset,
struct css_set *old_cset,
struct cgroup *new_cgrp,
struct cgroup_subsys_state *template[])
{
struct cgroup *new_dfl_cgrp;
struct list_head *l1, *l2;
/*
* On the default hierarchy, there can be csets which are
* associated with the same set of cgroups but different csses.
* Let's first ensure that csses match.
*/
if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
return false;
/* @cset's domain should match the default cgroup's */
if (cgroup_on_dfl(new_cgrp))
new_dfl_cgrp = new_cgrp;
else
new_dfl_cgrp = old_cset->dfl_cgrp;
if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp)
return false;
/*
* Compare cgroup pointers in order to distinguish between
* different cgroups in hierarchies. As different cgroups may
* share the same effective css, this comparison is always
* necessary.
*/
l1 = &cset->cgrp_links;
l2 = &old_cset->cgrp_links;
while (1) {
struct cgrp_cset_link *link1, *link2;
struct cgroup *cgrp1, *cgrp2;
l1 = l1->next;
l2 = l2->next;
/* See if we reached the end - both lists are equal length. */
if (l1 == &cset->cgrp_links) {
BUG_ON(l2 != &old_cset->cgrp_links);
break;
} else {
BUG_ON(l2 == &old_cset->cgrp_links);
}
/* Locate the cgroups associated with these links. */
link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
cgrp1 = link1->cgrp;
cgrp2 = link2->cgrp;
/* Hierarchies should be linked in the same order. */
BUG_ON(cgrp1->root != cgrp2->root);
/*
* If this hierarchy is the hierarchy of the cgroup
* that's changing, then we need to check that this
* css_set points to the new cgroup; if it's any other
* hierarchy, then this css_set should point to the
* same cgroup as the old css_set.
*/
if (cgrp1->root == new_cgrp->root) {
if (cgrp1 != new_cgrp)
return false;
} else {
if (cgrp1 != cgrp2)
return false;
}
}
return true;
}
/**
* find_existing_css_set - init css array and find the matching css_set
* @old_cset: the css_set that we're using before the cgroup transition
* @cgrp: the cgroup that we're moving into
* @template: out param for the new set of csses, should be clear on entry
*/
static struct css_set *find_existing_css_set(struct css_set *old_cset,
struct cgroup *cgrp,
struct cgroup_subsys_state **template)
{
struct cgroup_root *root = cgrp->root;
struct cgroup_subsys *ss;
struct css_set *cset;
unsigned long key;
int i;
/*
* Build the set of subsystem state objects that we want to see in the
* new css_set. While subsystems can change globally, the entries here
* won't change, so no need for locking.
*/
for_each_subsys(ss, i) {
if (root->subsys_mask & (1UL << i)) {
/*
* @ss is in this hierarchy, so we want the
* effective css from @cgrp.
*/
template[i] = cgroup_e_css_by_mask(cgrp, ss);
} else {
/*
* @ss is not in this hierarchy, so we don't want
* to change the css.
*/
template[i] = old_cset->subsys[i];
}
}
key = css_set_hash(template);
hash_for_each_possible(css_set_table, cset, hlist, key) {
if (!compare_css_sets(cset, old_cset, cgrp, template))
continue;
/* This css_set matches what we need */
return cset;
}
/* No existing cgroup group matched */
return NULL;
}
static void free_cgrp_cset_links(struct list_head *links_to_free)
{
struct cgrp_cset_link *link, *tmp_link;
list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
list_del(&link->cset_link);
kfree(link);
}
}
/**
* allocate_cgrp_cset_links - allocate cgrp_cset_links
* @count: the number of links to allocate
* @tmp_links: list_head the allocated links are put on
*
* Allocate @count cgrp_cset_link structures and chain them on @tmp_links
* through ->cset_link. Returns 0 on success or -errno.
*/
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
struct cgrp_cset_link *link;
int i;
INIT_LIST_HEAD(tmp_links);
for (i = 0; i < count; i++) {
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
free_cgrp_cset_links(tmp_links);
return -ENOMEM;
}
list_add(&link->cset_link, tmp_links);
}
return 0;
}
/**
* link_css_set - a helper function to link a css_set to a cgroup
* @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
* @cset: the css_set to be linked
* @cgrp: the destination cgroup
*/
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
struct cgroup *cgrp)
{
struct cgrp_cset_link *link;
BUG_ON(list_empty(tmp_links));
if (cgroup_on_dfl(cgrp))
cset->dfl_cgrp = cgrp;
link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
link->cset = cset;
link->cgrp = cgrp;
/*
* Always add links to the tail of the lists so that the lists are
* in chronological order.
*/
list_move_tail(&link->cset_link, &cgrp->cset_links);
list_add_tail(&link->cgrp_link, &cset->cgrp_links);
if (cgroup_parent(cgrp))
cgroup_get_live(cgrp);
}
/**
* find_css_set - return a new css_set with one cgroup updated
* @old_cset: the baseline css_set
* @cgrp: the cgroup to be updated
*
* Return a new css_set that's equivalent to @old_cset, but with @cgrp
* substituted into the appropriate hierarchy.
*/
static struct css_set *find_css_set(struct css_set *old_cset,
struct cgroup *cgrp)
{
struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
struct css_set *cset;
struct list_head tmp_links;
struct cgrp_cset_link *link;
struct cgroup_subsys *ss;
unsigned long key;
int ssid;
lockdep_assert_held(&cgroup_mutex);
/* First see if we already have a cgroup group that matches
* the desired set */
spin_lock_irq(&css_set_lock);
cset = find_existing_css_set(old_cset, cgrp, template);
if (cset)
get_css_set(cset);
spin_unlock_irq(&css_set_lock);
if (cset)
return cset;
cset = kzalloc(sizeof(*cset), GFP_KERNEL);
if (!cset)
return NULL;
/* Allocate all the cgrp_cset_link objects that we'll need */
if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
kfree(cset);
return NULL;
}
refcount_set(&cset->refcount, 1);
cset->dom_cset = cset;
INIT_LIST_HEAD(&cset->tasks);
INIT_LIST_HEAD(&cset->mg_tasks);
INIT_LIST_HEAD(&cset->dying_tasks);
INIT_LIST_HEAD(&cset->task_iters);
INIT_LIST_HEAD(&cset->threaded_csets);
INIT_HLIST_NODE(&cset->hlist);
INIT_LIST_HEAD(&cset->cgrp_links);
INIT_LIST_HEAD(&cset->mg_src_preload_node);
INIT_LIST_HEAD(&cset->mg_dst_preload_node);
INIT_LIST_HEAD(&cset->mg_node);
/* Copy the set of subsystem state objects generated in
* find_existing_css_set() */
memcpy(cset->subsys, template, sizeof(cset->subsys));
spin_lock_irq(&css_set_lock);
/* Add reference counts and links from the new css_set. */
list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
if (c->root == cgrp->root)
c = cgrp;
link_css_set(&tmp_links, cset, c);
}
BUG_ON(!list_empty(&tmp_links));
css_set_count++;
/* Add @cset to the hash table */
key = css_set_hash(cset->subsys);
hash_add(css_set_table, &cset->hlist, key);
for_each_subsys(ss, ssid) {
struct cgroup_subsys_state *css = cset->subsys[ssid];
list_add_tail(&cset->e_cset_node[ssid],
&css->cgroup->e_csets[ssid]);
css_get(css);
}
spin_unlock_irq(&css_set_lock);
/*
* If @cset should be threaded, look up the matching dom_cset and
* link them up. We first fully initialize @cset then look for the
* dom_cset. It's simpler this way and safe as @cset is guaranteed
* to stay empty until we return.
*/
if (cgroup_is_threaded(cset->dfl_cgrp)) {
struct css_set *dcset;
dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
if (!dcset) {
put_css_set(cset);
return NULL;
}
spin_lock_irq(&css_set_lock);
cset->dom_cset = dcset;
list_add_tail(&cset->threaded_csets_node,
&dcset->threaded_csets);
spin_unlock_irq(&css_set_lock);
}
return cset;
}
struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
struct cgroup *root_cgrp = kernfs_root_to_node(kf_root)->priv;
return root_cgrp->root;
}
void cgroup_favor_dynmods(struct cgroup_root *root, bool favor)
{
bool favoring = root->flags & CGRP_ROOT_FAVOR_DYNMODS;
/*
* see the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
* favordynmods can flip while task is between
* cgroup_threadgroup_change_begin() and end(), so down_write global
* cgroup_threadgroup_rwsem to synchronize them.
*
* Once cgroup_enable_per_threadgroup_rwsem is enabled, holding
 * cgroup_threadgroup_rwsem doesn't exclude tasks between
 * cgroup_threadgroup_change_begin() and end() and thus it's unsafe to
* turn off. As the scenario is unlikely, simply disallow disabling once
* enabled and print out a warning.
*/
percpu_down_write(&cgroup_threadgroup_rwsem);
if (favor && !favoring) {
cgroup_enable_per_threadgroup_rwsem = true;
rcu_sync_enter(&cgroup_threadgroup_rwsem.rss);
root->flags |= CGRP_ROOT_FAVOR_DYNMODS;
} else if (!favor && favoring) {
if (cgroup_enable_per_threadgroup_rwsem)
pr_warn_once("cgroup favordynmods: per threadgroup rwsem mechanism can't be disabled\n");
rcu_sync_exit(&cgroup_threadgroup_rwsem.rss);
root->flags &= ~CGRP_ROOT_FAVOR_DYNMODS;
}
percpu_up_write(&cgroup_threadgroup_rwsem);
}
static int cgroup_init_root_id(struct cgroup_root *root)
{
int id;
lockdep_assert_held(&cgroup_mutex);
id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
if (id < 0)
return id;
root->hierarchy_id = id;
return 0;
}
static void cgroup_exit_root_id(struct cgroup_root *root)
{
lockdep_assert_held(&cgroup_mutex);
idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
}
void cgroup_free_root(struct cgroup_root *root)
{
kfree_rcu(root, rcu);
}
static void cgroup_destroy_root(struct cgroup_root *root)
{
struct cgroup *cgrp = &root->cgrp;
struct cgrp_cset_link *link, *tmp_link;
int ret;
trace_cgroup_destroy_root(root);
cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
BUG_ON(atomic_read(&root->nr_cgrps));
BUG_ON(!list_empty(&cgrp->self.children));
ret = blocking_notifier_call_chain(&cgroup_lifetime_notifier,
CGROUP_LIFETIME_OFFLINE, cgrp);
WARN_ON_ONCE(notifier_to_errno(ret));
/* Rebind all subsystems back to the default hierarchy */
WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));
/*
* Release all the links from cset_links to this hierarchy's
* root cgroup
*/
spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
list_del(&link->cset_link);
list_del(&link->cgrp_link);
kfree(link);
}
spin_unlock_irq(&css_set_lock);
WARN_ON_ONCE(list_empty(&root->root_list));
list_del_rcu(&root->root_list);
cgroup_root_count--;
if (!have_favordynmods)
cgroup_favor_dynmods(root, false);
cgroup_exit_root_id(root);
cgroup_unlock();
kernfs_destroy_root(root->kf_root);
cgroup_free_root(root);
}
/*
* Returned cgroup is without refcount but it's valid as long as cset pins it.
*/
static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
struct cgroup_root *root)
{
struct cgroup *res_cgroup = NULL;
if (cset == &init_css_set) {
res_cgroup = &root->cgrp;
} else if (root == &cgrp_dfl_root) {
res_cgroup = cset->dfl_cgrp;
} else {
struct cgrp_cset_link *link;
lockdep_assert_held(&css_set_lock);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
if (c->root == root) {
res_cgroup = c;
break;
}
}
}
/*
* If cgroup_mutex is not held, the cgrp_cset_link will be freed
* before we remove the cgroup root from the root_list. Consequently,
* when accessing a cgroup root, the cset_link may have already been
* freed, resulting in a NULL res_cgroup. However, by holding the
* cgroup_mutex, we ensure that res_cgroup can't be NULL.
* If the caller doesn't hold cgroup_mutex, it must do the NULL check.
*/
return res_cgroup;
}
/*
* look up cgroup associated with current task's cgroup namespace on the
* specified hierarchy
*/
static struct cgroup *
current_cgns_cgroup_from_root(struct cgroup_root *root)
{
struct cgroup *res = NULL;
struct css_set *cset;
lockdep_assert_held(&css_set_lock);
rcu_read_lock();
cset = current->nsproxy->cgroup_ns->root_cset;
res = __cset_cgroup_from_root(cset, root);
rcu_read_unlock();
/*
* The namespace_sem is held by current, so the root cgroup can't
* be umounted. Therefore, res is guaranteed to be non-NULL.
*/
WARN_ON_ONCE(!res);
return res;
}
/*
* Look up cgroup associated with current task's cgroup namespace on the default
* hierarchy.
*
* Unlike current_cgns_cgroup_from_root(), this doesn't need locks:
* - Internal rcu_read_lock is unnecessary because we don't dereference any rcu
* pointers.
* - css_set_lock is not needed because we just read cset->dfl_cgrp.
* - As a bonus, the returned cgrp is pinned by current because current
* cannot switch its cgroup_ns asynchronously.
*/
static struct cgroup *current_cgns_cgroup_dfl(void)
{
struct css_set *cset;
if (current->nsproxy) {
cset = current->nsproxy->cgroup_ns->root_cset;
return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
} else {
/*
* NOTE: This function may be called from bpf_cgroup_from_id()
* on a task which has already passed exit_task_namespaces() and
* nsproxy == NULL. Fall back to cgrp_dfl_root which will make all
* cgroups visible for lookups.
*/
return &cgrp_dfl_root.cgrp;
}
}
/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
struct cgroup_root *root)
{
lockdep_assert_held(&css_set_lock);
return __cset_cgroup_from_root(cset, root);
}
/*
* Return the cgroup for "task" from the given hierarchy. Must be
* called with css_set_lock held to prevent task's groups from being modified.
* Must be called with either cgroup_mutex or rcu read lock to prevent the
* cgroup root from being destroyed.
*/
struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroup_root *root)
{
/*
* No need to lock the task - since we hold css_set_lock the
* task can't change groups.
*/
return cset_cgroup_from_root(task_css_set(task), root);
}
/*
* A task must hold cgroup_mutex to modify cgroups.
*
* Any task can increment and decrement the count field without lock.
* So in general, code holding cgroup_mutex can't rely on the count
* field not changing. However, if the count goes to zero, then only
* cgroup_attach_task() can increment it again. Because a count of zero
* means that no tasks are currently attached, therefore there is no
* way a task attached to that cgroup can fork (the other way to
* increment the count). So code holding cgroup_mutex can safely
* assume that if the count is zero, it will stay zero. Similarly, if
* a task holds cgroup_mutex on a cgroup with zero count, it
* knows that the cgroup won't be removed, as cgroup_rmdir()
* needs that mutex.
*
* A cgroup can only be deleted if both its 'count' of using tasks
* is zero, and its list of 'children' cgroups is empty. Since all
* tasks in the system use _some_ cgroup, and since there is always at
* least one task in the system (init, pid == 1), the root cgroup
* always has either child cgroups and/or using tasks. So we don't
* need a special hack to ensure that the root cgroup cannot be deleted.
*
* P.S. One more locking exception. RCU is used to guard the
* update of a task's cgroup pointer by cgroup_attach_task().
*/
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
char *buf)
{
struct cgroup_subsys *ss = cft->ss;
if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
!(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";
snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
cft->name);
} else {
strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
}
return buf;
}
/**
* cgroup_file_mode - deduce file mode of a control file
* @cft: the control file in question
*
* S_IRUGO for read, S_IWUSR for write.
*/
static umode_t cgroup_file_mode(const struct cftype *cft)
{
umode_t mode = 0;
if (cft->read_u64 || cft->read_s64 || cft->seq_show)
mode |= S_IRUGO;
if (cft->write_u64 || cft->write_s64 || cft->write) {
if (cft->flags & CFTYPE_WORLD_WRITABLE)
mode |= S_IWUGO;
else
mode |= S_IWUSR;
}
return mode;
}
/**
* cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
* @subtree_control: the new subtree_control mask to consider
* @this_ss_mask: available subsystems
*
* On the default hierarchy, a subsystem may request other subsystems to be
* enabled together through its ->depends_on mask. In such cases, more
* subsystems than specified in "cgroup.subtree_control" may be enabled.
*
* This function calculates which subsystems need to be enabled if
* @subtree_control is to be applied while restricted to @this_ss_mask.
*/
static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
{
u16 cur_ss_mask = subtree_control;
struct cgroup_subsys *ss;
int ssid;
lockdep_assert_held(&cgroup_mutex);
cur_ss_mask |= cgrp_dfl_implicit_ss_mask;
while (true) {
u16 new_ss_mask = cur_ss_mask;
do_each_subsys_mask(ss, ssid, cur_ss_mask) {
new_ss_mask |= ss->depends_on;
} while_each_subsys_mask();
/*
* Mask out subsystems which aren't available. This can
* happen only if some depended-upon subsystems were bound
* to non-default hierarchies.
*/
new_ss_mask &= this_ss_mask;
if (new_ss_mask == cur_ss_mask)
break;
cur_ss_mask = new_ss_mask;
}
return cur_ss_mask;
}
/**
* cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
* @kn: the kernfs_node being serviced
*
* This helper undoes cgroup_kn_lock_live() and should be invoked before
* the method finishes if locking succeeded. Note that once this function
* returns the cgroup returned by cgroup_kn_lock_live() may become
* inaccessible any time. If the caller intends to continue to access the
* cgroup, it should pin it before invoking this function.
*/
void cgroup_kn_unlock(struct kernfs_node *kn)
{
struct cgroup *cgrp;
if (kernfs_type(kn) == KERNFS_DIR)
cgrp = kn->priv;
else
cgrp = kn_priv(kn);
cgroup_unlock();
kernfs_unbreak_active_protection(kn);
cgroup_put(cgrp);
}
/**
* cgroup_kn_lock_live - locking helper for cgroup kernfs methods
* @kn: the kernfs_node being serviced
* @drain_offline: perform offline draining on the cgroup
*
* This helper is to be used by a cgroup kernfs method currently servicing
* @kn. It breaks the active protection, performs cgroup locking and
* verifies that the associated cgroup is alive. Returns the cgroup if
* alive; otherwise, %NULL. A successful return should be undone by a
* matching cgroup_kn_unlock() invocation. If @drain_offline is %true, the
* cgroup is drained of offlining csses before return.
*
* Any cgroup kernfs method implementation which requires locking the
* associated cgroup should use this helper. It avoids nesting cgroup
* locking under kernfs active protection and allows all kernfs operations
* including self-removal.
*/
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
{
struct cgroup *cgrp;
if (kernfs_type(kn) == KERNFS_DIR)
cgrp = kn->priv;
else
cgrp = kn_priv(kn);
/*
* We're gonna grab cgroup_mutex which nests outside kernfs
* active_ref. The cgroup liveness check alone provides enough
* protection against removal. Ensure @cgrp stays accessible and
* break the active_ref protection.
*/
if (!cgroup_tryget(cgrp))
return NULL;
kernfs_break_active_protection(kn);
if (drain_offline)
cgroup_lock_and_drain_offline(cgrp);
else
cgroup_lock();
if (!cgroup_is_dead(cgrp))
return cgrp;
cgroup_kn_unlock(kn);
return NULL;
}
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
char name[CGROUP_FILE_NAME_MAX];
lockdep_assert_held(&cgroup_mutex);
if (cft->file_offset) {
struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
struct cgroup_file *cfile = (void *)css + cft->file_offset;
spin_lock_irq(&cgroup_file_kn_lock);
cfile->kn = NULL;
spin_unlock_irq(&cgroup_file_kn_lock);
timer_delete_sync(&cfile->notify_timer);
}
kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}
/**
* css_clear_dir - remove subsys files in a cgroup directory
* @css: target css
*/
static void css_clear_dir(struct cgroup_subsys_state *css)
{
struct cgroup *cgrp = css->cgroup;
struct cftype *cfts;
if (!(css->flags & CSS_VISIBLE))
return;
css->flags &= ~CSS_VISIBLE;
if (css_is_self(css)) {
if (cgroup_on_dfl(cgrp)) {
cgroup_addrm_files(css, cgrp,
cgroup_base_files, false);
if (cgroup_psi_enabled())
cgroup_addrm_files(css, cgrp,
cgroup_psi_files, false);
} else {
cgroup_addrm_files(css, cgrp,
cgroup1_base_files, false);
}
} else {
list_for_each_entry(cfts, &css->ss->cfts, node)
cgroup_addrm_files(css, cgrp, cfts, false);
}
}
/**
* css_populate_dir - create subsys files in a cgroup directory
* @css: target css
*
* On failure, no file is added.
*/
static int css_populate_dir(struct cgroup_subsys_state *css)
{
struct cgroup *cgrp = css->cgroup;
struct cftype *cfts, *failed_cfts;
int ret;
if (css->flags & CSS_VISIBLE)
return 0;
if (css_is_self(css)) {
if (cgroup_on_dfl(cgrp)) {
ret = cgroup_addrm_files(css, cgrp,
cgroup_base_files, true);
if (ret < 0)
return ret;
if (cgroup_psi_enabled()) {
ret = cgroup_addrm_files(css, cgrp,
cgroup_psi_files, true);
if (ret < 0) {
cgroup_addrm_files(css, cgrp,
cgroup_base_files, false);
return ret;
}
}
} else {
ret = cgroup_addrm_files(css, cgrp,
cgroup1_base_files, true);
if (ret < 0)
return ret;
}
} else {
list_for_each_entry(cfts, &css->ss->cfts, node) {
ret = cgroup_addrm_files(css, cgrp, cfts, true);
if (ret < 0) {
failed_cfts = cfts;
goto err;
}
}
}
css->flags |= CSS_VISIBLE;
return 0;
err:
list_for_each_entry(cfts, &css->ss->cfts, node) {
if (cfts == failed_cfts)
break;
cgroup_addrm_files(css, cgrp, cfts, false);
}
return ret;
}
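/**
 * rebind_subsystems - move a set of subsystems to a new hierarchy root
 * @dst_root: the root to move the subsystems to
 * @ss_mask: mask of the subsystems to move
 *
 * A subsystem can only be moved if it has no non-root csses attached
 * (implicit controllers are exempt) and either its current root or
 * @dst_root is the default hierarchy. Must be called with cgroup_mutex
 * held. Returns 0 on success and -EBUSY otherwise.
 */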
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
{
struct cgroup *dcgrp = &dst_root->cgrp;
struct cgroup_subsys *ss;
int ssid, ret;
u16 dfl_disable_ss_mask = 0;
lockdep_assert_held(&cgroup_mutex);
do_each_subsys_mask(ss, ssid, ss_mask) {
/*
* If @ss has non-root csses attached to it, can't move.
* If @ss is an implicit controller, it is exempt from this
* rule and can be stolen.
*/
if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
!ss->implicit_on_dfl)
return -EBUSY;
/* can't move between two non-dummy roots either */
if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
return -EBUSY;
/*
* Collect ssid's that need to be disabled from default
* hierarchy.
*/
if (ss->root == &cgrp_dfl_root)
dfl_disable_ss_mask |= 1 << ssid;
} while_each_subsys_mask();
if (dfl_disable_ss_mask) {
struct cgroup *scgrp = &cgrp_dfl_root.cgrp;
/*
* Controllers from default hierarchy that need to be rebound
* are all disabled together in one go.
*/
cgrp_dfl_root.subsys_mask &= ~dfl_disable_ss_mask;
WARN_ON(cgroup_apply_control(scgrp));
cgroup_finalize_control(scgrp, 0);
}
do_each_subsys_mask(ss, ssid, ss_mask) {
struct cgroup_root *src_root = ss->root;
struct cgroup *scgrp = &src_root->cgrp;
struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
struct css_set *cset, *cset_pos;
struct css_task_iter *it;
WARN_ON(!css || cgroup_css(dcgrp, ss));
if (src_root != &cgrp_dfl_root) {
/* disable from the source */
src_root->subsys_mask &= ~(1 << ssid);
WARN_ON(cgroup_apply_control(scgrp));
cgroup_finalize_control(scgrp, 0);
}
/* rebind */
RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
rcu_assign_pointer(dcgrp->subsys[ssid], css);
ss->root = dst_root;
spin_lock_irq(&css_set_lock);
css->cgroup = dcgrp;
WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
e_cset_node[ss->id]) {
list_move_tail(&cset->e_cset_node[ss->id],
&dcgrp->e_csets[ss->id]);
/*
* All css_sets of scgrp are moved to dcgrp in the same order;
* patch in-flight iterators so they keep iterating correctly.
* Since an iterator is always advanced right away and finishes
* when it->cset_pos meets it->cset_head, updating it->cset_head
* is enough here.
*/
list_for_each_entry(it, &cset->task_iters, iters_node)
if (it->cset_head == &scgrp->e_csets[ss->id])
it->cset_head = &dcgrp->e_csets[ss->id];
}
spin_unlock_irq(&css_set_lock);
/* default hierarchy doesn't enable controllers by default */
dst_root->subsys_mask |= 1 << ssid;
if (dst_root == &cgrp_dfl_root) {
static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
} else {
dcgrp->subtree_control |= 1 << ssid;
static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
}
ret = cgroup_apply_control(dcgrp);
if (ret)
pr_warn("partial failure to rebind %s controller (err=%d)\n",
ss->name, ret);
if (ss->bind)
ss->bind(css);
} while_each_subsys_mask();
kernfs_activate(dcgrp->kn);
return 0;
}
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
struct kernfs_root *kf_root)
{
int len = 0;
char *buf = NULL;
struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
struct cgroup *ns_cgroup;
buf = kmalloc(PATH_MAX, GFP_KERNEL);
if (!buf)
return -ENOMEM;
spin_lock_irq(&css_set_lock);
ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
spin_unlock_irq(&css_set_lock);
if (len == -E2BIG)
len = -ERANGE;
else if (len > 0) {
seq_escape(sf, buf, " \t\n\\");
len = 0;
}
kfree(buf);
return len;
}
enum cgroup2_param {
Opt_nsdelegate,
Opt_favordynmods,
Opt_memory_localevents,
Opt_memory_recursiveprot,
Opt_memory_hugetlb_accounting,
Opt_pids_localevents,
nr__cgroup2_params
};
static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
fsparam_flag("nsdelegate", Opt_nsdelegate),
fsparam_flag("favordynmods", Opt_favordynmods),
fsparam_flag("memory_localevents", Opt_memory_localevents),
fsparam_flag("memory_recursiveprot", Opt_memory_recursiveprot),
fsparam_flag("memory_hugetlb_accounting", Opt_memory_hugetlb_accounting),
fsparam_flag("pids_localevents", Opt_pids_localevents),
{}
};
static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
struct fs_parse_result result;
int opt;
opt = fs_parse(fc, cgroup2_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case Opt_nsdelegate:
ctx->flags |= CGRP_ROOT_NS_DELEGATE;
return 0;
case Opt_favordynmods:
ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
return 0;
case Opt_memory_localevents:
ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
return 0;
case Opt_memory_recursiveprot:
ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
return 0;
case Opt_memory_hugetlb_accounting:
ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
return 0;
case Opt_pids_localevents:
ctx->flags |= CGRP_ROOT_PIDS_LOCAL_EVENTS;
return 0;
}
return -EINVAL;
}
struct cgroup_of_peak *of_peak(struct kernfs_open_file *of)
{
struct cgroup_file_ctx *ctx = of->priv;
return &ctx->peak;
}
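/* apply mount options to cgrp_dfl_root; a no-op outside the init cgroup namespace */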
static void apply_cgroup_root_flags(unsigned int root_flags)
{
if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
if (root_flags & CGRP_ROOT_NS_DELEGATE)
cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
cgroup_favor_dynmods(&cgrp_dfl_root,
root_flags & CGRP_ROOT_FAVOR_DYNMODS);
if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
if (root_flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
if (root_flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
if (root_flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
cgrp_dfl_root.flags |= CGRP_ROOT_PIDS_LOCAL_EVENTS;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_PIDS_LOCAL_EVENTS;
}
}
static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
seq_puts(seq, ",nsdelegate");
if (cgrp_dfl_root.flags & CGRP_ROOT_FAVOR_DYNMODS)
seq_puts(seq, ",favordynmods");
if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
seq_puts(seq, ",memory_localevents");
if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
seq_puts(seq, ",memory_recursiveprot");
if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
seq_puts(seq, ",memory_hugetlb_accounting");
if (cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
seq_puts(seq, ",pids_localevents");
return 0;
}
static int cgroup_reconfigure(struct fs_context *fc)
{
struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
apply_cgroup_root_flags(ctx->flags);
return 0;
}
static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
struct cgroup_subsys *ss;
int ssid;
INIT_LIST_HEAD(&cgrp->self.sibling);
INIT_LIST_HEAD(&cgrp->self.children);
INIT_LIST_HEAD(&cgrp->cset_links);
INIT_LIST_HEAD(&cgrp->pidlists);
mutex_init(&cgrp->pidlist_mutex);
cgrp->self.cgroup = cgrp;
cgrp->self.flags |= CSS_ONLINE;
cgrp->dom_cgrp = cgrp;
cgrp->max_descendants = INT_MAX;
cgrp->max_depth = INT_MAX;
prev_cputime_init(&cgrp->prev_cputime);
for_each_subsys(ss, ssid)
INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
#ifdef CONFIG_CGROUP_BPF
for (int i = 0; i < ARRAY_SIZE(cgrp->bpf.revisions); i++)
cgrp->bpf.revisions[i] = 1;
#endif
init_waitqueue_head(&cgrp->offline_waitq);
INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent);
}
void init_cgroup_root(struct cgroup_fs_context *ctx)
{
struct cgroup_root *root = ctx->root;
struct cgroup *cgrp = &root->cgrp;
INIT_LIST_HEAD_RCU(&root->root_list);
atomic_set(&root->nr_cgrps, 1);
cgrp->root = root;
init_cgroup_housekeeping(cgrp);
/* DYNMODS must be modified through cgroup_favor_dynmods() */
root->flags = ctx->flags & ~CGRP_ROOT_FAVOR_DYNMODS;
if (ctx->release_agent)
strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX);
if (ctx->name)
strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN);
if (ctx->cpuset_clone_children)
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}
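/*
 * Set up @root as a live hierarchy: allocate its hierarchy ID, create the
 * kernfs root, populate the root directory, bind the subsystems in
 * @ss_mask and link the root cgroup into every existing css_set. Must be
 * called with cgroup_mutex held.
 */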
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
{
LIST_HEAD(tmp_links);
struct cgroup *root_cgrp = &root->cgrp;
struct kernfs_syscall_ops *kf_sops;
struct css_set *cset;
int i, ret;
lockdep_assert_held(&cgroup_mutex);
ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
0, GFP_KERNEL);
if (ret)
goto out;
/*
* We're accessing css_set_count without locking css_set_lock here,
* but that's OK - it can only be increased by someone holding
* cgroup_lock, and that's us. Later rebinding may disable
* controllers on the default hierarchy and thus create new csets,
* which can't be more than the existing ones. Allocate 2x.
*/
ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
if (ret)
goto cancel_ref;
ret = cgroup_init_root_id(root);
if (ret)
goto cancel_ref;
kf_sops = root == &cgrp_dfl_root ?
&cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops;
root->kf_root = kernfs_create_root(kf_sops,
KERNFS_ROOT_CREATE_DEACTIVATED |
KERNFS_ROOT_SUPPORT_EXPORTOP |
KERNFS_ROOT_SUPPORT_USER_XATTR |
KERNFS_ROOT_INVARIANT_PARENT,
root_cgrp);
if (IS_ERR(root->kf_root)) {
ret = PTR_ERR(root->kf_root);
goto exit_root_id;
}
root_cgrp->kn = kernfs_root_to_node(root->kf_root);
WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1);
root_cgrp->ancestors[0] = root_cgrp;
ret = css_populate_dir(&root_cgrp->self);
if (ret)
goto destroy_root;
ret = css_rstat_init(&root_cgrp->self);
if (ret)
goto destroy_root;
ret = rebind_subsystems(root, ss_mask);
if (ret)
goto exit_stats;
ret = blocking_notifier_call_chain(&cgroup_lifetime_notifier,
CGROUP_LIFETIME_ONLINE, root_cgrp);
WARN_ON_ONCE(notifier_to_errno(ret));
trace_cgroup_setup_root(root);
/*
* There must be no failure case after here, since rebinding takes
* care of subsystems' refcounts, which are explicitly dropped in
* the failure exit path.
*/
list_add_rcu(&root->root_list, &cgroup_roots);
cgroup_root_count++;
/*
* Link the root cgroup in this hierarchy into all the css_set
* objects.
*/
spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist) {
link_css_set(&tmp_links, cset, root_cgrp);
if (css_set_populated(cset))
cgroup_update_populated(root_cgrp, true);
}
spin_unlock_irq(&css_set_lock);
BUG_ON(!list_empty(&root_cgrp->self.children));
BUG_ON(atomic_read(&root->nr_cgrps) != 1);
ret = 0;
goto out;
exit_stats:
css_rstat_exit(&root_cgrp->self);
destroy_root:
kernfs_destroy_root(root->kf_root);
root->kf_root = NULL;
exit_root_id:
cgroup_exit_root_id(root);
cancel_ref:
percpu_ref_exit(&root_cgrp->self.refcnt);
out:
free_cgrp_cset_links(&tmp_links);
return ret;
}
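/*
 * Common mount path for cgroup1 and cgroup2. Gets the kernfs tree and, in
 * a non-init cgroup namespace, swaps the returned root dentry for the one
 * corresponding to the namespace's root cgroup.
 */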
int cgroup_do_get_tree(struct fs_context *fc)
{
struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
int ret;
ctx->kfc.root = ctx->root->kf_root;
if (fc->fs_type == &cgroup2_fs_type)
ctx->kfc.magic = CGROUP2_SUPER_MAGIC;
else
ctx->kfc.magic = CGROUP_SUPER_MAGIC;
ret = kernfs_get_tree(fc);
/*
* In a non-init cgroup namespace, instead of the root cgroup's dentry,
* we return the dentry corresponding to the cgroupns->root_cgrp.
*/
if (!ret && ctx->ns != &init_cgroup_ns) {
struct dentry *nsdentry;
struct super_block *sb = fc->root->d_sb;
struct cgroup *cgrp;
cgroup_lock();
spin_lock_irq(&css_set_lock);
cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
spin_unlock_irq(&css_set_lock);
cgroup_unlock();
nsdentry = kernfs_node_dentry(cgrp->kn, sb);
dput(fc->root);
if (IS_ERR(nsdentry)) {
deactivate_locked_super(sb);
ret = PTR_ERR(nsdentry);
nsdentry = NULL;
}
fc->root = nsdentry;
}
if (!ctx->kfc.new_sb_created)
cgroup_put(&ctx->root->cgrp);
return ret;
}
/*
* Destroy a cgroup filesystem context.
*/
static void cgroup_fs_context_free(struct fs_context *fc)
{
struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
kfree(ctx->name);
kfree(ctx->release_agent);
put_cgroup_ns(ctx->ns);
kernfs_free_fs_context(fc);
kfree(ctx);
}
static int cgroup_get_tree(struct fs_context *fc)
{
struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
int ret;
WRITE_ONCE(cgrp_dfl_visible, true);
cgroup_get_live(&cgrp_dfl_root.cgrp);
ctx->root = &cgrp_dfl_root;
ret = cgroup_do_get_tree(fc);
if (!ret)
apply_cgroup_root_flags(ctx->flags);
return ret;
}
static const struct fs_context_operations cgroup_fs_context_ops = {
.free = cgroup_fs_context_free,
.parse_param = cgroup2_parse_param,
.get_tree = cgroup_get_tree,
.reconfigure = cgroup_reconfigure,
};
static const struct fs_context_operations cgroup1_fs_context_ops = {
.free = cgroup_fs_context_free,
.parse_param = cgroup1_parse_param,
.get_tree = cgroup1_get_tree,
.reconfigure = cgroup1_reconfigure,
};
/*
* Initialise the cgroup filesystem creation/reconfiguration context. Notably,
* we select the namespace we're going to use.
*/
static int cgroup_init_fs_context(struct fs_context *fc)
{
struct cgroup_fs_context *ctx;
ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->ns = current->nsproxy->cgroup_ns;
get_cgroup_ns(ctx->ns);
fc->fs_private = &ctx->kfc;
if (fc->fs_type == &cgroup2_fs_type)
fc->ops = &cgroup_fs_context_ops;
else
fc->ops = &cgroup1_fs_context_ops;
put_user_ns(fc->user_ns);
fc->user_ns = get_user_ns(ctx->ns->user_ns);
fc->global = true;
if (have_favordynmods)
ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
return 0;
}
static void cgroup_kill_sb(struct super_block *sb)
{
struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
struct cgroup_root *root = cgroup_root_from_kf(kf_root);
/*
* If @root doesn't have any children, start killing it.
* This prevents new mounts by disabling percpu_ref_tryget_live().
*
* And don't kill the default root.
*/
if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
!percpu_ref_is_dying(&root->cgrp.self.refcnt))
percpu_ref_kill(&root->cgrp.self.refcnt);
cgroup_put(&root->cgrp);
kernfs_kill_sb(sb);
}
struct file_system_type cgroup_fs_type = {
.name = "cgroup",
.init_fs_context = cgroup_init_fs_context,
.parameters = cgroup1_fs_parameters,
.kill_sb = cgroup_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
static struct file_system_type cgroup2_fs_type = {
.name = "cgroup2",
.init_fs_context = cgroup_init_fs_context,
.parameters = cgroup2_fs_parameters,
.kill_sb = cgroup_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
#ifdef CONFIG_CPUSETS_V1
enum cpuset_param {
Opt_cpuset_v2_mode,
};
static const struct fs_parameter_spec cpuset_fs_parameters[] = {
fsparam_flag ("cpuset_v2_mode", Opt_cpuset_v2_mode),
{}
};
static int cpuset_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
struct fs_parse_result result;
int opt;
opt = fs_parse(fc, cpuset_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case Opt_cpuset_v2_mode:
ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
return 0;
}
return -EINVAL;
}
static const struct fs_context_operations cpuset_fs_context_ops = {
.get_tree = cgroup1_get_tree,
.free = cgroup_fs_context_free,
.parse_param = cpuset_parse_param,
};
/*
* This is ugly, but preserves the userspace API for existing cpuset
* users. If someone tries to mount the "cpuset" filesystem, we
* silently switch it to mount "cgroup" instead
*/
static int cpuset_init_fs_context(struct fs_context *fc)
{
char *agent = kstrdup("/sbin/cpuset_release_agent", GFP_USER);
struct cgroup_fs_context *ctx;
int err;
err = cgroup_init_fs_context(fc);
if (err) {
kfree(agent);
return err;
}
fc->ops = &cpuset_fs_context_ops;
ctx = cgroup_fc2context(fc);
ctx->subsys_mask = 1 << cpuset_cgrp_id;
ctx->flags |= CGRP_ROOT_NOPREFIX;
ctx->release_agent = agent;
get_filesystem(&cgroup_fs_type);
put_filesystem(fc->fs_type);
fc->fs_type = &cgroup_fs_type;
return 0;
}
static struct file_system_type cpuset_fs_type = {
.name = "cpuset",
.init_fs_context = cpuset_init_fs_context,
.parameters = cpuset_fs_parameters,
.fs_flags = FS_USERNS_MOUNT,
};
#endif
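/*
 * Generate the path of @cgrp relative to @ns's root cgroup on @cgrp's
 * hierarchy. The caller must hold css_set_lock; cgroup_path_ns() below
 * takes the necessary locks itself.
 */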
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
struct cgroup_namespace *ns)
{
struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
}
int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
struct cgroup_namespace *ns)
{
int ret;
cgroup_lock();
spin_lock_irq(&css_set_lock);
ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
spin_unlock_irq(&css_set_lock);
cgroup_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path_ns);
/**
* cgroup_attach_lock - Lock for ->attach()
* @lock_mode: whether to acquire a rwsem and, if so, which one
* @tsk: thread group to lock
*
* cgroup migration sometimes needs to stabilize threadgroups against forks and
* exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
* implementations (e.g. cpuset) also need to disable CPU hotplug.
* Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
* lead to deadlocks.
*
* Bringing up a CPU may involve creating and destroying tasks which requires
* read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
* cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
* write-locking threadgroup_rwsem, the locking order is reversed and we end up
* waiting for an on-going CPU hotplug operation which in turn is waiting for
* the threadgroup_rwsem to be released to create new tasks. For more details:
*
* http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
*
* Resolve the situation by always acquiring cpus_read_lock() before optionally
* write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
* CPU hotplug is disabled on entry.
*
* When favordynmods is enabled, take the per-threadgroup rwsem to reduce
* overhead on dynamic cgroup modifications. See the comment above the
* CGRP_ROOT_FAVOR_DYNMODS definition.
*
* @tsk is non-NULL only when writing to cgroup.procs.
*/
void cgroup_attach_lock(enum cgroup_attach_lock_mode lock_mode,
struct task_struct *tsk)
{
cpus_read_lock();
switch (lock_mode) {
case CGRP_ATTACH_LOCK_NONE:
break;
case CGRP_ATTACH_LOCK_GLOBAL:
percpu_down_write(&cgroup_threadgroup_rwsem);
break;
case CGRP_ATTACH_LOCK_PER_THREADGROUP:
down_write(&tsk->signal->cgroup_threadgroup_rwsem);
break;
default:
pr_warn("cgroup: Unexpected attach lock mode.");
break;
}
}
/**
* cgroup_attach_unlock - Undo cgroup_attach_lock()
* @lock_mode: whether to release a rwsem and, if so, which one
* @tsk: thread group to unlock
*/
void cgroup_attach_unlock(enum cgroup_attach_lock_mode lock_mode,
struct task_struct *tsk)
{
switch (lock_mode) {
case CGRP_ATTACH_LOCK_NONE:
break;
case CGRP_ATTACH_LOCK_GLOBAL:
percpu_up_write(&cgroup_threadgroup_rwsem);
break;
case CGRP_ATTACH_LOCK_PER_THREADGROUP:
up_write(&tsk->signal->cgroup_threadgroup_rwsem);
break;
default:
pr_warn("cgroup: Unexpected attach lock mode.");
break;
}
cpus_read_unlock();
}
/**
* cgroup_migrate_add_task - add a migration target task to a migration context
* @task: target task
* @mgctx: target migration context
*
* Add @task, which is a migration target, to @mgctx->tset. This function
* becomes a noop if @task doesn't need to be migrated. @task's css_set
* should have been added as a migration source and @task->cg_list will be
* moved from the css_set's tasks list to the mg_tasks one.
*/
static void cgroup_migrate_add_task(struct task_struct *task,
struct cgroup_mgctx *mgctx)
{
struct css_set *cset;
lockdep_assert_held(&css_set_lock);
/* @task either already exited or can't exit until the end */
if (task->flags & PF_EXITING)
return;
/* cgroup_threadgroup_rwsem protects racing against forks */
WARN_ON_ONCE(list_empty(&task->cg_list));
cset = task_css_set(task);
if (!cset->mg_src_cgrp)
return;
mgctx->tset.nr_tasks++;
list_move_tail(&task->cg_list, &cset->mg_tasks);
if (list_empty(&cset->mg_node))
list_add_tail(&cset->mg_node,
&mgctx->tset.src_csets);
if (list_empty(&cset->mg_dst_cset->mg_node))
list_add_tail(&cset->mg_dst_cset->mg_node,
&mgctx->tset.dst_csets);
}
/**
* cgroup_taskset_first - reset taskset and return the first task
* @tset: taskset of interest
* @dst_cssp: output variable for the destination css
*
* @tset iteration is initialized and the first task is returned.
*/
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp)
{
tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
tset->cur_task = NULL;
return cgroup_taskset_next(tset, dst_cssp);
}
/**
* cgroup_taskset_next - iterate to the next task in taskset
* @tset: taskset of interest
* @dst_cssp: output variable for the destination css
*
* Return the next task in @tset. Iteration must have been initialized
* with cgroup_taskset_first().
*/
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp)
{
struct css_set *cset = tset->cur_cset;
struct task_struct *task = tset->cur_task;
while (CGROUP_HAS_SUBSYS_CONFIG && &cset->mg_node != tset->csets) {
if (!task)
task = list_first_entry(&cset->mg_tasks,
struct task_struct, cg_list);
else
task = list_next_entry(task, cg_list);
if (&task->cg_list != &cset->mg_tasks) {
tset->cur_cset = cset;
tset->cur_task = task;
/*
* This function may be called both before and
* after cgroup_migrate_execute(). The two cases
* can be distinguished by looking at whether @cset
* has its ->mg_dst_cset set.
*/
if (cset->mg_dst_cset)
*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
else
*dst_cssp = cset->subsys[tset->ssid];
return task;
}
cset = list_next_entry(cset, mg_node);
task = NULL;
}
return NULL;
}
/**
* cgroup_migrate_execute - migrate a taskset
* @mgctx: migration context
*
* Migrate tasks in @mgctx as setup by migration preparation functions.
* This function fails iff one of the ->can_attach callbacks fails and
* guarantees that either all or none of the tasks in @mgctx are migrated.
* @mgctx is consumed regardless of success.
*/
static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
{
struct cgroup_taskset *tset = &mgctx->tset;
struct cgroup_subsys *ss;
struct task_struct *task, *tmp_task;
struct css_set *cset, *tmp_cset;
int ssid, failed_ssid, ret;
/* check that we can legitimately attach to the cgroup */
if (tset->nr_tasks) {
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ss->can_attach) {
tset->ssid = ssid;
ret = ss->can_attach(tset);
if (ret) {
failed_ssid = ssid;
goto out_cancel_attach;
}
}
} while_each_subsys_mask();
}
/*
* Now that we're guaranteed success, proceed to move all tasks to
* the new cgroup. There are no failure cases after here, so this
* is the commit point.
*/
spin_lock_irq(&css_set_lock);
list_for_each_entry(cset, &tset->src_csets, mg_node) {
list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
struct css_set *from_cset = task_css_set(task);
struct css_set *to_cset = cset->mg_dst_cset;
get_css_set(to_cset);
to_cset->nr_tasks++;
css_set_move_task(task, from_cset, to_cset, true);
from_cset->nr_tasks--;
/*
* If the source or destination cgroup is frozen,
* the task might require to change its state.
*/
cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp,
to_cset->dfl_cgrp);
put_css_set_locked(from_cset);
}
}
spin_unlock_irq(&css_set_lock);
/*
* Migration is committed, all target tasks are now on dst_csets.
* Nothing is sensitive to fork() after this point. Notify
* controllers that migration is complete.
*/
tset->csets = &tset->dst_csets;
if (tset->nr_tasks) {
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ss->attach) {
tset->ssid = ssid;
ss->attach(tset);
}
} while_each_subsys_mask();
}
ret = 0;
goto out_release_tset;
out_cancel_attach:
if (tset->nr_tasks) {
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ssid == failed_ssid)
break;
if (ss->cancel_attach) {
tset->ssid = ssid;
ss->cancel_attach(tset);
}
} while_each_subsys_mask();
}
out_release_tset:
spin_lock_irq(&css_set_lock);
list_splice_init(&tset->dst_csets, &tset->src_csets);
list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
list_del_init(&cset->mg_node);
}
spin_unlock_irq(&css_set_lock);
/*
* Re-initialize the cgroup_taskset structure in case it is reused
* again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
* iteration.
*/
tset->nr_tasks = 0;
tset->csets = &tset->src_csets;
return ret;
}
/**
* cgroup_migrate_vet_dst - verify whether a cgroup can be migration destination
* @dst_cgrp: destination cgroup to test
*
* On the default hierarchy, except for the mixable, (possible) thread root
* and threaded cgroups, subtree_control must be zero for migration
* destination cgroups with tasks so that child cgroups don't compete
* against tasks.
*/
int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp)
{
/* v1 doesn't have any restriction */
if (!cgroup_on_dfl(dst_cgrp))
return 0;
/* verify @dst_cgrp can host resources */
if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp))
return -EOPNOTSUPP;
/*
* If @dst_cgrp is already or can become a thread root or is
* threaded, it doesn't matter.
*/
if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp))
return 0;
/* apply no-internal-process constraint */
if (dst_cgrp->subtree_control)
return -EBUSY;
return 0;
}
/**
* cgroup_migrate_finish - cleanup after attach
* @mgctx: migration context
*
* Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
* those functions for details.
*/
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
{
struct css_set *cset, *tmp_cset;
lockdep_assert_held(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets,
mg_src_preload_node) {
cset->mg_src_cgrp = NULL;
cset->mg_dst_cgrp = NULL;
cset->mg_dst_cset = NULL;
list_del_init(&cset->mg_src_preload_node);
put_css_set_locked(cset);
}
list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets,
mg_dst_preload_node) {
cset->mg_src_cgrp = NULL;
cset->mg_dst_cgrp = NULL;
cset->mg_dst_cset = NULL;
list_del_init(&cset->mg_dst_preload_node);
put_css_set_locked(cset);
}
spin_unlock_irq(&css_set_lock);
}
/**
* cgroup_migrate_add_src - add a migration source css_set
* @src_cset: the source css_set to add
* @dst_cgrp: the destination cgroup
* @mgctx: migration context
*
* Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
* @src_cset and add it to @mgctx->preloaded_src_csets, which should later
* be cleaned up by cgroup_migrate_finish().
*
* This function may be called without holding cgroup_threadgroup_rwsem
* even if the target is a process. Threads may be created and destroyed
* but as long as cgroup_mutex is not dropped, no new css_set can be put
* into play and the preloaded css_sets are guaranteed to cover all
* migrations.
*/
void cgroup_migrate_add_src(struct css_set *src_cset,
struct cgroup *dst_cgrp,
struct cgroup_mgctx *mgctx)
{
struct cgroup *src_cgrp;
lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock);
/*
* If ->dead, @src_set is associated with one or more dead cgroups
* and doesn't contain any migratable tasks. Ignore it early so
* that the rest of migration path doesn't get confused by it.
*/
if (src_cset->dead)
return;
if (!list_empty(&src_cset->mg_src_preload_node))
return;
src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
WARN_ON(src_cset->mg_src_cgrp);
WARN_ON(src_cset->mg_dst_cgrp);
WARN_ON(!list_empty(&src_cset->mg_tasks));
WARN_ON(!list_empty(&src_cset->mg_node));
src_cset->mg_src_cgrp = src_cgrp;
src_cset->mg_dst_cgrp = dst_cgrp;
get_css_set(src_cset);
list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets);
}
/**
* cgroup_migrate_prepare_dst - prepare destination css_sets for migration
* @mgctx: migration context
*
* Tasks are about to be moved and all the source css_sets have been
* preloaded to @mgctx->preloaded_src_csets. This function looks up and
* pins all destination css_sets, links each to its source, and appends them
* to @mgctx->preloaded_dst_csets.
*
* This function must be called after cgroup_migrate_add_src() has been
* called on each migration source css_set. After migration is performed
* using cgroup_migrate(), cgroup_migrate_finish() must be called on
* @mgctx.
*/
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
{
struct css_set *src_cset, *tmp_cset;
lockdep_assert_held(&cgroup_mutex);
/* look up the dst cset for each src cset and link it to src */
list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
mg_src_preload_node) {
struct css_set *dst_cset;
struct cgroup_subsys *ss;
int ssid;
dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
if (!dst_cset)
return -ENOMEM;
WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
/*
* If src cset equals dst, it's a noop. Drop the src.
* cgroup_migrate() will skip the cset too. Note that we
* can't handle src == dst as some nodes are used by both.
*/
if (src_cset == dst_cset) {
src_cset->mg_src_cgrp = NULL;
src_cset->mg_dst_cgrp = NULL;
list_del_init(&src_cset->mg_src_preload_node);
put_css_set(src_cset);
put_css_set(dst_cset);
continue;
}
src_cset->mg_dst_cset = dst_cset;
if (list_empty(&dst_cset->mg_dst_preload_node))
list_add_tail(&dst_cset->mg_dst_preload_node,
&mgctx->preloaded_dst_csets);
else
put_css_set(dst_cset);
for_each_subsys(ss, ssid)
if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
mgctx->ss_mask |= 1 << ssid;
}
return 0;
}
/**
* cgroup_migrate - migrate a process or task to a cgroup
* @leader: the leader of the process or the task to migrate
* @threadgroup: whether @leader points to the whole process or a single task
* @mgctx: migration context
*
* Migrate a process or task denoted by @leader. If migrating a process,
* the caller must be holding cgroup_threadgroup_rwsem. The caller is also
* responsible for invoking cgroup_migrate_add_src() and
* cgroup_migrate_prepare_dst() on the targets before invoking this
* function and following up with cgroup_migrate_finish().
*
* As long as a controller's ->can_attach() doesn't fail, this function is
* guaranteed to succeed. This means that, excluding ->can_attach()
* failure, when migrating multiple targets, the success or failure can be
* decided for all targets by invoking cgroup_migrate_prepare_dst() before
* actually starting to migrate.
*/
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
struct cgroup_mgctx *mgctx)
{
struct task_struct *task;
/*
* The following thread iteration should be inside an RCU critical
* section to prevent tasks from being freed while taking the snapshot.
* spin_lock_irq() implies an RCU critical section here.
*/
spin_lock_irq(&css_set_lock);
task = leader;
do {
cgroup_migrate_add_task(task, mgctx);
if (!threadgroup)
break;
} while_each_thread(leader, task);
spin_unlock_irq(&css_set_lock);
return cgroup_migrate_execute(mgctx);
}
/**
* cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
* @dst_cgrp: the cgroup to attach to
* @leader: the task or the leader of the threadgroup to be attached
* @threadgroup: attach the whole threadgroup?
*
* Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
*/
int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
bool threadgroup)
{
DEFINE_CGROUP_MGCTX(mgctx);
struct task_struct *task;
int ret = 0;
/* look up all src csets */
spin_lock_irq(&css_set_lock);
task = leader;
do {
cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
if (!threadgroup)
break;
} while_each_thread(leader, task);
spin_unlock_irq(&css_set_lock);
/* prepare dst csets and commit */
ret = cgroup_migrate_prepare_dst(&mgctx);
if (!ret)
ret = cgroup_migrate(leader, threadgroup, &mgctx);
cgroup_migrate_finish(&mgctx);
if (!ret)
TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup);
return ret;
}
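/*
 * Look up and pin the task (or its threadgroup leader if @threadgroup)
 * named by the PID in @buf for a cgroup.procs/cgroup.threads write and
 * take the appropriate attach lock. Undone by cgroup_procs_write_finish().
 */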
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
enum cgroup_attach_lock_mode *lock_mode)
{
struct task_struct *tsk;
pid_t pid;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return ERR_PTR(-EINVAL);
retry_find_task:
rcu_read_lock();
if (pid) {
tsk = find_task_by_vpid(pid);
if (!tsk) {
tsk = ERR_PTR(-ESRCH);
goto out_unlock_rcu;
}
} else {
tsk = current;
}
if (threadgroup)
tsk = tsk->group_leader;
/*
* kthreads may acquire PF_NO_SETAFFINITY during initialization.
* If userland migrates such a kthread to a non-root cgroup, it can
* become trapped in a cpuset, or an RT kthread may be born in a
* cgroup with no rt_runtime allocated. Just say no.
*/
if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
tsk = ERR_PTR(-EINVAL);
goto out_unlock_rcu;
}
get_task_struct(tsk);
rcu_read_unlock();
/*
* If we migrate a single thread, we don't care about threadgroup
* stability. If the thread is `current`, it won't exit(2) under our
* hands or change PID through exec(2). We exclude
* cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write callers
* by cgroup_mutex. Therefore, we can skip the global lock.
*/
lockdep_assert_held(&cgroup_mutex);
if (pid || threadgroup) {
if (cgroup_enable_per_threadgroup_rwsem)
*lock_mode = CGRP_ATTACH_LOCK_PER_THREADGROUP;
else
*lock_mode = CGRP_ATTACH_LOCK_GLOBAL;
} else {
*lock_mode = CGRP_ATTACH_LOCK_NONE;
}
cgroup_attach_lock(*lock_mode, tsk);
if (threadgroup) {
if (!thread_group_leader(tsk)) {
/*
* A race with de_thread from another thread's exec()
* may strip us of our leadership. If this happens,
* throw this task away and try again.
*/
cgroup_attach_unlock(*lock_mode, tsk);
put_task_struct(tsk);
goto retry_find_task;
}
}
return tsk;
out_unlock_rcu:
rcu_read_unlock();
return tsk;
}
void cgroup_procs_write_finish(struct task_struct *task,
enum cgroup_attach_lock_mode lock_mode)
{
cgroup_attach_unlock(lock_mode, task);
/* release reference from cgroup_procs_write_start() */
put_task_struct(task);
}
static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
{
struct cgroup_subsys *ss;
bool printed = false;
int ssid;
do_each_subsys_mask(ss, ssid, ss_mask) {
if (printed)
seq_putc(seq, ' ');
seq_puts(seq, ss->name);
printed = true;
} while_each_subsys_mask();
if (printed)
seq_putc(seq, '\n');
}
/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
cgroup_print_ss_mask(seq, cgroup_control(cgrp));
return 0;
}
/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
cgroup_print_ss_mask(seq, cgrp->subtree_control);
return 0;
}
/**
* cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
* @cgrp: root of the subtree to update csses for
*
* @cgrp's control masks have changed and its subtree's css associations
* need to be updated accordingly. This function looks up all css_sets
* which are attached to the subtree, creates the matching updated css_sets
* and migrates the tasks to the new ones.
*/
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
DEFINE_CGROUP_MGCTX(mgctx);
struct cgroup_subsys_state *d_css;
struct cgroup *dsct;
struct css_set *src_cset;
enum cgroup_attach_lock_mode lock_mode;
bool has_tasks;
int ret;
lockdep_assert_held(&cgroup_mutex);
/* look up all csses currently attached to @cgrp's subtree */
spin_lock_irq(&css_set_lock);
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
struct cgrp_cset_link *link;
/*
* As cgroup_update_dfl_csses() is only called by
* cgroup_apply_control(), the csses associated with the
* given cgrp will not be affected by changes made to
* its subtree_control file. We can skip them.
*/
if (dsct == cgrp)
continue;
list_for_each_entry(link, &dsct->cset_links, cset_link)
cgroup_migrate_add_src(link->cset, dsct, &mgctx);
}
spin_unlock_irq(&css_set_lock);
/*
* We need to write-lock threadgroup_rwsem while migrating tasks.
* However, if there are no source csets for @cgrp, changing its
* controllers isn't gonna produce any task migrations and the
* write-locking can be skipped safely.
*/
has_tasks = !list_empty(&mgctx.preloaded_src_csets);
if (has_tasks)
lock_mode = CGRP_ATTACH_LOCK_GLOBAL;
else
lock_mode = CGRP_ATTACH_LOCK_NONE;
cgroup_attach_lock(lock_mode, NULL);
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(&mgctx);
if (ret)
goto out_finish;
spin_lock_irq(&css_set_lock);
list_for_each_entry(src_cset, &mgctx.preloaded_src_csets,
mg_src_preload_node) {
struct task_struct *task, *ntask;
/* all tasks in src_csets need to be migrated */
list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
cgroup_migrate_add_task(task, &mgctx);
}
spin_unlock_irq(&css_set_lock);
ret = cgroup_migrate_execute(&mgctx);
out_finish:
cgroup_migrate_finish(&mgctx);
cgroup_attach_unlock(lock_mode, NULL);
return ret;
}
/**
* cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
* @cgrp: root of the target subtree
*
* Because css offlining is asynchronous, userland may try to re-enable a
* controller while the previous css is still around. This function grabs
* cgroup_mutex and drains the previous css instances of @cgrp's subtree.
*/
void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
__acquires(&cgroup_mutex)
{
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
struct cgroup_subsys *ss;
int ssid;
restart:
cgroup_lock();
cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
for_each_subsys(ss, ssid) {
struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
DEFINE_WAIT(wait);
if (!css || !percpu_ref_is_dying(&css->refcnt))
continue;
cgroup_get_live(dsct);
prepare_to_wait(&dsct->offline_waitq, &wait,
TASK_UNINTERRUPTIBLE);
cgroup_unlock();
schedule();
finish_wait(&dsct->offline_waitq, &wait);
cgroup_put(dsct);
goto restart;
}
}
}
/**
* cgroup_save_control - save control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
* Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
* respective old_ prefixed fields for @cgrp's subtree including @cgrp
* itself.
*/
static void cgroup_save_control(struct cgroup *cgrp)
{
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
dsct->old_subtree_control = dsct->subtree_control;
dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
dsct->old_dom_cgrp = dsct->dom_cgrp;
}
}
/**
* cgroup_propagate_control - refresh control masks of a subtree
* @cgrp: root of the target subtree
*
* For @cgrp and its subtree, ensure ->subtree_ss_mask matches
* ->subtree_control and propagate controller availability through the
* subtree so that descendants don't have unavailable controllers enabled.
*/
static void cgroup_propagate_control(struct cgroup *cgrp)
{
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
dsct->subtree_control &= cgroup_control(dsct);
dsct->subtree_ss_mask =
cgroup_calc_subtree_ss_mask(dsct->subtree_control,
cgroup_ss_mask(dsct));
}
}
/**
* cgroup_restore_control - restore control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
* Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
* respective old_ prefixed fields for @cgrp's subtree including @cgrp
* itself.
*/
static void cgroup_restore_control(struct cgroup *cgrp)
{
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
dsct->subtree_control = dsct->old_subtree_control;
dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
dsct->dom_cgrp = dsct->old_dom_cgrp;
}
}
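/* should @css's interface files be visible to userland? */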
static bool css_visible(struct cgroup_subsys_state *css)
{
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
if (cgroup_control(cgrp) & (1 << ss->id))
return true;
if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
return false;
return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
}
/**
* cgroup_apply_control_enable - enable or show csses according to control
* @cgrp: root of the target subtree
*
* Walk @cgrp's subtree and create new csses or make the existing ones
* visible. A css is created invisible if it's being implicitly enabled
* through dependency. An invisible css is made visible when the userland
* explicitly enables it.
*
* Returns 0 on success, -errno on failure. On failure, csses which have
* been processed already aren't cleaned up. The caller is responsible for
* cleaning up with cgroup_apply_control_disable().
*/
static int cgroup_apply_control_enable(struct cgroup *cgrp)
{
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
struct cgroup_subsys *ss;
int ssid, ret;
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
for_each_subsys(ss, ssid) {
struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
continue;
if (!css) {
css = css_create(dsct, ss);
if (IS_ERR(css))
return PTR_ERR(css);
}
WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
if (css_visible(css)) {
ret = css_populate_dir(css);
if (ret)
return ret;
}
}
}
return 0;
}
/**
* cgroup_apply_control_disable - kill or hide csses according to control
* @cgrp: root of the target subtree
*
* Walk @cgrp's subtree and kill and hide csses so that they match
* cgroup_ss_mask() and cgroup_visible_mask().
*
* A css is hidden when the userland requests it to be disabled while other
* subsystems are still depending on it. The css must not actively control
* resources and be in the vanilla state if it's made visible again later.
* Controllers which may be depended upon should provide ->css_reset() for
* this purpose.
*/
static void cgroup_apply_control_disable(struct cgroup *cgrp)
{
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
struct cgroup_subsys *ss;
int ssid;
cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
for_each_subsys(ss, ssid) {
struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
if (!css)
continue;
WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
if (css->parent &&
!(cgroup_ss_mask(dsct) & (1 << ss->id))) {
kill_css(css);
} else if (!css_visible(css)) {
css_clear_dir(css);
if (ss->css_reset)
ss->css_reset(css);
}
}
}
}
/**
* cgroup_apply_control - apply control mask updates to the subtree
* @cgrp: root of the target subtree
*
* Subsystems can be enabled and disabled in a subtree using the following
* steps.
*
* 1. Call cgroup_save_control() to stash the current state.
* 2. Update ->subtree_control masks in the subtree as desired.
* 3. Call cgroup_apply_control() to apply the changes.
* 4. Optionally perform other related operations.
* 5. Call cgroup_finalize_control() to finish up.
*
* This function implements step 3 and propagates the mask changes
* throughout @cgrp's subtree, updates csses accordingly and performs
* process migrations.
*/
static int cgroup_apply_control(struct cgroup *cgrp)
{
int ret;
cgroup_propagate_control(cgrp);
ret = cgroup_apply_control_enable(cgrp);
if (ret)
return ret;
/*
* At this point, cgroup_e_css_by_mask() results reflect the new csses
* making the following cgroup_update_dfl_csses() properly update
* css associations of all tasks in the subtree.
*/
return cgroup_update_dfl_csses(cgrp);
}
/**
* cgroup_finalize_control - finalize control mask update
* @cgrp: root of the target subtree
* @ret: the result of the update
*
* Finalize control mask update. See cgroup_apply_control() for more info.
*/
static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
{
if (ret) {
cgroup_restore_control(cgrp);
cgroup_propagate_control(cgrp);
}
cgroup_apply_control_disable(cgrp);
}
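/*
 * Check whether the controllers in @enable may be enabled in @cgrp's
 * subtree_control: enforces the no-internal-process constraint and the
 * domain/threaded rules on the default hierarchy.
 */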
static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u16 enable)
{
u16 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask;
/* if nothing is getting enabled, nothing to worry about */
if (!enable)
return 0;
/* can @cgrp host any resources? */
if (!cgroup_is_valid_domain(cgrp->dom_cgrp))
return -EOPNOTSUPP;
/* mixables don't care */
if (cgroup_is_mixable(cgrp))
return 0;
if (domain_enable) {
/* can't enable domain controllers inside a thread subtree */
if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp))
return -EOPNOTSUPP;
} else {
/*
* Threaded controllers can handle internal competitions
* and are always allowed inside a (prospective) thread
* subtree.
*/
if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp))
return 0;
}
/*
* Controllers can't be enabled for a cgroup with tasks to avoid
* child cgroups competing against tasks.
*/
if (cgroup_has_tasks(cgrp))
return -EBUSY;
return 0;
}
/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
u16 enable = 0, disable = 0;
struct cgroup *cgrp, *child;
struct cgroup_subsys *ss;
char *tok;
int ssid, ret;
/*
* Parse input - space separated list of subsystem names prefixed
* with either + or -.
*/
buf = strstrip(buf);
while ((tok = strsep(&buf, " "))) {
if (tok[0] == '\0')
continue;
do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
if (!cgroup_ssid_enabled(ssid) ||
strcmp(tok + 1, ss->name))
continue;
if (*tok == '+') {
enable |= 1 << ssid;
disable &= ~(1 << ssid);
} else if (*tok == '-') {
disable |= 1 << ssid;
enable &= ~(1 << ssid);
} else {
return -EINVAL;
}
break;
} while_each_subsys_mask();
if (ssid == CGROUP_SUBSYS_COUNT)
return -EINVAL;
}
cgrp = cgroup_kn_lock_live(of->kn, true);
if (!cgrp)
return -ENODEV;
for_each_subsys(ss, ssid) {
if (enable & (1 << ssid)) {
if (cgrp->subtree_control & (1 << ssid)) {
enable &= ~(1 << ssid);
continue;
}
if (!(cgroup_control(cgrp) & (1 << ssid))) {
ret = -ENOENT;
goto out_unlock;
}
} else if (disable & (1 << ssid)) {
if (!(cgrp->subtree_control & (1 << ssid))) {
disable &= ~(1 << ssid);
continue;
}
/* a child has it enabled? */
cgroup_for_each_live_child(child, cgrp) {
if (child->subtree_control & (1 << ssid)) {
ret = -EBUSY;
goto out_unlock;
}
}
}
}
if (!enable && !disable) {
ret = 0;
goto out_unlock;
}
ret = cgroup_vet_subtree_control_enable(cgrp, enable);
if (ret)
goto out_unlock;
/* save and update control masks and prepare csses */
cgroup_save_control(cgrp);
cgrp->subtree_control |= enable;
cgrp->subtree_control &= ~disable;
ret = cgroup_apply_control(cgrp);
cgroup_finalize_control(cgrp, ret);
if (ret)
goto out_unlock;
kernfs_activate(cgrp->kn);
out_unlock:
cgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}
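/*
 * For reference, the accepted input is a space separated list of
 * "+<controller>" / "-<controller>" tokens; anything else, including
 * unknown or inhibited controller names, fails with -EINVAL. An
 * illustrative (hypothetical) shell interaction:
 *
 *	# echo "+memory -pids" > cgroup.subtree_control
 */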
/**
* cgroup_enable_threaded - make @cgrp threaded
* @cgrp: the target cgroup
*
* Called when "threaded" is written to the cgroup.type interface file and
* tries to make @cgrp threaded and join the parent's resource domain.
* This function is never called on the root cgroup as cgroup.type doesn't
* exist on it.
*/
static int cgroup_enable_threaded(struct cgroup *cgrp)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup *dom_cgrp = parent->dom_cgrp;
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
int ret;
lockdep_assert_held(&cgroup_mutex);
/* noop if already threaded */
if (cgroup_is_threaded(cgrp))
return 0;
/*
* If @cgrp is populated or has domain controllers enabled, it
* can't be switched. While the below cgroup_can_be_thread_root()
* test can catch the same conditions, that's only when @parent is
* not mixable, so let's check it explicitly.
*/
if (cgroup_is_populated(cgrp) ||
cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
return -EOPNOTSUPP;
/* we're joining the parent's domain, ensure its validity */
if (!cgroup_is_valid_domain(dom_cgrp) ||
!cgroup_can_be_thread_root(dom_cgrp))
return -EOPNOTSUPP;
/*
* The following shouldn't cause actual migrations and should
* always succeed.
*/
cgroup_save_control(cgrp);
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
if (dsct == cgrp || cgroup_is_threaded(dsct))
dsct->dom_cgrp = dom_cgrp;
ret = cgroup_apply_control(cgrp);
if (!ret)
parent->nr_threaded_children++;
cgroup_finalize_control(cgrp, ret);
return ret;
}
static int cgroup_type_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
if (cgroup_is_threaded(cgrp))
seq_puts(seq, "threaded\n");
else if (!cgroup_is_valid_domain(cgrp))
seq_puts(seq, "domain invalid\n");
else if (cgroup_is_thread_root(cgrp))
seq_puts(seq, "domain threaded\n");
else
seq_puts(seq, "domain\n");
return 0;
}
static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cgroup *cgrp;
int ret;
/* only switching to threaded mode is supported */
if (strcmp(strstrip(buf), "threaded"))
return -EINVAL;
/* drain dying csses before we re-apply (threaded) subtree control */
cgrp = cgroup_kn_lock_live(of->kn, true);
if (!cgrp)
return -ENOENT;
/* threaded can only be enabled */
ret = cgroup_enable_threaded(cgrp);
cgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}
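/*
 * Illustrative interaction with cgroup.type: reads return one of the four
 * strings printed by cgroup_type_show() above, and the only accepted write
 * is the literal "threaded", e.g.:
 *
 *	# cat cgroup.type
 *	domain
 *	# echo threaded > cgroup.type
 */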
static int cgroup_max_descendants_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
int descendants = READ_ONCE(cgrp->max_descendants);
if (descendants == INT_MAX)
seq_puts(seq, "max\n");
else
seq_printf(seq, "%d\n", descendants);
return 0;
}
static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cgroup *cgrp;
int descendants;
ssize_t ret;
buf = strstrip(buf);
if (!strcmp(buf, "max")) {
descendants = INT_MAX;
} else {
ret = kstrtoint(buf, 0, &descendants);
if (ret)
return ret;
}
if (descendants < 0)
return -ERANGE;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENOENT;
cgrp->max_descendants = descendants;
cgroup_kn_unlock(of->kn);
return nbytes;
}
static int cgroup_max_depth_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
int depth = READ_ONCE(cgrp->max_depth);
if (depth == INT_MAX)
seq_puts(seq, "max\n");
else
seq_printf(seq, "%d\n", depth);
return 0;
}
static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cgroup *cgrp;
ssize_t ret;
int depth;
buf = strstrip(buf);
if (!strcmp(buf, "max")) {
depth = INT_MAX;
} else {
ret = kstrtoint(buf, 0, &depth);
if (ret)
return ret;
}
if (depth < 0)
return -ERANGE;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENOENT;
cgrp->max_depth = depth;
cgroup_kn_unlock(of->kn);
return nbytes;
}
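/*
 * Both cgroup.max.descendants and cgroup.max.depth accept a non-negative
 * integer or the literal "max" (stored as INT_MAX above), e.g.
 * (illustrative):
 *
 *	# echo 100 > cgroup.max.descendants
 *	# echo max > cgroup.max.depth
 */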
static int cgroup_events_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp));
seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags));
return 0;
}
static int cgroup_stat_show(struct seq_file *seq, void *v)
{
struct cgroup *cgroup = seq_css(seq)->cgroup;
struct cgroup_subsys_state *css;
int dying_cnt[CGROUP_SUBSYS_COUNT];
int ssid;
seq_printf(seq, "nr_descendants %d\n",
cgroup->nr_descendants);
/*
* Show the number of live and dying csses associated with each of the
* non-inhibited cgroup subsystems that are bound to cgroup v2.
*
* Without proper lock protection, racing is possible. So the
* numbers may not be consistent when that happens.
*/
rcu_read_lock();
for (ssid = 0; ssid < CGROUP_SUBSYS_COUNT; ssid++) {
dying_cnt[ssid] = -1;
if ((BIT(ssid) & cgrp_dfl_inhibit_ss_mask) ||
(cgroup_subsys[ssid]->root != &cgrp_dfl_root))
continue;
css = rcu_dereference_raw(cgroup->subsys[ssid]);
dying_cnt[ssid] = cgroup->nr_dying_subsys[ssid];
seq_printf(seq, "nr_subsys_%s %d\n", cgroup_subsys[ssid]->name,
css ? (css->nr_descendants + 1) : 0);
}
seq_printf(seq, "nr_dying_descendants %d\n",
cgroup->nr_dying_descendants);
for (ssid = 0; ssid < CGROUP_SUBSYS_COUNT; ssid++) {
if (dying_cnt[ssid] >= 0)
seq_printf(seq, "nr_dying_subsys_%s %d\n",
cgroup_subsys[ssid]->name, dying_cnt[ssid]);
}
rcu_read_unlock();
return 0;
}
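/*
 * Sample cgroup.stat output as emitted by cgroup_stat_show() above; the
 * values and the controller name are purely illustrative:
 *
 *	nr_descendants 4
 *	nr_subsys_memory 5
 *	nr_dying_descendants 0
 *	nr_dying_subsys_memory 0
 */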
static int cgroup_core_local_stat_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
unsigned int sequence;
u64 freeze_time;
do {
sequence = read_seqcount_begin(&cgrp->freezer.freeze_seq);
freeze_time = cgrp->freezer.frozen_nsec;
/* Add in current freezer interval if the cgroup is freezing. */
if (test_bit(CGRP_FREEZE, &cgrp->flags))
freeze_time += (ktime_get_ns() -
cgrp->freezer.freeze_start_nsec);
} while (read_seqcount_retry(&cgrp->freezer.freeze_seq, sequence));
do_div(freeze_time, NSEC_PER_USEC);
seq_printf(seq, "frozen_usec %llu\n", freeze_time);
return 0;
}
#ifdef CONFIG_CGROUP_SCHED
/**
* cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
*
* Find and get @cgrp's css associated with @ss. If the css doesn't exist
* or is offline, %NULL is returned.
*/
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
struct cgroup_subsys *ss)
{
struct cgroup_subsys_state *css;
rcu_read_lock();
css = cgroup_css(cgrp, ss);
if (css && !css_tryget_online(css))
css = NULL;
rcu_read_unlock();
return css;
}
static int cgroup_extra_stat_show(struct seq_file *seq, int ssid)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct cgroup_subsys *ss = cgroup_subsys[ssid];
struct cgroup_subsys_state *css;
int ret;
if (!ss->css_extra_stat_show)
return 0;
css = cgroup_tryget_css(cgrp, ss);
if (!css)
return 0;
ret = ss->css_extra_stat_show(seq, css);
css_put(css);
return ret;
}
static int cgroup_local_stat_show(struct seq_file *seq,
struct cgroup *cgrp, int ssid)
{
struct cgroup_subsys *ss = cgroup_subsys[ssid];
struct cgroup_subsys_state *css;
int ret;
if (!ss->css_local_stat_show)
return 0;
css = cgroup_tryget_css(cgrp, ss);
if (!css)
return 0;
ret = ss->css_local_stat_show(seq, css);
css_put(css);
return ret;
}
#endif
static int cpu_stat_show(struct seq_file *seq, void *v)
{
int ret = 0;
cgroup_base_stat_cputime_show(seq);
#ifdef CONFIG_CGROUP_SCHED
ret = cgroup_extra_stat_show(seq, cpu_cgrp_id);
#endif
return ret;
}
static int cpu_local_stat_show(struct seq_file *seq, void *v)
{
struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
int ret = 0;
#ifdef CONFIG_CGROUP_SCHED
ret = cgroup_local_stat_show(seq, cgrp, cpu_cgrp_id);
#endif
return ret;
}
#ifdef CONFIG_PSI
static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct psi_group *psi = cgroup_psi(cgrp);
return psi_show(seq, psi, PSI_IO);
}
static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct psi_group *psi = cgroup_psi(cgrp);
return psi_show(seq, psi, PSI_MEM);
}
static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct psi_group *psi = cgroup_psi(cgrp);
return psi_show(seq, psi, PSI_CPU);
}
static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, enum psi_res res)
{
struct cgroup_file_ctx *ctx = of->priv;
struct psi_trigger *new;
struct cgroup *cgrp;
struct psi_group *psi;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
cgroup_get(cgrp);
cgroup_kn_unlock(of->kn);
/* Allow only one trigger per file descriptor */
if (ctx->psi.trigger) {
cgroup_put(cgrp);
return -EBUSY;
}
psi = cgroup_psi(cgrp);
new = psi_trigger_create(psi, buf, res, of->file, of);
if (IS_ERR(new)) {
cgroup_put(cgrp);
return PTR_ERR(new);
}
smp_store_release(&ctx->psi.trigger, new);
cgroup_put(cgrp);
return nbytes;
}
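/*
 * The trigger string handed to psi_trigger_create() above follows the
 * format described in Documentation/accounting/psi.rst, i.e.
 * "<some|full> <stall threshold in us> <window in us>". Illustrative
 * example:
 *
 *	# echo "some 150000 1000000" > memory.pressure
 */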
static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
return pressure_write(of, buf, nbytes, PSI_IO);
}
static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
return pressure_write(of, buf, nbytes, PSI_MEM);
}
static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
return pressure_write(of, buf, nbytes, PSI_CPU);
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int cgroup_irq_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct psi_group *psi = cgroup_psi(cgrp);
return psi_show(seq, psi, PSI_IRQ);
}
static ssize_t cgroup_irq_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
return pressure_write(of, buf, nbytes, PSI_IRQ);
}
#endif
static int cgroup_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct psi_group *psi = cgroup_psi(cgrp);
seq_printf(seq, "%d\n", psi->enabled);
return 0;
}
static ssize_t cgroup_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
ssize_t ret;
int enable;
struct cgroup *cgrp;
struct psi_group *psi;
ret = kstrtoint(strstrip(buf), 0, &enable);
if (ret)
return ret;
if (enable < 0 || enable > 1)
return -ERANGE;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENOENT;
psi = cgroup_psi(cgrp);
if (psi->enabled != enable) {
int i;
/* show or hide {cpu,memory,io,irq}.pressure files */
for (i = 0; i < NR_PSI_RESOURCES; i++)
cgroup_file_show(&cgrp->psi_files[i], enable);
psi->enabled = enable;
if (enable)
psi_cgroup_restart(psi);
}
cgroup_kn_unlock(of->kn);
return nbytes;
}
static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
poll_table *pt)
{
struct cgroup_file_ctx *ctx = of->priv;
return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
}
static void cgroup_pressure_release(struct kernfs_open_file *of)
{
struct cgroup_file_ctx *ctx = of->priv;
psi_trigger_destroy(ctx->psi.trigger);
}
bool cgroup_psi_enabled(void)
{
if (static_branch_likely(&psi_disabled))
return false;
return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0;
}
#else /* CONFIG_PSI */
bool cgroup_psi_enabled(void)
{
return false;
}
#endif /* CONFIG_PSI */
static int cgroup_freeze_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
seq_printf(seq, "%d\n", cgrp->freezer.freeze);
return 0;
}
static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cgroup *cgrp;
ssize_t ret;
int freeze;
ret = kstrtoint(strstrip(buf), 0, &freeze);
if (ret)
return ret;
if (freeze < 0 || freeze > 1)
return -ERANGE;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENOENT;
cgroup_freeze(cgrp, freeze);
cgroup_kn_unlock(of->kn);
return nbytes;
}
static void __cgroup_kill(struct cgroup *cgrp)
{
struct css_task_iter it;
struct task_struct *task;
lockdep_assert_held(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
cgrp->kill_seq++;
spin_unlock_irq(&css_set_lock);
css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
while ((task = css_task_iter_next(&it))) {
/* Ignore kernel threads here. */
if (task->flags & PF_KTHREAD)
continue;
/* Skip tasks that are already dying. */
if (__fatal_signal_pending(task))
continue;
send_sig(SIGKILL, task, 0);
}
css_task_iter_end(&it);
}
static void cgroup_kill(struct cgroup *cgrp)
{
struct cgroup_subsys_state *css;
struct cgroup *dsct;
lockdep_assert_held(&cgroup_mutex);
cgroup_for_each_live_descendant_pre(dsct, css, cgrp)
__cgroup_kill(dsct);
}
static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
ssize_t ret = 0;
int kill;
struct cgroup *cgrp;
ret = kstrtoint(strstrip(buf), 0, &kill);
if (ret)
return ret;
if (kill != 1)
return -ERANGE;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENOENT;
/*
* Killing is a process-directed operation, i.e. the whole thread-group
* is taken down, so act like we do for cgroup.procs and only make this
* writable in non-threaded cgroups.
*/
if (cgroup_is_threaded(cgrp))
ret = -EOPNOTSUPP;
else
cgroup_kill(cgrp);
cgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}
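/*
 * Both cgroup.freeze and cgroup.kill above take a bare integer written as
 * text: cgroup.freeze accepts 0 or 1, cgroup.kill accepts only 1.
 * Illustrative usage:
 *
 *	# echo 1 > cgroup.freeze
 *	# echo 1 > cgroup.kill
 */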
static int cgroup_file_open(struct kernfs_open_file *of)
{
struct cftype *cft = of_cft(of);
struct cgroup_file_ctx *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->ns = current->nsproxy->cgroup_ns;
get_cgroup_ns(ctx->ns);
of->priv = ctx;
if (!cft->open)
return 0;
ret = cft->open(of);
if (ret) {
put_cgroup_ns(ctx->ns);
kfree(ctx);
}
return ret;
}
static void cgroup_file_release(struct kernfs_open_file *of)
{
struct cftype *cft = of_cft(of);
struct cgroup_file_ctx *ctx = of->priv;
if (cft->release)
cft->release(of);
put_cgroup_ns(ctx->ns);
kfree(ctx);
of->priv = NULL;
}
static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cgroup_file_ctx *ctx = of->priv;
struct cgroup *cgrp = kn_priv(of->kn);
struct cftype *cft = of_cft(of);
struct cgroup_subsys_state *css;
int ret;
if (!nbytes)
return 0;
/*
* If namespaces are delegation boundaries, disallow writes to
* files in a non-init namespace root from inside the namespace
* except for the files explicitly marked delegatable -
* e.g. cgroup.procs, cgroup.threads and cgroup.subtree_control.
*/
if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
!(cft->flags & CFTYPE_NS_DELEGATABLE) &&
ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp)
return -EPERM;
if (cft->write)
return cft->write(of, buf, nbytes, off);
/*
* kernfs guarantees that a file isn't deleted with operations in
* flight, which means that the matching css is and stays alive and
* doesn't need to be pinned. The RCU locking is not necessary
* either. It's just for the convenience of using cgroup_css().
*/
rcu_read_lock();
css = cgroup_css(cgrp, cft->ss);
rcu_read_unlock();
if (cft->write_u64) {
unsigned long long v;
ret = kstrtoull(buf, 0, &v);
if (!ret)
ret = cft->write_u64(css, cft, v);
} else if (cft->write_s64) {
long long v;
ret = kstrtoll(buf, 0, &v);
if (!ret)
ret = cft->write_s64(css, cft, v);
} else {
ret = -EINVAL;
}
return ret ?: nbytes;
}
static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
{
struct cftype *cft = of_cft(of);
if (cft->poll)
return cft->poll(of, pt);
return kernfs_generic_poll(of, pt);
}
static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
return seq_cft(seq)->seq_start(seq, ppos);
}
static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
return seq_cft(seq)->seq_next(seq, v, ppos);
}
static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
if (seq_cft(seq)->seq_stop)
seq_cft(seq)->seq_stop(seq, v);
}
static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
struct cftype *cft = seq_cft(m);
struct cgroup_subsys_state *css = seq_css(m);
if (cft->seq_show)
return cft->seq_show(m, arg);
if (cft->read_u64)
seq_printf(m, "%llu\n", cft->read_u64(css, cft));
else if (cft->read_s64)
seq_printf(m, "%lld\n", cft->read_s64(css, cft));
else
return -EINVAL;
return 0;
}
static struct kernfs_ops cgroup_kf_single_ops = {
.atomic_write_len = PAGE_SIZE,
.open = cgroup_file_open,
.release = cgroup_file_release,
.write = cgroup_file_write,
.poll = cgroup_file_poll,
.seq_show = cgroup_seqfile_show,
};
static struct kernfs_ops cgroup_kf_ops = {
.atomic_write_len = PAGE_SIZE,
.open = cgroup_file_open,
.release = cgroup_file_release,
.write = cgroup_file_write,
.poll = cgroup_file_poll,
.seq_start = cgroup_seqfile_start,
.seq_next = cgroup_seqfile_next,
.seq_stop = cgroup_seqfile_stop,
.seq_show = cgroup_seqfile_show,
};
static void cgroup_file_notify_timer(struct timer_list *timer)
{
cgroup_file_notify(container_of(timer, struct cgroup_file,
notify_timer));
}
static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
struct cftype *cft)
{
char name[CGROUP_FILE_NAME_MAX];
struct kernfs_node *kn;
struct lock_class_key *key = NULL;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
key = &cft->lockdep_key;
#endif
kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
cgroup_file_mode(cft),
current_fsuid(), current_fsgid(),
0, cft->kf_ops, cft,
NULL, key);
if (IS_ERR(kn))
return PTR_ERR(kn);
if (cft->file_offset) {
struct cgroup_file *cfile = (void *)css + cft->file_offset;
timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0);
spin_lock_irq(&cgroup_file_kn_lock);
cfile->kn = kn;
spin_unlock_irq(&cgroup_file_kn_lock);
}
return 0;
}
/**
* cgroup_addrm_files - add or remove files to a cgroup directory
* @css: the target css
* @cgrp: the target cgroup (usually css->cgroup)
* @cfts: array of cftypes to be added
* @is_add: whether to add or remove
*
* Depending on @is_add, add or remove files defined by @cfts on @cgrp.
* For removals, this function never fails.
*/
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
struct cgroup *cgrp, struct cftype cfts[],
bool is_add)
{
struct cftype *cft, *cft_end = NULL;
int ret = 0;
lockdep_assert_held(&cgroup_mutex);
restart:
for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
/* does cft->flags tell us to skip this file on @cgrp? */
if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
continue;
if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
continue;
if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
continue;
if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
continue;
if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug)
continue;
if (is_add) {
ret = cgroup_add_file(css, cgrp, cft);
if (ret) {
pr_warn("%s: failed to add %s, err=%d\n",
__func__, cft->name, ret);
cft_end = cft;
is_add = false;
goto restart;
}
} else {
cgroup_rm_file(cgrp, cft);
}
}
return ret;
}
static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
struct cgroup_subsys *ss = cfts[0].ss;
struct cgroup *root = &ss->root->cgrp;
struct cgroup_subsys_state *css;
int ret = 0;
lockdep_assert_held(&cgroup_mutex);
/* add/rm files for all cgroups created before */
css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
struct cgroup *cgrp = css->cgroup;
if (!(css->flags & CSS_VISIBLE))
continue;
ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
if (ret)
break;
}
if (is_add && !ret)
kernfs_activate(root->kn);
return ret;
}
static void cgroup_exit_cftypes(struct cftype *cfts)
{
struct cftype *cft;
for (cft = cfts; cft->name[0] != '\0'; cft++) {
/* free copy for custom atomic_write_len, see init_cftypes() */
if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
kfree(cft->kf_ops);
cft->kf_ops = NULL;
cft->ss = NULL;
/* revert flags set by cgroup core while adding @cfts */
cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL |
__CFTYPE_ADDED);
}
}
static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype *cft;
int ret = 0;
for (cft = cfts; cft->name[0] != '\0'; cft++) {
struct kernfs_ops *kf_ops;
WARN_ON(cft->ss || cft->kf_ops);
if (cft->flags & __CFTYPE_ADDED) {
ret = -EBUSY;
break;
}
if (cft->seq_start)
kf_ops = &cgroup_kf_ops;
else
kf_ops = &cgroup_kf_single_ops;
/*
* Ugh... if @cft wants a custom max_write_len, we need to
* make a copy of kf_ops to set its atomic_write_len.
*/
if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
if (!kf_ops) {
ret = -ENOMEM;
break;
}
kf_ops->atomic_write_len = cft->max_write_len;
}
cft->kf_ops = kf_ops;
cft->ss = ss;
cft->flags |= __CFTYPE_ADDED;
}
if (ret)
cgroup_exit_cftypes(cfts);
return ret;
}
static void cgroup_rm_cftypes_locked(struct cftype *cfts)
{
lockdep_assert_held(&cgroup_mutex);
list_del(&cfts->node);
cgroup_apply_cftypes(cfts, false);
cgroup_exit_cftypes(cfts);
}
/**
* cgroup_rm_cftypes - remove an array of cftypes from a subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Unregister @cfts. Files described by @cfts are removed from all
* existing cgroups and all future cgroups won't have them either. This
* function can be called anytime whether @cfts' subsys is attached or not.
*
* Returns 0 on successful unregistration, -ENOENT if @cfts is not
* registered.
*/
int cgroup_rm_cftypes(struct cftype *cfts)
{
if (!cfts || cfts[0].name[0] == '\0')
return 0;
if (!(cfts[0].flags & __CFTYPE_ADDED))
return -ENOENT;
cgroup_lock();
cgroup_rm_cftypes_locked(cfts);
cgroup_unlock();
return 0;
}
/**
* cgroup_add_cftypes - add an array of cftypes to a subsystem
* @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Register @cfts to @ss. Files described by @cfts are created for all
* existing cgroups to which @ss is attached and all future cgroups will
* have them too. This function can be called anytime whether @ss is
* attached or not.
*
* Returns 0 on successful registration, -errno on failure. Note that this
* function currently returns 0 as long as @cfts registration is successful
* even if some file creation attempts on existing cgroups fail.
*/
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
int ret;
if (!cgroup_ssid_enabled(ss->id))
return 0;
if (!cfts || cfts[0].name[0] == '\0')
return 0;
ret = cgroup_init_cftypes(ss, cfts);
if (ret)
return ret;
cgroup_lock();
list_add_tail(&cfts->node, &ss->cfts);
ret = cgroup_apply_cftypes(cfts, true);
if (ret)
cgroup_rm_cftypes_locked(cfts);
cgroup_unlock();
return ret;
}
/**
* cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
* @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Similar to cgroup_add_cftypes() but the added files are only used for
* the default hierarchy.
*/
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype *cft;
for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
cft->flags |= __CFTYPE_ONLY_ON_DFL;
return cgroup_add_cftypes(ss, cfts);
}
/**
* cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
* @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Similar to cgroup_add_cftypes() but the added files are only used for
* the legacy hierarchies.
*/
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype *cft;
for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
cft->flags |= __CFTYPE_NOT_ON_DFL;
return cgroup_add_cftypes(ss, cfts);
}
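/*
 * Registration sketch for a default-hierarchy-only file array such as the
 * hypothetical example_files shown earlier, typically called from a
 * subsystem's init path (identifiers are illustrative):
 *
 *	ret = cgroup_add_dfl_cftypes(&example_cgrp_subsys, example_files);
 *	if (ret)
 *		return ret;
 */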
/**
* cgroup_file_notify - generate a file modified event for a cgroup_file
* @cfile: target cgroup_file
*
* @cfile must have been obtained by setting cftype->file_offset.
*/
void cgroup_file_notify(struct cgroup_file *cfile)
{
unsigned long flags;
spin_lock_irqsave(&cgroup_file_kn_lock, flags);
if (cfile->kn) {
unsigned long last = cfile->notified_at;
unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV;
if (time_in_range(jiffies, last, next)) {
timer_reduce(&cfile->notify_timer, next);
} else {
kernfs_notify(cfile->kn);
cfile->notified_at = jiffies;
}
}
spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
}
/**
* cgroup_file_show - show or hide a hidden cgroup file
* @cfile: target cgroup_file obtained by setting cftype->file_offset
* @show: whether to show or hide
*/
void cgroup_file_show(struct cgroup_file *cfile, bool show)
{
struct kernfs_node *kn;
spin_lock_irq(&cgroup_file_kn_lock);
kn = cfile->kn;
kernfs_get(kn);
spin_unlock_irq(&cgroup_file_kn_lock);
if (kn)
kernfs_show(kn, show);
kernfs_put(kn);
}
/**
* css_next_child - find the next child of a given css
* @pos: the current position (%NULL to initiate traversal)
* @parent: css whose children to walk
*
* This function returns the next child of @parent and should be called
* under either cgroup_mutex or RCU read lock. The only requirement is
* that @parent and @pos are accessible. The next sibling is guaranteed to
* be returned regardless of their states.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
*/
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *parent)
{
struct cgroup_subsys_state *next;
cgroup_assert_mutex_or_rcu_locked();
/*
* @pos could already have been unlinked from the sibling list.
* Once a cgroup is removed, its ->sibling.next is no longer
* updated when its next sibling changes. CSS_RELEASED is set when
* @pos is taken off list, at which time its next pointer is valid,
* and, as releases are serialized, the one pointed to by the next
* pointer is guaranteed to not have started release yet. This
* implies that if we observe !CSS_RELEASED on @pos in this RCU
* critical section, the one pointed to by its next pointer is
* guaranteed to not have finished its RCU grace period even if we
* have dropped rcu_read_lock() in-between iterations.
*
* If @pos has CSS_RELEASED set, its next pointer can't be
* dereferenced; however, as each css is given a monotonically
* increasing unique serial number and always appended to the
* sibling list, the next one can be found by walking the parent's
* children until the first css with higher serial number than
* @pos's. While this path can be slower, it happens iff iteration
* races against release and the race window is very small.
*/
if (!pos) {
next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
} else if (likely(!(pos->flags & CSS_RELEASED))) {
next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
} else {
list_for_each_entry_rcu(next, &parent->children, sibling,
lockdep_is_held(&cgroup_mutex))
if (next->serial_nr > pos->serial_nr)
break;
}
/*
* @next, if not pointing to the head, can be dereferenced and is
* the next sibling.
*/
if (&next->sibling != &parent->children)
return next;
return NULL;
}
/**
* css_next_descendant_pre - find the next descendant for pre-order walk
* @pos: the current position (%NULL to initiate traversal)
* @root: css whose descendants to walk
*
* To be used by css_for_each_descendant_pre(). Find the next descendant
* to visit for pre-order traversal of @root's descendants. @root is
* included in the iteration and the first node to be visited.
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
* section. Additionally, it isn't necessary to hold onto a reference to @pos.
* This function will return the correct next descendant as long as both @pos
* and @root are accessible and @pos is a descendant of @root.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
*/
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *root)
{
struct cgroup_subsys_state *next;
cgroup_assert_mutex_or_rcu_locked();
/* if first iteration, visit @root */
if (!pos)
return root;
/* visit the first child if exists */
next = css_next_child(NULL, pos);
if (next)
return next;
/* no child, visit my or the closest ancestor's next sibling */
while (pos != root) {
next = css_next_child(pos, pos->parent);
if (next)
return next;
pos = pos->parent;
}
return NULL;
}
EXPORT_SYMBOL_GPL(css_next_descendant_pre);
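/*
 * Typical pre-order walk built on top of css_next_descendant_pre() via the
 * css_for_each_descendant_pre() macro (sketch; the loop body is
 * illustrative):
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css) {
 *		if (!(pos->flags & CSS_ONLINE))
 *			continue;
 *		// inspect pos
 *	}
 *	rcu_read_unlock();
 */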
/**
* css_rightmost_descendant - return the rightmost descendant of a css
* @pos: css of interest
*
* Return the rightmost descendant of @pos. If there's no descendant, @pos
* is returned. This can be used during pre-order traversal to skip the
* subtree of @pos.
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
* section. Additionally, it isn't necessary to hold onto a reference to @pos.
* This function will return the correct rightmost descendant as long as @pos
* is accessible.
*/
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
struct cgroup_subsys_state *last, *tmp;
cgroup_assert_mutex_or_rcu_locked();
do {
last = pos;
/* ->prev isn't RCU safe, walk ->next till the end */
pos = NULL;
css_for_each_child(tmp, last)
pos = tmp;
} while (pos);
return last;
}
static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
struct cgroup_subsys_state *last;
do {
last = pos;
pos = css_next_child(NULL, pos);
} while (pos);
return last;
}
/**
* css_next_descendant_post - find the next descendant for post-order walk
* @pos: the current position (%NULL to initiate traversal)
* @root: css whose descendants to walk
*
* To be used by css_for_each_descendant_post(). Find the next descendant
* to visit for post-order traversal of @root's descendants. @root is
* included in the iteration and the last node to be visited.
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
* section. Additionally, it isn't necessary to hold onto a reference to @pos.
* This function will return the correct next descendant as long as both @pos
* and @root are accessible and @pos is a descendant of @root.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
*/
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *root)
{
struct cgroup_subsys_state *next;
cgroup_assert_mutex_or_rcu_locked();
/* if first iteration, visit leftmost descendant which may be @root */
if (!pos)
return css_leftmost_descendant(root);
/* if we visited @root, we're done */
if (pos == root)
return NULL;
/* if there's an unvisited sibling, visit its leftmost descendant */
next = css_next_child(pos, pos->parent);
if (next)
return css_leftmost_descendant(next);
/* no sibling left, visit parent */
return pos->parent;
}
/**
* css_has_online_children - does a css have online children
* @css: the target css
*
* Returns %true if @css has any online children; otherwise, %false. This
* function can be called from any context but the caller is responsible
* for synchronizing against on/offlining as necessary.
*/
bool css_has_online_children(struct cgroup_subsys_state *css)
{
struct cgroup_subsys_state *child;
bool ret = false;
rcu_read_lock();
css_for_each_child(child, css) {
if (child->flags & CSS_ONLINE) {
ret = true;
break;
}
}
rcu_read_unlock();
return ret;
}
static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
{
struct list_head *l;
struct cgrp_cset_link *link;
struct css_set *cset;
lockdep_assert_held(&css_set_lock);
/* find the next threaded cset */
if (it->tcset_pos) {
l = it->tcset_pos->next;
if (l != it->tcset_head) {
it->tcset_pos = l;
return container_of(l, struct css_set,
threaded_csets_node);
}
it->tcset_pos = NULL;
}
/* find the next cset */
l = it->cset_pos;
l = l->next;
if (l == it->cset_head) {
it->cset_pos = NULL;
return NULL;
}
if (it->ss) {
cset = container_of(l, struct css_set, e_cset_node[it->ss->id]);
} else {
link = list_entry(l, struct cgrp_cset_link, cset_link);
cset = link->cset;
}
it->cset_pos = l;
/* initialize threaded css_set walking */
if (it->flags & CSS_TASK_ITER_THREADED) {
if (it->cur_dcset)
put_css_set_locked(it->cur_dcset);
it->cur_dcset = cset;
get_css_set(cset);
it->tcset_head = &cset->threaded_csets;
it->tcset_pos = &cset->threaded_csets;
}
return cset;
}
/**
* css_task_iter_advance_css_set - advance a task iterator to the next css_set
* @it: the iterator to advance
*
* Advance @it to the next css_set to walk.
*/
static void css_task_iter_advance_css_set(struct css_task_iter *it)
{
struct css_set *cset;
lockdep_assert_held(&css_set_lock);
/* Advance to the next non-empty css_set and find the first non-empty tasks list */
while ((cset = css_task_iter_next_css_set(it))) {
if (!list_empty(&cset->tasks)) {
it->cur_tasks_head = &cset->tasks;
break;
} else if (!list_empty(&cset->mg_tasks)) {
it->cur_tasks_head = &cset->mg_tasks;
break;
} else if (!list_empty(&cset->dying_tasks)) {
it->cur_tasks_head = &cset->dying_tasks;
break;
}
}
if (!cset) {
it->task_pos = NULL;
return;
}
it->task_pos = it->cur_tasks_head->next;
/*
* We don't keep css_sets locked across iteration steps and thus
* need to take steps to ensure that iteration can be resumed after
* the lock is re-acquired. Iteration is performed at two levels -
* css_sets and tasks in them.
*
* Once created, a css_set never leaves its cgroup lists, so a
* pinned css_set is guaranteed to stay put and we can resume
* iteration afterwards.
*
* Tasks may leave @cset across iteration steps. This is resolved
* by registering each iterator with the css_set currently being
* walked and making css_set_move_task() advance iterators whose
* next task is leaving.
*/
if (it->cur_cset) {
list_del(&it->iters_node);
put_css_set_locked(it->cur_cset);
}
get_css_set(cset);
it->cur_cset = cset;
list_add(&it->iters_node, &cset->task_iters);
}
static void css_task_iter_skip(struct css_task_iter *it,
struct task_struct *task)
{
lockdep_assert_held(&css_set_lock);
if (it->task_pos == &task->cg_list) {
it->task_pos = it->task_pos->next;
it->flags |= CSS_TASK_ITER_SKIPPED;
}
}
static void css_task_iter_advance(struct css_task_iter *it)
{
struct task_struct *task;
lockdep_assert_held(&css_set_lock);
repeat:
if (it->task_pos) {
/*
* Advance iterator to find next entry. We go through cset
* tasks, mg_tasks and dying_tasks, when consumed we move onto
* the next cset.
*/
if (it->flags & CSS_TASK_ITER_SKIPPED)
it->flags &= ~CSS_TASK_ITER_SKIPPED;
else
it->task_pos = it->task_pos->next;
if (it->task_pos == &it->cur_cset->tasks) {
it->cur_tasks_head = &it->cur_cset->mg_tasks;
it->task_pos = it->cur_tasks_head->next;
}
if (it->task_pos == &it->cur_cset->mg_tasks) {
it->cur_tasks_head = &it->cur_cset->dying_tasks;
it->task_pos = it->cur_tasks_head->next;
}
if (it->task_pos == &it->cur_cset->dying_tasks)
css_task_iter_advance_css_set(it);
} else {
/* called from start, proceed to the first cset */
css_task_iter_advance_css_set(it);
}
if (!it->task_pos)
return;
task = list_entry(it->task_pos, struct task_struct, cg_list);
if (it->flags & CSS_TASK_ITER_PROCS) {
/* if PROCS, skip over tasks which aren't group leaders */
if (!thread_group_leader(task))
goto repeat;
/* and dying leaders w/o live member threads */
if (it->cur_tasks_head == &it->cur_cset->dying_tasks &&
!atomic_read(&task->signal->live))
goto repeat;
} else {
/* skip all dying ones */
if (it->cur_tasks_head == &it->cur_cset->dying_tasks)
goto repeat;
}
}
/**
* css_task_iter_start - initiate task iteration
* @css: the css to walk tasks of
* @flags: CSS_TASK_ITER_* flags
* @it: the task iterator to use
*
* Initiate iteration through the tasks of @css. The caller can call
* css_task_iter_next() to walk through the tasks until the function
* returns NULL. On completion of iteration, css_task_iter_end() must be
* called.
*/
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it)
{
unsigned long irqflags;
memset(it, 0, sizeof(*it));
spin_lock_irqsave(&css_set_lock, irqflags);
it->ss = css->ss;
it->flags = flags;
if (CGROUP_HAS_SUBSYS_CONFIG && it->ss)
it->cset_pos = &css->cgroup->e_csets[css->ss->id];
else
it->cset_pos = &css->cgroup->cset_links;
it->cset_head = it->cset_pos;
css_task_iter_advance(it);
spin_unlock_irqrestore(&css_set_lock, irqflags);
}
/**
* css_task_iter_next - return the next task for the iterator
* @it: the task iterator being iterated
*
* The "next" function for task iteration. @it should have been
* initialized via css_task_iter_start(). Returns NULL when the iteration
* reaches the end.
*/
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
unsigned long irqflags;
if (it->cur_task) {
put_task_struct(it->cur_task);
it->cur_task = NULL;
}
spin_lock_irqsave(&css_set_lock, irqflags);
/* @it may be half-advanced by skips, finish advancing */
if (it->flags & CSS_TASK_ITER_SKIPPED)
css_task_iter_advance(it);
if (it->task_pos) {
it->cur_task = list_entry(it->task_pos, struct task_struct,
cg_list);
get_task_struct(it->cur_task);
css_task_iter_advance(it);
}
spin_unlock_irqrestore(&css_set_lock, irqflags);
return it->cur_task;
}
/**
* css_task_iter_end - finish task iteration
* @it: the task iterator to finish
*
* Finish task iteration started by css_task_iter_start().
*/
void css_task_iter_end(struct css_task_iter *it)
{
unsigned long irqflags;
if (it->cur_cset) {
spin_lock_irqsave(&css_set_lock, irqflags);
list_del(&it->iters_node);
put_css_set_locked(it->cur_cset);
spin_unlock_irqrestore(&css_set_lock, irqflags);
}
if (it->cur_dcset)
put_css_set(it->cur_dcset);
if (it->cur_task)
put_task_struct(it->cur_task);
}
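/*
 * Canonical task iteration pattern (sketch), matching what __cgroup_kill()
 * above does:
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it))) {
 *		// act on task
 *	}
 *	css_task_iter_end(&it);
 */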
static void cgroup_procs_release(struct kernfs_open_file *of)
{
struct cgroup_file_ctx *ctx = of->priv;
if (ctx->procs.started)
css_task_iter_end(&ctx->procs.iter);
}
static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
{
struct kernfs_open_file *of = s->private;
struct cgroup_file_ctx *ctx = of->priv;
if (pos)
(*pos)++;
return css_task_iter_next(&ctx->procs.iter);
}
static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
unsigned int iter_flags)
{
struct kernfs_open_file *of = s->private;
struct cgroup *cgrp = seq_css(s)->cgroup;
struct cgroup_file_ctx *ctx = of->priv;
struct css_task_iter *it = &ctx->procs.iter;
/*
* When a seq_file is seeked, it's always traversed sequentially
* from position 0, so we can simply keep iterating on !0 *pos.
*/
if (!ctx->procs.started) {
if (WARN_ON_ONCE((*pos)))
return ERR_PTR(-EINVAL);
css_task_iter_start(&cgrp->self, iter_flags, it);
ctx->procs.started = true;
} else if (!(*pos)) {
css_task_iter_end(it);
css_task_iter_start(&cgrp->self, iter_flags, it);
} else
return it->cur_task;
return cgroup_procs_next(s, NULL, NULL);
}
static void *cgroup_procs_start(struct seq_file *s, loff_t *pos)
{
struct cgroup *cgrp = seq_css(s)->cgroup;
/*
* All processes of a threaded subtree belong to the domain cgroup
* of the subtree. Only threads can be distributed across the
* subtree. Reject reads on cgroup.procs in the subtree proper.
* They're always empty anyway.
*/
if (cgroup_is_threaded(cgrp))
return ERR_PTR(-EOPNOTSUPP);
return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS |
CSS_TASK_ITER_THREADED);
}
static int cgroup_procs_show(struct seq_file *s, void *v)
{
seq_printf(s, "%d\n", task_pid_vnr(v));
return 0;
}
static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
{
int ret;
struct inode *inode;
lockdep_assert_held(&cgroup_mutex);
inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
if (!inode)
return -ENOMEM;
ret = inode_permission(&nop_mnt_idmap, inode, MAY_WRITE);
iput(inode);
return ret;
}
static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
struct cgroup *dst_cgrp,
struct super_block *sb,
struct cgroup_namespace *ns)
{
struct cgroup *com_cgrp = src_cgrp;
int ret;
lockdep_assert_held(&cgroup_mutex);
/* find the common ancestor */
while (!cgroup_is_descendant(dst_cgrp, com_cgrp))
com_cgrp = cgroup_parent(com_cgrp);
/* %current should be authorized to migrate to the common ancestor */
ret = cgroup_may_write(com_cgrp, sb);
if (ret)
return ret;
/*
* If namespaces are delegation boundaries, %current must be able
* to see both source and destination cgroups from its namespace.
*/
if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) &&
(!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) ||
!cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp)))
return -ENOENT;
return 0;
}
static int cgroup_attach_permissions(struct cgroup *src_cgrp,
struct cgroup *dst_cgrp,
struct super_block *sb, bool threadgroup,
struct cgroup_namespace *ns)
{
int ret = 0;
ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns);
if (ret)
return ret;
ret = cgroup_migrate_vet_dst(dst_cgrp);
if (ret)
return ret;
if (!threadgroup && (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp))
ret = -EOPNOTSUPP;
return ret;
}
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
bool threadgroup)
{
struct cgroup_file_ctx *ctx = of->priv;
struct cgroup *src_cgrp, *dst_cgrp;
struct task_struct *task;
const struct cred *saved_cred;
ssize_t ret;
enum cgroup_attach_lock_mode lock_mode;
dst_cgrp = cgroup_kn_lock_live(of->kn, false);
if (!dst_cgrp)
return -ENODEV;
task = cgroup_procs_write_start(buf, threadgroup, &lock_mode);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
/* find the source cgroup */
spin_lock_irq(&css_set_lock);
src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
spin_unlock_irq(&css_set_lock);
/*
* Process and thread migrations follow same delegation rule. Check
* permissions using the credentials from file open to protect against
* inherited fd attacks.
*/
saved_cred = override_creds(of->file->f_cred);
ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
of->file->f_path.dentry->d_sb,
threadgroup, ctx->ns);
revert_creds(saved_cred);
if (ret)
goto out_finish;
ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
out_finish:
cgroup_procs_write_finish(task, lock_mode);
out_unlock:
cgroup_kn_unlock(of->kn);
return ret;
}
static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
return __cgroup_procs_write(of, buf, true) ?: nbytes;
}
static void *cgroup_threads_start(struct seq_file *s, loff_t *pos)
{
return __cgroup_procs_start(s, pos, 0);
}
static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
return __cgroup_procs_write(of, buf, false) ?: nbytes;
}
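/*
 * From userspace, migration is requested by writing a single PID or TID as
 * text (illustrative):
 *
 *	# echo 1234 > cgroup.procs	(moves the whole thread-group)
 *	# echo 1234 > cgroup.threads	(moves the single thread)
 */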
/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_base_files[] = {
{
.name = "cgroup.type",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cgroup_type_show,
.write = cgroup_type_write,
},
{
.name = "cgroup.procs",
.flags = CFTYPE_NS_DELEGATABLE,
.file_offset = offsetof(struct cgroup, procs_file),
.release = cgroup_procs_release,
.seq_start = cgroup_procs_start,
.seq_next = cgroup_procs_next,
.seq_show = cgroup_procs_show,
.write = cgroup_procs_write,
},
{
.name = "cgroup.threads",
.flags = CFTYPE_NS_DELEGATABLE,
.release = cgroup_procs_release,
.seq_start = cgroup_threads_start,
.seq_next = cgroup_procs_next,
.seq_show = cgroup_procs_show,
.write = cgroup_threads_write,
},
{
.name = "cgroup.controllers",
.seq_show = cgroup_controllers_show,
},
{
.name = "cgroup.subtree_control",
.flags = CFTYPE_NS_DELEGATABLE,
.seq_show = cgroup_subtree_control_show,
.write = cgroup_subtree_control_write,
},
{
.name = "cgroup.events",
.flags = CFTYPE_NOT_ON_ROOT,
.file_offset = offsetof(struct cgroup, events_file),
.seq_show = cgroup_events_show,
},
{
.name = "cgroup.max.descendants",
.seq_show = cgroup_max_descendants_show,
.write = cgroup_max_descendants_write,
},
{
.name = "cgroup.max.depth",
.seq_show = cgroup_max_depth_show,
.write = cgroup_max_depth_write,
},
{
.name = "cgroup.stat",
.seq_show = cgroup_stat_show,
},
{
.name = "cgroup.stat.local",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cgroup_core_local_stat_show,
},
{
.name = "cgroup.freeze",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cgroup_freeze_show,
.write = cgroup_freeze_write,
},
{
.name = "cgroup.kill",
.flags = CFTYPE_NOT_ON_ROOT,
.write = cgroup_kill_write,
},
{
.name = "cpu.stat",
.seq_show = cpu_stat_show,
},
{
.name = "cpu.stat.local",
.seq_show = cpu_local_stat_show,
},
{ } /* terminate */
};
static struct cftype cgroup_psi_files[] = {
#ifdef CONFIG_PSI
{
.name = "io.pressure",
.file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
.seq_show = cgroup_io_pressure_show,
.write = cgroup_io_pressure_write,
.poll = cgroup_pressure_poll,
.release = cgroup_pressure_release,
},
{
.name = "memory.pressure",
.file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
.seq_show = cgroup_memory_pressure_show,
.write = cgroup_memory_pressure_write,
.poll = cgroup_pressure_poll,
.release = cgroup_pressure_release,
},
{
.name = "cpu.pressure",
.file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
.seq_show = cgroup_cpu_pressure_show,
.write = cgroup_cpu_pressure_write,
.poll = cgroup_pressure_poll,
.release = cgroup_pressure_release,
},
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
{
.name = "irq.pressure",
.file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
.seq_show = cgroup_irq_pressure_show,
.write = cgroup_irq_pressure_write,
.poll = cgroup_pressure_poll,
.release = cgroup_pressure_release,
},
#endif
{
.name = "cgroup.pressure",
.seq_show = cgroup_pressure_show,
.write = cgroup_pressure_write,
},
#endif /* CONFIG_PSI */
{ } /* terminate */
};
/*
* css destruction is a four-stage process.
*
* 1. Destruction starts. Killing of the percpu_ref is initiated.
* Implemented in kill_css().
*
* 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
* and thus css_tryget_online() is guaranteed to fail, the css can be
* offlined by invoking offline_css(). After offlining, the base ref is
* put. Implemented in css_killed_work_fn().
*
* 3. When the percpu_ref reaches zero, the only possible remaining
* accessors are inside RCU read sections. css_release() schedules the
* RCU callback.
*
* 4. After the grace period, the css can be freed. Implemented in
* css_free_rwork_fn().
*
* It is actually hairier because both steps 2 and 4 require process context
* and thus involve punting to css->destroy_work, adding two additional
* steps to the already complex sequence.
*/
static void css_free_rwork_fn(struct work_struct *work)
{
struct cgroup_subsys_state *css = container_of(to_rcu_work(work),
struct cgroup_subsys_state, destroy_rwork);
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
percpu_ref_exit(&css->refcnt);
css_rstat_exit(css);
if (!css_is_self(css)) {
/* css free path */
struct cgroup_subsys_state *parent = css->parent;
int id = css->id;
ss->css_free(css);
cgroup_idr_remove(&ss->css_idr, id);
cgroup_put(cgrp);
if (parent)
css_put(parent);
} else {
/* cgroup free path */
atomic_dec(&cgrp->root->nr_cgrps);
if (!cgroup_on_dfl(cgrp))
cgroup1_pidlist_destroy_all(cgrp);
cancel_work_sync(&cgrp->release_agent_work);
bpf_cgrp_storage_free(cgrp);
if (cgroup_parent(cgrp)) {
/*
* We get a ref to the parent, and put the ref when
* this cgroup is being freed, so it's guaranteed
* that the parent won't be destroyed before its
* children.
*/
cgroup_put(cgroup_parent(cgrp));
kernfs_put(cgrp->kn);
psi_cgroup_free(cgrp);
kfree(cgrp);
} else {
/*
* This is root cgroup's refcnt reaching zero,
* which indicates that the root should be
* released.
*/
cgroup_destroy_root(cgrp->root);
}
}
}
static void css_release_work_fn(struct work_struct *work)
{
struct cgroup_subsys_state *css =
container_of(work, struct cgroup_subsys_state, destroy_work);
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
cgroup_lock();
css->flags |= CSS_RELEASED;
list_del_rcu(&css->sibling);
if (!css_is_self(css)) {
struct cgroup *parent_cgrp;
css_rstat_flush(css);
cgroup_idr_replace(&ss->css_idr, NULL, css->id);
if (ss->css_released)
ss->css_released(css);
cgrp->nr_dying_subsys[ss->id]--;
/*
* When a css is released and ready to be freed, its
* nr_descendants must be zero. However, the corresponding
* cgrp->nr_dying_subsys[ss->id] may not be 0 if a subsystem
* is activated and deactivated multiple times with one or
* more of its previous activations leaving behind dying csses.
*/
WARN_ON_ONCE(css->nr_descendants);
parent_cgrp = cgroup_parent(cgrp);
while (parent_cgrp) {
parent_cgrp->nr_dying_subsys[ss->id]--;
parent_cgrp = cgroup_parent(parent_cgrp);
}
} else {
struct cgroup *tcgrp;
/* cgroup release path */
TRACE_CGROUP_PATH(release, cgrp);
css_rstat_flush(&cgrp->self);
spin_lock_irq(&css_set_lock);
for (tcgrp = cgroup_parent(cgrp); tcgrp;
tcgrp = cgroup_parent(tcgrp))
tcgrp->nr_dying_descendants--;
spin_unlock_irq(&css_set_lock);
/*
* There are two control paths which try to determine
* cgroup from dentry without going through kernfs -
* cgroupstats_build() and css_tryget_online_from_dir().
* Those are supported by RCU protecting the clearing of the
* cgrp->kn->priv backpointer.
*/
if (cgrp->kn)
RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
NULL);
}
cgroup_unlock();
INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
}
static void css_release(struct percpu_ref *ref)
{
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
INIT_WORK(&css->destroy_work, css_release_work_fn);
queue_work(cgroup_release_wq, &css->destroy_work);
}
static void init_and_link_css(struct cgroup_subsys_state *css,
struct cgroup_subsys *ss, struct cgroup *cgrp)
{
lockdep_assert_held(&cgroup_mutex);
cgroup_get_live(cgrp);
memset(css, 0, sizeof(*css));
css->cgroup = cgrp;
css->ss = ss;
css->id = -1;
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
atomic_set(&css->online_cnt, 0);
if (cgroup_parent(cgrp)) {
css->parent = cgroup_css(cgroup_parent(cgrp), ss);
css_get(css->parent);
}
BUG_ON(cgroup_css(cgrp, ss));
}
/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
struct cgroup_subsys *ss = css->ss;
int ret = 0;
lockdep_assert_held(&cgroup_mutex);
if (ss->css_online)
ret = ss->css_online(css);
if (!ret) {
css->flags |= CSS_ONLINE;
rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
atomic_inc(&css->online_cnt);
if (css->parent) {
atomic_inc(&css->parent->online_cnt);
while ((css = css->parent))
css->nr_descendants++;
}
}
return ret;
}
/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
struct cgroup_subsys *ss = css->ss;
lockdep_assert_held(&cgroup_mutex);
if (!(css->flags & CSS_ONLINE))
return;
if (ss->css_offline)
ss->css_offline(css);
css->flags &= ~CSS_ONLINE;
RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
wake_up_all(&css->cgroup->offline_waitq);
css->cgroup->nr_dying_subsys[ss->id]++;
/*
* Parent css and cgroup cannot be freed until after the freeing
* of child css, see css_free_rwork_fn().
*/
while ((css = css->parent)) {
css->nr_descendants--;
css->cgroup->nr_dying_subsys[ss->id]++;
}
}
/**
* css_create - create a cgroup_subsys_state
* @cgrp: the cgroup new css will be associated with
* @ss: the subsys of new css
*
* Create a new css associated with @cgrp - @ss pair. On success, the new
* css is online and installed in @cgrp. This function doesn't create the
* interface files. Returns the new css on success, ERR_PTR(-errno) on failure.
*/
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
struct cgroup_subsys *ss)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
struct cgroup_subsys_state *css;
int err;
lockdep_assert_held(&cgroup_mutex);
css = ss->css_alloc(parent_css);
if (!css)
css = ERR_PTR(-ENOMEM);
if (IS_ERR(css))
return css;
init_and_link_css(css, ss, cgrp);
err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
if (err)
goto err_free_css;
err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
if (err < 0)
goto err_free_css;
css->id = err;
err = css_rstat_init(css);
if (err)
goto err_free_css;
/* @css is ready to be brought online now, make it visible */
list_add_tail_rcu(&css->sibling, &parent_css->children);
cgroup_idr_replace(&ss->css_idr, css, css->id);
err = online_css(css);
if (err)
goto err_list_del;
return css;
err_list_del:
list_del_rcu(&css->sibling);
err_free_css:
INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
return ERR_PTR(err);
}
/*
* The returned cgroup is fully initialized including its control mask, but
* it doesn't have the control mask applied.
*/
static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
umode_t mode)
{
struct cgroup_root *root = parent->root;
struct cgroup *cgrp, *tcgrp;
struct kernfs_node *kn;
int i, level = parent->level + 1;
int ret;
/* allocate the cgroup and its ID, 0 is reserved for the root */
cgrp = kzalloc(struct_size(cgrp, ancestors, (level + 1)), GFP_KERNEL);
if (!cgrp)
return ERR_PTR(-ENOMEM);
ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
if (ret)
goto out_free_cgrp;
/* create the directory */
kn = kernfs_create_dir_ns(parent->kn, name, mode,
current_fsuid(), current_fsgid(),
cgrp, NULL);
if (IS_ERR(kn)) {
ret = PTR_ERR(kn);
goto out_cancel_ref;
}
cgrp->kn = kn;
init_cgroup_housekeeping(cgrp);
cgrp->self.parent = &parent->self;
cgrp->root = root;
cgrp->level = level;
/*
* Now that init_cgroup_housekeeping() has been called and cgrp->self
* is set up, it is safe to perform rstat initialization on it.
*/
ret = css_rstat_init(&cgrp->self);
if (ret)
goto out_kernfs_remove;
ret = psi_cgroup_alloc(cgrp);
if (ret)
goto out_stat_exit;
for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
cgrp->ancestors[tcgrp->level] = tcgrp;
/*
* New cgroup inherits effective freeze counter, and
* if the parent has to be frozen, the child has too.
*/
cgrp->freezer.e_freeze = parent->freezer.e_freeze;
seqcount_spinlock_init(&cgrp->freezer.freeze_seq, &css_set_lock);
if (cgrp->freezer.e_freeze) {
/*
* Set the CGRP_FREEZE flag, so that when a process is
* attached to the child cgroup, it will become frozen.
* At this point the new cgroup is unpopulated, so we can
* consider it frozen immediately.
*/
set_bit(CGRP_FREEZE, &cgrp->flags);
cgrp->freezer.freeze_start_nsec = ktime_get_ns();
set_bit(CGRP_FROZEN, &cgrp->flags);
}
if (notify_on_release(parent))
set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
cgrp->self.serial_nr = css_serial_nr_next++;
ret = blocking_notifier_call_chain_robust(&cgroup_lifetime_notifier,
CGROUP_LIFETIME_ONLINE,
CGROUP_LIFETIME_OFFLINE, cgrp);
ret = notifier_to_errno(ret);
if (ret)
goto out_psi_free;
/* allocation complete, commit to creation */
spin_lock_irq(&css_set_lock);
for (i = 0; i < level; i++) {
tcgrp = cgrp->ancestors[i];
tcgrp->nr_descendants++;
/*
* If the new cgroup is frozen, all ancestor cgroups get a new
* frozen descendant, but their state can't change because of
* this.
*/
if (cgrp->freezer.e_freeze)
tcgrp->freezer.nr_frozen_descendants++;
}
spin_unlock_irq(&css_set_lock);
list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
atomic_inc(&root->nr_cgrps);
cgroup_get_live(parent);
/*
* On the default hierarchy, a child doesn't automatically inherit
* subtree_control from the parent. Each is configured manually.
*/
if (!cgroup_on_dfl(cgrp))
cgrp->subtree_control = cgroup_control(cgrp);
cgroup_propagate_control(cgrp);
return cgrp;
out_psi_free:
psi_cgroup_free(cgrp);
out_stat_exit:
css_rstat_exit(&cgrp->self);
out_kernfs_remove:
kernfs_remove(cgrp->kn);
out_cancel_ref:
percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
kfree(cgrp);
return ERR_PTR(ret);
}
static bool cgroup_check_hierarchy_limits(struct cgroup *parent)
{
struct cgroup *cgroup;
int ret = false;
int level = 0;
lockdep_assert_held(&cgroup_mutex);
for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) {
if (cgroup->nr_descendants >= cgroup->max_descendants)
goto fail;
if (level >= cgroup->max_depth)
goto fail;
level++;
}
ret = true;
fail:
return ret;
}
int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
{
struct cgroup *parent, *cgrp;
int ret;
/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
if (strchr(name, '\n'))
return -EINVAL;
parent = cgroup_kn_lock_live(parent_kn, false);
if (!parent)
return -ENODEV;
if (!cgroup_check_hierarchy_limits(parent)) {
ret = -EAGAIN;
goto out_unlock;
}
cgrp = cgroup_create(parent, name, mode);
if (IS_ERR(cgrp)) {
ret = PTR_ERR(cgrp);
goto out_unlock;
}
/*
* This extra ref will be put in css_free_rwork_fn() and guarantees
* that @cgrp->kn is always accessible.
*/
kernfs_get(cgrp->kn);
ret = css_populate_dir(&cgrp->self);
if (ret)
goto out_destroy;
ret = cgroup_apply_control_enable(cgrp);
if (ret)
goto out_destroy;
TRACE_CGROUP_PATH(mkdir, cgrp);
/* let's create and online css's */
kernfs_activate(cgrp->kn);
ret = 0;
goto out_unlock;
out_destroy:
cgroup_destroy_locked(cgrp);
out_unlock:
cgroup_kn_unlock(parent_kn);
return ret;
}
/*
* This is called when the refcnt of a css is confirmed to be killed.
* css_tryget_online() is now guaranteed to fail. Tell the subsystem to
* initiate destruction and put the css ref from kill_css().
*/
static void css_killed_work_fn(struct work_struct *work)
{
struct cgroup_subsys_state *css =
container_of(work, struct cgroup_subsys_state, destroy_work);
cgroup_lock();
do {
offline_css(css);
css_put(css);
/* @css can't go away while we're holding cgroup_mutex */
css = css->parent;
} while (css && atomic_dec_and_test(&css->online_cnt));
cgroup_unlock();
}
/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
if (atomic_dec_and_test(&css->online_cnt)) {
INIT_WORK(&css->destroy_work, css_killed_work_fn);
queue_work(cgroup_offline_wq, &css->destroy_work);
}
}
/**
* kill_css - destroy a css
* @css: css to destroy
*
* This function initiates destruction of @css by removing cgroup interface
* files and putting its base reference. ->css_offline() will be invoked
* asynchronously once css_tryget_online() is guaranteed to fail and when
* the reference count reaches zero, @css will be released.
*/
static void kill_css(struct cgroup_subsys_state *css)
{
lockdep_assert_held(&cgroup_mutex);
if (css->flags & CSS_DYING)
return;
/*
* Call css_killed(), if defined, before setting the CSS_DYING flag
*/
if (css->ss->css_killed)
css->ss->css_killed(css);
css->flags |= CSS_DYING;
/*
* This must happen before css is disassociated with its cgroup.
* See seq_css() for details.
*/
css_clear_dir(css);
/*
* Killing would put the base ref, but we need to keep it alive
* until after ->css_offline().
*/
css_get(css);
/*
* cgroup core guarantees that, by the time ->css_offline() is
* invoked, no new css reference will be given out via
* css_tryget_online(). We can't simply call percpu_ref_kill() and
* proceed to offlining css's because percpu_ref_kill() doesn't
* guarantee that the ref is seen as killed on all CPUs on return.
*
* Use percpu_ref_kill_and_confirm() to get notifications as each
* css is confirmed to be seen as killed on all CPUs.
*/
percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}
/**
* cgroup_destroy_locked - the first stage of cgroup destruction
* @cgrp: cgroup to be destroyed
*
* css's make use of percpu refcnts whose killing latency shouldn't be
* exposed to userland and are RCU protected. Also, cgroup core needs to
* guarantee that css_tryget_online() won't succeed by the time
* ->css_offline() is invoked. To satisfy all the requirements,
* destruction is implemented in the following two steps.
*
* s1. Verify @cgrp can be destroyed and mark it dying. Remove all
* userland visible parts and start killing the percpu refcnts of
* css's. Set up so that the next stage will be kicked off once all
* the percpu refcnts are confirmed to be killed.
*
* s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
* rest of destruction. Once all cgroup references are gone, the
* cgroup is RCU-freed.
*
* This function implements s1. After this step, @cgrp is gone as far as
* the userland is concerned and a new cgroup with the same name may be
* created. As cgroup doesn't care about the names internally, this
* doesn't cause any problem.
*/
static int cgroup_destroy_locked(struct cgroup *cgrp)
__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
struct cgroup *tcgrp, *parent = cgroup_parent(cgrp);
struct cgroup_subsys_state *css;
struct cgrp_cset_link *link;
int ssid, ret;
lockdep_assert_held(&cgroup_mutex);
/*
* Only migration can raise populated from zero and we're already
* holding cgroup_mutex.
*/
if (cgroup_is_populated(cgrp))
return -EBUSY;
/*
* Make sure there are no live children. We can't test emptiness of
* ->self.children as dead children linger on it while being
* drained; otherwise, "rmdir parent/child parent" may fail.
*/
if (css_has_online_children(&cgrp->self))
return -EBUSY;
/*
* Mark @cgrp and the associated csets dead. The former prevents
* further task migration and child creation by disabling
* cgroup_kn_lock_live(). The latter makes the csets ignored by
* the migration path.
*/
cgrp->self.flags &= ~CSS_ONLINE;
spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
link->cset->dead = true;
spin_unlock_irq(&css_set_lock);
/* initiate massacre of all css's */
for_each_css(css, ssid, cgrp)
kill_css(css);
/* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */
css_clear_dir(&cgrp->self);
kernfs_remove(cgrp->kn);
if (cgroup_is_threaded(cgrp))
parent->nr_threaded_children--;
spin_lock_irq(&css_set_lock);
for (tcgrp = parent; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
tcgrp->nr_descendants--;
tcgrp->nr_dying_descendants++;
/*
* If the dying cgroup is frozen, decrease frozen descendants
* counters of ancestor cgroups.
*/
if (test_bit(CGRP_FROZEN, &cgrp->flags))
tcgrp->freezer.nr_frozen_descendants--;
}
spin_unlock_irq(&css_set_lock);
cgroup1_check_for_release(parent);
ret = blocking_notifier_call_chain(&cgroup_lifetime_notifier,
CGROUP_LIFETIME_OFFLINE, cgrp);
WARN_ON_ONCE(notifier_to_errno(ret));
/* put the base reference */
percpu_ref_kill(&cgrp->self.refcnt);
return 0;
};
int cgroup_rmdir(struct kernfs_node *kn)
{
struct cgroup *cgrp;
int ret = 0;
cgrp = cgroup_kn_lock_live(kn, false);
if (!cgrp)
return 0;
ret = cgroup_destroy_locked(cgrp);
if (!ret)
TRACE_CGROUP_PATH(rmdir, cgrp);
cgroup_kn_unlock(kn);
return ret;
}
static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
.show_options = cgroup_show_options,
.mkdir = cgroup_mkdir,
.rmdir = cgroup_rmdir,
.show_path = cgroup_show_path,
};
static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
struct cgroup_subsys_state *css;
pr_debug("Initializing cgroup subsys %s\n", ss->name);
cgroup_lock();
idr_init(&ss->css_idr);
INIT_LIST_HEAD(&ss->cfts);
/* Create the root cgroup state for this subsystem */
ss->root = &cgrp_dfl_root;
css = ss->css_alloc(NULL);
/* We don't handle early failures gracefully */
BUG_ON(IS_ERR(css));
init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
/*
* Root csses are never destroyed and we can't initialize
* percpu_ref during early init. Disable refcnting.
*/
css->flags |= CSS_NO_REF;
if (early) {
/* allocation can't be done safely during early init */
css->id = 1;
} else {
css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
BUG_ON(css->id < 0);
BUG_ON(ss_rstat_init(ss));
BUG_ON(css_rstat_init(css));
}
/* Update the init_css_set to contain a subsys
* pointer to this state - since the subsystem is
* newly registered, all tasks, and hence the
* init_css_set, are in the subsystem's root cgroup. */
init_css_set.subsys[ss->id] = css;
have_fork_callback |= (bool)ss->fork << ss->id;
have_exit_callback |= (bool)ss->exit << ss->id;
have_release_callback |= (bool)ss->release << ss->id;
have_canfork_callback |= (bool)ss->can_fork << ss->id;
/* At system boot, before all subsystems have been
* registered, no tasks have been forked, so we don't
* need to invoke fork callbacks here. */
BUG_ON(!list_empty(&init_task.tasks));
BUG_ON(online_css(css));
cgroup_unlock();
}
/**
* cgroup_init_early - cgroup initialization at system boot
*
* Initialize cgroups at system boot, and initialize any
* subsystems that request early init.
*/
int __init cgroup_init_early(void)
{
static struct cgroup_fs_context __initdata ctx;
struct cgroup_subsys *ss;
int i;
ctx.root = &cgrp_dfl_root;
init_cgroup_root(&ctx);
cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
for_each_subsys(ss, i) {
WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
"invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
ss->id, ss->name);
WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
"cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
WARN(ss->early_init && ss->css_rstat_flush,
"cgroup rstat cannot be used with early init subsystem\n");
ss->id = i;
ss->name = cgroup_subsys_name[i];
if (!ss->legacy_name)
ss->legacy_name = cgroup_subsys_name[i];
if (ss->early_init)
cgroup_init_subsys(ss, true);
}
return 0;
}
/**
* cgroup_init - cgroup initialization
*
* Register cgroup filesystem and /proc file, and initialize
* any subsystems that didn't request early init.
*/
int __init cgroup_init(void)
{
struct cgroup_subsys *ss;
int ssid;
BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
BUG_ON(cgroup_init_cftypes(NULL, cgroup_psi_files));
BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
BUG_ON(ss_rstat_init(NULL));
get_user_ns(init_cgroup_ns.user_ns);
cgroup_lock();
/*
* Add init_css_set to the hash table so that dfl_root can link to
* it during init.
*/
hash_add(css_set_table, &init_css_set.hlist,
css_set_hash(init_css_set.subsys));
cgroup_bpf_lifetime_notifier_init();
BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
cgroup_unlock();
for_each_subsys(ss, ssid) {
if (ss->early_init) {
struct cgroup_subsys_state *css =
init_css_set.subsys[ss->id];
css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
GFP_KERNEL);
BUG_ON(css->id < 0);
} else {
cgroup_init_subsys(ss, false);
}
list_add_tail(&init_css_set.e_cset_node[ssid],
&cgrp_dfl_root.cgrp.e_csets[ssid]);
/*
* Setting dfl_root subsys_mask needs to consider the
* disabled flag and cftype registration needs kmalloc,
* both of which aren't available during early_init.
*/
if (!cgroup_ssid_enabled(ssid))
continue;
if (cgroup1_ssid_disabled(ssid))
pr_info("Disabling %s control group subsystem in v1 mounts\n",
ss->legacy_name);
cgrp_dfl_root.subsys_mask |= 1 << ss->id;
/* implicit controllers must be threaded too */
WARN_ON(ss->implicit_on_dfl && !ss->threaded);
if (ss->implicit_on_dfl)
cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
else if (!ss->dfl_cftypes)
cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
if (ss->threaded)
cgrp_dfl_threaded_ss_mask |= 1 << ss->id;
if (ss->dfl_cftypes == ss->legacy_cftypes) {
WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
} else {
WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
}
if (ss->bind)
ss->bind(init_css_set.subsys[ssid]);
cgroup_lock();
css_populate_dir(init_css_set.subsys[ssid]);
cgroup_unlock();
}
/* init_css_set.subsys[] has been updated, re-hash */
hash_del(&init_css_set.hlist);
hash_add(css_set_table, &init_css_set.hlist,
css_set_hash(init_css_set.subsys));
WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
WARN_ON(register_filesystem(&cgroup_fs_type));
WARN_ON(register_filesystem(&cgroup2_fs_type));
WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show));
#ifdef CONFIG_CPUSETS_V1
WARN_ON(register_filesystem(&cpuset_fs_type));
#endif
ns_tree_add(&init_cgroup_ns);
return 0;
}
static int __init cgroup_wq_init(void)
{
/*
* There isn't much point in executing destruction path in
* parallel. Good chunk is serialized with cgroup_mutex anyway.
* Use 1 for @max_active.
*
* We would prefer to do this in cgroup_init() above, but that
* is called before init_workqueues(): so leave this until after.
*/
cgroup_offline_wq = alloc_workqueue("cgroup_offline", WQ_PERCPU, 1);
BUG_ON(!cgroup_offline_wq);
cgroup_release_wq = alloc_workqueue("cgroup_release", WQ_PERCPU, 1);
BUG_ON(!cgroup_release_wq);
cgroup_free_wq = alloc_workqueue("cgroup_free", WQ_PERCPU, 1);
BUG_ON(!cgroup_free_wq);
return 0;
}
core_initcall(cgroup_wq_init);
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{
struct kernfs_node *kn;
kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
if (!kn)
return;
kernfs_path(kn, buf, buflen);
kernfs_put(kn);
}
/*
* __cgroup_get_from_id : get the cgroup associated with cgroup id
* @id: cgroup id
* On success return the cgrp or ERR_PTR on failure
* There are no cgroup NS restrictions.
*/
struct cgroup *__cgroup_get_from_id(u64 id)
{
struct kernfs_node *kn;
struct cgroup *cgrp;
kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
if (!kn)
return ERR_PTR(-ENOENT);
if (kernfs_type(kn) != KERNFS_DIR) {
kernfs_put(kn);
return ERR_PTR(-ENOENT);
}
rcu_read_lock();
cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
if (cgrp && !cgroup_tryget(cgrp))
cgrp = NULL;
rcu_read_unlock();
kernfs_put(kn);
if (!cgrp)
return ERR_PTR(-ENOENT);
return cgrp;
}
/*
* cgroup_get_from_id : get the cgroup associated with cgroup id
* @id: cgroup id
* On success return the cgrp or ERR_PTR on failure
* Only cgroups within current task's cgroup NS are valid.
*/
struct cgroup *cgroup_get_from_id(u64 id)
{
struct cgroup *cgrp, *root_cgrp;
cgrp = __cgroup_get_from_id(id);
if (IS_ERR(cgrp))
return cgrp;
root_cgrp = current_cgns_cgroup_dfl();
if (!cgroup_is_descendant(cgrp, root_cgrp)) {
cgroup_put(cgrp);
return ERR_PTR(-ENOENT);
}
return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_id);
/*
* proc_cgroup_show()
* - Print task's cgroup paths into seq_file, one line for each hierarchy
* - Used for /proc/<pid>/cgroup.
*/
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk)
{
char *buf;
int retval;
struct cgroup_root *root;
retval = -ENOMEM;
buf = kmalloc(PATH_MAX, GFP_KERNEL);
if (!buf)
goto out;
rcu_read_lock();
spin_lock_irq(&css_set_lock);
for_each_root(root) {
struct cgroup_subsys *ss;
struct cgroup *cgrp;
int ssid, count = 0;
if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible))
continue;
cgrp = task_cgroup_from_root(tsk, root);
/* The root has already been unmounted. */
if (!cgrp)
continue;
seq_printf(m, "%d:", root->hierarchy_id);
if (root != &cgrp_dfl_root)
for_each_subsys(ss, ssid)
if (root->subsys_mask & (1 << ssid))
seq_printf(m, "%s%s", count++ ? "," : "",
ss->legacy_name);
if (strlen(root->name))
seq_printf(m, "%sname=%s", count ? "," : "",
root->name);
seq_putc(m, ':');
/*
* On traditional hierarchies, all zombie tasks show up as
* belonging to the root cgroup. On the default hierarchy,
* while a zombie doesn't show up in "cgroup.procs" and
* thus can't be migrated, its /proc/PID/cgroup keeps
* reporting the cgroup it belonged to before exiting. If
* the cgroup is removed before the zombie is reaped,
* " (deleted)" is appended to the cgroup path.
*/
if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
current->nsproxy->cgroup_ns);
if (retval == -E2BIG)
retval = -ENAMETOOLONG;
if (retval < 0)
goto out_unlock;
seq_puts(m, buf);
} else {
seq_puts(m, "/");
}
if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
seq_puts(m, " (deleted)\n");
else
seq_putc(m, '\n');
}
retval = 0;
out_unlock:
spin_unlock_irq(&css_set_lock);
rcu_read_unlock();
kfree(buf);
out:
return retval;
}
/**
* cgroup_fork - initialize cgroup related fields during copy_process()
* @child: pointer to the child task_struct being initialized.
*
* A task is associated with the init_css_set until cgroup_post_fork()
* attaches it to the target css_set.
*/
void cgroup_fork(struct task_struct *child)
{
RCU_INIT_POINTER(child->cgroups, &init_css_set);
INIT_LIST_HEAD(&child->cg_list);
}
/**
* cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer
* @f: file corresponding to cgroup_dir
*
* Find the cgroup from a file pointer associated with a cgroup directory.
* Returns a pointer to the cgroup on success. ERR_PTR is returned if the
* cgroup cannot be found.
*/
static struct cgroup *cgroup_v1v2_get_from_file(struct file *f)
{
struct cgroup_subsys_state *css;
css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
if (IS_ERR(css))
return ERR_CAST(css);
return css->cgroup;
}
/**
* cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports
* cgroup2.
* @f: file corresponding to cgroup2_dir
*/
static struct cgroup *cgroup_get_from_file(struct file *f)
{
struct cgroup *cgrp = cgroup_v1v2_get_from_file(f);
if (IS_ERR(cgrp))
return ERR_CAST(cgrp);
if (!cgroup_on_dfl(cgrp)) {
cgroup_put(cgrp);
return ERR_PTR(-EBADF);
}
return cgrp;
}
/**
* cgroup_css_set_fork - find or create a css_set for a child process
* @kargs: the arguments passed to create the child process
*
* This function finds or creates a new css_set which the child
* process will be attached to in cgroup_post_fork(). By default,
* the child process will be given the same css_set as its parent.
*
* If CLONE_INTO_CGROUP is specified this function will try to find an
* existing css_set which includes the requested cgroup and if not create
* a new css_set that the child will be attached to later. If this function
* succeeds it will hold cgroup_threadgroup_rwsem on return. If
* CLONE_INTO_CGROUP is requested this function will grab cgroup mutex
* before grabbing cgroup_threadgroup_rwsem and will hold a reference
* to the target cgroup.
*/
static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
__acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem)
{
int ret;
struct cgroup *dst_cgrp = NULL;
struct css_set *cset;
struct super_block *sb;
if (kargs->flags & CLONE_INTO_CGROUP)
cgroup_lock();
cgroup_threadgroup_change_begin(current);
spin_lock_irq(&css_set_lock);
cset = task_css_set(current);
get_css_set(cset);
if (kargs->cgrp)
kargs->kill_seq = kargs->cgrp->kill_seq;
else
kargs->kill_seq = cset->dfl_cgrp->kill_seq;
spin_unlock_irq(&css_set_lock);
if (!(kargs->flags & CLONE_INTO_CGROUP)) {
kargs->cset = cset;
return 0;
}
CLASS(fd_raw, f)(kargs->cgroup);
if (fd_empty(f)) {
ret = -EBADF;
goto err;
}
sb = fd_file(f)->f_path.dentry->d_sb;
dst_cgrp = cgroup_get_from_file(fd_file(f));
if (IS_ERR(dst_cgrp)) {
ret = PTR_ERR(dst_cgrp);
dst_cgrp = NULL;
goto err;
}
if (cgroup_is_dead(dst_cgrp)) {
ret = -ENODEV;
goto err;
}
/*
* Verify that the target cgroup is writable for us. This is
* usually done by the vfs layer but since we're not going through
* the vfs layer here we need to do it "manually".
*/
ret = cgroup_may_write(dst_cgrp, sb);
if (ret)
goto err;
/*
* Spawning a task directly into a cgroup works by passing a file
* descriptor to the target cgroup directory. This can even be an O_PATH
* file descriptor. But it can never be a cgroup.procs file descriptor.
* This was done on purpose so spawning into a cgroup could be
* conceptualized as an atomic
*
* fd = openat(dfd_cgroup, "cgroup.procs", ...);
* write(fd, <child-pid>, ...);
*
* sequence, i.e. it's a shorthand for the caller opening and writing
* cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us
* to always use the caller's credentials.
*/
ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
!(kargs->flags & CLONE_THREAD),
current->nsproxy->cgroup_ns);
if (ret)
goto err;
kargs->cset = find_css_set(cset, dst_cgrp);
if (!kargs->cset) {
ret = -ENOMEM;
goto err;
}
put_css_set(cset);
kargs->cgrp = dst_cgrp;
return ret;
err:
cgroup_threadgroup_change_end(current);
cgroup_unlock();
if (dst_cgrp)
cgroup_put(dst_cgrp);
put_css_set(cset);
if (kargs->cset)
put_css_set(kargs->cset);
return ret;
}
/**
* cgroup_css_set_put_fork - drop references we took during fork
* @kargs: the arguments passed to create the child process
*
* Drop references to the prepared css_set and target cgroup if
* CLONE_INTO_CGROUP was requested.
*/
static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
{
struct cgroup *cgrp = kargs->cgrp;
struct css_set *cset = kargs->cset;
cgroup_threadgroup_change_end(current);
if (cset) {
put_css_set(cset);
kargs->cset = NULL;
}
if (kargs->flags & CLONE_INTO_CGROUP) {
cgroup_unlock();
if (cgrp) {
cgroup_put(cgrp);
kargs->cgrp = NULL;
}
}
}
/**
* cgroup_can_fork - called on a new task before the process is exposed
* @child: the child process
* @kargs: the arguments passed to create the child process
*
* This prepares a new css_set for the child process which the child will
* be attached to in cgroup_post_fork().
* This calls the subsystem can_fork() callbacks. If a can_fork()
* callback returns an error, the fork aborts with that error code. This
* allows a cgroup subsystem to conditionally allow or deny new forks.
*/
int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs)
{
struct cgroup_subsys *ss;
int i, j, ret;
ret = cgroup_css_set_fork(kargs);
if (ret)
return ret;
do_each_subsys_mask(ss, i, have_canfork_callback) {
ret = ss->can_fork(child, kargs->cset);
if (ret)
goto out_revert;
} while_each_subsys_mask();
return 0;
out_revert:
for_each_subsys(ss, j) {
if (j >= i)
break;
if (ss->cancel_fork)
ss->cancel_fork(child, kargs->cset);
}
cgroup_css_set_put_fork(kargs);
return ret;
}
/**
* cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
* @child: the child process
* @kargs: the arguments passed to create the child process
*
* This calls the cancel_fork() callbacks if a fork failed *after*
* cgroup_can_fork() succeeded and cleans up references we took to
* prepare a new css_set for the child process in cgroup_can_fork().
*/
void cgroup_cancel_fork(struct task_struct *child,
struct kernel_clone_args *kargs)
{
struct cgroup_subsys *ss;
int i;
for_each_subsys(ss, i)
if (ss->cancel_fork)
ss->cancel_fork(child, kargs->cset);
cgroup_css_set_put_fork(kargs);
}
/**
* cgroup_post_fork - finalize cgroup setup for the child process
* @child: the child process
* @kargs: the arguments passed to create the child process
*
* Attach the child process to its css_set calling the subsystem fork()
* callbacks.
*/
void cgroup_post_fork(struct task_struct *child,
struct kernel_clone_args *kargs)
__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
{
unsigned int cgrp_kill_seq = 0;
unsigned long cgrp_flags = 0;
bool kill = false;
struct cgroup_subsys *ss;
struct css_set *cset;
int i;
cset = kargs->cset;
kargs->cset = NULL;
spin_lock_irq(&css_set_lock);
/* init tasks are special, only link regular threads */
if (likely(child->pid)) {
if (kargs->cgrp) {
cgrp_flags = kargs->cgrp->flags;
cgrp_kill_seq = kargs->cgrp->kill_seq;
} else {
cgrp_flags = cset->dfl_cgrp->flags;
cgrp_kill_seq = cset->dfl_cgrp->kill_seq;
}
WARN_ON_ONCE(!list_empty(&child->cg_list));
cset->nr_tasks++;
css_set_move_task(child, NULL, cset, false);
} else {
put_css_set(cset);
cset = NULL;
}
if (!(child->flags & PF_KTHREAD)) {
if (unlikely(test_bit(CGRP_FREEZE, &cgrp_flags))) {
/*
* If the cgroup has to be frozen, the new task has
* too. Let's set the JOBCTL_TRAP_FREEZE jobctl bit to
* get the task into the frozen state.
*/
spin_lock(&child->sighand->siglock);
WARN_ON_ONCE(child->frozen);
child->jobctl |= JOBCTL_TRAP_FREEZE;
spin_unlock(&child->sighand->siglock);
/*
* Calling cgroup_update_frozen() isn't required here,
* because it will be called anyway a bit later from
* do_freezer_trap(). So we avoid cgroup's transient
* switch from the frozen state and back.
*/
}
/*
* If the cgroup is to be killed notice it now and take the
* child down right after we finished preparing it for
* userspace.
*/
kill = kargs->kill_seq != cgrp_kill_seq;
}
spin_unlock_irq(&css_set_lock);
/*
* Call ss->fork(). This must happen after @child is linked on
* css_set; otherwise, @child might change state between ->fork()
* and addition to css_set.
*/
do_each_subsys_mask(ss, i, have_fork_callback) {
ss->fork(child);
} while_each_subsys_mask();
/* Make the new cset the root_cset of the new cgroup namespace. */
if (kargs->flags & CLONE_NEWCGROUP) {
struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset;
get_css_set(cset);
child->nsproxy->cgroup_ns->root_cset = cset;
put_css_set(rcset);
}
/* Cgroup has to be killed so take down child immediately. */
if (unlikely(kill))
do_send_sig_info(SIGKILL, SEND_SIG_NOINFO, child, PIDTYPE_TGID);
cgroup_css_set_put_fork(kargs);
}
/**
* cgroup_exit - detach cgroup from exiting task
* @tsk: pointer to task_struct of exiting process
*
* Description: Detach cgroup from @tsk.
*
*/
void cgroup_exit(struct task_struct *tsk)
{
struct cgroup_subsys *ss;
struct css_set *cset;
int i;
spin_lock_irq(&css_set_lock);
WARN_ON_ONCE(list_empty(&tsk->cg_list));
cset = task_css_set(tsk);
css_set_move_task(tsk, cset, NULL, false);
cset->nr_tasks--;
/* matches the signal->live check in css_task_iter_advance() */
if (thread_group_leader(tsk) && atomic_read(&tsk->signal->live))
list_add_tail(&tsk->cg_list, &cset->dying_tasks);
if (dl_task(tsk))
dec_dl_tasks_cs(tsk);
WARN_ON_ONCE(cgroup_task_frozen(tsk));
if (unlikely(!(tsk->flags & PF_KTHREAD) &&
test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
cgroup_update_frozen(task_dfl_cgroup(tsk));
spin_unlock_irq(&css_set_lock);
/* see cgroup_post_fork() for details */
do_each_subsys_mask(ss, i, have_exit_callback) {
ss->exit(tsk);
} while_each_subsys_mask();
}
void cgroup_release(struct task_struct *task)
{
struct cgroup_subsys *ss;
int ssid;
do_each_subsys_mask(ss, ssid, have_release_callback) {
ss->release(task);
} while_each_subsys_mask();
if (!list_empty(&task->cg_list)) {
spin_lock_irq(&css_set_lock);
css_set_skip_task_iters(task_css_set(task), task);
list_del_init(&task->cg_list);
spin_unlock_irq(&css_set_lock);
}
}
void cgroup_free(struct task_struct *task)
{
struct css_set *cset = task_css_set(task);
put_css_set(cset);
}
static int __init cgroup_disable(char *str)
{
struct cgroup_subsys *ss;
char *token;
int i;
while ((token = strsep(&str, ",")) != NULL) {
if (!*token)
continue;
for_each_subsys(ss, i) {
if (strcmp(token, ss->name) &&
strcmp(token, ss->legacy_name))
continue;
static_branch_disable(cgroup_subsys_enabled_key[i]);
pr_info("Disabling %s control group subsystem\n",
ss->name);
}
for (i = 0; i < OPT_FEATURE_COUNT; i++) {
if (strcmp(token, cgroup_opt_feature_names[i]))
continue;
cgroup_feature_disable_mask |= 1 << i;
pr_info("Disabling %s control group feature\n",
cgroup_opt_feature_names[i]);
break;
}
}
return 1;
}
__setup("cgroup_disable=", cgroup_disable);
void __init __weak enable_debug_cgroup(void) { }
static int __init enable_cgroup_debug(char *str)
{
cgroup_debug = true;
enable_debug_cgroup();
return 1;
}
__setup("cgroup_debug", enable_cgroup_debug);
static int __init cgroup_favordynmods_setup(char *str)
{
return (kstrtobool(str, &have_favordynmods) == 0);
}
__setup("cgroup_favordynmods=", cgroup_favordynmods_setup);
/**
* css_tryget_online_from_dir - get corresponding css from a cgroup dentry
* @dentry: directory dentry of interest
* @ss: subsystem of interest
*
* If @dentry is a directory for a cgroup which has @ss enabled on it, try
* to get the corresponding css and return it. If such css doesn't exist
* or can't be pinned, an ERR_PTR value is returned.
*/
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss)
{
struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
struct file_system_type *s_type = dentry->d_sb->s_type;
struct cgroup_subsys_state *css = NULL;
struct cgroup *cgrp;
/* is @dentry a cgroup dir? */
if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
!kn || kernfs_type(kn) != KERNFS_DIR)
return ERR_PTR(-EBADF);
rcu_read_lock();
/*
* This path doesn't originate from kernfs and @kn could already
* have been or be removed at any point. @kn->priv is RCU
* protected for this access. See css_release_work_fn() for details.
*/
cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
if (cgrp)
css = cgroup_css(cgrp, ss);
if (!css || !css_tryget_online(css))
css = ERR_PTR(-ENOENT);
rcu_read_unlock();
return css;
}
/**
* css_from_id - lookup css by id
* @id: the cgroup id
* @ss: cgroup subsys to be looked into
*
* Returns the css if there's valid one with @id, otherwise returns NULL.
* Should be called under rcu_read_lock().
*/
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
WARN_ON_ONCE(!rcu_read_lock_held());
return idr_find(&ss->css_idr, id);
}
/**
* cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
* @path: path on the default hierarchy
*
* Find the cgroup at @path on the default hierarchy, increment its
* reference count and return it. Returns pointer to the found cgroup on
* success, ERR_PTR(-ENOENT) if @path doesn't exist or if the cgroup has already
* been released and ERR_PTR(-ENOTDIR) if @path points to a non-directory.
*/
struct cgroup *cgroup_get_from_path(const char *path)
{
struct kernfs_node *kn;
struct cgroup *cgrp = ERR_PTR(-ENOENT);
struct cgroup *root_cgrp;
root_cgrp = current_cgns_cgroup_dfl();
kn = kernfs_walk_and_get(root_cgrp->kn, path);
if (!kn)
goto out;
if (kernfs_type(kn) != KERNFS_DIR) {
cgrp = ERR_PTR(-ENOTDIR);
goto out_kernfs;
}
rcu_read_lock();
cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
if (!cgrp || !cgroup_tryget(cgrp))
cgrp = ERR_PTR(-ENOENT);
rcu_read_unlock();
out_kernfs:
kernfs_put(kn);
out:
return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);
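/*
 * Illustrative caller sketch for cgroup_get_from_path() (hypothetical,
 * not part of this file; the path string is an assumption for the
 * example). The returned reference must be dropped with cgroup_put():
 *
 *	struct cgroup *cgrp;
 *
 *	cgrp = cgroup_get_from_path("/system.slice/example.scope");
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	... inspect or account against cgrp ...
 *	cgroup_put(cgrp);
 */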
/**
* cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd
* @fd: fd obtained by open(cgroup_dir)
*
* Find the cgroup from a fd which should be obtained
* by opening a cgroup directory. Returns a pointer to the
* cgroup on success. ERR_PTR is returned if the cgroup
* cannot be found.
*/
struct cgroup *cgroup_v1v2_get_from_fd(int fd)
{
CLASS(fd_raw, f)(fd);
if (fd_empty(f))
return ERR_PTR(-EBADF);
return cgroup_v1v2_get_from_file(fd_file(f));
}
/**
* cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports
* cgroup2.
* @fd: fd obtained by open(cgroup2_dir)
*/
struct cgroup *cgroup_get_from_fd(int fd)
{
struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd);
if (IS_ERR(cgrp))
return ERR_CAST(cgrp);
if (!cgroup_on_dfl(cgrp)) {
cgroup_put(cgrp);
return ERR_PTR(-EBADF);
}
return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
static u64 power_of_ten(int power)
{
u64 v = 1;
while (power--)
v *= 10;
return v;
}
/**
* cgroup_parse_float - parse a floating number
* @input: input string
* @dec_shift: number of decimal digits to shift
* @v: output
*
* Parse a decimal floating point number in @input and store the result in
* @v with decimal point right shifted @dec_shift times. For example, if
* @input is "12.3456" and @dec_shift is 3, *@v will be set to 12345.
* Returns 0 on success, -errno otherwise.
*
* There's nothing cgroup specific about this function except that it's
* currently the only user.
*/
int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
{
s64 whole, frac = 0;
int fstart = 0, fend = 0, flen;
if (!sscanf(input, "%lld.%n%lld%n", &whole, &fstart, &frac, &fend))
return -EINVAL;
if (frac < 0)
return -EINVAL;
flen = fend > fstart ? fend - fstart : 0;
if (flen < dec_shift)
frac *= power_of_ten(dec_shift - flen);
else
frac = DIV_ROUND_CLOSEST_ULL(frac, power_of_ten(flen - dec_shift));
*v = whole * power_of_ten(dec_shift) + frac;
return 0;
}
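/*
 * Hypothetical usage sketch for cgroup_parse_float() (not part of this
 * file): parsing "2.5" with a dec_shift of 2 stores 250 in *v, i.e. the
 * value scaled by 10^2 with the missing fractional digit zero-filled:
 *
 *	s64 v;
 *	int ret;
 *
 *	ret = cgroup_parse_float("2.5", 2, &v);
 *	if (ret)
 *		return ret;
 *	... v is now 250 ...
 */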
/*
* sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
* definition in cgroup-defs.h.
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
struct cgroup *cgroup;
rcu_read_lock();
/* Don't associate the sock with unrelated interrupted task's cgroup. */
if (in_interrupt()) {
cgroup = &cgrp_dfl_root.cgrp;
cgroup_get(cgroup);
goto out;
}
while (true) {
struct css_set *cset;
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
cgroup = cset->dfl_cgrp;
break;
}
cpu_relax();
}
out:
skcd->cgroup = cgroup;
cgroup_bpf_get(cgroup);
rcu_read_unlock();
}
void cgroup_sk_clone(struct sock_cgroup_data *skcd)
{
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
/*
* We might be cloning a socket which is left in an empty
* cgroup and the cgroup might have already been rmdir'd.
* Don't use cgroup_get_live().
*/
cgroup_get(cgrp);
cgroup_bpf_get(cgrp);
}
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
cgroup_bpf_put(cgrp);
cgroup_put(cgrp);
}
#endif /* CONFIG_SOCK_CGROUP_DATA */
#ifdef CONFIG_SYSFS
static ssize_t show_delegatable_files(struct cftype *files, char *buf,
ssize_t size, const char *prefix)
{
struct cftype *cft;
ssize_t ret = 0;
for (cft = files; cft && cft->name[0] != '\0'; cft++) {
if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
continue;
if (prefix)
ret += snprintf(buf + ret, size - ret, "%s.", prefix);
ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);
if (WARN_ON(ret >= size))
break;
}
return ret;
}
static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct cgroup_subsys *ss;
int ssid;
ssize_t ret = 0;
ret = show_delegatable_files(cgroup_base_files, buf + ret,
PAGE_SIZE - ret, NULL);
if (cgroup_psi_enabled())
ret += show_delegatable_files(cgroup_psi_files, buf + ret,
PAGE_SIZE - ret, NULL);
for_each_subsys(ss, ssid)
ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
PAGE_SIZE - ret,
cgroup_subsys_name[ssid]);
return ret;
}
static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE,
"nsdelegate\n"
"favordynmods\n"
"memory_localevents\n"
"memory_recursiveprot\n"
"memory_hugetlb_accounting\n"
"pids_localevents\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
static struct attribute *cgroup_sysfs_attrs[] = {
&cgroup_delegate_attr.attr,
&cgroup_features_attr.attr,
NULL,
};
static const struct attribute_group cgroup_sysfs_attr_group = {
.attrs = cgroup_sysfs_attrs,
.name = "cgroup",
};
static int __init cgroup_sysfs_init(void)
{
return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
}
subsys_initcall(cgroup_sysfs_init);
#endif /* CONFIG_SYSFS */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIMER_H
#define _LINUX_TIMER_H
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/stddef.h>
#include <linux/debugobjects.h>
#include <linux/stringify.h>
#include <linux/timer_types.h>
#ifdef CONFIG_LOCKDEP
/*
* NB: because we have to copy the lockdep_map, setting the lockdep_map key
* (second argument) here is required, otherwise it could be initialised to
* the copy of the lockdep_map later! We use the pointer to and the string
* "<file>:<line>" as the key resp. the name of the lockdep_map.
*/
#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
.lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
#else
#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
#endif
/*
* @TIMER_DEFERRABLE: A deferrable timer will work normally when the
* system is busy, but will not cause a CPU to come out of idle just
* to service it; instead, the timer will be serviced when the CPU
* eventually wakes up with a subsequent non-deferrable timer.
*
* @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and
* it's safe to wait for the completion of the running instance from
* IRQ handlers, for example, by calling timer_delete_sync().
*
* Note: The irq disabled callback execution is a special case for
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*
* @TIMER_PINNED: A pinned timer will always expire on the CPU on which the
* timer was enqueued. When a particular CPU is required, add_timer_on()
* has to be used. Enqueue via mod_timer() and add_timer() is always done
* on the local CPU.
*/
#define TIMER_CPUMASK 0x0003FFFF
#define TIMER_MIGRATING 0x00040000
#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
#define TIMER_DEFERRABLE 0x00080000
#define TIMER_PINNED 0x00100000
#define TIMER_IRQSAFE 0x00200000
#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
#define TIMER_ARRAYSHIFT 22
#define TIMER_ARRAYMASK 0xFFC00000
#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
#define __TIMER_INITIALIZER(_function, _flags) { \
.entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.flags = (_flags), \
__TIMER_LOCKDEP_MAP_INITIALIZER(FILE_LINE) \
}
#define DEFINE_TIMER(_name, _function) \
struct timer_list _name = \
__TIMER_INITIALIZER(_function, 0)
/*
* LOCKDEP and DEBUG timer interfaces.
*/
void timer_init_key(struct timer_list *timer,
void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags, const char *name,
struct lock_class_key *key);
#else
static inline void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name,
struct lock_class_key *key)
{
timer_init_key(timer, func, flags, name, key);
}
#endif
#ifdef CONFIG_LOCKDEP
#define __timer_init(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
timer_init_key((_timer), (_fn), (_flags), #_timer, &__key);\
} while (0)
#define __timer_init_on_stack(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
timer_init_key_on_stack((_timer), (_fn), (_flags), \
#_timer, &__key); \
} while (0)
#else
#define __timer_init(_timer, _fn, _flags) \
timer_init_key((_timer), (_fn), (_flags), NULL, NULL)
#define __timer_init_on_stack(_timer, _fn, _flags) \
timer_init_key_on_stack((_timer), (_fn), (_flags), NULL, NULL)
#endif
/**
* timer_setup - prepare a timer for first use
* @timer: the timer in question
* @callback: the function to call when timer expires
* @flags: any TIMER_* flags
*
* Regular timer initialization should use either DEFINE_TIMER() above,
* or timer_setup(). For timers on the stack, timer_setup_on_stack() must
* be used and must be balanced with a call to timer_destroy_on_stack().
*/
#define timer_setup(timer, callback, flags) \
__timer_init((timer), (callback), (flags))
#define timer_setup_on_stack(timer, callback, flags) \
__timer_init_on_stack((timer), (callback), (flags))
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void timer_destroy_on_stack(struct timer_list *timer);
#else
static inline void timer_destroy_on_stack(struct timer_list *timer) { }
#endif
#define timer_container_of(var, callback_timer, timer_fieldname) \
container_of(callback_timer, typeof(*var), timer_fieldname)
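/*
 * Illustrative sketch of the timer_setup() / timer_container_of() idiom
 * (hypothetical driver code, not part of this header; the names 'foo',
 * 'foo_timeout' and 'foo_arm' are assumptions for the example):
 *
 *	struct foo {
 *		struct timer_list timer;
 *		bool expired;
 *	};
 *
 *	static void foo_timeout(struct timer_list *t)
 *	{
 *		struct foo *f = timer_container_of(f, t, timer);
 *
 *		f->expired = true;
 *	}
 *
 *	static void foo_arm(struct foo *f)
 *	{
 *		timer_setup(&f->timer, foo_timeout, 0);
 *		mod_timer(&f->timer, jiffies + HZ);
 *	}
 */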
/**
* timer_pending - is a timer pending?
* @timer: the timer in question
*
* timer_pending will tell whether a given timer is currently pending,
* or not. Callers must ensure serialization wrt. other operations done
* to this timer, eg. interrupt contexts, or other CPUs on SMP.
*
* Returns: 1 if the timer is pending, 0 if not.
*/
static inline int timer_pending(const struct timer_list * timer)
{
return !hlist_unhashed_lockless(&timer->entry);
}
extern void add_timer_on(struct timer_list *timer, int cpu);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int timer_reduce(struct timer_list *timer, unsigned long expires);
/*
* The jiffies value which is added to now, when there is no timer
* in the timer wheel:
*/
#define TIMER_NEXT_MAX_DELTA ((1UL << 30) - 1)
extern void add_timer(struct timer_list *timer);
extern void add_timer_local(struct timer_list *timer);
extern void add_timer_global(struct timer_list *timer);
extern int timer_delete_sync_try(struct timer_list *timer);
extern int timer_delete_sync(struct timer_list *timer);
extern int timer_delete(struct timer_list *timer);
extern int timer_shutdown_sync(struct timer_list *timer);
extern int timer_shutdown(struct timer_list *timer);
extern void timers_init(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
#ifdef CONFIG_HOTPLUG_CPU
int timers_prepare_cpu(unsigned int cpu);
int timers_dead_cpu(unsigned int cpu);
#else
#define timers_prepare_cpu NULL
#define timers_dead_cpu NULL
#endif
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __CPUSET_INTERNAL_H
#define __CPUSET_INTERNAL_H
#include <linux/cgroup.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/spinlock.h>
#include <linux/union_find.h>
/* See "Frequency meter" comments, below. */
struct fmeter {
int cnt; /* unprocessed events count */
int val; /* most recent output value */
time64_t time; /* clock (secs) when val computed */
spinlock_t lock; /* guards read or write of above */
};
/*
* Invalid partition error code
*/
enum prs_errcode {
PERR_NONE = 0,
PERR_INVCPUS,
PERR_INVPARENT,
PERR_NOTPART,
PERR_NOTEXCL,
PERR_NOCPUS,
PERR_HOTPLUG,
PERR_CPUSEMPTY,
PERR_HKEEPING,
PERR_ACCESS,
PERR_REMOTE,
};
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
CS_MEM_EXCLUSIVE,
CS_MEM_HARDWALL,
CS_MEMORY_MIGRATE,
CS_SCHED_LOAD_BALANCE,
CS_SPREAD_PAGE,
CS_SPREAD_SLAB,
} cpuset_flagbits_t;
/* The various types of files and directories in a cpuset file system */
typedef enum {
FILE_MEMORY_MIGRATE,
FILE_CPULIST,
FILE_MEMLIST,
FILE_EFFECTIVE_CPULIST,
FILE_EFFECTIVE_MEMLIST,
FILE_SUBPARTS_CPULIST,
FILE_EXCLUSIVE_CPULIST,
FILE_EFFECTIVE_XCPULIST,
FILE_ISOLATED_CPULIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_MEM_HARDWALL,
FILE_SCHED_LOAD_BALANCE,
FILE_PARTITION_ROOT,
FILE_SCHED_RELAX_DOMAIN_LEVEL,
FILE_MEMORY_PRESSURE_ENABLED,
FILE_MEMORY_PRESSURE,
FILE_SPREAD_PAGE,
FILE_SPREAD_SLAB,
} cpuset_filetype_t;
struct cpuset {
struct cgroup_subsys_state css;
unsigned long flags; /* "unsigned long" so bitops work */
/*
* On default hierarchy:
*
* The user-configured masks can only be changed by writing to
* cpuset.cpus and cpuset.mems, and won't be limited by the
* parent masks.
*
* The effective masks are the real masks that apply to the tasks
* in the cpuset. They may be changed if the configured masks are
* changed or hotplug happens.
*
* effective_mask == configured_mask & parent's effective_mask,
* and if it ends up empty, it will inherit the parent's mask.
*
*
* On legacy hierarchy:
*
* The user-configured masks are always the same as the effective masks.
*/
/* user-configured CPUs and Memory Nodes allowed to tasks */
cpumask_var_t cpus_allowed;
nodemask_t mems_allowed;
/* effective CPUs and Memory Nodes allowed to tasks */
cpumask_var_t effective_cpus;
nodemask_t effective_mems;
/*
* Exclusive CPUs dedicated to current cgroup (default hierarchy only)
*
* The effective_cpus of a valid partition root comes solely from its
* effective_xcpus and some of the effective_xcpus may be distributed
* to sub-partitions below & hence excluded from its effective_cpus.
* For a valid partition root, its effective_cpus have no relationship
* with cpus_allowed unless its exclusive_cpus isn't set.
*
* This value will only be set if either exclusive_cpus is set or
* when this cpuset becomes a local partition root.
*/
cpumask_var_t effective_xcpus;
/*
* Exclusive CPUs as requested by the user (default hierarchy only)
*
* Its value is independent of cpus_allowed and designates the set of
* CPUs that can be granted to the current cpuset or its children when
* it becomes a valid partition root. The effective set of exclusive
* CPUs granted (effective_xcpus) depends on whether those exclusive
* CPUs are passed down by its ancestors and not yet taken up by
* another sibling partition root along the way.
*
* If its value isn't set, it defaults to cpus_allowed.
*/
cpumask_var_t exclusive_cpus;
/*
* This is the old set of Memory Nodes that tasks took on.
*
* - top_cpuset.old_mems_allowed is initialized to mems_allowed.
* - A new cpuset's old_mems_allowed is initialized when some
* task is moved into it.
* - old_mems_allowed is used in cpuset_migrate_mm() when we change
* cpuset.mems_allowed and have tasks' nodemask updated, and
* then old_mems_allowed is updated to mems_allowed.
*/
nodemask_t old_mems_allowed;
struct fmeter fmeter; /* memory_pressure filter */
/*
* Tasks are being attached to this cpuset. Used to prevent
* zeroing cpus/mems_allowed between ->can_attach() and ->attach().
*/
int attach_in_progress;
/* for custom sched domain */
int relax_domain_level;
/* number of valid local child partitions */
int nr_subparts;
/* partition root state */
int partition_root_state;
/*
* number of SCHED_DEADLINE tasks attached to this cpuset, so that we
* know when to rebuild associated root domain bandwidth information.
*/
int nr_deadline_tasks;
int nr_migrate_dl_tasks;
u64 sum_migrate_dl_bw;
/* Invalid partition error code, not lock protected */
enum prs_errcode prs_err;
/* Handle for cpuset.cpus.partition */
struct cgroup_file partition_file;
/* Remote partition sibling list anchored at remote_children */
struct list_head remote_sibling;
/* Used to merge intersecting subsets for generate_sched_domains */
struct uf_node node;
};
static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct cpuset, css) : NULL;
}
/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
return css_cs(task_css(task, cpuset_cgrp_id));
}
static inline struct cpuset *parent_cs(struct cpuset *cs)
{
return css_cs(cs->css.parent);
}
/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
return css_is_online(&cs->css) && !css_is_dying(&cs->css);
}
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}
static inline int is_mem_exclusive(const struct cpuset *cs)
{
return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}
static inline int is_mem_hardwall(const struct cpuset *cs)
{
return test_bit(CS_MEM_HARDWALL, &cs->flags);
}
static inline int is_sched_load_balance(const struct cpuset *cs)
{
return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}
static inline int is_memory_migrate(const struct cpuset *cs)
{
return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}
static inline int is_spread_page(const struct cpuset *cs)
{
return test_bit(CS_SPREAD_PAGE, &cs->flags);
}
static inline int is_spread_slab(const struct cpuset *cs)
{
return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
/**
* cpuset_for_each_child - traverse online children of a cpuset
* @child_cs: loop cursor pointing to the current child
* @pos_css: used for iteration
* @parent_cs: target cpuset to walk children of
*
* Walk @child_cs through the online children of @parent_cs. Must be used
* with RCU read locked.
*/
#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
css_for_each_child((pos_css), &(parent_cs)->css) \
if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
/**
* cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
* @des_cs: loop cursor pointing to the current descendant
* @pos_css: used for iteration
* @root_cs: target cpuset to walk descendants of
*
* Walk @des_cs through the online descendants of @root_cs. Must be used
* with RCU read locked. The caller may modify @pos_css by calling
* css_rightmost_descendant() to skip subtree. @root_cs is included in the
* iteration and the first node to be visited.
*/
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
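/*
 * Illustrative walk using the iterators above (hypothetical snippet, not
 * part of this header; 'root_cs' is an assumed struct cpuset pointer).
 * The whole walk must run under rcu_read_lock(), and a subtree can be
 * skipped with css_rightmost_descendant() as noted above:
 *
 *	struct cpuset *cs;
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cs, pos, root_cs) {
 *		if (!is_sched_load_balance(cs))
 *			pos = css_rightmost_descendant(pos);
 *	}
 *	rcu_read_unlock();
 */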
void rebuild_sched_domains_locked(void);
void cpuset_callback_lock_irq(void);
void cpuset_callback_unlock_irq(void);
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
void cpuset_update_tasks_nodemask(struct cpuset *cs);
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int cpuset_common_seq_show(struct seq_file *sf, void *v);
void cpuset_full_lock(void);
void cpuset_full_unlock(void);
/*
* cpuset-v1.c
*/
#ifdef CONFIG_CPUSETS_V1
extern struct cftype cpuset1_files[];
void fmeter_init(struct fmeter *fmp);
void cpuset1_update_task_spread_flags(struct cpuset *cs,
struct task_struct *tsk);
void cpuset1_update_tasks_flags(struct cpuset *cs);
void cpuset1_hotplug_update_tasks(struct cpuset *cs,
struct cpumask *new_cpus, nodemask_t *new_mems,
bool cpus_updated, bool mems_updated);
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
#else
static inline void fmeter_init(struct fmeter *fmp) {}
static inline void cpuset1_update_task_spread_flags(struct cpuset *cs,
struct task_struct *tsk) {}
static inline void cpuset1_update_tasks_flags(struct cpuset *cs) {}
static inline void cpuset1_hotplug_update_tasks(struct cpuset *cs,
struct cpumask *new_cpus, nodemask_t *new_mems,
bool cpus_updated, bool mems_updated) {}
static inline int cpuset1_validate_change(struct cpuset *cur,
struct cpuset *trial) { return 0; }
#endif /* CONFIG_CPUSETS_V1 */
#endif /* __CPUSET_INTERNAL_H */
/*
* kmod - the kernel module loader
*
* Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <trace/events/module.h>
#include "internal.h"
/*
* Assuming:
*
* threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
* (u64) THREAD_SIZE * 8UL);
*
* Needing fewer than 50 threads would mean we're dealing with systems
* smaller than 3200 pages. This assumes the system has roughly ~13M of memory,
* and this would only be an upper limit, after which the OOM killer would take
* effect. Systems like these are very unlikely if modules are enabled.
*/
#define MAX_KMOD_CONCURRENT 50
static DEFINE_SEMAPHORE(kmod_concurrent_max, MAX_KMOD_CONCURRENT);
/*
* This is a restriction on having *all* MAX_KMOD_CONCURRENT threads
* running at the same time without returning. When this happens we
* believe you've somehow ended up with a recursive module dependency
* creating a loop.
*
* We have no option but to fail.
*
* Userspace should proactively try to detect and prevent these.
*/
#define MAX_KMOD_ALL_BUSY_TIMEOUT 5
/*
modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = CONFIG_MODPROBE_PATH;
static void free_modprobe_argv(struct subprocess_info *info)
{
kfree(info->argv[3]); /* check call_modprobe() */
kfree(info->argv);
}
static int call_modprobe(char *orig_module_name, int wait)
{
struct subprocess_info *info;
static char *envp[] = {
"HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL
};
char *module_name;
int ret;
char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
if (!argv)
goto out;
module_name = kstrdup(orig_module_name, GFP_KERNEL);
if (!module_name)
goto free_argv;
argv[0] = modprobe_path;
argv[1] = "-q";
argv[2] = "--";
argv[3] = module_name; /* check free_modprobe_argv() */
argv[4] = NULL;
info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
NULL, free_modprobe_argv, NULL);
if (!info)
goto free_module_name;
ret = call_usermodehelper_exec(info, wait | UMH_KILLABLE);
kmod_dup_request_announce(orig_module_name, ret);
return ret;
free_module_name:
kfree(module_name);
free_argv:
kfree(argv);
out:
kmod_dup_request_announce(orig_module_name, -ENOMEM);
return -ENOMEM;
}
/**
* __request_module - try to load a kernel module
* @wait: wait (or not) for the operation to complete
* @fmt: printf style format string for the name of the module
* @...: arguments as specified in the format string
*
* Load a module using the user mode module loader. The function returns
* zero on success or a negative errno code or positive exit code from
* "modprobe" on failure. Note that a successful module load does not mean
* the module did not then unload and exit on an error of its own. Callers
* must check that the service they requested is now available rather than
* blindly invoke it.
*
* If module auto-loading support is disabled then this function
* simply returns -ENOENT.
*/
int __request_module(bool wait, const char *fmt, ...)
{
va_list args;
char module_name[MODULE_NAME_LEN];
int ret, dup_ret;
/*
* We don't allow synchronous module loading from async. Module
* init may invoke async_synchronize_full() which will end up
* waiting for this task which already is waiting for the module
* loading to complete, leading to a deadlock.
*/
WARN_ON_ONCE(wait && current_is_async());
if (!modprobe_path[0])
return -ENOENT;
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
if (ret >= MODULE_NAME_LEN)
return -ENAMETOOLONG;
ret = security_kernel_module_request(module_name);
if (ret)
return ret;
ret = down_timeout(&kmod_concurrent_max, MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
if (ret) {
pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
return ret;
}
trace_module_request(module_name, wait, _RET_IP_);
if (kmod_dup_request_exists_wait(module_name, wait, &dup_ret)) {
ret = dup_ret;
goto out;
}
ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
out:
up(&kmod_concurrent_max);
return ret;
}
EXPORT_SYMBOL(__request_module);
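/*
 * Hypothetical caller sketch (not part of this file): request a module
 * by alias and then re-check that the facility it provides actually
 * registered, as the kernel-doc above requires. 'foo_find_driver' and
 * the "foo-driver-%d" alias are assumptions for the example:
 *
 *	if (request_module("foo-driver-%d", id) < 0)
 *		return -ENODEV;
 *	drv = foo_find_driver(id);
 *	if (!drv)
 *		return -ENODEV;
 */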
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __NET_CFG80211_H
#define __NET_CFG80211_H
/*
* 802.11 device and configuration interface
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/ethtool.h>
#include <uapi/linux/rfkill.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/nl80211.h>
#include <linux/if_ether.h>
#include <linux/ieee80211.h>
#include <linux/net.h>
#include <linux/rfkill.h>
#include <net/regulatory.h>
/**
* DOC: Introduction
*
* cfg80211 is the configuration API for 802.11 devices in Linux. It bridges
* userspace and drivers, and offers some utility functionality associated
* with 802.11. cfg80211 must, directly or indirectly via mac80211, be used
* by all modern wireless drivers in Linux, so that they offer a consistent
* API through nl80211. For backward compatibility, cfg80211 also offers
* wireless extensions to userspace, but hides them from drivers completely.
*
* Additionally, cfg80211 contains code to help enforce regulatory spectrum
* use restrictions.
*/
/**
* DOC: Device registration
*
* In order for a driver to use cfg80211, it must register the hardware device
* with cfg80211. This happens through a number of hardware capability structs
* described below.
*
* The fundamental structure for each device is the 'wiphy', of which each
* instance describes a physical wireless device connected to the system. Each
* such wiphy can have zero, one, or many virtual interfaces associated with
* it, which need to be identified as such by pointing the network interface's
* @ieee80211_ptr pointer to a &struct wireless_dev which further describes
* the wireless part of the interface. Normally this struct is embedded in the
* network interface's private data area. Drivers can optionally allow creating
* or destroying virtual interfaces on the fly, but without at least one
* interface, or the ability to create some, the wireless device isn't useful.
*
* Each wiphy structure contains device capability information, and also has
* a pointer to the various operations the driver offers. The definitions and
* structures here describe these capabilities in detail.
*/
struct wiphy;
/*
* wireless hardware capability structures
*/
/**
* enum ieee80211_channel_flags - channel flags
*
* Channel flags set by the regulatory control code.
*
* @IEEE80211_CHAN_DISABLED: This channel is disabled.
* @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
* sending probe requests or beaconing.
* @IEEE80211_CHAN_PSD: Power spectral density (in dBm) is set for this
* channel.
* @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
* @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
* is not permitted.
* @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
* is not permitted.
* @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
* @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band,
* this flag indicates that an 80 MHz channel cannot use this
* channel as the control or any of the secondary channels.
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_NO_160MHZ: If the driver supports 160 MHz on the band,
* this flag indicates that a 160 MHz channel cannot use this
* channel as the control or any of the secondary channels.
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
* @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
* @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
* this flag indicates that a 320 MHz channel cannot use this
* channel as the control or any of the secondary channels.
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
* @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
* @IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT: Client connection with VLP AP
* not permitted using this channel
* @IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT: Client connection with AFC AP
* not permitted using this channel
* @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
* mode even in the presence of other (regulatory) restrictions,
* even if it is otherwise disabled.
* @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation
* with very low power (VLP), even if otherwise set to NO_IR.
* @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel,
* even if otherwise set to NO_IR.
* @IEEE80211_CHAN_S1G_NO_PRIMARY: Prevents the channel from being used as an
* S1G primary channel. Does not prevent the wider operating channel
* described by the chandef from being used. In order for a 2 MHz primary
* to be used, neither of its 1 MHz subchannels may have this flag set.
* @IEEE80211_CHAN_NO_4MHZ: 4 MHz bandwidth is not permitted on this channel.
* @IEEE80211_CHAN_NO_8MHZ: 8 MHz bandwidth is not permitted on this channel.
* @IEEE80211_CHAN_NO_16MHZ: 16 MHz bandwidth is not permitted on this channel.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = BIT(0),
IEEE80211_CHAN_NO_IR = BIT(1),
IEEE80211_CHAN_PSD = BIT(2),
IEEE80211_CHAN_RADAR = BIT(3),
IEEE80211_CHAN_NO_HT40PLUS = BIT(4),
IEEE80211_CHAN_NO_HT40MINUS = BIT(5),
IEEE80211_CHAN_NO_OFDM = BIT(6),
IEEE80211_CHAN_NO_80MHZ = BIT(7),
IEEE80211_CHAN_NO_160MHZ = BIT(8),
IEEE80211_CHAN_INDOOR_ONLY = BIT(9),
IEEE80211_CHAN_IR_CONCURRENT = BIT(10),
IEEE80211_CHAN_NO_20MHZ = BIT(11),
IEEE80211_CHAN_NO_10MHZ = BIT(12),
IEEE80211_CHAN_NO_HE = BIT(13),
/* can use free bits here */
IEEE80211_CHAN_NO_320MHZ = BIT(19),
IEEE80211_CHAN_NO_EHT = BIT(20),
IEEE80211_CHAN_DFS_CONCURRENT = BIT(21),
IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = BIT(22),
IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23),
IEEE80211_CHAN_CAN_MONITOR = BIT(24),
IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25),
IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26),
IEEE80211_CHAN_S1G_NO_PRIMARY = BIT(27),
IEEE80211_CHAN_NO_4MHZ = BIT(28),
IEEE80211_CHAN_NO_8MHZ = BIT(29),
IEEE80211_CHAN_NO_16MHZ = BIT(30),
};
#define IEEE80211_CHAN_NO_HT40 \
(IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
#define IEEE80211_DFS_MIN_CAC_TIME_MS 60000
#define IEEE80211_DFS_MIN_NOP_TIME_MS (30 * 60 * 1000)
/**
* struct ieee80211_channel - channel definition
*
* This structure describes a single channel for use
* with cfg80211.
*
* @center_freq: center frequency in MHz
* @freq_offset: offset from @center_freq, in KHz
* @hw_value: hardware-specific value for the channel
* @flags: channel flags from &enum ieee80211_channel_flags.
* @orig_flags: channel flags at registration time, used by regulatory
* code to support devices with additional restrictions
* @band: band this channel belongs to.
* @max_antenna_gain: maximum antenna gain in dBi
* @max_power: maximum transmission power (in dBm)
* @max_reg_power: maximum regulatory transmission power (in dBm)
* @beacon_found: helper to regulatory code to indicate when a beacon
* has been found on this channel. Use regulatory_hint_found_beacon()
* to enable this; it is useful only on the 5 GHz band.
* @orig_mag: internal use
* @orig_mpwr: internal use
* @dfs_state: current state of this channel. Only relevant if radar is required
* on this channel.
* @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
* @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
* @psd: power spectral density (in dBm)
*/
struct ieee80211_channel {
enum nl80211_band band;
u32 center_freq;
u16 freq_offset;
u16 hw_value;
u32 flags;
int max_antenna_gain;
int max_power;
int max_reg_power;
bool beacon_found;
u32 orig_flags;
int orig_mag, orig_mpwr;
enum nl80211_dfs_state dfs_state;
unsigned long dfs_state_entered;
unsigned int dfs_cac_ms;
s8 psd;
};
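/*
* Minimal sketch (hypothetical driver data, not part of this header) of how a
* driver might describe two 2.4 GHz channels with struct ieee80211_channel.
* The hw_value numbers and power limits are made-up example values.
*
*   static struct ieee80211_channel example_2ghz_channels[] = {
*           {
*                   .band = NL80211_BAND_2GHZ,
*                   .center_freq = 2412,    // channel 1
*                   .hw_value = 1,
*                   .max_power = 20,        // dBm
*           },
*           {
*                   .band = NL80211_BAND_2GHZ,
*                   .center_freq = 2437,    // channel 6
*                   .hw_value = 6,
*                   .max_power = 20,
*           },
*   };
*/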
/**
* enum ieee80211_rate_flags - rate flags
*
* Hardware/specification flags for rates. These are structured
* in a way that allows using the same bitrate structure for
* different bands/PHY modes.
*
* @IEEE80211_RATE_SHORT_PREAMBLE: Hardware can send with short
* preamble on this bitrate; only relevant in 2.4GHz band and
* with CCK rates.
* @IEEE80211_RATE_MANDATORY_A: This bitrate is a mandatory rate
* when used with 802.11a (on the 5 GHz band); filled by the
* core code when registering the wiphy.
* @IEEE80211_RATE_MANDATORY_B: This bitrate is a mandatory rate
* when used with 802.11b (on the 2.4 GHz band); filled by the
* core code when registering the wiphy.
* @IEEE80211_RATE_MANDATORY_G: This bitrate is a mandatory rate
* when used with 802.11g (on the 2.4 GHz band); filled by the
* core code when registering the wiphy.
* @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode.
* @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode
* @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
*/
enum ieee80211_rate_flags {
IEEE80211_RATE_SHORT_PREAMBLE = BIT(0),
IEEE80211_RATE_MANDATORY_A = BIT(1),
IEEE80211_RATE_MANDATORY_B = BIT(2),
IEEE80211_RATE_MANDATORY_G = BIT(3),
IEEE80211_RATE_ERP_G = BIT(4),
IEEE80211_RATE_SUPPORTS_5MHZ = BIT(5),
IEEE80211_RATE_SUPPORTS_10MHZ = BIT(6),
};
/**
* enum ieee80211_bss_type - BSS type filter
*
* @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS
* @IEEE80211_BSS_TYPE_PBSS: Personal BSS
* @IEEE80211_BSS_TYPE_IBSS: Independent BSS
* @IEEE80211_BSS_TYPE_MBSS: Mesh BSS
* @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type
*/
enum ieee80211_bss_type {
IEEE80211_BSS_TYPE_ESS,
IEEE80211_BSS_TYPE_PBSS,
IEEE80211_BSS_TYPE_IBSS,
IEEE80211_BSS_TYPE_MBSS,
IEEE80211_BSS_TYPE_ANY
};
/**
* enum ieee80211_privacy - BSS privacy filter
*
* @IEEE80211_PRIVACY_ON: privacy bit set
* @IEEE80211_PRIVACY_OFF: privacy bit clear
* @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting
*/
enum ieee80211_privacy {
IEEE80211_PRIVACY_ON,
IEEE80211_PRIVACY_OFF,
IEEE80211_PRIVACY_ANY
};
#define IEEE80211_PRIVACY(x) \
((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF)
/**
* struct ieee80211_rate - bitrate definition
*
* This structure describes a bitrate that an 802.11 PHY can
* operate with. The two values @hw_value and @hw_value_short
* are only for driver use when pointers to this structure are
* passed around.
*
* @flags: rate-specific flags from &enum ieee80211_rate_flags
* @bitrate: bitrate in units of 100 Kbps
* @hw_value: driver/hardware value for this rate
* @hw_value_short: driver/hardware value for this rate when
* short preamble is used
*/
struct ieee80211_rate {
u32 flags;
u16 bitrate;
u16 hw_value, hw_value_short;
};
/**
* struct ieee80211_he_obss_pd - AP settings for spatial reuse
*
* @enable: is the feature enabled.
* @sr_ctrl: The SR Control field of SRP element.
* @non_srg_max_offset: non-SRG maximum tx power offset
* @min_offset: minimal tx power offset an associated station shall use
* @max_offset: maximum tx power offset an associated station shall use
* @bss_color_bitmap: bitmap that indicates the BSS color values used by
* members of the SRG
* @partial_bssid_bitmap: bitmap that indicates the partial BSSID values
* used by members of the SRG
*/
struct ieee80211_he_obss_pd {
bool enable;
u8 sr_ctrl;
u8 non_srg_max_offset;
u8 min_offset;
u8 max_offset;
u8 bss_color_bitmap[8];
u8 partial_bssid_bitmap[8];
};
/**
* struct cfg80211_he_bss_color - AP settings for BSS coloring
*
* @color: the current color.
* @enabled: HE BSS color is used
* @partial: define the AID equation.
*/
struct cfg80211_he_bss_color {
u8 color;
bool enabled;
bool partial;
};
/**
* struct ieee80211_sta_ht_cap - STA's HT capabilities
*
* This structure describes most essential parameters needed
* to describe 802.11n HT capabilities for an STA.
*
* @ht_supported: is HT supported by the STA
* @cap: HT capabilities map as described in 802.11n spec
* @ampdu_factor: Maximum A-MPDU length factor
* @ampdu_density: Minimum A-MPDU spacing
* @mcs: Supported MCS rates
*/
struct ieee80211_sta_ht_cap {
u16 cap; /* use IEEE80211_HT_CAP_ */
bool ht_supported;
u8 ampdu_factor;
u8 ampdu_density;
struct ieee80211_mcs_info mcs;
};
/**
* struct ieee80211_sta_vht_cap - STA's VHT capabilities
*
* This structure describes most essential parameters needed
* to describe 802.11ac VHT capabilities for an STA.
*
* @vht_supported: is VHT supported by the STA
* @cap: VHT capabilities map as described in 802.11ac spec
* @vht_mcs: Supported VHT MCS rates
*/
struct ieee80211_sta_vht_cap {
bool vht_supported;
u32 cap; /* use IEEE80211_VHT_CAP_ */
struct ieee80211_vht_mcs_info vht_mcs;
};
#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
/**
* struct ieee80211_sta_he_cap - STA's HE capabilities
*
* This structure describes most essential parameters needed
* to describe 802.11ax HE capabilities for a STA.
*
* @has_he: true iff HE data is valid.
* @he_cap_elem: Fixed portion of the HE capabilities element.
* @he_mcs_nss_supp: The supported NSS/MCS combinations.
* @ppe_thres: Holds the PPE Thresholds data.
*/
struct ieee80211_sta_he_cap {
bool has_he;
struct ieee80211_he_cap_elem he_cap_elem;
struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
};
/**
* struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS
*
* See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS
* and NSS Set field"
*
* @only_20mhz: MCS/NSS support for 20 MHz-only STA.
* @bw: MCS/NSS support for 80, 160 and 320 MHz
* @bw._80: MCS/NSS support for BW <= 80 MHz
* @bw._160: MCS/NSS support for BW = 160 MHz
* @bw._320: MCS/NSS support for BW = 320 MHz
*/
struct ieee80211_eht_mcs_nss_supp {
union {
struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz;
struct {
struct ieee80211_eht_mcs_nss_supp_bw _80;
struct ieee80211_eht_mcs_nss_supp_bw _160;
struct ieee80211_eht_mcs_nss_supp_bw _320;
} __packed bw;
} __packed;
} __packed;
#define IEEE80211_EHT_PPE_THRES_MAX_LEN 32
/**
* struct ieee80211_sta_eht_cap - STA's EHT capabilities
*
* This structure describes most essential parameters needed
* to describe 802.11be EHT capabilities for a STA.
*
* @has_eht: true iff EHT data is valid.
* @eht_cap_elem: Fixed portion of the eht capabilities element.
* @eht_mcs_nss_supp: The supported NSS/MCS combinations.
* @eht_ppe_thres: Holds the PPE Thresholds data.
*/
struct ieee80211_sta_eht_cap {
bool has_eht;
struct ieee80211_eht_cap_elem_fixed eht_cap_elem;
struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp;
u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
};
/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/*
* This is used to mark the sband->iftype_data pointer which is supposed
* to be an array with special access semantics (per iftype), but a lot
* of code got it wrong in the past, so with this marking sparse will be
* noisy when the pointer is used directly.
*/
# define __iftd __attribute__((noderef, address_space(__iftype_data)))
#else
# define __iftd
#endif /* __CHECKER__ */
/**
* struct ieee80211_sband_iftype_data - sband data per interface type
*
* This structure encapsulates sband data that is relevant for the
* interface types defined in @types_mask. Each type in the
* @types_mask must be unique across all instances of iftype_data.
*
* @types_mask: interface types mask
* @he_cap: holds the HE capabilities
* @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
* 6 GHz band channel (and 0 may be a valid value).
* @eht_cap: STA's EHT capabilities
* @vendor_elems: vendor element(s) to advertise
* @vendor_elems.data: vendor element(s) data
* @vendor_elems.len: vendor element(s) length
*/
struct ieee80211_sband_iftype_data {
u16 types_mask;
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
struct {
const u8 *data;
unsigned int len;
} vendor_elems;
};
/**
* enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations
*
* @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz
* @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz
* @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz
* @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz
* @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz
* @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz
* @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz
* @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and
* 2.16GHz+2.16GHz
* @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and
* 4.32GHz + 4.32GHz
* @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and
* 4.32GHz + 4.32GHz
* @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz
* and 4.32GHz + 4.32GHz
* @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz,
* 2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz
*/
enum ieee80211_edmg_bw_config {
IEEE80211_EDMG_BW_CONFIG_4 = 4,
IEEE80211_EDMG_BW_CONFIG_5 = 5,
IEEE80211_EDMG_BW_CONFIG_6 = 6,
IEEE80211_EDMG_BW_CONFIG_7 = 7,
IEEE80211_EDMG_BW_CONFIG_8 = 8,
IEEE80211_EDMG_BW_CONFIG_9 = 9,
IEEE80211_EDMG_BW_CONFIG_10 = 10,
IEEE80211_EDMG_BW_CONFIG_11 = 11,
IEEE80211_EDMG_BW_CONFIG_12 = 12,
IEEE80211_EDMG_BW_CONFIG_13 = 13,
IEEE80211_EDMG_BW_CONFIG_14 = 14,
IEEE80211_EDMG_BW_CONFIG_15 = 15,
};
/**
* struct ieee80211_edmg - EDMG configuration
*
* This structure describes most essential parameters needed
* to describe 802.11ay EDMG configuration
*
* @channels: bitmap that indicates the 2.16 GHz channel(s)
* that are allowed to be used for transmissions.
* Bit 0 indicates channel 1, bit 1 indicates channel 2, etc.
* Set to 0 to indicate EDMG is not supported.
* @bw_config: Channel BW Configuration subfield encodes
* the allowed channel bandwidth configurations
*/
struct ieee80211_edmg {
u8 channels;
enum ieee80211_edmg_bw_config bw_config;
};
/**
* struct ieee80211_sta_s1g_cap - STA's S1G capabilities
*
* This structure describes most essential parameters needed
* to describe 802.11ah S1G capabilities for a STA.
*
* @s1g: is STA an S1G STA
* @cap: S1G capabilities information
* @nss_mcs: Supported NSS MCS set
*/
struct ieee80211_sta_s1g_cap {
bool s1g;
u8 cap[10]; /* use S1G_CAPAB_ */
u8 nss_mcs[5];
};
/**
* struct ieee80211_supported_band - frequency band definition
*
* This structure describes a frequency band a wiphy
* is able to operate in.
*
* @channels: Array of channels the hardware can operate with
* in this band.
* @band: the band this structure represents
* @n_channels: Number of channels in @channels
* @bitrates: Array of bitrates the hardware can operate with
* in this band. Must be sorted to give a valid "supported
* rates" IE, i.e. CCK rates first, then OFDM.
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
* @edmg_cap: EDMG capabilities in this band
* @s1g_cap: S1G capabilities in this band (S1G band only, of course)
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
* one occurrence of each type is allowed across all instances of
* iftype_data).
*/
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
struct ieee80211_rate *bitrates;
enum nl80211_band band;
int n_channels;
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_edmg edmg_cap;
u16 n_iftype_data;
const struct ieee80211_sband_iftype_data __iftd *iftype_data;
};
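/*
* Sketch of wiring channels and bitrates into a supported band and attaching
* it to the wiphy before registration. "example_2ghz_channels",
* "example_legacy_rates" and the band object are hypothetical driver data;
* only the structure members used here come from this header.
*
*   static struct ieee80211_supported_band example_band_2ghz = {
*           .band = NL80211_BAND_2GHZ,
*           .channels = example_2ghz_channels,
*           .n_channels = ARRAY_SIZE(example_2ghz_channels),
*           .bitrates = example_legacy_rates,   // CCK rates first, then OFDM
*           .n_bitrates = ARRAY_SIZE(example_legacy_rates),
*   };
*
*   wiphy->bands[NL80211_BAND_2GHZ] = &example_band_2ghz;
*/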
/**
* _ieee80211_set_sband_iftype_data - set sband iftype data array
* @sband: the sband to initialize
* @iftd: the iftype data array pointer
* @n_iftd: the length of the iftype data array
*
* Set the sband iftype data array; use this where the length cannot
* be derived from the ARRAY_SIZE() of the argument, but prefer
* ieee80211_set_sband_iftype_data() where it can be used.
*/
static inline void
_ieee80211_set_sband_iftype_data(struct ieee80211_supported_band *sband,
const struct ieee80211_sband_iftype_data *iftd,
u16 n_iftd)
{
sband->iftype_data = (const void __iftd __force *)iftd;
sband->n_iftype_data = n_iftd;
}
/**
* ieee80211_set_sband_iftype_data - set sband iftype data array
* @sband: the sband to initialize
* @iftd: the iftype data array
*/
#define ieee80211_set_sband_iftype_data(sband, iftd) \
_ieee80211_set_sband_iftype_data(sband, iftd, ARRAY_SIZE(iftd))
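/*
* Sketch of providing per-iftype data with the helper above; the static array
* and its HE contents are hypothetical, and the macro derives the array
* length via ARRAY_SIZE().
*
*   static const struct ieee80211_sband_iftype_data example_iftd[] = {
*           {
*                   .types_mask = BIT(NL80211_IFTYPE_STATION) |
*                                 BIT(NL80211_IFTYPE_AP),
*                   .he_cap = {
*                           .has_he = true,
*                           // fixed HE capability fields omitted here
*                   },
*           },
*   };
*
*   ieee80211_set_sband_iftype_data(&example_band_2ghz, example_iftd);
*/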
/**
* for_each_sband_iftype_data - iterate sband iftype data entries
* @sband: the sband whose iftype_data array to iterate
* @i: iterator counter
* @iftd: iftype data pointer to set
*/
#define for_each_sband_iftype_data(sband, i, iftd) \
for (i = 0, iftd = (const void __force *)&(sband)->iftype_data[i]; \
i < (sband)->n_iftype_data; \
i++, iftd = (const void __force *)&(sband)->iftype_data[i])
/**
* ieee80211_get_sband_iftype_data - return sband data for a given iftype
* @sband: the sband to search for the STA on
* @iftype: enum nl80211_iftype
*
* Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
*/
static inline const struct ieee80211_sband_iftype_data *
ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
u8 iftype)
{
const struct ieee80211_sband_iftype_data *data;
int i;
if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
return NULL;
if (iftype == NL80211_IFTYPE_AP_VLAN)
iftype = NL80211_IFTYPE_AP;
for_each_sband_iftype_data(sband, i, data) {
if (data->types_mask & BIT(iftype))
return data;
}
return NULL;
}
/**
* ieee80211_get_he_iftype_cap - return HE capabilities for an sband's iftype
* @sband: the sband to search for the iftype on
* @iftype: enum nl80211_iftype
*
* Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
*/
static inline const struct ieee80211_sta_he_cap *
ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband,
u8 iftype)
{
const struct ieee80211_sband_iftype_data *data =
ieee80211_get_sband_iftype_data(sband, iftype);
if (data && data->he_cap.has_he)
return &data->he_cap;
return NULL;
}
/**
* ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities
* @sband: the sband to search for the STA on
* @iftype: the iftype to search for
*
* Return: the 6GHz capabilities
*/
static inline __le16
ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
enum nl80211_iftype iftype)
{
const struct ieee80211_sband_iftype_data *data =
ieee80211_get_sband_iftype_data(sband, iftype);
if (WARN_ON(!data || !data->he_cap.has_he))
return 0;
return data->he_6ghz_capa.capa;
}
/**
* ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype
* @sband: the sband to search for the iftype on
* @iftype: enum nl80211_iftype
*
* Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found
*/
static inline const struct ieee80211_sta_eht_cap *
ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband,
enum nl80211_iftype iftype)
{
const struct ieee80211_sband_iftype_data *data =
ieee80211_get_sband_iftype_data(sband, iftype);
if (data && data->eht_cap.has_eht)
return &data->eht_cap;
return NULL;
}
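/*
* Lookup sketch, as a driver might use it when deciding what to advertise for
* station mode; the branching is illustrative only.
*
*   const struct ieee80211_sta_he_cap *he_cap =
*           ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION);
*   const struct ieee80211_sta_eht_cap *eht_cap =
*           ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_STATION);
*
*   if (eht_cap) {
*           // EHT (and therefore HE) data is available for station mode
*   } else if (he_cap) {
*           // fall back to HE-only operation
*   }
*/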
/**
* wiphy_read_of_freq_limits - read frequency limits from device tree
*
* @wiphy: the wireless device to get extra limits for
*
* Some devices may have extra limitations specified in DT. This may be useful
* for chipsets that normally support more bands but are limited due to board
* design (e.g. by antennas or external power amplifier).
*
* This function reads info from DT and uses it to *modify* channels (disable
* unavailable ones). It's usually a *bad* idea to use it in drivers with
* shared channel data as DT limitations are device specific. You should make
* sure to call it only if channels in wiphy are copied and can be modified
* without affecting other devices.
*
* As this function accesses the device node, it has to be called after
* set_wiphy_dev(). It also modifies channels, so they have to be set first.
* If using this helper, call it before wiphy_register().
*/
#ifdef CONFIG_OF
void wiphy_read_of_freq_limits(struct wiphy *wiphy);
#else /* CONFIG_OF */
static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy)
{
}
#endif /* !CONFIG_OF */
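/*
* Ordering sketch for the DT limits helper: assign the device, point the
* wiphy at per-device copies of the channel arrays, apply the DT limits and
* only then register. "priv" and its band copy are hypothetical.
*
*   set_wiphy_dev(wiphy, dev);
*   // channels must be device-private copies, not shared module data
*   wiphy->bands[NL80211_BAND_2GHZ] = &priv->band_2ghz;
*   wiphy_read_of_freq_limits(wiphy);
*   ret = wiphy_register(wiphy);
*/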
/*
* Wireless hardware/device configuration structures and methods
*/
/**
* DOC: Actions and configuration
*
* Each wireless device and each virtual interface offer a set of configuration
* operations and other actions that are invoked by userspace. Each of these
* actions is described in the operations structure, and the parameters these
* operations use are described separately.
*
* Additionally, some operations are asynchronous and expect to get status
* information via some functions that drivers need to call.
*
* Scanning and BSS list handling with its associated functionality is described
* in a separate chapter.
*/
#define VHT_MUMIMO_GROUPS_DATA_LEN (WLAN_MEMBERSHIP_LEN +\
WLAN_USER_POSITION_LEN)
/**
* struct vif_params - describes virtual interface parameters
* @flags: monitor interface flags, unchanged if 0, otherwise
* %MONITOR_FLAG_CHANGED will be set
* @use_4addr: use 4-address frames
* @macaddr: address to use for this virtual interface.
* If this parameter is set to the zero address, the driver may
* determine the address as needed.
* This feature is only fully supported by drivers that enable the
* %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
* only p2p devices with specified MAC.
* @vht_mumimo_groups: MU-MIMO groupID, used for monitoring MU-MIMO packets
* belonging to that MU-MIMO groupID; %NULL if not changed
* @vht_mumimo_follow_addr: MU-MIMO follow address, used for monitoring
* MU-MIMO packets going to the specified station; %NULL if not changed
*/
struct vif_params {
u32 flags;
int use_4addr;
u8 macaddr[ETH_ALEN];
const u8 *vht_mumimo_groups;
const u8 *vht_mumimo_follow_addr;
};
/**
* struct key_params - key information
*
* Information about a key
*
* @key: key material
* @key_len: length of key material
* @cipher: cipher suite selector
* @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used
* with the get_key() callback, must be in little endian,
* length given by @seq_len.
* @seq_len: length of @seq.
* @vlan_id: vlan_id for VLAN group key (if nonzero)
* @mode: key install mode (RX_TX, NO_TX or SET_TX)
*/
struct key_params {
const u8 *key;
const u8 *seq;
int key_len;
int seq_len;
u16 vlan_id;
u32 cipher;
enum nl80211_key_mode mode;
};
/**
* struct cfg80211_chan_def - channel definition
* @chan: the (control) channel
* @width: channel width
* @center_freq1: center frequency of first segment
* @center_freq2: center frequency of second segment
* (only with 80+80 MHz)
* @edmg: define the EDMG channels configuration.
* If edmg is requested (i.e. the .channels member is non-zero),
* chan will define the primary channel and all other
* parameters are ignored.
* @freq1_offset: offset from @center_freq1, in KHz
* @punctured: mask of the punctured 20 MHz subchannels, with
* bits turned on being disabled (punctured); numbered
* from lower to higher frequency (like in the spec)
* @s1g_primary_2mhz: Indicates if the control channel pointed to
* by 'chan' exists as a 1MHz primary subchannel within an
* S1G 2MHz primary channel.
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
enum nl80211_chan_width width;
u32 center_freq1;
u32 center_freq2;
struct ieee80211_edmg edmg;
u16 freq1_offset;
u16 punctured;
bool s1g_primary_2mhz;
};
/*
* cfg80211_bitrate_mask - masks for bitrate control
*/
struct cfg80211_bitrate_mask {
struct {
u32 legacy;
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
u16 he_mcs[NL80211_HE_NSS_MAX];
u16 eht_mcs[NL80211_EHT_NSS_MAX];
enum nl80211_txrate_gi gi;
enum nl80211_he_gi he_gi;
enum nl80211_eht_gi eht_gi;
enum nl80211_he_ltf he_ltf;
enum nl80211_eht_ltf eht_ltf;
} control[NUM_NL80211_BANDS];
};
/**
* struct cfg80211_tid_cfg - TID specific configuration
* @config_override: Flag to notify driver to reset TID configuration
* of the peer.
* @tids: bitmap of TIDs to modify
* @mask: bitmap of attributes indicating which parameter changed,
* similar to &nl80211_tid_config_supp.
* @noack: noack configuration value for the TID
* @retry_long: retry count value
* @retry_short: retry count value
* @ampdu: Enable/Disable MPDU aggregation
* @rtscts: Enable/Disable RTS/CTS
* @amsdu: Enable/Disable MSDU aggregation
* @txrate_type: Tx bitrate mask type
* @txrate_mask: Tx bitrate to be applied for the TID
*/
struct cfg80211_tid_cfg {
bool config_override;
u8 tids;
u64 mask;
enum nl80211_tid_config noack;
u8 retry_long, retry_short;
enum nl80211_tid_config ampdu;
enum nl80211_tid_config rtscts;
enum nl80211_tid_config amsdu;
enum nl80211_tx_rate_setting txrate_type;
struct cfg80211_bitrate_mask txrate_mask;
};
/**
* struct cfg80211_tid_config - TID configuration
* @peer: Station's MAC address
* @n_tid_conf: Number of TID specific configurations to be applied
* @tid_conf: Configuration change info
*/
struct cfg80211_tid_config {
const u8 *peer;
u32 n_tid_conf;
struct cfg80211_tid_cfg tid_conf[] __counted_by(n_tid_conf);
};
/**
* struct cfg80211_fils_aad - FILS AAD data
* @macaddr: STA MAC address
* @kek: FILS KEK
* @kek_len: FILS KEK length
* @snonce: STA Nonce
* @anonce: AP Nonce
*/
struct cfg80211_fils_aad {
const u8 *macaddr;
const u8 *kek;
u8 kek_len;
const u8 *snonce;
const u8 *anonce;
};
/**
* struct cfg80211_set_hw_timestamp - enable/disable HW timestamping
* @macaddr: peer MAC address. NULL to enable/disable HW timestamping for all
* addresses.
* @enable: if set, enable HW timestamping for the specified MAC address.
* Otherwise disable HW timestamping for the specified MAC address.
*/
struct cfg80211_set_hw_timestamp {
const u8 *macaddr;
bool enable;
};
/**
* cfg80211_get_chandef_type - return old channel type from chandef
* @chandef: the channel definition
*
* Return: The old channel type (NOHT, HT20, HT40+/-) from a given
* chandef, which must have a bandwidth allowing this conversion.
*/
static inline enum nl80211_channel_type
cfg80211_get_chandef_type(const struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
return NL80211_CHAN_NO_HT;
case NL80211_CHAN_WIDTH_20:
return NL80211_CHAN_HT20;
case NL80211_CHAN_WIDTH_40:
if (chandef->center_freq1 > chandef->chan->center_freq)
return NL80211_CHAN_HT40PLUS;
return NL80211_CHAN_HT40MINUS;
default:
WARN_ON(1);
return NL80211_CHAN_NO_HT;
}
}
/**
* cfg80211_chandef_create - create channel definition using channel type
* @chandef: the channel definition struct to fill
* @channel: the control channel
* @chantype: the channel type
*
* Given a channel type, create a channel definition.
*/
void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
struct ieee80211_channel *channel,
enum nl80211_channel_type chantype);
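/*
* Sketch of building a 40 MHz HT chandef from a control channel the caller
* already holds ("chan" is hypothetical) and validating the result with
* cfg80211_chandef_valid(), declared further below.
*
*   struct cfg80211_chan_def chandef;
*
*   cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT40PLUS);
*   if (!cfg80211_chandef_valid(&chandef))
*           return -EINVAL;
*   // chandef.width is now NL80211_CHAN_WIDTH_40, with center_freq1
*   // above the control channel
*/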
/**
* cfg80211_chandef_identical - check if two channel definitions are identical
* @chandef1: first channel definition
* @chandef2: second channel definition
*
* Return: %true if the channels defined by the channel definitions are
* identical, %false otherwise.
*/
static inline bool
cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
const struct cfg80211_chan_def *chandef2)
{
return (chandef1->chan == chandef2->chan &&
chandef1->width == chandef2->width &&
chandef1->center_freq1 == chandef2->center_freq1 &&
chandef1->freq1_offset == chandef2->freq1_offset &&
chandef1->center_freq2 == chandef2->center_freq2 &&
chandef1->punctured == chandef2->punctured);
}
/**
* cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel
*
* @chandef: the channel definition
*
* Return: %true if EDMG defined, %false otherwise.
*/
static inline bool
cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef)
{
return chandef->edmg.channels || chandef->edmg.bw_config;
}
/**
* cfg80211_chandef_is_s1g - check if chandef represents an S1G channel
* @chandef: the channel definition
*
* Return: %true if S1G.
*/
static inline bool
cfg80211_chandef_is_s1g(const struct cfg80211_chan_def *chandef)
{
return chandef->chan->band == NL80211_BAND_S1GHZ;
}
/**
* cfg80211_chandef_compatible - check if two channel definitions are compatible
* @chandef1: first channel definition
* @chandef2: second channel definition
*
* Return: %NULL if the given channel definitions are incompatible,
* chandef1 or chandef2 otherwise.
*/
const struct cfg80211_chan_def *
cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
const struct cfg80211_chan_def *chandef2);
/**
* nl80211_chan_width_to_mhz - get the channel width in MHz
* @chan_width: the channel width from &enum nl80211_chan_width
*
* Return: channel width in MHz if the chan_width from &enum nl80211_chan_width
* is valid. -1 otherwise.
*/
int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width);
/**
* cfg80211_chandef_get_width - return chandef width in MHz
* @c: chandef to return bandwidth for
* Return: channel width in MHz for the given chandef; note that it returns
* 80 for 80+80 configurations
*/
static inline int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
{
return nl80211_chan_width_to_mhz(c->width);
}
/**
* cfg80211_chandef_valid - check if a channel definition is valid
* @chandef: the channel definition to check
* Return: %true if the channel definition is valid. %false otherwise.
*/
bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef);
/**
* cfg80211_chandef_usable - check if secondary channels can be used
* @wiphy: the wiphy to validate against
* @chandef: the channel definition to check
* @prohibited_flags: the regulatory channel flags that must not be set
* Return: %true if secondary channels are usable. %false otherwise.
*/
bool cfg80211_chandef_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
u32 prohibited_flags);
/**
* cfg80211_chandef_dfs_required - checks if radar detection is required
* @wiphy: the wiphy to validate against
* @chandef: the channel definition to check
* @iftype: the interface type as specified in &enum nl80211_iftype
* Returns:
* 1 if radar detection is required, 0 if it is not, < 0 on error
*/
int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
enum nl80211_iftype iftype);
/**
* cfg80211_chandef_dfs_usable - checks if chandef is DFS usable and we
* can/need to start CAC on such a channel
* @wiphy: the wiphy to validate against
* @chandef: the channel definition to check
*
* Return: true if all channels are available and at least
* one channel requires CAC (NL80211_DFS_USABLE)
*/
bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef);
/**
* cfg80211_chandef_dfs_cac_time - get the DFS CAC time (in ms) for given
* channel definition
* @wiphy: the wiphy to validate against
* @chandef: the channel definition to check
*
* Returns: DFS CAC time (in ms) which applies for this channel definition
*/
unsigned int
cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef);
/**
* cfg80211_chandef_primary - calculate primary 40/80/160 MHz freq
* @chandef: chandef to calculate for
* @primary_chan_width: primary channel width to calculate center for
* @punctured: punctured sub-channel bitmap, will be recalculated
* according to the new bandwidth, can be %NULL
*
* Returns: the primary 40/80/160 MHz channel center frequency, or -1
* for errors, updating the punctured bitmap
*/
int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef,
enum nl80211_chan_width primary_chan_width,
u16 *punctured);
/**
* nl80211_send_chandef - sends the channel definition.
* @msg: the msg to send channel definition
* @chandef: the channel definition to check
*
* Returns: 0 if the channel definition was sent to msg, < 0 on error
*/
int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef);
/**
* ieee80211_chandef_max_power - maximum transmission power for the chandef
*
* In some regulations, the transmit power may depend on the configured channel
* bandwidth, which may be defined as dBm/MHz. This function returns the actual
* max_power for non-standard (narrower than 20 MHz) channels.
*
* @chandef: channel definition for the channel
*
* Returns: maximum allowed transmission power in dBm for the chandef
*/
static inline int
ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_5:
return min(chandef->chan->max_reg_power - 6,
chandef->chan->max_power);
case NL80211_CHAN_WIDTH_10:
return min(chandef->chan->max_reg_power - 3,
chandef->chan->max_power);
default:
break;
}
return chandef->chan->max_power;
}
/**
* cfg80211_any_usable_channels - check for usable channels
* @wiphy: the wiphy to check for
* @band_mask: which bands to check on
* @prohibited_flags: which channels to not consider usable,
* %IEEE80211_CHAN_DISABLED is always taken into account
*
* Return: %true if usable channels found, %false otherwise
*/
bool cfg80211_any_usable_channels(struct wiphy *wiphy,
unsigned long band_mask,
u32 prohibited_flags);
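/*
* Usage sketch: check whether any 5 GHz channel remains usable once
* radar-required channels are excluded; purely illustrative.
*
*   if (!cfg80211_any_usable_channels(wiphy, BIT(NL80211_BAND_5GHZ),
*                                     IEEE80211_CHAN_RADAR)) {
*           // no non-DFS 5 GHz channel is currently usable
*   }
*/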
/**
* enum survey_info_flags - survey information flags
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
* @SURVEY_INFO_IN_USE: channel is currently being used
* @SURVEY_INFO_TIME: active time (in ms) was filled in
* @SURVEY_INFO_TIME_BUSY: busy time was filled in
* @SURVEY_INFO_TIME_EXT_BUSY: extension channel busy time was filled in
* @SURVEY_INFO_TIME_RX: receive time was filled in
* @SURVEY_INFO_TIME_TX: transmit time was filled in
* @SURVEY_INFO_TIME_SCAN: scan time was filled in
* @SURVEY_INFO_TIME_BSS_RX: local BSS receive time was filled in
*
* Used by the driver to indicate which info in &struct survey_info
* it has filled in during the get_survey().
*/
enum survey_info_flags {
SURVEY_INFO_NOISE_DBM = BIT(0),
SURVEY_INFO_IN_USE = BIT(1),
SURVEY_INFO_TIME = BIT(2),
SURVEY_INFO_TIME_BUSY = BIT(3),
SURVEY_INFO_TIME_EXT_BUSY = BIT(4),
SURVEY_INFO_TIME_RX = BIT(5),
SURVEY_INFO_TIME_TX = BIT(6),
SURVEY_INFO_TIME_SCAN = BIT(7),
SURVEY_INFO_TIME_BSS_RX = BIT(8),
};
/**
* struct survey_info - channel survey response
*
* @channel: the channel this survey record reports, may be %NULL for a single
* record to report global statistics
* @filled: bitflag of flags from &enum survey_info_flags
* @noise: channel noise in dBm. This and all following fields are
* optional
* @time: amount of time in ms the radio was turned on (on the channel)
* @time_busy: amount of time the primary channel was sensed busy
* @time_ext_busy: amount of time the extension channel was sensed busy
* @time_rx: amount of time the radio spent receiving data
* @time_tx: amount of time the radio spent transmitting data
* @time_scan: amount of time the radio spent for scanning
* @time_bss_rx: amount of time the radio spent receiving data on a local BSS
*
* Used by dump_survey() to report back per-channel survey information.
*
* This structure can later be expanded with things like
* channel duty cycle etc.
*/
struct survey_info {
struct ieee80211_channel *channel;
u64 time;
u64 time_busy;
u64 time_ext_busy;
u64 time_rx;
u64 time_tx;
u64 time_scan;
u64 time_bss_rx;
u32 filled;
s8 noise;
};
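/*
* Sketch of a driver's dump_survey() callback filling one record; the noise
* and time figures come from a hypothetical "example_stats" device counter
* block and are for illustration only.
*
*   static int example_dump_survey(struct wiphy *wiphy, struct net_device *dev,
*                                  int idx, struct survey_info *survey)
*   {
*           struct ieee80211_supported_band *sband =
*                   wiphy->bands[NL80211_BAND_2GHZ];
*
*           if (!sband || idx >= sband->n_channels)
*                   return -ENOENT;
*
*           survey->channel = &sband->channels[idx];
*           survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
*                            SURVEY_INFO_TIME_BUSY;
*           survey->noise = -92;                    // dBm, example value
*           survey->time = example_stats.on_time_ms;
*           survey->time_busy = example_stats.busy_time_ms;
*           return 0;
*   }
*/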
#define CFG80211_MAX_NUM_AKM_SUITES 10
/**
* struct cfg80211_crypto_settings - Crypto settings
* @wpa_versions: indicates which, if any, WPA versions are enabled
* (from enum nl80211_wpa_versions)
* @cipher_group: group key cipher suite (or 0 if unset)
* @n_ciphers_pairwise: number of AP supported unicast ciphers
* @ciphers_pairwise: unicast key cipher suites
* @n_akm_suites: number of AKM suites
* @akm_suites: AKM suites
* @control_port: Whether user space controls IEEE 802.1X port, i.e.,
* sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
* required to assume that the port is unauthorized until authorized by
* user space. Otherwise, port is marked authorized by default.
* @control_port_ethertype: the control port protocol that should be
* allowed through even on unauthorized ports
* @control_port_no_encrypt: TRUE to prevent encryption of control port
* protocol frames.
* @control_port_over_nl80211: TRUE if userspace expects to exchange control
* port frames over NL80211 instead of the network interface.
* @control_port_no_preauth: disables pre-auth rx over the nl80211 control
* port for mac80211
* @psk: PSK (for devices supporting 4-way-handshake offload)
* @sae_pwd: password for SAE authentication (for devices supporting SAE
* offload)
* @sae_pwd_len: length of SAE password (for devices supporting SAE offload)
* @sae_pwe: The mechanisms allowed for SAE PWE derivation:
*
* NL80211_SAE_PWE_UNSPECIFIED
* Not-specified, used to indicate userspace did not specify any
* preference. The driver should follow its internal policy in
* such a scenario.
*
* NL80211_SAE_PWE_HUNT_AND_PECK
* Allow hunting-and-pecking loop only
*
* NL80211_SAE_PWE_HASH_TO_ELEMENT
* Allow hash-to-element only
*
* NL80211_SAE_PWE_BOTH
* Allow either hunting-and-pecking loop or hash-to-element
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
u32 cipher_group;
int n_ciphers_pairwise;
u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES];
int n_akm_suites;
u32 akm_suites[CFG80211_MAX_NUM_AKM_SUITES];
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
bool control_port_over_nl80211;
bool control_port_no_preauth;
const u8 *psk;
const u8 *sae_pwd;
u8 sae_pwd_len;
enum nl80211_sae_pwe_mechanism sae_pwe;
};
/**
* struct cfg80211_mbssid_config - AP settings for multi bssid
*
* @tx_wdev: pointer to the transmitted interface in the MBSSID set
* @tx_link_id: link ID of the transmitted profile in an MLD.
* @index: index of this AP in the multi bssid group.
* @ema: set to true if the beacons should be sent out in EMA mode.
*/
struct cfg80211_mbssid_config {
struct wireless_dev *tx_wdev;
u8 tx_link_id;
u8 index;
bool ema;
};
/**
* struct cfg80211_mbssid_elems - Multiple BSSID elements
*
* @cnt: Number of elements in array %elem.
*
* @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
* @elem.data: Data for multiple BSSID elements.
* @elem.len: Length of data.
*/
struct cfg80211_mbssid_elems {
u8 cnt;
struct {
const u8 *data;
size_t len;
} elem[] __counted_by(cnt);
};
/**
* struct cfg80211_rnr_elems - Reduced neighbor report (RNR) elements
*
* @cnt: Number of elements in array %elem.
*
* @elem: Array of RNR element(s) to be added into Beacon frames.
* @elem.data: Data for RNR elements.
* @elem.len: Length of data.
*/
struct cfg80211_rnr_elems {
u8 cnt;
struct {
const u8 *data;
size_t len;
} elem[] __counted_by(cnt);
};
/**
* struct cfg80211_beacon_data - beacon data
* @link_id: the link ID for the AP MLD link sending this beacon
* @head: head portion of beacon (before TIM IE)
* or %NULL if not changed
* @tail: tail portion of beacon (after TIM IE)
* or %NULL if not changed
* @head_len: length of @head
* @tail_len: length of @tail
* @beacon_ies: extra information element(s) to add into Beacon frames or %NULL
* @beacon_ies_len: length of beacon_ies in octets
* @proberesp_ies: extra information element(s) to add into Probe Response
* frames or %NULL
* @proberesp_ies_len: length of proberesp_ies in octets
* @assocresp_ies: extra information element(s) to add into (Re)Association
* Response frames or %NULL
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
* @mbssid_ies: multiple BSSID elements
* @rnr_ies: reduced neighbor report elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
* (measurement type 8)
* @civicloc: Measurement Report element content, starting with Measurement
* Token (measurement type 11)
* @lci_len: LCI data length
* @civicloc_len: Civic location data length
* @he_bss_color: BSS Color settings
* @he_bss_color_valid: indicates whether bss color
* attribute is present in beacon data or not.
*/
struct cfg80211_beacon_data {
unsigned int link_id;
const u8 *head, *tail;
const u8 *beacon_ies;
const u8 *proberesp_ies;
const u8 *assocresp_ies;
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
struct cfg80211_mbssid_elems *mbssid_ies;
struct cfg80211_rnr_elems *rnr_ies;
s8 ftm_responder;
size_t head_len, tail_len;
size_t beacon_ies_len;
size_t proberesp_ies_len;
size_t assocresp_ies_len;
size_t probe_resp_len;
size_t lci_len;
size_t civicloc_len;
struct cfg80211_he_bss_color he_bss_color;
bool he_bss_color_valid;
};
struct mac_address {
u8 addr[ETH_ALEN];
};
/**
* struct cfg80211_acl_data - Access control list data
*
* @acl_policy: ACL policy to be applied on the station's
* entry specified by mac_addr
* @n_acl_entries: Number of MAC address entries passed
* @mac_addrs: List of MAC addresses of stations to be used for ACL
*/
struct cfg80211_acl_data {
enum nl80211_acl_policy acl_policy;
int n_acl_entries;
/* Keep it last */
struct mac_address mac_addrs[] __counted_by(n_acl_entries);
};
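/*
* Sketch of a driver helper consuming the ACL passed to its set_mac_acl()
* handler; "example_fw_add_acl_entry" is a hypothetical firmware hook.
*
*   static int example_set_mac_acl(const struct cfg80211_acl_data *acl)
*   {
*           int i;
*
*           for (i = 0; i < acl->n_acl_entries; i++)
*                   example_fw_add_acl_entry(acl->acl_policy,
*                                            acl->mac_addrs[i].addr);
*           return 0;
*   }
*/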
/**
* struct cfg80211_fils_discovery - FILS discovery parameters from
* IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
*
* @update: Set to true if the feature configuration should be updated.
* @min_interval: Minimum packet interval in TUs (0 - 10000)
* @max_interval: Maximum packet interval in TUs (0 - 10000)
* @tmpl_len: Template length
* @tmpl: Template data for FILS discovery frame including the action
* frame headers.
*/
struct cfg80211_fils_discovery {
bool update;
u32 min_interval;
u32 max_interval;
size_t tmpl_len;
const u8 *tmpl;
};
/**
* struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
* response parameters in 6GHz.
*
* @update: Set to true if the feature configuration should be updated.
* @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
* in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
* scanning
* @tmpl_len: Template length
* @tmpl: Template data for probe response
*/
struct cfg80211_unsol_bcast_probe_resp {
bool update;
u32 interval;
size_t tmpl_len;
const u8 *tmpl;
};
/**
* struct cfg80211_s1g_short_beacon - S1G short beacon data.
*
* @update: Set to true if the feature configuration should be updated.
* @short_head: Short beacon head.
* @short_tail: Short beacon tail.
* @short_head_len: Short beacon head len.
* @short_tail_len: Short beacon tail len.
*/
struct cfg80211_s1g_short_beacon {
bool update;
const u8 *short_head;
const u8 *short_tail;
size_t short_head_len;
size_t short_tail_len;
};
/**
* struct cfg80211_ap_settings - AP configuration
*
* Used to configure an AP interface.
*
* @chandef: defines the channel to use
* @beacon: beacon data
* @beacon_interval: beacon interval
* @dtim_period: DTIM period
* @ssid: SSID to be used in the BSS (note: may be %NULL if not provided from
* user space)
* @ssid_len: length of @ssid
* @hidden_ssid: whether to hide the SSID in Beacon/Probe Response frames
* @crypto: crypto settings
* @privacy: the BSS uses privacy
* @auth_type: Authentication type (algorithm)
* @inactivity_timeout: time in seconds to determine station's inactivity.
* @p2p_ctwindow: P2P CT Window
* @p2p_opp_ps: P2P opportunistic PS
* @acl: ACL configuration used by the drivers which have support for
* MAC address based access control
* @pbss: If set, start as a PCP instead of AP. Relevant for DMG
* networks.
* @beacon_rate: bitrate to be used for beacons
* @ht_cap: HT capabilities (or %NULL if HT isn't enabled)
* @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled)
* @he_cap: HE capabilities (or %NULL if HE isn't enabled)
* @eht_cap: EHT capabilities (or %NULL if EHT isn't enabled)
* @eht_oper: EHT operation IE (or %NULL if EHT isn't enabled)
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
* @twt_responder: Enable Target Wake Time (TWT) responder support
* @he_required: stations must support HE
* @sae_h2e_required: stations must support direct H2E technique in SAE
* @flags: flags, as defined in &enum nl80211_ap_settings_flags
* @he_obss_pd: OBSS Packet Detection settings
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
* @s1g_long_beacon_period: S1G long beacon period
* @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
struct cfg80211_beacon_data beacon;
int beacon_interval, dtim_period;
const u8 *ssid;
size_t ssid_len;
enum nl80211_hidden_ssid hidden_ssid;
struct cfg80211_crypto_settings crypto;
bool privacy;
enum nl80211_auth_type auth_type;
int inactivity_timeout;
u8 p2p_ctwindow;
bool p2p_opp_ps;
const struct cfg80211_acl_data *acl;
bool pbss;
struct cfg80211_bitrate_mask beacon_rate;
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
const struct ieee80211_he_cap_elem *he_cap;
const struct ieee80211_he_operation *he_oper;
const struct ieee80211_eht_cap_elem *eht_cap;
const struct ieee80211_eht_operation *eht_oper;
bool ht_required, vht_required, he_required, sae_h2e_required;
bool twt_responder;
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
u8 s1g_long_beacon_period;
struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
/**
* struct cfg80211_ap_update - AP configuration update
*
* Subset of &struct cfg80211_ap_settings, for updating a running AP.
*
* @beacon: beacon data
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_update {
struct cfg80211_beacon_data beacon;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
/**
* struct cfg80211_csa_settings - channel switch settings
*
* Used for channel switch
*
* @chandef: defines the channel to use after the switch
* @beacon_csa: beacon data while performing the switch
* @counter_offsets_beacon: offsets of the counters within the beacon (tail)
* @counter_offsets_presp: offsets of the counters within the probe response
* @n_counter_offsets_beacon: number of csa counters in the beacon (tail)
* @n_counter_offsets_presp: number of csa counters in the probe response
* @beacon_after: beacon data to be used on the new channel
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
* @link_id: defines the link on which channel switch is expected during
* MLO. 0 in case of non-MLO.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
struct cfg80211_beacon_data beacon_csa;
const u16 *counter_offsets_beacon;
const u16 *counter_offsets_presp;
unsigned int n_counter_offsets_beacon;
unsigned int n_counter_offsets_presp;
struct cfg80211_beacon_data beacon_after;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
bool radar_required;
bool block_tx;
u8 count;
u8 link_id;
};
/**
* struct cfg80211_color_change_settings - color change settings
*
* Used for bss color change
*
* @beacon_color_change: beacon data while performing the color countdown
* @counter_offset_beacon: offsets of the counters within the beacon (tail)
* @counter_offset_presp: offsets of the counters within the probe response
* @beacon_next: beacon data to be used after the color change
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @count: number of beacons until the color change
* @color: the color used after the change
* @link_id: defines the link on which color change is expected during MLO.
* 0 in case of non-MLO.
*/
struct cfg80211_color_change_settings {
struct cfg80211_beacon_data beacon_color_change;
u16 counter_offset_beacon;
u16 counter_offset_presp;
struct cfg80211_beacon_data beacon_next;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
u8 count;
u8 color;
u8 link_id;
};
/**
* struct iface_combination_params - input parameters for interface combinations
*
* Used to pass interface combination parameters
*
* @radio_idx: wiphy radio index or -1 for global
* @num_different_channels: the number of different channels we want
* to use for verification
* @radar_detect: a bitmap where each bit corresponds to a channel
* width where radar detection is needed, as in the definition of
* &struct ieee80211_iface_combination.@radar_detect_widths
* @iftype_num: array with the number of interfaces of each interface
* type. The index is the interface type as specified in &enum
* nl80211_iftype.
* @new_beacon_int: set this to the beacon interval of a new interface
* that's not operating yet, if such is to be checked as part of
* the verification
*/
struct iface_combination_params {
int radio_idx;
int num_different_channels;
u8 radar_detect;
int iftype_num[NUM_NL80211_IFTYPES];
u32 new_beacon_int;
};
/**
* enum station_parameters_apply_mask - station parameter values to apply
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
* @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state
*
* Not all station parameters have in-band "no change" signalling;
* for those that don't, these flags are used.
*/
enum station_parameters_apply_mask {
STATION_PARAM_APPLY_UAPSD = BIT(0),
STATION_PARAM_APPLY_CAPABILITY = BIT(1),
STATION_PARAM_APPLY_PLINK_STATE = BIT(2),
};
/**
* struct sta_txpwr - station txpower configuration
*
* Used to configure txpower for station.
*
* @power: tx power (in dBm) to be used for sending data traffic. If tx power
* is not provided, the default per-interface tx power setting applies.
* The driver should pick the lowest tx power, either the per-interface
* or the per-station value.
* @type: In particular, if TPC %type is NL80211_TX_POWER_LIMITED then tx power
* will be less than or equal to the value specified from userspace, whereas
* if TPC %type is NL80211_TX_POWER_AUTOMATIC then it indicates the default
* tx power. NL80211_TX_POWER_FIXED is not a valid configuration option for
* per peer TPC.
*/
struct sta_txpwr {
s16 power;
enum nl80211_tx_power_setting type;
};
/**
* struct link_station_parameters - link station parameters
*
* Used to change and create a new link station.
*
* @mld_mac: MAC address of the station
* @link_id: the link id (-1 for non-MLD station)
* @link_mac: MAC address of the link
* @supported_rates: supported rates in IEEE 802.11 format
* (or NULL for no change)
* @supported_rates_len: number of supported rates
* @ht_capa: HT capabilities of station
* @vht_capa: VHT capabilities of station
* @opmode_notif: operating mode field from Operating Mode Notification
* @opmode_notif_used: information if operating mode field is used
* @he_capa: HE capabilities of station
* @he_capa_len: the length of the HE capabilities
* @txpwr: transmit power for an associated station
* @txpwr_set: txpwr field is set
* @he_6ghz_capa: HE 6 GHz Band capabilities of station
* @eht_capa: EHT capabilities of station
* @eht_capa_len: the length of the EHT capabilities
* @s1g_capa: S1G capabilities of station
*/
struct link_station_parameters {
const u8 *mld_mac;
int link_id;
const u8 *link_mac;
const u8 *supported_rates;
u8 supported_rates_len;
const struct ieee80211_ht_cap *ht_capa;
const struct ieee80211_vht_cap *vht_capa;
u8 opmode_notif;
bool opmode_notif_used;
const struct ieee80211_he_cap_elem *he_capa;
u8 he_capa_len;
struct sta_txpwr txpwr;
bool txpwr_set;
const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
const struct ieee80211_eht_cap_elem *eht_capa;
u8 eht_capa_len;
const struct ieee80211_s1g_cap *s1g_capa;
};
/**
* struct link_station_del_parameters - link station deletion parameters
*
* Used to delete a link station entry (or all stations).
*
* @mld_mac: MAC address of the station
* @link_id: the link id
*/
struct link_station_del_parameters {
const u8 *mld_mac;
u32 link_id;
};
/**
* struct cfg80211_ttlm_params: TID to link mapping parameters
*
* Used for setting a TID to link mapping.
*
* @dlink: Downlink TID to link mapping, as defined in section 9.4.2.314
* (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
* @ulink: Uplink TID to link mapping, as defined in section 9.4.2.314
* (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
*/
struct cfg80211_ttlm_params {
u16 dlink[8];
u16 ulink[8];
};
/**
* struct station_parameters - station parameters
*
* Used to change and create a new station.
*
* @vlan: vlan interface station should belong to
* @sta_flags_mask: station flags that changed
* (bitmask of BIT(%NL80211_STA_FLAG_...))
* @sta_flags_set: station flags values
* (bitmask of BIT(%NL80211_STA_FLAG_...))
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
* @vlan_id: VLAN ID for station (if nonzero)
* @peer_aid: mesh peer AID or zero for no change
* @plink_action: plink action to take
* @plink_state: set the peer link state for a station
* @uapsd_queues: bitmap of queues configured for uapsd. same format
* as the AC bitmap in the QoS info field
* @max_sp: max Service Period. same format as the MAX_SP in the
* QoS info field (but already shifted down)
* @sta_modify_mask: bitmap indicating which parameters changed
* (for those that don't have a natural "no change" value),
* see &enum station_parameters_apply_mask
* @local_pm: local link-specific mesh power save mode (no change when set
* to unknown)
* @capability: station capability
* @ext_capab: extended capabilities of the station
* @ext_capab_len: number of extended capabilities
* @supported_channels: supported channels in IEEE 802.11 format
* @supported_channels_len: number of supported channels
* @supported_oper_classes: supported oper classes in IEEE 802.11 format
* @supported_oper_classes_len: number of supported operating classes
* @support_p2p_ps: whether the station supports the P2P PS mechanism
* @airtime_weight: airtime scheduler weight for this station
* @eml_cap_present: Specifies if EML capabilities field (@eml_cap) is
* present/updated
* @eml_cap: EML capabilities of this station
* @link_sta_params: link related params.
*/
struct station_parameters {
struct net_device *vlan;
u32 sta_flags_mask, sta_flags_set;
u32 sta_modify_mask;
int listen_interval;
u16 aid;
u16 vlan_id;
u16 peer_aid;
u8 plink_action;
u8 plink_state;
u8 uapsd_queues;
u8 max_sp;
enum nl80211_mesh_power_mode local_pm;
u16 capability;
const u8 *ext_capab;
u8 ext_capab_len;
const u8 *supported_channels;
u8 supported_channels_len;
const u8 *supported_oper_classes;
u8 supported_oper_classes_len;
int support_p2p_ps;
u16 airtime_weight;
bool eml_cap_present;
u16 eml_cap;
struct link_station_parameters link_sta_params;
};
/**
* struct station_del_parameters - station deletion parameters
*
* Used to delete a station entry (or all stations).
*
* @mac: MAC address of the station to remove or NULL to remove all stations
* @subtype: Management frame subtype to use for indicating removal
* (10 = Disassociation, 12 = Deauthentication)
* @reason_code: Reason code for the Disassociation/Deauthentication frame
* @link_id: Link ID indicating a link that stations to be flushed must be
* using; valid only for MLO, but can also be -1 for MLO to really
* remove all stations.
*/
struct station_del_parameters {
const u8 *mac;
u8 subtype;
u16 reason_code;
int link_id;
};
/**
* enum cfg80211_station_type - the type of station being modified
* @CFG80211_STA_AP_CLIENT: client of an AP interface
* @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still
* unassociated (updating properties for this type of client is permitted)
* @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
* the AP MLME in the device
* @CFG80211_STA_AP_STA: AP station on managed interface
* @CFG80211_STA_IBSS: IBSS station
* @CFG80211_STA_TDLS_PEER_SETUP: TDLS peer on managed interface (dummy entry
* while TDLS setup is in progress, it moves out of this state when
* being marked authorized; use this only if TDLS with external setup is
* supported/used)
* @CFG80211_STA_TDLS_PEER_ACTIVE: TDLS peer on managed interface (active
* entry that is operating, has been marked authorized by userspace)
* @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed)
* @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed)
*/
enum cfg80211_station_type {
CFG80211_STA_AP_CLIENT,
CFG80211_STA_AP_CLIENT_UNASSOC,
CFG80211_STA_AP_MLME_CLIENT,
CFG80211_STA_AP_STA,
CFG80211_STA_IBSS,
CFG80211_STA_TDLS_PEER_SETUP,
CFG80211_STA_TDLS_PEER_ACTIVE,
CFG80211_STA_MESH_PEER_KERNEL,
CFG80211_STA_MESH_PEER_USER,
};
/**
* cfg80211_check_station_change - validate parameter changes
* @wiphy: the wiphy this operates on
* @params: the new parameters for a station
* @statype: the type of station being modified
*
* Utility function for the @change_station driver method. Call this function
* with the appropriate station type after looking up the station (and checking
* that it exists). It will verify whether the station change is acceptable.
*
* Return: 0 if the change is acceptable, otherwise an error code. Note that
* it may modify the parameters for backward compatibility reasons, so don't
* use them before calling this.
*/
int cfg80211_check_station_change(struct wiphy *wiphy,
struct station_parameters *params,
enum cfg80211_station_type statype);
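/*
* Example (illustrative sketch): a driver's change_station() handler would
* typically look up the station, pick the matching station type and validate
* the requested change before applying it. my_sta_lookup() and
* my_sta_apply_params() are hypothetical driver helpers.
*
*	static int my_change_station(struct wiphy *wiphy, struct net_device *dev,
*				     const u8 *mac,
*				     struct station_parameters *params)
*	{
*		struct my_sta *sta = my_sta_lookup(dev, mac);
*		int err;
*
*		if (!sta)
*			return -ENOENT;
*
*		err = cfg80211_check_station_change(wiphy, params,
*						    CFG80211_STA_AP_CLIENT);
*		if (err)
*			return err;
*
*		return my_sta_apply_params(sta, params);
*	}
*/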
/**
* enum rate_info_flags - bitrate info flags
*
* Used by the driver to indicate the specific rate transmission
* type of an 802.11 transmission (HT/VHT/HE/EHT/S1G/DMG).
*
* @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
* @RATE_INFO_FLAGS_DMG: 60GHz MCS
* @RATE_INFO_FLAGS_HE_MCS: HE MCS information
* @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
* @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
* @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
* @RATE_INFO_FLAGS_S1G_MCS: MCS field filled with S1G MCS
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
RATE_INFO_FLAGS_SHORT_GI = BIT(2),
RATE_INFO_FLAGS_DMG = BIT(3),
RATE_INFO_FLAGS_HE_MCS = BIT(4),
RATE_INFO_FLAGS_EDMG = BIT(5),
RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6),
RATE_INFO_FLAGS_EHT_MCS = BIT(7),
RATE_INFO_FLAGS_S1G_MCS = BIT(8),
};
/**
* enum rate_info_bw - rate bandwidth information
*
* Used by the driver to indicate the rate bandwidth.
*
* @RATE_INFO_BW_5: 5 MHz bandwidth
* @RATE_INFO_BW_10: 10 MHz bandwidth
* @RATE_INFO_BW_20: 20 MHz bandwidth
* @RATE_INFO_BW_40: 40 MHz bandwidth
* @RATE_INFO_BW_80: 80 MHz bandwidth
* @RATE_INFO_BW_160: 160 MHz bandwidth
* @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
* @RATE_INFO_BW_320: 320 MHz bandwidth
* @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
* @RATE_INFO_BW_1: 1 MHz bandwidth
* @RATE_INFO_BW_2: 2 MHz bandwidth
* @RATE_INFO_BW_4: 4 MHz bandwidth
* @RATE_INFO_BW_8: 8 MHz bandwidth
* @RATE_INFO_BW_16: 16 MHz bandwidth
*/
enum rate_info_bw {
RATE_INFO_BW_20 = 0,
RATE_INFO_BW_5,
RATE_INFO_BW_10,
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
RATE_INFO_BW_HE_RU,
RATE_INFO_BW_320,
RATE_INFO_BW_EHT_RU,
RATE_INFO_BW_1,
RATE_INFO_BW_2,
RATE_INFO_BW_4,
RATE_INFO_BW_8,
RATE_INFO_BW_16,
};
/**
* struct rate_info - bitrate information
*
* Information about a receiving or transmitting bitrate
*
* @flags: bitflag of flags from &enum rate_info_flags
* @legacy: bitrate in 100kbit/s for 802.11abg
* @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G rate
* @nss: number of streams (VHT & HE only)
* @bw: bandwidth (from &enum rate_info_bw)
* @he_gi: HE guard interval (from &enum nl80211_he_gi)
* @he_dcm: HE DCM value
* @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
* only valid if bw is %RATE_INFO_BW_HE_RU)
* @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
* @eht_gi: EHT guard interval (from &enum nl80211_eht_gi)
* @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc,
* only valid if bw is %RATE_INFO_BW_EHT_RU)
*/
struct rate_info {
u16 flags;
u16 legacy;
u8 mcs;
u8 nss;
u8 bw;
u8 he_gi;
u8 he_dcm;
u8 he_ru_alloc;
u8 n_bonded_ch;
u8 eht_gi;
u8 eht_ru_alloc;
};
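/*
* Example (illustrative sketch): reporting a VHT MCS 7, 2-stream, 80 MHz
* transmission with short guard interval using the flags and bandwidth
* values defined above.
*
*	struct rate_info ri = {
*		.flags = RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_SHORT_GI,
*		.mcs = 7,
*		.nss = 2,
*		.bw = RATE_INFO_BW_80,
*	};
*/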
/**
* enum bss_param_flags - BSS parameter flags
*
* Used in &struct sta_bss_parameters to indicate which attributes are
* enabled on the BSS the station is associated with.
*
* @BSS_PARAM_FLAGS_CTS_PROT: whether CTS protection is enabled
* @BSS_PARAM_FLAGS_SHORT_PREAMBLE: whether short preamble is enabled
* @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled
*/
enum bss_param_flags {
BSS_PARAM_FLAGS_CTS_PROT = BIT(0),
BSS_PARAM_FLAGS_SHORT_PREAMBLE = BIT(1),
BSS_PARAM_FLAGS_SHORT_SLOT_TIME = BIT(2),
};
/**
* struct sta_bss_parameters - BSS parameters for the attached station
*
* Information about the currently associated BSS
*
* @flags: bitflag of flags from &enum bss_param_flags
* @dtim_period: DTIM period for the BSS
* @beacon_interval: beacon interval
*/
struct sta_bss_parameters {
u8 flags;
u8 dtim_period;
u16 beacon_interval;
};
/**
* struct cfg80211_txq_stats - TXQ statistics for this TID
* @filled: bitmap of flags using the bits of &enum nl80211_txq_stats to
* indicate the relevant values in this struct are filled
* @backlog_bytes: total number of bytes currently backlogged
* @backlog_packets: total number of packets currently backlogged
* @flows: number of new flows seen
* @drops: total number of packets dropped
* @ecn_marks: total number of packets marked with ECN CE
* @overlimit: number of drops due to queue space overflow
* @overmemory: number of drops due to memory limit overflow
* @collisions: number of hash collisions
* @tx_bytes: total number of bytes dequeued
* @tx_packets: total number of packets dequeued
* @max_flows: maximum number of flows supported
*/
struct cfg80211_txq_stats {
u32 filled;
u32 backlog_bytes;
u32 backlog_packets;
u32 flows;
u32 drops;
u32 ecn_marks;
u32 overlimit;
u32 overmemory;
u32 collisions;
u32 tx_bytes;
u32 tx_packets;
u32 max_flows;
};
/**
* struct cfg80211_tid_stats - per-TID statistics
* @filled: bitmap of flags using the bits of &enum nl80211_tid_stats to
* indicate the relevant values in this struct are filled
* @rx_msdu: number of received MSDUs
* @tx_msdu: number of (attempted) transmitted MSDUs
* @tx_msdu_retries: number of retries (not counting the first) for
* transmitted MSDUs
* @tx_msdu_failed: number of failed transmitted MSDUs
* @txq_stats: TXQ statistics
*/
struct cfg80211_tid_stats {
u32 filled;
u64 rx_msdu;
u64 tx_msdu;
u64 tx_msdu_retries;
u64 tx_msdu_failed;
struct cfg80211_txq_stats txq_stats;
};
#define IEEE80211_MAX_CHAINS 4
/**
* struct link_station_info - link station information
*
* Link station information filled by driver for get_station() and
* dump_station().
* @filled: bit flag of flags using the bits of &enum nl80211_sta_info to
* indicate the relevant values in this struct for them
* @connected_time: time (in seconds) since the link of the station was
* last connected
* @inactive_time: time since last activity of the link station (tx/rx),
* in milliseconds
* @assoc_at: boottime (ns) of the last association of the link of the station
* @rx_bytes: bytes (size of MPDUs) received from this link of station
* @tx_bytes: bytes (size of MPDUs) transmitted to this link of station
* @signal: The signal strength, type depends on the wiphy's signal_type.
* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
* @signal_avg: Average signal strength, type depends on the wiphy's
* signal_type. For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_
* @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
* @chain_signal: per-chain signal strength of last received packet in dBm
* @chain_signal_avg: per-chain signal strength average in dBm
* @txrate: current unicast bitrate from this link of station
* @rxrate: current unicast bitrate to this link of station
* @rx_packets: packets (MSDUs & MMPDUs) received from this link of station
* @tx_packets: packets (MSDUs & MMPDUs) transmitted to this link of station
* @tx_retries: cumulative retry counts (MPDUs) for this link of station
* @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
* @rx_dropped_misc: Dropped for un-specified reason.
* @bss_param: current BSS parameters
* @beacon_loss_count: Number of times beacon loss event has triggered.
* @expected_throughput: expected throughput in kbps (including 802.11 headers)
* towards this station.
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer
* @airtime_weight: current airtime scheduling weight
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
* Note that this doesn't use the @filled bit, but is used if non-NULL.
* @ack_signal: signal strength (in dBm) of the last ACK frame.
* @avg_ack_signal: average RSSI value of the ACK frames for the MSDUs that
* have been sent.
* @rx_mpdu_count: number of MPDUs received from this station
* @fcs_err_count: number of packets (MPDUs) received from this station with
* an FCS error. This counter should be incremented only when TA of the
* received packet with an FCS error matches the peer MAC address.
* @addr: For MLO STA connection, filled with address of the link of station.
*/
struct link_station_info {
u64 filled;
u32 connected_time;
u32 inactive_time;
u64 assoc_at;
u64 rx_bytes;
u64 tx_bytes;
s8 signal;
s8 signal_avg;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
struct rate_info txrate;
struct rate_info rxrate;
u32 rx_packets;
u32 tx_packets;
u32 tx_retries;
u32 tx_failed;
u32 rx_dropped_misc;
struct sta_bss_parameters bss_param;
u32 beacon_loss_count;
u32 expected_throughput;
u64 tx_duration;
u64 rx_duration;
u64 rx_beacon;
u8 rx_beacon_signal_avg;
u16 airtime_weight;
s8 ack_signal;
s8 avg_ack_signal;
struct cfg80211_tid_stats *pertid;
u32 rx_mpdu_count;
u32 fcs_err_count;
u8 addr[ETH_ALEN] __aligned(2);
};
/**
* struct station_info - station information
*
* Station information filled by driver for get_station() and dump_station.
*
* @filled: bitflag of flags using the bits of &enum nl80211_sta_info to
* indicate the relevant values in this struct for them
* @connected_time: time (in seconds) since the station was last connected
* @inactive_time: time since last station activity (tx/rx) in milliseconds
* @assoc_at: boottime (ns) of the last association
* @rx_bytes: bytes (size of MPDUs) received from this station
* @tx_bytes: bytes (size of MPDUs) transmitted to this station
* @signal: The signal strength, type depends on the wiphy's signal_type.
* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
* @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
* @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
* @chain_signal: per-chain signal strength of last received packet in dBm
* @chain_signal_avg: per-chain signal strength average in dBm
* @txrate: current unicast bitrate from this station
* @rxrate: current unicast bitrate to this station
* @rx_packets: packets (MSDUs & MMPDUs) received from this station
* @tx_packets: packets (MSDUs & MMPDUs) transmitted to this station
* @tx_retries: cumulative retry counts (MPDUs)
* @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
* @rx_dropped_misc: Dropped for un-specified reason.
* @bss_param: current BSS parameters
* @generation: generation number for nl80211 dumps.
* This number should increase every time the list of stations
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
* @beacon_loss_count: Number of times beacon loss event has triggered.
* @assoc_req_ies: IEs from (Re)Association Request.
* This is used only when in AP mode with drivers that do not use
* user space MLME/SME implementation. The information is provided for
* the cfg80211_new_sta() calls to notify user space of the IEs.
* @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
* @sta_flags: station flags mask & values
* @t_offset: Time offset of the station relative to this host.
* @llid: mesh local link id
* @plid: mesh peer link id
* @plink_state: mesh peer link state
* @connected_to_gate: true if mesh STA has a path to mesh gate
* @connected_to_as: true if mesh STA has a path to authentication server
* @airtime_link_metric: mesh airtime link metric.
* @local_pm: local mesh STA power save mode
* @peer_pm: peer mesh STA power save mode
* @nonpeer_pm: non-peer mesh STA power save mode
* @expected_throughput: expected throughput in kbps (including 802.11 headers)
* towards this station.
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer
* @airtime_weight: current airtime scheduling weight
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
* Note that this doesn't use the @filled bit, but is used if non-NULL.
* @ack_signal: signal strength (in dBm) of the last ACK frame.
* @avg_ack_signal: average RSSI value of the ACK frames for the MSDUs that
* have been sent.
* @rx_mpdu_count: number of MPDUs received from this station
* @fcs_err_count: number of packets (MPDUs) received from this station with
* an FCS error. This counter should be incremented only when TA of the
* received packet with an FCS error matches the peer MAC address.
* @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled
* by the driver. Drivers use this only in cfg80211_new_sta() calls when the
* AP MLD's MLME/SME is offloaded to the driver. Drivers won't fill this
* information in cfg80211_del_sta_sinfo(), get_station() and
* dump_station() callbacks.
* @assoc_link_id: Indicates MLO link ID of the AP, with which the station
* completed (re)association. This information filled for both MLO
* and non-MLO STA connections when the AP affiliated with an MLD.
* @mld_addr: For MLO STA connection, filled with MLD address of the station.
* For non-MLO STA connection, filled with all zeros.
* @assoc_resp_ies: IEs from (Re)Association Response.
* This is used only when in AP mode with drivers that do not use user
* space MLME/SME implementation. The information is provided only for the
* cfg80211_new_sta() calls to notify user space of the IEs. Drivers won't
* fill this information in cfg80211_del_sta_sinfo(), get_station() and
* dump_station() callbacks. User space needs this information to determine
* the accepted and rejected affiliated links of the connected station.
* @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets.
* @valid_links: bitmap of valid links, or 0 for non-MLO. Drivers fill this
* information in cfg80211_new_sta(), cfg80211_del_sta_sinfo(),
* get_station() and dump_station() callbacks.
* @links: reference to Link sta entries for MLO STA, all link specific
* information is accessed through links[link_id].
*/
struct station_info {
u64 filled;
u32 connected_time;
u32 inactive_time;
u64 assoc_at;
u64 rx_bytes;
u64 tx_bytes;
s8 signal;
s8 signal_avg;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
struct rate_info txrate;
struct rate_info rxrate;
u32 rx_packets;
u32 tx_packets;
u32 tx_retries;
u32 tx_failed;
u32 rx_dropped_misc;
struct sta_bss_parameters bss_param;
struct nl80211_sta_flag_update sta_flags;
int generation;
u32 beacon_loss_count;
const u8 *assoc_req_ies;
size_t assoc_req_ies_len;
s64 t_offset;
u16 llid;
u16 plid;
u8 plink_state;
u8 connected_to_gate;
u8 connected_to_as;
u32 airtime_link_metric;
enum nl80211_mesh_power_mode local_pm;
enum nl80211_mesh_power_mode peer_pm;
enum nl80211_mesh_power_mode nonpeer_pm;
u32 expected_throughput;
u16 airtime_weight;
s8 ack_signal;
s8 avg_ack_signal;
struct cfg80211_tid_stats *pertid;
u64 tx_duration;
u64 rx_duration;
u64 rx_beacon;
u8 rx_beacon_signal_avg;
u32 rx_mpdu_count;
u32 fcs_err_count;
bool mlo_params_valid;
u8 assoc_link_id;
u8 mld_addr[ETH_ALEN] __aligned(2);
const u8 *assoc_resp_ies;
size_t assoc_resp_ies_len;
u16 valid_links;
struct link_station_info *links[IEEE80211_MLD_MAX_NUM_LINKS];
};
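/*
* Example (illustrative sketch): walking the optional per-TID statistics of
* a filled &struct station_info. The @pertid array has IEEE80211_NUM_TIDS + 1
* entries, the last one accounting for MSDUs not encapsulated in QoS-MPDUs.
*
*	int tid;
*
*	if (sinfo->pertid) {
*		for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
*			struct cfg80211_tid_stats *ts = &sinfo->pertid[tid];
*
*			if (ts->filled & BIT(NL80211_TID_STATS_TX_MSDU))
*				pr_debug("TID %d: %llu MSDUs sent\n",
*					 tid, ts->tx_msdu);
*		}
*	}
*/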
/**
* struct cfg80211_sar_sub_specs - sub specs limit
* @power: power limitation in 0.25 dBm units
* @freq_range_index: index the power limitation applies to
*/
struct cfg80211_sar_sub_specs {
s32 power;
u32 freq_range_index;
};
/**
* struct cfg80211_sar_specs - sar limit specs
* @type: SAR limit type, e.g. power limits specified in 0.25 dBm units
* @num_sub_specs: number of sar sub specs
* @sub_specs: memory to hold the sar sub specs
*/
struct cfg80211_sar_specs {
enum nl80211_sar_type type;
u32 num_sub_specs;
struct cfg80211_sar_sub_specs sub_specs[] __counted_by(num_sub_specs);
};
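/*
* Example (illustrative sketch): allocating a SAR specification with two
* power sub-specs (power is in 0.25 dBm units, so 44 means 11 dBm), using
* struct_size() to size the flexible array.
*
*	struct cfg80211_sar_specs *sar;
*
*	sar = kzalloc(struct_size(sar, sub_specs, 2), GFP_KERNEL);
*	if (!sar)
*		return -ENOMEM;
*	sar->type = NL80211_SAR_TYPE_POWER;
*	sar->num_sub_specs = 2;
*	sar->sub_specs[0].freq_range_index = 0;
*	sar->sub_specs[0].power = 44;
*	sar->sub_specs[1].freq_range_index = 1;
*	sar->sub_specs[1].power = 52;
*/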
/**
* struct cfg80211_sar_freq_ranges - sar frequency ranges
* @start_freq: start range edge frequency
* @end_freq: end range edge frequency
*/
struct cfg80211_sar_freq_ranges {
u32 start_freq;
u32 end_freq;
};
/**
* struct cfg80211_sar_capa - sar limit capability
* @type: SAR limit type, e.g. power limits specified in 0.25 dBm units
* @num_freq_ranges: number of frequency ranges
* @freq_ranges: memory to hold the freq ranges.
*
* Note: the WLAN driver may append new ranges or split an existing
* range into smaller ones and then append them.
*/
struct cfg80211_sar_capa {
enum nl80211_sar_type type;
u32 num_freq_ranges;
const struct cfg80211_sar_freq_ranges *freq_ranges;
};
#if IS_ENABLED(CONFIG_CFG80211)
/**
* cfg80211_get_station - retrieve information about a given station
* @dev: the device where the station is supposed to be connected to
* @mac_addr: the mac address of the station of interest
* @sinfo: pointer to the structure to fill with the information
*
* Return: 0 on success, in which case sinfo is filled with the available
* information; otherwise a negative error code, in which case the contents
* of sinfo must be considered undefined.
*/
int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo);
#else
static inline int cfg80211_get_station(struct net_device *dev,
const u8 *mac_addr,
struct station_info *sinfo)
{
return -ENOENT;
}
#endif
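/*
* Example (illustrative sketch): querying the signal strength of a peer from
* another subsystem. Per-TID data possibly allocated by the driver is
* released with cfg80211_sinfo_release_content() (declared elsewhere in this
* header) once the result has been consumed.
*
*	struct station_info sinfo = {};
*	int err;
*
*	err = cfg80211_get_station(dev, peer_mac, &sinfo);
*	if (err)
*		return err;
*	if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))
*		pr_debug("peer signal: %d dBm\n", sinfo.signal);
*	cfg80211_sinfo_release_content(&sinfo);
*/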
/**
* enum monitor_flags - monitor flags
*
* Monitor interface configuration flags. Note that these must be the bits
* according to the nl80211 flags.
*
* @MONITOR_FLAG_CHANGED: set if the flags were changed
* @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS
* @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP
* @MONITOR_FLAG_CONTROL: pass control frames
* @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering
* @MONITOR_FLAG_COOK_FRAMES: deprecated, will unconditionally be refused
* @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
* @MONITOR_FLAG_SKIP_TX: do not pass locally transmitted frames
*/
enum monitor_flags {
MONITOR_FLAG_CHANGED = BIT(__NL80211_MNTR_FLAG_INVALID),
MONITOR_FLAG_FCSFAIL = BIT(NL80211_MNTR_FLAG_FCSFAIL),
MONITOR_FLAG_PLCPFAIL = BIT(NL80211_MNTR_FLAG_PLCPFAIL),
MONITOR_FLAG_CONTROL = BIT(NL80211_MNTR_FLAG_CONTROL),
MONITOR_FLAG_OTHER_BSS = BIT(NL80211_MNTR_FLAG_OTHER_BSS),
MONITOR_FLAG_COOK_FRAMES = BIT(NL80211_MNTR_FLAG_COOK_FRAMES),
MONITOR_FLAG_ACTIVE = BIT(NL80211_MNTR_FLAG_ACTIVE),
MONITOR_FLAG_SKIP_TX = BIT(NL80211_MNTR_FLAG_SKIP_TX),
};
/**
* enum mpath_info_flags - mesh path information flags
*
* Used by the driver to indicate which info in &struct mpath_info it has filled
* in during get_mpath() or dump_mpath().
*
* @MPATH_INFO_FRAME_QLEN: @frame_qlen filled
* @MPATH_INFO_SN: @sn filled
* @MPATH_INFO_METRIC: @metric filled
* @MPATH_INFO_EXPTIME: @exptime filled
* @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled
* @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled
* @MPATH_INFO_FLAGS: @flags filled
* @MPATH_INFO_HOP_COUNT: @hop_count filled
* @MPATH_INFO_PATH_CHANGE: @path_change_count filled
*/
enum mpath_info_flags {
MPATH_INFO_FRAME_QLEN = BIT(0),
MPATH_INFO_SN = BIT(1),
MPATH_INFO_METRIC = BIT(2),
MPATH_INFO_EXPTIME = BIT(3),
MPATH_INFO_DISCOVERY_TIMEOUT = BIT(4),
MPATH_INFO_DISCOVERY_RETRIES = BIT(5),
MPATH_INFO_FLAGS = BIT(6),
MPATH_INFO_HOP_COUNT = BIT(7),
MPATH_INFO_PATH_CHANGE = BIT(8),
};
/**
* struct mpath_info - mesh path information
*
* Mesh path information filled by driver for get_mpath() and dump_mpath().
*
* @filled: bitfield of flags from &enum mpath_info_flags
* @frame_qlen: number of queued frames for this destination
* @sn: target sequence number
* @metric: metric (cost) of this mesh path
* @exptime: expiration time for the mesh path from now, in msecs
* @flags: mesh path flags from &enum mesh_path_flags
* @discovery_timeout: total mesh path discovery timeout, in msecs
* @discovery_retries: mesh path discovery retries
* @generation: generation number for nl80211 dumps.
* This number should increase every time the list of mesh paths
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
* @hop_count: hops to destination
* @path_change_count: total number of path changes to destination
*/
struct mpath_info {
u32 filled;
u32 frame_qlen;
u32 sn;
u32 metric;
u32 exptime;
u32 discovery_timeout;
u8 discovery_retries;
u8 flags;
u8 hop_count;
u32 path_change_count;
int generation;
};
/**
* enum wiphy_bss_param_flags - bit positions for supported bss parameters.
*
* @WIPHY_BSS_PARAM_CTS_PROT: support changing CTS protection.
* @WIPHY_BSS_PARAM_SHORT_PREAMBLE: support changing short preamble usage.
* @WIPHY_BSS_PARAM_SHORT_SLOT_TIME: support changing short slot time usage.
* @WIPHY_BSS_PARAM_BASIC_RATES: support reconfiguring basic rates.
* @WIPHY_BSS_PARAM_AP_ISOLATE: support changing AP isolation.
* @WIPHY_BSS_PARAM_HT_OPMODE: support changing HT operating mode.
* @WIPHY_BSS_PARAM_P2P_CTWINDOW: support reconfiguring ctwindow.
* @WIPHY_BSS_PARAM_P2P_OPPPS: support changing P2P opportunistic power-save.
*/
enum wiphy_bss_param_flags {
WIPHY_BSS_PARAM_CTS_PROT = BIT(0),
WIPHY_BSS_PARAM_SHORT_PREAMBLE = BIT(1),
WIPHY_BSS_PARAM_SHORT_SLOT_TIME = BIT(2),
WIPHY_BSS_PARAM_BASIC_RATES = BIT(3),
WIPHY_BSS_PARAM_AP_ISOLATE = BIT(4),
WIPHY_BSS_PARAM_HT_OPMODE = BIT(5),
WIPHY_BSS_PARAM_P2P_CTWINDOW = BIT(6),
WIPHY_BSS_PARAM_P2P_OPPPS = BIT(7),
};
/**
* struct bss_parameters - BSS parameters
*
* Used to change BSS parameters (mainly for AP mode).
*
* @link_id: link_id or -1 for non-MLD
* @use_cts_prot: Whether to use CTS protection
* (0 = no, 1 = yes, -1 = do not change)
* @use_short_preamble: Whether the use of short preambles is allowed
* (0 = no, 1 = yes, -1 = do not change)
* @use_short_slot_time: Whether the use of short slot time is allowed
* (0 = no, 1 = yes, -1 = do not change)
* @basic_rates: basic rates in IEEE 802.11 format
* (or NULL for no change)
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
* (0 = no, 1 = yes, -1 = do not change)
* @ht_opmode: HT Operation mode
* (u16 = opmode, -1 = do not change)
* @p2p_ctwindow: P2P CT Window (-1 = no change)
* @p2p_opp_ps: P2P opportunistic PS (-1 = no change)
*/
struct bss_parameters {
int link_id;
int use_cts_prot;
int use_short_preamble;
int use_short_slot_time;
const u8 *basic_rates;
u8 basic_rates_len;
int ap_isolate;
int ht_opmode;
s8 p2p_ctwindow, p2p_opp_ps;
};
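/*
* Example (illustrative sketch): enabling CTS protection only, leaving all
* other BSS parameters untouched by relying on the "-1 / NULL = do not
* change" convention documented above.
*
*	struct bss_parameters params = {
*		.link_id = -1,
*		.use_cts_prot = 1,
*		.use_short_preamble = -1,
*		.use_short_slot_time = -1,
*		.ap_isolate = -1,
*		.ht_opmode = -1,
*		.p2p_ctwindow = -1,
*		.p2p_opp_ps = -1,
*	};
*/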
/**
* struct mesh_config - 802.11s mesh configuration
*
* These parameters can be changed while the mesh is active.
*
* @dot11MeshRetryTimeout: the initial retry timeout in millisecond units used
* by the Mesh Peering Open message
* @dot11MeshConfirmTimeout: the initial retry timeout in millisecond units
* used by the Mesh Peering Open message
* @dot11MeshHoldingTimeout: the confirm timeout in millisecond units used by
* the mesh peering management to close a mesh peering
* @dot11MeshMaxPeerLinks: the maximum number of peer links allowed on this
* mesh interface
* @dot11MeshMaxRetries: the maximum number of peer link open retries that can
* be sent to establish a new peer link instance in a mesh
* @dot11MeshTTL: the value of TTL field set at a source mesh STA
* @element_ttl: the value of TTL field set at a mesh STA for path selection
* elements
* @auto_open_plinks: whether we should automatically open peer links when we
* detect compatible mesh peers
* @dot11MeshNbrOffsetMaxNeighbor: the maximum number of neighbors to
* synchronize to for 11s default synchronization method
* @dot11MeshHWMPmaxPREQretries: the number of action frames containing a PREQ
* that an originator mesh STA can send to a particular path target
* @path_refresh_time: how frequently to refresh mesh paths in milliseconds
* @min_discovery_timeout: the minimum length of time to wait until giving up on
* a path discovery in milliseconds
* @dot11MeshHWMPactivePathTimeout: the time (in TUs) for which mesh STAs
* receiving a PREQ shall consider the forwarding information from the
* root to be valid. (TU = time unit)
* @dot11MeshHWMPpreqMinInterval: the minimum interval of time (in TUs) during
* which a mesh STA can send only one action frame containing a PREQ
* element
* @dot11MeshHWMPperrMinInterval: the minimum interval of time (in TUs) during
* which a mesh STA can send only one Action frame containing a PERR
* element
* @dot11MeshHWMPnetDiameterTraversalTime: the interval of time (in TUs) that
* it takes for an HWMP information element to propagate across the mesh
* @dot11MeshHWMPRootMode: the configuration of a mesh STA as root mesh STA
* @dot11MeshHWMPRannInterval: the interval of time (in TUs) between root
* announcements are transmitted
* @dot11MeshGateAnnouncementProtocol: whether to advertise that this mesh
* station has access to a broader network beyond the MBSS. (This is
* misnamed in draft 12.0: dot11MeshGateAnnouncementProtocol set to true
* only means that the station will announce to others that it is a mesh
* gate, but not necessarily using the gate announcement protocol. Still
* keeping the same nomenclature to be in sync with the spec.)
* @dot11MeshForwarding: whether the Mesh STA is forwarding or non-forwarding
* entity (default is TRUE - forwarding entity)
* @rssi_threshold: the threshold for average signal strength of candidate
* station to establish a peer link
* @ht_opmode: mesh HT protection mode
*
* @dot11MeshHWMPactivePathToRootTimeout: The time (in TUs) for which mesh STAs
* receiving a proactive PREQ shall consider the forwarding information to
* the root mesh STA to be valid.
*
* @dot11MeshHWMProotInterval: The interval of time (in TUs) between proactive
* PREQs are transmitted.
* @dot11MeshHWMPconfirmationInterval: The minimum interval of time (in TUs)
* during which a mesh STA can send only one Action frame containing
* a PREQ element for root path confirmation.
* @power_mode: The default mesh power save mode which will be the initial
* setting for new peer links.
* @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake
* after transmitting its beacon.
* @plink_timeout: If no tx activity is seen from a STA we've established
* peering with for longer than this time (in seconds), then remove it
* from the STA's list of peers. Default is 30 minutes.
* @dot11MeshConnectedToAuthServer: if set to true then this mesh STA
* will advertise that it is connected to an authentication server
* in the mesh formation field.
* @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
* connected to a mesh gate in mesh formation info. If false, the
* value in mesh formation is determined by the presence of root paths
* in the mesh path table
* @dot11MeshNolearn: Try to avoid multi-hop path discovery (e.g. PREQ/PREP
* for HWMP) if the destination is a direct neighbor. Note that this might
* not be the optimal decision as a multi-hop route might be better. So
* if using this setting you will likely also want to disable
* dot11MeshForwarding and use another mesh routing protocol on top.
*/
struct mesh_config {
u16 dot11MeshRetryTimeout;
u16 dot11MeshConfirmTimeout;
u16 dot11MeshHoldingTimeout;
u16 dot11MeshMaxPeerLinks;
u8 dot11MeshMaxRetries;
u8 dot11MeshTTL;
u8 element_ttl;
bool auto_open_plinks;
u32 dot11MeshNbrOffsetMaxNeighbor;
u8 dot11MeshHWMPmaxPREQretries;
u32 path_refresh_time;
u16 min_discovery_timeout;
u32 dot11MeshHWMPactivePathTimeout;
u16 dot11MeshHWMPpreqMinInterval;
u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
bool dot11MeshConnectedToMeshGate;
bool dot11MeshConnectedToAuthServer;
u16 dot11MeshHWMPRannInterval;
bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
s32 rssi_threshold;
u16 ht_opmode;
u32 dot11MeshHWMPactivePathToRootTimeout;
u16 dot11MeshHWMProotInterval;
u16 dot11MeshHWMPconfirmationInterval;
enum nl80211_mesh_power_mode power_mode;
u16 dot11MeshAwakeWindowDuration;
u32 plink_timeout;
bool dot11MeshNolearn;
};
/**
* struct mesh_setup - 802.11s mesh setup configuration
* @chandef: defines the channel to use
* @mesh_id: the mesh ID
* @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
* @sync_method: which synchronization method to use
* @path_sel_proto: which path selection protocol to use
* @path_metric: which metric to use
* @auth_id: which authentication method this mesh is using
* @ie: vendor information elements (optional)
* @ie_len: length of vendor information elements
* @is_authenticated: this mesh requires authentication
* @is_secure: this mesh uses security
* @user_mpm: userspace handles all MPM functions
* @dtim_period: DTIM period to use
* @beacon_interval: beacon interval to use
* @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
* @beacon_rate: bitrate to be used for beacons
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
* changes the channel when a radar is detected. This is required
* to operate on DFS channels.
* @control_port_over_nl80211: TRUE if userspace expects to exchange control
* port frames over NL80211 instead of the network interface.
*
* These parameters are fixed when the mesh is created.
*/
struct mesh_setup {
struct cfg80211_chan_def chandef;
const u8 *mesh_id;
u8 mesh_id_len;
u8 sync_method;
u8 path_sel_proto;
u8 path_metric;
u8 auth_id;
const u8 *ie;
u8 ie_len;
bool is_authenticated;
bool is_secure;
bool user_mpm;
u8 dtim_period;
u16 beacon_interval;
int mcast_rate[NUM_NL80211_BANDS];
u32 basic_rates;
struct cfg80211_bitrate_mask beacon_rate;
bool userspace_handles_dfs;
bool control_port_over_nl80211;
};
/**
* struct ocb_setup - 802.11p OCB mode setup configuration
* @chandef: defines the channel to use
*
* These parameters are fixed when connecting to the network
*/
struct ocb_setup {
struct cfg80211_chan_def chandef;
};
/**
* struct ieee80211_txq_params - TX queue parameters
* @ac: AC identifier
* @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled
* @cwmin: Minimum contention window [a value of the form 2^n-1 in the range
* 1..32767]
* @cwmax: Maximum contention window [a value of the form 2^n-1 in the range
* 1..32767]
* @aifs: Arbitration interframe space [0..255]
* @link_id: link_id or -1 for non-MLD
*/
struct ieee80211_txq_params {
enum nl80211_ac ac;
u16 txop;
u16 cwmin;
u16 cwmax;
u8 aifs;
int link_id;
};
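/*
* Example (illustrative sketch): typical best-effort EDCA parameters
* (AIFSN 3, CWmin 15, CWmax 1023, no TXOP limit) for a non-MLD interface.
*
*	struct ieee80211_txq_params txq = {
*		.ac = NL80211_AC_BE,
*		.txop = 0,
*		.cwmin = 15,
*		.cwmax = 1023,
*		.aifs = 3,
*		.link_id = -1,
*	};
*/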
/**
* DOC: Scanning and BSS list handling
*
* The scanning process itself is fairly simple, but cfg80211 offers quite
* a bit of helper functionality. To start a scan, the scan operation will
* be invoked with a scan definition. This scan definition contains the
* channels to scan, and the SSIDs to send probe requests for (including the
* wildcard, if desired). A passive scan is indicated by having no SSIDs to
* probe. Additionally, a scan request may contain extra information elements
* that should be added to the probe request. The IEs are guaranteed to be
* well-formed, and will not exceed the maximum length the driver advertised
* in the wiphy structure.
*
* When scanning finds a BSS, cfg80211 needs to be notified of that, because
* it is responsible for maintaining the BSS list; the driver should not
* maintain a list itself. For this notification, various functions exist.
*
* Since drivers do not maintain a BSS list, there are also a number of
* functions to search for a BSS and obtain information about it from the
* BSS structure cfg80211 maintains. The BSS list is also made available
* to userspace.
*/
/**
* struct cfg80211_ssid - SSID description
* @ssid: the SSID
* @ssid_len: length of the ssid
*/
struct cfg80211_ssid {
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len;
};
/**
* struct cfg80211_scan_info - information about completed scan
* @scan_start_tsf: scan start time in terms of the TSF of the BSS that the
* wireless device that requested the scan is connected to. If this
* information is not available, this field is left zero.
* @tsf_bssid: the BSSID according to which %scan_start_tsf is set.
* @aborted: set to true if the scan was aborted for any reason,
* userspace will be notified of that
*/
struct cfg80211_scan_info {
u64 scan_start_tsf;
u8 tsf_bssid[ETH_ALEN] __aligned(2);
bool aborted;
};
/**
* struct cfg80211_scan_6ghz_params - relevant for 6 GHz only
*
* @short_ssid: short ssid to scan for
* @bssid: bssid to scan for
* @channel_idx: idx of the channel in the channel array in the scan request
* which the above info is relevant to
* @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU
* @short_ssid_valid: @short_ssid is valid and can be used
* @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
* 20 TUs before starting to send probe requests.
* @psd_20: The AP's 20 MHz PSD value.
*/
struct cfg80211_scan_6ghz_params {
u32 short_ssid;
u32 channel_idx;
u8 bssid[ETH_ALEN];
bool unsolicited_probe;
bool short_ssid_valid;
bool psc_no_listen;
s8 psd_20;
};
/**
* struct cfg80211_scan_request - scan request description
*
* @ssids: SSIDs to scan for (active scan only)
* @n_ssids: number of SSIDs
* @channels: channels to scan on.
* @n_channels: total number of channels to scan
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
* @duration: how long to listen on each channel, in TUs. If
* %duration_mandatory is not set, this is the maximum dwell time and
* the actual dwell time may be shorter.
* @duration_mandatory: if set, the scan duration must be as specified by the
* %duration field.
* @flags: control flags from &enum nl80211_scan_flags
* @rates: bitmap of rates to advertise for each band
* @wiphy: the wiphy this was for
* @scan_start: time (in jiffies) when the scan started
* @wdev: the wireless device to scan for
* @no_cck: used to send probe requests at non CCK rate in 2GHz band
* @mac_addr: MAC address used with randomisation
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
* @scan_6ghz: relevant for split scan request only,
* true if this is a 6 GHz scan request
* @first_part: %true if this is the first part of a split scan request or a
* scan that was not split. May be %true for a @scan_6ghz scan if no other
* channels were requested
* @n_6ghz_params: number of 6 GHz params
* @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
* @tsf_report_link_id: for MLO, indicates the link ID of the BSS that should be
* used for TSF reporting. Can be set to -1 to indicate no preference.
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
const u8 *ie;
size_t ie_len;
u16 duration;
bool duration_mandatory;
u32 flags;
u32 rates[NUM_NL80211_BANDS];
struct wireless_dev *wdev;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
u8 bssid[ETH_ALEN] __aligned(2);
struct wiphy *wiphy;
unsigned long scan_start;
bool no_cck;
bool scan_6ghz;
bool first_part;
u32 n_6ghz_params;
struct cfg80211_scan_6ghz_params *scan_6ghz_params;
s8 tsf_report_link_id;
/* keep last */
struct ieee80211_channel *channels[];
};
static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
{
int i;
get_random_bytes(buf, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++) {
buf[i] &= ~mask[i];
buf[i] |= addr[i] & mask[i];
}
}
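/*
* Example (illustrative sketch): with addr = 02:00:00:00:00:00 and
* mask = ff:00:00:00:00:00, the helper above yields an address whose first
* octet is forced to 0x02 while the remaining five octets are randomised.
*
*	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };
*	static const u8 mask[ETH_ALEN] = { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 };
*	u8 random_addr[ETH_ALEN];
*
*	get_random_mask_addr(random_addr, addr, mask);
*/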
/**
* struct cfg80211_match_set - sets of attributes to match
*
* @ssid: SSID to be matched; may be zero-length in case of BSSID match
* or no match (RSSI only)
* @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
* or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
s32 rssi_thold;
};
/**
* struct cfg80211_sched_scan_plan - scan plan for scheduled scan
*
* @interval: interval between scheduled scan iterations. In seconds.
* @iterations: number of scan iterations in this scan plan. Zero means
* infinite loop.
* The last scan plan will always have this parameter set to zero,
* all other scan plans will have a finite number of iterations.
*/
struct cfg80211_sched_scan_plan {
u32 interval;
u32 iterations;
};
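/*
* Example (illustrative sketch): a schedule that scans every 10 seconds for
* 5 iterations and then every 300 seconds indefinitely; the last plan must
* have @iterations set to zero.
*
*	struct cfg80211_sched_scan_plan plans[] = {
*		{ .interval = 10,  .iterations = 5 },
*		{ .interval = 300, .iterations = 0 },
*	};
*/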
/**
* struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
*
* @band: band of BSS which should match for RSSI level adjustment.
* @delta: value of RSSI level adjustment.
*/
struct cfg80211_bss_select_adjust {
enum nl80211_band band;
s8 delta;
};
/**
* struct cfg80211_sched_scan_request - scheduled scan request description
*
* @reqid: identifies this request.
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
* @flags: control flags from &enum nl80211_scan_flags
* @match_sets: sets of parameters to be matched for a scan result
* entry to be considered valid and to be passed to the host
* (others are filtered out).
* If omitted, all results are passed.
* @n_match_sets: number of match sets
* @report_results: indicates that results were reported for this request
* @wiphy: the wiphy this was for
* @dev: the interface
* @scan_start: start time of the scheduled scan
* @channels: channels to scan
* @min_rssi_thold: for drivers only supporting a single threshold, this
* contains the minimum over all matchsets
* @mac_addr: MAC address used with randomisation
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
* @scan_plans: scan plans to be executed in this scheduled scan. Lowest
* index must be executed first.
* @n_scan_plans: number of scan plans, at least 1.
* @rcu_head: RCU callback used to free the struct
* @owner_nlportid: netlink portid of owner (if this is a request
* owned by a particular socket)
* @nl_owner_dead: netlink owner socket was closed - this request needs to be freed
* @list: for keeping list of requests.
* @delay: delay in seconds to use before starting the first scan
* cycle. The driver may ignore this parameter and start
* immediately (or at any other time), if this feature is not
* supported.
* @relative_rssi_set: Indicates whether @relative_rssi is set or not.
* @relative_rssi: Relative RSSI threshold in dB to restrict scan result
* reporting in connected state to cases where a matching BSS is determined
* to have better or slightly worse RSSI than the current connected BSS.
* The relative RSSI threshold values are ignored in disconnected state.
* @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
* to the specified band while deciding whether a better BSS is reported
* using @relative_rssi. If delta is a negative number, the BSSs that
* belong to the specified band will be penalized by delta dB in relative
* comparisons.
*/
struct cfg80211_sched_scan_request {
u64 reqid;
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
const u8 *ie;
size_t ie_len;
u32 flags;
struct cfg80211_match_set *match_sets;
int n_match_sets;
s32 min_rssi_thold;
u32 delay;
struct cfg80211_sched_scan_plan *scan_plans;
int n_scan_plans;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
bool relative_rssi_set;
s8 relative_rssi;
struct cfg80211_bss_select_adjust rssi_adjust;
/* internal */
struct wiphy *wiphy;
struct net_device *dev;
unsigned long scan_start;
bool report_results;
struct rcu_head rcu_head;
u32 owner_nlportid;
bool nl_owner_dead;
struct list_head list;
/* keep last */
struct ieee80211_channel *channels[] __counted_by(n_channels);
};
/**
* enum cfg80211_signal_type - signal type
*
* @CFG80211_SIGNAL_TYPE_NONE: no signal strength information available
* @CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm)
* @CFG80211_SIGNAL_TYPE_UNSPEC: signal strength, increasing from 0 through 100
*/
enum cfg80211_signal_type {
CFG80211_SIGNAL_TYPE_NONE,
CFG80211_SIGNAL_TYPE_MBM,
CFG80211_SIGNAL_TYPE_UNSPEC,
};
/**
* struct cfg80211_inform_bss - BSS inform data
* @chan: channel the frame was received on
* @signal: signal strength value, according to the wiphy's
* signal type
* @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
* received; should match the time when the frame was actually
* received by the device (not just by the host, in case it was
* buffered on the device) and be accurate to about 10ms.
* If the frame isn't buffered, just passing the return value of
* ktime_get_boottime_ns() is likely appropriate.
* @parent_tsf: the time at the start of reception of the first octet of the
* timestamp field of the frame. The time is the TSF of the BSS specified
* by %parent_bssid.
* @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
* the BSS that requested the scan in which the beacon/probe was received.
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
* @restrict_use: restrict usage, if not set, assume @use_for is
* %NL80211_BSS_USE_FOR_NORMAL.
* @use_for: bitmap of possible usage for this BSS, see
* &enum nl80211_bss_use_for
* @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
* if @restrict_use is set and @use_for is zero (empty); may be 0 for
* unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
* @drv_data: Data to be passed through to @inform_bss
*/
struct cfg80211_inform_bss {
struct ieee80211_channel *chan;
s32 signal;
u64 boottime_ns;
u64 parent_tsf;
u8 parent_bssid[ETH_ALEN] __aligned(2);
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 restrict_use:1, use_for:7;
u8 cannot_use_reasons;
void *drv_data;
};
/**
* struct cfg80211_bss_ies - BSS entry IE data
* @tsf: TSF contained in the frame that carried these IEs
* @rcu_head: internal use, for freeing
* @len: length of the IEs
* @from_beacon: these IEs are known to come from a beacon
* @data: IE data
*/
struct cfg80211_bss_ies {
u64 tsf;
struct rcu_head rcu_head;
int len;
bool from_beacon;
u8 data[];
};
/**
* struct cfg80211_bss - BSS description
*
* This structure describes a BSS (which may also be a mesh network)
* for use in scan results and similar.
*
* @channel: channel this BSS is on
* @bssid: BSSID of the BSS
* @beacon_interval: the beacon interval as from the frame
* @capability: the capability field in host byte order
* @ies: the information elements (Note that there is no guarantee that these
* are well-formed!); this is a pointer to either the beacon_ies or
* proberesp_ies depending on whether Probe Response frame has been
* received. It is always non-%NULL.
* @beacon_ies: the information elements from the last Beacon frame
* (implementation note: if @hidden_beacon_bss is set this struct doesn't
* own the beacon_ies, but they're just pointers to the ones from the
* @hidden_beacon_bss struct)
* @proberesp_ies: the information elements from the last Probe Response frame
* @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame,
* cannot rely on it having valid data
* @hidden_beacon_bss: in case this BSS struct represents a probe response from
* a BSS that hides the SSID in its beacon, this points to the BSS struct
* that holds the beacon data. @beacon_ies is still valid, of course, and
* points to the same data as hidden_beacon_bss->beacon_ies in that case.
* @transmitted_bss: pointer to the transmitted BSS, if this is a
* non-transmitted one (multi-BSSID support)
* @nontrans_list: list of non-transmitted BSS, if this is a transmitted one
* (multi-BSSID support)
* @signal: signal strength value (type depends on the wiphy's signal_type)
* @ts_boottime: timestamp of the last BSS update in nanoseconds since boot
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
* @bssid_index: index in the multiple BSS set
* @max_bssid_indicator: max number of members in the BSS set
* @use_for: bitmap of possible usage for this BSS, see
* &enum nl80211_bss_use_for
* @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
* if @restrict_use is set and @use_for is zero (empty); may be 0 for
* unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
struct ieee80211_channel *channel;
const struct cfg80211_bss_ies __rcu *ies;
const struct cfg80211_bss_ies __rcu *beacon_ies;
const struct cfg80211_bss_ies __rcu *proberesp_ies;
struct cfg80211_bss *hidden_beacon_bss;
struct cfg80211_bss *transmitted_bss;
struct list_head nontrans_list;
s32 signal;
u64 ts_boottime;
u16 beacon_interval;
u16 capability;
u8 bssid[ETH_ALEN];
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 proberesp_ecsa_stuck:1;
u8 bssid_index;
u8 max_bssid_indicator;
u8 use_for;
u8 cannot_use_reasons;
u8 priv[] __aligned(sizeof(void *));
};
/**
* ieee80211_bss_get_elem - find element with given ID
* @bss: the bss to search
* @id: the element ID
*
* Note that the return value is an RCU-protected pointer, so
* rcu_read_lock() must be held when calling this function.
* Return: %NULL if not found.
*/
const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id);
/**
* ieee80211_bss_get_ie - find IE with given ID
* @bss: the bss to search
* @id: the element ID
*
* Note that the return value is an RCU-protected pointer, so
* rcu_read_lock() must be held when calling this function.
* Return: %NULL if not found.
*/
static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
{
return (const void *)ieee80211_bss_get_elem(bss, id);
}
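/*
* Example (illustrative sketch): looking up the SSID element of a scan
* result under RCU protection and copying it out before dropping the lock.
*
*	const struct element *elem;
*	u8 ssid[IEEE80211_MAX_SSID_LEN];
*	u8 ssid_len = 0;
*
*	rcu_read_lock();
*	elem = ieee80211_bss_get_elem(bss, WLAN_EID_SSID);
*	if (elem && elem->datalen <= IEEE80211_MAX_SSID_LEN) {
*		ssid_len = elem->datalen;
*		memcpy(ssid, elem->data, ssid_len);
*	}
*	rcu_read_unlock();
*/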
/**
* struct cfg80211_auth_request - Authentication request data
*
* This structure provides information needed to complete IEEE 802.11
* authentication.
*
* @bss: The BSS to authenticate with, the callee must obtain a reference
* to it if it needs to keep it.
* @supported_selectors: List of selectors that should be assumed to be
* supported by the station.
* SAE_H2E must be assumed supported if set to %NULL.
* @supported_selectors_len: Length of supported_selectors in octets.
* @auth_type: Authentication type (algorithm)
* @ie: Extra IEs to add to Authentication frame or %NULL
* @ie_len: Length of ie buffer in octets
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
* @auth_data: Fields and elements in Authentication frames. This contains
* the authentication frame body (non-IE and IE data), excluding the
* Authentication algorithm number, i.e., starting at the Authentication
* transaction sequence number field.
* @auth_data_len: Length of auth_data buffer in octets
* @link_id: if >= 0, indicates authentication should be done as an MLD,
* the interface address is included as the MLD address and the
* necessary link (with the given link_id) will be created (and
* given an MLD address) by the driver
* @ap_mld_addr: AP MLD address in case of authentication request with
* an AP MLD, valid iff @link_id >= 0
*/
struct cfg80211_auth_request {
struct cfg80211_bss *bss;
const u8 *ie;
size_t ie_len;
const u8 *supported_selectors;
u8 supported_selectors_len;
enum nl80211_auth_type auth_type;
const u8 *key;
u8 key_len;
s8 key_idx;
const u8 *auth_data;
size_t auth_data_len;
s8 link_id;
const u8 *ap_mld_addr;
};
/**
* struct cfg80211_assoc_link - per-link information for MLO association
* @bss: the BSS pointer, see also &struct cfg80211_assoc_request::bss;
* if this is %NULL for a link, that link is not requested
* @elems: extra elements for the per-STA profile for this link
* @elems_len: length of the elements
* @disabled: If set this link should be included during association etc. but it
* should not be used until enabled by the AP MLD.
* @error: per-link error code, must be <= 0. If there is an error, then the
* operation as a whole must fail.
*/
struct cfg80211_assoc_link {
struct cfg80211_bss *bss;
const u8 *elems;
size_t elems_len;
bool disabled;
int error;
};
/**
* struct cfg80211_ml_reconf_req - MLO link reconfiguration request
* @add_links: data for links to add, see &struct cfg80211_assoc_link
* @rem_links: bitmap of links to remove
* @ext_mld_capa_ops: extended MLD capabilities and operations set by
* userspace for the ML reconfiguration action frame
*/
struct cfg80211_ml_reconf_req {
struct cfg80211_assoc_link add_links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 rem_links;
u16 ext_mld_capa_ops;
};
/**
* enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
*
* @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
* @ASSOC_REQ_DISABLE_VHT: Disable VHT
* @ASSOC_REQ_USE_RRM: Declare RRM capability in this association
* @CONNECT_REQ_EXTERNAL_AUTH_SUPPORT: User space indicates external
* authentication capability. Drivers can offload authentication to
* userspace if this flag is set. Only applicable for cfg80211_connect()
* request (connect callback).
* @ASSOC_REQ_DISABLE_HE: Disable HE
* @ASSOC_REQ_DISABLE_EHT: Disable EHT
* @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
* Drivers shall disable MLO features for the current association if this
* flag is not set.
* @ASSOC_REQ_SPP_AMSDU: SPP A-MSDUs will be used on this connection (if any)
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
ASSOC_REQ_DISABLE_VHT = BIT(1),
ASSOC_REQ_USE_RRM = BIT(2),
CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3),
ASSOC_REQ_DISABLE_HE = BIT(4),
ASSOC_REQ_DISABLE_EHT = BIT(5),
CONNECT_REQ_MLO_SUPPORT = BIT(6),
ASSOC_REQ_SPP_AMSDU = BIT(7),
};
/**
* struct cfg80211_assoc_request - (Re)Association request data
*
* This structure provides information needed to complete IEEE 802.11
* (re)association.
* @bss: The BSS to associate with. If the call is successful the driver is
* given a reference that it must give back to cfg80211_send_rx_assoc()
* or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
* association requests while already associating must be rejected.
* This also applies to the @links.bss parameter, which is used instead
* of this one (it is %NULL) for MLO associations.
* @ie: Extra IEs to add to (Re)Association Request frame or %NULL
* @ie_len: Length of ie buffer in octets
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
* @crypto: crypto settings
* @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
* to indicate a request to reassociate within the ESS instead of a request
* to do the initial association with the ESS. When included, this is set to
* the BSSID of the current association, i.e., to the value that is
* included in the Current AP address field of the Reassociation Request
* frame.
* @flags: See &enum cfg80211_assoc_req_flags
* @supported_selectors: supported BSS selectors in IEEE 802.11 format
* (or %NULL for no change).
* If %NULL, then support for SAE_H2E should be assumed.
* @supported_selectors_len: number of supported BSS selectors
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
* will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT capability override
* @vht_capa_mask: VHT capability mask indicating which fields to use
* @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or
* %NULL if FILS is not used.
* @fils_kek_len: Length of fils_kek in octets
* @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
* Request/Response frame or %NULL if FILS is not used. This field starts
* with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
* @s1g_capa: S1G capability override
* @s1g_capa_mask: S1G capability override mask
* @links: per-link information for MLO connections
* @link_id: >= 0 for MLO connections, where links are given, and indicates
* the link on which the association request should be sent
* @ap_mld_addr: AP MLD address in case of MLO association request,
* valid iff @link_id >= 0
* @ext_mld_capa_ops: extended MLD capabilities and operations set by
* userspace for the association
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
const u8 *ie, *prev_bssid;
size_t ie_len;
struct cfg80211_crypto_settings crypto;
bool use_mfp;
u32 flags;
const u8 *supported_selectors;
u8 supported_selectors_len;
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa, vht_capa_mask;
const u8 *fils_kek;
size_t fils_kek_len;
const u8 *fils_nonces;
struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask;
struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS];
const u8 *ap_mld_addr;
s8 link_id;
u16 ext_mld_capa_ops;
};
/**
* struct cfg80211_deauth_request - Deauthentication request data
*
* This structure provides information needed to complete IEEE 802.11
* deauthentication.
*
* @bssid: the BSSID or AP MLD address to deauthenticate from
* @ie: Extra IEs to add to Deauthentication frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the deauthentication
* @local_state_change: if set, change local state only and
* do not send a Deauthentication frame
*/
struct cfg80211_deauth_request {
const u8 *bssid;
const u8 *ie;
size_t ie_len;
u16 reason_code;
bool local_state_change;
};
/**
* struct cfg80211_disassoc_request - Disassociation request data
*
* This structure provides information needed to complete IEEE 802.11
* disassociation.
*
* @ap_addr: the BSSID or AP MLD address to disassociate from
* @ie: Extra IEs to add to Disassociation frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the disassociation
* @local_state_change: This is a request for a local state change only, i.e.,
* no Disassociation frame is to be transmitted.
*/
struct cfg80211_disassoc_request {
const u8 *ap_addr;
const u8 *ie;
size_t ie_len;
u16 reason_code;
bool local_state_change;
};
/**
* struct cfg80211_ibss_params - IBSS parameters
*
* This structure defines the IBSS parameters for the join_ibss()
* method.
*
* @ssid: The SSID, will always be non-null.
* @ssid_len: The length of the SSID, will always be non-zero.
* @bssid: Fixed BSSID requested, may be %NULL, if set do not
* search for IBSSs with a different BSSID.
* @chandef: defines the channel to use if no other IBSS to join can be found
* @channel_fixed: The channel should be fixed -- do not search for
* IBSSs to join on other channels.
* @ie: information element(s) to include in the beacon
* @ie_len: length of the information element(s) in octets
* @beacon_interval: beacon interval to use
* @privacy: this is a protected network, keys will be configured
* after joining
* @control_port: whether user space controls IEEE 802.1X port, i.e.,
* sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
* required to assume that the port is unauthorized until authorized by
* user space. Otherwise, port is marked authorized by default.
* @control_port_over_nl80211: TRUE if userspace expects to exchange control
* port frames over NL80211 instead of the network interface.
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
* changes the channel when a radar is detected. This is required
* to operate on DFS channels.
* @basic_rates: bitmap of basic rates to use when creating the IBSS
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
* @ht_capa: HT Capabilities overrides. Values set in ht_capa_mask
* will be used in ht_capa. Unsupported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @wep_keys: static WEP keys, if not NULL points to an array of
* CFG80211_MAX_WEP_KEYS WEP keys
* @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_ibss_params {
const u8 *ssid;
const u8 *bssid;
struct cfg80211_chan_def chandef;
const u8 *ie;
u8 ssid_len, ie_len;
u16 beacon_interval;
u32 basic_rates;
bool channel_fixed;
bool privacy;
bool control_port;
bool control_port_over_nl80211;
bool userspace_handles_dfs;
int mcast_rate[NUM_NL80211_BANDS];
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
struct key_params *wep_keys;
int wep_tx_key;
};
/**
* struct cfg80211_bss_selection - connection parameters for BSS selection.
*
* @behaviour: requested BSS selection behaviour.
* @param: parameters for the requested behaviour.
* @param.band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
* @param.adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
*/
struct cfg80211_bss_selection {
enum nl80211_bss_select_attr behaviour;
union {
enum nl80211_band band_pref;
struct cfg80211_bss_select_adjust adjust;
} param;
};
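/*
* Illustrative sketch (not part of the API): requesting a band preference
* through the selection union documented above.
*
*	struct cfg80211_bss_selection sel = {
*		.behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF,
*		.param.band_pref = NL80211_BAND_5GHZ,
*	};
*/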
/**
* struct cfg80211_connect_params - Connection parameters
*
* This structure provides information needed to complete IEEE 802.11
* authentication and association.
*
* @channel: The channel to use or %NULL if not specified (auto-select based
* on scan results)
* @channel_hint: The channel of the recommended BSS for initial connection or
* %NULL if not specified
* @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
* results)
* @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
* %NULL if not specified. Unlike the @bssid parameter, the driver is
* allowed to ignore this @bssid_hint if it has knowledge of a better BSS
* to use.
* @ssid: SSID
* @ssid_len: Length of ssid in octets
* @auth_type: Authentication type (algorithm)
* @ie: IEs for association request
* @ie_len: Length of @ie in octets
* @privacy: indicates whether privacy-enabled APs should be used
* @mfp: indicate whether management frame protection is used
* @crypto: crypto settings
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
* @flags: See &enum cfg80211_assoc_req_flags
* @bg_scan_period: Background scan period in seconds
* or -1 to indicate that default value is to be used.
* @ht_capa: HT Capabilities overrides. Values set in ht_capa_mask
* will be used in ht_capa. Unsupported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT Capability overrides
* @vht_capa_mask: The bits of vht_capa which are to be used.
* @pbss: if set, connect to a PCP instead of AP. Valid for DMG
* networks.
* @bss_select: criteria to be used for BSS selection.
* @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
* to indicate a request to reassociate within the ESS instead of a request
* to do the initial association with the ESS. When included, this is set to
* the BSSID of the current association, i.e., to the value that is
* included in the Current AP address field of the Reassociation Request
* frame.
* @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
* NAI or %NULL if not specified. This is used to construct FILS wrapped
* data IE.
* @fils_erp_username_len: Length of @fils_erp_username in octets.
* @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
* %NULL if not specified. This specifies the domain name of ER server and
* is used to construct FILS wrapped data IE.
* @fils_erp_realm_len: Length of @fils_erp_realm in octets.
* @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
* messages. This is also used to construct FILS wrapped data IE.
* @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
* keys in FILS or %NULL if not specified.
* @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
* @want_1x: indicates user-space supports and wants to use 802.1X driver
* offload of 4-way handshake.
* @edmg: define the EDMG channels.
* This may specify multiple channels and bonding options for the driver
* to choose from, based on BSS configuration.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
struct ieee80211_channel *channel_hint;
const u8 *bssid;
const u8 *bssid_hint;
const u8 *ssid;
size_t ssid_len;
enum nl80211_auth_type auth_type;
const u8 *ie;
size_t ie_len;
bool privacy;
enum nl80211_mfp mfp;
struct cfg80211_crypto_settings crypto;
const u8 *key;
u8 key_len, key_idx;
u32 flags;
int bg_scan_period;
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa;
struct ieee80211_vht_cap vht_capa_mask;
bool pbss;
struct cfg80211_bss_selection bss_select;
const u8 *prev_bssid;
const u8 *fils_erp_username;
size_t fils_erp_username_len;
const u8 *fils_erp_realm;
size_t fils_erp_realm_len;
u16 fils_erp_next_seq_num;
const u8 *fils_erp_rrk;
size_t fils_erp_rrk_len;
bool want_1x;
struct ieee80211_edmg edmg;
};
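/*
* Illustrative sketch (not part of the API): the minimum set of fields a
* caller would typically fill for an open-system connection. All values
* here are example assumptions.
*
*	static const u8 my_ssid[] = "example-net";
*	struct cfg80211_connect_params params = {};
*
*	params.ssid = my_ssid;
*	params.ssid_len = sizeof(my_ssid) - 1;
*	params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
*	params.bg_scan_period = -1;	/* use the driver default */
*	/* channel/bssid left NULL: auto-select based on scan results */
*/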
/**
* enum cfg80211_connect_params_changed - Connection parameters being updated
*
* This enum provides information of all connect parameters that
* have to be updated as part of update_connect_params() call.
*
* @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
* @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm,
* username, erp sequence number and rrk) are updated
* @UPDATE_AUTH_TYPE: Indicates that authentication type is updated
*/
enum cfg80211_connect_params_changed {
UPDATE_ASSOC_IES = BIT(0),
UPDATE_FILS_ERP_INFO = BIT(1),
UPDATE_AUTH_TYPE = BIT(2),
};
/**
* enum wiphy_params_flags - set_wiphy_params bitfield values
* @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
* @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
* @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
* @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
* @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
* @WIPHY_PARAM_DYN_ACK: dynack has been enabled
* @WIPHY_PARAM_TXQ_LIMIT: TXQ packet limit has been changed
* @WIPHY_PARAM_TXQ_MEMORY_LIMIT: TXQ memory limit has been changed
* @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum
*/
enum wiphy_params_flags {
WIPHY_PARAM_RETRY_SHORT = BIT(0),
WIPHY_PARAM_RETRY_LONG = BIT(1),
WIPHY_PARAM_FRAG_THRESHOLD = BIT(2),
WIPHY_PARAM_RTS_THRESHOLD = BIT(3),
WIPHY_PARAM_COVERAGE_CLASS = BIT(4),
WIPHY_PARAM_DYN_ACK = BIT(5),
WIPHY_PARAM_TXQ_LIMIT = BIT(6),
WIPHY_PARAM_TXQ_MEMORY_LIMIT = BIT(7),
WIPHY_PARAM_TXQ_QUANTUM = BIT(8),
};
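/*
* Illustrative sketch (not part of the API): how a driver's set_wiphy_params()
* handler would typically consume the @changed bitfield; drv_apply_rts() and
* drv_apply_retry() are hypothetical driver helpers.
*
*	static int drv_set_wiphy_params(struct wiphy *wiphy, u32 changed)
*	{
*		if (changed & WIPHY_PARAM_RTS_THRESHOLD)
*			drv_apply_rts(wiphy->rts_threshold);
*		if (changed & WIPHY_PARAM_RETRY_SHORT)
*			drv_apply_retry(wiphy->retry_short);
*		return 0;
*	}
*/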
#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
/* The per TXQ device queue limit in airtime */
#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L 5000
#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H 12000
/* The per interface airtime threshold to switch to lower queue limit */
#define IEEE80211_AQL_THRESHOLD 24000
/**
* struct cfg80211_pmksa - PMK Security Association
*
* This structure is passed to the set/del_pmksa() method for PMKSA
* caching.
*
* @bssid: The AP's BSSID (may be %NULL).
* @pmkid: The identifier to refer a PMKSA.
* @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
* derivation by a FILS STA. Otherwise, %NULL.
* @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
* the hash algorithm used to generate this.
* @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
* cache identifier (may be %NULL).
* @ssid_len: Length of the @ssid in octets.
* @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
* scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
* %NULL).
* @pmk_lifetime: Maximum lifetime for PMKSA in seconds
* (dot11RSNAConfigPMKLifetime) or 0 if not specified.
* The configured PMKSA must not be used for PMKSA caching after
* expiration and any keys derived from this PMK become invalid on
* expiration, i.e., the current association must be dropped if the PMK
* used for it expires.
* @pmk_reauth_threshold: Threshold time for reauthentication (percentage of
* PMK lifetime, dot11RSNAConfigPMKReauthThreshold) or 0 if not specified.
* Drivers are expected to trigger a full authentication instead of using
* this PMKSA for caching when reassociating to a new BSS after this
* threshold to generate a new PMK before the current one expires.
*/
struct cfg80211_pmksa {
const u8 *bssid;
const u8 *pmkid;
const u8 *pmk;
size_t pmk_len;
const u8 *ssid;
size_t ssid_len;
const u8 *cache_id;
u32 pmk_lifetime;
u8 pmk_reauth_threshold;
};
/**
* struct cfg80211_pkt_pattern - packet pattern
* @mask: bitmask where to match pattern and where to ignore bytes,
* one bit per byte, in same format as nl80211
* @pattern: bytes to match where bitmask is 1
* @pattern_len: length of pattern (in bytes)
* @pkt_offset: packet offset (in bytes)
*
* Internal note: @mask and @pattern are allocated in one chunk of
* memory, free @mask only!
*/
struct cfg80211_pkt_pattern {
const u8 *mask, *pattern;
int pattern_len;
int pkt_offset;
};
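/*
* Illustrative sketch (not part of the API): how the one-bit-per-byte @mask
* described above is interpreted when matching a received packet against
* @pattern starting at @pkt_offset.
*
*	static bool pattern_matches(const struct cfg80211_pkt_pattern *p,
*				    const u8 *pkt, int pkt_len)
*	{
*		int i;
*
*		if (p->pkt_offset + p->pattern_len > pkt_len)
*			return false;
*		for (i = 0; i < p->pattern_len; i++) {
*			if (!(p->mask[i / 8] & BIT(i % 8)))
*				continue;	/* masked-out byte, ignore */
*			if (pkt[p->pkt_offset + i] != p->pattern[i])
*				return false;
*		}
*		return true;
*	}
*/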
/**
* struct cfg80211_wowlan_tcp - TCP connection parameters
*
* @sock: (internal) socket for source port allocation
* @src: source IP address
* @dst: destination IP address
* @dst_mac: destination MAC address
* @src_port: source port
* @dst_port: destination port
* @payload_len: data payload length
* @payload: data payload buffer
* @payload_seq: payload sequence stamping configuration
* @data_interval: interval at which to send data packets
* @wake_len: wakeup payload match length
* @wake_data: wakeup payload match data
* @wake_mask: wakeup payload match mask
* @tokens_size: length of the tokens buffer
* @payload_tok: payload token usage configuration
*/
struct cfg80211_wowlan_tcp {
struct socket *sock;
__be32 src, dst;
u16 src_port, dst_port;
u8 dst_mac[ETH_ALEN];
int payload_len;
const u8 *payload;
struct nl80211_wowlan_tcp_data_seq payload_seq;
u32 data_interval;
u32 wake_len;
const u8 *wake_data, *wake_mask;
u32 tokens_size;
/* must be last, variable member */
struct nl80211_wowlan_tcp_data_token payload_tok;
};
/**
* struct cfg80211_wowlan - Wake on Wireless-LAN support info
*
* This structure defines the enabled WoWLAN triggers for the device.
* @any: wake up on any activity -- special trigger if device continues
* operating as normal during suspend
* @disconnect: wake up if getting disconnected
* @magic_pkt: wake up on receiving magic packet
* @patterns: wake up on receiving packet matching a pattern
* @n_patterns: number of patterns
* @gtk_rekey_failure: wake up on GTK rekey failure
* @eap_identity_req: wake up on EAP identity request packet
* @four_way_handshake: wake up on 4-way handshake
* @rfkill_release: wake up when rfkill is released
* @tcp: TCP connection establishment/wakeup parameters, see nl80211.h.
* NULL if not configured.
* @nd_config: configuration for the scan to be used for net detect wake.
*/
struct cfg80211_wowlan {
bool any, disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release;
struct cfg80211_pkt_pattern *patterns;
struct cfg80211_wowlan_tcp *tcp;
int n_patterns;
struct cfg80211_sched_scan_request *nd_config;
};
/**
* struct cfg80211_coalesce_rules - Coalesce rule parameters
*
* This structure defines coalesce rule for the device.
* @delay: maximum coalescing delay in msecs.
* @condition: condition for packet coalescence.
* see &enum nl80211_coalesce_condition.
* @patterns: array of packet patterns
* @n_patterns: number of patterns
*/
struct cfg80211_coalesce_rules {
int delay;
enum nl80211_coalesce_condition condition;
struct cfg80211_pkt_pattern *patterns;
int n_patterns;
};
/**
* struct cfg80211_coalesce - Packet coalescing settings
*
* This structure defines coalescing settings.
* @rules: array of coalesce rules
* @n_rules: number of rules
*/
struct cfg80211_coalesce {
int n_rules;
struct cfg80211_coalesce_rules rules[] __counted_by(n_rules);
};
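/*
* Illustrative sketch (not part of the API): allocating the flexible-array
* structure above; struct_size() comes from <linux/overflow.h> and n_rules is
* an example variable.
*
*	struct cfg80211_coalesce *coalesce;
*
*	coalesce = kzalloc(struct_size(coalesce, rules, n_rules), GFP_KERNEL);
*	if (!coalesce)
*		return -ENOMEM;
*	coalesce->n_rules = n_rules;
*/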
/**
* struct cfg80211_wowlan_nd_match - information about the match
*
* @ssid: SSID of the match that triggered the wake up
* @n_channels: Number of channels where the match occurred. This
* value may be zero if the driver can't report the channels.
* @channels: center frequencies of the channels where a match
* occurred (in MHz)
*/
struct cfg80211_wowlan_nd_match {
struct cfg80211_ssid ssid;
int n_channels;
u32 channels[] __counted_by(n_channels);
};
/**
* struct cfg80211_wowlan_nd_info - net detect wake up information
*
* @n_matches: Number of match information instances provided in
* @matches. This value may be zero if the driver can't provide
* match information.
* @matches: Array of pointers to matches containing information about
* the matches that triggered the wake up.
*/
struct cfg80211_wowlan_nd_info {
int n_matches;
struct cfg80211_wowlan_nd_match *matches[] __counted_by(n_matches);
};
/**
* struct cfg80211_wowlan_wakeup - wakeup report
* @disconnect: woke up by getting disconnected
* @magic_pkt: woke up by receiving magic packet
* @gtk_rekey_failure: woke up by GTK rekey failure
* @eap_identity_req: woke up by EAP identity request packet
* @four_way_handshake: woke up by 4-way handshake
* @rfkill_release: woke up by rfkill being released
* @pattern_idx: pattern that caused wakeup, -1 if not due to pattern
* @packet_present_len: copied wakeup packet data
* @packet_len: original wakeup packet length
* @packet: The packet causing the wakeup, if any.
* @packet_80211: For pattern match, magic packet and other data frame
* triggers, an 802.3 frame should be reported; for disconnect due to
* deauth, an 802.11 frame. This flag indicates which of the two the
* reported @packet is.
* @tcp_match: TCP wakeup packet received
* @tcp_connlost: TCP connection lost or failed to establish
* @tcp_nomoretokens: TCP data ran out of tokens
* @net_detect: if not %NULL, woke up because of net detect
* @unprot_deauth_disassoc: woke up due to unprotected deauth or
* disassoc frame (in MFP).
*/
struct cfg80211_wowlan_wakeup {
bool disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release, packet_80211,
tcp_match, tcp_connlost, tcp_nomoretokens,
unprot_deauth_disassoc;
s32 pattern_idx;
u32 packet_present_len, packet_len;
const void *packet;
struct cfg80211_wowlan_nd_info *net_detect;
};
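/*
* Illustrative sketch (not part of the API): a driver reporting a pattern
* wakeup on resume, assuming the cfg80211_report_wowlan_wakeup() notification
* helper; matched_idx, pkt_data, pkt_len and copied_len are example
* assumptions.
*
*	struct cfg80211_wowlan_wakeup wakeup = {
*		.pattern_idx = matched_idx,	/* -1 if not due to a pattern */
*		.packet = pkt_data,
*		.packet_len = pkt_len,
*		.packet_present_len = copied_len,
*		.packet_80211 = false,		/* an 802.3 frame is reported */
*	};
*
*	cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);
*/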
/**
* struct cfg80211_gtk_rekey_data - rekey data
* @kek: key encryption key (@kek_len bytes)
* @kck: key confirmation key (@kck_len bytes)
* @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
* @kek_len: length of kek
* @kck_len: length of kck
* @akm: akm (oui, id)
*/
struct cfg80211_gtk_rekey_data {
const u8 *kek, *kck, *replay_ctr;
u32 akm;
u8 kek_len, kck_len;
};
/**
* struct cfg80211_update_ft_ies_params - FT IE Information
*
* This structure provides information needed to update the fast transition IE
*
* @md: The Mobility Domain ID, 2 Octet value
* @ie: Fast Transition IEs
* @ie_len: Length of ft_ie in octets
*/
struct cfg80211_update_ft_ies_params {
u16 md;
const u8 *ie;
size_t ie_len;
};
/**
* struct cfg80211_mgmt_tx_params - mgmt tx parameters
*
* This structure provides information needed to transmit a mgmt frame
*
* @chan: channel to use
* @offchan: indicates whether off channel operation is required
* @wait: duration (in milliseconds) to remain on channel (ROC) for the TX
* @buf: buffer to transmit
* @len: buffer length
* @no_cck: don't use cck rates for this frame
* @dont_wait_for_ack: tells the low level not to wait for an ack
* @n_csa_offsets: length of csa_offsets array
* @csa_offsets: array of all the csa offsets in the frame
* @link_id: for MLO, the link ID to transmit on, -1 if not given; note
* that the link ID isn't validated (much), it's in range but the
* link might not exist (or be used by the receiver STA)
*/
struct cfg80211_mgmt_tx_params {
struct ieee80211_channel *chan;
bool offchan;
unsigned int wait;
const u8 *buf;
size_t len;
bool no_cck;
bool dont_wait_for_ack;
int n_csa_offsets;
const u16 *csa_offsets;
int link_id;
};
/**
* struct cfg80211_dscp_exception - DSCP exception
*
* @dscp: DSCP value that does not adhere to the user priority range definition
* @up: user priority value to which the corresponding DSCP value belongs
*/
struct cfg80211_dscp_exception {
u8 dscp;
u8 up;
};
/**
* struct cfg80211_dscp_range - DSCP range definition for user priority
*
* @low: lowest DSCP value of this user priority range, inclusive
* @high: highest DSCP value of this user priority range, inclusive
*/
struct cfg80211_dscp_range {
u8 low;
u8 high;
};
/* QoS Map Set element length defined in IEEE Std 802.11-2012, 8.4.2.97 */
#define IEEE80211_QOS_MAP_MAX_EX 21
#define IEEE80211_QOS_MAP_LEN_MIN 16
#define IEEE80211_QOS_MAP_LEN_MAX \
(IEEE80211_QOS_MAP_LEN_MIN + 2 * IEEE80211_QOS_MAP_MAX_EX)
/**
* struct cfg80211_qos_map - QoS Map Information
*
* This struct defines the Interworking QoS map setting for DSCP values
*
* @num_des: number of DSCP exceptions (0..21)
* @dscp_exception: optionally up to a maximum of 21 DSCP exceptions from
* the user priority DSCP range definition
* @up: DSCP range definition for a particular user priority
*/
struct cfg80211_qos_map {
u8 num_des;
struct cfg80211_dscp_exception dscp_exception[IEEE80211_QOS_MAP_MAX_EX];
struct cfg80211_dscp_range up[8];
};
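/*
* Illustrative sketch (not part of the API): unpacking a QoS Map Set element
* body (without the element header) of @len octets into the structure above;
* the length constraints follow the IEEE80211_QOS_MAP_LEN_* macros.
*
*	static int parse_qos_map(const u8 *pos, size_t len,
*				 struct cfg80211_qos_map *map)
*	{
*		unsigned int i;
*
*		if (len < IEEE80211_QOS_MAP_LEN_MIN ||
*		    len > IEEE80211_QOS_MAP_LEN_MAX || (len & 1))
*			return -EINVAL;
*		map->num_des = (len - IEEE80211_QOS_MAP_LEN_MIN) / 2;
*		for (i = 0; i < map->num_des; i++) {
*			map->dscp_exception[i].dscp = *pos++;
*			map->dscp_exception[i].up = *pos++;
*		}
*		for (i = 0; i < 8; i++) {
*			map->up[i].low = *pos++;
*			map->up[i].high = *pos++;
*		}
*		return 0;
*	}
*/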
/**
* struct cfg80211_nan_band_config - NAN band specific configuration
*
* @chan: Pointer to the IEEE 802.11 channel structure. The channel to be used
* for NAN operations on this band. For 2.4 GHz band, this is always
* channel 6. For 5 GHz band, the channel is either 44 or 149, according
* to the regulatory constraints. If chan pointer is NULL the entire band
* configuration entry is considered invalid and should not be used.
* @rssi_close: RSSI close threshold used for NAN state transition algorithm
* as described in chapters 3.3.6 and 3.3.7 "NAN Device Role and State
* Transition" of Wi-Fi Aware Specification v4.0. If not
* specified (set to 0), default device value is used. The value should
* be greater than -60 dBm.
* @rssi_middle: RSSI middle threshold used for NAN state transition algorithm.
* as described in chapters 3.3.6 and 3.3.7 "NAN Device Role and State
* Transition" of Wi-Fi Aware Specification v4.0. If not
* specified (set to 0), default device value is used. The value should be
* greater than -75 dBm and less than rssi_close.
* @awake_dw_interval: Committed DW interval. Valid values range from 0 to 5.
* 0 indicates no wakeup for DW and can't be used on the 2.4 GHz band;
* otherwise the interval is 2^(n-1).
* @disable_scan: If true, the device will not scan this band for cluster
* merge. Disabling scan on 2.4 GHz band is not allowed.
*/
struct cfg80211_nan_band_config {
struct ieee80211_channel *chan;
s8 rssi_close;
s8 rssi_middle;
u8 awake_dw_interval;
bool disable_scan;
};
/**
* struct cfg80211_nan_conf - NAN configuration
*
* This struct defines NAN configuration parameters
*
* @master_pref: master preference (1 - 255)
* @bands: operating bands, a bitmap of &enum nl80211_band values.
* For instance, for NL80211_BAND_2GHZ, bit 0 would be set
* (i.e. BIT(NL80211_BAND_2GHZ)).
* @cluster_id: cluster ID used for NAN synchronization. This is a MAC address
* that can take a value from 50-6F-9A-01-00-00 to 50-6F-9A-01-FF-FF.
* If NULL, the device will pick a random Cluster ID.
* @scan_period: period (in seconds) between NAN scans.
* @scan_dwell_time: dwell time (in milliseconds) for NAN scans.
* @discovery_beacon_interval: interval (in TUs) for discovery beacons.
* @enable_dw_notification: flag to enable/disable discovery window
* notifications.
* @band_cfgs: array of band specific configurations, indexed by
* &enum nl80211_band values.
* @extra_nan_attrs: pointer to additional NAN attributes.
* @extra_nan_attrs_len: length of the additional NAN attributes.
* @vendor_elems: pointer to vendor-specific elements.
* @vendor_elems_len: length of the vendor-specific elements.
*/
struct cfg80211_nan_conf {
u8 master_pref;
u8 bands;
const u8 *cluster_id;
u16 scan_period;
u16 scan_dwell_time;
u8 discovery_beacon_interval;
bool enable_dw_notification;
struct cfg80211_nan_band_config band_cfgs[NUM_NL80211_BANDS];
const u8 *extra_nan_attrs;
u16 extra_nan_attrs_len;
const u8 *vendor_elems;
u16 vendor_elems_len;
};
/**
* enum cfg80211_nan_conf_changes - indicates changed fields in NAN
* configuration
*
* @CFG80211_NAN_CONF_CHANGED_PREF: master preference
* @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands
* @CFG80211_NAN_CONF_CHANGED_CONFIG: changed additional configuration.
* When this flag is set, it indicates that some additional attribute(s)
* (other than master_pref and bands) have been changed. In this case,
* all the unchanged attributes will be properly configured to their
* previous values. The driver doesn't need to store any
* previous configuration besides master_pref and bands.
*/
enum cfg80211_nan_conf_changes {
CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1),
CFG80211_NAN_CONF_CHANGED_CONFIG = BIT(2),
};
/**
* struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter
*
* @filter: the content of the filter
* @len: the length of the filter
*/
struct cfg80211_nan_func_filter {
const u8 *filter;
u8 len;
};
/**
* struct cfg80211_nan_func - a NAN function
*
* @type: &enum nl80211_nan_function_type
* @service_id: the service ID of the function
* @publish_type: &nl80211_nan_publish_type
* @close_range: if true, the range should be limited. Threshold is
* implementation specific.
* @publish_bcast: if true, the solicited publish should be broadcast
* @subscribe_active: if true, the subscribe is active
* @followup_id: the instance ID for follow up
* @followup_reqid: the requester instance ID for follow up
* @followup_dest: MAC address of the recipient of the follow up
* @ttl: time to live counter in DW.
* @serv_spec_info: Service Specific Info
* @serv_spec_info_len: Service Specific Info length
* @srf_include: if true, SRF is inclusive
* @srf_bf: Bloom Filter
* @srf_bf_len: Bloom Filter length
* @srf_bf_idx: Bloom Filter index
* @srf_macs: SRF MAC addresses
* @srf_num_macs: number of MAC addresses in SRF
* @rx_filters: rx filters that are matched with corresponding peer's tx_filter
* @tx_filters: filters that should be transmitted in the SDF.
* @num_rx_filters: length of &rx_filters.
* @num_tx_filters: length of &tx_filters.
* @instance_id: driver allocated id of the function.
* @cookie: unique NAN function identifier.
*/
struct cfg80211_nan_func {
enum nl80211_nan_function_type type;
u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN];
u8 publish_type;
bool close_range;
bool publish_bcast;
bool subscribe_active;
u8 followup_id;
u8 followup_reqid;
struct mac_address followup_dest;
u32 ttl;
const u8 *serv_spec_info;
u8 serv_spec_info_len;
bool srf_include;
const u8 *srf_bf;
u8 srf_bf_len;
u8 srf_bf_idx;
struct mac_address *srf_macs;
int srf_num_macs;
struct cfg80211_nan_func_filter *rx_filters;
struct cfg80211_nan_func_filter *tx_filters;
u8 num_tx_filters;
u8 num_rx_filters;
u8 instance_id;
u64 cookie;
};
/**
* struct cfg80211_pmk_conf - PMK configuration
*
* @aa: authenticator address
* @pmk_len: PMK length in bytes.
* @pmk: the PMK material
* @pmk_r0_name: PMK-R0 Name. NULL if not applicable (i.e., the PMK
* is not PMK-R0). When pmk_r0_name is not NULL, the pmk field
* holds PMK-R0.
*/
struct cfg80211_pmk_conf {
const u8 *aa;
u8 pmk_len;
const u8 *pmk;
const u8 *pmk_r0_name;
};
/**
* struct cfg80211_external_auth_params - Trigger External authentication.
*
* Commonly used across the external auth request and event interfaces.
*
* @action: action type / trigger for external authentication. Only significant
* for the authentication request event interface (driver to user space).
* @bssid: BSSID of the peer with which the authentication has
* to happen. Used by both the authentication request event and
* authentication response command interface.
* @ssid: SSID of the AP. Used by both the authentication request event and
* authentication response command interface.
* @key_mgmt_suite: AKM suite of the respective authentication. Used by the
* authentication request event interface.
* @status: status code, %WLAN_STATUS_SUCCESS for successful authentication,
* use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you
* the real status code for failures. Used only for the authentication
* response command interface (user space to driver).
* @pmkid: The identifier to refer a PMKSA.
* @mld_addr: MLD address of the peer. Used by the authentication request event
* interface. Driver indicates this to enable MLO during the authentication
* offload to user space. Driver shall look at %NL80211_ATTR_MLO_SUPPORT
* flag capability in NL80211_CMD_CONNECT to know whether the user space
* supports enabling MLO during the authentication offload.
* User space should use the address of the interface (on which the
* authentication request event reported) as self MLD address. User space
* and driver should use MLD addresses in RA, TA and BSSID fields of
* authentication frames sent or received via cfg80211. The driver
* translates the MLD addresses to/from link addresses based on the link
* chosen for the authentication.
*/
struct cfg80211_external_auth_params {
enum nl80211_external_auth_action action;
u8 bssid[ETH_ALEN] __aligned(2);
struct cfg80211_ssid ssid;
unsigned int key_mgmt_suite;
u16 status;
const u8 *pmkid;
u8 mld_addr[ETH_ALEN] __aligned(2);
};
/**
* struct cfg80211_ftm_responder_stats - FTM responder statistics
*
* @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to
* indicate the relevant values in this struct for them
* @success_num: number of FTM sessions in which all frames were successfully
* answered
* @partial_num: number of FTM sessions in which part of frames were
* successfully answered
* @failed_num: number of failed FTM sessions
* @asap_num: number of ASAP FTM sessions
* @non_asap_num: number of non-ASAP FTM sessions
* @total_duration_ms: total sessions durations - gives an indication
* of how much time the responder was busy
* @unknown_triggers_num: number of unknown FTM triggers - triggers from
* initiators that didn't successfully finish the negotiation phase with
* the responder
* @reschedule_requests_num: number of FTM reschedule requests - initiator asks
* for a new scheduling although it already has a scheduled FTM slot
* @out_of_window_triggers_num: total FTM triggers out of scheduled window
*/
struct cfg80211_ftm_responder_stats {
u32 filled;
u32 success_num;
u32 partial_num;
u32 failed_num;
u32 asap_num;
u32 non_asap_num;
u64 total_duration_ms;
u32 unknown_triggers_num;
u32 reschedule_requests_num;
u32 out_of_window_triggers_num;
};
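/*
* Illustrative sketch (not part of the API): a driver filling only the
* statistics it tracks and flagging them via @filled; the exact
* NL80211_FTM_STATS_* names are assumptions taken from the nl80211 FTM
* statistics enum, and hw_success/hw_failed are example variables.
*
*	struct cfg80211_ftm_responder_stats stats = {};
*
*	stats.success_num = hw_success;
*	stats.failed_num = hw_failed;
*	stats.filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) |
*		       BIT(NL80211_FTM_STATS_FAILED_NUM);
*/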
/**
* struct cfg80211_pmsr_ftm_result - FTM result
* @failure_reason: if this measurement failed (PMSR status is
* %NL80211_PMSR_STATUS_FAILURE), this gives a more precise
* reason than just "failure"
* @burst_index: if reporting partial results, this is the index
* in [0 .. num_bursts-1] of the burst that's being reported
* @num_ftmr_attempts: number of FTM request frames transmitted
* @num_ftmr_successes: number of FTM request frames acked
* @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
* fill this to indicate in how many seconds a retry is deemed possible
* by the responder
* @num_bursts_exp: actual number of bursts exponent negotiated
* @burst_duration: actual burst duration negotiated
* @ftms_per_burst: actual FTMs per burst negotiated
* @lci_len: length of LCI information (if present)
* @civicloc_len: length of civic location information (if present)
* @lci: LCI data (may be %NULL)
* @civicloc: civic location data (may be %NULL)
* @rssi_avg: average RSSI over FTM action frames reported
* @rssi_spread: spread of the RSSI over FTM action frames reported
* @tx_rate: bitrate for transmitted FTM action frame response
* @rx_rate: bitrate of received FTM action frame
* @rtt_avg: average of RTTs measured (must have either this or @dist_avg)
* @rtt_variance: variance of RTTs measured (note that standard deviation is
* the square root of the variance)
* @rtt_spread: spread of the RTTs measured
* @dist_avg: average of distances (mm) measured
* (must have either this or @rtt_avg)
* @dist_variance: variance of distances measured (see also @rtt_variance)
* @dist_spread: spread of distances measured (see also @rtt_spread)
* @num_ftmr_attempts_valid: @num_ftmr_attempts is valid
* @num_ftmr_successes_valid: @num_ftmr_successes is valid
* @rssi_avg_valid: @rssi_avg is valid
* @rssi_spread_valid: @rssi_spread is valid
* @tx_rate_valid: @tx_rate is valid
* @rx_rate_valid: @rx_rate is valid
* @rtt_avg_valid: @rtt_avg is valid
* @rtt_variance_valid: @rtt_variance is valid
* @rtt_spread_valid: @rtt_spread is valid
* @dist_avg_valid: @dist_avg is valid
* @dist_variance_valid: @dist_variance is valid
* @dist_spread_valid: @dist_spread is valid
*/
struct cfg80211_pmsr_ftm_result {
const u8 *lci;
const u8 *civicloc;
unsigned int lci_len;
unsigned int civicloc_len;
enum nl80211_peer_measurement_ftm_failure_reasons failure_reason;
u32 num_ftmr_attempts, num_ftmr_successes;
s16 burst_index;
u8 busy_retry_time;
u8 num_bursts_exp;
u8 burst_duration;
u8 ftms_per_burst;
s32 rssi_avg;
s32 rssi_spread;
struct rate_info tx_rate, rx_rate;
s64 rtt_avg;
s64 rtt_variance;
s64 rtt_spread;
s64 dist_avg;
s64 dist_variance;
s64 dist_spread;
u16 num_ftmr_attempts_valid:1,
num_ftmr_successes_valid:1,
rssi_avg_valid:1,
rssi_spread_valid:1,
tx_rate_valid:1,
rx_rate_valid:1,
rtt_avg_valid:1,
rtt_variance_valid:1,
rtt_spread_valid:1,
dist_avg_valid:1,
dist_variance_valid:1,
dist_spread_valid:1;
};
/**
* struct cfg80211_pmsr_result - peer measurement result
* @addr: address of the peer
* @host_time: host time (use ktime_get_boottime(), adjusted to the time when
* the measurement was made)
* @ap_tsf: AP's TSF at measurement time
* @status: status of the measurement
* @final: if reporting partial results, mark this as the last one; if not
* reporting partial results always set this flag
* @ap_tsf_valid: indicates the @ap_tsf value is valid
* @type: type of the measurement reported, note that we only support reporting
* one type at a time, but you can report multiple results separately and
* they're all aggregated for userspace.
* @ftm: FTM result
*/
struct cfg80211_pmsr_result {
u64 host_time, ap_tsf;
enum nl80211_peer_measurement_status status;
u8 addr[ETH_ALEN];
u8 final:1,
ap_tsf_valid:1;
enum nl80211_peer_measurement_type type;
union {
struct cfg80211_pmsr_ftm_result ftm;
};
};
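/*
* Illustrative sketch (not part of the API): reporting a single final FTM
* result, assuming the cfg80211_pmsr_report()/cfg80211_pmsr_complete()
* helpers; req, peer_addr and rtt are example assumptions.
*
*	struct cfg80211_pmsr_result res = {
*		.host_time = ktime_get_boottime_ns(),
*		.status = NL80211_PMSR_STATUS_SUCCESS,
*		.final = 1,
*		.type = NL80211_PMSR_TYPE_FTM,
*	};
*
*	memcpy(res.addr, peer_addr, ETH_ALEN);
*	res.ftm.rtt_avg = rtt;
*	res.ftm.rtt_avg_valid = 1;
*
*	cfg80211_pmsr_report(wdev, req, &res, GFP_KERNEL);
*	cfg80211_pmsr_complete(wdev, req, GFP_KERNEL);
*/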
/**
* struct cfg80211_pmsr_ftm_request_peer - FTM request data
* @requested: indicates FTM is requested
* @preamble: frame preamble to use
* @burst_period: burst period to use
* @asap: indicates to use ASAP mode
* @num_bursts_exp: number of bursts exponent
* @burst_duration: burst duration
* @ftms_per_burst: number of FTMs per burst
* @ftmr_retries: number of retries for FTM request
* @request_lci: request LCI information
* @request_civicloc: request civic location information
* @trigger_based: use trigger based ranging for the measurement
* If neither @trigger_based nor @non_trigger_based is set,
* EDCA based ranging will be used.
* @non_trigger_based: use non trigger based ranging for the measurement
* If neither @trigger_based nor @non_trigger_based is set,
* EDCA based ranging will be used.
* @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either
* @trigger_based or @non_trigger_based is set.
* @bss_color: the bss color of the responder. Optional. Set to zero to
* indicate the driver should set the BSS color. Only valid if
* @non_trigger_based or @trigger_based is set.
*
* See also nl80211 for the respective attribute documentation.
*/
struct cfg80211_pmsr_ftm_request_peer {
enum nl80211_preamble preamble;
u16 burst_period;
u8 requested:1,
asap:1,
request_lci:1,
request_civicloc:1,
trigger_based:1,
non_trigger_based:1,
lmr_feedback:1;
u8 num_bursts_exp;
u8 burst_duration;
u8 ftms_per_burst;
u8 ftmr_retries;
u8 bss_color;
};
/**
* struct cfg80211_pmsr_request_peer - peer data for a peer measurement request
* @addr: MAC address
* @chandef: channel to use
* @report_ap_tsf: report the associated AP's TSF
* @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer
*/
struct cfg80211_pmsr_request_peer {
u8 addr[ETH_ALEN];
struct cfg80211_chan_def chandef;
u8 report_ap_tsf:1;
struct cfg80211_pmsr_ftm_request_peer ftm;
};
/**
* struct cfg80211_pmsr_request - peer measurement request
* @cookie: cookie, set by cfg80211
* @nl_portid: netlink portid - used by cfg80211
* @drv_data: driver data for this request, if required for aborting,
* not otherwise freed or anything by cfg80211
* @mac_addr: MAC address used for (randomised) request
* @mac_addr_mask: MAC address mask used for randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
* @list: used by cfg80211 to hold on to the request
* @timeout: timeout (in milliseconds) for the whole operation, if
* zero it means there's no timeout
* @n_peers: number of peers to do measurements with
* @peers: per-peer measurement request data
*/
struct cfg80211_pmsr_request {
u64 cookie;
void *drv_data;
u32 n_peers;
u32 nl_portid;
u32 timeout;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
struct list_head list;
struct cfg80211_pmsr_request_peer peers[] __counted_by(n_peers);
};
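/*
* Illustrative sketch (not part of the API): how the @mac_addr/@mac_addr_mask
* randomisation rule documented above could be applied when building the
* transmitter address for the measurement frames. Mask bits that are 0 keep
* the random value, mask bits that are 1 take the bit from @mac_addr.
*
*	static void pmsr_build_addr(const struct cfg80211_pmsr_request *req,
*				    u8 *addr)
*	{
*		int i;
*
*		get_random_bytes(addr, ETH_ALEN);
*		for (i = 0; i < ETH_ALEN; i++) {
*			addr[i] &= ~req->mac_addr_mask[i];
*			addr[i] |= req->mac_addr[i] & req->mac_addr_mask[i];
*		}
*	}
*/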
/**
* struct cfg80211_update_owe_info - OWE Information
*
* This structure provides information needed for the drivers to offload OWE
* (Opportunistic Wireless Encryption) processing to the user space.
*
* Commonly used across update_owe_info request and event interfaces.
*
* @peer: MAC address of the peer device for which the OWE processing
* has to be done.
* @status: status code, %WLAN_STATUS_SUCCESS for successful OWE info
* processing, use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space
* cannot give you the real status code for failures. Used only for
* OWE update request command interface (user space to driver).
* @ie: IEs obtained from the peer or constructed by the user space. These are
* the IEs of the remote peer in the event from the host driver and
* the constructed IEs by the user space in the request interface.
* @ie_len: Length of IEs in octets.
* @assoc_link_id: MLO link ID of the AP with which the peer requested
* (re)association. This will be filled by the driver for both MLO and
* non-MLO station connections when the AP is affiliated with an MLD. For
* non-MLD AP mode, it will be -1. Used only with OWE update event (driver
* to user space).
* @peer_mld_addr: For MLO connection, MLD address of the peer. For non-MLO
* connection, it will be all zeros. This is applicable only when
* @assoc_link_id is not -1, i.e., the AP is affiliated with an MLD. Used only
* with OWE update event (driver to user space).
*/
struct cfg80211_update_owe_info {
u8 peer[ETH_ALEN] __aligned(2);
u16 status;
const u8 *ie;
size_t ie_len;
int assoc_link_id;
u8 peer_mld_addr[ETH_ALEN] __aligned(2);
};
/**
* struct mgmt_frame_regs - management frame registrations data
* @global_stypes: bitmap of management frame subtypes registered
* for the entire device
* @interface_stypes: bitmap of management frame subtypes registered
* for the given interface
* @global_mcast_stypes: mcast RX is needed globally for these subtypes
* @interface_mcast_stypes: mcast RX is needed on this interface
* for these subtypes
*/
struct mgmt_frame_regs {
u32 global_stypes, interface_stypes;
u32 global_mcast_stypes, interface_mcast_stypes;
};
/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
* in order to handle configuration requests on their interfaces.
*
* All callbacks except where otherwise noted should return 0
* on success or a negative error code.
*
* All operations are invoked with the wiphy mutex held. The RTNL may be
* held in addition (due to wireless extensions) but this cannot be relied
* upon except in cases where documented below. Note that due to ordering,
* the RTNL also cannot be acquired in any handlers.
*
* @suspend: wiphy device needs to be suspended. The variable @wow will
* be %NULL or contain the enabled Wake-on-Wireless triggers that are
* configured for the device.
* @resume: wiphy device needs to be resumed
* @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback
* to call device_set_wakeup_enable() to enable/disable wakeup from
* the device.
*
* @add_virtual_intf: create a new virtual interface with the given name,
* must set the struct wireless_dev's iftype. Beware: You must create
* the new netdev in the wiphy's network namespace! Returns the struct
* wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
* also set the address member in the wdev.
* This additionally holds the RTNL to be able to do netdev changes.
*
* @del_virtual_intf: remove the virtual interface
* This additionally holds the RTNL to be able to do netdev changes.
*
* @change_virtual_intf: change type/configuration of virtual interface,
* keep the struct wireless_dev's iftype updated.
* This additionally holds the RTNL to be able to do netdev changes.
*
* @add_intf_link: Add a new MLO link to the given interface. Note that
* the wdev->link[] data structure has been updated, so the new link
* address is available.
* @del_intf_link: Remove an MLO link from the given interface.
*
* @add_key: add a key with the given parameters. @mac_addr will be %NULL
* when adding a group key. @link_id will be -1 for non-MLO connection.
* For MLO connection, @link_id will be >= 0 for group key and -1 for
* pairwise key, @mac_addr will be peer's MLD address for MLO pairwise key.
*
* @get_key: get information about the key with the given parameters.
* @mac_addr will be %NULL when requesting information for a group
* key. All pointers given to the @callback function need not be valid
* after it returns. This function should return an error if it is
* not possible to retrieve the key, -ENOENT if it doesn't exist.
* @link_id will be -1 for non-MLO connection. For MLO connection,
* @link_id will be >= 0 for group key and -1 for pairwise key, @mac_addr
* will be peer's MLD address for MLO pairwise key.
*
* @del_key: remove a key given the @mac_addr (%NULL for a group key)
* and @key_index, return -ENOENT if the key doesn't exist. @link_id will
* be -1 for non-MLO connection. For MLO connection, @link_id will be >= 0
* for group key and -1 for pairwise key, @mac_addr will be peer's MLD
* address for MLO pairwise key.
*
* @set_default_key: set the default key on an interface. @link_id will be >= 0
* for MLO connection and -1 for non-MLO connection.
*
* @set_default_mgmt_key: set the default management frame key on an interface.
* @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
* @set_default_beacon_key: set the default Beacon frame key on an interface.
* @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
* @set_rekey_data: give the data necessary for GTK rekeying to the driver
*
* @start_ap: Start acting in AP mode defined by the parameters.
* @change_beacon: Change the beacon parameters for an access point mode
* interface. This should reject the call when AP mode wasn't started.
* @stop_ap: Stop being an AP, including stopping beaconing.
*
* @add_station: Add a new station.
* @del_station: Remove a station
* @change_station: Modify a given station. Note that flags changes are not much
* validated in cfg80211, in particular the auth/assoc/authorized flags
* might come to the driver in invalid combinations -- make sure to check
* them, also against the existing state! Drivers must call
* cfg80211_check_station_change() to validate the information.
* @get_station: get station information for the station identified by @mac
* @dump_station: dump station callback -- resume dump at index @idx
*
* @add_mpath: add a fixed mesh path
* @del_mpath: delete a given mesh path
* @change_mpath: change a given mesh path
* @get_mpath: get a mesh path for the given parameters
* @dump_mpath: dump mesh path callback -- resume dump at index @idx
* @get_mpp: get a mesh proxy path for the given parameters
* @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx
* @join_mesh: join the mesh network with the specified parameters
* (invoked with the wireless_dev mutex held)
* @leave_mesh: leave the current mesh network
* (invoked with the wireless_dev mutex held)
*
* @get_mesh_config: Get the current mesh configuration
*
* @update_mesh_config: Update mesh parameters on a running mesh.
* The mask is a bitfield which tells us which parameters to
* set, and which to leave alone.
*
* @change_bss: Modify parameters for a given BSS.
*
* @inform_bss: Called by cfg80211 while being informed about new BSS data
* for every BSS found within the reported data or frame. This is called
* from within the cfg80211 inform_bss handlers while holding the bss_lock.
* The data parameter is passed through from drv_data inside
* struct cfg80211_inform_bss.
* The new IE data for the BSS is explicitly passed.
*
* @set_txq_params: Set TX queue parameters
*
* @libertas_set_mesh_channel: Only for backward compatibility for libertas,
* as it doesn't implement join_mesh and needs to set the channel to
* join the mesh instead.
*
* @set_monitor_channel: Set the monitor mode channel for the device. If other
* interfaces are active this callback should reject the configuration.
* If no interfaces are active or the device is down, the channel should
* be stored for when a monitor interface becomes active.
*
* @scan: Request to do a scan. If returning zero, the scan request is given
* to the driver, and will be valid until passed to cfg80211_scan_done().
* For scan results, call cfg80211_inform_bss(); you can call this outside
* the scan/scan_done bracket too.
* @abort_scan: Tell the driver to abort an ongoing scan. The driver shall
* indicate the status of the scan through cfg80211_scan_done().
*
* @auth: Request to authenticate with the specified peer
* (invoked with the wireless_dev mutex held)
* @assoc: Request to (re)associate with the specified peer
* (invoked with the wireless_dev mutex held)
* @deauth: Request to deauthenticate from the specified peer
* (invoked with the wireless_dev mutex held)
* @disassoc: Request to disassociate from the specified peer
* (invoked with the wireless_dev mutex held)
*
* @connect: Connect to the ESS with the specified parameters. When connected,
* call cfg80211_connect_result()/cfg80211_connect_bss() with status code
* %WLAN_STATUS_SUCCESS. If the connection fails for some reason, call
* cfg80211_connect_result()/cfg80211_connect_bss() with the status code
* from the AP or cfg80211_connect_timeout() if no frame with status code
* was received.
* The driver is allowed to roam to other BSSes within the ESS when the
* other BSS matches the connect parameters. When such roaming is initiated
* by the driver, the driver is expected to verify that the target matches
* the configured security parameters and to use Reassociation Request
* frame instead of Association Request frame.
* The connect function can also be used to request the driver to perform a
* specific roam when connected to an ESS. In that case, the prev_bssid
* parameter is set to the BSSID of the currently associated BSS as an
* indication of requesting reassociation.
* In both the driver-initiated and new connect() call initiated roaming
* cases, the result of roaming is indicated with a call to
* cfg80211_roamed(). (invoked with the wireless_dev mutex held)
* @update_connect_params: Update the connect parameters while connected to a
* BSS. The updated parameters can be used by driver/firmware for
* subsequent BSS selection (roaming) decisions and to form the
* Authentication/(Re)Association Request frames. This call does not
* request an immediate disassociation or reassociation with the current
* BSS, i.e., this impacts only subsequent (re)associations. The bits in
* changed are defined in &enum cfg80211_connect_params_changed.
* (invoked with the wireless_dev mutex held)
* @disconnect: Disconnect from the BSS/ESS or stop connection attempts if
* connection is in progress. Once done, call cfg80211_disconnected() in
* case connection was already established (invoked with the
* wireless_dev mutex held), otherwise call cfg80211_connect_timeout().
*
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
* cfg80211_ibss_joined(), also call that function when changing BSSID due
* to a merge.
* (invoked with the wireless_dev mutex held)
* @leave_ibss: Leave the IBSS.
* (invoked with the wireless_dev mutex held)
*
* @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or
* MESH mode)
*
* @set_wiphy_params: Notify that wiphy parameters have changed;
* @changed bitfield (see &enum wiphy_params_flags) describes which values
* have changed. The actual parameter values are available in
* struct wiphy. If returning an error, no value should be changed.
*
* @set_tx_power: set the transmit power according to the parameters,
* the power passed is in mBm, to get dBm use MBM_TO_DBM(). The
* wdev may be %NULL if power was set for the wiphy, and will
* always be %NULL unless the driver supports per-vif TX power
* (as advertised by the nl80211 feature flag.)
* @get_tx_power: store the current TX power into the dbm variable;
* return 0 if successful
*
* @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting
* functions to adjust rfkill hw state
*
* @dump_survey: get site survey information.
*
* @remain_on_channel: Request the driver to remain awake on the specified
* channel for the specified duration to complete an off-channel
* operation (e.g., public action frame exchange). When the driver is
* ready on the requested channel, it must indicate this with an event
* notification by calling cfg80211_ready_on_channel().
* @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
* This allows the operation to be terminated prior to timeout based on
* the duration value.
* @mgmt_tx: Transmit a management frame.
* @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management
* frame on another channel
*
* @testmode_cmd: run a test mode command; @wdev may be %NULL
* @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be
* used by the function, but 0 and 1 must not be touched. Additionally,
* returning error codes other than -ENOBUFS and -ENOENT will terminate the
* dump and return to userspace with an error, so be careful. If any data
* was passed in from userspace then the data/len arguments will be present
* and point to the data contained in %NL80211_ATTR_TESTDATA.
*
* @set_bitrate_mask: set the bitrate mask configuration
*
* @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac
* devices running firmwares capable of generating the (re) association
* RSN IE. It allows for faster roaming between WPA2 BSSIDs.
* @del_pmksa: Delete a cached PMKID.
* @flush_pmksa: Flush all cached PMKIDs.
* @set_power_mgmt: Configure WLAN power management. A timeout value of -1
* allows the driver to adjust the dynamic ps timeout value.
* @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
* After configuration, the driver should (soon) send an event indicating
* the current level is above/below the configured threshold; this may
* need some care when the configuration is changed (without first being
* disabled.)
* @set_cqm_rssi_range_config: Configure two RSSI thresholds in the
* connection quality monitor. An event is to be sent only when the
* signal level is found to be outside the two values. The driver should
* set %NL80211_EXT_FEATURE_CQM_RSSI_LIST if this method is implemented.
* If it is provided then there's no point providing @set_cqm_rssi_config.
* @set_cqm_txe_config: Configure connection quality monitor TX error
* thresholds.
* @sched_scan_start: Tell the driver to start a scheduled scan.
* @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan with
* given request id. This call must stop the scheduled scan and be ready
* for starting a new one before it returns, i.e. @sched_scan_start may be
* called immediately after that again and should not fail in that case.
* The driver should not call cfg80211_sched_scan_stopped() for a requested
* stop (when this method returns 0).
*
* @update_mgmt_frame_registrations: Notify the driver that management frame
* registrations were updated. The callback is allowed to sleep.
*
* @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
* Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
* reject TX/RX mask combinations they cannot support by returning -EINVAL
* (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
*
* @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
*
* @tdls_mgmt: Transmit a TDLS management frame.
* @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
*
* @probe_client: probe an associated client, must return a cookie that it
* later passes to cfg80211_probe_status().
*
* @set_noack_map: Set the NoAck Map for the TIDs.
*
* @get_channel: Get the current operating channel for the virtual interface.
* For monitor interfaces, it should return %NULL unless there's a single
* current monitoring channel.
*
* @start_p2p_device: Start the given P2P device.
* @stop_p2p_device: Stop the given P2P device.
*
* @set_mac_acl: Sets MAC address control list in AP and P2P GO mode.
* Parameters include ACL policy, an array of MAC address of stations
* and the number of MAC addresses. If there is already a list in driver
* this new list replaces the existing one. Driver has to clear its ACL
* when the number of MAC address entries passed is 0. Drivers which
* advertise the support for MAC based ACL have to implement this callback.
*
* @start_radar_detection: Start radar detection in the driver.
*
* @end_cac: End running CAC, probably because a related CAC
* was finished on another phy.
*
* @update_ft_ies: Provide updated Fast BSS Transition information to the
* driver. If the SME is in the driver/firmware, this information can be
* used in building Authentication and Reassociation Request frames.
*
* @crit_proto_start: Indicates a critical protocol needs more link reliability
* for a given duration (milliseconds). The protocol is provided so the
* driver can take the most appropriate actions.
* @crit_proto_stop: Indicates critical protocol no longer needs increased link
* reliability. This operation can not fail.
* @set_coalesce: Set coalesce parameters.
*
* @channel_switch: initiate channel-switch procedure (with CSA). The driver is
* responsible for verifying whether the switch is possible. Since this is
* inherently tricky, the driver may decide to disconnect an interface later
* with cfg80211_stop_iface(). This doesn't mean the driver can accept
* everything. It should do its best to verify requests and reject them
* as soon as possible.
*
* @set_qos_map: Set QoS mapping information to the driver
*
* @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
* given interface. This is used e.g. for dynamic HT 20/40 MHz channel width
* changes during the lifetime of the BSS.
*
* @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device
* with the given parameters; action frame exchange has been handled by
* userspace so this just has to modify the TX path to take the TS into
* account.
* If the admitted time is 0 just validate the parameters to make sure
* the session can be created at all; it is valid to just always return
* success for that but that may result in inefficient behaviour (handshake
* with the peer followed by immediate teardown when the addition is later
* rejected)
* @del_tx_ts: remove an existing TX TS
*
* @join_ocb: join the OCB network with the specified parameters
* (invoked with the wireless_dev mutex held)
* @leave_ocb: leave the current OCB network
* (invoked with the wireless_dev mutex held)
*
* @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver
* is responsible for continually initiating channel-switching operations
* and returning to the base channel for communication with the AP.
* @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
* peers must be on the base channel when the call completes.
* @start_nan: Start the NAN interface.
* @stop_nan: Stop the NAN interface.
* @add_nan_func: Add a NAN function. Returns negative value on failure.
* On success @nan_func ownership is transferred to the driver and
* it may access it outside of the scope of this function. The driver
* should free the @nan_func when no longer needed by calling
* cfg80211_free_nan_func().
* On success the driver should assign an instance_id in the
* provided @nan_func.
* @del_nan_func: Delete a NAN function.
* @nan_change_conf: changes NAN configuration. The changed parameters must
* be specified in @changes (using &enum cfg80211_nan_conf_changes);
* All other parameters must be ignored.
*
* @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
*
* @get_txq_stats: Get TXQ stats for interface or phy. If wdev is %NULL, this
* function should return phy stats, and interface stats otherwise.
*
* @set_pmk: configure the PMK to be used for offloaded 802.1X 4-Way handshake.
* If not deleted through @del_pmk the PMK remains valid until disconnect
* upon which the driver should clear it.
* (invoked with the wireless_dev mutex held)
* @del_pmk: delete the previously configured PMK for the given authenticator.
* (invoked with the wireless_dev mutex held)
*
* @external_auth: indicates result of offloaded authentication processing from
* user space
*
* @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter
* tells the driver that the frame should not be encrypted.
*
* @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
* Statistics should be cumulative, currently no way to reset is provided.
* @start_pmsr: start peer measurement (e.g. FTM)
* @abort_pmsr: abort peer measurement
*
* @update_owe_info: Provide updated OWE info to the driver. A driver
* implementing SME but offloading OWE processing to user space will get
* the updated DH IE through this interface.
*
* @probe_mesh_link: Probe a direct mesh peer's link quality by sending a data
* frame, overruling the HWMP path selection algorithm.
* @set_tid_config: TID specific configuration, this can be peer or BSS specific
* This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer, for the
* given TIDs. This callback may sleep.
*
* @set_sar_specs: Update the SAR (TX power) settings.
*
* @color_change: Initiate a color change.
*
* @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
* those to decrypt (Re)Association Request and encrypt (Re)Association
* Response frame.
*
* @set_radar_background: Configure dedicated offchannel chain available for
* radar/CAC detection on some hw. This chain can't be used to transmit
* or receive frames and it is bound to a running wdev.
* Background radar/CAC detection avoids the CAC downtime when switching
* to a different channel during CAC detection on the selected radar
* channel.
* The caller is expected to set the chandef pointer to NULL in order to
* disable background CAC/radar detection.
* @add_link_station: Add a link to a station.
* @mod_link_station: Modify a link of a station.
* @del_link_station: Remove a link of a station.
*
* @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
* @set_ttlm: set the TID to link mapping.
* @set_epcs: Enable/Disable EPCS for station mode.
* @get_radio_mask: get bitmask of radios in use.
* (invoked with the wiphy mutex held)
* @assoc_ml_reconf: Request a non-AP MLO connection to perform ML
* reconfiguration, i.e., add and/or remove links to/from the
* association using ML reconfiguration action frames. Successfully added
* links will be added to the set of valid links. Successfully removed
* links will be removed from the set of valid links. The driver must
* indicate removed links by calling cfg80211_links_removed() and added
* links by calling cfg80211_mlo_reconf_add_done(). When calling
* cfg80211_mlo_reconf_add_done() the bss pointer must be given for each
* link for which MLO reconfiguration 'add' operation was requested.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
int (*resume)(struct wiphy *wiphy);
void (*set_wakeup)(struct wiphy *wiphy, bool enabled);
struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
struct vif_params *params);
int (*del_virtual_intf)(struct wiphy *wiphy,
struct wireless_dev *wdev);
int (*change_virtual_intf)(struct wiphy *wiphy,
struct net_device *dev,
enum nl80211_iftype type,
struct vif_params *params);
int (*add_intf_link)(struct wiphy *wiphy,
struct wireless_dev *wdev,
unsigned int link_id);
void (*del_intf_link)(struct wiphy *wiphy,
struct wireless_dev *wdev,
unsigned int link_id);
int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, struct key_params *params);
int (*get_key)(struct wiphy *wiphy, struct net_device *netdev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*));
int (*del_key)(struct wiphy *wiphy, struct net_device *netdev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr);
int (*set_default_key)(struct wiphy *wiphy,
struct net_device *netdev, int link_id,
u8 key_index, bool unicast, bool multicast);
int (*set_default_mgmt_key)(struct wiphy *wiphy,
struct net_device *netdev, int link_id,
u8 key_index);
int (*set_default_beacon_key)(struct wiphy *wiphy,
struct net_device *netdev,
int link_id,
u8 key_index);
int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings);
int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_update *info);
int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id);
int (*add_station)(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac,
struct station_parameters *params);
int (*del_station)(struct wiphy *wiphy, struct net_device *dev,
struct station_del_parameters *params);
int (*change_station)(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac,
struct station_parameters *params);
int (*get_station)(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo);
int (*dump_station)(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo);
int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev,
const u8 *dst, const u8 *next_hop);
int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev,
const u8 *dst);
int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev,
const u8 *dst, const u8 *next_hop);
int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev,
u8 *dst, u8 *next_hop, struct mpath_info *pinfo);
int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *dst, u8 *next_hop,
struct mpath_info *pinfo);
int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev,
u8 *dst, u8 *mpp, struct mpath_info *pinfo);
int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *dst, u8 *mpp,
struct mpath_info *pinfo);
int (*get_mesh_config)(struct wiphy *wiphy,
struct net_device *dev,
struct mesh_config *conf);
int (*update_mesh_config)(struct wiphy *wiphy,
struct net_device *dev, u32 mask,
const struct mesh_config *nconf);
int (*join_mesh)(struct wiphy *wiphy, struct net_device *dev,
const struct mesh_config *conf,
const struct mesh_setup *setup);
int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev);
int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev,
struct ocb_setup *setup);
int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev);
int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params);
void (*inform_bss)(struct wiphy *wiphy, struct cfg80211_bss *bss,
const struct cfg80211_bss_ies *ies, void *data);
int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_txq_params *params);
int (*libertas_set_mesh_channel)(struct wiphy *wiphy,
struct net_device *dev,
struct ieee80211_channel *chan);
int (*set_monitor_channel)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef);
int (*scan)(struct wiphy *wiphy,
struct cfg80211_scan_request *request);
void (*abort_scan)(struct wiphy *wiphy, struct wireless_dev *wdev);
int (*auth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_auth_request *req);
int (*assoc)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_assoc_request *req);
int (*deauth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_deauth_request *req);
int (*disassoc)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_disassoc_request *req);
int (*connect)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme);
int (*update_connect_params)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_connect_params *sme,
u32 changed);
int (*disconnect)(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code);
int (*join_ibss)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params);
int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
int rate[NUM_NL80211_BANDS]);
int (*set_wiphy_params)(struct wiphy *wiphy, int radio_idx,
u32 changed);
int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
int radio_idx,
enum nl80211_tx_power_setting type, int mbm);
int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
int radio_idx, unsigned int link_id, int *dbm);
void (*rfkill_poll)(struct wiphy *wiphy);
#ifdef CONFIG_NL80211_TESTMODE
int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev,
void *data, int len);
int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len);
#endif
int (*set_bitrate_mask)(struct wiphy *wiphy,
struct net_device *dev,
unsigned int link_id,
const u8 *peer,
const struct cfg80211_bitrate_mask *mask);
int (*dump_survey)(struct wiphy *wiphy, struct net_device *netdev,
int idx, struct survey_info *info);
int (*set_pmksa)(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa);
int (*del_pmksa)(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa);
int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev);
int (*remain_on_channel)(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct ieee80211_channel *chan,
unsigned int duration,
u64 *cookie);
int (*cancel_remain_on_channel)(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie);
int (*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie);
int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie);
int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout);
int (*set_cqm_rssi_config)(struct wiphy *wiphy,
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst);
int (*set_cqm_rssi_range_config)(struct wiphy *wiphy,
struct net_device *dev,
s32 rssi_low, s32 rssi_high);
int (*set_cqm_txe_config)(struct wiphy *wiphy,
struct net_device *dev,
u32 rate, u32 pkts, u32 intvl);
void (*update_mgmt_frame_registrations)(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct mgmt_frame_regs *upd);
int (*set_antenna)(struct wiphy *wiphy, int radio_idx,
u32 tx_ant, u32 rx_ant);
int (*get_antenna)(struct wiphy *wiphy, int radio_idx,
u32 *tx_ant, u32 *rx_ant);
int (*sched_scan_start)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_sched_scan_request *request);
int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev,
u64 reqid);
int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_gtk_rekey_data *data);
int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, int link_id,
u8 action_code, u8 dialog_token, u16 status_code,
u32 peer_capability, bool initiator,
const u8 *buf, size_t len);
int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper);
int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, u64 *cookie);
int (*set_noack_map)(struct wiphy *wiphy,
struct net_device *dev,
u16 noack_map);
int (*get_channel)(struct wiphy *wiphy,
struct wireless_dev *wdev,
unsigned int link_id,
struct cfg80211_chan_def *chandef);
int (*start_p2p_device)(struct wiphy *wiphy,
struct wireless_dev *wdev);
void (*stop_p2p_device)(struct wiphy *wiphy,
struct wireless_dev *wdev);
int (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev,
const struct cfg80211_acl_data *params);
int (*start_radar_detection)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
u32 cac_time_ms, int link_id);
void (*end_cac)(struct wiphy *wiphy,
struct net_device *dev, unsigned int link_id);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
struct wireless_dev *wdev,
enum nl80211_crit_proto_id protocol,
u16 duration);
void (*crit_proto_stop)(struct wiphy *wiphy,
struct wireless_dev *wdev);
int (*set_coalesce)(struct wiphy *wiphy,
struct cfg80211_coalesce *coalesce);
int (*channel_switch)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_csa_settings *params);
int (*set_qos_map)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_qos_map *qos_map);
int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id,
struct cfg80211_chan_def *chandef);
int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
u8 tsid, const u8 *peer, u8 user_prio,
u16 admitted_time);
int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
u8 tsid, const u8 *peer);
int (*tdls_channel_switch)(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr, u8 oper_class,
struct cfg80211_chan_def *chandef);
void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_nan_conf *conf);
void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev);
int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_nan_func *nan_func);
void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
u64 cookie);
int (*nan_change_conf)(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct cfg80211_nan_conf *conf,
u32 changes);
int (*set_multicast_to_unicast)(struct wiphy *wiphy,
struct net_device *dev,
const bool enabled);
int (*get_txq_stats)(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct cfg80211_txq_stats *txqstats);
int (*set_pmk)(struct wiphy *wiphy, struct net_device *dev,
const struct cfg80211_pmk_conf *conf);
int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev,
const u8 *aa);
int (*external_auth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_external_auth_params *params);
int (*tx_control_port)(struct wiphy *wiphy,
struct net_device *dev,
const u8 *buf, size_t len,
const u8 *dest, const __be16 proto,
const bool noencrypt, int link_id,
u64 *cookie);
int (*get_ftm_responder_stats)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_ftm_responder_stats *ftm_stats);
int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_pmsr_request *request);
void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_pmsr_request *request);
int (*update_owe_info)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_owe_info *owe_info);
int (*probe_mesh_link)(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len);
int (*set_tid_config)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_tid_config *tid_conf);
int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, u8 tids);
int (*set_sar_specs)(struct wiphy *wiphy,
struct cfg80211_sar_specs *sar);
int (*color_change)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_color_change_settings *params);
int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_fils_aad *fils_aad);
int (*set_radar_background)(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef);
int (*add_link_station)(struct wiphy *wiphy, struct net_device *dev,
struct link_station_parameters *params);
int (*mod_link_station)(struct wiphy *wiphy, struct net_device *dev,
struct link_station_parameters *params);
int (*del_link_station)(struct wiphy *wiphy, struct net_device *dev,
struct link_station_del_parameters *params);
int (*set_hw_timestamp)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_set_hw_timestamp *hwts);
int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ttlm_params *params);
u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev);
int (*assoc_ml_reconf)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ml_reconf_req *req);
int (*set_epcs)(struct wiphy *wiphy, struct net_device *dev,
bool val);
};
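/*
 * Illustrative sketch (not part of the original header): a driver typically
 * defines a static const struct cfg80211_ops containing only the callbacks
 * it supports and passes it to wiphy_new_nm()/wiphy_new(); unimplemented
 * callbacks are simply left NULL. The "mydrv_*" names below are hypothetical
 * placeholders; the scan handler would start the hardware scan and report
 * completion later through cfg80211_scan_done().
 *
 * .. code-block:: c
 *
 *	static int mydrv_scan(struct wiphy *wiphy,
 *			      struct cfg80211_scan_request *request)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct cfg80211_ops mydrv_cfg80211_ops = {
 *		.scan = mydrv_scan,
 *	};
 */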
/*
* wireless hardware and networking interfaces structures
* and registration/helper functions
*/
/**
* enum wiphy_flags - wiphy capability flags
*
* @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
* into two, first for legacy bands and second for 6 GHz.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
* by default -- this flag will be set depending on the kernel's default
* on wiphy_new(), but can be changed by the driver if it has a good
* reason to override the default
* @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
* on a VLAN interface). This flag also serves an extra purpose of
* supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype.
* @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
* @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
* control port protocol ethertype. The device also honours the
* control_port_no_encrypt flag.
* @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
* @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
* auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
* @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
* firmware.
* @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
* @WIPHY_FLAG_SUPPORTS_TDLS: The device supports TDLS (802.11z) operation.
* @WIPHY_FLAG_TDLS_EXTERNAL_SETUP: The device does not handle TDLS (802.11z)
* link setup/discovery operations internally. Setup, discovery and
* teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
* command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
* used for asking the driver/firmware to perform a TDLS operation.
* @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME
* @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
* when there are virtual interfaces in AP mode by calling
* cfg80211_report_obss_beacon().
* @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
* responds to probe-requests in hardware.
* @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
* @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
* @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger KEK and KCK keys.
* @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs,
* in order to not have them reachable in normal drivers, until we have
* complete feature/interface combinations/etc. advertisement. No driver
* should set this flag for now.
* @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
* @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for
* NL80211_REGDOM_SET_BY_DRIVER.
* @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if the
* driver sets this flag to update channels on beacon hints.
* @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link
* of an NSTR mobile AP MLD.
* @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
WIPHY_FLAG_SUPPORTS_MLO = BIT(1),
WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2),
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
WIPHY_FLAG_4ADDR_STATION = BIT(6),
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
WIPHY_FLAG_DISABLE_WEXT = BIT(9),
WIPHY_FLAG_MESH_AUTH = BIT(10),
WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12),
WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
WIPHY_FLAG_AP_UAPSD = BIT(14),
WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16),
WIPHY_FLAG_HAVE_AP_SME = BIT(17),
WIPHY_FLAG_REPORTS_OBSS = BIT(18),
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
WIPHY_FLAG_OFFCHAN_TX = BIT(20),
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24),
WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = BIT(25),
};
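/*
 * Illustrative sketch (assumption, not part of the original header): drivers
 * usually OR the relevant capability flags into wiphy->flags before calling
 * wiphy_register().
 *
 * .. code-block:: c
 *
 *	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
 *			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
 *			WIPHY_FLAG_SUPPORTS_TDLS;
 */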
/**
* struct ieee80211_iface_limit - limit on certain interface types
* @max: maximum number of interfaces of these types
* @types: interface types (bits)
*/
struct ieee80211_iface_limit {
u16 max;
u16 types;
};
/**
* struct ieee80211_iface_combination - possible interface combination
*
* With this structure the driver can describe which interface
* combinations it supports concurrently. When set in a struct wiphy_radio,
* the combinations refer to combinations of interfaces currently active on
* that radio.
*
* Examples:
*
* 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
*
* .. code-block:: c
*
* struct ieee80211_iface_limit limits1[] = {
* { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
* { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
* };
* struct ieee80211_iface_combination combination1 = {
* .limits = limits1,
* .n_limits = ARRAY_SIZE(limits1),
* .max_interfaces = 2,
* .beacon_int_infra_match = true,
* };
*
*
* 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
*
* .. code-block:: c
*
* struct ieee80211_iface_limit limits2[] = {
* { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
* BIT(NL80211_IFTYPE_P2P_GO), },
* };
* struct ieee80211_iface_combination combination2 = {
* .limits = limits2,
* .n_limits = ARRAY_SIZE(limits2),
* .max_interfaces = 8,
* .num_different_channels = 1,
* };
*
*
* 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
*
* This allows for an infrastructure connection and three P2P connections.
*
* .. code-block:: c
*
* struct ieee80211_iface_limit limits3[] = {
* { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
* { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
* BIT(NL80211_IFTYPE_P2P_CLIENT), },
* };
* struct ieee80211_iface_combination combination3 = {
* .limits = limits3,
* .n_limits = ARRAY_SIZE(limits3),
* .max_interfaces = 4,
* .num_different_channels = 2,
* };
*
*/
struct ieee80211_iface_combination {
/**
* @limits:
* limits for the given interface types
*/
const struct ieee80211_iface_limit *limits;
/**
* @num_different_channels:
* can use up to this many different channels
*/
u32 num_different_channels;
/**
* @max_interfaces:
* maximum number of interfaces in total allowed in this group
*/
u16 max_interfaces;
/**
* @n_limits:
* number of limitations
*/
u8 n_limits;
/**
* @beacon_int_infra_match:
* In this combination, the beacon intervals between infrastructure
* and AP types must match. This is required only in special cases.
*/
bool beacon_int_infra_match;
/**
* @radar_detect_widths:
* bitmap of channel widths supported for radar detection
*/
u8 radar_detect_widths;
/**
* @radar_detect_regions:
* bitmap of regions supported for radar detection
*/
u8 radar_detect_regions;
/**
* @beacon_int_min_gcd:
* This interface combination supports different beacon intervals.
*
* = 0
* all beacon intervals for different interfaces must be the same.
* > 0
* any beacon interval for an interface that is part of this combination AND
* the GCD of all beacon intervals from beaconing interfaces of this
* combination must be greater than or equal to this value.
*/
u32 beacon_int_min_gcd;
};
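/*
 * Illustrative sketch (assumption): the combinations built in the examples
 * above are advertised by pointing the wiphy at them before registration.
 *
 * .. code-block:: c
 *
 *	wiphy->iface_combinations = &combination1;
 *	wiphy->n_iface_combinations = 1;
 */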
struct ieee80211_txrx_stypes {
u16 tx, rx;
};
/**
* enum wiphy_wowlan_support_flags - WoWLAN support flags
* @WIPHY_WOWLAN_ANY: supports wakeup for the special "any"
* trigger that keeps the device operating as-is and
* wakes up the host on any activity, for example a
* received packet that passed filtering; note that the
* packet should be preserved in that case
* @WIPHY_WOWLAN_MAGIC_PKT: supports wakeup on magic packet
* (see nl80211.h)
* @WIPHY_WOWLAN_DISCONNECT: supports wakeup on disconnect
* @WIPHY_WOWLAN_SUPPORTS_GTK_REKEY: supports GTK rekeying while asleep
* @WIPHY_WOWLAN_GTK_REKEY_FAILURE: supports wakeup on GTK rekey failure
* @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request
* @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure
* @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release
* @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection
*/
enum wiphy_wowlan_support_flags {
WIPHY_WOWLAN_ANY = BIT(0),
WIPHY_WOWLAN_MAGIC_PKT = BIT(1),
WIPHY_WOWLAN_DISCONNECT = BIT(2),
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = BIT(3),
WIPHY_WOWLAN_GTK_REKEY_FAILURE = BIT(4),
WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5),
WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6),
WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7),
WIPHY_WOWLAN_NET_DETECT = BIT(8),
};
struct wiphy_wowlan_tcp_support {
const struct nl80211_wowlan_tcp_data_token_feature *tok;
u32 data_payload_max;
u32 data_interval_max;
u32 wake_payload_max;
bool seq;
};
/**
* struct wiphy_wowlan_support - WoWLAN support data
* @flags: see &enum wiphy_wowlan_support_flags
* @n_patterns: number of supported wakeup patterns
* (see nl80211.h for the pattern definition)
* @pattern_max_len: maximum length of each pattern
* @pattern_min_len: minimum length of each pattern
* @max_pkt_offset: maximum Rx packet offset
* @max_nd_match_sets: maximum number of matchsets for net-detect,
* similar, but not necessarily identical, to max_match_sets for
* scheduled scans.
* See &struct cfg80211_sched_scan_request.@match_sets for more
* details.
* @tcp: TCP wakeup support information
*/
struct wiphy_wowlan_support {
u32 flags;
int n_patterns;
int pattern_max_len;
int pattern_min_len;
int max_pkt_offset;
int max_nd_match_sets;
const struct wiphy_wowlan_tcp_support *tcp;
};
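/*
 * Illustrative sketch (assumption): a driver typically keeps its WoWLAN
 * description in a static const object and points wiphy->wowlan at it (the
 * field is only present when CONFIG_PM is enabled). All values are
 * placeholders.
 *
 * .. code-block:: c
 *
 *	static const struct wiphy_wowlan_support mydrv_wowlan = {
 *		.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
 *		.n_patterns = 4,
 *		.pattern_min_len = 1,
 *		.pattern_max_len = 64,
 *	};
 *
 *	wiphy->wowlan = &mydrv_wowlan;
 */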
/**
* struct wiphy_coalesce_support - coalesce support data
* @n_rules: maximum number of coalesce rules
* @max_delay: maximum supported coalescing delay in msecs
* @n_patterns: number of supported patterns in a rule
* (see nl80211.h for the pattern definition)
* @pattern_max_len: maximum length of each pattern
* @pattern_min_len: minimum length of each pattern
* @max_pkt_offset: maximum Rx packet offset
*/
struct wiphy_coalesce_support {
int n_rules;
int max_delay;
int n_patterns;
int pattern_max_len;
int pattern_min_len;
int max_pkt_offset;
};
/**
* enum wiphy_vendor_command_flags - validation flags for vendor commands
* @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev
* @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev
* @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running
* (must be combined with %_WDEV or %_NETDEV)
*/
enum wiphy_vendor_command_flags {
WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0),
WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1),
WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2),
};
/**
* enum wiphy_opmode_flag - Station's ht/vht operation mode information flags
*
* @STA_OPMODE_MAX_BW_CHANGED: Max Bandwidth changed
* @STA_OPMODE_SMPS_MODE_CHANGED: SMPS mode changed
* @STA_OPMODE_N_SS_CHANGED: max N_SS (number of spatial streams) changed
*
*/
enum wiphy_opmode_flag {
STA_OPMODE_MAX_BW_CHANGED = BIT(0),
STA_OPMODE_SMPS_MODE_CHANGED = BIT(1),
STA_OPMODE_N_SS_CHANGED = BIT(2),
};
/**
* struct sta_opmode_info - Station's ht/vht operation mode information
* @changed: contains value from &enum wiphy_opmode_flag
* @smps_mode: New SMPS mode value from &enum nl80211_smps_mode of a station
* @bw: new max bandwidth value from &enum nl80211_chan_width of a station
* @rx_nss: new rx_nss value of a station
*/
struct sta_opmode_info {
u32 changed;
enum nl80211_smps_mode smps_mode;
enum nl80211_chan_width bw;
u8 rx_nss;
};
#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA))
/**
* struct wiphy_vendor_command - vendor command definition
* @info: vendor command identifying information, as used in nl80211
* @flags: flags, see &enum wiphy_vendor_command_flags
* @doit: callback for the operation, note that wdev is %NULL if the
* flags didn't ask for a wdev and non-%NULL otherwise; the data
* pointer may be %NULL if userspace provided no data at all
* @dumpit: dump callback, for transferring bigger/multiple items. The
* @storage points to cb->args[5], i.e. it is preserved over the multiple
* dumpit calls.
* @policy: policy pointer for attributes within %NL80211_ATTR_VENDOR_DATA.
* Set this to %VENDOR_CMD_RAW_DATA if no policy can be given and the
* attribute is just raw data (e.g. a firmware command).
* @maxattr: highest attribute number in policy
* It's recommended not to use the same sub command for both @doit and
* @dumpit, so that userspace can assume certain ones are used with get
* requests and others with dump requests.
*/
struct wiphy_vendor_command {
struct nl80211_vendor_cmd_info info;
u32 flags;
int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
const void *data, int data_len);
int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct sk_buff *skb, const void *data, int data_len,
unsigned long *storage);
const struct nla_policy *policy;
unsigned int maxattr;
};
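/*
 * Illustrative sketch (assumption): a vendor command with a raw-data policy;
 * the OUI, subcmd number and "mydrv_*" names are hypothetical placeholders.
 *
 * .. code-block:: c
 *
 *	static int mydrv_vendor_doit(struct wiphy *wiphy,
 *				     struct wireless_dev *wdev,
 *				     const void *data, int data_len)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct wiphy_vendor_command mydrv_vendor_cmds[] = {
 *		{
 *			.info = { .vendor_id = 0x001234, .subcmd = 1 },
 *			.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
 *				 WIPHY_VENDOR_CMD_NEED_RUNNING,
 *			.doit = mydrv_vendor_doit,
 *			.policy = VENDOR_CMD_RAW_DATA,
 *		},
 *	};
 *
 *	wiphy->vendor_commands = mydrv_vendor_cmds;
 *	wiphy->n_vendor_commands = ARRAY_SIZE(mydrv_vendor_cmds);
 */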
/**
* struct wiphy_iftype_ext_capab - extended capabilities per interface type
* @iftype: interface type
* @extended_capabilities: extended capabilities supported by the driver,
* additional capabilities might be supported by userspace; these are the
* 802.11 extended capabilities ("Extended Capabilities element") and are
* in the same format as in the information element. See IEEE Std
* 802.11-2012 8.4.2.29 for the defined fields.
* @extended_capabilities_mask: mask of the valid values
* @extended_capabilities_len: length of the extended capabilities
* @eml_capabilities: EML capabilities (for MLO)
* @mld_capa_and_ops: MLD capabilities and operations (for MLO)
*/
struct wiphy_iftype_ext_capab {
enum nl80211_iftype iftype;
const u8 *extended_capabilities;
const u8 *extended_capabilities_mask;
u8 extended_capabilities_len;
u16 eml_capabilities;
u16 mld_capa_and_ops;
};
/**
* cfg80211_get_iftype_ext_capa - lookup interface type extended capability
* @wiphy: the wiphy to look up from
* @type: the interface type to look up
*
* Return: The extended capability for the given interface @type, may be %NULL
*/
const struct wiphy_iftype_ext_capab *
cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type);
/**
* struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities
* @max_peers: maximum number of peers in a single measurement
* @report_ap_tsf: can report assoc AP's TSF for radio resource measurement
* @randomize_mac_addr: can randomize MAC address for measurement
* @ftm: FTM measurement data
* @ftm.supported: FTM measurement is supported
* @ftm.asap: ASAP-mode is supported
* @ftm.non_asap: non-ASAP-mode is supported
* @ftm.request_lci: can request LCI data
* @ftm.request_civicloc: can request civic location data
* @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble)
* @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width)
* @ftm.max_bursts_exponent: maximum burst exponent supported
* (set to -1 if not limited; note that setting this will necessarily
* forbid using the value 15 to let the responder pick)
* @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if
* not limited)
* @ftm.trigger_based: trigger based ranging measurement is supported
* @ftm.non_trigger_based: non trigger based ranging measurement is supported
*/
struct cfg80211_pmsr_capabilities {
unsigned int max_peers;
u8 report_ap_tsf:1,
randomize_mac_addr:1;
struct {
u32 preambles;
u32 bandwidths;
s8 max_bursts_exponent;
u8 max_ftms_per_burst;
u8 supported:1,
asap:1,
non_asap:1,
request_lci:1,
request_civicloc:1,
trigger_based:1,
non_trigger_based:1;
} ftm;
};
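/*
 * Illustrative sketch (assumption): an FTM-capable device advertising ASAP
 * measurements; values and the "mydrv_" prefix are placeholders.
 *
 * .. code-block:: c
 *
 *	static const struct cfg80211_pmsr_capabilities mydrv_pmsr_capa = {
 *		.max_peers = 10,
 *		.randomize_mac_addr = 1,
 *		.ftm = {
 *			.supported = 1,
 *			.asap = 1,
 *			.preambles = BIT(NL80211_PREAMBLE_HT) |
 *				     BIT(NL80211_PREAMBLE_VHT),
 *			.bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
 *				      BIT(NL80211_CHAN_WIDTH_40) |
 *				      BIT(NL80211_CHAN_WIDTH_80),
 *			.max_bursts_exponent = -1,
 *			.max_ftms_per_burst = 0,
 *		},
 *	};
 *
 *	wiphy->pmsr_capa = &mydrv_pmsr_capa;
 */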
/**
* struct wiphy_iftype_akm_suites - This structure encapsulates supported akm
* suites for interface types defined in @iftypes_mask. Each type in the
* @iftypes_mask must be unique across all instances of iftype_akm_suites.
*
* @iftypes_mask: bitmask of interfaces types
* @akm_suites: points to an array of supported akm suites
* @n_akm_suites: number of supported AKM suites
*/
struct wiphy_iftype_akm_suites {
u16 iftypes_mask;
const u32 *akm_suites;
int n_akm_suites;
};
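/*
 * Illustrative sketch (assumption): restricting AP interfaces to SAE while
 * other interface types keep the wiphy-wide default AKM list. Names are
 * placeholders.
 *
 * .. code-block:: c
 *
 *	static const u32 mydrv_ap_akms[] = { WLAN_AKM_SUITE_SAE };
 *
 *	static const struct wiphy_iftype_akm_suites mydrv_iftype_akms[] = {
 *		{
 *			.iftypes_mask = BIT(NL80211_IFTYPE_AP),
 *			.akm_suites = mydrv_ap_akms,
 *			.n_akm_suites = ARRAY_SIZE(mydrv_ap_akms),
 *		},
 *	};
 *
 *	wiphy->iftype_akm_suites = mydrv_iftype_akms;
 *	wiphy->num_iftype_akm_suites = ARRAY_SIZE(mydrv_iftype_akms);
 */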
/**
* struct wiphy_radio_cfg - physical radio config of a wiphy
* This structure describes the configurations of a physical radio in a
* wiphy. It is used to denote per-radio attributes belonging to a wiphy.
*
* @rts_threshold: RTS threshold (dot11RTSThreshold);
* -1 (default) = RTS/CTS disabled
*/
struct wiphy_radio_cfg {
u32 rts_threshold;
};
/**
* struct wiphy_radio_freq_range - wiphy frequency range
* @start_freq: start range edge frequency (kHz)
* @end_freq: end range edge frequency (kHz)
*/
struct wiphy_radio_freq_range {
u32 start_freq;
u32 end_freq;
};
/**
* struct wiphy_radio - physical radio of a wiphy
* This structure describes a physical radio belonging to a wiphy.
* It is used to describe concurrent-channel capabilities. Only one channel
* can be active on the radio described by struct wiphy_radio.
*
* @freq_range: frequency range that the radio can operate on.
* @n_freq_range: number of elements in @freq_range
*
* @iface_combinations: Valid interface combinations array, should not
* list single interface types.
* @n_iface_combinations: number of entries in @iface_combinations array.
*
* @antenna_mask: bitmask of antennas connected to this radio.
*/
struct wiphy_radio {
const struct wiphy_radio_freq_range *freq_range;
int n_freq_range;
const struct ieee80211_iface_combination *iface_combinations;
int n_iface_combinations;
u32 antenna_mask;
};
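/*
 * Illustrative sketch (assumption): describing one radio of a multi-radio
 * device; frequencies are in kHz and all values are placeholders.
 *
 * .. code-block:: c
 *
 *	static const struct wiphy_radio_freq_range mydrv_radio0_freqs[] = {
 *		{ .start_freq = 2402000, .end_freq = 2494000 },
 *	};
 *
 *	static const struct wiphy_radio mydrv_radios[] = {
 *		{
 *			.freq_range = mydrv_radio0_freqs,
 *			.n_freq_range = ARRAY_SIZE(mydrv_radio0_freqs),
 *			.antenna_mask = 0x3,
 *		},
 *	};
 *
 *	wiphy->radio = mydrv_radios;
 *	wiphy->n_radio = ARRAY_SIZE(mydrv_radios);
 */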
/**
* enum wiphy_nan_flags - NAN capabilities
*
* @WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC: Device supports NAN configurable
* synchronization.
* @WIPHY_NAN_FLAGS_USERSPACE_DE: Device doesn't support DE offload.
*/
enum wiphy_nan_flags {
WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC = BIT(0),
WIPHY_NAN_FLAGS_USERSPACE_DE = BIT(1),
};
/**
* struct wiphy_nan_capa - NAN capabilities
*
* This structure describes the NAN capabilities of a wiphy.
*
* @flags: NAN capabilities flags, see &enum wiphy_nan_flags
* @op_mode: NAN operation mode, as defined in Wi-Fi Aware (TM) specification
* Table 81.
* @n_antennas: number of antennas supported by the device for Tx/Rx. Lower
* nibble indicates the number of TX antennas and upper nibble indicates the
* number of RX antennas. Value 0 indicates the information is not
* available.
* @max_channel_switch_time: maximum channel switch time in milliseconds.
* @dev_capabilities: NAN device capabilities as defined in Wi-Fi Aware (TM)
* specification Table 79 (Capabilities field).
*/
struct wiphy_nan_capa {
u32 flags;
u8 op_mode;
u8 n_antennas;
u16 max_channel_switch_time;
u8 dev_capabilities;
};
#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
/**
* struct wiphy - wireless hardware description
* @mtx: mutex for the data (structures) of this device
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
* the reg_notifier's request can be passed as NULL
* @regd: the driver's regulatory domain, if one was requested via
* the regulatory_hint() API. This can be used by the driver
* on the reg_notifier() if it chooses to ignore future
* regulatory domain changes caused by other drivers.
* @signal_type: signal type reported in &struct cfg80211_bss.
* @cipher_suites: supported cipher suites
* @n_cipher_suites: number of supported cipher suites
* @akm_suites: supported AKM suites. These are the default AKMs supported if
* the supported AKMs are not advertised for a specific interface type in
* iftype_akm_suites.
* @n_akm_suites: number of supported AKM suites
* @iftype_akm_suites: array of supported akm suites info per interface type.
* Note that the bits in @iftypes_mask inside this structure cannot
* overlap (i.e. only one occurrence of each type is allowed across all
* instances of iftype_akm_suites).
* @num_iftype_akm_suites: number of interface types for which supported akm
* suites are specified separately.
* @retry_short: Retry limit for short frames (dot11ShortRetryLimit)
* @retry_long: Retry limit for long frames (dot11LongRetryLimit)
* @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold);
* -1 = fragmentation disabled, only odd values >= 256 used
* @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled
* @_net: the network namespace this wiphy currently lives in
* @perm_addr: permanent MAC address of this device
* @addr_mask: If the device supports multiple MAC addresses by masking,
* set this to a mask with variable bits set to 1, e.g. if the last
* four bits are variable then set it to 00-00-00-00-00-0f. The actual
* variable bits shall be determined by the interfaces added, with
* interfaces not matching the mask being rejected when brought up.
* @n_addresses: number of addresses in @addresses.
* @addresses: If the device has more than one address, set this pointer
* to a list of addresses (6 bytes each). The first one will be used
* by default for perm_addr. In this case, the mask should be set to
* all-zeroes, and it is assumed that the device can handle
* the same number of arbitrary MAC addresses.
* @registered: protects ->resume and ->suspend sysfs callbacks against
* unregister hardware
* @debugfsdir: debugfs directory used for this wiphy (ieee80211/<wiphyname>).
* It will be renamed automatically on wiphy renames
* @dev: (virtual) struct device for this wiphy. The item in
* /sys/class/ieee80211/ points to this. You need to use set_wiphy_dev()
* (see below).
* @wext: wireless extension handlers
* @priv: driver private data (sized according to wiphy_new() parameter)
* @interface_modes: bitmask of interfaces types valid for this wiphy,
* must be set by driver
* @iface_combinations: Valid interface combinations array, should not
* list single interface types.
* @n_iface_combinations: number of entries in @iface_combinations array.
* @software_iftypes: bitmask of software interface types, these are not
* subject to any restrictions since they are purely managed in SW.
* @flags: wiphy flags, see &enum wiphy_flags
* @regulatory_flags: wiphy regulatory flags, see
* &enum ieee80211_regulatory_flags
* @features: features advertised to nl80211, see &enum nl80211_feature_flags.
* @ext_features: extended features advertised to nl80211, see
* &enum nl80211_ext_feature_index.
* @bss_priv_size: each BSS struct has private data allocated with it,
* this variable determines its size
* @max_scan_ssids: maximum number of SSIDs the device can scan for in
* any given scan
* @max_sched_scan_reqs: maximum number of scheduled scan requests that
* the device can run concurrently.
* @max_sched_scan_ssids: maximum number of SSIDs the device can scan
* for in any given scheduled scan
* @max_match_sets: maximum number of match sets the device can handle
* when performing a scheduled scan, 0 if filtering is not
* supported.
* @max_scan_ie_len: maximum length of user-controlled IEs device can
* add to probe request frames transmitted during a scan, must not
* include fixed IEs like supported rates
* @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled
* scans
* @max_sched_scan_plans: maximum number of scan plans (scan interval and number
* of iterations) for scheduled scan supported by the device.
* @max_sched_scan_plan_interval: maximum interval (in seconds) for a
* single scan plan supported by the device.
* @max_sched_scan_plan_iterations: maximum number of iterations for a single
* scan plan supported by the device.
* @coverage_class: current coverage class
* @fw_version: firmware version for ethtool reporting
* @hw_version: hardware version for ethtool reporting
* @max_num_pmkids: maximum number of PMKIDs supported by device
* @privid: a pointer that drivers can use to identify if an arbitrary
* wiphy is theirs, e.g. in global notifiers
* @bands: information about bands/channels supported by this device
*
* @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or
* transmitted through nl80211, points to an array indexed by interface
* type
*
* @available_antennas_tx: bitmap of antennas which are available to be
* configured as TX antennas. Antenna configuration commands will be
* rejected unless this or @available_antennas_rx is set.
*
* @available_antennas_rx: bitmap of antennas which are available to be
* configured as RX antennas. Antenna configuration commands will be
* rejected unless this or @available_antennas_tx is set.
*
* @probe_resp_offload:
* Bitmap of supported protocols for probe response offloading.
* See &enum nl80211_probe_resp_offload_support_attr. Only valid
* when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
*
* @max_remain_on_channel_duration: Maximum time a remain-on-channel operation
* may request, if implemented.
*
* @wowlan: WoWLAN support information
* @wowlan_config: current WoWLAN configuration; this should usually not be
* used since access to it is necessarily racy; use the parameter passed
* to the suspend() operation instead.
*
* @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
* @ht_capa_mod_mask: Specify what ht_cap values can be overridden.
* If %NULL, then none can be overridden.
* @vht_capa_mod_mask: Specify what VHT capabilities can be overridden.
* If %NULL, then none can be overridden.
*
* @wdev_list: the list of associated (virtual) interfaces; this list must
* not be modified by the driver, but can be read with RTNL/RCU protection.
*
* @max_acl_mac_addrs: Maximum number of MAC addresses that the device
* supports for ACL.
*
* @extended_capabilities: extended capabilities supported by the driver,
* additional capabilities might be supported by userspace; these are
* the 802.11 extended capabilities ("Extended Capabilities element")
* and are in the same format as in the information element. See
* 802.11-2012 8.4.2.29 for the defined fields. These are the default
* extended capabilities to be used if the capabilities are not specified
* for a specific interface type in iftype_ext_capab.
* @extended_capabilities_mask: mask of the valid values
* @extended_capabilities_len: length of the extended capabilities
* @iftype_ext_capab: array of extended capabilities per interface type
* @num_iftype_ext_capab: number of interface types for which extended
* capabilities are specified separately.
* @coalesce: packet coalescing support information
*
* @vendor_commands: array of vendor commands supported by the hardware
* @n_vendor_commands: number of vendor commands
* @vendor_events: array of vendor events supported by the hardware
* @n_vendor_events: number of vendor events
*
* @max_ap_assoc_sta: maximum number of associated stations supported in AP mode
* (including P2P GO) or 0 to indicate no such limit is advertised. The
* driver is allowed to advertise a theoretical limit that it can reach in
* some cases, but may not always reach.
*
* @max_num_csa_counters: Number of supported csa_counters in beacons
* and probe responses. This value should be set if the driver
* wishes to limit the number of csa counters. Default (0) means
* infinite.
* @bss_param_support: bitmask indicating which bss_parameters as defined in
* &struct bss_parameters the driver can actually handle in the
* .change_bss() callback. The bit positions are defined in &enum
* wiphy_bss_param_flags.
*
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
*
* @nan_supported_bands: bands supported by the device in NAN mode, a
* bitmap of &enum nl80211_band values. For instance, for
* NL80211_BAND_2GHZ, bit 0 would be set
* (i.e. BIT(NL80211_BAND_2GHZ)).
* @nan_capa: NAN capabilities
*
* @txq_limit: configuration of internal TX queue frame limit
* @txq_memory_limit: configuration internal TX queue memory limit
* @txq_quantum: configuration of internal TX queue scheduler quantum
*
* @tx_queue_len: allow setting transmit queue len for drivers not using
* wake_tx_queue
*
* @support_mbssid: can HW support association with nontransmitted AP
* @support_only_he_mbssid: don't parse MBSSID elements if it is not
* an HE AP, in order to avoid compatibility issues.
* @support_mbssid must be set for this to have any effect.
*
* @pmsr_capa: peer measurement capabilities
*
* @tid_config_support: describes the per-TID config support that the
* device has
* @tid_config_support.vif: bitmap of attributes (configurations)
* supported by the driver for each vif
* @tid_config_support.peer: bitmap of attributes (configurations)
* supported by the driver for each peer
* @tid_config_support.max_retry: maximum supported retry count for
* long/short retry configuration
*
* @max_data_retry_count: maximum supported per TID retry count for
* configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
* @rfkill: a pointer to the rfkill structure
*
* @mbssid_max_interfaces: maximum number of interfaces supported by the driver
* in a multiple BSSID set. This field must be set to a non-zero value
* by the driver to advertise MBSSID support.
* @ema_max_profile_periodicity: maximum profile periodicity supported by
* the driver. Setting this field to a non-zero value indicates that the
* driver supports enhanced multi-BSSID advertisements (EMA AP).
* @max_num_akm_suites: maximum number of AKM suites allowed for
* configuration through %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and
* %NL80211_CMD_START_AP. Set to NL80211_MAX_NR_AKM_SUITES if not set by
* driver. If set by driver minimum allowed value is
* NL80211_MAX_NR_AKM_SUITES in order to avoid compatibility issues with
* legacy userspace and maximum allowed value is
* CFG80211_MAX_NUM_AKM_SUITES.
*
* @hw_timestamp_max_peers: maximum number of peers that the driver supports
* enabling HW timestamping for concurrently. Setting this field to a
* non-zero value indicates that the driver supports HW timestamping.
* A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver
* supports enabling HW timestamping for all peers (i.e. no need to
* specify a mac address).
*
* @radio_cfg: configuration of radios belonging to a multi-radio wiphy. This
* struct contains a list of all radio specific attributes and should be
* used only for a multi-radio wiphy.
*
* @radio: radios belonging to this wiphy
* @n_radio: number of radios
*/
struct wiphy {
struct mutex mtx;
/* assign these fields before you register the wiphy */
u8 perm_addr[ETH_ALEN];
u8 addr_mask[ETH_ALEN];
struct mac_address *addresses;
const struct ieee80211_txrx_stypes *mgmt_stypes;
const struct ieee80211_iface_combination *iface_combinations;
int n_iface_combinations;
u16 software_iftypes;
u16 n_addresses;
/* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
u16 interface_modes;
u16 max_acl_mac_addrs;
u32 flags, regulatory_flags, features;
u8 ext_features[DIV_ROUND_UP(NUM_NL80211_EXT_FEATURES, 8)];
u32 ap_sme_capa;
enum cfg80211_signal_type signal_type;
int bss_priv_size;
u8 max_scan_ssids;
u8 max_sched_scan_reqs;
u8 max_sched_scan_ssids;
u8 max_match_sets;
u16 max_scan_ie_len;
u16 max_sched_scan_ie_len;
u32 max_sched_scan_plans;
u32 max_sched_scan_plan_interval;
u32 max_sched_scan_plan_iterations;
int n_cipher_suites;
const u32 *cipher_suites;
int n_akm_suites;
const u32 *akm_suites;
const struct wiphy_iftype_akm_suites *iftype_akm_suites;
unsigned int num_iftype_akm_suites;
u8 retry_short;
u8 retry_long;
u32 frag_threshold;
u32 rts_threshold;
u8 coverage_class;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
#ifdef CONFIG_PM
const struct wiphy_wowlan_support *wowlan;
struct cfg80211_wowlan *wowlan_config;
#endif
u16 max_remain_on_channel_duration;
u8 max_num_pmkids;
u32 available_antennas_tx;
u32 available_antennas_rx;
u32 probe_resp_offload;
const u8 *extended_capabilities, *extended_capabilities_mask;
u8 extended_capabilities_len;
const struct wiphy_iftype_ext_capab *iftype_ext_capab;
unsigned int num_iftype_ext_capab;
const void *privid;
struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request);
struct wiphy_radio_cfg *radio_cfg;
/* fields below are read-only, assigned by cfg80211 */
const struct ieee80211_regdomain __rcu *regd;
struct device dev;
bool registered;
struct dentry *debugfsdir;
const struct ieee80211_ht_cap *ht_capa_mod_mask;
const struct ieee80211_vht_cap *vht_capa_mod_mask;
struct list_head wdev_list;
possible_net_t _net;
#ifdef CONFIG_CFG80211_WEXT
const struct iw_handler_def *wext;
#endif
const struct wiphy_coalesce_support *coalesce;
const struct wiphy_vendor_command *vendor_commands;
const struct nl80211_vendor_cmd_info *vendor_events;
int n_vendor_commands, n_vendor_events;
u16 max_ap_assoc_sta;
u8 max_num_csa_counters;
u32 bss_param_support;
u32 bss_select_support;
u8 nan_supported_bands;
struct wiphy_nan_capa nan_capa;
u32 txq_limit;
u32 txq_memory_limit;
u32 txq_quantum;
unsigned long tx_queue_len;
u8 support_mbssid:1,
support_only_he_mbssid:1;
const struct cfg80211_pmsr_capabilities *pmsr_capa;
struct {
u64 peer, vif;
u8 max_retry;
} tid_config_support;
u8 max_data_retry_count;
const struct cfg80211_sar_capa *sar_capa;
struct rfkill *rfkill;
u8 mbssid_max_interfaces;
u8 ema_max_profile_periodicity;
u16 max_num_akm_suites;
u16 hw_timestamp_max_peers;
int n_radio;
const struct wiphy_radio *radio;
char priv[] __aligned(NETDEV_ALIGN);
};
static inline struct net *wiphy_net(struct wiphy *wiphy)
{
return read_pnet(&wiphy->_net);
}
static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net)
{
write_pnet(&wiphy->_net, net);
}
/**
* wiphy_priv - return priv from wiphy
*
* @wiphy: the wiphy whose priv pointer to return
* Return: The priv of @wiphy.
*/
static inline void *wiphy_priv(struct wiphy *wiphy)
{
BUG_ON(!wiphy);
return &wiphy->priv;
}
/**
* priv_to_wiphy - return the wiphy containing the priv
*
* @priv: a pointer previously returned by wiphy_priv
* Return: The wiphy of @priv.
*/
static inline struct wiphy *priv_to_wiphy(void *priv)
{
BUG_ON(!priv);
return container_of(priv, struct wiphy, priv);
}
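/*
 * Illustrative sketch (assumption): the driver's private data, sized via the
 * sizeof_priv argument of wiphy_new(), is reached through these helpers. The
 * "mydrv_priv" type and "mydrv_cfg80211_ops" are hypothetical placeholders.
 *
 * .. code-block:: c
 *
 *	struct mydrv_priv {
 *		struct wiphy *wiphy;
 *	};
 *
 *	struct wiphy *wiphy = wiphy_new(&mydrv_cfg80211_ops,
 *					sizeof(struct mydrv_priv));
 *	struct mydrv_priv *priv = wiphy_priv(wiphy);
 *
 *	priv->wiphy = wiphy;
 *	WARN_ON(priv_to_wiphy(priv) != wiphy);
 */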
/**
* set_wiphy_dev - set device pointer for wiphy
*
* @wiphy: The wiphy whose device to bind
* @dev: The device to parent it to
*/
static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev)
{
wiphy->dev.parent = dev;
}
/**
* wiphy_dev - get wiphy dev pointer
*
* @wiphy: The wiphy whose device struct to look up
* Return: The dev of @wiphy.
*/
static inline struct device *wiphy_dev(struct wiphy *wiphy)
{
return wiphy->dev.parent;
}
/**
* wiphy_name - get wiphy name
*
* @wiphy: The wiphy whose name to return
* Return: The name of @wiphy.
*/
static inline const char *wiphy_name(const struct wiphy *wiphy)
{
return dev_name(&wiphy->dev);
}
/**
* wiphy_new_nm - create a new wiphy for use with cfg80211
*
* @ops: The configuration operations for this device
* @sizeof_priv: The size of the private area to allocate
* @requested_name: Request a particular name.
* NULL is a valid value and means use the default phy%d naming.
*
* Create a new wiphy and associate the given operations with it.
* @sizeof_priv bytes are allocated for private use.
*
* Return: A pointer to the new wiphy. This pointer must be
* assigned to each netdev's ieee80211_ptr for proper operation.
*/
struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
const char *requested_name);
/**
* wiphy_new - create a new wiphy for use with cfg80211
*
* @ops: The configuration operations for this device
* @sizeof_priv: The size of the private area to allocate
*
* Create a new wiphy and associate the given operations with it.
* @sizeof_priv bytes are allocated for private use.
*
* Return: A pointer to the new wiphy. This pointer must be
* assigned to each netdev's ieee80211_ptr for proper operation.
*/
static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops,
int sizeof_priv)
{
return wiphy_new_nm(ops, sizeof_priv, NULL);
}
/**
* wiphy_register - register a wiphy with cfg80211
*
* @wiphy: The wiphy to register.
*
* Return: A non-negative wiphy index or a negative error code.
*/
int wiphy_register(struct wiphy *wiphy);
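/*
 * Illustrative sketch (assumption): a typical bring-up sequence; error paths
 * are abbreviated and "pdev", "mydrv_cfg80211_ops", "mydrv_priv" and
 * "mydrv_band_2ghz" are hypothetical placeholders.
 *
 * .. code-block:: c
 *
 *	struct wiphy *wiphy;
 *	int ret;
 *
 *	wiphy = wiphy_new(&mydrv_cfg80211_ops, sizeof(struct mydrv_priv));
 *	if (!wiphy)
 *		return -ENOMEM;
 *
 *	set_wiphy_dev(wiphy, &pdev->dev);
 *	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 *	wiphy->bands[NL80211_BAND_2GHZ] = &mydrv_band_2ghz;
 *
 *	ret = wiphy_register(wiphy);
 *	if (ret)
 *		wiphy_free(wiphy);
 *	return ret;
 */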
/* this is a define for better error reporting (file/line) */
#define lockdep_assert_wiphy(wiphy) lockdep_assert_held(&(wiphy)->mtx)
/**
* rcu_dereference_wiphy - rcu_dereference with debug checking
* @wiphy: the wiphy to check the locking on
* @p: The pointer to read, prior to dereferencing
*
* Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
* or RTNL. Note: Please prefer wiphy_dereference() or rcu_dereference().
*/
#define rcu_dereference_wiphy(wiphy, p) \
rcu_dereference_check(p, lockdep_is_held(&wiphy->mtx))
/**
* wiphy_dereference - fetch RCU pointer when updates are prevented by wiphy mtx
* @wiphy: the wiphy to check the locking on
* @p: The pointer to read, prior to dereferencing
*
* Return: the value of the specified RCU-protected pointer, but omit the
* READ_ONCE(), because caller holds the wiphy mutex used for updates.
*/
#define wiphy_dereference(wiphy, p) \
rcu_dereference_protected(p, lockdep_is_held(&wiphy->mtx))
/**
* get_wiphy_regdom - get custom regdomain for the given wiphy
* @wiphy: the wiphy to get the regdomain from
*
* Context: Requires any of RTNL, wiphy mutex or RCU protection.
*
* Return: pointer to the regulatory domain associated with the wiphy
*/
const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy);
/**
* wiphy_unregister - deregister a wiphy from cfg80211
*
* @wiphy: The wiphy to unregister.
*
* After this call, no more requests can be made with this priv
* pointer, but the call may sleep to wait for an outstanding
* request that is being handled.
*/
void wiphy_unregister(struct wiphy *wiphy);
/**
* wiphy_free - free wiphy
*
* @wiphy: The wiphy to free
*/
void wiphy_free(struct wiphy *wiphy);
/* internal structs */
struct cfg80211_conn;
struct cfg80211_internal_bss;
struct cfg80211_cached_keys;
struct cfg80211_cqm_config;
/**
* wiphy_lock - lock the wiphy
* @wiphy: the wiphy to lock
*
* This is needed around registering and unregistering netdevs that
* aren't created through cfg80211 calls, since that requires locking
* in cfg80211 when the notifier is called, but that cannot
* differentiate which way it's called.
*
* It can also be used by drivers for their own purposes.
*
* When cfg80211 ops are called, the wiphy is already locked.
*
* Note that this makes sure that no workers that have been queued
* with wiphy_queue_work() are running.
*/
static inline void wiphy_lock(struct wiphy *wiphy)
__acquires(&wiphy->mtx)
{
mutex_lock(&wiphy->mtx);
__acquire(&wiphy->mtx);
}
/**
* wiphy_unlock - unlock the wiphy again
* @wiphy: the wiphy to unlock
*/
static inline void wiphy_unlock(struct wiphy *wiphy)
__releases(&wiphy->mtx)
{
__release(&wiphy->mtx);
mutex_unlock(&wiphy->mtx);
}
DEFINE_GUARD(wiphy, struct wiphy *,
mutex_lock(&_T->mtx),
mutex_unlock(&_T->mtx))
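/*
 * Illustrative sketch (assumption): code outside cfg80211 callbacks can take
 * the wiphy mutex explicitly or via the scope-based guard defined above;
 * "mydrv_reconfigure" is a hypothetical placeholder.
 *
 * .. code-block:: c
 *
 *	wiphy_lock(wiphy);
 *	mydrv_reconfigure(wiphy);
 *	wiphy_unlock(wiphy);
 *
 *	{
 *		guard(wiphy)(wiphy);
 *		mydrv_reconfigure(wiphy);
 *	}
 */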
struct wiphy_work;
typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
struct wiphy_work {
struct list_head entry;
wiphy_work_func_t func;
};
static inline void wiphy_work_init(struct wiphy_work *work,
wiphy_work_func_t func)
{
INIT_LIST_HEAD(&work->entry);
work->func = func;
}
/**
* wiphy_work_queue - queue work for the wiphy
* @wiphy: the wiphy to queue for
* @work: the work item
*
* This is useful for work that must be done asynchronously, and work
* queued here has the special property that the wiphy mutex will be
* held as if wiphy_lock() was called, and that it cannot be running
* after wiphy_lock() was called. Therefore, wiphy_work_cancel() can
* use just cancel_work() instead of cancel_work_sync(); however, it requires
* being in a section protected by wiphy_lock().
*/
void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
/**
* wiphy_work_cancel - cancel previously queued work
* @wiphy: the wiphy, for debug purposes
* @work: the work to cancel
*
* Cancel the work *without* waiting for it, this assumes being
* called under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
/**
* wiphy_work_flush - flush previously queued work
* @wiphy: the wiphy, for debug purposes
* @work: the work to flush, this can be %NULL to flush all work
*
* Flush the work (i.e. run it if pending). This must be called
* under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
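/*
 * Illustrative sketch (assumption): a driver embeds a struct wiphy_work in
 * its private data, initializes it once, and queues it from any context; the
 * handler then runs with the wiphy mutex held. Names are placeholders.
 *
 * .. code-block:: c
 *
 *	static void mydrv_reconnect_work(struct wiphy *wiphy,
 *					 struct wiphy_work *work)
 *	{
 *		lockdep_assert_wiphy(wiphy);
 *	}
 *
 *	wiphy_work_init(&priv->reconnect_work, mydrv_reconnect_work);
 *	wiphy_work_queue(wiphy, &priv->reconnect_work);
 */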
struct wiphy_delayed_work {
struct wiphy_work work;
struct wiphy *wiphy;
struct timer_list timer;
};
void wiphy_delayed_work_timer(struct timer_list *t);
static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
wiphy_work_func_t func)
{
timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0);
wiphy_work_init(&dwork->work, func);
}
/**
* wiphy_delayed_work_queue - queue delayed work for the wiphy
* @wiphy: the wiphy to queue for
* @dwork: the delayable worker
* @delay: number of jiffies to wait before queueing
*
* This is useful for work that must be done asynchronously, and work
* queued here has the special property that the wiphy mutex will be
* held as if wiphy_lock() was called, and that it cannot be running
* after wiphy_lock() was called. Therefore, wiphy_delayed_work_cancel() can
* use just cancel_work() instead of cancel_work_sync(); however, it requires
* being in a section protected by wiphy_lock().
*
* Note that these are scheduled with a timer where the accuracy
* becomes less the longer in the future the scheduled timer is. Use
* wiphy_hrtimer_work_queue() if the timer must not be late by more
* than approximately 10 percent.
*/
void wiphy_delayed_work_queue(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork,
unsigned long delay);
/**
* wiphy_delayed_work_cancel - cancel previously queued delayed work
* @wiphy: the wiphy, for debug purposes
* @dwork: the delayed work to cancel
*
* Cancel the work *without* waiting for it, this assumes being
* called under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_delayed_work_cancel(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork);
/**
* wiphy_delayed_work_flush - flush previously queued delayed work
* @wiphy: the wiphy, for debug purposes
* @dwork: the delayed work to flush
*
* Flush the work (i.e. run it if pending). This must be called
* under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_delayed_work_flush(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork);
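/*
 * Illustrative sketch (assumption): same pattern as plain wiphy work, with a
 * jiffies-based delay; names are placeholders.
 *
 * .. code-block:: c
 *
 *	wiphy_delayed_work_init(&priv->scan_timeout_wk, mydrv_scan_timeout);
 *	wiphy_delayed_work_queue(wiphy, &priv->scan_timeout_wk, 5 * HZ);
 *
 *	wiphy_delayed_work_cancel(wiphy, &priv->scan_timeout_wk);
 */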
/**
* wiphy_delayed_work_pending - Find out whether a wiphy delayable
* work item is currently pending.
*
* @wiphy: the wiphy, for debug purposes
* @dwork: the delayed work in question
*
* Return: true if timer is pending, false otherwise
*
* How wiphy_delayed_work_queue() works is by setting a timer which
* when it expires calls wiphy_work_queue() to queue the wiphy work.
* Because wiphy_delayed_work_queue() uses mod_timer(), if it is
* called twice and the second call happens before the first call
* deadline, the work will be rescheduled for the second deadline and
* won't run before that.
*
* wiphy_delayed_work_pending() can be used to detect if calling
* wiphy_delayed_work_queue() would start a new work schedule
* or delay a previous one. As seen below it cannot be used to
* detect precisely if the work has finished executing nor if it
* is currently executing.
*
* CPU0 CPU1
* wiphy_delayed_work_queue(wk)
* mod_timer(wk->timer)
* wiphy_delayed_work_pending(wk) -> true
*
* [...]
* expire_timers(wk->timer)
* detach_timer(wk->timer)
* wiphy_delayed_work_pending(wk) -> false
* wk->timer->function() |
* wiphy_work_queue(wk) | delayed work pending
* list_add_tail() | returns false but
* queue_work(cfg80211_wiphy_work) | wk->func() has not
* | been run yet
* [...] |
* cfg80211_wiphy_work() |
* wk->func() V
*
*/
bool wiphy_delayed_work_pending(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork);
struct wiphy_hrtimer_work {
struct wiphy_work work;
struct wiphy *wiphy;
struct hrtimer timer;
};
enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t);
static inline void wiphy_hrtimer_work_init(struct wiphy_hrtimer_work *hrwork,
wiphy_work_func_t func)
{
hrtimer_setup(&hrwork->timer, wiphy_hrtimer_work_timer,
CLOCK_BOOTTIME, HRTIMER_MODE_REL);
wiphy_work_init(&hrwork->work, func);
}
/**
* wiphy_hrtimer_work_queue - queue hrtimer work for the wiphy
* @wiphy: the wiphy to queue for
* @hrwork: the high resolution timer worker
* @delay: the delay given as a ktime_t
*
* Please refer to wiphy_delayed_work_queue(). The difference is that
* the hrtimer work uses a high resolution timer for scheduling. This
* may be needed if timeouts might be scheduled further in the future
* and the accuracy of the normal timer is not sufficient.
*
* Expect a delay of a few milliseconds as the timer is scheduled
* with some slack and some more time may pass between queueing the
* work and its start.
*/
void wiphy_hrtimer_work_queue(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork,
ktime_t delay);
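/*
 * Example (illustrative sketch): as with delayed work, but the delay is a
 * ktime_t, here 500 ms via ms_to_ktime(); my_drv/my_work_fn are hypothetical.
 *
 *        wiphy_hrtimer_work_init(&my_drv->hrwork, my_work_fn);
 *        wiphy_hrtimer_work_queue(my_drv->wiphy, &my_drv->hrwork,
 *                                 ms_to_ktime(500));
 */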
/**
* wiphy_hrtimer_work_cancel - cancel previously queued hrtimer work
* @wiphy: the wiphy, for debug purposes
* @hrtimer: the hrtimer work to cancel
*
* Cancel the work *without* waiting for it; this assumes being
* called under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_hrtimer_work_cancel(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrtimer);
/**
* wiphy_hrtimer_work_flush - flush previously queued hrtimer work
* @wiphy: the wiphy, for debug purposes
* @hrwork: the hrtimer work to flush
*
* Flush the work (i.e. run it if pending). This must be called
* under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_hrtimer_work_flush(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork);
/**
* wiphy_hrtimer_work_pending - Find out whether a wiphy hrtimer
* work item is currently pending.
*
* @wiphy: the wiphy, for debug purposes
* @hrwork: the hrtimer work in question
*
* Return: true if timer is pending, false otherwise
*
* Please refer to the wiphy_delayed_work_pending() documentation as
* this is the equivalent function for hrtimer based delayed work
* items.
*/
bool wiphy_hrtimer_work_pending(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork);
/**
* enum ieee80211_ap_reg_power - regulatory power for an Access Point
*
* @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
* @IEEE80211_REG_LPI_AP: Indoor Access Point
* @IEEE80211_REG_SP_AP: Standard power Access Point
* @IEEE80211_REG_VLP_AP: Very low power Access Point
*/
enum ieee80211_ap_reg_power {
IEEE80211_REG_UNSET_AP,
IEEE80211_REG_LPI_AP,
IEEE80211_REG_SP_AP,
IEEE80211_REG_VLP_AP,
};
/**
* struct wireless_dev - wireless device state
*
* For netdevs, this structure must be allocated by the driver
* that uses the ieee80211_ptr field in struct net_device (this
* is intentional so it can be allocated along with the netdev.)
* It need not be registered then as netdev registration will
* be intercepted by cfg80211 to see the new wireless device,
* however, drivers must lock the wiphy before registering or
* unregistering netdevs if they pre-create any netdevs (in ops
* called from cfg80211, the wiphy is already locked.)
*
* For non-netdev uses, it must also be allocated by the driver
* in response to the cfg80211 callbacks that require it; as
* there's no netdev registration in that case, it may not be
* allocated outside of callback operations that return it.
*
* @wiphy: pointer to hardware description
* @iftype: interface type
* @registered: is this wdev already registered with cfg80211
* @registering: indicates we're doing registration under wiphy lock
* for the notifier
* @list: (private) Used to collect the interfaces
* @netdev: (private) Used to reference back to the netdev, may be %NULL
* @identifier: (private) Identifier used in nl80211 to identify this
* wireless device if it has no netdev
* @u: union containing data specific to @iftype
* @connected: indicates if connected or not (STA mode)
* @wext: (private) Used by the internal wireless extensions compat code
* @wext.ibss: (private) IBSS data part of wext handling
* @wext.connect: (private) connection handling data
* @wext.keys: (private) (WEP) key data
* @wext.ie: (private) extra elements for association
* @wext.ie_len: (private) length of extra elements
* @wext.bssid: (private) selected network BSSID
* @wext.ssid: (private) selected network SSID
* @wext.default_key: (private) selected default key index
* @wext.default_mgmt_key: (private) selected default management key index
* @wext.prev_bssid: (private) previous BSSID for reassociation
* @wext.prev_bssid_valid: (private) previous BSSID validity
* @use_4addr: indicates 4addr mode is used on this interface, must be
* set by the driver (if supported) on add_interface BEFORE registering the
* netdev and may otherwise be used by the driver read-only, will be updated
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
* @ps: powersave mode is enabled
* @ps_timeout: dynamic powersave timeout
* @ap_unexpected_nlportid: (private) netlink port ID of application
* registered for unexpected class 3 frames (AP mode)
* @conn: (private) cfg80211 software SME connection state machine data
* @connect_keys: (private) keys to set after connection is established
* @conn_bss_type: connecting/connected BSS type
* @conn_owner_nlportid: (private) connection owner socket port ID
* @disconnect_wk: (private) auto-disconnect work
* @disconnect_bssid: (private) the BSSID to use for auto-disconnect
* @event_list: (private) list for internal event processing
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
* @cqm_rssi_work: (private) CQM RSSI reporting work
* @cqm_config: (private) nl80211 RSSI monitor state
* @pmsr_list: (private) peer measurement requests
* @pmsr_lock: (private) peer measurements requests/results lock
* @pmsr_free_wk: (private) peer measurements cleanup work
* @unprot_beacon_reported: (private) timestamp of last
* unprotected beacon report
* @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr,
* @ap and @client for each link
* @links.cac_started: true if DFS channel availability check has been
* started
* @links.cac_start_time: timestamp (jiffies) when the dfs state was
* entered.
* @links.cac_time_ms: CAC time in ms
* @valid_links: bitmap describing what elements of @links are valid
* @radio_mask: Bitmask of radios that this interface is allowed to operate on.
*/
struct wireless_dev {
struct wiphy *wiphy;
enum nl80211_iftype iftype;
/* the remainder of this struct should be private to cfg80211 */
struct list_head list;
struct net_device *netdev;
u32 identifier;
struct list_head mgmt_registrations;
u8 mgmt_registrations_need_update:1;
bool use_4addr, is_running, registered, registering;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
/* currently used for IBSS and SME - might be rearranged later */
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
enum ieee80211_bss_type conn_bss_type;
u32 conn_owner_nlportid;
struct work_struct disconnect_wk;
u8 disconnect_bssid[ETH_ALEN];
struct list_head event_list;
spinlock_t event_lock;
u8 connected:1;
bool ps;
int ps_timeout;
u32 ap_unexpected_nlportid;
u32 owner_nlportid;
bool nl_owner_dead;
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
struct cfg80211_ibss_params ibss;
struct cfg80211_connect_params connect;
struct cfg80211_cached_keys *keys;
const u8 *ie;
size_t ie_len;
u8 bssid[ETH_ALEN];
u8 prev_bssid[ETH_ALEN];
u8 ssid[IEEE80211_MAX_SSID_LEN];
s8 default_key, default_mgmt_key;
bool prev_bssid_valid;
} wext;
#endif
struct wiphy_work cqm_rssi_work;
struct cfg80211_cqm_config __rcu *cqm_config;
struct list_head pmsr_list;
spinlock_t pmsr_lock;
struct work_struct pmsr_free_wk;
unsigned long unprot_beacon_reported;
union {
struct {
u8 connected_addr[ETH_ALEN] __aligned(2);
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len;
} client;
struct {
int beacon_interval;
struct cfg80211_chan_def preset_chandef;
struct cfg80211_chan_def chandef;
u8 id[IEEE80211_MAX_MESH_ID_LEN];
u8 id_len, id_up_len;
} mesh;
struct {
struct cfg80211_chan_def preset_chandef;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len;
} ap;
struct {
struct cfg80211_internal_bss *current_bss;
struct cfg80211_chan_def chandef;
int beacon_interval;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len;
} ibss;
struct {
struct cfg80211_chan_def chandef;
} ocb;
struct {
u8 cluster_id[ETH_ALEN] __aligned(2);
} nan;
} u;
struct {
u8 addr[ETH_ALEN] __aligned(2);
union {
struct {
unsigned int beacon_interval;
struct cfg80211_chan_def chandef;
} ap;
struct {
struct cfg80211_internal_bss *current_bss;
} client;
};
bool cac_started;
unsigned long cac_start_time;
unsigned int cac_time_ms;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 valid_links;
u32 radio_mask;
};
static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
return wdev->address;
}
static inline bool wdev_running(struct wireless_dev *wdev)
{
if (wdev->netdev)
return netif_running(wdev->netdev);
return wdev->is_running;
}
/**
* wdev_priv - return wiphy priv from wireless_dev
*
* @wdev: The wireless device whose wiphy's priv pointer to return
* Return: The wiphy priv of @wdev.
*/
static inline void *wdev_priv(struct wireless_dev *wdev)
{
BUG_ON(!wdev);
return wiphy_priv(wdev->wiphy);
}
/**
* wdev_chandef - return chandef pointer from wireless_dev
* @wdev: the wdev
* @link_id: the link ID for MLO
*
* Return: The chandef depending on the mode, or %NULL.
*/
struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
unsigned int link_id);
static inline void WARN_INVALID_LINK_ID(struct wireless_dev *wdev,
unsigned int link_id)
{
WARN_ON(link_id && !wdev->valid_links);
WARN_ON(wdev->valid_links &&
!(wdev->valid_links & BIT(link_id)));
}
#define for_each_valid_link(link_info, link_id) \
for (link_id = 0; \
link_id < ((link_info)->valid_links ? \
ARRAY_SIZE((link_info)->links) : 1); \
link_id++) \
if (!(link_info)->valid_links || \
((link_info)->valid_links & BIT(link_id)))
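/*
 * Example (illustrative sketch): iterating the valid links of a wdev; for a
 * non-MLO interface (valid_links == 0) the body runs exactly once with
 * link_id 0. The wdev variable is assumed to be a struct wireless_dev *.
 *
 *        unsigned int link_id;
 *
 *        for_each_valid_link(wdev, link_id) {
 *                const u8 *addr = wdev->links[link_id].addr;
 *                // use per-link data here
 *        }
 */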
/**
* DOC: Utility functions
*
* cfg80211 offers a number of utility functions that can be useful.
*/
/**
* ieee80211_channel_equal - compare two struct ieee80211_channel
*
* @a: 1st struct ieee80211_channel
* @b: 2nd struct ieee80211_channel
* Return: true if center frequency of @a == @b
*/
static inline bool
ieee80211_channel_equal(struct ieee80211_channel *a,
struct ieee80211_channel *b)
{
return (a->center_freq == b->center_freq &&
a->freq_offset == b->freq_offset);
}
/**
* ieee80211_channel_to_khz - convert ieee80211_channel to frequency in KHz
* @chan: struct ieee80211_channel to convert
* Return: The corresponding frequency (in KHz)
*/
static inline u32
ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
{
return MHZ_TO_KHZ(chan->center_freq) + chan->freq_offset;
}
/**
* ieee80211_channel_to_freq_khz - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
* Return: The corresponding frequency (in KHz), or 0 if the conversion failed.
*/
u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band);
/**
* ieee80211_channel_to_frequency - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
* Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
*/
static inline int
ieee80211_channel_to_frequency(int chan, enum nl80211_band band)
{
return KHZ_TO_MHZ(ieee80211_channel_to_freq_khz(chan, band));
}
/**
* ieee80211_freq_khz_to_channel - convert frequency to channel number
* @freq: center frequency in KHz
* Return: The corresponding channel, or 0 if the conversion failed.
*/
int ieee80211_freq_khz_to_channel(u32 freq);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
* @freq: center frequency in MHz
* Return: The corresponding channel, or 0 if the conversion failed.
*/
static inline int
ieee80211_frequency_to_channel(int freq)
{
return ieee80211_freq_khz_to_channel(MHZ_TO_KHZ(freq));
}
/**
* ieee80211_get_channel_khz - get channel struct from wiphy for specified
* frequency
* @wiphy: the struct wiphy to get the channel for
* @freq: the center frequency (in KHz) of the channel
* Return: The channel struct from @wiphy at @freq.
*/
struct ieee80211_channel *
ieee80211_get_channel_khz(struct wiphy *wiphy, u32 freq);
/**
* ieee80211_get_channel - get channel struct from wiphy for specified frequency
*
* @wiphy: the struct wiphy to get the channel for
* @freq: the center frequency (in MHz) of the channel
* Return: The channel struct from @wiphy at @freq.
*/
static inline struct ieee80211_channel *
ieee80211_get_channel(struct wiphy *wiphy, int freq)
{
return ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(freq));
}
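/*
 * Example (illustrative sketch): mapping channel 36 in the 5 GHz band to its
 * center frequency (5180 MHz) and looking up the channel struct on a wiphy;
 * the wiphy variable is an assumption and the lookup may yield %NULL if the
 * wiphy does not support that channel.
 *
 *        int freq = ieee80211_channel_to_frequency(36, NL80211_BAND_5GHZ);
 *        struct ieee80211_channel *chan = NULL;
 *
 *        if (freq)
 *                chan = ieee80211_get_channel(wiphy, freq);
 */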
/**
* cfg80211_channel_is_psc - Check if the channel is a 6 GHz PSC
* @chan: control channel to check
*
* The Preferred Scanning Channels (PSC) are defined in
* Draft IEEE P802.11ax/D5.0, 26.17.2.3.3
*
* Return: %true if channel is a PSC, %false otherwise
*/
static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
{
if (chan->band != NL80211_BAND_6GHZ)
return false;
return ieee80211_frequency_to_channel(chan->center_freq) % 16 == 5;
}
/**
* cfg80211_radio_chandef_valid - Check if the radio supports the chandef
*
* @radio: wiphy radio
* @chandef: chandef for current channel
*
* Return: whether or not the given chandef is valid for the given radio
*/
bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
const struct cfg80211_chan_def *chandef);
/**
* cfg80211_wdev_channel_allowed - Check if the wdev may use the channel
*
* @wdev: the wireless device
* @chan: channel to check
*
* Return: whether or not the wdev may use the channel
*/
bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev,
struct ieee80211_channel *chan);
/**
* ieee80211_get_response_rate - get basic rate for a given rate
*
* @sband: the band to look for rates in
* @basic_rates: bitmap of basic rates
* @bitrate: the bitrate for which to find the basic rate
*
* Return: The basic rate corresponding to a given bitrate, that
* is the next lower bitrate contained in the basic rate map,
* which is, for this function, given as a bitmap of indices of
* rates in the band's bitrate table.
*/
const struct ieee80211_rate *
ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
u32 basic_rates, int bitrate);
/**
* ieee80211_mandatory_rates - get mandatory rates for a given band
* @sband: the band to look for rates in
*
* Return: a bitmap of the mandatory rates for the given band, bits
* are set according to the rate position in the bitrates array.
*/
u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
/*
* Radiotap parsing functions -- for controlled injection support
*
* Implemented in net/wireless/radiotap.c
* Documentation in Documentation/networking/radiotap-headers.rst
*/
struct radiotap_align_size {
uint8_t align:4, size:4;
};
struct ieee80211_radiotap_namespace {
const struct radiotap_align_size *align_size;
int n_bits;
uint32_t oui;
uint8_t subns;
};
struct ieee80211_radiotap_vendor_namespaces {
const struct ieee80211_radiotap_namespace *ns;
int n_ns;
};
/**
* struct ieee80211_radiotap_iterator - tracks walk thru present radiotap args
* @this_arg_index: index of current arg, valid after each successful call
* to ieee80211_radiotap_iterator_next()
* @this_arg: pointer to current radiotap arg; it is valid after each
* call to ieee80211_radiotap_iterator_next() but also after
* ieee80211_radiotap_iterator_init() where it will point to
* the beginning of the actual data portion
* @this_arg_size: length of the current arg, for convenience
* @current_namespace: pointer to the current namespace definition
* (or internally %NULL if the current namespace is unknown)
* @is_radiotap_ns: indicates whether the current namespace is the default
* radiotap namespace or not
*
* @_rtheader: pointer to the radiotap header we are walking through
* @_max_length: length of radiotap header in cpu byte ordering
* @_arg_index: next argument index
* @_arg: next argument pointer
* @_next_bitmap: internal pointer to next present u32
* @_bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present
* @_vns: vendor namespace definitions
* @_next_ns_data: beginning of the next namespace's data
* @_reset_on_ext: internal; reset the arg index to 0 when going to the
* next bitmap word
*
* Describes the radiotap parser state. Fields prefixed with an underscore
* must not be used by users of the parser, only by the parser internally.
*/
struct ieee80211_radiotap_iterator {
struct ieee80211_radiotap_header *_rtheader;
const struct ieee80211_radiotap_vendor_namespaces *_vns;
const struct ieee80211_radiotap_namespace *current_namespace;
unsigned char *_arg, *_next_ns_data;
__le32 *_next_bitmap;
unsigned char *this_arg;
int this_arg_index;
int this_arg_size;
int is_radiotap_ns;
int _max_length;
int _arg_index;
uint32_t _bitmap_shifter;
int _reset_on_ext;
};
int
ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator,
struct ieee80211_radiotap_header *radiotap_header,
int max_length,
const struct ieee80211_radiotap_vendor_namespaces *vns);
int
ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator);
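/*
 * Example (illustrative sketch): the typical parsing loop. Per
 * Documentation/networking/radiotap-headers.rst, _next() returns 0 while
 * arguments remain, -ENOENT when the walk is complete, and another negative
 * value on a malformed header. rthdr and len are assumptions (the buffer
 * holding the radiotap header and its total length).
 *
 *        struct ieee80211_radiotap_iterator it;
 *        int ret;
 *
 *        ret = ieee80211_radiotap_iterator_init(&it, rthdr, len, NULL);
 *        while (!ret) {
 *                ret = ieee80211_radiotap_iterator_next(&it);
 *                if (ret)
 *                        continue;
 *                // it.this_arg_index, it.this_arg, it.this_arg_size valid here
 *        }
 *        if (ret != -ENOENT)
 *                ; // malformed radiotap data
 */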
extern const unsigned char rfc1042_header[6];
extern const unsigned char bridge_tunnel_header[6];
/**
* ieee80211_get_hdrlen_from_skb - get header length from data
*
* @skb: the frame
*
* Given an skb with a raw 802.11 header at the data pointer this function
* returns the 802.11 header length.
*
* Return: The 802.11 header length in bytes (not including encryption
* headers). Or 0 if the data in the sk_buff is too short to contain a valid
* 802.11 header.
*/
unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
/**
* ieee80211_hdrlen - get header length in bytes from frame control
* @fc: frame control field in little-endian format
* Return: The header length in bytes.
*/
unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
/**
* ieee80211_get_mesh_hdrlen - get mesh extension header length
* @meshhdr: the mesh extension header, only the flags field
* (first byte) will be accessed
* Return: The length of the extension header, which is always at
* least 6 bytes and at most 18 if address 5 and 6 are present.
*/
unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
/**
* DOC: Data path helpers
*
* In addition to generic utilities, cfg80211 also offers
* functions that help implement the data path for devices
* that do not do the 802.11/802.3 conversion on the device.
*/
/**
* ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3
* @skb: the 802.11 data frame
* @ehdr: pointer to a &struct ethhdr that will get the header, instead
* of it being pushed into the SKB
* @addr: the device MAC address
* @iftype: the virtual interface type
* @data_offset: offset of payload after the 802.11 header
* @is_amsdu: true if the 802.11 header is A-MSDU
* Return: 0 on success. Non-zero on error.
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
u8 data_offset, bool is_amsdu);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
* @skb: the 802.11 data frame
* @addr: the device MAC address
* @iftype: the virtual interface type
* Return: 0 on success. Non-zero on error.
*/
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
}
/**
* ieee80211_is_valid_amsdu - check if subframe lengths of an A-MSDU are valid
*
* This is used to detect non-standard A-MSDU frames, e.g. the ones generated
* by ath10k and ath11k, where the subframe length includes the length of the
* mesh control field.
*
* @skb: The input A-MSDU frame without any headers.
* @mesh_hdr: the type of mesh header to test
* 0: non-mesh A-MSDU length field
* 1: big-endian mesh A-MSDU length field
* 2: little-endian mesh A-MSDU length field
* Returns: true if subframe header lengths are valid for the @mesh_hdr mode
*/
bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr);
/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
* Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
* The @list will be empty if the decode fails. The @skb must be fully
* header-less before being passed in here; it is freed in this function.
*
* @skb: The input A-MSDU frame without any headers.
* @list: The output list of 802.3 frames. It must be allocated and
* initialized by the caller.
* @addr: The device MAC address.
* @iftype: The device interface type.
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
* @check_da: DA to check in the inner ethernet header, or NULL
* @check_sa: SA to check in the inner ethernet header, or NULL
* @mesh_control: see mesh_hdr in ieee80211_is_valid_amsdu
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
const u8 *check_da, const u8 *check_sa,
u8 mesh_control);
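/*
 * Example (illustrative sketch): decoding an A-MSDU into individual 802.3
 * frames. The @list must be initialized by the caller and the input skb is
 * consumed; vif_addr is a hypothetical interface MAC address.
 *
 *        struct sk_buff_head frames;
 *        struct sk_buff *frame;
 *
 *        __skb_queue_head_init(&frames);
 *        ieee80211_amsdu_to_8023s(skb, &frames, vif_addr,
 *                                 NL80211_IFTYPE_STATION, 0, NULL, NULL, 0);
 *        while ((frame = __skb_dequeue(&frames)))
 *                ; // hand each 802.3 frame to the stack
 */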
/**
* ieee80211_get_8023_tunnel_proto - get RFC1042 or bridge tunnel encap protocol
*
* Check for RFC1042 or bridge tunnel header and fetch the encapsulated
* protocol.
*
* @hdr: pointer to the MSDU payload
* @proto: destination pointer to store the protocol
* Return: true if encapsulation was found
*/
bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto);
/**
* ieee80211_strip_8023_mesh_hdr - strip mesh header from converted 802.3 frames
*
* Strip the mesh header, which was left in by ieee80211_data_to_8023 as part
* of the MSDU data. Also move any source/destination addresses from the mesh
* header to the ethernet header (if present).
*
* @skb: The 802.3 frame with embedded mesh header
*
* Return: 0 on success. Non-zero on error.
*/
int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
* @skb: the data frame
* @qos_map: Interworking QoS mapping or %NULL if not in use
* Return: The 802.1p/1d tag.
*/
unsigned int cfg80211_classify8021d(struct sk_buff *skb,
struct cfg80211_qos_map *qos_map);
/**
* cfg80211_find_elem_match - match information element and byte array in data
*
* @eid: element ID
* @ies: data consisting of IEs
* @len: length of data
* @match: byte array to match
* @match_len: number of bytes in the match array
* @match_offset: offset in the IE data where the byte array should match.
* Note the difference to cfg80211_find_ie_match() which considers
* the offset to start from the element ID byte, but here we take
* the data portion instead.
*
* Return: %NULL if the element ID could not be found or if
* the element is invalid (claims to be longer than the given
* data) or if the byte array doesn't match; otherwise return the
* requested element struct.
*
* Note: There are no checks on the element length other than
* having to fit into the given data and being large enough for the
* byte array to match.
*/
const struct element *
cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
const u8 *match, unsigned int match_len,
unsigned int match_offset);
/**
* cfg80211_find_ie_match - match information element and byte array in data
*
* @eid: element ID
* @ies: data consisting of IEs
* @len: length of data
* @match: byte array to match
* @match_len: number of bytes in the match array
* @match_offset: offset in the IE where the byte array should match.
* If match_len is zero, this must also be set to zero.
* Otherwise this must be set to 2 or more, because the first
* byte is the element id, which is already compared to eid, and
* the second byte is the IE length.
*
* Return: %NULL if the element ID could not be found or if
* the element is invalid (claims to be longer than the given
* data) or if the byte array doesn't match, or a pointer to the first
* byte of the requested element, that is the byte containing the
* element ID.
*
* Note: There are no checks on the element length other than
* having to fit into the given data and being large enough for the
* byte array to match.
*/
static inline const u8 *
cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len,
const u8 *match, unsigned int match_len,
unsigned int match_offset)
{
/* match_offset can't be smaller than 2, unless match_len is
* zero, in which case match_offset must be zero as well.
*/
if (WARN_ON((match_len && match_offset < 2) ||
(!match_len && match_offset)))
return NULL;
return (const void *)cfg80211_find_elem_match(eid, ies, len,
match, match_len,
match_offset ?
match_offset - 2 : 0);
}
/**
* cfg80211_find_elem - find information element in data
*
* @eid: element ID
* @ies: data consisting of IEs
* @len: length of data
*
* Return: %NULL if the element ID could not be found or if
* the element is invalid (claims to be longer than the given
* data) or if the byte array doesn't match; otherwise return the
* requested element struct.
*
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
static inline const struct element *
cfg80211_find_elem(u8 eid, const u8 *ies, int len)
{
return cfg80211_find_elem_match(eid, ies, len, NULL, 0, 0);
}
/**
* cfg80211_find_ie - find information element in data
*
* @eid: element ID
* @ies: data consisting of IEs
* @len: length of data
*
* Return: %NULL if the element ID could not be found or if
* the element is invalid (claims to be longer than the given
* data), or a pointer to the first byte of the requested
* element, that is the byte containing the element ID.
*
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
{
return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
}
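/*
 * Example (illustrative sketch): locating the SSID element in a buffer of
 * IEs (e.g. from a beacon body); the returned pointer includes the two-byte
 * element header. ies/ies_len are assumptions.
 *
 *        const u8 *ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);
 *
 *        if (ssid_ie) {
 *                u8 ssid_len = ssid_ie[1];
 *                const u8 *ssid = ssid_ie + 2;
 *                // ssid/ssid_len now describe the advertised SSID
 *        }
 */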
/**
* cfg80211_find_ext_elem - find information element with EID Extension in data
*
* @ext_eid: element ID Extension
* @ies: data consisting of IEs
* @len: length of data
*
* Return: %NULL if the extended element could not be found or if
* the element is invalid (claims to be longer than the given
* data) or if the byte array doesn't match; otherwise return the
* requested element struct.
*
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
static inline const struct element *
cfg80211_find_ext_elem(u8 ext_eid, const u8 *ies, int len)
{
return cfg80211_find_elem_match(WLAN_EID_EXTENSION, ies, len,
&ext_eid, 1, 0);
}
/**
* cfg80211_find_ext_ie - find information element with EID Extension in data
*
* @ext_eid: element ID Extension
* @ies: data consisting of IEs
* @len: length of data
*
* Return: %NULL if the extended element ID could not be found or if
* the element is invalid (claims to be longer than the given
* data), or a pointer to the first byte of the requested
* element, that is the byte containing the element ID.
*
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len)
{
return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len,
&ext_eid, 1, 2);
}
/**
* cfg80211_find_vendor_elem - find vendor specific information element in data
*
* @oui: vendor OUI
* @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
* @ies: data consisting of IEs
* @len: length of data
*
* Return: %NULL if the vendor specific element ID could not be found or if the
* element is invalid (claims to be longer than the given data); otherwise
* return the element structure for the requested element.
*
* Note: There are no checks on the element length other than having to fit into
* the given data.
*/
const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
const u8 *ies,
unsigned int len);
/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
*
* @oui: vendor OUI
* @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
* @ies: data consisting of IEs
* @len: length of data
*
* Return: %NULL if the vendor specific element ID could not be found or if the
* element is invalid (claims to be longer than the given data), or a pointer to
* the first byte of the requested element, that is the byte containing the
* element ID.
*
* Note: There are no checks on the element length other than having to fit into
* the given data.
*/
static inline const u8 *
cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, unsigned int len)
{
return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
}
/**
* enum cfg80211_rnr_iter_ret - reduced neighbor report iteration state
* @RNR_ITER_CONTINUE: continue iterating with the next entry
* @RNR_ITER_BREAK: break iteration and return success
* @RNR_ITER_ERROR: break iteration and return error
*/
enum cfg80211_rnr_iter_ret {
RNR_ITER_CONTINUE,
RNR_ITER_BREAK,
RNR_ITER_ERROR,
};
/**
* cfg80211_iter_rnr - iterate reduced neighbor report entries
* @elems: the frame elements to iterate RNR elements and then
* their entries in
* @elems_len: length of the elements
* @iter: iteration function, see also &enum cfg80211_rnr_iter_ret
* for the return value
* @iter_data: additional data passed to the iteration function
* Return: %true on success (after successfully iterating all entries
* or if the iteration function returned %RNR_ITER_BREAK),
* %false on error (iteration function returned %RNR_ITER_ERROR
* or elements were malformed.)
*/
bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
enum cfg80211_rnr_iter_ret
(*iter)(void *data, u8 type,
const struct ieee80211_neighbor_ap_info *info,
const u8 *tbtt_info, u8 tbtt_info_len),
void *iter_data);
/**
* cfg80211_defragment_element - Defrag the given element data into a buffer
*
* @elem: the element to defragment
* @ies: elements where @elem is contained
* @ieslen: length of @ies
* @data: buffer to store element data, or %NULL to just determine size
* @data_len: length of @data, or 0
* @frag_id: the element ID of fragments
*
* Return: length of @data, or -EINVAL on error
*
* Copy out all data from an element that may be fragmented into @data, while
* skipping all headers.
*
* The function uses memmove() internally. It is acceptable to defragment an
* element in-place.
*/
ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
size_t ieslen, u8 *data, size_t data_len,
u8 frag_id);
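/*
 * Example (illustrative sketch): two-pass use, first with %NULL data to learn
 * the defragmented size, then with an allocated buffer; elem/ies/ieslen are
 * assumptions and error handling is reduced to the essentials.
 *
 *        ssize_t elen = cfg80211_defragment_element(elem, ies, ieslen,
 *                                                   NULL, 0, WLAN_EID_FRAGMENT);
 *        if (elen > 0) {
 *                u8 *buf = kmalloc(elen, GFP_KERNEL);
 *
 *                if (buf)
 *                        cfg80211_defragment_element(elem, ies, ieslen,
 *                                                    buf, elen,
 *                                                    WLAN_EID_FRAGMENT);
 *        }
 */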
/**
* cfg80211_send_layer2_update - send layer 2 update frame
*
* @dev: network device
* @addr: STA MAC address
*
* Wireless drivers can use this function to update forwarding tables in bridge
* devices upon STA association.
*/
void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
/**
* DOC: Regulatory enforcement infrastructure
*
* TODO
*/
/**
* regulatory_hint - driver hint to the wireless core a regulatory domain
* @wiphy: the wireless device giving the hint (used only for reporting
* conflicts)
* @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
* should be in. If @rd is set this should be NULL. Note that if you
* set this to NULL you should still set rd->alpha2 to some accepted
* alpha2.
*
* Wireless drivers can use this function to hint to the wireless core
* what it believes should be the current regulatory domain by
* giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory
* domain should be in or by providing a completely built regulatory domain.
* If the driver provides an ISO/IEC 3166 alpha2, userspace will be queried
* for a regulatory domain structure for the respective country.
*
* The wiphy must have been registered to cfg80211 prior to this call.
* For cfg80211 drivers this means you must first use wiphy_register(),
* for mac80211 drivers you must first use ieee80211_register_hw().
*
* Drivers should check the return value; it is possible to get
* -ENOMEM.
*
* Return: 0 on success. -ENOMEM.
*/
int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
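/*
 * Example (illustrative sketch): a driver hinting the country code it read
 * from its device, after the wiphy has been registered; "my_country" is a
 * hypothetical two-character alpha2 string such as "DE".
 *
 *        int err = regulatory_hint(wiphy, my_country);
 *
 *        if (err)
 *                ; // e.g. -ENOMEM, the hint could not be processed
 */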
/**
* regulatory_set_wiphy_regd - set regdom info for self managed drivers
* @wiphy: the wireless device we want to process the regulatory domain on
* @rd: the regulatory domain information to use for this wiphy
*
* Set the regulatory domain information for self-managed wiphys, only they
* may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more
* information.
*
* Return: 0 on success. -EINVAL, -EPERM
*/
int regulatory_set_wiphy_regd(struct wiphy *wiphy,
struct ieee80211_regdomain *rd);
/**
* regulatory_set_wiphy_regd_sync - set regdom for self-managed drivers
* @wiphy: the wireless device we want to process the regulatory domain on
* @rd: the regulatory domain information to use for this wiphy
*
* This function requires the RTNL and the wiphy mutex to be held and
* applies the new regdomain synchronously to this wiphy. For more details
* see regulatory_set_wiphy_regd().
*
* Return: 0 on success. -EINVAL, -EPERM
*/
int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy,
struct ieee80211_regdomain *rd);
/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
* @wiphy: the wireless device we want to process the regulatory domain on
* @regd: the custom regulatory domain to use for this wiphy
*
* Drivers can sometimes have custom regulatory domains which do not apply
* to a specific country. Drivers can use this to apply such custom regulatory
* domains. This routine must be called prior to wiphy registration. The
* custom regulatory domain will be trusted completely and as such previous
* default channel settings will be disregarded. If no rule is found for a
* channel on the regulatory domain the channel will be disabled.
* Drivers using this for a wiphy should also set the wiphy flag
* REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy
* that called this helper.
*/
void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
const struct ieee80211_regdomain *regd);
/**
* freq_reg_info - get regulatory information for the given frequency
* @wiphy: the wiphy for which we want to process this rule
* @center_freq: Frequency in KHz for which we want regulatory information
*
* Use this function to get the regulatory rule for a specific frequency on
* a given wireless device. If the device has a specific regulatory domain
* it wants to follow we respect that unless a country IE has been received
* and processed already.
*
* Return: A valid pointer, or, when an error occurs, for example if no rule
* can be found, the return value is encoded using ERR_PTR(). Use IS_ERR() to
* check and PTR_ERR() to obtain the numeric return value. The numeric return
* value will be -ERANGE if we determine the given center_freq does not even
* have a regulatory rule for a frequency range in the center_freq's band.
* See freq_in_rule_band() for our current definition of a band -- this is
* purely subjective and right now it's 802.11 specific.
*/
const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
u32 center_freq);
/**
* reg_initiator_name - map regulatory request initiator enum to name
* @initiator: the regulatory request initiator
*
* You can use this to map the regulatory request initiator enum to a
* proper string representation.
*
* Return: pointer to string representation of the initiator
*/
const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
/**
* regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom
* @wiphy: wiphy for which pre-CAC capability is checked.
*
* Pre-CAC is allowed only in some regdomains (notably ETSI).
*
* Return: %true if allowed, %false otherwise
*/
bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
/**
* DOC: Internal regulatory db functions
*
*/
/**
* reg_query_regdb_wmm - Query internal regulatory db for wmm rule
* Regulatory self-managed drivers can use it to proactively query WMM rules.
*
* @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
* @freq: the frequency (in MHz) to be queried.
* @rule: pointer to store the wmm rule from the regulatory db.
*
* Self-managed wireless drivers can use this function to query
* the internal regulatory database to check whether the given
* ISO/IEC 3166 alpha2 country and freq have wmm rule limitations.
*
* Drivers should check the return value; it is possible to get
* -ENODATA.
*
* Return: 0 on success. -ENODATA.
*/
int reg_query_regdb_wmm(char *alpha2, int freq,
struct ieee80211_reg_rule *rule);
/*
* callbacks for asynchronous cfg80211 methods, notification
* functions and BSS handling helpers
*/
/**
* cfg80211_scan_done - notify that scan finished
*
* @request: the corresponding scan request
* @info: information about the completed scan
*/
void cfg80211_scan_done(struct cfg80211_scan_request *request,
struct cfg80211_scan_info *info);
/**
* cfg80211_sched_scan_results - notify that new scan results are available
*
* @wiphy: the wiphy which got scheduled scan results
* @reqid: identifier for the related scheduled scan request
*/
void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped
*
* @wiphy: the wiphy on which the scheduled scan stopped
* @reqid: identifier for the related scheduled scan request
*
* The driver can call this function to inform cfg80211 that the
* scheduled scan had to be stopped, for whatever reason. The driver
* is then called back via the sched_scan_stop operation when done.
*/
void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_sched_scan_stopped_locked - notify that the scheduled scan has stopped
*
* @wiphy: the wiphy on which the scheduled scan stopped
* @reqid: identifier for the related scheduled scan request
*
* The driver can call this function to inform cfg80211 that the
* scheduled scan had to be stopped, for whatever reason. The driver
* is then called back via the sched_scan_stop operation when done.
* This function should be called with the wiphy mutex held.
*/
void cfg80211_sched_scan_stopped_locked(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
* @wiphy: the wiphy reporting the BSS
* @data: the BSS metadata
* @mgmt: the management frame (probe response or beacon)
* @len: length of the management frame
* @gfp: context flags
*
* This informs cfg80211 that BSS information was found and
* the BSS should be updated/added.
*
* Return: A referenced struct, must be released with cfg80211_put_bss()!
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
struct cfg80211_inform_bss *data,
struct ieee80211_mgmt *mgmt, size_t len,
gfp_t gfp);
static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
struct ieee80211_mgmt *mgmt, size_t len,
s32 signal, gfp_t gfp)
{
struct cfg80211_inform_bss data = {
.chan = rx_channel,
.signal = signal,
};
return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
}
/**
* cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID
* @bssid: transmitter BSSID
* @max_bssid: max BSSID indicator, taken from Multiple BSSID element
* @mbssid_index: BSSID index, taken from Multiple BSSID index element
* @new_bssid: calculated nontransmitted BSSID
*/
static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
u8 mbssid_index, u8 *new_bssid)
{
u64 bssid_u64 = ether_addr_to_u64(bssid);
u64 mask = GENMASK_ULL(max_bssid - 1, 0);
u64 new_bssid_u64;
new_bssid_u64 = bssid_u64 & ~mask;
new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask;
u64_to_ether_addr(new_bssid_u64, new_bssid);
}
/**
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
* @non_inherit_element: non inheritance element
*
* Return: %true if should be inherited, %false otherwise
*/
bool cfg80211_is_element_inherited(const struct element *element,
const struct element *non_inherit_element);
/**
* cfg80211_merge_profile - merges a MBSSID profile if it is split between IEs
* @ie: ies
* @ielen: length of IEs
* @mbssid_elem: current MBSSID element
* @sub_elem: current MBSSID subelement (profile)
* @merged_ie: location of the merged profile
* @max_copy_len: max merged profile length
*
* Return: the number of bytes merged
*/
size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
const struct element *mbssid_elem,
const struct element *sub_elem,
u8 *merged_ie, size_t max_copy_len);
/**
* enum cfg80211_bss_frame_type - frame type that the BSS data came from
* @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
* from a beacon or probe response
* @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
* @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
* @CFG80211_BSS_FTYPE_S1G_BEACON: data comes from an S1G beacon
*/
enum cfg80211_bss_frame_type {
CFG80211_BSS_FTYPE_UNKNOWN,
CFG80211_BSS_FTYPE_BEACON,
CFG80211_BSS_FTYPE_PRESP,
CFG80211_BSS_FTYPE_S1G_BEACON,
};
/**
* cfg80211_get_ies_channel_number - returns the channel number from ies
* @ie: IEs
* @ielen: length of IEs
* @band: enum nl80211_band of the channel
*
* Return: the channel number, or -1 if none could be determined.
*/
int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
enum nl80211_band band);
/**
* cfg80211_ssid_eq - compare two SSIDs
* @a: first SSID
* @b: second SSID
*
* Return: %true if SSIDs are equal, %false otherwise.
*/
static inline bool
cfg80211_ssid_eq(struct cfg80211_ssid *a, struct cfg80211_ssid *b)
{
if (WARN_ON(!a || !b))
return false;
if (a->ssid_len != b->ssid_len)
return false;
return memcmp(a->ssid, b->ssid, a->ssid_len) ? false : true;
}
/**
* cfg80211_inform_bss_data - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
* @data: the BSS metadata
* @ftype: frame type (if known)
* @bssid: the BSSID of the BSS
* @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
* @capability: the capability field sent by the peer
* @beacon_interval: the beacon interval announced by the peer
* @ie: additional IEs sent by the peer
* @ielen: length of the additional IEs
* @gfp: context flags
*
* This informs cfg80211 that BSS information was found and
* the BSS should be updated/added.
*
* Return: A referenced struct, must be released with cfg80211_put_bss()!
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
cfg80211_inform_bss_data(struct wiphy *wiphy,
struct cfg80211_inform_bss *data,
enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
gfp_t gfp);
static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
s32 signal, gfp_t gfp)
{
struct cfg80211_inform_bss data = {
.chan = rx_channel,
.signal = signal,
};
return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
capability, beacon_interval, ie, ielen,
gfp);
}
/**
* __cfg80211_get_bss - get a BSS reference
* @wiphy: the wiphy this BSS struct belongs to
* @channel: the channel to search on (or %NULL)
* @bssid: the desired BSSID (or %NULL)
* @ssid: the desired SSID (or %NULL)
* @ssid_len: length of the SSID (or 0)
* @bss_type: type of BSS, see &enum ieee80211_bss_type
* @privacy: privacy filter, see &enum ieee80211_privacy
* @use_for: indicates which use is intended
*
* Return: Reference-counted BSS on success. %NULL on error.
*/
struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
const u8 *ssid, size_t ssid_len,
enum ieee80211_bss_type bss_type,
enum ieee80211_privacy privacy,
u32 use_for);
/**
* cfg80211_get_bss - get a BSS reference
* @wiphy: the wiphy this BSS struct belongs to
* @channel: the channel to search on (or %NULL)
* @bssid: the desired BSSID (or %NULL)
* @ssid: the desired SSID (or %NULL)
* @ssid_len: length of the SSID (or 0)
* @bss_type: type of BSS, see &enum ieee80211_bss_type
* @privacy: privacy filter, see &enum ieee80211_privacy
*
* This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL.
*
* Return: Reference-counted BSS on success. %NULL on error.
*/
static inline struct cfg80211_bss *
cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel,
const u8 *bssid, const u8 *ssid, size_t ssid_len,
enum ieee80211_bss_type bss_type,
enum ieee80211_privacy privacy)
{
return __cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len,
bss_type, privacy,
NL80211_BSS_USE_FOR_NORMAL);
}
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *ssid, size_t ssid_len)
{
return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len,
IEEE80211_BSS_TYPE_IBSS,
IEEE80211_PRIVACY_ANY);
}
/**
* cfg80211_ref_bss - reference BSS struct
* @wiphy: the wiphy this BSS struct belongs to
* @bss: the BSS struct to reference
*
* Increments the refcount of the given BSS struct.
*/
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
/**
* cfg80211_put_bss - unref BSS struct
* @wiphy: the wiphy this BSS struct belongs to
* @bss: the BSS struct
*
* Decrements the refcount of the given BSS struct.
*/
void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
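/*
 * Example (illustrative sketch): looking up a BSS by BSSID and releasing the
 * reference again; wiphy, chan and bssid are assumptions.
 *
 *        struct cfg80211_bss *bss;
 *
 *        bss = cfg80211_get_bss(wiphy, chan, bssid, NULL, 0,
 *                               IEEE80211_BSS_TYPE_ANY,
 *                               IEEE80211_PRIVACY_ANY);
 *        if (bss) {
 *                // use the BSS entry
 *                cfg80211_put_bss(wiphy, bss);
 *        }
 */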
/**
* cfg80211_unlink_bss - unlink BSS from internal data structures
* @wiphy: the wiphy
* @bss: the bss to remove
*
* This function removes the given BSS from the internal data structures
* thereby making it no longer show up in scan results etc. Use this
* function when you detect a BSS is gone. Normally BSSes will also time
* out, so it is not necessary to use this function at all.
*/
void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
/**
* cfg80211_bss_iter - iterate all BSS entries
*
* This function iterates over the BSS entries associated with the given wiphy
* and calls the callback for the iterated BSS. The iterator function is not
* allowed to call functions that might modify the internal state of the BSS DB.
*
* @wiphy: the wiphy
* @chandef: if given, the iterator function will be called only if the channel
* of the currently iterated BSS is a subset of the given channel.
* @iter: the iterator function to call
* @iter_data: an argument to the iterator function
*/
void cfg80211_bss_iter(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
void (*iter)(struct wiphy *wiphy,
struct cfg80211_bss *bss,
void *data),
void *iter_data);
/**
* cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
* @dev: network device
* @buf: authentication frame (header + body)
* @len: length of the frame data
*
* This function is called whenever an authentication, disassociation or
* deauthentication frame has been received and processed in station mode.
* After being asked to authenticate via cfg80211_ops::auth() the driver must
* call either this function or cfg80211_auth_timeout().
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
* While connected, the driver must call this for received and processed
* disassociation and deauthentication frames. If the frame couldn't be used
* because it was unprotected, the driver must call the function
* cfg80211_rx_unprot_mlme_mgmt() instead.
*
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
/**
* cfg80211_auth_timeout - notification of timed out authentication
* @dev: network device
* @addr: The MAC address of the device with which the authentication timed out
*
* This function may sleep. The caller must hold the corresponding wdev's
* mutex.
*/
void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
/**
* struct cfg80211_rx_assoc_resp_data - association response data
* @buf: (Re)Association Response frame (header + body)
* @len: length of the frame data
* @uapsd_queues: bitmap of queues configured for uapsd. Same format
* as the AC bitmap in the QoS info field
* @req_ies: information elements from the (Re)Association Request frame
* @req_ies_len: length of req_ies data
* @ap_mld_addr: AP MLD address (in case of MLO)
* @links: per-link information indexed by link ID, use links[0] for
* non-MLO connections
* @links.bss: the BSS that association was requested with, ownership of the
* pointer moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
* @links.status: Set this (along with a BSS pointer) for links that
* were rejected by the AP.
*/
struct cfg80211_rx_assoc_resp_data {
const u8 *buf;
size_t len;
const u8 *req_ies;
size_t req_ies_len;
int uapsd_queues;
const u8 *ap_mld_addr;
struct {
u8 addr[ETH_ALEN] __aligned(2);
struct cfg80211_bss *bss;
u16 status;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
};
/**
* cfg80211_rx_assoc_resp - notification of processed association response
* @dev: network device
* @data: association response data, &struct cfg80211_rx_assoc_resp_data
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
*
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_rx_assoc_resp(struct net_device *dev,
const struct cfg80211_rx_assoc_resp_data *data);
/**
* struct cfg80211_assoc_failure - association failure data
* @ap_mld_addr: AP MLD address, or %NULL
* @bss: list of BSSes, must use entry 0 for non-MLO connections
* (@ap_mld_addr is %NULL)
* @timeout: indicates the association failed due to timeout, otherwise
* the association was abandoned for a reason reported through some
* other API (e.g. deauth RX)
*/
struct cfg80211_assoc_failure {
const u8 *ap_mld_addr;
struct cfg80211_bss *bss[IEEE80211_MLD_MAX_NUM_LINKS];
bool timeout;
};
/**
* cfg80211_assoc_failure - notification of association failure
* @dev: network device
* @data: data describing the association failure
*
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_assoc_failure(struct net_device *dev,
struct cfg80211_assoc_failure *data);
/**
* cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
* @dev: network device
* @buf: 802.11 frame (header + body)
* @len: length of the frame data
* @reconnect: immediate reconnect is desired (include the nl80211 attribute)
*
* This function is called whenever deauthentication has been processed in
* station mode. This includes both received deauthentication frames and
* locally generated ones. This function may sleep. The caller must hold the
* corresponding wdev's mutex.
*/
void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len,
bool reconnect);
/**
* cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame
* @dev: network device
* @buf: received management frame (header + body)
* @len: length of the frame data
*
* This function is called whenever a received deauthentication or disassoc
* frame has been dropped in station mode because of MFP being used but the
* frame was not protected. This is also used to notify reception of a Beacon
* frame that was dropped because it did not include a valid MME MIC while
* beacon protection was enabled (BIGTK configured in station mode).
*
* This function may sleep.
*/
void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev,
const u8 *buf, size_t len);
/**
* cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP)
* @dev: network device
* @addr: The source MAC address of the frame
* @key_type: The key type that the received frame used
* @key_id: Key identifier (0..3). Can be -1 if missing.
* @tsc: The TSC value of the frame that generated the MIC failure (6 octets)
* @gfp: allocation flags
*
* This function is called whenever the local MAC detects a MIC failure in a
* received frame. This matches with MLME-MICHAELMICFAILURE.indication()
* primitive.
*/
void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
enum nl80211_key_type key_type, int key_id,
const u8 *tsc, gfp_t gfp);
/**
* cfg80211_ibss_joined - notify cfg80211 that device joined an IBSS
*
* @dev: network device
* @bssid: the BSSID of the IBSS joined
* @channel: the channel of the IBSS joined
* @gfp: allocation flags
*
* This function notifies cfg80211 that the device joined an IBSS or
* switched to a different BSSID. Before this function can be called,
* either a beacon has to have been received from the IBSS, or one of
* the cfg80211_inform_bss{,_frame} functions must have been called
* with the locally generated beacon -- this guarantees that there is
* always a scan result for this IBSS. cfg80211 will handle the rest.
*/
void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
struct ieee80211_channel *channel, gfp_t gfp);
/**
* cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer
* candidate
*
* @dev: network device
* @macaddr: the MAC address of the new candidate
* @ie: information elements advertised by the peer candidate
* @ie_len: length of the information elements buffer
* @sig_dbm: signal level in dBm
* @gfp: allocation flags
*
* This function notifies cfg80211 that the mesh peer candidate has been
* detected, most likely via a beacon or, less likely, via a probe response.
* cfg80211 then sends a notification to userspace.
*/
void cfg80211_notify_new_peer_candidate(struct net_device *dev,
const u8 *macaddr, const u8 *ie, u8 ie_len,
int sig_dbm, gfp_t gfp);
/**
* DOC: RFkill integration
*
* RFkill integration in cfg80211 is almost invisible to drivers,
* as cfg80211 automatically registers an rfkill instance for each
* wireless device it knows about. Soft kill is also translated
* into disconnecting and turning all interfaces off. Drivers are
* expected to turn off the device when all interfaces are down.
*
* However, devices may have a hard RFkill line, in which case they
* also need to interact with the rfkill subsystem, via cfg80211.
* They can do this with a few helper functions documented here.
*/
/**
* wiphy_rfkill_set_hw_state_reason - notify cfg80211 about hw block state
* @wiphy: the wiphy
* @blocked: block status
* @reason: one of reasons in &enum rfkill_hard_block_reasons
*/
void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
enum rfkill_hard_block_reasons reason);
static inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
{
wiphy_rfkill_set_hw_state_reason(wiphy, blocked,
RFKILL_HARD_BLOCK_SIGNAL);
}
/**
* wiphy_rfkill_start_polling - start polling rfkill
* @wiphy: the wiphy
*/
void wiphy_rfkill_start_polling(struct wiphy *wiphy);
/**
* wiphy_rfkill_stop_polling - stop polling rfkill
* @wiphy: the wiphy
*/
static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
{
rfkill_pause_polling(wiphy->rfkill);
}
/**
* DOC: Vendor commands
*
* Occasionally, there are special protocol or firmware features that
* can't be implemented very openly. For this and similar cases, the
* vendor command functionality allows implementing the features with
* (typically closed-source) userspace and firmware, using nl80211 as
* the configuration mechanism.
*
* A driver supporting vendor commands must register them as an array
* in struct wiphy, with handlers for each one. Each command has an
* OUI and sub command ID to identify it.
*
* Note that this feature should not be (ab)used to implement protocol
* features that could openly be shared across drivers. In particular,
* it must never be required to use vendor commands to implement any
* "normal" functionality that higher-level userspace like connection
* managers etc. need.
*/
struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
int approxlen);
struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
struct wireless_dev *wdev,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
unsigned int portid,
int vendor_event_idx,
int approxlen, gfp_t gfp);
void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
/**
* cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply
* @wiphy: the wiphy
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
*
* This function allocates and pre-fills an skb for a reply to
* a vendor command. Since it is intended for a reply, calling
* it outside of a vendor command's doit() operation is invalid.
*
* The returned skb is pre-filled with some identifying data in
* a way that any data that is put into the skb (with skb_put(),
* nla_put() or similar) will end up being within the
* %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done
* with the skb is adding data for the corresponding userspace tool
* which can then read that data out of the vendor data attribute.
* You must not modify the skb in any other way.
*
* When done, call cfg80211_vendor_cmd_reply() with the skb and return
* its error code as the result of the doit() operation.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
{
return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR,
NL80211_ATTR_VENDOR_DATA, approxlen);
}
/**
* cfg80211_vendor_cmd_reply - send the reply skb
* @skb: The skb, must have been allocated with
* cfg80211_vendor_cmd_alloc_reply_skb()
*
* Since calling this function will usually be the last thing
* before returning from the vendor command doit() you should
* return the error code. Note that this function consumes the
* skb regardless of the return value.
*
* Return: An error code or 0 on success.
*/
int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
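/*
 * Example (a sketch under assumptions, not part of this header): the typical
 * reply sequence inside a vendor command's doit() handler.  The attribute id
 * (1) and the u32 payload are made up for illustration; nla_put_u32() comes
 * from <net/netlink.h>.
 */
static inline int example_vendor_doit_reply(struct wiphy *wiphy, u32 value)
{
	struct sk_buff *skb;

	/* room for one u32 attribute inside %NL80211_ATTR_VENDOR_DATA */
	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(u32));
	if (!skb)
		return -ENOMEM;

	/* attribute id 1 is hypothetical, defined by the vendor command */
	if (nla_put_u32(skb, 1, value)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	/* consumes the skb; its result is returned from doit() */
	return cfg80211_vendor_cmd_reply(skb);
}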
/**
* cfg80211_vendor_cmd_get_sender - get the current sender netlink ID
* @wiphy: the wiphy
*
* Return: the current netlink port ID in a vendor command handler.
*
* Context: May only be called from a vendor command handler
*/
unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy);
/**
* cfg80211_vendor_event_alloc - allocate vendor-specific event skb
* @wiphy: the wiphy
* @wdev: the wireless device
* @event_idx: index of the vendor event in the wiphy's vendor_events
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
* @gfp: allocation flags
*
* This function allocates and pre-fills an skb for an event on the
* vendor-specific multicast group.
*
* If wdev != NULL, both the ifindex and identifier of the specified
* wireless device are added to the event message before the vendor data
* attribute.
*
* When done filling the skb, call cfg80211_vendor_event() with the
* skb to send the event.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev,
int approxlen, int event_idx, gfp_t gfp)
{
return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
NL80211_ATTR_VENDOR_DATA,
0, event_idx, approxlen, gfp);
}
/**
* cfg80211_vendor_event_alloc_ucast - alloc unicast vendor-specific event skb
* @wiphy: the wiphy
* @wdev: the wireless device
* @event_idx: index of the vendor event in the wiphy's vendor_events
* @portid: port ID of the receiver
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
* @gfp: allocation flags
*
* This function allocates and pre-fills an skb for an event to send to
* a specific (userland) socket. This socket would previously have been
* obtained by cfg80211_vendor_cmd_get_sender(), and the caller MUST take
* care to register a netlink notifier to see when the socket closes.
*
* If wdev != NULL, both the ifindex and identifier of the specified
* wireless device are added to the event message before the vendor data
* attribute.
*
* When done filling the skb, call cfg80211_vendor_event() with the
* skb to send the event.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
cfg80211_vendor_event_alloc_ucast(struct wiphy *wiphy,
struct wireless_dev *wdev,
unsigned int portid, int approxlen,
int event_idx, gfp_t gfp)
{
return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
NL80211_ATTR_VENDOR_DATA,
portid, event_idx, approxlen, gfp);
}
/**
* cfg80211_vendor_event - send the event
* @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc()
* @gfp: allocation flags
*
* This function sends the given @skb, which must have been allocated
* by cfg80211_vendor_event_alloc(), as an event. It always consumes it.
*/
static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
{
__cfg80211_send_event_skb(skb, gfp);
}
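/*
 * Example (a sketch, not part of the API): sending a vendor event.  The
 * attribute id and payload are hypothetical; @event_idx must index the
 * driver's vendor_events array in struct wiphy.
 */
static inline void example_vendor_send_event(struct wiphy *wiphy,
					     struct wireless_dev *wdev,
					     int event_idx, u32 value)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_event_alloc(wiphy, wdev, sizeof(u32),
					  event_idx, GFP_KERNEL);
	if (!skb)
		return;

	/* attribute id 1 is made up for this illustration */
	if (nla_put_u32(skb, 1, value)) {
		kfree_skb(skb);
		return;
	}

	/* consumes the skb and multicasts the event */
	cfg80211_vendor_event(skb, GFP_KERNEL);
}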
#ifdef CONFIG_NL80211_TESTMODE
/**
* DOC: Test mode
*
* Test mode is a set of utility functions to allow drivers to
* interact with driver-specific tools to aid, for instance,
* factory programming.
*
* This chapter describes how drivers interact with it. For more
* information see the nl80211 book's chapter on it.
*/
/**
* cfg80211_testmode_alloc_reply_skb - allocate testmode reply
* @wiphy: the wiphy
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
*
* This function allocates and pre-fills an skb for a reply to
* the testmode command. Since it is intended for a reply, calling
* it outside of the @testmode_cmd operation is invalid.
*
* The returned skb is pre-filled with the wiphy index and set up in
* a way that any data that is put into the skb (with skb_put(),
* nla_put() or similar) will end up being within the
* %NL80211_ATTR_TESTDATA attribute, so all that needs to be done
* with the skb is adding data for the corresponding userspace tool
* which can then read that data out of the testdata attribute. You
* must not modify the skb in any other way.
*
* When done, call cfg80211_testmode_reply() with the skb and return
* its error code as the result of the @testmode_cmd operation.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
{
return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
NL80211_ATTR_TESTDATA, approxlen);
}
/**
* cfg80211_testmode_reply - send the reply skb
* @skb: The skb, must have been allocated with
* cfg80211_testmode_alloc_reply_skb()
*
* Since calling this function will usually be the last thing
* before returning from the @testmode_cmd you should return
* the error code. Note that this function consumes the skb
* regardless of the return value.
*
* Return: An error code or 0 on success.
*/
static inline int cfg80211_testmode_reply(struct sk_buff *skb)
{
return cfg80211_vendor_cmd_reply(skb);
}
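/*
 * Example (a sketch, not part of the API): replying from a @testmode_cmd
 * handler.  The attribute id nested inside %NL80211_ATTR_TESTDATA is made up.
 */
static inline int example_testmode_cmd_reply(struct wiphy *wiphy, u32 value)
{
	struct sk_buff *skb;

	skb = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(u32));
	if (!skb)
		return -ENOMEM;

	/* attribute id 1 is hypothetical, defined by the driver's test tool */
	if (nla_put_u32(skb, 1, value)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	/* consumes the skb; return its result from @testmode_cmd */
	return cfg80211_testmode_reply(skb);
}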
/**
* cfg80211_testmode_alloc_event_skb - allocate testmode event
* @wiphy: the wiphy
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
* @gfp: allocation flags
*
* This function allocates and pre-fills an skb for an event on the
* testmode multicast group.
*
* The returned skb is set up in the same way as with
* cfg80211_testmode_alloc_reply_skb() but prepared for an event. As
* there, you should simply add data to it that will then end up in the
* %NL80211_ATTR_TESTDATA attribute. Again, you must not modify the skb
* in any other way.
*
* When done filling the skb, call cfg80211_testmode_event() with the
* skb to send the event.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
{
return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE,
NL80211_ATTR_TESTDATA, 0, -1,
approxlen, gfp);
}
/**
* cfg80211_testmode_event - send the event
* @skb: The skb, must have been allocated with
* cfg80211_testmode_alloc_event_skb()
* @gfp: allocation flags
*
* This function sends the given @skb, which must have been allocated
* by cfg80211_testmode_alloc_event_skb(), as an event. It always
* consumes it.
*/
static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
{
__cfg80211_send_event_skb(skb, gfp);
}
#define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd),
#define CFG80211_TESTMODE_DUMP(cmd) .testmode_dump = (cmd),
#else
#define CFG80211_TESTMODE_CMD(cmd)
#define CFG80211_TESTMODE_DUMP(cmd)
#endif
/**
* struct cfg80211_fils_resp_params - FILS connection response params
* @kek: KEK derived from a successful FILS connection (may be %NULL)
* @kek_len: Length of @kek in octets
* @update_erp_next_seq_num: Boolean value to specify whether the value in
* @erp_next_seq_num is valid.
* @erp_next_seq_num: The next sequence number to use in ERP message in
* FILS Authentication. This value should be specified irrespective of the
* status for a FILS connection.
* @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
* @pmk_len: Length of @pmk in octets
* @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
* used for this FILS connection (may be %NULL).
*/
struct cfg80211_fils_resp_params {
const u8 *kek;
size_t kek_len;
bool update_erp_next_seq_num;
u16 erp_next_seq_num;
const u8 *pmk;
size_t pmk_len;
const u8 *pmkid;
};
/**
* struct cfg80211_connect_resp_params - Connection response params
* @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
* %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
* the real status code for failures. If this call is used to report a
* failure due to a timeout (e.g., not receiving an Authentication frame
* from the AP) instead of an explicit rejection by the AP, -1 is used to
* indicate that this is a failure, but without a status code.
* @timeout_reason is used to report the reason for the timeout in that
* case.
* @req_ie: Association request IEs (may be %NULL)
* @req_ie_len: Association request IEs length
* @resp_ie: Association response IEs (may be %NULL)
* @resp_ie_len: Association response IEs length
* @fils: FILS connection response parameters.
* @timeout_reason: Reason for connection timeout. This is used when the
* connection fails due to a timeout instead of an explicit rejection from
* the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
* not known. This value is used only if @status < 0 to indicate that the
* failure is due to a timeout and not due to explicit rejection by the AP.
* This value is ignored in other cases (@status >= 0).
* @valid_links: For MLO connection, BIT mask of the valid link ids. Otherwise
* zero.
* @ap_mld_addr: For MLO connection, MLD address of the AP. Otherwise %NULL.
* @links: For MLO connection, contains link info for the valid links indicated
* using @valid_links. For non-MLO connection, links[0] contains the
* connected AP info.
* @links.addr: For MLO connection, MAC address of the STA link. Otherwise
* %NULL.
* @links.bssid: For MLO connection, MAC address of the AP link. For non-MLO
* connection, links[0].bssid points to the BSSID of the AP (may be %NULL).
* @links.bss: For MLO connection, entry of bss to which STA link is connected.
* For non-MLO connection, links[0].bss points to entry of bss to which STA
* is connected. It can be obtained through cfg80211_get_bss() (may be
* %NULL). It is recommended to store the bss from the connect_request and
* hold a reference to it and return through this param to avoid a warning
* if the bss is expired during the connection, esp. for those drivers
* implementing connect op. Only one parameter among @bssid and @bss needs
* to be specified.
* @links.status: per-link status code, to report a status code that's not
* %WLAN_STATUS_SUCCESS for a given link, it must also be in the
* @valid_links bitmap and may have a BSS pointer (which is then released)
*/
struct cfg80211_connect_resp_params {
int status;
const u8 *req_ie;
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
struct cfg80211_fils_resp_params fils;
enum nl80211_timeout_reason timeout_reason;
const u8 *ap_mld_addr;
u16 valid_links;
struct {
const u8 *addr;
const u8 *bssid;
struct cfg80211_bss *bss;
u16 status;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
};
/**
* cfg80211_connect_done - notify cfg80211 of connection result
*
* @dev: network device
* @params: connection response parameters
* @gfp: allocation flags
*
* It should be called by the underlying driver once execution of the connection
* request from connect() has been completed. This is similar to
* cfg80211_connect_bss(), but takes a structure pointer for connection response
* parameters. Only one of the functions among cfg80211_connect_bss(),
* cfg80211_connect_result(), cfg80211_connect_timeout(),
* and cfg80211_connect_done() should be called.
*/
void cfg80211_connect_done(struct net_device *dev,
struct cfg80211_connect_resp_params *params,
gfp_t gfp);
/**
* cfg80211_connect_bss - notify cfg80211 of connection result
*
* @dev: network device
* @bssid: the BSSID of the AP
* @bss: Entry of bss to which STA got connected to, can be obtained through
* cfg80211_get_bss() (may be %NULL). But it is recommended to store the
* bss from the connect_request and hold a reference to it and return
* through this param to avoid a warning if the bss is expired during the
* connection, esp. for those drivers implementing connect op.
* Only one parameter among @bssid and @bss needs to be specified.
* @req_ie: association request IEs (may be %NULL)
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
* @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
* %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
* the real status code for failures. If this call is used to report a
* failure due to a timeout (e.g., not receiving an Authentication frame
* from the AP) instead of an explicit rejection by the AP, -1 is used to
* indicate that this is a failure, but without a status code.
* @timeout_reason is used to report the reason for the timeout in that
* case.
* @gfp: allocation flags
* @timeout_reason: reason for connection timeout. This is used when the
* connection fails due to a timeout instead of an explicit rejection from
* the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
* not known. This value is used only if @status < 0 to indicate that the
* failure is due to a timeout and not due to explicit rejection by the AP.
* This value is ignored in other cases (@status >= 0).
*
* It should be called by the underlying driver once execution of the connection
* request from connect() has been completed. This is similar to
* cfg80211_connect_result(), but with the option of identifying the exact bss
* entry for the connection. Only one of the functions among
* cfg80211_connect_bss(), cfg80211_connect_result(),
* cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
struct cfg80211_bss *bss, const u8 *req_ie,
size_t req_ie_len, const u8 *resp_ie,
size_t resp_ie_len, int status, gfp_t gfp,
enum nl80211_timeout_reason timeout_reason)
{
struct cfg80211_connect_resp_params params;
memset(&params, 0, sizeof(params));
params.status = status;
params.links[0].bssid = bssid;
params.links[0].bss = bss;
params.req_ie = req_ie;
params.req_ie_len = req_ie_len;
params.resp_ie = resp_ie;
params.resp_ie_len = resp_ie_len;
params.timeout_reason = timeout_reason;
cfg80211_connect_done(dev, &params, gfp);
}
/**
* cfg80211_connect_result - notify cfg80211 of connection result
*
* @dev: network device
* @bssid: the BSSID of the AP
* @req_ie: association request IEs (may be %NULL)
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
* @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
* %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
* the real status code for failures.
* @gfp: allocation flags
*
* It should be called by the underlying driver once execution of the connection
* request from connect() has been completed. This is similar to
* cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
* one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
* cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
u16 status, gfp_t gfp)
{
cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
resp_ie_len, status, gfp,
NL80211_TIMEOUT_UNSPECIFIED);
}
/**
* cfg80211_connect_timeout - notify cfg80211 of connection timeout
*
* @dev: network device
* @bssid: the BSSID of the AP
* @req_ie: association request IEs (may be %NULL)
* @req_ie_len: association request IEs length
* @gfp: allocation flags
* @timeout_reason: reason for connection timeout.
*
* It should be called by the underlying driver whenever connect() has failed
* in a sequence where no explicit authentication/association rejection was
* received from the AP. This could happen, e.g., due to not being able to send
* out the Authentication or Association Request frame or timing out while
* waiting for the response. Only one of the functions among
* cfg80211_connect_bss(), cfg80211_connect_result(),
* cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
enum nl80211_timeout_reason timeout_reason)
{
cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
gfp, timeout_reason);
}
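/*
 * Example (a sketch, not part of the API): how a fullmac driver might report
 * the outcome of a connect() request using the helpers above, calling exactly
 * one of them per attempt.  The wrapper and its parameters are hypothetical.
 */
static inline void example_report_connect_outcome(struct net_device *dev,
						  const u8 *bssid,
						  bool success, bool timed_out)
{
	if (success)
		cfg80211_connect_result(dev, bssid, NULL, 0, NULL, 0,
					WLAN_STATUS_SUCCESS, GFP_KERNEL);
	else if (timed_out)
		/* no response from the AP, so there is no status code */
		cfg80211_connect_timeout(dev, bssid, NULL, 0, GFP_KERNEL,
					 NL80211_TIMEOUT_AUTH);
	else
		cfg80211_connect_result(dev, bssid, NULL, 0, NULL, 0,
					WLAN_STATUS_UNSPECIFIED_FAILURE,
					GFP_KERNEL);
}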
/**
* struct cfg80211_roam_info - driver initiated roaming information
*
* @req_ie: association request IEs (may be %NULL)
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
* @fils: FILS related roaming information.
* @valid_links: For MLO roaming, BIT mask of the new valid links is set.
* Otherwise zero.
* @ap_mld_addr: For MLO roaming, MLD address of the new AP. Otherwise %NULL.
* @links: For MLO roaming, contains new link info for the valid links set in
* @valid_links. For non-MLO roaming, links[0] contains the new AP info.
* @links.addr: For MLO roaming, MAC address of the STA link. Otherwise %NULL.
* @links.bssid: For MLO roaming, MAC address of the new AP link. For non-MLO
* roaming, links[0].bssid points to the BSSID of the new AP. May be
* %NULL if %links.bss is set.
* @links.channel: the channel of the new AP.
* @links.bss: For MLO roaming, entry of new bss to which STA link got
* roamed. For non-MLO roaming, links[0].bss points to entry of bss to
* which STA got roamed (may be %NULL if %links.bssid is set)
*/
struct cfg80211_roam_info {
const u8 *req_ie;
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
struct cfg80211_fils_resp_params fils;
const u8 *ap_mld_addr;
u16 valid_links;
struct {
const u8 *addr;
const u8 *bssid;
struct ieee80211_channel *channel;
struct cfg80211_bss *bss;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
};
/**
* cfg80211_roamed - notify cfg80211 of roaming
*
* @dev: network device
* @info: information about the new BSS. struct &cfg80211_roam_info.
* @gfp: allocation flags
*
* This function may be called with the driver passing either the BSSID of the
* new AP or passing the bss entry to avoid a race in timeout of the bss entry.
* It should be called by the underlying driver whenever it roamed from one AP
* to another while connected. Drivers which have roaming implemented in
* firmware should pass the bss entry to avoid a race in bss entry timeout where
* the bss entry of the new AP is seen in the driver, but gets timed out by the
* time it is accessed in __cfg80211_roamed() due to delay in scheduling
* rdev->event_work. In case of any failures, the reference is released
* either in cfg80211_roamed() or in __cfg80211_roamed(). Otherwise, it will be
* released while disconnecting from the current bss.
*/
void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
gfp_t gfp);
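/*
 * Example (a sketch, not part of the API): reporting a firmware-initiated
 * roam.  As recommended above, the driver passes the bss entry it already
 * holds a reference to; the wrapper name is hypothetical.
 */
static inline void example_report_roam(struct net_device *dev,
				       struct cfg80211_bss *new_bss)
{
	struct cfg80211_roam_info info = {};

	/* non-MLO: links[0] describes the new AP */
	info.links[0].bss = new_bss;

	cfg80211_roamed(dev, &info, GFP_KERNEL);
}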
/**
* cfg80211_port_authorized - notify cfg80211 of successful security association
*
* @dev: network device
* @peer_addr: BSSID of the AP/P2P GO in case of STA/GC or STA/GC MAC address
* in case of AP/P2P GO
* @td_bitmap: transition disable policy
* @td_bitmap_len: Length of transition disable policy
* @gfp: allocation flags
*
* This function should be called by a driver that supports 4 way handshake
* offload after a security association was successfully established (i.e.,
* the 4 way handshake was completed successfully). The call to this function
* should be preceded with a call to cfg80211_connect_result(),
* cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to
* indicate the 802.11 association.
* This function can also be called by AP/P2P GO driver that supports
* authentication offload. In this case the peer_mac passed is that of
* associated STA/GC.
*/
void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr,
const u8* td_bitmap, u8 td_bitmap_len, gfp_t gfp);
/**
* cfg80211_disconnected - notify cfg80211 that connection was dropped
*
* @dev: network device
* @ie: information elements of the deauth/disassoc frame (may be %NULL)
* @ie_len: length of IEs
* @reason: reason code for the disconnection, set it to 0 if unknown
* @locally_generated: disconnection was requested locally
* @gfp: allocation flags
*
* After it calls this function, the driver should enter an idle state
* and not try to connect to any AP any more.
*/
void cfg80211_disconnected(struct net_device *dev, u16 reason,
const u8 *ie, size_t ie_len,
bool locally_generated, gfp_t gfp);
/**
* cfg80211_ready_on_channel - notification of remain_on_channel start
* @wdev: wireless device
* @cookie: the request cookie
* @chan: The current channel (from remain_on_channel request)
* @duration: Duration in milliseconds that the driver intends to remain on the
* channel
* @gfp: allocation flags
*/
void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
unsigned int duration, gfp_t gfp);
/**
* cfg80211_remain_on_channel_expired - remain_on_channel duration expired
* @wdev: wireless device
* @cookie: the request cookie
* @chan: The current channel (from remain_on_channel request)
* @gfp: allocation flags
*/
void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
gfp_t gfp);
/**
* cfg80211_tx_mgmt_expired - tx_mgmt duration expired
* @wdev: wireless device
* @cookie: the requested cookie
* @chan: The current channel (from tx_mgmt request)
* @gfp: allocation flags
*/
void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan, gfp_t gfp);
/**
* cfg80211_sinfo_alloc_tid_stats - allocate per-tid statistics.
*
* @sinfo: the station information
* @gfp: allocation flags
*
* Return: 0 on success. Non-zero on error.
*/
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
/**
* cfg80211_link_sinfo_alloc_tid_stats - allocate per-tid statistics.
*
* @link_sinfo: the link station information
* @gfp: allocation flags
*
* Return: 0 on success. Non-zero on error.
*/
int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo,
gfp_t gfp);
/**
* cfg80211_sinfo_release_content - release contents of station info
* @sinfo: the station information
*
* Releases any potentially allocated sub-information of the station
* information, but not the struct itself (since it's typically on
* the stack.)
*/
static inline void cfg80211_sinfo_release_content(struct station_info *sinfo)
{
kfree(sinfo->pertid);
for (int link_id = 0; link_id < ARRAY_SIZE(sinfo->links); link_id++) {
if (sinfo->links[link_id]) {
kfree(sinfo->links[link_id]->pertid);
kfree(sinfo->links[link_id]);
}
}
}
/**
* cfg80211_new_sta - notify userspace about station
*
* @dev: the netdev
* @mac_addr: the station's address
* @sinfo: the station information
* @gfp: allocation flags
*/
void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
/**
* cfg80211_del_sta_sinfo - notify userspace about deletion of a station
* @dev: the netdev
* @mac_addr: the station's address. For MLD station, MLD address is used.
* @sinfo: the station information/statistics
* @gfp: allocation flags
*/
void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
/**
* cfg80211_del_sta - notify userspace about deletion of a station
*
* @dev: the netdev
* @mac_addr: the station's address. For MLD station, MLD address is used.
* @gfp: allocation flags
*/
static inline void cfg80211_del_sta(struct net_device *dev,
const u8 *mac_addr, gfp_t gfp)
{
cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp);
}
/**
* cfg80211_conn_failed - connection request failed notification
*
* @dev: the netdev
* @mac_addr: the station's address
* @reason: the reason for connection failure
* @gfp: allocation flags
*
* This function is called whenever a station tries to connect to an AP but
* the AP rejects the connection for some reason.
*
* The reason for connection failure can be any of the values from
* &enum nl80211_connect_failed_reason.
*/
void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
enum nl80211_connect_failed_reason reason,
gfp_t gfp);
/**
* struct cfg80211_rx_info - received management frame info
*
* @freq: Frequency on which the frame was received in kHz
* @sig_dbm: signal strength in dBm, or 0 if unknown
* @have_link_id: indicates the frame was received on a link of
* an MLD, i.e. the @link_id field is valid
* @link_id: the ID of the link the frame was received on
* @buf: Management frame (header + body)
* @len: length of the frame data
* @flags: flags, as defined in &enum nl80211_rxmgmt_flags
* @rx_tstamp: Hardware timestamp of frame RX in nanoseconds
* @ack_tstamp: Hardware timestamp of ack TX in nanoseconds
*/
struct cfg80211_rx_info {
int freq;
int sig_dbm;
bool have_link_id;
u8 link_id;
const u8 *buf;
size_t len;
u32 flags;
u64 rx_tstamp;
u64 ack_tstamp;
};
/**
* cfg80211_rx_mgmt_ext - management frame notification with extended info
* @wdev: wireless device receiving the frame
* @info: RX info as defined in struct cfg80211_rx_info
*
* This function is called whenever an Action frame is received for a station
* mode interface, but is not processed in kernel.
*
* Return: %true if a user space application has registered for this frame.
* For action frames, that makes it responsible for rejecting unrecognized
* action frames; %false otherwise, in which case for action frames the
* driver is responsible for rejecting the frame.
*/
bool cfg80211_rx_mgmt_ext(struct wireless_dev *wdev,
struct cfg80211_rx_info *info);
/**
* cfg80211_rx_mgmt_khz - notification of received, unprocessed management frame
* @wdev: wireless device receiving the frame
* @freq: Frequency on which the frame was received in KHz
* @sig_dbm: signal strength in dBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
* @flags: flags, as defined in enum nl80211_rxmgmt_flags
*
* This function is called whenever an Action frame is received for a station
* mode interface, but is not processed in kernel.
*
* Return: %true if a user space application has registered for this frame.
* For action frames, that makes it responsible for rejecting unrecognized
* action frames; %false otherwise, in which case for action frames the
* driver is responsible for rejecting the frame.
*/
static inline bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq,
int sig_dbm, const u8 *buf, size_t len,
u32 flags)
{
struct cfg80211_rx_info info = {
.freq = freq,
.sig_dbm = sig_dbm,
.buf = buf,
.len = len,
.flags = flags
};
return cfg80211_rx_mgmt_ext(wdev, &info);
}
/**
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
* @wdev: wireless device receiving the frame
* @freq: Frequency on which the frame was received in MHz
* @sig_dbm: signal strength in dBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
* @flags: flags, as defined in enum nl80211_rxmgmt_flags
*
* This function is called whenever an Action frame is received for a station
* mode interface, but is not processed in kernel.
*
* Return: %true if a user space application has registered for this frame.
* For action frames, that makes it responsible for rejecting unrecognized
* action frames; %false otherwise, in which case for action frames the
* driver is responsible for rejecting the frame.
*/
static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq,
int sig_dbm, const u8 *buf, size_t len,
u32 flags)
{
struct cfg80211_rx_info info = {
.freq = MHZ_TO_KHZ(freq),
.sig_dbm = sig_dbm,
.buf = buf,
.len = len,
.flags = flags
};
return cfg80211_rx_mgmt_ext(wdev, &info);
}
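/*
 * Example (a sketch, not part of the API): reporting a frame received on a
 * specific link of an MLD using the extended variant; the simpler wrappers
 * above cover the non-MLO case.  The wrapper name is hypothetical.
 */
static inline bool example_rx_mgmt_on_link(struct wireless_dev *wdev,
					   int freq_khz, u8 link_id,
					   const u8 *frame, size_t len)
{
	struct cfg80211_rx_info info = {
		.freq = freq_khz,
		.have_link_id = true,
		.link_id = link_id,
		.buf = frame,
		.len = len,
	};

	/* returns whether userspace registered for this frame */
	return cfg80211_rx_mgmt_ext(wdev, &info);
}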
/**
* struct cfg80211_tx_status - TX status for management frame information
*
* @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
* @tx_tstamp: hardware TX timestamp in nanoseconds
* @ack_tstamp: hardware ack RX timestamp in nanoseconds
* @buf: Management frame (header + body)
* @len: length of the frame data
* @ack: Whether frame was acknowledged
*/
struct cfg80211_tx_status {
u64 cookie;
u64 tx_tstamp;
u64 ack_tstamp;
const u8 *buf;
size_t len;
bool ack;
};
/**
* cfg80211_mgmt_tx_status_ext - TX status notification with extended info
* @wdev: wireless device receiving the frame
* @status: TX status data
* @gfp: context flags
*
* This function is called whenever a management frame was requested to be
* transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
* transmission attempt with extended info.
*/
void cfg80211_mgmt_tx_status_ext(struct wireless_dev *wdev,
struct cfg80211_tx_status *status, gfp_t gfp);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
* @wdev: wireless device receiving the frame
* @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
* @buf: Management frame (header + body)
* @len: length of the frame data
* @ack: Whether frame was acknowledged
* @gfp: context flags
*
* This function is called whenever a management frame was requested to be
* transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
* transmission attempt.
*/
static inline void cfg80211_mgmt_tx_status(struct wireless_dev *wdev,
u64 cookie, const u8 *buf,
size_t len, bool ack, gfp_t gfp)
{
struct cfg80211_tx_status status = {
.cookie = cookie,
.buf = buf,
.len = len,
.ack = ack
};
cfg80211_mgmt_tx_status_ext(wdev, &status, gfp);
}
/**
* cfg80211_control_port_tx_status - notification of TX status for control
* port frames
* @wdev: wireless device receiving the frame
* @cookie: Cookie returned by cfg80211_ops::tx_control_port()
* @buf: Data frame (header + body)
* @len: length of the frame data
* @ack: Whether frame was acknowledged
* @gfp: context flags
*
* This function is called whenever a control port frame was requested to be
* transmitted with cfg80211_ops::tx_control_port() to report the TX status of
* the transmission attempt.
*/
void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack,
gfp_t gfp);
/**
* cfg80211_rx_control_port - notification about a received control port frame
* @dev: The device the frame matched to
* @skb: The skbuf with the control port frame. It is assumed that the skbuf
* is 802.3 formatted (with 802.3 header). The skb can be non-linear.
* This function does not take ownership of the skb, so the caller is
* responsible for any cleanup. The caller must also ensure that
* skb->protocol is set appropriately.
* @unencrypted: Whether the frame was received unencrypted
* @link_id: the link the frame was received on, -1 if not applicable or unknown
*
* This function is used to inform userspace about a received control port
* frame. It should only be used if userspace indicated it wants to receive
* control port frames over nl80211.
*
* The frame is the data portion of the 802.3 or 802.11 data frame with all
* network layer headers removed (e.g. the raw EAPoL frame).
*
* Return: %true if the frame was passed to userspace
*/
bool cfg80211_rx_control_port(struct net_device *dev, struct sk_buff *skb,
bool unencrypted, int link_id);
/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
* @dev: network device
* @rssi_event: the triggered RSSI event
* @rssi_level: new RSSI level value or 0 if not available
* @gfp: context flags
*
* This function is called when a configured connection quality monitoring
* rssi threshold reached event occurs.
*/
void cfg80211_cqm_rssi_notify(struct net_device *dev,
enum nl80211_cqm_rssi_threshold_event rssi_event,
s32 rssi_level, gfp_t gfp);
/**
* cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
* @dev: network device
* @peer: peer's MAC address
* @num_packets: how many packets were lost -- should be a fixed threshold
* but probably no less than maybe 50, or maybe a throughput dependent
* threshold (to account for temporary interference)
* @gfp: context flags
*/
void cfg80211_cqm_pktloss_notify(struct net_device *dev,
const u8 *peer, u32 num_packets, gfp_t gfp);
/**
* cfg80211_cqm_txe_notify - TX error rate event
* @dev: network device
* @peer: peer's MAC address
* @num_packets: how many packets were lost
* @rate: % of packets which failed transmission
* @intvl: interval (in s) over which the TX failure threshold was breached.
* @gfp: context flags
*
* Notify userspace when configured % TX failures over number of packets in a
* given interval is exceeded.
*/
void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
/**
* cfg80211_cqm_beacon_loss_notify - beacon loss event
* @dev: network device
* @gfp: context flags
*
* Notify userspace about beacon loss from the connected AP.
*/
void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp);
/**
* __cfg80211_radar_event - radar detection event
* @wiphy: the wiphy
* @chandef: chandef for the current channel
* @offchan: the radar has been detected on the offchannel chain
* @gfp: context flags
*
* This function is called when a radar is detected on the current channel.
*/
void __cfg80211_radar_event(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
bool offchan, gfp_t gfp);
static inline void
cfg80211_radar_event(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
gfp_t gfp)
{
__cfg80211_radar_event(wiphy, chandef, false, gfp);
}
static inline void
cfg80211_background_radar_event(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
gfp_t gfp)
{
__cfg80211_radar_event(wiphy, chandef, true, gfp);
}
/**
* cfg80211_sta_opmode_change_notify - STA's ht/vht operation mode change event
* @dev: network device
* @mac: MAC address of a station which opmode got modified
* @sta_opmode: station's current opmode value
* @gfp: context flags
*
* The driver should call this function when a station's opmode is modified
* via an action frame.
*/
void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
struct sta_opmode_info *sta_opmode,
gfp_t gfp);
/**
* cfg80211_cac_event - Channel availability check (CAC) event
* @netdev: network device
* @chandef: chandef for the current channel
* @event: type of event
* @gfp: context flags
* @link_id: valid link_id for MLO operation or 0 otherwise.
*
* This function is called when a Channel availability check (CAC) is finished
* or aborted. This must be called to notify the completion of a CAC process,
* also by full-MAC drivers.
*/
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
enum nl80211_radar_event event, gfp_t gfp,
unsigned int link_id);
/**
* cfg80211_background_cac_abort - Channel Availability Check offchan abort event
* @wiphy: the wiphy
*
* This function is called by the driver when a Channel Availability Check
* (CAC) is aborted by an offchannel dedicated chain.
*/
void cfg80211_background_cac_abort(struct wiphy *wiphy);
/**
* cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
* @dev: network device
* @bssid: BSSID of AP (to avoid races)
* @replay_ctr: new replay counter
* @gfp: allocation flags
*/
void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
/**
* cfg80211_pmksa_candidate_notify - notify about PMKSA caching candidate
* @dev: network device
* @index: candidate index (the smaller the index, the higher the priority)
* @bssid: BSSID of AP
* @preauth: Whether AP advertises support for RSN pre-authentication
* @gfp: allocation flags
*/
void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
const u8 *bssid, bool preauth, gfp_t gfp);
/**
* cfg80211_rx_spurious_frame - inform userspace about a spurious frame
* @dev: The device the frame matched to
* @link_id: the link the frame was received on, -1 if not applicable or unknown
* @addr: the transmitter address
* @gfp: context flags
*
* This function is used in AP mode (only!) to inform userspace that
* a spurious class 3 frame was received, to be able to deauth the
* sender.
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr,
int link_id, gfp_t gfp);
/**
* cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
* @dev: The device the frame matched to
* @addr: the transmitter address
* @link_id: the link the frame was received on, -1 if not applicable or unknown
* @gfp: context flags
*
* This function is used in AP mode (only!) to inform userspace that
* an associated station sent a 4addr frame but that wasn't expected.
* It is allowed and desirable to send this event only once for each
* station to avoid event flooding.
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr,
int link_id, gfp_t gfp);
/**
* cfg80211_probe_status - notify userspace about probe status
* @dev: the device the probe was sent on
* @addr: the address of the peer
* @cookie: the cookie filled in @probe_client previously
* @acked: indicates whether probe was acked or not
* @ack_signal: signal strength (in dBm) of the ACK frame.
* @is_valid_ack_signal: indicates the ack_signal is valid or not.
* @gfp: allocation flags
*/
void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
u64 cookie, bool acked, s32 ack_signal,
bool is_valid_ack_signal, gfp_t gfp);
/**
* cfg80211_report_obss_beacon_khz - report beacon from other APs
* @wiphy: The wiphy that received the beacon
* @frame: the frame
* @len: length of the frame
* @freq: frequency the frame was received on in KHz
* @sig_dbm: signal strength in dBm, or 0 if unknown
*
* Use this function to report to userspace when a beacon was
* received. It is not useful to call this when there is no
* netdev that is in AP/GO mode.
*/
void cfg80211_report_obss_beacon_khz(struct wiphy *wiphy, const u8 *frame,
size_t len, int freq, int sig_dbm);
/**
* cfg80211_report_obss_beacon - report beacon from other APs
* @wiphy: The wiphy that received the beacon
* @frame: the frame
* @len: length of the frame
* @freq: frequency the frame was received on
* @sig_dbm: signal strength in dBm, or 0 if unknown
*
* Use this function to report to userspace when a beacon was
* received. It is not useful to call this when there is no
* netdev that is in AP/GO mode.
*/
static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
const u8 *frame, size_t len,
int freq, int sig_dbm)
{
cfg80211_report_obss_beacon_khz(wiphy, frame, len, MHZ_TO_KHZ(freq),
sig_dbm);
}
/**
* struct cfg80211_beaconing_check_config - beacon check configuration
* @iftype: the interface type to check for
* @relax: allow IR-relaxation conditions to apply (e.g. another
* interface connected already on the same channel)
* NOTE: If this is set, wiphy mutex must be held.
* @reg_power: &enum ieee80211_ap_reg_power value indicating the
* advertised/used 6 GHz regulatory power setting
*/
struct cfg80211_beaconing_check_config {
enum nl80211_iftype iftype;
enum ieee80211_ap_reg_power reg_power;
bool relax;
};
/**
* cfg80211_reg_check_beaconing - check if beaconing is allowed
* @wiphy: the wiphy
* @chandef: the channel definition
* @cfg: additional parameters for the checking
*
* Return: %true if there is no secondary channel or the secondary channel(s)
* can be used for beaconing (i.e. is not a radar channel etc.)
*/
bool cfg80211_reg_check_beaconing(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
struct cfg80211_beaconing_check_config *cfg);
/**
* cfg80211_reg_can_beacon - check if beaconing is allowed
* @wiphy: the wiphy
* @chandef: the channel definition
* @iftype: interface type
*
* Return: %true if there is no secondary channel or the secondary channel(s)
* can be used for beaconing (i.e. is not a radar channel etc.)
*/
static inline bool
cfg80211_reg_can_beacon(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
enum nl80211_iftype iftype)
{
struct cfg80211_beaconing_check_config config = {
.iftype = iftype,
};
return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
}
/**
* cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
* @wiphy: the wiphy
* @chandef: the channel definition
* @iftype: interface type
*
* Return: %true if there is no secondary channel or the secondary channel(s)
* can be used for beaconing (i.e. is not a radar channel etc.). This version
* also checks if IR-relaxation conditions apply, to allow beaconing under
* more permissive conditions.
*
* Context: Requires the wiphy mutex to be held.
*/
static inline bool
cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
enum nl80211_iftype iftype)
{
struct cfg80211_beaconing_check_config config = {
.iftype = iftype,
.relax = true,
};
return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
}
/**
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
* @dev: the device which switched channels
* @chandef: the new channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
*
* Caller must hold wiphy mutex, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id);
/**
* cfg80211_ch_switch_started_notify - notify channel switch start
* @dev: the device on which the channel switch started
* @chandef: the future channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
*
* Inform userspace about the channel switch that has just started, so that
* it can take appropriate actions (e.g. starting a channel switch on other
* vifs), if necessary.
*/
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
bool quiet);
/**
* ieee80211_operating_class_to_band - convert operating class to band
*
* @operating_class: the operating class to convert
* @band: band pointer to fill
*
* Return: %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_operating_class_to_band(u8 operating_class,
enum nl80211_band *band);
/**
* ieee80211_operating_class_to_chandef - convert operating class to chandef
*
* @operating_class: the operating class to convert
* @chan: the ieee80211_channel to convert
* @chandef: a pointer to the resulting chandef
*
* Return: %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_operating_class_to_chandef(u8 operating_class,
struct ieee80211_channel *chan,
struct cfg80211_chan_def *chandef);
/**
* ieee80211_chandef_to_operating_class - convert chandef to operation class
*
* @chandef: the chandef to convert
* @op_class: a pointer to the resulting operating class
*
* Return: %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class);
/**
* ieee80211_chandef_to_khz - convert chandef to frequency in KHz
*
* @chandef: the chandef to convert
*
* Return: the center frequency of chandef (1st segment) in KHz.
*/
static inline u32
ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef)
{
return MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
}
/**
* cfg80211_tdls_oper_request - request userspace to perform TDLS operation
* @dev: the device on which the operation is requested
* @peer: the MAC address of the peer device
* @oper: the requested TDLS operation (NL80211_TDLS_SETUP or
* NL80211_TDLS_TEARDOWN)
* @reason_code: the reason code for teardown request
* @gfp: allocation flags
*
* This function is used to request userspace to perform TDLS operation that
* requires knowledge of keys, i.e., link setup or teardown when the AP
* connection uses encryption. This is an optional mechanism for the driver to use
* if it can automatically determine when a TDLS link could be useful (e.g.,
* based on traffic and signal strength for a peer).
*/
void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
enum nl80211_tdls_operation oper,
u16 reason_code, gfp_t gfp);
/**
* cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
* @rate: given rate_info to calculate bitrate from
*
* Return: calculated bitrate
*/
u32 cfg80211_calculate_bitrate(struct rate_info *rate);
/**
* cfg80211_unregister_wdev - remove the given wdev
* @wdev: struct wireless_dev to remove
*
* This function removes the device so it can no longer be used. It is necessary
* to call this function even when cfg80211 requests the removal of the device
* by calling the del_virtual_intf() callback. The function must also be called
* when the driver wishes to unregister the wdev, e.g. when the hardware device
* is unbound from the driver.
*
* Context: Requires the RTNL and wiphy mutex to be held.
*/
void cfg80211_unregister_wdev(struct wireless_dev *wdev);
/**
* cfg80211_register_netdevice - register the given netdev
* @dev: the netdev to register
*
* Note: In contexts coming from cfg80211 callbacks, you must call this rather
* than register_netdevice(); unregister_netdev() is impossible as the RTNL is
* held. Otherwise, both register_netdevice() and register_netdev() are usable
* instead as well.
*
* Context: Requires the RTNL and wiphy mutex to be held.
*
* Return: 0 on success. Non-zero on error.
*/
int cfg80211_register_netdevice(struct net_device *dev);
/**
* cfg80211_unregister_netdevice - unregister the given netdev
* @dev: the netdev to unregister
*
* Note: In contexts coming from cfg80211 callbacks, you must call this rather
* than unregister_netdevice(); unregister_netdev() is impossible as the RTNL
* is held. Otherwise, both unregister_netdevice() and unregister_netdev() are
* usable instead as well.
*
* Context: Requires the RTNL and wiphy mutex to be held.
*/
static inline void cfg80211_unregister_netdevice(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_CFG80211)
cfg80211_unregister_wdev(dev->ieee80211_ptr);
#endif
}
/**
* struct cfg80211_ft_event_params - FT Information Elements
* @ies: FT IEs
* @ies_len: length of the FT IE in bytes
* @target_ap: target AP's MAC address
* @ric_ies: RIC IE
* @ric_ies_len: length of the RIC IE in bytes
*/
struct cfg80211_ft_event_params {
const u8 *ies;
size_t ies_len;
const u8 *target_ap;
const u8 *ric_ies;
size_t ric_ies_len;
};
/**
* cfg80211_ft_event - notify userspace about FT IE and RIC IE
* @netdev: network device
* @ft_event: IE information
*/
void cfg80211_ft_event(struct net_device *netdev,
struct cfg80211_ft_event_params *ft_event);
/**
* cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer
* @ies: the input IE buffer
* @len: the input length
* @attr: the attribute ID to find
* @buf: output buffer, can be %NULL if the data isn't needed, e.g.
* if the function is only called to get the needed buffer size
* @bufsize: size of the output buffer
*
* The function finds a given P2P attribute in the (vendor) IEs and
* copies its contents to the given buffer.
*
* Return: A negative error code (-%EILSEQ or -%ENOENT) if the data is
* malformed or the attribute can't be found (respectively), or the
* length of the found attribute (which can be zero).
*/
int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
enum ieee80211_p2p_attr_id attr,
u8 *buf, unsigned int bufsize);
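/*
 * Example (a sketch, not part of the API): the two-pass pattern enabled by
 * passing a %NULL @buf, first querying the attribute length and then copying
 * it.  The wrapper name and error handling are illustrative only.
 */
static inline int example_copy_p2p_attr(const u8 *ies, unsigned int len,
					enum ieee80211_p2p_attr_id attr,
					u8 *out, unsigned int outsize)
{
	int needed;

	/* first pass: only determine how large the attribute is */
	needed = cfg80211_get_p2p_attr(ies, len, attr, NULL, 0);
	if (needed < 0)
		return needed;
	if ((unsigned int)needed > outsize)
		return -ENOSPC;

	/* second pass: actually copy the attribute contents */
	return cfg80211_get_p2p_attr(ies, len, attr, out, outsize);
}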
/**
* ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC)
* @ies: the IE buffer
* @ielen: the length of the IE buffer
* @ids: an array with element IDs that are allowed before
* the split. A WLAN_EID_EXTENSION value means that the next
* EID in the list is a sub-element of the EXTENSION IE.
* @n_ids: the size of the element ID array
* @after_ric: array IE types that come after the RIC element
* @n_after_ric: size of the @after_ric array
* @offset: offset where to start splitting in the buffer
*
* This function splits an IE buffer by updating the @offset
* variable to point to the location where the buffer should be
* split.
*
* It assumes that the given IE buffer is well-formed; this
* has to be guaranteed by the caller!
*
* It also assumes that the IEs in the buffer are ordered
* correctly; if not, the result of using this function will not
* be ordered correctly either, i.e. it does no reordering.
*
* Return: The offset where the next part of the buffer starts, which
* may be @ielen if the entire (remainder) of the buffer should be
* used.
*/
size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
const u8 *ids, int n_ids,
const u8 *after_ric, int n_after_ric,
size_t offset);
/**
* ieee80211_ie_split - split an IE buffer according to ordering
* @ies: the IE buffer
* @ielen: the length of the IE buffer
* @ids: an array with element IDs that are allowed before
* the split. A WLAN_EID_EXTENSION value means that the next
* EID in the list is a sub-element of the EXTENSION IE.
* @n_ids: the size of the element ID array
* @offset: offset where to start splitting in the buffer
*
* This function splits an IE buffer by updating the @offset
* variable to point to the location where the buffer should be
* split.
*
* It assumes that the given IE buffer is well-formed; this
* has to be guaranteed by the caller!
*
* It also assumes that the IEs in the buffer are ordered
* correctly; if not, the result of using this function will not
* be ordered correctly either, i.e. it does no reordering.
*
* Return: The offset where the next part of the buffer starts, which
* may be @ielen if the entire (remainder) of the buffer should be
* used.
*/
static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
const u8 *ids, int n_ids, size_t offset)
{
return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset);
}
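/*
 * Example (a sketch, not part of the API): split an IE buffer so that a few
 * elements stay before the split point and everything else follows.  The
 * element ID list is illustrative; it must match the ordering rules of the
 * frame being built.
 */
static inline size_t example_ie_split_basic(const u8 *ies, size_t ielen)
{
	static const u8 before[] = {
		WLAN_EID_SSID,
		WLAN_EID_SUPP_RATES,
		WLAN_EID_EXT_SUPP_RATES,
	};

	/* offset at which the remaining (later-ordered) elements start */
	return ieee80211_ie_split(ies, ielen, before, ARRAY_SIZE(before), 0);
}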
/**
* ieee80211_fragment_element - fragment the last element in skb
* @skb: The skbuf that the element was added to
* @len_pos: Pointer to length of the element to fragment
* @frag_id: The element ID to use for fragments
*
* This function fragments all data after @len_pos, adding fragmentation
* elements with the given ID as appropriate. The SKB will grow in size
* accordingly.
*/
void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id);
/**
* cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
* @wdev: the wireless device reporting the wakeup
* @wakeup: the wakeup report
* @gfp: allocation flags
*
* This function reports that the given device woke up. If it
* caused the wakeup, report the reason(s), otherwise you may
* pass %NULL as the @wakeup parameter to advertise that something
* else caused the wakeup.
*/
void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
struct cfg80211_wowlan_wakeup *wakeup,
gfp_t gfp);
/**
* cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver.
*
* @wdev: the wireless device for which critical protocol is stopped.
* @gfp: allocation flags
*
* This function can be called by the driver to indicate it has reverted
* operation back to normal. One reason could be that the duration given
* by .crit_proto_start() has expired.
*/
void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
/**
* ieee80211_get_num_supported_channels - get number of channels device has
* @wiphy: the wiphy
*
* Return: the number of channels supported by the device.
*/
unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
/**
* cfg80211_check_combinations - check interface combinations
*
* @wiphy: the wiphy
* @params: the interface combinations parameter
*
* This function can be called by the driver to check whether a
* combination of interfaces and their types are allowed according to
* the interface combinations.
*
* Return: 0 if combinations are allowed. Non-zero on error.
*/
int cfg80211_check_combinations(struct wiphy *wiphy,
struct iface_combination_params *params);
/**
* cfg80211_iter_combinations - iterate over matching combinations
*
* @wiphy: the wiphy
* @params: the interface combinations parameter
* @iter: function to call for each matching combination
* @data: pointer to pass to iter function
*
* This function can be called by the driver to check what possible
* combinations it fits in at a given moment, e.g. for channel switching
* purposes.
*
* Return: 0 on success. Non-zero on error.
*/
int cfg80211_iter_combinations(struct wiphy *wiphy,
struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
/**
* cfg80211_get_radio_idx_by_chan - get the radio index by the channel
*
* @wiphy: the wiphy
* @chan: channel for which the supported radio index is required
*
* Return: radio index on success or -EINVAL otherwise
*/
int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
const struct ieee80211_channel *chan);
/**
* cfg80211_stop_iface - trigger interface disconnection
*
* @wiphy: the wiphy
* @wdev: wireless device
* @gfp: context flags
*
* Trigger interface to be stopped as if AP was stopped, IBSS/mesh left, STA
* disconnected.
*
* Note: This doesn't need any locks and is asynchronous.
*/
void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
gfp_t gfp);
/**
* cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy
* @wiphy: the wiphy to shut down
*
* This function shuts down all interfaces belonging to this wiphy by
* calling dev_close() (and treating non-netdev interfaces as needed).
* It shouldn't really be used unless there are some fatal device errors
* that really can't be recovered in any other way.
*
* Callers must hold the RTNL and be able to deal with callbacks into
* the driver while the function is running.
*/
void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
/**
* wiphy_ext_feature_set - set the extended feature flag
*
* @wiphy: the wiphy to modify.
* @ftidx: extended feature bit index.
*
* The extended features are flagged in multiple bytes (see
* &struct wiphy.@ext_features)
*/
static inline void wiphy_ext_feature_set(struct wiphy *wiphy,
enum nl80211_ext_feature_index ftidx)
{
u8 *ft_byte;
ft_byte = &wiphy->ext_features[ftidx / 8];
*ft_byte |= BIT(ftidx % 8);
}
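/*
 * Example (a sketch, not part of the API): flagging extended features during
 * driver setup, before wiphy_register().  The chosen feature bits are only
 * for illustration.
 */
static inline void example_advertise_ext_features(struct wiphy *wiphy)
{
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
}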
/**
* wiphy_ext_feature_isset - check the extended feature flag
*
* @wiphy: the wiphy to modify.
* @ftidx: extended feature bit index.
*
* The extended features are flagged in multiple bytes (see
* &struct wiphy.@ext_features)
*
* Return: %true if extended feature flag is set, %false otherwise
*/
static inline bool
wiphy_ext_feature_isset(struct wiphy *wiphy,
enum nl80211_ext_feature_index ftidx)
{
u8 ft_byte;
ft_byte = wiphy->ext_features[ftidx / 8];
return (ft_byte & BIT(ftidx % 8)) != 0;
}
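/*
 * Illustrative sketch only (helper name and feature choice are made up):
 * drivers typically set extended feature bits once, before wiphy_register(),
 * and the same bit can later be tested again with the helper above.
 */
static inline bool example_advertise_vht_ibss(struct wiphy *wiphy)
{
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	return wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
}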
/**
* cfg80211_free_nan_func - free NAN function
* @f: NAN function that should be freed
*
* Frees the NAN function and all of its allocated members.
*/
void cfg80211_free_nan_func(struct cfg80211_nan_func *f);
/**
* struct cfg80211_nan_match_params - NAN match parameters
* @type: the type of the function that triggered a match. If it is
* %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber.
* If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery
* result.
* If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up.
* @inst_id: the local instance id
* @peer_inst_id: the instance id of the peer's function
* @addr: the MAC address of the peer
* @info_len: the length of the &info
* @info: the Service Specific Info from the peer (if any)
* @cookie: unique identifier of the corresponding function
*/
struct cfg80211_nan_match_params {
enum nl80211_nan_function_type type;
u8 inst_id;
u8 peer_inst_id;
const u8 *addr;
u8 info_len;
const u8 *info;
u64 cookie;
};
/**
* cfg80211_nan_match - report a match for a NAN function.
* @wdev: the wireless device reporting the match
* @match: match notification parameters
* @gfp: allocation flags
*
* This function reports that a NAN function had a match. This
* can be a subscribe that had a match or a solicited publish that
* was sent. It can also be a follow up that was received.
*/
void cfg80211_nan_match(struct wireless_dev *wdev,
struct cfg80211_nan_match_params *match, gfp_t gfp);
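/*
 * Illustrative sketch only (all values are made up): reporting that the
 * local subscribe function with instance id 1 matched a peer publisher.
 */
static inline void example_report_nan_match(struct wireless_dev *wdev,
					    const u8 *peer_addr, u64 cookie)
{
	struct cfg80211_nan_match_params match = {
		.type = NL80211_NAN_FUNC_SUBSCRIBE,
		.inst_id = 1,
		.peer_inst_id = 5,
		.addr = peer_addr,
		.cookie = cookie,
	};

	cfg80211_nan_match(wdev, &match, GFP_KERNEL);
}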
/**
* cfg80211_nan_func_terminated - notify about NAN function termination.
*
* @wdev: the wireless device reporting the match
* @inst_id: the local instance id
* @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
* @cookie: unique NAN function identifier
* @gfp: allocation flags
*
* This function reports that a NAN function was terminated.
*/
void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
u8 inst_id,
enum nl80211_nan_func_term_reason reason,
u64 cookie, gfp_t gfp);
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
/**
* cfg80211_external_auth_request - userspace request for authentication
* @netdev: network device
* @params: External authentication parameters
* @gfp: allocation flags
* Returns: 0 on success, < 0 on error
*/
int cfg80211_external_auth_request(struct net_device *netdev,
struct cfg80211_external_auth_params *params,
gfp_t gfp);
/**
* cfg80211_pmsr_report - report peer measurement result data
* @wdev: the wireless device reporting the measurement
* @req: the original measurement request
* @result: the result data
* @gfp: allocation flags
*/
void cfg80211_pmsr_report(struct wireless_dev *wdev,
struct cfg80211_pmsr_request *req,
struct cfg80211_pmsr_result *result,
gfp_t gfp);
/**
* cfg80211_pmsr_complete - report peer measurement completed
* @wdev: the wireless device reporting the measurement
* @req: the original measurement request
* @gfp: allocation flags
*
* Report that the entire measurement completed, after this
* the request pointer will no longer be valid.
*/
void cfg80211_pmsr_complete(struct wireless_dev *wdev,
struct cfg80211_pmsr_request *req,
gfp_t gfp);
/**
* cfg80211_iftype_allowed - check whether the interface can be allowed
* @wiphy: the wiphy
* @iftype: interface type
* @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
* @check_swif: check iftype against software interfaces
*
* Check whether the interface is allowed to operate; additionally, this API
* can be used to check iftype against the software interfaces when
* check_swif is '1'.
*
* Return: %true if allowed, %false otherwise
*/
bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
bool is_4addr, u8 check_swif);
/**
* cfg80211_assoc_comeback - notification of association that was
* temporarily rejected with a comeback
* @netdev: network device
* @ap_addr: AP (MLD) address that rejected the association
* @timeout: timeout interval value TUs.
*
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_assoc_comeback(struct net_device *netdev,
const u8 *ap_addr, u32 timeout);
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
#define wiphy_printk(level, wiphy, format, args...) \
dev_printk(level, &(wiphy)->dev, format, ##args)
#define wiphy_emerg(wiphy, format, args...) \
dev_emerg(&(wiphy)->dev, format, ##args)
#define wiphy_alert(wiphy, format, args...) \
dev_alert(&(wiphy)->dev, format, ##args)
#define wiphy_crit(wiphy, format, args...) \
dev_crit(&(wiphy)->dev, format, ##args)
#define wiphy_err(wiphy, format, args...) \
dev_err(&(wiphy)->dev, format, ##args)
#define wiphy_warn(wiphy, format, args...) \
dev_warn(&(wiphy)->dev, format, ##args)
#define wiphy_notice(wiphy, format, args...) \
dev_notice(&(wiphy)->dev, format, ##args)
#define wiphy_info(wiphy, format, args...) \
dev_info(&(wiphy)->dev, format, ##args)
#define wiphy_info_once(wiphy, format, args...) \
dev_info_once(&(wiphy)->dev, format, ##args)
#define wiphy_err_ratelimited(wiphy, format, args...) \
dev_err_ratelimited(&(wiphy)->dev, format, ##args)
#define wiphy_warn_ratelimited(wiphy, format, args...) \
dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
#define wiphy_debug(wiphy, format, args...) \
wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
#define wiphy_dbg(wiphy, format, args...) \
dev_dbg(&(wiphy)->dev, format, ##args)
#if defined(VERBOSE_DEBUG)
#define wiphy_vdbg wiphy_dbg
#else
#define wiphy_vdbg(wiphy, format, args...) \
({ \
if (0) \
wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \
0; \
})
#endif
/*
* wiphy_WARN() acts like wiphy_printk(), but with the key difference
* of using a WARN/WARN_ON to get the message out, including the
* file/line information and a backtrace.
*/
#define wiphy_WARN(wiphy, format, args...) \
WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args);
/**
* cfg80211_update_owe_info_event - Notify the peer's OWE info to user space
* @netdev: network device
* @owe_info: peer's owe info
* @gfp: allocation flags
*/
void cfg80211_update_owe_info_event(struct net_device *netdev,
struct cfg80211_update_owe_info *owe_info,
gfp_t gfp);
/**
* cfg80211_bss_flush - resets all the scan entries
* @wiphy: the wiphy
*/
void cfg80211_bss_flush(struct wiphy *wiphy);
/**
* cfg80211_bss_color_notify - notify about bss color event
* @dev: network device
* @cmd: the actual event we want to notify
* @count: the number of TBTTs until the color change happens
* @color_bitmap: representations of the colors that the local BSS is aware of
* @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Return: 0 on success. Non-zero on error.
*/
int cfg80211_bss_color_notify(struct net_device *dev,
enum nl80211_commands cmd, u8 count,
u64 color_bitmap, u8 link_id);
/**
* cfg80211_obss_color_collision_notify - notify about bss color collision
* @dev: network device
* @color_bitmap: representations of the colors that the local BSS is aware of
* @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Return: 0 on success. Non-zero on error.
*/
static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
u64 color_bitmap,
u8 link_id)
{
return cfg80211_bss_color_notify(dev, NL80211_CMD_OBSS_COLOR_COLLISION,
0, color_bitmap, link_id);
}
/**
* cfg80211_color_change_started_notify - notify color change start
* @dev: the device on which the color is switched
* @count: the number of TBTTs until the color change happens
* @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Inform the userspace about the color change that has started.
*
* Return: 0 on success. Non-zero on error.
*/
static inline int cfg80211_color_change_started_notify(struct net_device *dev,
u8 count, u8 link_id)
{
return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_STARTED,
count, 0, link_id);
}
/**
* cfg80211_color_change_aborted_notify - notify color change abort
* @dev: the device on which the color is switched
* @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Inform the userspace about the color change that has aborted.
*
* Return: 0 on success. Non-zero on error.
*/
static inline int cfg80211_color_change_aborted_notify(struct net_device *dev,
u8 link_id)
{
return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_ABORTED,
0, 0, link_id);
}
/**
* cfg80211_color_change_notify - notify color change completion
* @dev: the device on which the color was switched
* @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Inform the userspace about the color change that has completed.
*
* Return: 0 on success. Non-zero on error.
*/
static inline int cfg80211_color_change_notify(struct net_device *dev,
u8 link_id)
{
return cfg80211_bss_color_notify(dev,
NL80211_CMD_COLOR_CHANGE_COMPLETED,
0, 0, link_id);
}
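/*
 * Illustrative sketch only (helper name is made up): the expected sequence
 * of notifications when a color change is carried out on a non-MLO BSS.
 */
static inline int example_color_change_flow(struct net_device *dev, u8 count)
{
	int err;

	/* announce that the countdown (in TBTTs) has started */
	err = cfg80211_color_change_started_notify(dev, count, 0);
	if (err)
		return err;

	/* ... beacons count down, then the new color takes effect ... */
	return cfg80211_color_change_notify(dev, 0);
}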
/**
* cfg80211_links_removed - Notify about removed STA MLD setup links.
* @dev: network device.
* @link_mask: BIT mask of removed STA MLD setup link IDs.
*
* Inform cfg80211 and the userspace about removed STA MLD setup links due to
* AP MLD removing the corresponding affiliated APs with Multi-Link
* reconfiguration. Note that it's not valid to remove all links; in that
* case, disconnect instead.
* Also note that the wdev mutex must be held.
*/
void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
/**
* struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data
* @buf: MLO Reconfiguration Response frame (header + body)
* @len: length of the frame data
* @driver_initiated: Indicates whether the add-links request was initiated by
* the driver. This is set to true when the link reconfiguration request was
* initiated by the driver because handling of AP link recommendation
* requests (e.g. a BTM (BSS Transition Management) request) is offloaded to
* the driver.
* @added_links: BIT mask of links successfully added to the association
* @links: per-link information indexed by link ID
* @links.bss: the BSS that MLO reconfiguration was requested for, ownership of
* the pointer moves to cfg80211 in the call to
* cfg80211_mlo_reconf_add_done().
*
* The BSS pointer must be set for each link for which 'add' operation was
* requested in the assoc_ml_reconf callback.
*/
struct cfg80211_mlo_reconf_done_data {
const u8 *buf;
size_t len;
bool driver_initiated;
u16 added_links;
struct {
struct cfg80211_bss *bss;
u8 *addr;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
};
/**
* cfg80211_mlo_reconf_add_done - Notify about MLO reconfiguration result
* @dev: network device.
* @data: MLO reconfiguration done data, &struct cfg80211_mlo_reconf_done_data
*
* Inform cfg80211 and the userspace that processing of ML reconfiguration
* request to add links to the association is done.
*/
void cfg80211_mlo_reconf_add_done(struct net_device *dev,
struct cfg80211_mlo_reconf_done_data *data);
/**
* cfg80211_schedule_channels_check - schedule regulatory check if needed
* @wdev: the wireless device to check
*
* In case the device supports NO_IR or DFS relaxations, schedule regulatory
* channels check, as previous concurrent operation conditions may not
* hold anymore.
*/
void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
/**
* cfg80211_epcs_changed - Notify about a change in EPCS state
* @netdev: the wireless device whose EPCS state changed
* @enabled: set to true if EPCS was enabled, otherwise set to false.
*/
void cfg80211_epcs_changed(struct net_device *netdev, bool enabled);
/**
* cfg80211_next_nan_dw_notif - Notify about the next NAN Discovery Window (DW)
* @wdev: Pointer to the wireless device structure
* @chan: DW channel (6, 44 or 149)
* @gfp: Memory allocation flags
*/
void cfg80211_next_nan_dw_notif(struct wireless_dev *wdev,
struct ieee80211_channel *chan, gfp_t gfp);
/**
* cfg80211_nan_cluster_joined - Notify about NAN cluster join
* @wdev: Pointer to the wireless device structure
* @cluster_id: Cluster ID of the NAN cluster that was joined or started
* @new_cluster: Indicates if this is a new cluster or an existing one
* @gfp: Memory allocation flags
*
* This function is used to notify user space when a NAN cluster has been
* joined, providing the cluster ID and a flag whether it is a new cluster.
*/
void cfg80211_nan_cluster_joined(struct wireless_dev *wdev,
const u8 *cluster_id, bool new_cluster,
gfp_t gfp);
#ifdef CONFIG_CFG80211_DEBUGFS
/**
* wiphy_locked_debugfs_read - do a locked read in debugfs
* @wiphy: the wiphy to use
* @file: the file being read
* @buf: the buffer to fill and then read from
* @bufsize: size of the buffer
* @userbuf: the user buffer to copy to
* @count: read count
* @ppos: read position
* @handler: the read handler to call (under wiphy lock)
* @data: additional data to pass to the read handler
*
* Return: the number of characters read, or a negative errno
*/
ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
char *buf, size_t bufsize,
char __user *userbuf, size_t count,
loff_t *ppos,
ssize_t (*handler)(struct wiphy *wiphy,
struct file *file,
char *buf,
size_t bufsize,
void *data),
void *data);
/**
* wiphy_locked_debugfs_write - do a locked write in debugfs
* @wiphy: the wiphy to use
* @file: the file being written to
* @buf: the buffer to copy the user data to
* @bufsize: size of the buffer
* @userbuf: the user buffer to copy from
* @count: read count
* @handler: the write handler to call (under wiphy lock)
* @data: additional data to pass to the write handler
*
* Return: the number of characters written, or a negative errno
*/
ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
char *buf, size_t bufsize,
const char __user *userbuf, size_t count,
ssize_t (*handler)(struct wiphy *wiphy,
struct file *file,
char *buf,
size_t count,
void *data),
void *data);
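/*
 * Illustrative sketch only (the handler name is made up): a debugfs read op
 * passes a handler like the one below; it runs under the wiphy lock and
 * fills @buf, which wiphy_locked_debugfs_read() then copies to userspace.
 */
static inline ssize_t example_locked_read_handler(struct wiphy *wiphy,
						  struct file *file,
						  char *buf, size_t bufsize,
						  void *data)
{
	return scnprintf(buf, bufsize, "%s\n", wiphy_name(wiphy));
}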
#endif
/**
* cfg80211_s1g_get_start_freq_khz - get S1G chandef start frequency
* @chandef: the chandef to use
*
* Return: the chandef's starting frequency in KHz
*/
static inline u32
cfg80211_s1g_get_start_freq_khz(const struct cfg80211_chan_def *chandef)
{
u32 bw_mhz = cfg80211_chandef_get_width(chandef);
u32 center_khz =
MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
return center_khz - bw_mhz * 500 + 500;
}
/**
* cfg80211_s1g_get_end_freq_khz - get S1G chandef end frequency
* @chandef: the chandef to use
*
* Return: the chandef's ending frequency in KHz
*/
static inline u32
cfg80211_s1g_get_end_freq_khz(const struct cfg80211_chan_def *chandef)
{
u32 bw_mhz = cfg80211_chandef_get_width(chandef);
u32 center_khz =
MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
return center_khz + bw_mhz * 500 - 500;
}
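/*
 * Worked example (values are illustrative): for a 4 MHz S1G chandef whose
 * center frequency is 904.5 MHz (center_khz = 904500), the helpers above
 * yield start = 904500 - 4 * 500 + 500 = 903000 kHz and
 * end = 904500 + 4 * 500 - 500 = 906000 kHz, i.e. the center frequencies of
 * the lowest and highest 1 MHz subchannels in the operating channel.
 */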
/**
* cfg80211_s1g_get_primary_sibling - retrieve the sibling 1MHz subchannel
* for an S1G chandef using a 2MHz primary channel.
* @wiphy: wiphy the channel belongs to
* @chandef: the chandef to use
*
* When chandef::s1g_primary_2mhz is set to true, we are operating on a 2MHz
* primary channel. The 1MHz subchannel designated by the primary channel
* location exists within chandef::chan, whilst the 'sibling' is the other
* 1MHz subchannel that makes up the 2MHz primary channel.
*
* Returns: the sibling 1MHz &struct ieee80211_channel, or %NULL on failure.
*/
static inline struct ieee80211_channel *
cfg80211_s1g_get_primary_sibling(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef)
{
int width_mhz = cfg80211_chandef_get_width(chandef);
u32 pri_1mhz_khz, sibling_1mhz_khz, op_low_1mhz_khz, pri_index;
if (!chandef->s1g_primary_2mhz || width_mhz < 2)
return NULL;
pri_1mhz_khz = ieee80211_channel_to_khz(chandef->chan);
op_low_1mhz_khz = cfg80211_s1g_get_start_freq_khz(chandef);
/*
* Compute the index of the primary 1 MHz subchannel within the
* operating channel, relative to the lowest 1 MHz center frequency.
* Flip the least significant bit to select the even/odd sibling,
* then translate that index back into a channel frequency.
*/
pri_index = (pri_1mhz_khz - op_low_1mhz_khz) / 1000;
sibling_1mhz_khz = op_low_1mhz_khz + ((pri_index ^ 1) * 1000);
return ieee80211_get_channel_khz(wiphy, sibling_1mhz_khz);
}
#endif /* __NET_CFG80211_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_DST_CACHE_H
#define _NET_DST_CACHE_H
#include <linux/jiffies.h>
#include <net/dst.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#endif
struct dst_cache {
struct dst_cache_pcpu __percpu *cache;
unsigned long reset_ts;
};
/**
* dst_cache_get - perform cache lookup
* @dst_cache: the cache
*
* The caller should use dst_cache_get_ip4() if it needs to retrieve the
* source address to be used when transmitting to the cached dst.
* Local BH must be disabled.
*/
struct dst_entry *dst_cache_get(struct dst_cache *dst_cache);
/**
* dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address
* @dst_cache: the cache
* @saddr: return value for the retrieved source address
*
* local BH must be disabled.
*/
struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr);
/**
* dst_cache_set_ip4 - store the ipv4 dst into the cache
* @dst_cache: the cache
* @dst: the entry to be cached
* @saddr: the source address to be stored inside the cache
*
* local BH must be disabled.
*/
void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
__be32 saddr);
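/*
 * Illustrative sketch only (helper name is made up; assumes <net/route.h>
 * is available for the routing types used): the usual tunnel transmit
 * pattern is to try the cache first and fall back to a full route lookup,
 * storing the result for the next packet. Local BH must be disabled by the
 * caller, as noted above.
 */
static inline struct rtable *example_tnl_get_rt(struct net *net,
						struct dst_cache *cache,
						struct flowi4 *fl4)
{
	struct rtable *rt;

	rt = dst_cache_get_ip4(cache, &fl4->saddr);
	if (rt)
		return rt;

	rt = ip_route_output_key(net, fl4);
	if (!IS_ERR(rt))
		dst_cache_set_ip4(cache, &rt->dst, fl4->saddr);

	return rt;
}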
#if IS_ENABLED(CONFIG_IPV6)
/**
* dst_cache_set_ip6 - store the ipv6 dst into the cache
* @dst_cache: the cache
* @dst: the entry to be cached
* @saddr: the source address to be stored inside the cache
*
* local BH must be disabled.
*/
void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
const struct in6_addr *saddr);
/**
* dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address
* @dst_cache: the cache
* @saddr: return value for the retrieved source address
*
* local BH must be disabled.
*/
struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
struct in6_addr *saddr);
#endif
/**
* dst_cache_reset - invalidate the cache contents
* @dst_cache: the cache
*
* This does not free the cached dst, to avoid races and contention.
* The dst will be freed on a later cache lookup.
*/
static inline void dst_cache_reset(struct dst_cache *dst_cache)
{
WRITE_ONCE(dst_cache->reset_ts, jiffies);
}
/**
* dst_cache_reset_now - invalidate the cache contents immediately
* @dst_cache: the cache
*
* The caller must be sure there are no concurrent users, as this frees
* all cached dst entries immediately, rather than waiting for the next
* per-cpu usage like dst_cache_reset does. Most callers should use the
* higher speed lazily-freed dst_cache_reset function instead.
*/
void dst_cache_reset_now(struct dst_cache *dst_cache);
/**
* dst_cache_init - initialize the cache, allocating the required storage
* @dst_cache: the cache
* @gfp: allocation flags
*/
int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
/**
* dst_cache_destroy - empty the cache and free the allocated storage
* @dst_cache: the cache
*
* No synchronization is enforced: it must be called only when the cache
* is unused.
*/
void dst_cache_destroy(struct dst_cache *dst_cache);
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* USB-ACPI glue code
*
* Copyright 2012 Red Hat <mjg@redhat.com>
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/usb/hcd.h>
#include "hub.h"
/**
* usb_acpi_power_manageable - check whether usb port has
* acpi power resource.
* @hdev: USB device belonging to the usb hub
* @index: zero-based port index
*
* Return true if the port has an acpi power resource, false otherwise.
*/
bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
{
acpi_handle port_handle;
int port1 = index + 1;
port_handle = usb_get_hub_port_acpi_handle(hdev,
port1);
if (port_handle)
return acpi_bus_power_manageable(port_handle);
else
return false;
}
EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5
/**
* usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
* @hdev: USB device belonging to the usb hub
* @index: zero based port index
*
* Some USB3 ports may not support USB3 link power management U1/U2 states
* due to different retimer setup. ACPI provides _DSM method which returns 0x01
* if U1 and U2 states should be disabled. Evaluate _DSM with:
* Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
* Arg1: Revision ID = 0
* Arg2: Function Index = 5
* Arg3: (empty)
*
* Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0
*/
int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
{
union acpi_object *obj;
acpi_handle port_handle;
int port1 = index + 1;
guid_t guid;
int ret;
ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
if (ret)
return ret;
port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
if (!port_handle) {
dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
return -ENODEV;
}
if (!acpi_check_dsm(port_handle, &guid, 0,
BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
return -ENODEV;
}
obj = acpi_evaluate_dsm_typed(port_handle, &guid, 0,
USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
return -EINVAL;
}
if (obj->integer.value == 0x01)
ret = 1;
ACPI_FREE(obj);
return ret;
}
EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
/**
* usb_acpi_set_power_state - control usb port's power via acpi power
* resource
* @hdev: USB device belonging to the usb hub
* @index: zero-based port index
* @enable: power state expected to be set
*
* Note: use usb_acpi_power_manageable() to check whether the usb port
* has an acpi power resource before invoking this function.
*
* Returns 0 on success, else negative errno.
*/
int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
struct usb_port *port_dev;
acpi_handle port_handle;
unsigned char state;
int port1 = index + 1;
int error = -EINVAL;
if (!hub)
return -ENODEV;
port_dev = hub->ports[port1 - 1];
port_handle = (acpi_handle) usb_get_hub_port_acpi_handle(hdev, port1);
if (!port_handle)
return error;
if (enable)
state = ACPI_STATE_D0;
else
state = ACPI_STATE_D3_COLD;
error = acpi_bus_set_power(port_handle, state);
if (!error)
dev_dbg(&port_dev->dev, "acpi: power was set to %d\n", enable);
else
dev_dbg(&port_dev->dev, "acpi: power failed to be set\n");
return error;
}
EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
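/*
 * Illustrative sketch only (the helper name is made up): the check-then-set
 * pattern the comment above asks callers to follow, using a zero-based port
 * index.
 */
static inline int example_usb_port_power_off(struct usb_device *hdev, int index)
{
	if (!usb_acpi_power_manageable(hdev, index))
		return -ENODEV;

	return usb_acpi_set_power_state(hdev, index, false);
}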
/**
* usb_acpi_add_usb4_devlink - add device link to USB4 Host Interface for tunneled USB3 devices
*
* @udev: Tunneled USB3 device connected to a roothub.
*
* Adds a device link between a tunneled USB3 device and the USB4 Host Interface
* device to ensure correct runtime PM suspend and resume order. This function
* should only be called for tunneled USB3 devices.
* The USB4 Host Interface this tunneled device depends on is found from the roothub
* port ACPI device specific data _DSD entry.
*
* Return: negative error code on failure, 0 otherwise
*/
static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
{
struct device_link *link;
struct usb_port *port_dev;
struct usb_hub *hub;
if (!udev->parent || udev->parent->parent)
return 0;
hub = usb_hub_to_struct_hub(udev->parent);
if (!hub)
return 0;
port_dev = hub->ports[udev->portnum - 1];
struct fwnode_handle *nhi_fwnode __free(fwnode_handle) =
fwnode_find_reference(dev_fwnode(&port_dev->dev), "usb4-host-interface", 0);
if (IS_ERR(nhi_fwnode) || !nhi_fwnode->dev)
return 0;
link = device_link_add(&port_dev->child->dev, nhi_fwnode->dev,
DL_FLAG_STATELESS |
DL_FLAG_RPM_ACTIVE |
DL_FLAG_PM_RUNTIME);
if (!link) {
dev_err(&port_dev->dev, "Failed to create device link from %s to %s\n",
dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev));
return -EINVAL;
}
dev_dbg(&port_dev->dev, "Created device link from %s to %s\n",
dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev));
udev->usb4_link = link;
return 0;
}
/*
* Private to usb-acpi, all the core needs to know is that
* port_dev->location is non-zero when it has been set by the firmware.
*/
#define USB_ACPI_LOCATION_VALID (1 << 31)
static void
usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
{
enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *upc = NULL;
struct acpi_pld_info *pld = NULL;
acpi_status status;
/*
* According to section 9.14 of the ACPI Spec 6.2, _PLD indicates whether a
* usb port is user visible and _UPC indicates whether it is connectable. If
* the port is both visible and connectable, USB devices can be freely
* connected to and disconnected from it. If it is not visible but
* connectable, a usb device is directly hard-wired to the port. If it is
* neither visible nor connectable, the port is not used.
*/
if (acpi_get_physical_device_location(handle, &pld) && pld)
port_dev->location = USB_ACPI_LOCATION_VALID |
pld->group_token << 8 | pld->group_position;
status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer);
if (ACPI_FAILURE(status))
goto out;
upc = buffer.pointer;
if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4)
goto out;
/* UPC states port is connectable */
if (upc->package.elements[0].integer.value)
if (!pld)
; /* keep connect_type as unknown */
else if (pld->user_visible)
connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
else
connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
else
connect_type = USB_PORT_NOT_USED;
out:
port_dev->connect_type = connect_type;
kfree(upc);
ACPI_FREE(pld);
}
static struct acpi_device *
usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
struct usb_device *udev;
struct acpi_device *adev;
acpi_handle *parent_handle;
int port1;
/* Get the struct usb_device pointer of the port's hub */
udev = to_usb_device(port_dev->dev.parent->parent);
/*
* The root hub ports' parent is the root hub. The non-root-hub
* ports' parent is the parent hub port which the hub is
* connected to.
*/
if (!udev->parent) {
adev = ACPI_COMPANION(&udev->dev);
port1 = usb_hcd_find_raw_port_number(bus_to_hcd(udev->bus),
port_dev->portnum);
} else {
parent_handle = usb_get_hub_port_acpi_handle(udev->parent,
udev->portnum);
if (!parent_handle)
return NULL;
adev = acpi_fetch_acpi_dev(parent_handle);
port1 = port_dev->portnum;
}
return acpi_find_child_by_adr(adev, port1);
}
static struct acpi_device *
usb_acpi_find_companion_for_port(struct usb_port *port_dev)
{
struct acpi_device *adev;
adev = usb_acpi_get_companion_for_port(port_dev);
if (!adev)
return NULL;
usb_acpi_get_connect_type(port_dev, adev->handle);
return adev;
}
static struct acpi_device *
usb_acpi_find_companion_for_device(struct usb_device *udev)
{
struct acpi_device *adev;
struct usb_port *port_dev;
struct usb_hub *hub;
if (!udev->parent) {
/*
* The root hub is the only child (_ADR=0) under its parent, the HC.
* sysdev pointer is the HC as seen from firmware.
*/
adev = ACPI_COMPANION(udev->bus->sysdev);
return acpi_find_child_device(adev, 0, false);
}
hub = usb_hub_to_struct_hub(udev->parent);
if (!hub)
return NULL;
/* Tunneled USB3 devices depend on USB4 Host Interface, set device link to it */
if (udev->speed >= USB_SPEED_SUPER &&
udev->tunnel_mode != USB_LINK_NATIVE)
usb_acpi_add_usb4_devlink(udev);
/*
* This is an embedded USB device connected to a port and such
* devices share port's ACPI companion.
*/
port_dev = hub->ports[udev->portnum - 1];
return usb_acpi_get_companion_for_port(port_dev);
}
static struct acpi_device *usb_acpi_find_companion(struct device *dev)
{
/*
* The USB hierarchy looks like the following:
*
* Device (EHC1)
* Device (HUBN)
* Device (PR01)
* Device (PR11)
* Device (PR12)
* Device (FN12)
* Device (FN13)
* Device (PR13)
* ...
* where HUBN is root hub, and PRNN are USB ports and devices
* connected to them, and FNNN are individual functions for
* connected composite USB devices. PRNN and FNNN may contain
* _CRS and other methods describing sideband resources for
* the connected device.
*
* On the kernel side both root hub and embedded USB devices are
* represented as instances of usb_device structure, and ports
* are represented as usb_port structures, so the whole process
* is split into 2 parts: finding companions for devices and
* finding companions for ports.
*
* Note that we do not handle individual functions of composite
* devices yet, for that we would need to assign companions to
* devices corresponding to USB interfaces.
*/
if (is_usb_device(dev))
return usb_acpi_find_companion_for_device(to_usb_device(dev));
else if (is_usb_port(dev))
return usb_acpi_find_companion_for_port(to_usb_port(dev));
return NULL;
}
static bool usb_acpi_bus_match(struct device *dev)
{
	return is_usb_device(dev) || is_usb_port(dev);
}
static struct acpi_bus_type usb_acpi_bus = {
.name = "USB",
.match = usb_acpi_bus_match,
.find_companion = usb_acpi_find_companion,
};
int usb_acpi_register(void)
{
return register_acpi_bus_type(&usb_acpi_bus);
}
void usb_acpi_unregister(void)
{
unregister_acpi_bus_type(&usb_acpi_bus);
}
/*
* Copyright (c) 2017 Mellanox Technologies Inc. All rights reserved.
* Copyright (c) 2010 Voltaire Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
#include <linux/export.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
#include <linux/module.h>
#include "core_priv.h"
static struct {
const struct rdma_nl_cbs *cb_table;
/* Synchronizes between ongoing netlink commands and netlink client
* unregistration.
*/
struct rw_semaphore sem;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
bool rdma_nl_chk_listeners(unsigned int group)
{
struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
return netlink_has_listeners(rnet->nl_sock, group);
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);
static bool is_nl_msg_valid(unsigned int type, unsigned int op)
{
static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
[RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
[RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
[RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
};
/*
* This BUILD_BUG_ON is intended to catch addition of new
* RDMA netlink protocol without updating the array above.
*/
BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);
if (type >= RDMA_NL_NUM_CLIENTS)
return false;
return op < max_num_ops[type];
}
static const struct rdma_nl_cbs *
get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
{
const struct rdma_nl_cbs *cb_table;
/*
* Currently only the NLDEV client supports netlink commands in a
* non-init_net net namespace.
*/
if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
return NULL;
cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
if (!cb_table) {
/*
* Didn't get valid reference of the table, attempt module
* load once.
*/
up_read(&rdma_nl_types[type].sem);
request_module("rdma-netlink-subsys-%u", type);
down_read(&rdma_nl_types[type].sem);
cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
}
if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
return NULL;
return cb_table;
}
void rdma_nl_register(unsigned int index,
const struct rdma_nl_cbs cb_table[])
{
if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
return;
/* Pairs with the READ_ONCE() in get_cb_table() */
smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
}
EXPORT_SYMBOL(rdma_nl_register);
void rdma_nl_unregister(unsigned int index)
{
down_write(&rdma_nl_types[index].sem);
rdma_nl_types[index].cb_table = NULL;
up_write(&rdma_nl_types[index].sem);
}
EXPORT_SYMBOL(rdma_nl_unregister);
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int len, int client, int op, int flags)
{
*nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
if (!*nlh)
return NULL;
return nlmsg_data(*nlh);
}
EXPORT_SYMBOL(ibnl_put_msg);
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type)
{
if (nla_put(skb, type, len, data)) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
return 0;
}
EXPORT_SYMBOL(ibnl_put_attr);
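/*
 * Illustrative sketch only: composing a message with ibnl_put_msg() and
 * ibnl_put_attr(). The client/op pair and the attribute type below are just
 * placeholders; a real caller uses the constants of its own protocol.
 */
static int __maybe_unused example_compose_msg(struct sk_buff *skb, u32 seq)
{
	struct nlmsghdr *nlh;
	u32 payload = 42;

	if (!ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_IWCM,
			  RDMA_NL_IWPM_REG_PID, NLM_F_REQUEST))
		return -EMSGSIZE;

	/* attribute type 1 is a placeholder for a protocol-specific value */
	return ibnl_put_attr(skb, nlh, sizeof(payload), &payload, 1);
}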
static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
int type = nlh->nlmsg_type;
unsigned int index = RDMA_NL_GET_CLIENT(type);
unsigned int op = RDMA_NL_GET_OP(type);
const struct rdma_nl_cbs *cb_table;
int err = -EINVAL;
if (!is_nl_msg_valid(index, op))
return -EINVAL;
down_read(&rdma_nl_types[index].sem);
cb_table = get_cb_table(skb, index, op);
if (!cb_table)
goto done;
if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
!netlink_capable(skb, CAP_NET_ADMIN)) {
err = -EPERM;
goto done;
}
/*
* LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
* mistakenly call the .dump() function.
*/
if (index == RDMA_NL_LS) {
if (cb_table[op].doit)
err = cb_table[op].doit(skb, nlh, extack);
goto done;
}
/* FIXME: Convert IWCM to properly handle doit callbacks */
if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
struct netlink_dump_control c = {
.dump = cb_table[op].dump,
};
if (c.dump)
err = netlink_dump_start(skb->sk, skb, nlh, &c);
goto done;
}
if (cb_table[op].doit)
err = cb_table[op].doit(skb, nlh, extack);
done:
up_read(&rdma_nl_types[index].sem);
return err;
}
/*
* This function is similar to netlink_rcv_skb with one exception:
* It calls to the callback for the netlink messages without NLM_F_REQUEST
* flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed
* for that consumer only.
*/
static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
struct nlmsghdr *,
struct netlink_ext_ack *))
{
struct netlink_ext_ack extack = {};
struct nlmsghdr *nlh;
int err;
while (skb->len >= nlmsg_total_size(0)) {
int msglen;
nlh = nlmsg_hdr(skb);
err = 0;
if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
return 0;
/*
* Generally speaking, only requests are handled
* by the kernel, but RDMA_NL_LS is different because it
* runs a backward netlink scheme: the kernel initiates messages
* and waits for replies with data to keep the pathrecord cache
* in sync.
*/
if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
(RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
goto ack;
/* Skip control messages */
if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
goto ack;
err = cb(skb, nlh, &extack);
if (err == -EINTR)
goto skip;
ack:
if (nlh->nlmsg_flags & NLM_F_ACK || err)
netlink_ack(skb, nlh, err, &extack);
skip:
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
return 0;
}
static void rdma_nl_rcv(struct sk_buff *skb)
{
rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
}
int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
{
struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
int err;
err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast);
int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
{
struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
int err;
err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast_wait);
int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
unsigned int group, gfp_t flags)
{
struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
}
EXPORT_SYMBOL(rdma_nl_multicast);
void rdma_nl_init(void)
{
int idx;
for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
init_rwsem(&rdma_nl_types[idx].sem);
}
void rdma_nl_exit(void)
{
int idx;
for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
WARN(rdma_nl_types[idx].cb_table,
"Netlink client %d wasn't released prior to unloading %s\n",
idx, KBUILD_MODNAME);
}
int rdma_nl_net_init(struct rdma_dev_net *rnet)
{
struct net *net = read_pnet(&rnet->net);
struct netlink_kernel_cfg cfg = {
.input = rdma_nl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
};
struct sock *nls;
nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
if (!nls)
return -ENOMEM;
nls->sk_sndtimeo = 10 * HZ;
rnet->nl_sock = nls;
	return 0;
}
void rdma_nl_net_exit(struct rdma_dev_net *rnet)
{
netlink_kernel_release(rnet->nl_sock);
}
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
#include <linux/compiler.h> /* For unlikely. */
#include <linux/sched.h> /* For struct task_struct. */
#include <linux/sched/signal.h> /* For send_sig(), same_thread_group(), etc. */
#include <linux/err.h> /* for IS_ERR_VALUE */
#include <linux/bug.h> /* For BUG_ON. */
#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>
#include <linux/seccomp.h>
/* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */
struct syscall_info {
__u64 sp;
struct seccomp_data data;
};
extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
/*
* Ptrace flags
*
* The ownership rules for task->ptrace, which holds the ptrace
* flags, are simple. When a task is running it owns its task->ptrace
* flags. When a task is stopped the ptracer owns task->ptrace.
*/
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD PT_EVENT_FLAG(0)
#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_notify(int exit_code, unsigned long message);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent,
const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
#define PTRACE_MODE_FSCREDS 0x08
#define PTRACE_MODE_REALCREDS 0x10
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
/**
* ptrace_may_access - check whether the caller is permitted to access
* a target task.
* @task: target task
* @mode: selects type of access and caller credentials
*
* Returns true on success, false on denial.
*
* One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
* be set in @mode to specify whether the access was requested through
* a filesystem syscall (should use effective capabilities and fsuid
* of the caller) or through an explicit syscall such as
* process_vm_writev or ptrace (and should use the real credentials).
*/
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
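/*
 * Illustrative sketch only (the helper name is made up): a read-only
 * introspection path would typically gate access like this before exposing
 * any state of @task.
 */
static inline int example_check_read_access(struct task_struct *task)
{
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;
	return 0;
}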
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
}
static inline void ptrace_unlink(struct task_struct *child)
{
if (unlikely(child->ptrace))
__ptrace_unlink(child);
}
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
unsigned long data);
/**
* ptrace_parent - return the task that is tracing the given task
* @task: task to consider
*
* Returns %NULL if no one is tracing @task, or the &struct task_struct
* pointer to its tracer.
*
* Must be called under rcu_read_lock(). The pointer returned might be kept
* alive only by RCU. During exec, this may be called with task_lock() held
* on @task, still held from when check_unsafe_exec() was called.
*/
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
if (unlikely(task->ptrace))
return rcu_dereference(task->parent);
return NULL;
}
/**
* ptrace_event_enabled - test whether a ptrace event is enabled
* @task: ptracee of interest
* @event: %PTRACE_EVENT_* to test
*
* Test whether @event is enabled for ptracee @task.
*
* Returns %true if @event is enabled, %false otherwise.
*/
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
return task->ptrace & PT_EVENT_FLAG(event);
}
/**
* ptrace_event - possibly stop for a ptrace event notification
* @event: %PTRACE_EVENT_* value to report
* @message: value for %PTRACE_GETEVENTMSG to return
*
* Check whether @event is enabled and, if so, report @event and @message
* to the ptrace parent.
*
* Called without locks.
*/
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		ptrace_notify((event << 8) | SIGTRAP, message);
	} else if (event == PTRACE_EVENT_EXEC) {
/* legacy EXEC report via SIGTRAP */
if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
send_sig(SIGTRAP, current, 0);
}
}
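/*
 * Example (illustrative): the exit path reports its exit code roughly as
 *
 *	ptrace_event(PTRACE_EVENT_EXIT, code);
 *
 * so a tracer that enabled PTRACE_O_TRACEEXIT can read the code with
 * PTRACE_GETEVENTMSG while the tracee is stopped in the event.
 */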
/**
* ptrace_event_pid - possibly stop for a ptrace event notification
* @event: %PTRACE_EVENT_* value to report
* @pid: process identifier for %PTRACE_GETEVENTMSG to return
*
* Check whether @event is enabled and, if so, report @event and @pid
* to the ptrace parent. @pid is reported as the pid_t seen from the
* ptrace parent's pid namespace.
*
* Called without locks.
*/
static inline void ptrace_event_pid(int event, struct pid *pid)
{
/*
* FIXME: There's a potential race if a ptracer in a different pid
* namespace than parent attaches between computing message below and
* when we acquire tasklist_lock in ptrace_stop(). If this happens,
* the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
*/
unsigned long message = 0;
struct pid_namespace *ns;
rcu_read_lock();
ns = task_active_pid_ns(rcu_dereference(current->parent));
if (ns)
message = pid_nr_ns(pid, ns);
rcu_read_unlock();
ptrace_event(event, message);
}
/**
* ptrace_init_task - initialize ptrace state for a new child
* @child: new child task
* @ptrace: true if child should be ptrace'd by parent's tracer
*
* This is called immediately after adding @child to its parent's children
* list. @ptrace is false in the normal case, and true to ptrace @child.
*
* Called with current's siglock and write_lock_irq(&tasklist_lock) held.
*/
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
INIT_LIST_HEAD(&child->ptrace_entry);
INIT_LIST_HEAD(&child->ptraced);
child->jobctl = 0;
child->ptrace = 0;
child->parent = child->real_parent;
if (unlikely(ptrace) && current->ptrace) {
child->ptrace = current->ptrace;
__ptrace_link(child, current->parent, current->ptracer_cred);
if (child->ptrace & PT_SEIZED)
task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
else
sigaddset(&child->pending.signal, SIGSTOP);
}
else
child->ptracer_cred = NULL;
}
/**
* ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
* @task: task in %EXIT_DEAD state
*
* Called with write_lock(&tasklist_lock) held.
*/
static inline void ptrace_release_task(struct task_struct *task)
{
BUG_ON(!list_empty(&task->ptraced));
ptrace_unlink(task);
BUG_ON(!list_empty(&task->ptrace_entry));
}
#ifndef force_successful_syscall_return
/*
* System call handlers that, upon successful completion, need to return a
* negative value should call force_successful_syscall_return() right before
* returning. On architectures where the syscall convention provides for a
* separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
* others), this macro can be used to ensure that the error flag will not get
* set. On architectures which do not support a separate error flag, the macro
* is a no-op and the spurious error condition needs to be filtered out by some
* other means (e.g., in user-level, by passing an extra argument to the
* syscall handler, or something along those lines).
*/
#define force_successful_syscall_return() do { } while (0)
#endif
#ifndef is_syscall_success
/*
* On most systems we can tell if a syscall is a success based on if the retval
* is an error value. On some systems like ia64 and powerpc they have different
* indicators of success/failure and must define their own.
*/
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
/*
* <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
*
* These do-nothing inlines are used when the arch does not
* implement single-step. The kerneldoc comments are here
* to document the interface for all arch definitions.
*/
#ifndef arch_has_single_step
/**
* arch_has_single_step - does this CPU support user-mode single-step?
*
* If this is defined, then there must be function declarations or
* inlines for user_enable_single_step() and user_disable_single_step().
* arch_has_single_step() should evaluate to nonzero iff the machine
* supports instruction single-step for user mode.
* It can be a constant or it can test a CPU feature bit.
*/
#define arch_has_single_step() (0)
/**
* user_enable_single_step - single-step in user-mode task
* @task: either current or a task stopped in %TASK_TRACED
*
* This can only be called when arch_has_single_step() has returned nonzero.
* Set @task so that when it returns to user mode, it will trap after the
* next single instruction executes. If arch_has_block_step() is defined,
* this must clear the effects of user_enable_block_step() too.
*/
static inline void user_enable_single_step(struct task_struct *task)
{
BUG(); /* This can never be called. */
}
/**
* user_disable_single_step - cancel user-mode single-step
* @task: either current or a task stopped in %TASK_TRACED
*
* Clear @task of the effects of user_enable_single_step() and
* user_enable_block_step(). This can be called whether or not either
* of those was ever called on @task, and even if arch_has_single_step()
* returned zero.
*/
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif /* arch_has_single_step */
#ifndef arch_has_block_step
/**
* arch_has_block_step - does this CPU support user-mode block-step?
*
* If this is defined, then there must be a function declaration or inline
* for user_enable_block_step(), and arch_has_single_step() must be defined
* too. arch_has_block_step() should evaluate to nonzero iff the machine
* supports step-until-branch for user mode. It can be a constant or it
* can test a CPU feature bit.
*/
#define arch_has_block_step() (0)
/**
* user_enable_block_step - step until branch in user-mode task
* @task: either current or a task stopped in %TASK_TRACED
*
* This can only be called when arch_has_block_step() has returned nonzero,
* and will never be called when single-instruction stepping is being used.
* Set @task so that when it returns to user mode, it will trap after the
* next branch or trap taken.
*/
static inline void user_enable_block_step(struct task_struct *task)
{
BUG(); /* This can never be called. */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif /* arch_has_block_step */
#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
extern void user_single_step_report(struct pt_regs *regs);
#else
static inline void user_single_step_report(struct pt_regs *regs)
{
kernel_siginfo_t info;
clear_siginfo(&info);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = 0;
info.si_uid = 0;
force_sig_info(&info);
}
#endif
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
*
* This is called with the siglock held, to decide whether or not it's
* necessary to release the siglock and call arch_ptrace_stop(). It can be
* defined to a constant if arch_ptrace_stop() is never required, or always
* is. On machines where this makes sense, it should be defined to a quick
* test to optimize out calling arch_ptrace_stop() when it would be
* superfluous. For example, if the thread has not been back to user mode
* since the last stop, the thread state might indicate that nothing needs
* to be done.
*
* This is guaranteed to be invoked once before a task stops for ptrace and
* may include arch-specific operations necessary prior to a ptrace stop.
*/
#define arch_ptrace_stop_needed() (0)
#endif
#ifndef arch_ptrace_stop
/**
* arch_ptrace_stop - Do machine-specific work before stopping for ptrace
*
* This is called with no locks held when arch_ptrace_stop_needed() has
* just returned nonzero. It is allowed to block, e.g. for user memory
* access. The arch can have machine-specific work to be done before
* ptrace stops. On ia64, register backing store gets written back to user
* memory here. Since this can be costly (requires dropping the siglock),
* we only do it when the arch requires it for this particular stop, as
* indicated by arch_ptrace_stop_needed().
*/
#define arch_ptrace_stop() do { } while (0)
#endif
#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif
#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
#ifndef exception_ip
#define exception_ip(x) instruction_pointer(x)
#endif
extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
/*
* ptrace report for syscall entry and exit looks identical.
*/
static inline int ptrace_report_syscall(unsigned long message)
{
int ptrace = current->ptrace;
int signr;
if (!(ptrace & PT_PTRACED))
return 0;
signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0),
message);
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (signr)
send_sig(signr, current, 1);
return fatal_signal_pending(current);
}
/**
* ptrace_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
* This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
* %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
* entered the kernel for a system call. Full user register state is
* available here. Changing the values in @regs can affect the system
* call number and arguments to be tried. It is safe to block here,
* preventing the system call from beginning.
*
* Returns zero normally, or nonzero if the calling arch code should abort
* the system call. That must prevent normal entry so no system call is
* made. If @task ever returns to user mode after this, its register state
* is unspecified, but should be something harmless like an %ENOSYS error
* return. It should preserve enough information so that syscall_rollback()
* can work (see asm-generic/syscall.h).
*
* Called without locks, just after entering kernel mode.
*/
static inline __must_check int ptrace_report_syscall_entry(
struct pt_regs *regs)
{
return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
}
/**
* ptrace_report_syscall_exit - task has just finished a system call
* @regs: user register state of current task
* @step: nonzero if simulating single-step or block-step
*
* This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
* the current task has just finished an attempted system call. Full
* user register state is available here. It is safe to block here,
* preventing signals from being processed.
*
* If @step is nonzero, this report is also in lieu of the normal
* trap that would follow the system call instruction because
* user_enable_block_step() or user_enable_single_step() was used.
* In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
*
* Called without locks, just before checking for pending signals.
*/
static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
{
if (step)
user_single_step_report(regs);
else
ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
}
#endif
// SPDX-License-Identifier: GPL-2.0
#include <linux/irq_work.h>
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>
static struct callback_head work_exited; /* all we need is ->next == NULL */
#ifdef CONFIG_IRQ_WORK
static void task_work_set_notify_irq(struct irq_work *entry)
{
test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
}
static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
#endif
/**
* task_work_add - ask the @task to execute @work->func()
* @task: the task which should run the callback
* @work: the callback to run
* @notify: how to notify the targeted task
*
* Queue @work for task_work_run() below and notify the @task if @notify
* is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
*
* @TWA_SIGNAL works like signals, in that it will interrupt the targeted
* task and run the task_work, regardless of whether the task is currently
* running in the kernel or userspace.
* @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
* reschedule IPI to force the targeted task to reschedule and run task_work.
* This can be advantageous if there's no strict requirement that the
* task_work be run as soon as possible, just whenever the task enters the
* kernel anyway.
* @TWA_RESUME work is run only when the task exits the kernel and returns to
* user mode, or before entering guest mode.
* @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
* current @task and if the current context is NMI.
*
* Fails if the @task is exiting/exited and thus it can't process this @work.
* Otherwise @work->func() will be called when the @task goes through one of
* the aforementioned transitions, or exits.
*
* If the targeted task is exiting, then an error is returned and the work item
* is not queued. It's up to the caller to arrange for an alternative mechanism
* in that case.
*
* Note: there is no ordering guarantee on works queued here. The task_work
* list is LIFO.
*
* RETURNS:
* 0 on success or -ESRCH.
*/
int task_work_add(struct task_struct *task, struct callback_head *work,
enum task_work_notify_mode notify)
{
struct callback_head *head;
if (notify == TWA_NMI_CURRENT) {
if (WARN_ON_ONCE(task != current))
return -EINVAL;
if (!IS_ENABLED(CONFIG_IRQ_WORK))
return -EINVAL;
} else {
kasan_record_aux_stack(work);
}
head = READ_ONCE(task->task_works);
do {
if (unlikely(head == &work_exited))
return -ESRCH;
work->next = head;
} while (!try_cmpxchg(&task->task_works, &head, work));
switch (notify) {
case TWA_NONE:
break;
case TWA_RESUME:
set_notify_resume(task);
break;
case TWA_SIGNAL:
set_notify_signal(task);
break;
case TWA_SIGNAL_NO_IPI:
__set_notify_signal(task);
break;
#ifdef CONFIG_IRQ_WORK
case TWA_NMI_CURRENT:
irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
break;
#endif
default:
WARN_ON_ONCE(1);
break;
}
return 0;
}
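/*
 * Illustrative sketch, not part of this file (all example_* names are
 * hypothetical): a typical caller embeds a callback_head in its own
 * object, initializes it with init_task_work() and queues it with
 * task_work_add(). With TWA_RESUME the callback runs in the context of
 * @task when it next returns to user mode.
 */
struct example_work {
	struct callback_head cb;
	int payload;
};

static void example_work_fn(struct callback_head *cb)
{
	struct example_work *ew = container_of(cb, struct example_work, cb);

	/* Runs in the context of the targeted task. */
	pr_info("example task_work ran, payload=%d\n", ew->payload);
	kfree(ew);
}

static int example_queue_work(struct task_struct *task)
{
	struct example_work *ew = kzalloc(sizeof(*ew), GFP_KERNEL);

	if (!ew)
		return -ENOMEM;
	ew->payload = 42;
	init_task_work(&ew->cb, example_work_fn);

	/* Fails with -ESRCH if @task is already exiting. */
	return task_work_add(task, &ew->cb, TWA_RESUME);
}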
/**
* task_work_cancel_match - cancel a pending work added by task_work_add()
* @task: the task which should execute the work
* @match: match function to call
* @data: data to be passed in to match function
*
* RETURNS:
* The found work or NULL if not found.
*/
struct callback_head *
task_work_cancel_match(struct task_struct *task,
bool (*match)(struct callback_head *, void *data),
void *data)
{
struct callback_head **pprev = &task->task_works;
struct callback_head *work;
unsigned long flags;
if (likely(!task_work_pending(task)))
return NULL;
/*
* If cmpxchg() fails we continue without updating pprev.
* Either we raced with task_work_add() which added the
* new entry before this work, in which case we will find
* it again. Or we raced with task_work_run() and
* *pprev == NULL/exited.
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
work = READ_ONCE(*pprev);
while (work) {
if (!match(work, data)) {
pprev = &work->next;
work = READ_ONCE(*pprev);
} else if (try_cmpxchg(pprev, &work, work->next))
break;
}
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return work;
}
static bool task_work_func_match(struct callback_head *cb, void *data)
{
return cb->func == data;
}
/**
* task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
* @task: the task which should execute the func's work
* @func: identifies the func to match with a work to remove
*
* Find the last queued pending work with ->func == @func and remove
* it from queue.
*
* RETURNS:
* The found work or NULL if not found.
*/
struct callback_head *
task_work_cancel_func(struct task_struct *task, task_work_func_t func)
{
return task_work_cancel_match(task, task_work_func_match, func);
}
static bool task_work_match(struct callback_head *cb, void *data)
{
return cb == data;
}
/**
* task_work_cancel - cancel a pending work added by task_work_add()
* @task: the task which should execute the work
* @cb: the callback to remove if queued
*
* Remove a callback from a task's queue if queued.
*
* RETURNS:
* True if the callback was queued and got cancelled, false otherwise.
*/
bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
{
struct callback_head *ret;
ret = task_work_cancel_match(task, task_work_match, cb);
return ret == cb;
}
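/*
 * Illustrative sketch (hypothetical, reusing struct example_work from the
 * sketch after task_work_add() above): before freeing an object that may
 * still have its callback_head queued, cancel it first; free immediately
 * only when the cancel succeeded, otherwise the callback still owns it.
 */
static void example_free_work(struct task_struct *task, struct example_work *ew)
{
	if (task_work_cancel(task, &ew->cb))
		kfree(ew);	/* dequeued, example_work_fn() will not run */
	/* else: example_work_fn() will run later and free it */
}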
/**
* task_work_run - execute the works added by task_work_add()
*
* Flush the pending works. Should be used by the core kernel code.
* Called before the task returns to user mode or stops, or when
* it exits. In the latter case task_work_add() can no longer add
* new work after task_work_run() returns.
*/
void task_work_run(void)
{
struct task_struct *task = current;
struct callback_head *work, *head, *next;
for (;;) {
/*
* work->func() can do task_work_add(), do not set
* work_exited unless the list is empty.
*/
work = READ_ONCE(task->task_works);
do {
head = NULL;
if (!work) {
if (task->flags & PF_EXITING)
head = &work_exited;
else
break;
}
	} while (!try_cmpxchg(&task->task_works, &work, head));

	if (!work)
break;
/*
* Synchronize with task_work_cancel_match(). It can not remove
* the first entry == work, cmpxchg(task_works) must fail.
* But it can remove another entry from the ->next list.
*/
raw_spin_lock_irq(&task->pi_lock);
raw_spin_unlock_irq(&task->pi_lock);
do {
next = work->next;
work->func(work);
work = next;
cond_resched();
} while (work);
}
}
// SPDX-License-Identifier: GPL-2.0
/*
* Kernel internal timers
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
* 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
* serialize accesses to xtime/lost_ticks).
* Copyright (C) 1998 Andrea Arcangeli
* 1999-03-10 Improved NTP compatibility by Ulrich Windl
* 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
* 2000-10-05 Implemented scalable SMP per-CPU timer handling.
* Copyright (C) 2000, 2001, 2002 Ingo Molnar
* Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
*/
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>
#include "tick-internal.h"
#include "timer_migration.h"
#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
/*
* The timer wheel has LVL_DEPTH array levels. Each level provides an array of
* LVL_SIZE buckets. Each level is driven by its own clock and therefore each
* level has a different granularity.
*
* The level granularity is: LVL_CLK_DIV ^ level
* The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
*
* The array level of a newly armed timer depends on the relative expiry
* time. The farther the expiry time is away the higher the array level and
* therefore the granularity becomes.
*
* Contrary to the original timer wheel implementation, which aims for 'exact'
* expiry of the timers, this implementation removes the need for recascading
* the timers into the lower array levels. The previous 'classic' timer wheel
* implementation of the kernel already violated the 'exact' expiry by adding
* slack to the expiry time to provide batched expiration. The granularity
* levels provide implicit batching.
*
* This is an optimization of the original timer wheel implementation for the
* majority of the timer wheel use cases: timeouts. The vast majority of
* timeout timers (networking, disk I/O ...) are canceled before expiry. If
* the timeout expires it indicates that normal operation is disturbed, so it
* does not matter much whether the timeout comes with a slight delay.
*
* The only exception to this are networking timers with a small expiry
* time. They rely on the granularity. Those fit into the first wheel level,
* which has HZ granularity.
*
* We don't have cascading anymore. Timers with an expiry time above the
* capacity of the last wheel level are force expired at the maximum timeout
* value of the last wheel level. From data sampling we know that the maximum
* value observed is 5 days (network connection tracking), so this should not
* be an issue.
*
* The currently chosen array constants values are a good compromise between
* array size and granularity.
*
* This results in the following granularity and range levels:
*
* HZ 1000 steps
* Level Offset Granularity Range
* 0 0 1 ms 0 ms - 63 ms
* 1 64 8 ms 64 ms - 511 ms
* 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
* 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
* 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
* 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
* 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
* 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
* 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
*
* HZ 300
* Level Offset Granularity Range
* 0 0 3 ms 0 ms - 210 ms
* 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
* 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
* 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
* 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
* 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
* 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
* 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
* 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
*
* HZ 250
* Level Offset Granularity Range
* 0 0 4 ms 0 ms - 255 ms
* 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
* 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
* 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
* 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
* 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
* 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
* 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
* 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
*
* HZ 100
* Level Offset Granularity Range
* 0 0 10 ms 0 ms - 630 ms
* 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
* 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
* 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
* 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
* 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
* 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
* 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
*/
/* Clock divisor for the next level */
#define LVL_CLK_SHIFT 3
#define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK (LVL_CLK_DIV - 1)
#define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
/*
* The time start value for each level to select the bucket at enqueue
* time. We start from the last possible delta of the previous level
* so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
*/
#define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
/* Size of each clock level */
#define LVL_BITS 6
#define LVL_SIZE (1UL << LVL_BITS)
#define LVL_MASK (LVL_SIZE - 1)
#define LVL_OFFS(n) ((n) * LVL_SIZE)
/* Level depth */
#if HZ > 100
# define LVL_DEPTH 9
# else
# define LVL_DEPTH 8
#endif
/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
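/*
 * Worked example (illustrative, derived from the macros above): with
 * LVL_BITS == 6 and LVL_CLK_SHIFT == 3,
 *
 *	LVL_GRAN(0)  = 1 << 0  =   1 jiffy
 *	LVL_GRAN(1)  = 1 << 3  =   8 jiffies
 *	LVL_GRAN(2)  = 1 << 6  =  64 jiffies
 *	LVL_START(1) = 63 << 0 =  63
 *	LVL_START(2) = 63 << 3 = 504
 *
 * At HZ=1000 one jiffy is 1 ms, which matches the granularity column of
 * the level table above.
 */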
/*
* The resulting wheel size. If NOHZ is configured we allocate two
* wheels so we have a separate storage for the deferrable timers.
*/
#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
#ifdef CONFIG_NO_HZ_COMMON
/*
* If multiple bases need to be locked, use the base ordering for lock
* nesting, i.e. lowest number first.
*/
# define NR_BASES 3
# define BASE_LOCAL 0
# define BASE_GLOBAL 1
# define BASE_DEF 2
#else
# define NR_BASES 1
# define BASE_LOCAL 0
# define BASE_GLOBAL 0
# define BASE_DEF 0
#endif
/**
* struct timer_base - Per CPU timer base (number of base depends on config)
* @lock: Lock protecting the timer_base
* @running_timer: When expiring timers, the lock is dropped. To make
* sure not to race against deleting/modifying a
* currently running timer, the pointer is set to the
* timer, which expires at the moment. If no timer is
* running, the pointer is NULL.
* @expiry_lock: PREEMPT_RT only: Lock is taken in softirq around
* timer expiry callback execution and when trying to
* delete a running timer and it wasn't successful in
* the first glance. It prevents priority inversion
* when callback was preempted on a remote CPU and a
* caller tries to delete the running timer. It also
* prevents a live lock, when the task which tries to
* delete a timer preempted the softirq thread which
* is running the timer callback function.
* @timer_waiters: PREEMPT_RT only: Tells, if there is a waiter
* waiting for the end of the timer callback function
* execution.
* @clk: clock of the timer base; is updated before enqueue
* of a timer; during expiry, it is 1 offset ahead of
* jiffies to avoid endless requeuing to current
* jiffies
* @next_expiry: expiry value of the first timer; it is updated when
* finding the next timer and during enqueue; the
* value is not valid, when next_expiry_recalc is set
* @cpu: Number of CPU the timer base belongs to
* @next_expiry_recalc: States, whether a recalculation of next_expiry is
* required. Value is set true, when a timer was
* deleted.
* @is_idle: Is set, when timer_base is idle. It is triggered by NOHZ
* code. This state is only used in standard
* base. Deferrable timers, which are enqueued remotely,
* never wake up an idle CPU, so there is no need to
* support it for this base.
* @timers_pending: Is set, when a timer is pending in the base. It is only
* reliable when next_expiry_recalc is not set.
* @pending_map: bitmap of the timer wheel; each bit reflects a
* bucket of the wheel. When a bit is set, at least a
* single timer is enqueued in the related bucket.
* @vectors: Array of lists; Each array member reflects a bucket
* of the timer wheel. The list contains all timers
* which are enqueued into a specific bucket.
*/
struct timer_base {
raw_spinlock_t lock;
struct timer_list *running_timer;
#ifdef CONFIG_PREEMPT_RT
spinlock_t expiry_lock;
atomic_t timer_waiters;
#endif
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
bool next_expiry_recalc;
bool is_idle;
bool timers_pending;
DECLARE_BITMAP(pending_map, WHEEL_SIZE);
struct hlist_head vectors[WHEEL_SIZE];
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
#ifdef CONFIG_NO_HZ_COMMON
static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);
static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);
#ifdef CONFIG_SMP
static unsigned int sysctl_timer_migration = 1;
DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
static void timers_update_migration(void)
{
if (sysctl_timer_migration && tick_nohz_active)
static_branch_enable(&timers_migration_enabled);
else
static_branch_disable(&timers_migration_enabled);
}
#ifdef CONFIG_SYSCTL
static int timer_migration_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
mutex_lock(&timer_keys_mutex);
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write)
timers_update_migration();
mutex_unlock(&timer_keys_mutex);
return ret;
}
static const struct ctl_table timer_sysctl[] = {
{
.procname = "timer_migration",
.data = &sysctl_timer_migration,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = timer_migration_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
};
static int __init timer_sysctl_init(void)
{
register_sysctl("kernel", timer_sysctl);
return 0;
}
device_initcall(timer_sysctl_init);
#endif /* CONFIG_SYSCTL */
#else /* CONFIG_SMP */
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */
static void timer_update_keys(struct work_struct *work)
{
mutex_lock(&timer_keys_mutex);
timers_update_migration();
static_branch_enable(&timers_nohz_active);
mutex_unlock(&timer_keys_mutex);
}
void timers_update_nohz(void)
{
schedule_work(&timer_update_work);
}
static inline bool is_timers_nohz_active(void)
{
return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */
static unsigned long round_jiffies_common(unsigned long j, int cpu,
bool force_up)
{
int rem;
unsigned long original = j;
/*
* We don't want all cpus firing their timers at once hitting the
* same lock or cachelines, so we skew each extra cpu with an extra
* 3 jiffies. This 3 jiffies came originally from the mm/ code which
* already did this.
* The skew is done by adding 3*cpunr, then round, then subtract this
* extra offset again.
*/
j += cpu * 3;
rem = j % HZ;
/*
* If the target jiffy is just after a whole second (which can happen
* due to delays of the timer irq, long irq off times, etc.) then
* we should round down to the whole second, not up. Use 1/4th second
* as cutoff for this rounding as an extreme upper bound for this.
* But never round down if @force_up is set.
*/
if (rem < HZ/4 && !force_up) /* round down */
j = j - rem;
else /* round up */
j = j - rem + HZ;
/* now that we have rounded, subtract the extra skew again */
j -= cpu * 3;
/*
* Make sure j is still in the future. Otherwise return the
* unmodified value.
*/
return time_is_after_jiffies(j) ? j : original;
}
/**
* __round_jiffies_relative - function to round jiffies to a full second
* @j: the time in (relative) jiffies that should be rounded
* @cpu: the processor number on which the timeout will happen
*
* __round_jiffies_relative() rounds a time delta in the future (in jiffies)
* up or down to (approximately) full seconds. This is useful for timers
* for which the exact time they fire does not matter too much, as long as
* they fire approximately every X seconds.
*
* By rounding these timers to whole seconds, all such timers will fire
* at the same time, rather than at various times spread out. The goal
* of this is to have the CPU wake up less, which saves power.
*
* The exact rounding is skewed for each processor to avoid all
* processors firing at the exact same time, which could lead
* to lock contention or spurious cache line bouncing.
*
* The return value is the rounded version of the @j parameter.
*/
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
unsigned long j0 = jiffies;
/* Use j0 because jiffies might change while we run */
return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
* round_jiffies - function to round jiffies to a full second
* @j: the time in (absolute) jiffies that should be rounded
*
* round_jiffies() rounds an absolute time in the future (in jiffies)
* up or down to (approximately) full seconds. This is useful for timers
* for which the exact time they fire does not matter too much, as long as
* they fire approximately every X seconds.
*
* By rounding these timers to whole seconds, all such timers will fire
* at the same time, rather than at various times spread out. The goal
* of this is to have the CPU wake up less, which saves power.
*
* The return value is the rounded version of the @j parameter.
*/
unsigned long round_jiffies(unsigned long j)
{
return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
/**
* round_jiffies_relative - function to round jiffies to a full second
* @j: the time in (relative) jiffies that should be rounded
*
* round_jiffies_relative() rounds a time delta in the future (in jiffies)
* up or down to (approximately) full seconds. This is useful for timers
* for which the exact time they fire does not matter too much, as long as
* they fire approximately every X seconds.
*
* By rounding these timers to whole seconds, all such timers will fire
* at the same time, rather than at various times spread out. The goal
* of this is to have the CPU wake up less, which saves power.
*
* The return value is the rounded version of the @j parameter.
*/
unsigned long round_jiffies_relative(unsigned long j)
{
return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
* __round_jiffies_up_relative - function to round jiffies up to a full second
* @j: the time in (relative) jiffies that should be rounded
* @cpu: the processor number on which the timeout will happen
*
* This is the same as __round_jiffies_relative() except that it will never
* round down. This is useful for timeouts for which the exact time
* of firing does not matter too much, as long as they don't fire too
* early.
*/
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
unsigned long j0 = jiffies;
/* Use j0 because jiffies might change while we run */
return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
/**
* round_jiffies_up - function to round jiffies up to a full second
* @j: the time in (absolute) jiffies that should be rounded
*
* This is the same as round_jiffies() except that it will never
* round down. This is useful for timeouts for which the exact time
* of firing does not matter too much, as long as they don't fire too
* early.
*/
unsigned long round_jiffies_up(unsigned long j)
{
return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);
/**
* round_jiffies_up_relative - function to round jiffies up to a full second
* @j: the time in (relative) jiffies that should be rounded
*
* This is the same as round_jiffies_relative() except that it will never
* round down. This is useful for timeouts for which the exact time
* of firing does not matter too much, as long as they don't fire too
* early.
*/
unsigned long round_jiffies_up_relative(unsigned long j)
{
return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
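/*
 * Illustrative sketch, not part of this file (names are hypothetical): a
 * periodic housekeeping timer that does not care about exact expiry can be
 * aligned to full seconds so it fires together with other such timers and
 * lets idle CPUs sleep longer:
 */
static void example_housekeeping_fn(struct timer_list *t)
{
	/* ... do the periodic work ... */

	/* Re-arm roughly ten seconds out, rounded to a full second. */
	mod_timer(t, round_jiffies(jiffies + 10 * HZ));
}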
static inline unsigned int timer_get_idx(struct timer_list *timer)
{
return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}
static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
idx << TIMER_ARRAYSHIFT;
}
/*
* Helper function to calculate the array index for a given expiry
* time.
*/
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
unsigned long *bucket_expiry)
{
/*
* The timer wheel has to guarantee that a timer does not fire
* early. Early expiry can happen due to:
* - Timer is armed at the edge of a tick
* - Truncation of the expiry time in the outer wheel levels
*
* Round up with level granularity to prevent this.
*/
	expires = (expires >> LVL_SHIFT(lvl)) + 1;
	*bucket_expiry = expires << LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}
static int calc_wheel_index(unsigned long expires, unsigned long clk,
unsigned long *bucket_expiry)
{
unsigned long delta = expires - clk;
unsigned int idx;
	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0, bucket_expiry);
	} else if (delta < LVL_START(2)) {
idx = calc_index(expires, 1, bucket_expiry);
} else if (delta < LVL_START(3)) {
idx = calc_index(expires, 2, bucket_expiry);
} else if (delta < LVL_START(4)) {
idx = calc_index(expires, 3, bucket_expiry);
} else if (delta < LVL_START(5)) {
idx = calc_index(expires, 4, bucket_expiry);
} else if (delta < LVL_START(6)) {
idx = calc_index(expires, 5, bucket_expiry);
} else if (delta < LVL_START(7)) {
idx = calc_index(expires, 6, bucket_expiry);
} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
idx = calc_index(expires, 7, bucket_expiry);
} else if ((long) delta < 0) {
idx = clk & LVL_MASK;
*bucket_expiry = clk;
} else {
/*
* Force expire obscene large timeouts to expire at the
* capacity limit of the wheel.
*/
if (delta >= WHEEL_TIMEOUT_CUTOFF)
expires = clk + WHEEL_TIMEOUT_MAX;
idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
}
return idx;
}
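/*
 * Worked example (illustrative): with base->clk == 0 and a timer expiring
 * 100 jiffies out, delta == 100 lies between LVL_START(1) == 63 and
 * LVL_START(2) == 504, so level 1 is used. calc_index() then rounds up
 * with the 8 jiffy granularity of that level:
 *
 *	expires        = (100 >> 3) + 1 = 13
 *	*bucket_expiry = 13 << 3        = 104
 *	idx            = LVL_OFFS(1) + (13 & LVL_MASK) = 64 + 13 = 77
 *
 * The timer lands in bucket 77 and fires at jiffy 104: slightly later
 * than requested, but never early.
 */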
static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
/*
* Deferrable timers do not prevent the CPU from entering dynticks and
* are not taken into account on the idle/nohz_full path. An IPI when a
* new deferrable timer is enqueued will wake up the remote CPU but
* nothing will be done with the deferrable timer base. Therefore skip
* the remote IPI for deferrable timers completely.
*/
if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE)
return;
/*
* We might have to IPI the remote CPU if the base is idle and the
* timer is pinned. If it is a non pinned timer, it is only queued
* on the remote CPU, when timer was running during queueing. Then
* everything is handled by remote CPU anyway. If the other CPU is
* on the way to idle then it can't set base->is_idle as we hold
* the base lock:
*/
	if (base->is_idle) {
		WARN_ON_ONCE(!(timer->flags & TIMER_PINNED ||
			       tick_nohz_full_cpu(base->cpu)));
		wake_up_nohz_cpu(base->cpu);
	}
}
/*
* Enqueue the timer into the hash bucket, mark it pending in
* the bitmap, store the index in the timer flags then wake up
* the target CPU if needed.
*/
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
unsigned int idx, unsigned long bucket_expiry)
{
hlist_add_head(&timer->entry, base->vectors + idx);
__set_bit(idx, base->pending_map);
timer_set_idx(timer, idx);
trace_timer_start(timer, bucket_expiry);
/*
* Check whether this is the new first expiring timer. The
* effective expiry time of the timer is required here
* (bucket_expiry) instead of timer->expires.
*/
if (time_before(bucket_expiry, base->next_expiry)) {
/*
* Set the next expiry time and kick the CPU so it
* can reevaluate the wheel:
*/
WRITE_ONCE(base->next_expiry, bucket_expiry);
base->timers_pending = true;
		base->next_expiry_recalc = false;
		trigger_dyntick_cpu(base, timer);
}
}
static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
unsigned long bucket_expiry;
unsigned int idx;
idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
enqueue_timer(base, timer, idx, bucket_expiry);
}
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
static const struct debug_obj_descr timer_debug_descr;
struct timer_hint {
void (*function)(struct timer_list *t);
long offset;
};
#define TIMER_HINT(fn, container, timr, hintfn) \
{ \
.function = fn, \
.offset = offsetof(container, hintfn) - \
offsetof(container, timr) \
}
static const struct timer_hint timer_hints[] = {
TIMER_HINT(delayed_work_timer_fn,
struct delayed_work, timer, work.func),
TIMER_HINT(kthread_delayed_work_timer_fn,
struct kthread_delayed_work, timer, work.func),
};
static void *timer_debug_hint(void *addr)
{
struct timer_list *timer = addr;
int i;
for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
if (timer_hints[i].function == timer->function) {
void (**fn)(void) = addr + timer_hints[i].offset;
return *fn;
}
}
return timer->function;
}
static bool timer_is_static_object(void *addr)
{
struct timer_list *timer = addr;
return (timer->entry.pprev == NULL &&
timer->entry.next == TIMER_ENTRY_STATIC);
}
/*
* timer_fixup_init is called when:
* - an active object is initialized
*/
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
struct timer_list *timer = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
timer_delete_sync(timer);
debug_object_init(timer, &timer_debug_descr);
return true;
default:
return false;
}
}
/* Stub timer callback for improperly used timers. */
static void stub_timer(struct timer_list *unused)
{
WARN_ON(1);
}
/*
* timer_fixup_activate is called when:
* - an active object is activated
* - an unknown non-static object is activated
*/
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
struct timer_list *timer = addr;
switch (state) {
case ODEBUG_STATE_NOTAVAILABLE:
timer_setup(timer, stub_timer, 0);
return true;
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
fallthrough;
default:
return false;
}
}
/*
* timer_fixup_free is called when:
* - an active object is freed
*/
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
struct timer_list *timer = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
timer_delete_sync(timer);
debug_object_free(timer, &timer_debug_descr);
return true;
default:
return false;
}
}
/*
* timer_fixup_assert_init is called when:
* - an untracked/uninit-ed object is found
*/
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
struct timer_list *timer = addr;
switch (state) {
case ODEBUG_STATE_NOTAVAILABLE:
timer_setup(timer, stub_timer, 0);
return true;
default:
return false;
}
}
static const struct debug_obj_descr timer_debug_descr = {
.name = "timer_list",
.debug_hint = timer_debug_hint,
.is_static_object = timer_is_static_object,
.fixup_init = timer_fixup_init,
.fixup_activate = timer_fixup_activate,
.fixup_free = timer_fixup_free,
.fixup_assert_init = timer_fixup_assert_init,
};
static inline void debug_timer_init(struct timer_list *timer)
{
debug_object_init(timer, &timer_debug_descr);
}
static inline void debug_timer_activate(struct timer_list *timer)
{
debug_object_activate(timer, &timer_debug_descr);
}
static inline void debug_timer_deactivate(struct timer_list *timer)
{
debug_object_deactivate(timer, &timer_debug_descr);
}
static inline void debug_timer_assert_init(struct timer_list *timer)
{
debug_object_assert_init(timer, &timer_debug_descr);
}
static void do_init_timer(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name, struct lock_class_key *key);
void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name, struct lock_class_key *key)
{
debug_object_init_on_stack(timer, &timer_debug_descr);
do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(timer_init_key_on_stack);
void timer_destroy_on_stack(struct timer_list *timer)
{
debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(timer_destroy_on_stack);
#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
debug_timer_init(timer);
trace_timer_init(timer);
}
static inline void debug_deactivate(struct timer_list *timer)
{
debug_timer_deactivate(timer);
trace_timer_cancel(timer);
}
static inline void debug_assert_init(struct timer_list *timer)
{
debug_timer_assert_init(timer);
}
static void do_init_timer(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name, struct lock_class_key *key)
{
timer->entry.pprev = NULL;
timer->function = func;
if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
flags &= TIMER_INIT_FLAGS;
timer->flags = flags | raw_smp_processor_id();
lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
* timer_init_key - initialize a timer
* @timer: the timer to be initialized
* @func: timer callback function
* @flags: timer flags
* @name: name of the timer
* @key: lockdep class key of the fake lock used for tracking timer
* sync lock dependencies
*
* timer_init_key() must be done to a timer prior to calling *any* of the
* other timer functions.
*/
void timer_init_key(struct timer_list *timer,
void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(timer_init_key);
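/*
 * Illustrative sketch, not part of this file (all example_* names are
 * hypothetical): drivers normally go through the timer_setup() wrapper
 * from <linux/timer.h>, which ultimately initializes the timer via
 * timer_init_key() and stores the callback and flags before the timer is
 * armed for the first time.
 */
struct example_device {
	struct timer_list watchdog;
	/* ... */
};

static void example_watchdog_fn(struct timer_list *t)
{
	/* Runs in timer (softirq) context when the watchdog expires. */
}

static void example_device_init(struct example_device *dev)
{
	timer_setup(&dev->watchdog, example_watchdog_fn, 0);
}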
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
struct hlist_node *entry = &timer->entry;
debug_deactivate(timer);
	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}
static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
		__clear_bit(idx, base->pending_map);
		base->next_expiry_recalc = true;
	}

	detach_timer(timer, clear_pending);
	return 1;
}
static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
/*
* If the timer is deferrable and NO_HZ_COMMON is set then we need
* to use the deferrable base.
*/
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		index = BASE_DEF;
	return per_cpu_ptr(&timer_bases[index], cpu);
}
static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
/*
* If the timer is deferrable and NO_HZ_COMMON is set then we need
* to use the deferrable base.
*/
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
index = BASE_DEF;
return this_cpu_ptr(&timer_bases[index]);
}
static inline struct timer_base *get_timer_base(u32 tflags)
{
return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}
static inline void __forward_timer_base(struct timer_base *base,
unsigned long basej)
{
/*
* Check whether we can forward the base. We can only do that when
* @basej is past base->clk otherwise we might rewind base->clk.
*/
if (time_before_eq(basej, base->clk))
return;
/*
* If the next expiry value is > jiffies, then we fast forward to
* jiffies otherwise we forward to the next expiry value.
*/
	if (time_after(base->next_expiry, basej)) {
		base->clk = basej;
	} else {
		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
			return;
		base->clk = base->next_expiry;
	}
}
static inline void forward_timer_base(struct timer_base *base)
{
__forward_timer_base(base, READ_ONCE(jiffies));
}
/*
* We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
* that all timers which are tied to this base are locked, and the base itself
* is locked too.
*
* So __run_timers/migrate_timers can safely modify all timers which could
* be found in the base->vectors array.
*
* When a timer is migrating then the TIMER_MIGRATING flag is set and we need
* to wait until the migration is done.
*/
static struct timer_base *lock_timer_base(struct timer_list *timer,
unsigned long *flags)
__acquires(timer->base->lock)
{
for (;;) {
struct timer_base *base;
u32 tf;
/*
* We need to use READ_ONCE() here, otherwise the compiler
* might re-read @tf between the check for TIMER_MIGRATING
* and spin_lock().
*/
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
raw_spin_lock_irqsave(&base->lock, *flags);
if (timer->flags == tf)
return base;
raw_spin_unlock_irqrestore(&base->lock, *flags);
}
cpu_relax();
}
}
#define MOD_TIMER_PENDING_ONLY 0x01
#define MOD_TIMER_REDUCE 0x02
#define MOD_TIMER_NOTPENDING 0x04
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
unsigned long clk = 0, flags, bucket_expiry;
struct timer_base *base, *new_base;
unsigned int idx = UINT_MAX;
int ret = 0;
debug_assert_init(timer);
/*
* This is a common optimization triggered by the networking code - if
* the timer is re-modified to have the same timeout or ends up in the
* same array bucket then just return:
*/
if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
/*
* The downside of this optimization is that it can result in
* larger granularity than you would get from adding a new
* timer with this expiry.
*/
long diff = timer->expires - expires;
if (!diff)
			return 1;

		if (options & MOD_TIMER_REDUCE && diff <= 0)
return 1;
/*
* We lock timer base and calculate the bucket index right
* here. If the timer ends up in the same bucket, then we
* just update the expiry time and avoid the whole
* dequeue/enqueue dance.
*/
base = lock_timer_base(timer, &flags);
/*
* Has @timer been shutdown? This needs to be evaluated
* while holding base lock to prevent a race against the
* shutdown code.
*/
		if (!timer->function)
			goto out_unlock;

		forward_timer_base(base);

		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
		    time_before_eq(timer->expires, expires)) {
			ret = 1;
			goto out_unlock;
}
clk = base->clk;
idx = calc_wheel_index(expires, clk, &bucket_expiry);
/*
* Retrieve and compare the array index of the pending
* timer. If it matches set the expiry to the new value so a
* subsequent call will exit in the expires check above.
*/
		if (idx == timer_get_idx(timer)) {
			if (!(options & MOD_TIMER_REDUCE))
				timer->expires = expires;
			else if (time_after(timer->expires, expires))
timer->expires = expires;
ret = 1;
goto out_unlock;
}
} else {
base = lock_timer_base(timer, &flags);
/*
* Has @timer been shutdown? This needs to be evaluated
* while holding base lock to prevent a race against the
* shutdown code.
*/
		if (!timer->function)
			goto out_unlock;

		forward_timer_base(base);
}
	ret = detach_if_pending(timer, base, false);
	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
		goto out_unlock;

	new_base = get_timer_this_cpu_base(timer->flags);
if (base != new_base) {
/*
* We are trying to schedule the timer on the new base.
* However we can't change timer's base while it is running,
* otherwise timer_delete_sync() can't detect that the timer's
* handler yet has not finished. This also guarantees that the
* timer is serialized wrt itself.
*/
if (likely(base->running_timer != timer)) {
/* See the comment in lock_timer_base() */
timer->flags |= TIMER_MIGRATING;
raw_spin_unlock(&base->lock);
base = new_base;
raw_spin_lock(&base->lock);
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
forward_timer_base(base);
}
}
debug_timer_activate(timer);
timer->expires = expires;
/*
* If 'idx' was calculated above and the base time did not advance
* between calculating 'idx' and possibly switching the base, only
* enqueue_timer() is required. Otherwise we need to (re)calculate
* the wheel index via internal_add_timer().
*/
	if (idx != UINT_MAX && clk == base->clk)
		enqueue_timer(base, timer, idx, bucket_expiry);
else
internal_add_timer(base, timer);
out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
* mod_timer_pending - Modify a pending timer's timeout
* @timer: The pending timer to be modified
* @expires: New absolute timeout in jiffies
*
* mod_timer_pending() is the same for pending timers as mod_timer(), but
* will not activate inactive timers.
*
* If @timer->function == NULL then the start operation is silently
* discarded.
*
* Return:
* * %0 - The timer was inactive and not modified or was in
* shutdown state and the operation was discarded
* * %1 - The timer was active and requeued to expire at @expires
*/
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);
/**
* mod_timer - Modify a timer's timeout
* @timer: The timer to be modified
* @expires: New absolute timeout in jiffies
*
* mod_timer(timer, expires) is equivalent to:
*
* timer_delete(timer); timer->expires = expires; add_timer(timer);
*
* mod_timer() is more efficient than the above open coded sequence. In
* case that the timer is inactive, the timer_delete() part is a NOP. The
* timer is in any case activated with the new expiry time @expires.
*
* Note that if there are multiple unserialized concurrent users of the
* same timer, then mod_timer() is the only safe way to modify the timeout,
* since add_timer() cannot modify an already running timer.
*
* If @timer->function == NULL then the start operation is silently
* discarded. In this case the return value is 0 and meaningless.
*
* Return:
* * %0 - The timer was inactive and started or was in shutdown
* state and the operation was discarded
* * %1 - The timer was active and requeued to expire at @expires or
* the timer was active and not modified because @expires did
* not change the effective expiry time
*/
int mod_timer(struct timer_list *timer, unsigned long expires)
{
return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);
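/*
 * Illustrative sketch (hypothetical, reusing struct example_device from the
 * sketch after timer_init_key() above): the classic mod_timer() use case is
 * pushing an inactivity timeout further out on every event. When the new
 * expiry lands in the same wheel bucket, __mod_timer() takes the early
 * return path and skips the dequeue/enqueue dance entirely.
 */
#define EXAMPLE_TIMEOUT	(5 * HZ)	/* hypothetical timeout */

static void example_saw_activity(struct example_device *dev)
{
	mod_timer(&dev->watchdog, jiffies + EXAMPLE_TIMEOUT);
}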
/**
* timer_reduce - Modify a timer's timeout if it would reduce the timeout
* @timer: The timer to be modified
* @expires: New absolute timeout in jiffies
*
* timer_reduce() is very similar to mod_timer(), except that it will only
* modify an enqueued timer if that would reduce the expiration time. If
* @timer is not enqueued it starts the timer.
*
* If @timer->function == NULL then the start operation is silently
* discarded.
*
* Return:
* * %0 - The timer was inactive and started or was in shutdown
* state and the operation was discarded
* * %1 - The timer was active and requeued to expire at @expires or
* the timer was active and not modified because @expires
* did not change the effective expiry time such that the
* timer would expire earlier than already scheduled
*/
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
EXPORT_SYMBOL(timer_reduce);
/**
* add_timer - Start a timer
* @timer: The timer to be started
*
* Start @timer to expire at @timer->expires in the future. @timer->expires
* is the absolute expiry time measured in 'jiffies'. When the timer expires
* timer->function(timer) will be invoked from soft interrupt context.
*
* The @timer->expires and @timer->function fields must be set prior
* to calling this function.
*
* If @timer->function == NULL then the start operation is silently
* discarded.
*
* If @timer->expires is already in the past @timer will be queued to
* expire at the next timer tick.
*
* This can only operate on an inactive timer. Attempts to invoke this on
* an active timer are rejected with a warning.
*/
void add_timer(struct timer_list *timer)
{
	if (WARN_ON_ONCE(timer_pending(timer)))
		return;
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer);
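/*
 * Illustrative sketch (hypothetical): add_timer() only works on an inactive
 * timer and expects ->expires to be set up front:
 */
static void example_start_watchdog(struct example_device *dev)
{
	dev->watchdog.expires = jiffies + EXAMPLE_TIMEOUT;
	add_timer(&dev->watchdog);
}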
/**
* add_timer_local() - Start a timer on the local CPU
* @timer: The timer to be started
*
* Same as add_timer() except that the timer flag TIMER_PINNED is set.
*
* See add_timer() for further details.
*/
void add_timer_local(struct timer_list *timer)
{
if (WARN_ON_ONCE(timer_pending(timer)))
return;
timer->flags |= TIMER_PINNED;
__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer_local);
/**
* add_timer_global() - Start a timer without TIMER_PINNED flag set
* @timer: The timer to be started
*
* Same as add_timer() except that the timer flag TIMER_PINNED is unset.
*
* See add_timer() for further details.
*/
void add_timer_global(struct timer_list *timer)
{
if (WARN_ON_ONCE(timer_pending(timer)))
return;
timer->flags &= ~TIMER_PINNED;
__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer_global);
/**
* add_timer_on - Start a timer on a particular CPU
* @timer: The timer to be started
* @cpu: The CPU to start it on
*
* Same as add_timer() except that it starts the timer on the given CPU and
* the TIMER_PINNED flag is set. If the timer should not be pinned for the
* next activation, add_timer_global() should be used instead as it unsets
* the TIMER_PINNED flag.
*
* See add_timer() for further details.
*/
void add_timer_on(struct timer_list *timer, int cpu)
{
struct timer_base *new_base, *base;
unsigned long flags;
debug_assert_init(timer);
if (WARN_ON_ONCE(timer_pending(timer)))
return;
/* Make sure timer flags have TIMER_PINNED flag set */
timer->flags |= TIMER_PINNED;
new_base = get_timer_cpu_base(timer->flags, cpu);
/*
* If @timer was on a different CPU, it should be migrated with the
* old base locked to prevent other operations proceeding with the
* wrong base locked. See lock_timer_base().
*/
base = lock_timer_base(timer, &flags);
/*
* Has @timer been shutdown? This needs to be evaluated while
* holding base lock to prevent a race against the shutdown code.
*/
if (!timer->function)
goto out_unlock;
if (base != new_base) {
timer->flags |= TIMER_MIGRATING;
raw_spin_unlock(&base->lock);
base = new_base;
raw_spin_lock(&base->lock);
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | cpu);
}
forward_timer_base(base);
debug_timer_activate(timer);
internal_add_timer(base, timer);
out_unlock:
raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
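/*
 * Illustrative sketch (hypothetical): add_timer_on() pins the timer to a
 * particular CPU, e.g. for per-CPU housekeeping that must run there:
 */
static void example_start_on_cpu(struct timer_list *t, int cpu)
{
	t->expires = jiffies + HZ;
	add_timer_on(t, cpu);
}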
/**
* __timer_delete - Internal function: Deactivate a timer
* @timer: The timer to be deactivated
* @shutdown: If true, this indicates that the timer is about to be
* shutdown permanently.
*
* If @shutdown is true then @timer->function is set to NULL under the
* timer base lock which prevents further rearming of the timer. In that
* case any attempt to rearm @timer after this function returns will be
* silently ignored.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending and deactivated
*/
static int __timer_delete(struct timer_list *timer, bool shutdown)
{
struct timer_base *base;
unsigned long flags;
int ret = 0;
debug_assert_init(timer);
/*
* If @shutdown is set then the lock has to be taken whether the
* timer is pending or not to protect against a concurrent rearm
* which might hit between the lockless pending check and the lock
* acquisition. By taking the lock it is ensured that such a newly
* enqueued timer is dequeued and cannot end up with
* timer->function == NULL in the expiry code.
*
* If timer->function is currently executed, then this makes sure
* that the callback cannot requeue the timer.
*/
	if (timer_pending(timer) || shutdown) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		if (shutdown)
			timer->function = NULL;
		raw_spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
/**
* timer_delete - Deactivate a timer
* @timer: The timer to be deactivated
*
* The function only deactivates a pending timer, but contrary to
* timer_delete_sync() it does not take into account whether the timer's
* callback function is concurrently executed on a different CPU or not.
* Nor does it prevent rearming of the timer. If @timer can be rearmed
* concurrently then the return value of this function is meaningless.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending and deactivated
*/
int timer_delete(struct timer_list *timer)
{
return __timer_delete(timer, false);
}
EXPORT_SYMBOL(timer_delete);
/**
* timer_shutdown - Deactivate a timer and prevent rearming
* @timer: The timer to be deactivated
*
* The function does not wait for an eventually running timer callback on a
* different CPU but it prevents rearming of the timer. Any attempt to arm
* @timer after this function returns will be silently ignored.
*
* This function is useful for teardown code and should only be used when
* timer_shutdown_sync() cannot be invoked due to locking or context constraints.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending
*/
int timer_shutdown(struct timer_list *timer)
{
return __timer_delete(timer, true);
}
EXPORT_SYMBOL_GPL(timer_shutdown);
/**
* __try_to_del_timer_sync - Internal function: Try to deactivate a timer
* @timer: Timer to deactivate
* @shutdown: If true, this indicates that the timer is about to be
* shutdown permanently.
*
* If @shutdown is true then @timer->function is set to NULL under the
* timer base lock which prevents further rearming of the timer. Any
* attempt to rearm @timer after this function returns will be silently
* ignored.
*
* This function cannot guarantee that the timer cannot be rearmed
* right after dropping the base lock if @shutdown is false. That
* needs to be prevented by the calling code if necessary.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending and deactivated
* * %-1 - The timer callback function is running on a different CPU
*/
static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
{
struct timer_base *base;
unsigned long flags;
int ret = -1;
debug_assert_init(timer);
base = lock_timer_base(timer, &flags);
if (base->running_timer != timer) {
ret = detach_if_pending(timer, base, true);
if (shutdown)
timer->function = NULL;
}
raw_spin_unlock_irqrestore(&base->lock, flags);
return ret;
}
/**
* timer_delete_sync_try - Try to deactivate a timer
* @timer: Timer to deactivate
*
* This function tries to deactivate a timer. On success the timer is not
* queued and the timer callback function is not running on any CPU.
*
* This function does not guarantee that the timer cannot be rearmed right
* after dropping the base lock. That needs to be prevented by the calling
* code if necessary.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending and deactivated
* * %-1 - The timer callback function is running on a different CPU
*/
int timer_delete_sync_try(struct timer_list *timer)
{
return __try_to_del_timer_sync(timer, false);
}
EXPORT_SYMBOL(timer_delete_sync_try);
#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
spin_lock_init(&base->expiry_lock);
}
static inline void timer_base_lock_expiry(struct timer_base *base)
{
spin_lock(&base->expiry_lock);
}
static inline void timer_base_unlock_expiry(struct timer_base *base)
{
spin_unlock(&base->expiry_lock);
}
/*
* The counterpart to del_timer_wait_running().
*
* If there is a waiter for base->expiry_lock, then it was waiting for the
* timer callback to finish. Drop expiry_lock and reacquire it. That allows
* the waiter to acquire the lock and make progress.
*/
static void timer_sync_wait_running(struct timer_base *base)
__releases(&base->lock) __releases(&base->expiry_lock)
__acquires(&base->expiry_lock) __acquires(&base->lock)
{
if (atomic_read(&base->timer_waiters)) {
raw_spin_unlock_irq(&base->lock);
spin_unlock(&base->expiry_lock);
spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
}
}
/*
* This function is called on PREEMPT_RT kernels when the fast path
* deletion of a timer failed because the timer callback function was
* running.
*
* This prevents priority inversion, if the softirq thread on a remote CPU
* got preempted, and it prevents a live lock when the task which tries to
* delete a timer preempted the softirq thread running the timer callback
* function.
*/
static void del_timer_wait_running(struct timer_list *timer)
{
u32 tf;
tf = READ_ONCE(timer->flags);
if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
struct timer_base *base = get_timer_base(tf);
/*
* Mark the base as contended and grab the expiry lock,
* which is held by the softirq across the timer
* callback. Drop the lock immediately so the softirq can
* expire the next timer. In theory the timer could already
* be running again, but that's more than unlikely and just
* causes another wait loop.
*/
atomic_inc(&base->timer_waiters);
spin_lock_bh(&base->expiry_lock);
atomic_dec(&base->timer_waiters);
spin_unlock_bh(&base->expiry_lock);
}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif
/**
* __timer_delete_sync - Internal function: Deactivate a timer and wait
* for the handler to finish.
* @timer: The timer to be deactivated
* @shutdown: If true, @timer->function will be set to NULL under the
* timer base lock which prevents rearming of @timer
*
* If @shutdown is not set the timer can be rearmed later. If the timer can
* be rearmed concurrently, i.e. after dropping the base lock then the
* return value is meaningless.
*
* If @shutdown is set then @timer->function is set to NULL under timer
* base lock which prevents rearming of the timer. Any attempt to rearm
* a shutdown timer is silently ignored.
*
* If the timer should be reused after shutdown it has to be initialized
* again.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending and deactivated
*/
static int __timer_delete_sync(struct timer_list *timer, bool shutdown)
{
int ret;
#ifdef CONFIG_LOCKDEP
unsigned long flags;
/*
* If lockdep gives a backtrace here, please reference
* the synchronization rules above.
*/
local_irq_save(flags);
lock_map_acquire(&timer->lockdep_map);
lock_map_release(&timer->lockdep_map);
local_irq_restore(flags);
#endif
/*
* don't use it in hardirq context, because it
* could lead to deadlock.
*/
WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE));
/*
* Must be able to sleep on PREEMPT_RT because of the slowpath in
* del_timer_wait_running().
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
lockdep_assert_preemption_enabled();
do {
ret = __try_to_del_timer_sync(timer, shutdown);
if (unlikely(ret < 0)) {
del_timer_wait_running(timer);
cpu_relax();
}
} while (ret < 0);
return ret;
}
/**
* timer_delete_sync - Deactivate a timer and wait for the handler to finish.
* @timer: The timer to be deactivated
*
* Synchronization rules: Callers must prevent restarting of the timer,
* otherwise this function is meaningless. It must not be called from
* interrupt contexts unless the timer is an irqsafe one. The caller must
* not hold locks which would prevent completion of the timer's callback
* function. The timer's handler must not call add_timer_on(). Upon exit
* the timer is not queued and the handler is not running on any CPU.
*
* For !irqsafe timers, the caller must not hold locks that are held in
* interrupt context. Even if the lock has nothing to do with the timer in
* question. Here's why::
*
* CPU0 CPU1
* ---- ----
* <SOFTIRQ>
* call_timer_fn();
* base->running_timer = mytimer;
* spin_lock_irq(somelock);
* <IRQ>
* spin_lock(somelock);
* timer_delete_sync(mytimer);
* while (base->running_timer == mytimer);
*
* Now timer_delete_sync() will never return and never release somelock.
* The interrupt on the other CPU is waiting to grab somelock but it has
* interrupted the softirq that CPU0 is waiting to finish.
*
* This function cannot guarantee that the timer is not rearmed again by
* some concurrent or preempting code, right after it dropped the base
* lock. If there is the possibility of a concurrent rearm then the return
* value of the function is meaningless.
*
* If such a guarantee is needed, e.g. for teardown situations then use
* timer_shutdown_sync() instead.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending and deactivated
*/
int timer_delete_sync(struct timer_list *timer)
{
return __timer_delete_sync(timer, false);
}
EXPORT_SYMBOL(timer_delete_sync);
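/*
 * Illustrative sketch (hypothetical): a teardown path first makes sure the
 * callback can no longer rearm the timer (here via a hypothetical flag the
 * callback checks before calling mod_timer()), then waits for a possibly
 * running callback:
 */
static bool example_shutting_down;	/* hypothetical teardown flag */

static void example_stop(struct timer_list *t)
{
	WRITE_ONCE(example_shutting_down, true);
	timer_delete_sync(t);
	/* From here on @t is neither queued nor running its callback. */
}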
/**
* timer_shutdown_sync - Shutdown a timer and prevent rearming
* @timer: The timer to be shutdown
*
* When the function returns it is guaranteed that:
* - @timer is not queued
* - The callback function of @timer is not running
* - @timer cannot be enqueued again. Any attempt to rearm
* @timer is silently ignored.
*
* See timer_delete_sync() for synchronization rules.
*
* This function is useful for final teardown of an infrastructure where
* the timer is subject to a circular dependency problem.
*
* A common pattern for this is a timer and a workqueue where the timer can
* schedule work and work can arm the timer. On shutdown the workqueue must
* be destroyed and the timer must be prevented from rearming. Unless the
* code has conditionals like 'if (mything->in_shutdown)' to prevent that
* there is no way to get this correct with timer_delete_sync().
*
* timer_shutdown_sync() solves this problem. The correct ordering of
* calls in this case is:
*
* timer_shutdown_sync(&mything->timer);
* workqueue_destroy(&mything->workqueue);
*
* After this 'mything' can be safely freed.
*
* This obviously implies that the timer is not required to be functional
* for the rest of the shutdown operation.
*
* Return:
* * %0 - The timer was not pending
* * %1 - The timer was pending
*/
int timer_shutdown_sync(struct timer_list *timer)
{
return __timer_delete_sync(timer, true);
}
EXPORT_SYMBOL_GPL(timer_shutdown_sync);
static void call_timer_fn(struct timer_list *timer,
void (*fn)(struct timer_list *),
unsigned long baseclk)
{
int count = preempt_count();
#ifdef CONFIG_LOCKDEP
/*
* It is permissible to free the timer from inside the
* function that is called from it, this we need to take into
* account for lockdep too. To avoid bogus "held lock freed"
* warnings as well as problems when looking into
* timer->lockdep_map, make a copy and use that here.
*/
struct lockdep_map lockdep_map;
lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
/*
* Couple the lock chain with the lock chain at
* timer_delete_sync() by acquiring the lock_map around the fn()
* call here and in timer_delete_sync().
*/
lock_map_acquire(&lockdep_map);
trace_timer_expire_entry(timer, baseclk);
fn(timer);
trace_timer_expire_exit(timer);
lock_map_release(&lockdep_map);
if (count != preempt_count()) {
WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
fn, count, preempt_count());
/*
* Restore the preempt count. That gives us a decent
* chance to survive and extract information. If the
* callback kept a lock held, bad luck, but not worse
* than the BUG() we had.
*/
preempt_count_set(count);
}
}
static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
/*
* This value is required only for tracing. base->clk was
* incremented directly before expire_timers was called. But expiry
* is related to the old base->clk value.
*/
unsigned long baseclk = base->clk - 1;
while (!hlist_empty(head)) {
struct timer_list *timer;
void (*fn)(struct timer_list *);
timer = hlist_entry(head->first, struct timer_list, entry);
base->running_timer = timer;
detach_timer(timer, true);
fn = timer->function;
if (WARN_ON_ONCE(!fn)) {
/* Should never happen. Emphasis on should! */
base->running_timer = NULL;
continue;
}
if (timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
call_timer_fn(timer, fn, baseclk);
raw_spin_lock(&base->lock);
base->running_timer = NULL;
} else {
raw_spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, baseclk);
raw_spin_lock_irq(&base->lock);
base->running_timer = NULL;
timer_sync_wait_running(base);
}
}
}
static int collect_expired_timers(struct timer_base *base,
struct hlist_head *heads)
{
unsigned long clk = base->clk = base->next_expiry;
struct hlist_head *vec;
int i, levels = 0;
unsigned int idx;
for (i = 0; i < LVL_DEPTH; i++) {
idx = (clk & LVL_MASK) + i * LVL_SIZE;
if (__test_and_clear_bit(idx, base->pending_map)) {
vec = base->vectors + idx;
hlist_move_list(vec, heads++);
levels++;
}
/* Is it time to look at the next level? */
if (clk & LVL_CLK_MASK)
break;
/* Shift clock for the next level granularity */
clk >>= LVL_CLK_SHIFT;
}
return levels;
}
/*
* Find the next pending bucket of a level. Search from level start (@offset)
* + @clk upwards and if nothing there, search from start of the level
* (@offset) up to @offset + clk.
*/
static int next_pending_bucket(struct timer_base *base, unsigned offset,
unsigned clk)
{
unsigned pos, start = offset + clk;
unsigned end = offset + LVL_SIZE;
pos = find_next_bit(base->pending_map, end, start);
if (pos < end)
return pos - start;
pos = find_next_bit(base->pending_map, start, offset);
return pos < start ? pos + LVL_SIZE - start : -1;
}
/*
* Search the first expiring timer in the various clock levels. Caller must
* hold base->lock.
*
* Store next expiry time in base->next_expiry.
*/
static void timer_recalc_next_expiry(struct timer_base *base)
{
unsigned long clk, next, adj;
unsigned lvl, offset = 0;
next = base->clk + TIMER_NEXT_MAX_DELTA;
clk = base->clk;
for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
unsigned long lvl_clk = clk & LVL_CLK_MASK;
if (pos >= 0) {
unsigned long tmp = clk + (unsigned long) pos;
tmp <<= LVL_SHIFT(lvl);
if (time_before(tmp, next))
next = tmp;
/*
* If the next expiration happens before we reach
* the next level, no need to check further.
*/
if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
break;
}
/*
* Clock for the next level. If the current level clock lower
* bits are zero, we look at the next level as is. If not we
* need to advance it by one because that's going to be the
* next expiring bucket in that level. base->clk is the next
* expiring jiffy. So in case of:
*
* LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
* 0 0 0 0 0 0
*
* we have to look at all levels @index 0. With
*
* LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
* 0 0 0 0 0 2
*
* LVL0 has the next expiring bucket @index 2. The upper
* levels have the next expiring bucket @index 1.
*
* In case that the propagation wraps the next level the same
* rules apply:
*
* LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
* 0 0 0 0 F 2
*
* So after looking at LVL0 we get:
*
* LVL5 LVL4 LVL3 LVL2 LVL1
* 0 0 0 1 0
*
* So no propagation from LVL1 to LVL2 because that happened
* with the add already, but then we need to propagate further
* from LVL2 to LVL3.
*
* So the simple check whether the lower bits of the current
* level are 0 or not is sufficient for all cases.
*/
adj = lvl_clk ? 1 : 0;
clk >>= LVL_CLK_SHIFT;
clk += adj;
}
WRITE_ONCE(base->next_expiry, next);
base->next_expiry_recalc = false;
base->timers_pending = !(next == base->clk + TIMER_NEXT_MAX_DELTA);
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Check if the next hrtimer event is before the next timer wheel
* event:
*/
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
u64 nextevt = hrtimer_get_next_event();
/*
* If high resolution timers are enabled
* hrtimer_get_next_event() returns KTIME_MAX.
*/
if (expires <= nextevt)
return expires;
/*
* If the next timer is already expired, return the tick base
* time so the tick is fired immediately.
*/
if (nextevt <= basem)
return basem;
/*
* Round up to the next jiffy. High resolution timers are
* off, so the hrtimers are expired in the tick and we need to
* make sure that this tick really expires the timer to avoid
* a ping pong of the nohz stop code.
*
* Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
*/
return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}
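/*
 * Worked example of the rounding above (illustrative numbers only, assuming
 * HZ == 1000 and therefore TICK_NSEC == 1000000):
 *
 *	u64 nextevt = 2500000;	// 2.5 ms
 *	u64 aligned = DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 *	// aligned == 3000000: rounded up to the next full tick, so the tick
 *	// firing at 3 ms really expires the timer instead of the expiry
 *	// being truncated to an already passed 2 ms boundary.
 */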
static unsigned long next_timer_interrupt(struct timer_base *base,
unsigned long basej)
{
if (base->next_expiry_recalc)
timer_recalc_next_expiry(base);
/*
* Move next_expiry for the empty base into the future to prevent an
* unnecessary raise of the timer softirq when the next_expiry value
* will be reached even if there is no timer pending.
*
* This update is also required to make timer_base::next_expiry values
	 * easily comparable, to find out which base holds the first pending timer.
*/
if (!base->timers_pending)
WRITE_ONCE(base->next_expiry, basej + TIMER_NEXT_MAX_DELTA);
return base->next_expiry;
}
static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
struct timer_base *base_local,
struct timer_base *base_global,
struct timer_events *tevt)
{
unsigned long nextevt, nextevt_local, nextevt_global;
bool local_first;
nextevt_local = next_timer_interrupt(base_local, basej);
nextevt_global = next_timer_interrupt(base_global, basej);
local_first = time_before_eq(nextevt_local, nextevt_global);
nextevt = local_first ? nextevt_local : nextevt_global;
/*
	 * If @nextevt is at most one tick away, use @nextevt and store
* it in the local expiry value. The next global event is irrelevant in
* this case and can be left as KTIME_MAX.
*/
if (time_before_eq(nextevt, basej + 1)) {
/* If we missed a tick already, force 0 delta */
if (time_before(nextevt, basej))
nextevt = basej;
tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
/*
		 * This is required only for the remote check, but it doesn't
		 * hurt when it is done for both call sites:
		 *
		 * * The remote callers will only take care of the global timers
		 *   as local timers will be handled by the CPU itself. Without
		 *   updating tevt->global with the already missed first global
		 *   timer, it could be missed completely.
		 *
		 * * The local callers will ignore tevt->global anyway when
		 *   nextevt is at most one tick away.
*/
if (!local_first)
tevt->global = tevt->local;
return nextevt;
}
/*
* Update tevt.* values:
*
* If the local queue expires first, then the global event can be
* ignored. If the global queue is empty, nothing to do either.
*/
if (!local_first && base_global->timers_pending)
tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
if (base_local->timers_pending)
tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
return nextevt;
}
# ifdef CONFIG_SMP
/**
* fetch_next_timer_interrupt_remote() - Store next timers into @tevt
* @basej: base time jiffies
* @basem: base time clock monotonic
* @tevt: Pointer to the storage for the expiry values
* @cpu: Remote CPU
*
* Stores the next pending local and global timer expiry values in the
* struct pointed to by @tevt. If a queue is empty the corresponding
* field is set to KTIME_MAX. If local event expires before global
* event, global event is set to KTIME_MAX as well.
*
* Caller needs to make sure timer base locks are held (use
* timer_lock_remote_bases() for this purpose).
*/
void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
struct timer_events *tevt,
unsigned int cpu)
{
struct timer_base *base_local, *base_global;
/* Preset local / global events */
tevt->local = tevt->global = KTIME_MAX;
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
lockdep_assert_held(&base_local->lock);
lockdep_assert_held(&base_global->lock);
fetch_next_timer_interrupt(basej, basem, base_local, base_global, tevt);
}
/**
* timer_unlock_remote_bases - unlock timer bases of cpu
* @cpu: Remote CPU
*
* Unlocks the remote timer bases.
*/
void timer_unlock_remote_bases(unsigned int cpu)
__releases(timer_bases[BASE_LOCAL]->lock)
__releases(timer_bases[BASE_GLOBAL]->lock)
{
struct timer_base *base_local, *base_global;
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
raw_spin_unlock(&base_global->lock);
raw_spin_unlock(&base_local->lock);
}
/**
* timer_lock_remote_bases - lock timer bases of cpu
* @cpu: Remote CPU
*
* Locks the remote timer bases.
*/
void timer_lock_remote_bases(unsigned int cpu)
__acquires(timer_bases[BASE_LOCAL]->lock)
__acquires(timer_bases[BASE_GLOBAL]->lock)
{
struct timer_base *base_local, *base_global;
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
lockdep_assert_irqs_disabled();
raw_spin_lock(&base_local->lock);
raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
}
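/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * lock/fetch/unlock trio above is meant to be used together, with interrupts
 * disabled by the caller.
 *
 *	static u64 peek_remote_global_expiry(unsigned int cpu,
 *					     unsigned long basej, u64 basem)
 *	{
 *		struct timer_events tevt;
 *
 *		timer_lock_remote_bases(cpu);
 *		fetch_next_timer_interrupt_remote(basej, basem, &tevt, cpu);
 *		timer_unlock_remote_bases(cpu);
 *
 *		return tevt.global;	// KTIME_MAX if nothing is pending
 *	}
 */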
/**
* timer_base_is_idle() - Return whether timer base is set idle
*
 * Returns the is_idle value of the local timer base.
*/
bool timer_base_is_idle(void)
{
return __this_cpu_read(timer_bases[BASE_LOCAL].is_idle);
}
static void __run_timer_base(struct timer_base *base);
/**
* timer_expire_remote() - expire global timers of cpu
* @cpu: Remote CPU
*
* Expire timers of global base of remote CPU.
*/
void timer_expire_remote(unsigned int cpu)
{
struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
__run_timer_base(base);
}
static void timer_use_tmigr(unsigned long basej, u64 basem,
unsigned long *nextevt, bool *tick_stop_path,
bool timer_base_idle, struct timer_events *tevt)
{
u64 next_tmigr;
if (timer_base_idle)
next_tmigr = tmigr_cpu_new_timer(tevt->global);
else if (tick_stop_path)
next_tmigr = tmigr_cpu_deactivate(tevt->global);
else
next_tmigr = tmigr_quick_check(tevt->global);
/*
* If the CPU is the last going idle in timer migration hierarchy, make
* sure the CPU will wake up in time to handle remote timers.
* next_tmigr == KTIME_MAX if other CPUs are still active.
*/
if (next_tmigr < tevt->local) {
u64 tmp;
/* If we missed a tick already, force 0 delta */
if (next_tmigr < basem)
next_tmigr = basem;
tmp = div_u64(next_tmigr - basem, TICK_NSEC);
*nextevt = basej + (unsigned long)tmp;
tevt->local = next_tmigr;
}
}
# else
static void timer_use_tmigr(unsigned long basej, u64 basem,
unsigned long *nextevt, bool *tick_stop_path,
bool timer_base_idle, struct timer_events *tevt)
{
/*
* Make sure first event is written into tevt->local to not miss a
* timer on !SMP systems.
*/
tevt->local = min_t(u64, tevt->local, tevt->global);
}
# endif /* CONFIG_SMP */
static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
bool *idle)
{
struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
struct timer_base *base_local, *base_global;
unsigned long nextevt;
bool idle_is_possible;
/*
* When the CPU is offline, the tick is cancelled and nothing is supposed
* to try to stop it.
*/
if (WARN_ON_ONCE(cpu_is_offline(smp_processor_id()))) {
if (idle)
*idle = true;
return tevt.local;
}
base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
raw_spin_lock(&base_local->lock);
raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
base_global, &tevt);
/*
* If the next event is only one jiffy ahead there is no need to call
* timer migration hierarchy related functions. The value for the next
	 * global timer in the @tevt struct then equals KTIME_MAX. This is also
	 * true when the timer base is idle.
*
* The proper timer migration hierarchy function depends on the callsite
* and whether timer base is idle or not. @nextevt will be updated when
* this CPU needs to handle the first timer migration hierarchy
* event. See timer_use_tmigr() for detailed information.
*/
idle_is_possible = time_after(nextevt, basej + 1);
if (idle_is_possible)
timer_use_tmigr(basej, basem, &nextevt, idle,
base_local->is_idle, &tevt);
/*
* We have a fresh next event. Check whether we can forward the
* base.
*/
__forward_timer_base(base_local, basej);
__forward_timer_base(base_global, basej);
/*
* Set base->is_idle only when caller is timer_base_try_to_set_idle()
*/
if (idle) {
/*
* Bases are idle if the next event is more than a tick
* away. Caution: @nextevt could have changed by enqueueing a
* global timer into timer migration hierarchy. Therefore a new
* check is required here.
*
* If the base is marked idle then any timer add operation must
* forward the base clk itself to keep granularity small. This
* idle logic is only maintained for the BASE_LOCAL and
		 * BASE_GLOBAL base; deferrable timers may still see large
* granularity skew (by design).
*/
if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
base_local->is_idle = true;
/*
* Global timers queued locally while running in a task
* in nohz_full mode need a self-IPI to kick reprogramming
* in IRQ tail.
*/
if (tick_nohz_full_cpu(base_local->cpu))
base_global->is_idle = true;
trace_timer_base_idle(true, base_local->cpu);
}
*idle = base_local->is_idle;
/*
* When timer base is not set idle, undo the effect of
* tmigr_cpu_deactivate() to prevent inconsistent states - active
* timer base but inactive timer migration hierarchy.
*
* When timer base was already marked idle, nothing will be
* changed here.
*/
if (!base_local->is_idle && idle_is_possible)
tmigr_cpu_activate();
}
raw_spin_unlock(&base_global->lock);
raw_spin_unlock(&base_local->lock);
return cmp_next_hrtimer_event(basem, tevt.local);
}
/**
* get_next_timer_interrupt() - return the time (clock mono) of the next timer
* @basej: base time jiffies
* @basem: base time clock monotonic
*
* Returns the tick aligned clock monotonic time of the next pending timer or
 * KTIME_MAX if no timer is pending. If a timer of the global base was queued
 * into the timer migration hierarchy, the first global timer is not taken into
 * account. If this CPU was the last one of the timer migration hierarchy going
 * idle, the first global event is taken into account.
*/
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
return __get_next_timer_interrupt(basej, basem, NULL);
}
/**
* timer_base_try_to_set_idle() - Try to set the idle state of the timer bases
* @basej: base time jiffies
* @basem: base time clock monotonic
* @idle: pointer to store the value of timer_base->is_idle on return;
* *idle contains the information whether tick was already stopped
*
* Returns the tick aligned clock monotonic time of the next pending timer or
* KTIME_MAX if no timer is pending. When tick was already stopped KTIME_MAX is
* returned as well.
*/
u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle)
{
if (*idle)
return KTIME_MAX;
return __get_next_timer_interrupt(basej, basem, idle);
}
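/*
 * Illustrative sketch (hypothetical caller in the tick-stop path, names made
 * up; basem is simplified to ktime_get() here): the idle code passes the
 * current jiffies value and the matching CLOCK_MONOTONIC time and receives
 * the next required wakeup.
 *
 *	static u64 example_next_wakeup(bool *tick_stopped)
 *	{
 *		unsigned long basej = jiffies;
 *		u64 basem = ktime_get();
 *
 *		return timer_base_try_to_set_idle(basej, basem, tick_stopped);
 *	}
 *
 * If *tick_stopped is already true on entry, KTIME_MAX is returned and the
 * bases are left untouched, as documented above.
 */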
/**
* timer_clear_idle - Clear the idle state of the timer base
*
* Called with interrupts disabled
*/
void timer_clear_idle(void)
{
/*
* We do this unlocked. The worst outcome is a remote pinned timer
* enqueue sending a pointless IPI, but taking the lock would just
* make the window for sending the IPI a few instructions smaller
* for the cost of taking the lock in the exit from idle
* path. Required for BASE_LOCAL only.
*/
__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
if (tick_nohz_full_cpu(smp_processor_id()))
__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
trace_timer_base_idle(false, smp_processor_id());
/* Activate without holding the timer_base->lock */
tmigr_cpu_activate();
}
#endif
/**
* __run_timers - run all expired timers (if any) on this CPU.
* @base: the timer vector to be processed.
*/
static inline void __run_timers(struct timer_base *base)
{
struct hlist_head heads[LVL_DEPTH];
int levels;
lockdep_assert_held(&base->lock);
if (base->running_timer)
return;
while (time_after_eq(jiffies, base->clk) &&
time_after_eq(jiffies, base->next_expiry)) {
levels = collect_expired_timers(base, heads);
/*
* The two possible reasons for not finding any expired
* timer at this clk are that all matching timers have been
* dequeued or no timer has been queued since
* base::next_expiry was set to base::clk +
* TIMER_NEXT_MAX_DELTA.
*/
WARN_ON_ONCE(!levels && !base->next_expiry_recalc
&& base->timers_pending);
/*
* While executing timers, base->clk is set 1 offset ahead of
* jiffies to avoid endless requeuing to current jiffies.
*/
base->clk++;
timer_recalc_next_expiry(base);
while (levels--)
expire_timers(base, heads + levels);
}
}
static void __run_timer_base(struct timer_base *base)
{
/* Can race against a remote CPU updating next_expiry under the lock */
if (time_before(jiffies, READ_ONCE(base->next_expiry)))
return;
timer_base_lock_expiry(base);
raw_spin_lock_irq(&base->lock);
__run_timers(base);
raw_spin_unlock_irq(&base->lock);
timer_base_unlock_expiry(base);
}
static void run_timer_base(int index)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[index]);
__run_timer_base(base);
}
/*
* This function runs timers and the timer-tq in bottom half context.
*/
static __latent_entropy void run_timer_softirq(void)
{
run_timer_base(BASE_LOCAL);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
run_timer_base(BASE_GLOBAL);
run_timer_base(BASE_DEF);
if (is_timers_nohz_active())
tmigr_handle_remote();
}
}
/*
* Called by the local, per-CPU timer interrupt on SMP.
*/
static void run_local_timers(void)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
hrtimer_run_queues();
for (int i = 0; i < NR_BASES; i++, base++) {
/*
* Raise the softirq only if required.
*
* timer_base::next_expiry can be written by a remote CPU while
		 * holding the lock. If this write happens at the same time as
		 * the lockless local read, the sanity checker could complain
		 * about data corruption.
*
* There are two possible situations where
* timer_base::next_expiry is written by a remote CPU:
*
* 1. Remote CPU expires global timers of this CPU and updates
* timer_base::next_expiry of BASE_GLOBAL afterwards in
* next_timer_interrupt() or timer_recalc_next_expiry(). The
* worst outcome is a superfluous raise of the timer softirq
* when the not yet updated value is read.
*
* 2. A new first pinned timer is enqueued by a remote CPU
* and therefore timer_base::next_expiry of BASE_LOCAL is
* updated. When this update is missed, this isn't a
* problem, as an IPI is executed nevertheless when the CPU
* was idle before. When the CPU wasn't idle but the update
* is missed, then the timer would expire one jiffy late -
* bad luck.
*
		 * These unlikely corner cases, where the worst outcome is only
		 * a one jiffy delay or a superfluous raise of the softirq, are
		 * cheaper than always doing the check while holding the lock.
		 *
		 * Possible remote writers use WRITE_ONCE(); the local reader
		 * therefore uses READ_ONCE().
*/
if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) ||
(i == BASE_DEF && tmigr_requires_handle_remote())) {
raise_timer_softirq(TIMER_SOFTIRQ);
return;
}
}
}
/*
* Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
*/
void update_process_times(int user_tick)
{
struct task_struct *p = current;
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
run_local_timers();
rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_tick();
#endif
sched_tick();
if (IS_ENABLED(CONFIG_POSIX_TIMERS))
run_posix_cpu_timers();
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
struct timer_list *timer;
int cpu = new_base->cpu;
while (!hlist_empty(head)) {
timer = hlist_entry(head->first, struct timer_list, entry);
detach_timer(timer, false);
timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
internal_add_timer(new_base, timer);
}
}
int timers_prepare_cpu(unsigned int cpu)
{
struct timer_base *base;
int b;
for (b = 0; b < NR_BASES; b++) {
base = per_cpu_ptr(&timer_bases[b], cpu);
base->clk = jiffies;
base->next_expiry = base->clk + TIMER_NEXT_MAX_DELTA;
base->next_expiry_recalc = false;
base->timers_pending = false;
base->is_idle = false;
}
return 0;
}
int timers_dead_cpu(unsigned int cpu)
{
struct timer_base *old_base;
struct timer_base *new_base;
int b, i;
for (b = 0; b < NR_BASES; b++) {
old_base = per_cpu_ptr(&timer_bases[b], cpu);
new_base = get_cpu_ptr(&timer_bases[b]);
/*
* The caller is globally serialized and nobody else
		 * takes two locks at once, so deadlock is not possible.
*/
raw_spin_lock_irq(&new_base->lock);
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
/*
		 * The current CPU's base clock might be stale. Update it
* before moving the timers over.
*/
forward_timer_base(new_base);
WARN_ON_ONCE(old_base->running_timer);
old_base->running_timer = NULL;
for (i = 0; i < WHEEL_SIZE; i++)
migrate_timer_list(new_base, old_base->vectors + i);
raw_spin_unlock(&old_base->lock);
raw_spin_unlock_irq(&new_base->lock);
put_cpu_ptr(&timer_bases);
}
return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
static void __init init_timer_cpu(int cpu)
{
struct timer_base *base;
int i;
for (i = 0; i < NR_BASES; i++) {
base = per_cpu_ptr(&timer_bases[i], cpu);
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
base->next_expiry = base->clk + TIMER_NEXT_MAX_DELTA;
timer_base_init_expiry_lock(base);
}
}
static void __init init_timer_cpus(void)
{
int cpu;
for_each_possible_cpu(cpu)
init_timer_cpu(cpu);
}
void __init timers_init(void)
{
init_timer_cpus();
posix_cputimers_init_work();
open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/core/dev_addr_lists.c - Functions for handling net device lists
* Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
*
* This file contains functions for working with unicast, multicast and device
* addresses lists.
*/
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/export.h>
#include <linux/list.h>
#include "dev.h"
/*
* General list handling functions
*/
static int __hw_addr_insert(struct netdev_hw_addr_list *list,
struct netdev_hw_addr *new, int addr_len)
{
struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
struct netdev_hw_addr *ha;
while (*ins_point) {
int diff;
ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
diff = memcmp(new->addr, ha->addr, addr_len);
if (diff == 0)
diff = memcmp(&new->type, &ha->type, sizeof(new->type));
parent = *ins_point;
if (diff < 0)
ins_point = &parent->rb_left;
else if (diff > 0)
ins_point = &parent->rb_right;
else
return -EEXIST;
}
rb_link_node_rcu(&new->node, parent, ins_point);
rb_insert_color(&new->node, &list->tree);
return 0;
}
static struct netdev_hw_addr*
__hw_addr_create(const unsigned char *addr, int addr_len,
unsigned char addr_type, bool global, bool sync)
{
struct netdev_hw_addr *ha;
int alloc_size;
alloc_size = sizeof(*ha);
if (alloc_size < L1_CACHE_BYTES)
alloc_size = L1_CACHE_BYTES;
ha = kmalloc(alloc_size, GFP_ATOMIC);
if (!ha)
return NULL;
memcpy(ha->addr, addr, addr_len);
ha->type = addr_type;
ha->refcount = 1;
ha->global_use = global;
ha->synced = sync ? 1 : 0;
ha->sync_cnt = 0;
return ha;
}
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type, bool global, bool sync,
int sync_count, bool exclusive)
{
struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
struct netdev_hw_addr *ha;
if (addr_len > MAX_ADDR_LEN)
return -EINVAL;
while (*ins_point) {
int diff;
ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
diff = memcmp(addr, ha->addr, addr_len);
if (diff == 0)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		parent = *ins_point;
		if (diff < 0) {
ins_point = &parent->rb_left;
} else if (diff > 0) {
ins_point = &parent->rb_right;
} else {
if (exclusive)
return -EEXIST;
if (global) {
/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
else
ha->global_use = true;
}
			if (sync) {
				if (ha->synced && sync_count)
return -EEXIST;
else
ha->synced++;
}
ha->refcount++;
return 0;
}
}
ha = __hw_addr_create(addr, addr_len, addr_type, global, sync);
if (!ha)
return -ENOMEM;
rb_link_node(&ha->node, parent, ins_point);
rb_insert_color(&ha->node, &list->tree);
list_add_tail_rcu(&ha->list, &list->list);
list->count++;
	return 0;
}
static int __hw_addr_add(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type)
{
return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
0, false);
}
static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
struct netdev_hw_addr *ha, bool global,
bool sync)
{
if (global && !ha->global_use)
return -ENOENT;
if (sync && !ha->synced)
return -ENOENT;
if (global)
ha->global_use = false;
if (sync)
ha->synced--;
if (--ha->refcount)
return 0;
rb_erase(&ha->node, &list->tree);
list_del_rcu(&ha->list);
kfree_rcu(ha, rcu_head);
list->count--;
return 0;
}
static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type)
{
struct rb_node *node;
node = list->tree.rb_node;
while (node) {
struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node);
int diff = memcmp(addr, ha->addr, addr_len);
if (diff == 0 && addr_type)
diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));
if (diff < 0)
node = node->rb_left;
else if (diff > 0)
node = node->rb_right;
else
return ha;
}
return NULL;
}
static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type, bool global, bool sync)
{
struct netdev_hw_addr *ha = __hw_addr_lookup(list, addr, addr_len, addr_type);
if (!ha)
return -ENOENT;
return __hw_addr_del_entry(list, ha, global, sync);
}
static int __hw_addr_del(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type)
{
return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
}
static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr *ha,
int addr_len)
{
int err;
err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
false, true, ha->sync_cnt, false);
if (err && err != -EEXIST)
return err;
if (!err) {
ha->sync_cnt++;
ha->refcount++;
}
return 0;
}
static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
struct netdev_hw_addr *ha,
int addr_len)
{
int err;
err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
false, true);
if (err)
return;
ha->sync_cnt--;
/* address on from list is not marked synced */
__hw_addr_del_entry(from_list, ha, false, false);
}
int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len)
{
int err = 0;
struct netdev_hw_addr *ha, *tmp;
list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
if (ha->sync_cnt == ha->refcount) {
__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
} else {
err = __hw_addr_sync_one(to_list, ha, addr_len);
if (err)
break;
}
}
return err;
}
EXPORT_SYMBOL(__hw_addr_sync_multiple);
/* This function only works where there is a strict 1-1 relationship
 * between the source and destination of the sync. If you ever need to
 * sync addresses to more than one destination, you need to use
* __hw_addr_sync_multiple().
*/
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len)
{
int err = 0;
struct netdev_hw_addr *ha, *tmp;
list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
if (!ha->sync_cnt) {
err = __hw_addr_sync_one(to_list, ha, addr_len);
if (err)
break;
} else if (ha->refcount == 1)
__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
}
return err;
}
EXPORT_SYMBOL(__hw_addr_sync);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len)
{
struct netdev_hw_addr *ha, *tmp;
list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
if (ha->sync_cnt)
__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
}
}
EXPORT_SYMBOL(__hw_addr_unsync);
/**
* __hw_addr_sync_dev - Synchronize device's multicast list
* @list: address list to synchronize
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
*
* This function is intended to be called from the ndo_set_rx_mode
* function of devices that require explicit address add/remove
* notifications. The unsync function may be NULL in which case
* the addresses requiring removal will simply be removed without
* any notification to the device.
**/
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *))
{
struct netdev_hw_addr *ha, *tmp;
int err;
/* first go through and flush out any stale entries */
list_for_each_entry_safe(ha, tmp, &list->list, list) {
if (!ha->sync_cnt || ha->refcount != 1)
continue;
/* if unsync is defined and fails defer unsyncing address */
if (unsync && unsync(dev, ha->addr))
continue;
ha->sync_cnt--;
__hw_addr_del_entry(list, ha, false, false);
}
/* go through and sync new entries to the list */
list_for_each_entry_safe(ha, tmp, &list->list, list) {
if (ha->sync_cnt)
continue;
err = sync(dev, ha->addr);
if (err)
return err;
ha->sync_cnt++;
ha->refcount++;
}
return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
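/*
 * Illustrative sketch (hypothetical driver; foo_hw_add_filter() and
 * foo_hw_del_filter() are made-up helpers): driving explicit filter
 * programming from ndo_set_rx_mode() via __hw_addr_sync_dev().
 *
 *	static int foo_sync_uc(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return foo_hw_add_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static int foo_unsync_uc(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return foo_hw_del_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__hw_addr_sync_dev(&dev->uc, dev, foo_sync_uc, foo_unsync_uc);
 *	}
 */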
/**
* __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
* into account references
* @list: address list to synchronize
* @dev: device to sync
 * @sync: function to call if an address or a reference on it should be added
 * @unsync: function to call if an address or some reference on it should be removed
*
* This function is intended to be called from the ndo_set_rx_mode
* function of devices that require explicit address or references on it
* add/remove notifications. The unsync function may be NULL in which case
* the addresses or references on it requiring removal will simply be
 * removed without any notification to the device. It is the responsibility of
 * the driver to identify and distribute addresses, or references to them,
 * between its internal address tables.
**/
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *, int),
int (*unsync)(struct net_device *,
const unsigned char *, int))
{
struct netdev_hw_addr *ha, *tmp;
int err, ref_cnt;
/* first go through and flush out any unsynced/stale entries */
list_for_each_entry_safe(ha, tmp, &list->list, list) {
/* sync if address is not used */
if ((ha->sync_cnt << 1) <= ha->refcount)
continue;
/* if fails defer unsyncing address */
ref_cnt = ha->refcount - ha->sync_cnt;
if (unsync && unsync(dev, ha->addr, ref_cnt))
continue;
ha->refcount = (ref_cnt << 1) + 1;
ha->sync_cnt = ref_cnt;
__hw_addr_del_entry(list, ha, false, false);
}
/* go through and sync updated/new entries to the list */
list_for_each_entry_safe(ha, tmp, &list->list, list) {
/* sync if address added or reused */
if ((ha->sync_cnt << 1) >= ha->refcount)
continue;
ref_cnt = ha->refcount - ha->sync_cnt;
err = sync(dev, ha->addr, ref_cnt);
if (err)
return err;
ha->refcount = ref_cnt << 1;
ha->sync_cnt = ref_cnt;
}
return 0;
}
EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
/**
* __hw_addr_ref_unsync_dev - Remove synchronized addresses and references on
* it from device
* @list: address list to remove synchronized addresses (references on it) from
* @dev: device to sync
* @unsync: function to call if address and references on it should be removed
*
* Remove all addresses that were added to the device by
* __hw_addr_ref_sync_dev(). This function is intended to be called from the
* ndo_stop or ndo_open functions on devices that require explicit address (or
* references on it) add/remove notifications. If the unsync function pointer
* is NULL then this function can be used to just reset the sync_cnt for the
* addresses in the list.
**/
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *, int))
{
struct netdev_hw_addr *ha, *tmp;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
if (!ha->sync_cnt)
continue;
/* if fails defer unsyncing address */
if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
continue;
ha->refcount -= ha->sync_cnt - 1;
ha->sync_cnt = 0;
__hw_addr_del_entry(list, ha, false, false);
}
}
EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);
/**
* __hw_addr_unsync_dev - Remove synchronized addresses from device
* @list: address list to remove synchronized addresses from
* @dev: device to sync
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by __hw_addr_sync_dev().
* This function is intended to be called from the ndo_stop or ndo_open
* functions on devices that require explicit address add/remove
* notifications. If the unsync function pointer is NULL then this function
* can be used to just reset the sync_cnt for the addresses in the list.
**/
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
{
struct netdev_hw_addr *ha, *tmp;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
if (!ha->sync_cnt)
continue;
/* if unsync is defined and fails defer unsyncing address */
if (unsync && unsync(dev, ha->addr))
continue;
ha->sync_cnt--;
__hw_addr_del_entry(list, ha, false, false);
}
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);
static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
struct netdev_hw_addr *ha, *tmp;
list->tree = RB_ROOT;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
list_del_rcu(&ha->list);
kfree_rcu(ha, rcu_head);
}
list->count = 0;
}
void __hw_addr_init(struct netdev_hw_addr_list *list)
{
INIT_LIST_HEAD(&list->list);
list->count = 0;
list->tree = RB_ROOT;
}
EXPORT_SYMBOL(__hw_addr_init);
/*
* Device addresses handling functions
*/
/* Check that netdev->dev_addr is not written to directly as this would
* break the rbtree layout. All changes should go thru dev_addr_set() and co.
* Remove this check in mid-2024.
*/
void dev_addr_check(struct net_device *dev)
{
if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
return;
netdev_warn(dev, "Current addr: %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
netdev_warn(dev, "Expected addr: %*ph\n",
MAX_ADDR_LEN, dev->dev_addr_shadow);
netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
}
/**
* dev_addr_flush - Flush device address list
* @dev: device
*
* Flush device address list and reset ->dev_addr.
*
* The caller must hold the rtnl_mutex.
*/
void dev_addr_flush(struct net_device *dev)
{
/* rtnl_mutex must be held here */
dev_addr_check(dev);
__hw_addr_flush(&dev->dev_addrs);
dev->dev_addr = NULL;
}
/**
* dev_addr_init - Init device address list
* @dev: device
*
* Init device address list and create the first element,
* used by ->dev_addr.
*
* The caller must hold the rtnl_mutex.
*/
int dev_addr_init(struct net_device *dev)
{
unsigned char addr[MAX_ADDR_LEN];
struct netdev_hw_addr *ha;
int err;
/* rtnl_mutex must be held here */
__hw_addr_init(&dev->dev_addrs);
memset(addr, 0, sizeof(addr));
err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
NETDEV_HW_ADDR_T_LAN);
if (!err) {
/*
* Get the first (previously created) address from the list
* and set dev_addr pointer to this location.
*/
ha = list_first_entry(&dev->dev_addrs.list,
struct netdev_hw_addr, list);
dev->dev_addr = ha->addr;
}
return err;
}
void dev_addr_mod(struct net_device *dev, unsigned int offset,
const void *addr, size_t len)
{
struct netdev_hw_addr *ha;
dev_addr_check(dev);
ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
rb_erase(&ha->node, &dev->dev_addrs.tree);
memcpy(&ha->addr[offset], addr, len);
memcpy(&dev->dev_addr_shadow[offset], addr, len);
WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
}
EXPORT_SYMBOL(dev_addr_mod);
/**
* dev_addr_add - Add a device address
* @dev: device
* @addr: address to add
* @addr_type: address type
*
* Add a device address to the device or increase the reference count if
* it already exists.
*
* The caller must hold the rtnl_mutex.
*/
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type)
{
int err;
ASSERT_RTNL();
err = netif_pre_changeaddr_notify(dev, addr, NULL);
if (err)
return err;
err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return err;
}
EXPORT_SYMBOL(dev_addr_add);
/**
* dev_addr_del - Release a device address.
* @dev: device
* @addr: address to delete
* @addr_type: address type
*
* Release reference to a device address and remove it from the device
* if the reference count drops to zero.
*
* The caller must hold the rtnl_mutex.
*/
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type)
{
int err;
struct netdev_hw_addr *ha;
ASSERT_RTNL();
/*
	 * We cannot remove the first address from the list because
* dev->dev_addr points to that.
*/
ha = list_first_entry(&dev->dev_addrs.list,
struct netdev_hw_addr, list);
if (!memcmp(ha->addr, addr, dev->addr_len) &&
ha->type == addr_type && ha->refcount == 1)
return -ENOENT;
err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
addr_type);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return err;
}
EXPORT_SYMBOL(dev_addr_del);
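/*
 * Illustrative sketch (hypothetical caller): both helpers above expect the
 * RTNL lock to be held, e.g. when adding a secondary hardware address from
 * configuration code.
 *
 *	static int example_add_secondary(struct net_device *dev,
 *					 const unsigned char *addr)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
 *		rtnl_unlock();
 *
 *		return err;
 *	}
 */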
/*
* Unicast list handling functions
*/
/**
* dev_uc_add_excl - Add a global secondary unicast address
* @dev: device
* @addr: address to add
*/
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
{
int err;
netif_addr_lock_bh(dev);
err = __hw_addr_add_ex(&dev->uc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_UNICAST, true, false,
0, true);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_uc_add_excl);
/**
* dev_uc_add - Add a secondary unicast address
* @dev: device
* @addr: address to add
*
* Add a secondary unicast address to the device or increase
* the reference count if it already exists.
*/
int dev_uc_add(struct net_device *dev, const unsigned char *addr)
{
int err;
netif_addr_lock_bh(dev);
err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_UNICAST);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_uc_add);
/**
* dev_uc_del - Release secondary unicast address.
* @dev: device
* @addr: address to delete
*
* Release reference to a secondary unicast address and remove it
* from the device if the reference count drops to zero.
*/
int dev_uc_del(struct net_device *dev, const unsigned char *addr)
{
int err;
netif_addr_lock_bh(dev);
err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_UNICAST);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_uc_del);
/**
* dev_uc_sync - Synchronize device's unicast list to another device
* @to: destination device
* @from: source device
*
* Add newly added addresses to the destination device and release
* addresses that have no users left. The source device must be
* locked by netif_addr_lock_bh.
*
* This function is intended to be called from the dev->set_rx_mode
* function of layered software devices. This function assumes that
 * addresses will only ever be synced to the @to device and no other.
*/
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
int err = 0;
if (to->addr_len != from->addr_len)
return -EINVAL;
netif_addr_lock(to);
err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_uc_sync);
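/*
 * Illustrative sketch (hypothetical layered device; upper_get_lower() is a
 * made-up helper): an upper software device propagating its unicast and
 * multicast lists to its single lower device from ndo_set_rx_mode(), which
 * already runs with the upper device's address list locked.
 *
 *	static void upper_set_rx_mode(struct net_device *upper)
 *	{
 *		struct net_device *lower = upper_get_lower(upper);
 *
 *		dev_uc_sync(lower, upper);
 *		dev_mc_sync(lower, upper);
 *	}
 */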
/**
* dev_uc_sync_multiple - Synchronize device's unicast list to another
* device, but allow for multiple calls to sync to multiple devices.
* @to: destination device
* @from: source device
*
* Add newly added addresses to the destination device and release
* addresses that have been deleted from the source. The source device
* must be locked by netif_addr_lock_bh.
*
* This function is intended to be called from the dev->set_rx_mode
* function of layered software devices. It allows for a single source
* device to be synced to multiple destination devices.
*/
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
int err = 0;
if (to->addr_len != from->addr_len)
return -EINVAL;
netif_addr_lock(to);
err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);
/**
* dev_uc_unsync - Remove synchronized addresses from the destination device
* @to: destination device
* @from: source device
*
* Remove all addresses that were added to the destination device by
* dev_uc_sync(). This function is intended to be called from the
* dev->stop function of layered software devices.
*/
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
if (to->addr_len != from->addr_len)
return;
/* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
* reasons:
* 1) This is always called without any addr_list_lock, so as the
* outermost one here, it must be 0.
* 2) This is called by some callers after unlinking the upper device,
* so the dev->lower_level becomes 1 again.
* Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
* larger.
*/
netif_addr_lock_bh(from);
netif_addr_lock(to);
__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
/**
* dev_uc_flush - Flush unicast addresses
* @dev: device
*
* Flush unicast addresses.
*/
void dev_uc_flush(struct net_device *dev)
{
netif_addr_lock_bh(dev);
__hw_addr_flush(&dev->uc);
netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);
/**
* dev_uc_init - Init unicast address list
* @dev: device
*
* Init unicast address list.
*/
void dev_uc_init(struct net_device *dev)
{
__hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);
/*
* Multicast list handling functions
*/
/**
* dev_mc_add_excl - Add a global secondary multicast address
* @dev: device
* @addr: address to add
*/
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
{
int err;
netif_addr_lock_bh(dev);
err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_MULTICAST, true, false,
0, true);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_mc_add_excl);
static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
bool global)
{
int err;
netif_addr_lock_bh(dev);
err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_MULTICAST, global, false,
0, false);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
/**
* dev_mc_add - Add a multicast address
* @dev: device
* @addr: address to add
*
* Add a multicast address to the device or increase
* the reference count if it already exists.
*/
int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
return __dev_mc_add(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_add);
/**
* dev_mc_add_global - Add a global multicast address
* @dev: device
* @addr: address to add
*
* Add a global multicast address to the device.
*/
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
{
return __dev_mc_add(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_add_global);
static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
bool global)
{
int err;
netif_addr_lock_bh(dev);
err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_MULTICAST, global, false);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
/**
* dev_mc_del - Delete a multicast address.
* @dev: device
* @addr: address to delete
*
* Release reference to a multicast address and remove it
* from the device if the reference count drops to zero.
*/
int dev_mc_del(struct net_device *dev, const unsigned char *addr)
{
return __dev_mc_del(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_del);
/**
* dev_mc_del_global - Delete a global multicast address.
* @dev: device
* @addr: address to delete
*
* Release reference to a multicast address and remove it
* from the device if the reference count drops to zero.
*/
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
{
return __dev_mc_del(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_del_global);
/**
* dev_mc_sync - Synchronize device's multicast list to another device
* @to: destination device
* @from: source device
*
* Add newly added addresses to the destination device and release
* addresses that have no users left. The source device must be
* locked by netif_addr_lock_bh.
*
* This function is intended to be called from the ndo_set_rx_mode
* function of layered software devices.
*/
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
int err = 0;
if (to->addr_len != from->addr_len)
return -EINVAL;
netif_addr_lock(to);
err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_mc_sync);
/**
* dev_mc_sync_multiple - Synchronize device's multicast list to another
* device, but allow for multiple calls to sync to multiple devices.
* @to: destination device
* @from: source device
*
* Add newly added addresses to the destination device and release
* addresses that have no users left. The source device must be
* locked by netif_addr_lock_bh.
*
* This function is intended to be called from the ndo_set_rx_mode
* function of layered software devices. It allows for a single
* source device to be synced to multiple destination devices.
*/
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
int err = 0;
if (to->addr_len != from->addr_len)
return -EINVAL;
netif_addr_lock(to);
err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);
/**
* dev_mc_unsync - Remove synchronized addresses from the destination device
* @to: destination device
* @from: source device
*
* Remove all addresses that were added to the destination device by
* dev_mc_sync(). This function is intended to be called from the
* dev->stop function of layered software devices.
*/
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
if (to->addr_len != from->addr_len)
return;
/* See the above comments inside dev_uc_unsync(). */
netif_addr_lock_bh(from);
netif_addr_lock(to);
__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);
/**
* dev_mc_flush - Flush multicast addresses
* @dev: device
*
* Flush multicast addresses.
*/
void dev_mc_flush(struct net_device *dev)
{
netif_addr_lock_bh(dev);
__hw_addr_flush(&dev->mc);
netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);
/**
* dev_mc_init - Init multicast address list
* @dev: device
*
* Init multicast address list.
*/
void dev_mc_init(struct net_device *dev)
{
__hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Routines having to do with the 'struct sk_buff' memory handlers.
*
* Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
* Fixes:
* Alan Cox : Fixed the worst of the load
* balancer bugs.
* Dave Platt : Interrupt stacking fix.
* Richard Kooijman : Timestamp fixes.
* Alan Cox : Changed buffer format.
* Alan Cox : destructor hook for AF_UNIX etc.
* Linus Torvalds : Better skb_clone.
* Alan Cox : Added skb_copy.
* Alan Cox : Added all the changed routines Linus
* only put in the headers
* Ray VanTassle : Fixed --skb->lock in free
* Alan Cox : skb_copy copy arp field
* Andi Kleen : slabified it.
* Robert Olsson : Removed skb_head_pool
*
* NOTE:
* The __skb_ routines should be called with interrupts
* disabled, or you better be *real* sure that the operation is atomic
* with respect to whatever list is being frobbed (e.g. via lock_sock()
* or via disabling bottom half handlers, etc).
*/
/*
* The functions in this file will not compile correctly with gcc 2.4.x
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>
#include <linux/crc32.h>
#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/psp/types.h>
#include <net/dropreason.h>
#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>
#include "dev.h"
#include "devmem.h"
#include "netmem_priv.h"
#include "sock_destructor.h"
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
GRO_MAX_HEAD_PAD))
/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
* This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
* size, and we can differentiate heads from skb_small_head_cache
* vs system slabs by looking at their size (skb_end_offset()).
*/
#define SKB_SMALL_HEAD_CACHE_SIZE \
(is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \
(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \
SKB_SMALL_HEAD_SIZE)
#define SKB_SMALL_HEAD_HEADROOM \
SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long as the
* netmem is a page.
*/
static_assert(offsetof(struct bio_vec, bv_page) ==
offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
sizeof_field(skb_frag_t, netmem));
static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
sizeof_field(skb_frag_t, len));
static_assert(offsetof(struct bio_vec, bv_offset) ==
offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
sizeof_field(skb_frag_t, offset));
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
[SKB_CONSUMED] = "CONSUMED",
DEFINE_DROP_REASON(FN, FN)
};
static const struct drop_reason_list drop_reasons_core = {
.reasons = drop_reasons,
.n_reasons = ARRAY_SIZE(drop_reasons),
};
const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);
/**
* drop_reasons_register_subsys - register another drop reason subsystem
* @subsys: the subsystem to register, must not be the core
* @list: the list of drop reasons within the subsystem, must point to
* a statically initialized list
*/
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
const struct drop_reason_list *list)
{
if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
"invalid subsystem %d\n", subsys))
return;
/* must point to statically allocated memory, so INIT is OK */
RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);
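/*
 * Illustrative sketch (hypothetical subsystem; SKB_DROP_REASON_SUBSYS_FOO and
 * the reason names are made up): the registered list must be statically
 * initialized, which is why RCU_INIT_POINTER() above suffices.
 *
 *	static const char * const foo_drop_reasons[] = {
 *		"FOO_BAD_HDR",
 *		"FOO_NO_ROUTE",
 *	};
 *
 *	static const struct drop_reason_list foo_drop_reason_list = {
 *		.reasons = foo_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(foo_drop_reasons),
 *	};
 *
 *	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_FOO,
 *				     &foo_drop_reason_list);
 */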
/**
* drop_reasons_unregister_subsys - unregister a drop reason subsystem
* @subsys: the subsystem to remove, must not be the core
*
* Note: This will synchronize_rcu() to ensure no users when it returns.
*/
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
"invalid subsystem %d\n", subsys))
return;
RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
/**
* skb_panic - private function for out-of-line support
* @skb: buffer
* @sz: size
* @addr: address
* @msg: skb_over_panic or skb_under_panic
*
* Out-of-line support for skb_put() and skb_push().
* Called via the wrapper skb_over_panic() or skb_under_panic().
* Keep out of line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always reliable.
*/
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
const char msg[])
{
pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
msg, addr, skb->len, sz, skb->head, skb->data,
(unsigned long)skb->tail, (unsigned long)skb->end,
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
#define NAPI_SKB_CACHE_SIZE 64
#define NAPI_SKB_CACHE_BULK 16
#define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
struct napi_alloc_cache {
local_lock_t bh_lock;
struct page_frag_cache page;
unsigned int skb_count;
void *skb_cache[NAPI_SKB_CACHE_SIZE];
};
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
data = __page_frag_alloc_align(&nc->page, fragsz,
GFP_ATOMIC | __GFP_NOWARN, align_mask);
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
return data;
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
void *data;
if (in_hardirq() || irqs_disabled()) {
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
fragsz = SKB_DATA_ALIGN(fragsz);
data = __page_frag_alloc_align(nc, fragsz,
GFP_ATOMIC | __GFP_NOWARN,
align_mask);
} else {
local_bh_disable();
data = __napi_alloc_frag_align(fragsz, align_mask);
local_bh_enable();
}
return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
static struct sk_buff *napi_skb_cache_get(void)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!nc->skb_count)) {
nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC | __GFP_NOWARN,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
if (unlikely(!nc->skb_count)) {
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
return NULL;
}
}
skb = nc->skb_cache[--nc->skb_count];
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
return skb;
}
/**
* napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
* @skbs: pointer to an at least @n-sized array to fill with skb pointers
* @n: number of entries to provide
*
* Tries to obtain @n &sk_buff entries from the NAPI percpu cache and writes
 * the pointers into the provided array @skbs. If there are fewer entries
* available, tries to replenish the cache and bulk-allocates the diff from
* the MM layer if needed.
* The heads are being zeroed with either memset() or %__GFP_ZERO, so they are
* ready for {,__}build_skb_around() and don't have any data buffers attached.
* Must be called *only* from the BH context.
*
* Return: number of successfully allocated skbs (@n if no actual allocation
* needed or kmem_cache_alloc_bulk() didn't fail).
*/
u32 napi_skb_cache_get_bulk(void **skbs, u32 n)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
u32 bulk, total = n;
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
if (nc->skb_count >= n)
goto get;
	/* Not enough cached skbs. Try refilling the cache first */
bulk = min(NAPI_SKB_CACHE_SIZE - nc->skb_count, NAPI_SKB_CACHE_BULK);
nc->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC | __GFP_NOWARN, bulk,
&nc->skb_cache[nc->skb_count]);
if (likely(nc->skb_count >= n))
goto get;
/* Still not enough. Bulk-allocate the missing part directly, zeroed */
n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC | __GFP_ZERO | __GFP_NOWARN,
n - nc->skb_count, &skbs[nc->skb_count]);
if (likely(nc->skb_count >= n))
goto get;
/* kmem_cache didn't allocate the number we need, limit the output */
total -= n - nc->skb_count;
n = nc->skb_count;
get:
for (u32 base = nc->skb_count - n, i = 0; i < n; i++) {
u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache);
skbs[i] = nc->skb_cache[base + i];
kasan_mempool_unpoison_object(skbs[i], cache_size);
memset(skbs[i], 0, offsetof(struct sk_buff, tail));
}
nc->skb_count -= n;
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
return total;
}
EXPORT_SYMBOL_GPL(napi_skb_cache_get_bulk);
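/*
 * Illustrative sketch (not part of this file): a BH-context consumer could
 * pair napi_skb_cache_get_bulk() with build_skb_around(). The "frags[]",
 * "sizes[]" and "napi" locals below are hypothetical placeholders.
 *
 *    void *heads[8];
 *    u32 i, got = napi_skb_cache_get_bulk(heads, 8);
 *
 *    for (i = 0; i < got; i++) {
 *        struct sk_buff *skb = build_skb_around(heads[i], frags[i], sizes[i]);
 *
 *        if (skb)
 *            napi_gro_receive(napi, skb);
 *    }
 */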
static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
unsigned int size)
{
struct skb_shared_info *shinfo;
size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Assumes caller memset cleared SKB */
skb->truesize = SKB_TRUESIZE(size);
refcount_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
skb_set_end_offset(skb, size);
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
skb->alloc_cpu = raw_smp_processor_id();
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
skb_set_kcov_handle(skb, kcov_common_handle());
}
static inline void *__slab_build_skb(void *data, unsigned int *size)
{
void *resized;
/* Must find the allocation size (and grow it to match). */
*size = ksize(data);
/* krealloc() will immediately return "data" when
* "ksize(data)" is requested: it is the existing upper
* bounds. As a result, GFP_ATOMIC will be ignored. Note
* that this "new" pointer needs to be passed back to the
* caller for use so the __alloc_size hinting will be
* tracked correctly.
*/
resized = krealloc(data, *size, GFP_ATOMIC);
WARN_ON_ONCE(resized != data);
return resized;
}
/* build_skb() variant which can operate on slab buffers.
* Note that this should be used sparingly as slab buffers
* cannot be combined efficiently by GRO!
*/
struct sk_buff *slab_build_skb(void *data)
{
struct sk_buff *skb;
unsigned int size;
skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
data = __slab_build_skb(data, &size);
__finalize_skb_around(skb, data, size);
return skb;
}
EXPORT_SYMBOL(slab_build_skb);
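/*
 * Illustrative sketch (not part of this file): wrapping a kmalloc()ed buffer
 * in an skb. "buf_len" is a hypothetical payload size; the allocation must
 * leave room for the trailing struct skb_shared_info.
 *
 *    void *buf = kmalloc(buf_len + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 *                        GFP_ATOMIC);
 *    struct sk_buff *skb = buf ? slab_build_skb(buf) : NULL;
 *
 *    if (!skb)
 *        kfree(buf);
 */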
/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
unsigned int frag_size)
{
unsigned int size = frag_size;
/* frag_size == 0 is considered deprecated now. Callers
* using slab buffer should use slab_build_skb() instead.
*/
if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
data = __slab_build_skb(data, &size);
__finalize_skb_around(skb, data, size);
}
/**
* __build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of data (must not be 0)
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated from the page
* allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
* allocation is deprecated, and callers should use slab_build_skb()
* instead.)
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
* Before IO, driver allocates only data buffer where NIC put incoming frame
* Driver should add room at head (NET_SKB_PAD) and
* MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
* After IO, driver calls build_skb(), to allocate sk_buff and populate it
* before giving packet to stack.
* RX rings only contain data buffers, not full skbs.
*/
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
__build_skb_around(skb, data, frag_size);
return skb;
}
/* build_skb() is wrapper over __build_skb(), that specifically
* takes care of skb->head and skb->pfmemalloc
*/
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb = __build_skb(data, frag_size);
if (likely(skb && frag_size)) {
skb->head_frag = 1;
skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
}
return skb;
}
EXPORT_SYMBOL(build_skb);
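/*
 * Illustrative sketch (not part of this file): a driver that fills a page
 * via DMA can later wrap it with build_skb(). "rx_page", "truesize" and
 * "pkt_len" are hypothetical; the buffer must leave room for the shared info.
 *
 *    void *va = page_address(rx_page);
 *    struct sk_buff *skb = build_skb(va, truesize);
 *
 *    if (skb) {
 *        skb_reserve(skb, NET_SKB_PAD);
 *        skb_put(skb, pkt_len);
 *    }
 */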
/**
* build_skb_around - build a network buffer around provided skb
* @skb: sk_buff provide by caller, must be memset cleared
* @data: data buffer provided by caller
* @frag_size: size of data
*/
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size)
{
if (unlikely(!skb))
return NULL;
__build_skb_around(skb, data, frag_size);
if (frag_size) {
skb->head_frag = 1;
skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
}
return skb;
}
EXPORT_SYMBOL(build_skb_around);
/**
* __napi_build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of data
*
* Version of __build_skb() that uses NAPI percpu caches to obtain
* skbuff_head instead of inplace allocation.
*
* Returns a new &sk_buff on success, %NULL on allocation failure.
*/
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
skb = napi_skb_cache_get();
if (unlikely(!skb))
return NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
__build_skb_around(skb, data, frag_size);
return skb;
}
/**
* napi_build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of data
*
* Version of __napi_build_skb() that takes care of skb->head_frag
* and skb->pfmemalloc when the data is a page or page fragment.
*
* Returns a new &sk_buff on success, %NULL on allocation failure.
*/
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb = __napi_build_skb(data, frag_size);
if (likely(skb) && frag_size) {
skb->head_frag = 1;
skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
}
return skb;
}
EXPORT_SYMBOL(napi_build_skb);
/*
* kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
* the caller if emergency pfmemalloc reserves are being used. If it is and
* the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
* may be used. Otherwise, the packet data may be discarded until enough
* memory is free
*/
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
bool *pfmemalloc)
{
bool ret_pfmemalloc = false;
size_t obj_size;
void *obj;
obj_size = SKB_HEAD_ALIGN(*size);
if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
!(flags & KMALLOC_NOT_NORMAL_BITS)) {
obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
*size = SKB_SMALL_HEAD_CACHE_SIZE;
if (obj || !(gfp_pfmemalloc_allowed(flags)))
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
goto out;
}
obj_size = kmalloc_size_roundup(obj_size);
/* The following cast might truncate high-order bits of obj_size, this
* is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
*/
*size = (unsigned int)obj_size;
/*
* Try a regular allocation, when that fails and we're not entitled
* to the reserves, fail.
*/
obj = kmalloc_node_track_caller(obj_size,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
if (obj || !(gfp_pfmemalloc_allowed(flags)))
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
obj = kmalloc_node_track_caller(obj_size, flags, node);
out:
if (pfmemalloc)
*pfmemalloc = ret_pfmemalloc;
return obj;
}
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
*
*/
/**
* __alloc_skb - allocate a network buffer
* @size: size to allocate
* @gfp_mask: allocation mask
* @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
* instead of head cache and allocate a cloned (child) skb.
* If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
* allocations in case the data is required for writeback
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
* tail room of at least size bytes. The object has a reference count
* of one. The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int flags, int node)
{
struct kmem_cache *cache;
struct sk_buff *skb;
bool pfmemalloc;
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
/* Get the HEAD */
if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
likely(node == NUMA_NO_NODE || node == numa_mem_id()))
skb = napi_skb_cache_get();
else
skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
if (unlikely(!skb))
return NULL;
prefetchw(skb);
/* We do our best to align skb_shared_info on a separate cache
* line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
/* kmalloc_size_roundup() might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
prefetchw(data + SKB_WITH_OVERHEAD(size));
/*
* Only clear those fields we need to clear, not those that we will
* actually initialise below. Hence, don't put any more fields after
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
__build_skb_around(skb, data, size);
skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff_fclones *fclones;
fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb->fclone = SKB_FCLONE_ORIG;
refcount_set(&fclones->fclone_ref, 1);
}
return skb;
nodata:
kmem_cache_free(cache, skb);
return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
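/*
 * Illustrative sketch (not part of this file): most callers go through the
 * alloc_skb() wrapper and then carve headroom and data out of the tailroom.
 * "hdr_len" and "payload_len" are hypothetical.
 *
 *    struct sk_buff *skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *
 *    if (skb) {
 *        skb_reserve(skb, hdr_len);
 *        skb_put(skb, payload_len);
 *    }
 */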
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @len: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has NET_SKB_PAD headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
gfp_t gfp_mask)
{
struct page_frag_cache *nc;
struct sk_buff *skb;
bool pfmemalloc;
void *data;
len += NET_SKB_PAD;
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
*/
if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
len = SKB_HEAD_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
if (in_hardirq() || irqs_disabled()) {
nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
} else {
local_bh_disable();
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
nc = this_cpu_ptr(&napi_alloc_cache.page);
data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
local_bh_enable();
}
if (unlikely(!data))
return NULL;
skb = __build_skb(data, len);
if (unlikely(!skb)) {
skb_free_frag(data);
return NULL;
}
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
skb_success:
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
skb_fail:
return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
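/*
 * Illustrative sketch (not part of this file): drivers normally call the
 * netdev_alloc_skb() wrapper, which already provides the NET_SKB_PAD
 * headroom. "netdev", "rx_data" and "pkt_len" are hypothetical.
 *
 *    struct sk_buff *skb = netdev_alloc_skb(netdev, pkt_len);
 *
 *    if (skb) {
 *        skb_put_data(skb, rx_data, pkt_len);
 *        skb->protocol = eth_type_trans(skb, netdev);
 *        netif_rx(skb);
 *    }
 */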
/**
* napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
* @napi: napi instance this buffer was allocated for
* @len: length to allocate
*
* Allocate a new sk_buff for use in NAPI receive. This buffer will
* attempt to allocate the head from a special reserved region used
* only for NAPI Rx allocation. By doing this we can save several
* CPU cycles by avoiding having to disable and re-enable IRQs.
*
* %NULL is returned if there is no free memory.
*/
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
struct napi_alloc_cache *nc;
struct sk_buff *skb;
bool pfmemalloc;
void *data;
DEBUG_NET_WARN_ON_ONCE(!in_softirq());
len += NET_SKB_PAD + NET_IP_ALIGN;
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
*/
if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
len = SKB_HEAD_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
nc = this_cpu_ptr(&napi_alloc_cache);
data = page_frag_alloc(&nc->page, len, gfp_mask);
pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!data))
return NULL;
skb = __napi_build_skb(data, len);
if (unlikely(!skb)) {
skb_free_frag(data);
return NULL;
}
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
skb_success:
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb->dev = napi->dev;
skb_fail:
return skb;
}
EXPORT_SYMBOL(napi_alloc_skb);
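/*
 * Illustrative sketch (not part of this file): in a NAPI poll loop a driver
 * might allocate and hand off the skb like this. "napi", "rx_buf" and
 * "pkt_len" are hypothetical.
 *
 *    struct sk_buff *skb = napi_alloc_skb(napi, pkt_len);
 *
 *    if (skb) {
 *        skb_put_data(skb, rx_buf, pkt_len);
 *        skb->protocol = eth_type_trans(skb, napi->dev);
 *        napi_gro_receive(napi, skb);
 *    }
 */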
void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
int off, int size, unsigned int truesize)
{
DEBUG_NET_WARN_ON_ONCE(size > truesize);
skb_fill_netmem_desc(skb, i, netmem, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag_netmem);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
DEBUG_NET_WARN_ON_ONCE(size > truesize);
skb_frag_size_add(frag, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
kfree_skb_list(*listp);
*listp = NULL;
}
static inline void skb_drop_fraglist(struct sk_buff *skb)
{
skb_drop_list(&skb_shinfo(skb)->frag_list);
}
static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
skb_walk_frags(skb, list)
skb_get(list);
}
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
u32 size, truesize, len, max_head_size, off;
struct sk_buff *skb = *pskb, *nskb;
int err, i, head_off;
void *data;
/* XDP does not support fraglist so we need to linearize
* the skb.
*/
if (skb_has_frag_list(skb))
return -EOPNOTSUPP;
max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
return -ENOMEM;
size = min_t(u32, skb->len, max_head_size);
truesize = SKB_HEAD_ALIGN(size) + headroom;
data = page_pool_dev_alloc_va(pool, &truesize);
if (!data)
return -ENOMEM;
nskb = napi_build_skb(data, truesize);
if (!nskb) {
page_pool_free_va(pool, data, true);
return -ENOMEM;
}
skb_reserve(nskb, headroom);
skb_copy_header(nskb, skb);
skb_mark_for_recycle(nskb);
err = skb_copy_bits(skb, 0, nskb->data, size);
if (err) {
consume_skb(nskb);
return err;
}
skb_put(nskb, size);
head_off = skb_headroom(nskb) - skb_headroom(skb);
skb_headers_offset_update(nskb, head_off);
off = size;
len = skb->len - off;
for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
struct page *page;
u32 page_off;
size = min_t(u32, len, PAGE_SIZE);
truesize = size;
page = page_pool_dev_alloc(pool, &page_off, &truesize);
if (!page) {
consume_skb(nskb);
return -ENOMEM;
}
skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
err = skb_copy_bits(skb, off, page_address(page) + page_off,
size);
if (err) {
consume_skb(nskb);
return err;
}
len -= size;
off += size;
}
consume_skb(skb);
*pskb = nskb;
return 0;
#else
return -EOPNOTSUPP;
#endif
}
EXPORT_SYMBOL(skb_pp_cow_data);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
const struct bpf_prog *prog)
{
if (!prog->aux->xdp_has_frags)
return -EINVAL;
return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
}
EXPORT_SYMBOL(skb_cow_data_for_xdp);
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(netmem_ref netmem)
{
netmem = netmem_compound_head(netmem);
if (unlikely(!netmem_is_pp(netmem)))
return false;
page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif
static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
return false;
return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}
/**
* skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
* @skb: page pool aware skb
*
* Increase the fragment reference count (pp_ref_count) of a skb. This is
* intended to gain fragment references only for page pool aware skbs,
* i.e. when skb->pp_recycle is true, and not for fragments in a
* non-pp-recycling skb. It has a fallback to increase references on normal
* pages, as page pool aware skbs may also have normal page fragments.
*/
static int skb_pp_frag_ref(struct sk_buff *skb)
{
struct skb_shared_info *shinfo;
netmem_ref head_netmem;
int i;
if (!skb->pp_recycle)
return -EINVAL;
shinfo = skb_shinfo(skb);
for (i = 0; i < shinfo->nr_frags; i++) {
head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
if (likely(netmem_is_pp(head_netmem)))
page_pool_ref_netmem(head_netmem);
else
page_ref_inc(netmem_to_page(head_netmem));
}
return 0;
}
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
kmem_cache_free(net_hotdata.skb_small_head_cache, head);
else
kfree(head);
}
static void skb_free_head(struct sk_buff *skb)
{
unsigned char *head = skb->head;
if (skb->head_frag) {
if (skb_pp_recycle(skb, head))
return;
skb_free_frag(head);
} else {
skb_kfree_head(head, skb_end_offset(skb));
}
}
static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
if (!skb_data_unref(skb, shinfo))
goto exit;
if (skb_zcopy(skb)) {
bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;
skb_zcopy_clear(skb, true);
if (skip_unref)
goto free_head;
}
for (i = 0; i < shinfo->nr_frags; i++)
__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
free_head:
if (shinfo->frag_list)
kfree_skb_list_reason(shinfo->frag_list, reason);
skb_free_head(skb);
exit:
/* When we clone an SKB we copy the recycling bit. The pp_recycle
* bit is only set on the head though, so in order to avoid races
* while trying to recycle fragments on __skb_frag_unref() we need
* to make one SKB responsible for triggering the recycle path.
* So disable the recycling bit if an SKB is cloned and we have
* additional references to the fragmented part of the SKB.
* Eventually the last SKB will have the recycling bit set and it's
* dataref set to 0, which will trigger the recycling
*/
skb->pp_recycle = 0;
}
/*
* Free an skbuff by memory without cleaning the state.
*/
static void kfree_skbmem(struct sk_buff *skb)
{
struct sk_buff_fclones *fclones;
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
kmem_cache_free(net_hotdata.skbuff_cache, skb);
return;
case SKB_FCLONE_ORIG:
fclones = container_of(skb, struct sk_buff_fclones, skb1);
/* We usually free the clone (TX completion) before original skb
* This test would have no chance to be true for the clone,
* while here, branch prediction will be good.
*/
if (refcount_read(&fclones->fclone_ref) == 1)
goto fastpath;
break;
default: /* SKB_FCLONE_CLONE */
fclones = container_of(skb, struct sk_buff_fclones, skb2);
break;
}
if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
if (skb->destructor) {
DEBUG_NET_WARN_ON_ONCE(in_hardirq());
skb->destructor(skb);
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb_nfct(skb));
#endif
skb_ext_put(skb);
}
/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
skb_release_head_state(skb);
if (likely(skb->head))
skb_release_data(skb, reason);
}
/**
* __kfree_skb - private function
* @skb: buffer
*
* Free an sk_buff. Release anything attached to the buffer.
* Clean the state. This is an internal helper function. Users should
* always call kfree_skb
*/
void __kfree_skb(struct sk_buff *skb)
{
skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
static __always_inline
bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason reason)
{
if (unlikely(!skb_unref(skb)))
return false;
DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
u32_get_bits(reason,
SKB_DROP_REASON_SUBSYS_MASK) >=
SKB_DROP_REASON_SUBSYS_NUM);
if (reason == SKB_CONSUMED)
trace_consume_skb(skb, __builtin_return_address(0));
else
trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
return true;
}
/**
* sk_skb_reason_drop - free an sk_buff with special reason
* @sk: the socket to receive @skb, or NULL if not applicable
* @skb: buffer to free
* @reason: reason why this skb is dropped
*
* Drop a reference to the buffer and free it if the usage count has hit
* zero. Meanwhile, pass the receiving socket and drop reason to
* 'kfree_skb' tracepoint.
*/
void __fix_address
sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
if (__sk_skb_reason_drop(sk, skb, reason))
__kfree_skb(skb);
}
EXPORT_SYMBOL(sk_skb_reason_drop);
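/*
 * Illustrative sketch (not part of this file): callers usually reach this
 * through kfree_skb_reason()/kfree_skb(); passing a specific reason makes
 * the drop visible to the "kfree_skb" tracepoint and drop monitors.
 *
 *    if (!sk) {
 *        kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
 *        return;
 *    }
 */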
#define KFREE_SKB_BULK_SIZE 16
struct skb_free_array {
unsigned int skb_count;
void *skb_array[KFREE_SKB_BULK_SIZE];
};
static void kfree_skb_add_bulk(struct sk_buff *skb,
struct skb_free_array *sa,
enum skb_drop_reason reason)
{
/* if SKB is a clone, don't handle this case */
if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
__kfree_skb(skb);
return;
}
skb_release_all(skb, reason);
sa->skb_array[sa->skb_count++] = skb;
if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
sa->skb_array);
sa->skb_count = 0;
}
}
void __fix_address
kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
{
struct skb_free_array sa;
sa.skb_count = 0;
while (segs) {
struct sk_buff *next = segs->next;
if (__sk_skb_reason_drop(NULL, segs, reason)) {
skb_poison_list(segs);
kfree_skb_add_bulk(segs, &sa, reason);
}
segs = next;
}
if (sa.skb_count)
kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count,
sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
/* Dump skb information and contents.
*
* Must only be called from net_ratelimit()-ed paths.
*
* Dumps whole packets if full_pkt, only headers otherwise.
*/
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
struct skb_shared_info *sh = skb_shinfo(skb);
struct net_device *dev = skb->dev;
struct sock *sk = skb->sk;
struct sk_buff *list_skb;
bool has_mac, has_trans;
int headroom, tailroom;
int i, len, seg_len;
if (full_pkt)
len = skb->len;
else
len = min_t(int, skb->len, MAX_HEADER + 128);
headroom = skb_headroom(skb);
tailroom = skb_tailroom(skb);
has_mac = skb_mac_header_was_set(skb);
has_trans = skb_transport_header_was_set(skb);
printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
"mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
"shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
"csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
"hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
"priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
"encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
level, skb->len, headroom, skb_headlen(skb), tailroom,
has_mac ? skb->mac_header : -1,
has_mac ? skb_mac_header_len(skb) : -1,
skb->mac_len,
skb->network_header,
has_trans ? skb_network_header_len(skb) : -1,
has_trans ? skb->transport_header : -1,
sh->tx_flags, sh->nr_frags,
sh->gso_size, sh->gso_type, sh->gso_segs,
skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed,
skb->csum_complete_sw, skb->csum_valid, skb->csum_level,
skb->hash, skb->sw_hash, skb->l4_hash,
ntohs(skb->protocol), skb->pkt_type, skb->skb_iif,
skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all,
skb->encapsulation, skb->inner_protocol, skb->inner_mac_header,
skb->inner_network_header, skb->inner_transport_header);
if (dev)
printk("%sdev name=%s feat=%pNF\n",
level, dev->name, &dev->features);
if (sk)
printk("%ssk family=%hu type=%u proto=%u\n",
level, sk->sk_family, sk->sk_type, sk->sk_protocol);
if (full_pkt && headroom)
print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
16, 1, skb->head, headroom, false);
seg_len = min_t(int, skb_headlen(skb), len);
if (seg_len)
print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
16, 1, skb->data, seg_len, false);
len -= seg_len;
if (full_pkt && tailroom)
print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
16, 1, skb_tail_pointer(skb), tailroom, false);
for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 p_off, p_len, copied;
struct page *p;
u8 *vaddr;
if (skb_frag_is_net_iov(frag)) {
printk("%sskb frag %d: not readable\n", level, i);
len -= skb_frag_size(frag);
if (!len)
break;
continue;
}
skb_frag_foreach_page(frag, skb_frag_off(frag),
skb_frag_size(frag), p, p_off, p_len,
copied) {
seg_len = min_t(int, p_len, len);
vaddr = kmap_atomic(p);
print_hex_dump(level, "skb frag: ",
DUMP_PREFIX_OFFSET,
16, 1, vaddr + p_off, seg_len, false);
kunmap_atomic(vaddr);
len -= seg_len;
if (!len)
break;
}
}
if (full_pkt && skb_has_frag_list(skb)) {
printk("skb fraglist:\n");
skb_walk_frags(skb, list_skb)
skb_dump(level, list_skb, true);
}
}
EXPORT_SYMBOL(skb_dump);
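/*
 * Illustrative sketch (not part of this file): skb_dump() must be
 * rate-limited by the caller, e.g. from a validation failure path:
 *
 *    if (net_ratelimit())
 *        skb_dump(KERN_ERR, skb, false);
 */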
/**
* skb_tx_error - report an sk_buff xmit error
* @skb: buffer that triggered an error
*
* Report xmit error if a device callback is tracking this skb.
* skb must be freed afterwards.
*/
void skb_tx_error(struct sk_buff *skb)
{
if (skb) {
skb_zcopy_downgrade_managed(skb);
skb_zcopy_clear(skb, true);
}
}
EXPORT_SYMBOL(skb_tx_error);
#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
*
* Drop a ref to the buffer and free it if the usage count has hit zero
* Functions identically to kfree_skb, but kfree_skb assumes that the frame
* is being dropped after a failure and notes that
*/
void consume_skb(struct sk_buff *skb)
{
if (!skb_unref(skb))
return;
trace_consume_skb(skb, __builtin_return_address(0));
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif
/**
* __consume_stateless_skb - free an skbuff, assuming it is stateless
* @skb: buffer to free
*
* Like consume_skb(), but this variant assumes that this is the last
* skb reference and all the head states have been already dropped
*/
void __consume_stateless_skb(struct sk_buff *skb)
{
trace_consume_skb(skb, __builtin_return_address(0));
skb_release_data(skb, SKB_CONSUMED);
kfree_skbmem(skb);
}
static void napi_skb_cache_put(struct sk_buff *skb)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
u32 i;
if (!kasan_mempool_poison_object(skb))
return;
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
nc->skb_cache[nc->skb_count++] = skb;
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
kasan_mempool_unpoison_object(nc->skb_cache[i],
kmem_cache_size(net_hotdata.skbuff_cache));
kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
}
void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
skb_release_all(skb, reason);
napi_skb_cache_put(skb);
}
void napi_skb_free_stolen_head(struct sk_buff *skb)
{
if (unlikely(skb->slow_gro)) {
nf_reset_ct(skb);
skb_dst_drop(skb);
skb_ext_put(skb);
skb_orphan(skb);
skb->slow_gro = 0;
}
napi_skb_cache_put(skb);
}
void napi_consume_skb(struct sk_buff *skb, int budget)
{
/* Zero budget indicates a non-NAPI context called us, like netpoll */
if (unlikely(!budget)) {
dev_consume_skb_any(skb);
return;
}
DEBUG_NET_WARN_ON_ONCE(!in_softirq());
if (!skb_unref(skb))
return;
/* if reaching here SKB is ready to free */
trace_consume_skb(skb, __builtin_return_address(0));
/* if SKB is a clone, don't handle this case */
if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
__kfree_skb(skb);
return;
}
skb_release_all(skb, SKB_CONSUMED);
napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
BUILD_BUG_ON(offsetof(struct sk_buff, field) != \
offsetof(struct sk_buff, headers.field));
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;
/* We do not copy old->sk */
new->dev = old->dev;
memcpy(new->cb, old->cb, sizeof(old->cb));
skb_dst_copy(new, old);
__skb_ext_copy(new, old);
__nf_copy(new, old, false);
/* Note : this field could be in the headers group.
* It is not yet because we do not want to have a 16 bit hole
*/
new->queue_mapping = old->queue_mapping;
memcpy(&new->headers, &old->headers, sizeof(new->headers));
CHECK_SKB_FIELD(protocol);
CHECK_SKB_FIELD(csum);
CHECK_SKB_FIELD(hash);
CHECK_SKB_FIELD(priority);
CHECK_SKB_FIELD(skb_iif);
CHECK_SKB_FIELD(vlan_proto);
CHECK_SKB_FIELD(vlan_tci);
CHECK_SKB_FIELD(transport_header);
CHECK_SKB_FIELD(network_header);
CHECK_SKB_FIELD(mac_header);
CHECK_SKB_FIELD(inner_protocol);
CHECK_SKB_FIELD(inner_transport_header);
CHECK_SKB_FIELD(inner_network_header);
CHECK_SKB_FIELD(inner_mac_header);
CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
CHECK_SKB_FIELD(napi_id);
#endif
CHECK_SKB_FIELD(alloc_cpu);
#ifdef CONFIG_XPS
CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
CHECK_SKB_FIELD(tc_index);
#endif
}
/*
* You should not add any new code to this function. Add it to
* __copy_skb_header above instead.
*/
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x
n->next = n->prev = NULL;
n->sk = NULL;
__copy_skb_header(n, skb);
C(len);
C(data_len);
C(mac_len);
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
n->peeked = 0;
C(pfmemalloc);
C(pp_recycle);
n->destructor = NULL;
C(tail);
C(end);
C(head);
C(head_frag);
C(data);
C(truesize);
refcount_set(&n->users, 1);
atomic_inc(&(skb_shinfo(skb)->dataref));
skb->cloned = 1;
return n;
#undef C
}
/**
* alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
* @first: first sk_buff of the msg
*/
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
struct sk_buff *n;
n = alloc_skb(0, GFP_ATOMIC);
if (!n)
return NULL;
n->len = first->len;
n->data_len = first->len;
n->truesize = first->truesize;
skb_shinfo(n)->frag_list = first;
__copy_skb_header(n, first);
n->destructor = NULL;
return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
/**
* skb_morph - morph one skb into another
* @dst: the skb to receive the contents
* @src: the skb to supply the contents
*
* This is identical to skb_clone except that the target skb is
* supplied by the user.
*
* The target skb is returned upon exit.
*/
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
skb_release_all(dst, SKB_CONSUMED);
return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
struct user_struct *user;
if (capable(CAP_IPC_LOCK) || !size)
return 0;
rlim = rlimit(RLIMIT_MEMLOCK);
if (rlim == RLIM_INFINITY)
return 0;
num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
max_pg = rlim >> PAGE_SHIFT;
user = mmp->user ? : current_user();
old_pg = atomic_long_read(&user->locked_vm);
do {
new_pg = old_pg + num_pg;
if (new_pg > max_pg)
return -ENOBUFS;
} while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));
if (!mmp->user) {
mmp->user = get_uid(user);
mmp->num_pg = num_pg;
} else {
mmp->num_pg += num_pg;
}
return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
if (mmp->user) {
atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
free_uid(mmp->user);
}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size,
bool devmem)
{
struct ubuf_info_msgzc *uarg;
struct sk_buff *skb;
WARN_ON_ONCE(!in_task());
skb = sock_omalloc(sk, 0, GFP_KERNEL);
if (!skb)
return NULL;
BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
uarg = (void *)skb->cb;
uarg->mmp.user = NULL;
if (likely(!devmem) && mm_account_pinned_pages(&uarg->mmp, size)) {
kfree_skb(skb);
return NULL;
}
uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;
uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
uarg->len = 1;
uarg->bytelen = size;
uarg->zerocopy = 1;
uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
refcount_set(&uarg->ubuf.refcnt, 1);
sock_hold(sk);
return &uarg->ubuf;
}
static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
{
return container_of((void *)uarg, struct sk_buff, cb);
}
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg, bool devmem)
{
if (uarg) {
struct ubuf_info_msgzc *uarg_zc;
const u32 byte_limit = 1 << 19; /* limit to a few TSO */
u32 bytelen, next;
/* there might be non MSG_ZEROCOPY users */
if (uarg->ops != &msg_zerocopy_ubuf_ops)
return NULL;
/* realloc only when socket is locked (TCP, UDP cork),
* so uarg->len and sk_zckey access is serialized
*/
if (!sock_owned_by_user(sk)) {
WARN_ON_ONCE(1);
return NULL;
}
uarg_zc = uarg_to_msgzc(uarg);
bytelen = uarg_zc->bytelen + size;
if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
/* TCP can create new skb to attach new uarg */
if (sk->sk_type == SOCK_STREAM)
goto new_alloc;
return NULL;
}
next = (u32)atomic_read(&sk->sk_zckey);
if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
if (likely(!devmem) &&
mm_account_pinned_pages(&uarg_zc->mmp, size))
return NULL;
uarg_zc->len++;
uarg_zc->bytelen = bytelen;
atomic_set(&sk->sk_zckey, ++next);
/* no extra ref when appending to datagram (MSG_MORE) */
if (sk->sk_type == SOCK_STREAM)
net_zcopy_get(uarg);
return uarg;
}
}
new_alloc:
return msg_zerocopy_alloc(sk, size, devmem);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
u32 old_lo, old_hi;
u64 sum_len;
old_lo = serr->ee.ee_info;
old_hi = serr->ee.ee_data;
sum_len = old_hi - old_lo + 1ULL + len;
if (sum_len >= (1ULL << 32))
return false;
if (lo != old_hi + 1)
return false;
serr->ee.ee_data += len;
return true;
}
static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
{
struct sk_buff *tail, *skb = skb_from_uarg(uarg);
struct sock_exterr_skb *serr;
struct sock *sk = skb->sk;
struct sk_buff_head *q;
unsigned long flags;
bool is_zerocopy;
u32 lo, hi;
u16 len;
mm_unaccount_pinned_pages(&uarg->mmp);
/* if !len, there was only 1 call, and it was aborted
* so do not queue a completion notification
*/
if (!uarg->len || sock_flag(sk, SOCK_DEAD))
goto release;
len = uarg->len;
lo = uarg->id;
hi = uarg->id + len - 1;
is_zerocopy = uarg->zerocopy;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = 0;
serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
serr->ee.ee_data = hi;
serr->ee.ee_info = lo;
if (!is_zerocopy)
serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
q = &sk->sk_error_queue;
spin_lock_irqsave(&q->lock, flags);
tail = skb_peek_tail(q);
if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
!skb_zerocopy_notify_extend(tail, lo, len)) {
__skb_queue_tail(q, skb);
skb = NULL;
}
spin_unlock_irqrestore(&q->lock, flags);
sk_error_report(sk);
release:
consume_skb(skb);
sock_put(sk);
}
static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg,
bool success)
{
struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);
uarg_zc->zerocopy = uarg_zc->zerocopy & success;
if (refcount_dec_and_test(&uarg->refcnt))
__msg_zerocopy_callback(uarg_zc);
}
void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;
atomic_dec(&sk->sk_zckey);
uarg_to_msgzc(uarg)->len--;
if (have_uref)
msg_zerocopy_complete(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
.complete = msg_zerocopy_complete,
};
EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg,
struct net_devmem_dmabuf_binding *binding)
{
int err, orig_len = skb->len;
if (uarg->ops->link_skb) {
err = uarg->ops->link_skb(skb, uarg);
if (err)
return err;
} else {
struct ubuf_info *orig_uarg = skb_zcopy(skb);
/* An skb can only point to one uarg. This edge case happens
* when TCP appends to an skb, but zerocopy_realloc triggered
* a new alloc.
*/
if (orig_uarg && uarg != orig_uarg)
return -EEXIST;
}
err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len,
binding);
if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
struct sock *save_sk = skb->sk;
/* Streams do not free skb on error. Reset to prev state. */
iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
skb->sk = sk;
___pskb_trim(skb, orig_len);
skb->sk = save_sk;
return err;
}
skb_zcopy_set(skb, uarg, NULL);
return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
{
int i;
skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
}
EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);
static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
gfp_t gfp_mask)
{
if (skb_zcopy(orig)) {
if (skb_zcopy(nskb)) {
/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
if (!gfp_mask) {
WARN_ON_ONCE(1);
return -ENOMEM;
}
if (skb_uarg(nskb) == skb_uarg(orig))
return 0;
if (skb_copy_ubufs(nskb, GFP_ATOMIC))
return -EIO;
}
skb_zcopy_set(nskb, skb_uarg(orig), NULL);
}
return 0;
}
/**
* skb_copy_ubufs - copy userspace skb frags buffers to kernel
* @skb: the skb to modify
* @gfp_mask: allocation priority
*
* This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
* It will copy all frags into kernel and drop the reference
* to userspace pages.
*
* If this function is called from an interrupt gfp_mask() must be
* %GFP_ATOMIC.
*
* Returns 0 on success or a negative error code on failure
* to allocate kernel memory to copy to.
*/
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
int num_frags = skb_shinfo(skb)->nr_frags;
struct page *page, *head = NULL;
int i, order, psize, new_frags;
u32 d_off;
if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
return -EINVAL;
if (!skb_frags_readable(skb))
return -EFAULT;
if (!num_frags)
goto release;
/* We might have to allocate high order pages, so compute what minimum
* page order is needed.
*/
order = 0;
while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
order++;
psize = (PAGE_SIZE << order);
new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
for (i = 0; i < new_frags; i++) {
page = alloc_pages(gfp_mask | __GFP_COMP, order);
if (!page) {
while (head) {
struct page *next = (struct page *)page_private(head);
put_page(head);
head = next;
}
return -ENOMEM;
}
set_page_private(page, (unsigned long)head);
head = page;
}
page = head;
d_off = 0;
for (i = 0; i < num_frags; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u32 p_off, p_len, copied;
struct page *p;
u8 *vaddr;
skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
p, p_off, p_len, copied) {
u32 copy, done = 0;
vaddr = kmap_atomic(p);
while (done < p_len) {
if (d_off == psize) {
d_off = 0;
page = (struct page *)page_private(page);
}
copy = min_t(u32, psize - d_off, p_len - done);
memcpy(page_address(page) + d_off,
vaddr + p_off + done, copy);
done += copy;
d_off += copy;
}
kunmap_atomic(vaddr);
}
}
/* skb frags release userspace buffers */
for (i = 0; i < num_frags; i++)
skb_frag_unref(skb, i);
/* skb frags point to kernel buffers */
for (i = 0; i < new_frags - 1; i++) {
__skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
head = (struct page *)page_private(head);
}
__skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
d_off);
skb_shinfo(skb)->nr_frags = new_frags;
release:
skb_zcopy_clear(skb, false);
return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
* skb_clone - duplicate an sk_buff
* @skb: buffer to clone
* @gfp_mask: allocation priority
*
* Duplicate an &sk_buff. The new one is not owned by a socket. Both
* copies share the same packet data but not structure. The new
* buffer has a reference count of 1. If the allocation fails the
* function returns %NULL otherwise the new buffer is returned.
*
* If this function is called from an interrupt gfp_mask() must be
* %GFP_ATOMIC.
*/
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff_fclones *fclones = container_of(skb,
struct sk_buff_fclones,
skb1);
struct sk_buff *n;
if (skb_orphan_frags(skb, gfp_mask))
return NULL;
if (skb->fclone == SKB_FCLONE_ORIG &&
refcount_read(&fclones->fclone_ref) == 1) {
n = &fclones->skb2;
refcount_set(&fclones->fclone_ref, 2);
n->fclone = SKB_FCLONE_CLONE;
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
if (!n)
return NULL;
n->fclone = SKB_FCLONE_UNAVAILABLE;
}
return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
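/*
 * Illustrative sketch (not part of this file): a clone shares the data, so a
 * caller that wants to edit headers typically clones and then ensures a
 * private, writable head before modifying it.
 *
 *    struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *    if (nskb && skb_cow_head(nskb, 0)) {
 *        kfree_skb(nskb);
 *        nskb = NULL;
 *    }
 */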
void skb_headers_offset_update(struct sk_buff *skb, int off)
{
/* Only adjust this if it actually is csum_start rather than csum */
if (skb->ip_summed == CHECKSUM_PARTIAL)
skb->csum_start += off;
/* {transport,network,mac}_header and tail are relative to skb->head */
skb->transport_header += off;
skb->network_header += off;
if (skb_mac_header_was_set(skb))
skb->mac_header += off;
skb->inner_transport_header += off;
skb->inner_network_header += off;
skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
__copy_skb_header(new, old);
skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);
static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
if (skb_pfmemalloc(skb))
return SKB_ALLOC_RX;
return 0;
}
/**
* skb_copy - create private copy of an sk_buff
* @skb: buffer to copy
* @gfp_mask: allocation priority
*
* Make a copy of both an &sk_buff and its data. This is used when the
* caller wishes to modify the data and needs a private copy of the
* data to alter. Returns %NULL on failure or the pointer to the buffer
* on success. The returned buffer has a reference count of 1.
*
* As by-product this function converts non-linear &sk_buff to linear
* one, so that &sk_buff becomes completely private and caller is allowed
* to modify all the data of returned buffer. This means that this
* function is not recommended for use in circumstances when only
* header is going to be modified. Use pskb_copy() instead.
*/
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *n;
unsigned int size;
int headerlen;
if (!skb_frags_readable(skb))
return NULL;
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
headerlen = skb_headroom(skb);
size = skb_end_offset(skb) + skb->data_len;
n = __alloc_skb(size, gfp_mask,
skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
/* Set the data pointer */
skb_reserve(n, headerlen);
/* Set the tail pointer and length */
skb_put(n, skb->len);
BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
skb_copy_header(n, skb);
return n;
}
EXPORT_SYMBOL(skb_copy);
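/*
 * Illustrative sketch (not part of this file): skb_copy() linearizes, so it
 * is the right tool only when the payload itself must be rewritten;
 * header-only edits are cheaper with pskb_copy(). "new_hdr" and "hdr_len"
 * are hypothetical.
 *
 *    struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *
 *    if (priv)
 *        memcpy(priv->data, new_hdr, hdr_len);
 */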
/**
* __pskb_copy_fclone - create copy of an sk_buff with private head.
* @skb: buffer to copy
* @headroom: headroom of new skb
* @gfp_mask: allocation priority
* @fclone: if true allocate the copy of the skb from the fclone
* cache instead of the head cache; it is recommended to set this
* to true for the cases where the copy will likely be cloned
*
* Make a copy of both an &sk_buff and part of its data, located
* in header. Fragmented data remain shared. This is used when
* the caller wishes to modify only header of &sk_buff and needs
* private copy of the header to alter. Returns %NULL on failure
* or the pointer to the buffer on success.
* The returned buffer has a reference count of 1.
*/
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
gfp_t gfp_mask, bool fclone)
{
unsigned int size = skb_headlen(skb) + headroom;
int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
if (!n)
goto out;
/* Set the data pointer */
skb_reserve(n, headroom);
/* Set the tail pointer and length */
skb_put(n, skb_headlen(skb));
/* Copy the bytes */
skb_copy_from_linear_data(skb, n->data, n->len);
n->truesize += skb->data_len;
n->data_len = skb->data_len;
n->len = skb->len;
if (skb_shinfo(skb)->nr_frags) {
int i;
if (skb_orphan_frags(skb, gfp_mask) ||
skb_zerocopy_clone(n, skb, gfp_mask)) {
kfree_skb(n);
n = NULL;
goto out;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
skb_frag_ref(skb, i);
}
skb_shinfo(n)->nr_frags = i;
}
if (skb_has_frag_list(skb)) {
skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
skb_clone_fraglist(n);
}
skb_copy_header(n, skb);
out:
return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);
/**
* pskb_expand_head - reallocate header of &sk_buff
* @skb: buffer to reallocate
* @nhead: room to add at head
* @ntail: room to add at tail
* @gfp_mask: allocation priority
*
* Expands (or creates identical copy, if @nhead and @ntail are zero)
* header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
* reference count of 1. Returns zero on success or a negative error code
* if expansion failed. In the latter case, &sk_buff is not changed.
*
* All the pointers pointing into skb header may change and must be
* reloaded after call to this function.
*/
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
gfp_t gfp_mask)
{
unsigned int osize = skb_end_offset(skb);
unsigned int size = osize + nhead + ntail;
long off;
u8 *data;
int i;
BUG_ON(nhead < 0);
BUG_ON(skb_shared(skb));
skb_zcopy_downgrade_managed(skb);
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
size = SKB_WITH_OVERHEAD(size);
/* Copy only real data... and, alas, header. This should be
* optimized for the cases when header is void.
*/
memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
memcpy((struct skb_shared_info *)(data + size),
skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
/*
* if shinfo is shared we must drop the old head gracefully, but if it
* is not we can just drop the old head and let the existing refcount
* be since all we did is relocate the values
*/
if (skb_cloned(skb)) {
if (skb_orphan_frags(skb, gfp_mask))
goto nofrags;
if (skb_zcopy(skb))
refcount_inc(&skb_uarg(skb)->refcnt);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
skb_release_data(skb, SKB_CONSUMED);
} else {
skb_free_head(skb);
}
off = (data + nhead) - skb->head;
skb->head = data;
skb->head_frag = 0;
skb->data += off;
skb_set_end_offset(skb, size);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
off = nhead;
#endif
skb->tail += off;
skb_headers_offset_update(skb, nhead);
skb->cloned = 0;
skb->hdr_len = 0;
skb->nohdr = 0;
atomic_set(&skb_shinfo(skb)->dataref, 1);
skb_metadata_clear(skb);
/* It is not generally safe to change skb->truesize.
* For the moment, we really care of rx path, or
* when skb is orphaned (not attached to a socket).
*/
if (!skb->sk || skb->destructor == sock_edemux)
skb->truesize += size - osize;
return 0;
nofrags:
skb_kfree_head(data, size);
nodata:
return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
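/*
 * Illustrative sketch (not part of this file): a tunnel that needs extra
 * headroom for encapsulation might grow the head like this. "needed" and
 * "hdr" are hypothetical.
 *
 *    if (skb_headroom(skb) < needed &&
 *        pskb_expand_head(skb, SKB_DATA_ALIGN(needed - skb_headroom(skb)),
 *                         0, GFP_ATOMIC))
 *        goto drop;
 *    hdr = skb_push(skb, needed);
 */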
/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
struct sk_buff *skb2;
int delta = headroom - skb_headroom(skb);
if (delta <= 0)
skb2 = pskb_copy(skb, GFP_ATOMIC);
else {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
GFP_ATOMIC)) {
kfree_skb(skb2);
skb2 = NULL;
}
}
return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
/* Note: We plan to rework this in linux-6.4 */
int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
{
unsigned int saved_end_offset, saved_truesize;
struct skb_shared_info *shinfo;
int res;
saved_end_offset = skb_end_offset(skb);
saved_truesize = skb->truesize;
res = pskb_expand_head(skb, 0, 0, pri);
if (res)
return res;
skb->truesize = saved_truesize;
if (likely(skb_end_offset(skb) == saved_end_offset))
return 0;
/* We can not change skb->end if the original or new value
* is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
*/
if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
/* We think this path should not be taken.
* Add a temporary trace to warn us just in case.
*/
pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
saved_end_offset, skb_end_offset(skb));
WARN_ON_ONCE(1);
return 0;
}
shinfo = skb_shinfo(skb);
/* We are about to change back skb->end,
* we need to move skb_shinfo() to its new location.
*/
memmove(skb->head + saved_end_offset,
shinfo,
offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));
skb_set_end_offset(skb, saved_end_offset);
return 0;
}
/**
* skb_expand_head - reallocate header of &sk_buff
* @skb: buffer to reallocate
* @headroom: needed headroom
*
* Unlike skb_realloc_headroom, this one does not allocate a new skb
* if possible; copies skb->sk to new skb as needed
* and frees original skb in case of failures.
*
* It expects an increased headroom and generates a warning otherwise.
*/
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
{
int delta = headroom - skb_headroom(skb);
int osize = skb_end_offset(skb);
struct sock *sk = skb->sk;
if (WARN_ONCE(delta <= 0,
"%s is expecting an increase in the headroom", __func__))
return skb;
delta = SKB_DATA_ALIGN(delta);
/* pskb_expand_head() might crash if skb is shared. */
if (skb_shared(skb) || !is_skb_wmem(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!nskb))
goto fail;
if (sk)
skb_set_owner_w(nskb, sk);
consume_skb(skb);
skb = nskb;
}
if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
goto fail;
if (sk && is_skb_wmem(skb)) {
delta = skb_end_offset(skb) - osize;
refcount_add(delta, &sk->sk_wmem_alloc);
skb->truesize += delta;
}
return skb;
fail:
kfree_skb(skb);
return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
/**
* skb_copy_expand - copy and expand sk_buff
* @skb: buffer to copy
* @newheadroom: new free bytes at head
* @newtailroom: new free bytes at tail
* @gfp_mask: allocation priority
*
* Make a copy of both an &sk_buff and its data and while doing so
* allocate additional space.
*
* This is used when the caller wishes to modify the data and needs a
* private copy of the data to alter as well as more space for new fields.
* Returns %NULL on failure or the pointer to the buffer
* on success. The returned buffer has a reference count of 1.
*
* You must pass %GFP_ATOMIC as the allocation priority if this function
* is called from an interrupt.
*/
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
gfp_t gfp_mask)
{
/*
* Allocate the copy buffer
*/
int head_copy_len, head_copy_off;
struct sk_buff *n;
int oldheadroom;
if (!skb_frags_readable(skb))
return NULL;
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
oldheadroom = skb_headroom(skb);
n = __alloc_skb(newheadroom + skb->len + newtailroom,
gfp_mask, skb_alloc_rx_flag(skb),
NUMA_NO_NODE);
if (!n)
return NULL;
skb_reserve(n, newheadroom);
/* Set the tail pointer and length */
skb_put(n, skb->len);
head_copy_len = oldheadroom;
head_copy_off = 0;
if (newheadroom <= head_copy_len)
head_copy_len = newheadroom;
else
head_copy_off = newheadroom - head_copy_len;
/* Copy the linear header and data. */
BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
skb->len + head_copy_len));
skb_copy_header(n, skb);
skb_headers_offset_update(n, newheadroom - oldheadroom);
return n;
}
EXPORT_SYMBOL(skb_copy_expand);
/**
* __skb_pad - zero pad the tail of an skb
* @skb: buffer to pad
* @pad: space to pad
* @free_on_error: free buffer on error
*
* Ensure that a buffer is followed by a padding area that is zero
* filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire.
*
* May return error in out of memory cases. The skb is freed on error
* if @free_on_error is true.
*/
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
int err;
int ntail;
/* If the skbuff is non-linear, tailroom is always zero. */
if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
memset(skb->data+skb->len, 0, pad);
return 0;
}
ntail = skb->data_len + pad - (skb->end - skb->tail);
if (likely(skb_cloned(skb) || ntail > 0)) {
err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
if (unlikely(err))
goto free_skb;
}
/* FIXME: The use of this function with non-linear skb's really needs
* to be audited.
*/
err = skb_linearize(skb);
if (unlikely(err))
goto free_skb;
memset(skb->data + skb->len, 0, pad);
return 0;
free_skb:
if (free_on_error)
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL(__skb_pad);
/**
* pskb_put - add data to the tail of a potentially fragmented buffer
* @skb: start of the buffer to use
* @tail: tail fragment of the buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the potentially
* fragmented buffer. @tail must be the last fragment of @skb -- or
* @skb itself. If this would exceed the total buffer size the kernel
* will panic. A pointer to the first byte of the extra data is
* returned.
*/
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
if (tail != skb) {
skb->data_len += len;
skb->len += len;
}
return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
/**
* skb_put - add data to a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer. If this would
* exceed the total buffer size the kernel will panic. A pointer to the
* first byte of the extra data is returned.
*/
void *skb_put(struct sk_buff *skb, unsigned int len)
{
void *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
if (unlikely(skb->tail > skb->end))
skb_over_panic(skb, len, __builtin_return_address(0));
return tmp;
}
EXPORT_SYMBOL(skb_put);
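/*
 * Illustrative sketch (not part of this file): skb_put() extends the data
 * area and returns a pointer to the new bytes, which the caller then fills.
 * "payload" and "payload_len" are hypothetical.
 *
 *    memcpy(skb_put(skb, payload_len), payload, payload_len);
 */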
/**
* skb_push - add data to the start of a buffer
* @skb: buffer to use
* @len: amount of data to add
*
* This function extends the used data area of the buffer at the buffer
* start. If this would exceed the total buffer headroom the kernel will
* panic. A pointer to the first byte of the extra data is returned.
*/
void *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
if (unlikely(skb->data < skb->head))
skb_under_panic(skb, len, __builtin_return_address(0));
return skb->data;
}
EXPORT_SYMBOL(skb_push);
/**
* skb_pull - remove data from the start of a buffer
* @skb: buffer to use
* @len: amount of data to remove
*
* This function removes data from the start of a buffer, returning
* the memory to the headroom. A pointer to the next data in the buffer
* is returned. Once the data has been pulled future pushes will overwrite
* the old data.
*/
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
/**
* skb_pull_data - remove data from the start of a buffer returning its
* original position.
* @skb: buffer to use
* @len: amount of data to remove
*
* This function removes data from the start of a buffer, returning
* the memory to the headroom. A pointer to the original data in the buffer
* is returned after checking if there is enough data to pull. Once the
* data has been pulled future pushes will overwrite the old data.
*/
void *skb_pull_data(struct sk_buff *skb, size_t len)
{
void *data = skb->data;
if (skb->len < len)
return NULL;
skb_pull(skb, len);
return data;
}
EXPORT_SYMBOL(skb_pull_data);
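/*
 * Illustrative sketch (not part of the original file): consuming a fixed-size
 * header while keeping a pointer to it, which is what skb_pull_data() is for.
 * "struct example_hdr" and the function name are hypothetical.
 */
struct example_hdr {
	u8 type;
	u8 len;
};

static int example_consume_header(struct sk_buff *skb)
{
	struct example_hdr *hdr;

	/* NULL means the buffer is shorter than the header. */
	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr)
		return -EINVAL;

	/* skb->data now points at the payload behind the header. */
	return hdr->type;
}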
/**
* skb_trim - remove end from a buffer
* @skb: buffer to alter
* @len: new length
*
* Cut the length of a buffer down by removing data from the tail. If
* the buffer is already under the length specified it is not modified.
* The skb must be linear.
*/
void skb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->len > len)
__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
/* Trims skb to length len. It can change skb pointers.
*/
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
struct sk_buff **fragp;
struct sk_buff *frag;
int offset = skb_headlen(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int i;
int err;
if (skb_cloned(skb) &&
unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
return err;
i = 0;
if (offset >= len)
goto drop_pages;
for (; i < nfrags; i++) {
int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (end < len) {
offset = end;
continue;
}
skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
drop_pages:
skb_shinfo(skb)->nr_frags = i;
for (; i < nfrags; i++)
skb_frag_unref(skb, i);
if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
goto done;
}
for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
fragp = &frag->next) {
int end = offset + frag->len;
if (skb_shared(frag)) {
struct sk_buff *nfrag;
nfrag = skb_clone(frag, GFP_ATOMIC);
if (unlikely(!nfrag))
return -ENOMEM;
nfrag->next = frag->next;
consume_skb(frag);
frag = nfrag;
*fragp = frag;
}
if (end < len) {
offset = end;
continue;
}
if (end > len &&
unlikely((err = pskb_trim(frag, len - offset))))
return err;
if (frag->next)
skb_drop_list(&frag->next);
break;
}
done:
if (len > skb_headlen(skb)) {
skb->data_len -= skb->len - len;
skb->len = len;
} else {
skb->len = len;
skb->data_len = 0;
skb_set_tail_pointer(skb, len);
}
if (!skb->sk || skb->destructor == sock_edemux)
skb_condense(skb);
return 0;
}
EXPORT_SYMBOL(___pskb_trim);
/* Note : use pskb_trim_rcsum() instead of calling this directly
*/
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_COMPLETE) {
int delta = skb->len - len;
skb->csum = csum_block_sub(skb->csum,
skb_checksum(skb, len, delta, 0),
len);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
if (offset + sizeof(__sum16) > hdlen)
return -EINVAL;
}
return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);
/**
* __pskb_pull_tail - advance tail of skb header
* @skb: buffer to reallocate
* @delta: number of bytes to advance tail
*
* The function makes sense only on a fragmented &sk_buff,
* it expands header moving its tail forward and copying necessary
* data from fragmented part.
*
* &sk_buff MUST have reference count of 1.
*
* Returns %NULL (and &sk_buff does not change) if pull failed
* or value of new tail of skb in the case of success.
*
* All the pointers pointing into skb header may change and must be
* reloaded after call to this function.
*/
/* Moves tail of skb head forward, copying data from fragmented part,
* when it is necessary.
* 1. It may fail due to malloc failure.
* 2. It may change skb pointers.
*
* It is pretty complicated. Luckily, it is called only in exceptional cases.
*/
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
/* If skb has not enough free space at tail, get new one
* plus 128 bytes for future expansions. If we have enough
* room at tail, reallocate without expansion only if skb is cloned.
*/
int i, k, eat = (skb->tail + delta) - skb->end;
if (!skb_frags_readable(skb))
return NULL;
if (eat > 0 || skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
GFP_ATOMIC))
return NULL;
}
BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
skb_tail_pointer(skb), delta));
/* Optimization: no fragments, no reason to pre-estimate
* the size of the pulled pages. Superb.
*/
if (!skb_has_frag_list(skb))
goto pull_pages;
/* Estimate size of pulled pages. */
eat = delta;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size >= eat)
goto pull_pages;
eat -= size;
}
/* If we need to update the frag list, we are in trouble.
* Certainly, it is possible to add an offset to the skb data,
* but taking into account that pulling is expected to
* be a very rare operation, it is worth fighting against
* further bloating of the skb head and crucifying ourselves here instead.
* Pure masochism, indeed. 8)8)
*/
if (eat) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
struct sk_buff *clone = NULL;
struct sk_buff *insp = NULL;
do {
if (list->len <= eat) {
/* Eaten as whole. */
eat -= list->len;
list = list->next;
insp = list;
} else {
/* Eaten partially. */
if (skb_is_gso(skb) && !list->head_frag &&
skb_headlen(list))
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
if (skb_shared(list)) {
/* Sucks! We need to fork list. :-( */
clone = skb_clone(list, GFP_ATOMIC);
if (!clone)
return NULL;
insp = list->next;
list = clone;
} else {
/* This may be pulled without
* problems. */
insp = list;
}
if (!pskb_pull(list, eat)) {
kfree_skb(clone);
return NULL;
}
break;
}
} while (eat);
/* Free pulled out fragments. */
while ((list = skb_shinfo(skb)->frag_list) != insp) {
skb_shinfo(skb)->frag_list = list->next;
consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {
clone->next = list;
skb_shinfo(skb)->frag_list = clone;
}
}
/* Success! Now we may commit changes to skb data. */
pull_pages:
eat = delta;
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size <= eat) {
skb_frag_unref(skb, i);
eat -= size;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
*frag = skb_shinfo(skb)->frags[i];
if (eat) {
skb_frag_off_add(frag, eat);
skb_frag_size_sub(frag, eat);
if (!i)
goto end;
eat = 0;
}
k++;
}
}
skb_shinfo(skb)->nr_frags = k;
end:
skb->tail += delta;
skb->data_len -= delta;
if (!skb->data_len)
skb_zcopy_clear(skb, false);
return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
/**
* skb_copy_bits - copy bits from skb to kernel buffer
* @skb: source skb
* @offset: offset in source
* @to: destination buffer
* @len: number of bytes to copy
*
* Copy the specified number of bytes from the source skb to the
* destination buffer.
*
* CAUTION ! :
* If its prototype is ever changed,
* check arch/{*}/net/{*}.S files,
* since it is called from BPF assembly code.
*/
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
int start = skb_headlen(skb);
struct sk_buff *frag_iter;
int i, copy;
if (offset > (int)skb->len - len)
goto fault;
/* Copy header. */
if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
skb_copy_from_linear_data_offset(skb, offset, to, copy);
if ((len -= copy) == 0)
return 0;
offset += copy;
to += copy;
}
if (!skb_frags_readable(skb))
goto fault;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
end = start + skb_frag_size(f);
if ((copy = end - offset) > 0) {
u32 p_off, p_len, copied;
struct page *p;
u8 *vaddr;
if (copy > len)
copy = len;
skb_frag_foreach_page(f,
skb_frag_off(f) + offset - start,
copy, p, p_off, p_len, copied) {
vaddr = kmap_atomic(p);
memcpy(to + copied, vaddr + p_off, p_len);
kunmap_atomic(vaddr);
}
if ((len -= copy) == 0)
return 0;
offset += copy;
to += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
if (skb_copy_bits(frag_iter, offset - start, to, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
to += copy;
}
start = end;
}
if (!len)
return 0;
fault:
return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
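/*
 * Illustrative sketch (not part of the original file): reading a header that
 * may span the linear area and fragments into a caller-supplied buffer. The
 * function name is hypothetical.
 */
static int example_peek_header(const struct sk_buff *skb, u8 *hdr, int hdrlen)
{
	/* Works for linear and non-linear skbs alike. */
	if (skb_copy_bits(skb, 0, hdr, hdrlen))
		return -EFAULT;

	return 0;
}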
/*
* Callback from splice_to_pipe(), if we need to release some pages
* at the end of the spd in case we error'ed out in filling the pipe.
*/
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
put_page(spd->pages[i]);
}
static struct page *linear_to_page(struct page *page, unsigned int *len,
unsigned int *offset,
struct sock *sk)
{
struct page_frag *pfrag = sk_page_frag(sk);
if (!sk_page_frag_refill(sk, pfrag))
return NULL;
*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
memcpy(page_address(pfrag->page) + pfrag->offset,
page_address(page) + *offset, *len);
*offset = pfrag->offset;
pfrag->offset += *len;
return pfrag->page;
}
static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
struct page *page,
unsigned int offset)
{
return spd->nr_pages &&
spd->pages[spd->nr_pages - 1] == page &&
(spd->partial[spd->nr_pages - 1].offset +
spd->partial[spd->nr_pages - 1].len == offset);
}
/*
* Fill page/offset/length into spd, if it can hold more pages.
*/
static bool spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
unsigned int *len, unsigned int offset, bool linear,
struct sock *sk)
{
if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
return true;
if (linear) {
page = linear_to_page(page, len, &offset, sk);
if (!page)
return true;
}
if (spd_can_coalesce(spd, page, offset)) {
spd->partial[spd->nr_pages - 1].len += *len;
return false;
}
get_page(page);
spd->pages[spd->nr_pages] = page;
spd->partial[spd->nr_pages].len = *len;
spd->partial[spd->nr_pages].offset = offset;
spd->nr_pages++;
return false;
}
static bool __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
unsigned int *len,
struct splice_pipe_desc *spd, bool linear,
struct sock *sk)
{
if (!*len)
return true;
/* skip this segment if already processed */
if (*off >= plen) {
*off -= plen;
return false;
}
/* ignore any bits we already processed */
poff += *off;
plen -= *off;
*off = 0;
do {
unsigned int flen = min(*len, plen);
if (spd_fill_page(spd, page, &flen, poff, linear, sk))
return true;
poff += flen;
plen -= flen;
*len -= flen;
if (!*len)
return true;
} while (plen);
return false;
}
/*
* Map linear and fragment data from the skb to spd. It reports true if the
* pipe is full or if we already spliced the requested length.
*/
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
unsigned int *offset, unsigned int *len,
struct splice_pipe_desc *spd, struct sock *sk)
{
struct sk_buff *iter;
int seg;
/* map the linear part :
* If skb->head_frag is set, this 'linear' part is backed by a
* fragment, and if the head is not shared with any clones then
* we can avoid a copy since we own the head portion of this page.
*/
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
offset, len, spd,
skb_head_is_locked(skb),
sk))
return true;
/*
* then map the fragments
*/
if (!skb_frags_readable(skb))
return false;
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (WARN_ON_ONCE(!skb_frag_page(f)))
return false;
if (__splice_segment(skb_frag_page(f),
skb_frag_off(f), skb_frag_size(f),
offset, len, spd, false, sk))
return true;
}
skb_walk_frags(skb, iter) {
if (*offset >= iter->len) {
*offset -= iter->len;
continue;
}
/* __skb_splice_bits() only fails if the output has no room
* left, so no point in going over the frag_list for the error
* case.
*/
if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
return true;
}
return false;
}
/*
* Map data from the skb to a pipe. Should handle both the linear part,
* the fragments, and the frag list.
*/
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
unsigned int flags)
{
struct partial_page partial[MAX_SKB_FRAGS];
struct page *pages[MAX_SKB_FRAGS];
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
.nr_pages_max = MAX_SKB_FRAGS,
.ops = &nosteal_pipe_buf_ops,
.spd_release = sock_spd_release,
};
int ret = 0;
__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
if (spd.nr_pages)
ret = splice_to_pipe(pipe, &spd);
return ret;
}
EXPORT_SYMBOL_GPL(skb_splice_bits);
static int sendmsg_locked(struct sock *sk, struct msghdr *msg)
{
struct socket *sock = sk->sk_socket;
size_t size = msg_data_left(msg);
if (!sock)
return -EINVAL;
if (!sock->ops->sendmsg_locked)
return sock_no_sendmsg_locked(sk, msg, size);
return sock->ops->sendmsg_locked(sk, msg, size);
}
static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg)
{
struct socket *sock = sk->sk_socket;
if (!sock)
return -EINVAL;
return sock_sendmsg(sock, msg);
}
typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
int len, sendmsg_func sendmsg, int flags)
{
int more_hint = sk_is_tcp(sk) ? MSG_MORE : 0;
unsigned int orig_len = len;
struct sk_buff *head = skb;
unsigned short fragidx;
int slen, ret;
do_frag_list:
/* Deal with head data */
while (offset < skb_headlen(skb) && len) {
struct kvec kv;
struct msghdr msg;
slen = min_t(int, len, skb_headlen(skb) - offset);
kv.iov_base = skb->data + offset;
kv.iov_len = slen;
memset(&msg, 0, sizeof(msg));
msg.msg_flags = MSG_DONTWAIT | flags;
if (slen < len)
msg.msg_flags |= more_hint;
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
sendmsg_unlocked, sk, &msg);
if (ret <= 0)
goto error;
offset += ret;
len -= ret;
}
/* All the data was skb head? */
if (!len)
goto out;
/* Make offset relative to start of frags */
offset -= skb_headlen(skb);
/* Find where we are in frag list */
for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
if (offset < skb_frag_size(frag))
break;
offset -= skb_frag_size(frag);
}
for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
slen = min_t(size_t, len, skb_frag_size(frag) - offset);
while (slen) {
struct bio_vec bvec;
struct msghdr msg = {
.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT |
flags,
};
if (slen < len)
msg.msg_flags |= more_hint;
bvec_set_page(&bvec, skb_frag_page(frag), slen,
skb_frag_off(frag) + offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
slen);
ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
sendmsg_unlocked, sk, &msg);
if (ret <= 0)
goto error;
len -= ret;
offset += ret;
slen -= ret;
}
offset = 0;
}
if (len) {
/* Process any frag lists */
if (skb == head) {
if (skb_has_frag_list(skb)) {
skb = skb_shinfo(skb)->frag_list;
goto do_frag_list;
}
} else if (skb->next) {
skb = skb->next;
goto do_frag_list;
}
}
out:
return orig_len - len;
error:
return orig_len == len ? ret : orig_len - len;
}
/* Send skb data on a socket. Socket must be locked. */
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len)
{
return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0);
}
EXPORT_SYMBOL_GPL(skb_send_sock_locked);
int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
int offset, int len, int flags)
{
return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags);
}
EXPORT_SYMBOL_GPL(skb_send_sock_locked_with_flags);
/* Send skb data on a socket. Socket must be unlocked. */
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
{
return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0);
}
/**
* skb_store_bits - store bits from kernel buffer to skb
* @skb: destination buffer
* @offset: offset in destination
* @from: source buffer
* @len: number of bytes to copy
*
* Copy the specified number of bytes from the source buffer to the
* destination skb. This function handles all the messy bits of
* traversing fragment lists and such.
*/
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
int start = skb_headlen(skb);
struct sk_buff *frag_iter;
int i, copy;
if (offset > (int)skb->len - len)
goto fault;
if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
skb_copy_to_linear_data_offset(skb, offset, from, copy);
if ((len -= copy) == 0)
return 0;
offset += copy;
from += copy;
}
if (!skb_frags_readable(skb))
goto fault;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
WARN_ON(start > offset + len);
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u32 p_off, p_len, copied;
struct page *p;
u8 *vaddr;
if (copy > len)
copy = len;
skb_frag_foreach_page(frag,
skb_frag_off(frag) + offset - start,
copy, p, p_off, p_len, copied) {
vaddr = kmap_atomic(p);
memcpy(vaddr + p_off, from + copied, p_len);
kunmap_atomic(vaddr);
}
if ((len -= copy) == 0)
return 0;
offset += copy;
from += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
if (skb_store_bits(frag_iter, offset - start,
from, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
from += copy;
}
start = end;
}
if (!len)
return 0;
fault:
return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
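/*
 * Illustrative sketch (not part of the original file): overwriting a few
 * bytes at a given offset of a possibly fragmented skb. The caller is assumed
 * to own the data, e.g. after skb_ensure_writable(); the marker bytes and the
 * function name are hypothetical.
 */
static int example_overwrite_bytes(struct sk_buff *skb, int offset)
{
	static const u8 marker[4] = { 0xde, 0xad, 0xbe, 0xef };

	return skb_store_bits(skb, offset, marker, sizeof(marker));
}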
/* Checksum skb data. */
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int pos = 0;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
csum = csum_partial(skb->data + offset, copy, csum);
if ((len -= copy) == 0)
return csum;
offset += copy;
pos = copy;
}
if (WARN_ON_ONCE(!skb_frags_readable(skb)))
return 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u32 p_off, p_len, copied;
struct page *p;
__wsum csum2;
u8 *vaddr;
if (copy > len)
copy = len;
skb_frag_foreach_page(frag,
skb_frag_off(frag) + offset - start,
copy, p, p_off, p_len, copied) {
vaddr = kmap_atomic(p);
csum2 = csum_partial(vaddr + p_off, p_len, 0);
kunmap_atomic(vaddr);
csum = csum_block_add(csum, csum2, pos);
pos += p_len;
}
if (!(len -= copy))
return csum;
offset += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
__wsum csum2;
if (copy > len)
copy = len;
csum2 = skb_checksum(frag_iter, offset - start, copy,
0);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
return csum;
offset += copy;
pos += copy;
}
start = end;
}
BUG_ON(len);
return csum;
}
EXPORT_SYMBOL(skb_checksum);
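/*
 * Illustrative sketch (not part of the original file): a software fallback
 * that checksums the whole packet and folds the result into the 16-bit
 * Internet checksum. The function name is hypothetical.
 */
static __sum16 example_full_checksum(const struct sk_buff *skb)
{
	__wsum csum = skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(csum);
}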
/* Both of above in one bottle. */
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
u8 *to, int len)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int pos = 0;
__wsum csum = 0;
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
csum = csum_partial_copy_nocheck(skb->data + offset, to,
copy);
if ((len -= copy) == 0)
return csum;
offset += copy;
to += copy;
pos = copy;
}
if (!skb_frags_readable(skb))
return 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
WARN_ON(start > offset + len);
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 p_off, p_len, copied;
struct page *p;
__wsum csum2;
u8 *vaddr;
if (copy > len)
copy = len;
skb_frag_foreach_page(frag,
skb_frag_off(frag) + offset - start,
copy, p, p_off, p_len, copied) {
vaddr = kmap_atomic(p);
csum2 = csum_partial_copy_nocheck(vaddr + p_off,
to + copied,
p_len);
kunmap_atomic(vaddr);
csum = csum_block_add(csum, csum2, pos);
pos += p_len;
}
if (!(len -= copy))
return csum;
offset += copy;
to += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
__wsum csum2;
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
csum2 = skb_copy_and_csum_bits(frag_iter,
offset - start,
to, copy);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
return csum;
offset += copy;
to += copy;
pos += copy;
}
start = end;
}
BUG_ON(len);
return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
#ifdef CONFIG_NET_CRC32C
u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
if (copy > 0) {
copy = min(copy, len);
crc = crc32c(crc, skb->data + offset, copy);
len -= copy;
if (len == 0)
return crc;
offset += copy;
}
if (WARN_ON_ONCE(!skb_frags_readable(skb)))
return 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
end = start + skb_frag_size(frag);
copy = end - offset;
if (copy > 0) {
u32 p_off, p_len, copied;
struct page *p;
u8 *vaddr;
copy = min(copy, len);
skb_frag_foreach_page(frag,
skb_frag_off(frag) + offset - start,
copy, p, p_off, p_len, copied) {
vaddr = kmap_atomic(p);
crc = crc32c(crc, vaddr + p_off, p_len);
kunmap_atomic(vaddr);
}
len -= copy;
if (len == 0)
return crc;
offset += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
copy = end - offset;
if (copy > 0) {
copy = min(copy, len);
crc = skb_crc32c(frag_iter, offset - start, copy, crc);
len -= copy;
if (len == 0)
return crc;
offset += copy;
}
start = end;
}
BUG_ON(len);
return crc;
}
EXPORT_SYMBOL(skb_crc32c);
#endif /* CONFIG_NET_CRC32C */
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
__sum16 sum;
sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
/* See comments in __skb_checksum_complete(). */
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
netdev_rx_csum_fault(skb->dev, skb);
}
if (!skb_shared(skb))
skb->csum_valid = !sum;
return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);
/* This function assumes skb->csum already holds pseudo header's checksum,
* which has been changed from the hardware checksum, for example, by
* __skb_checksum_validate_complete(). And, the original skb->csum must
* have been validated unsuccessfully for CHECKSUM_COMPLETE case.
*
* It returns non-zero if the recomputed checksum is still invalid, otherwise
* zero. The new checksum is stored back into skb->csum unless the skb is
* shared.
*/
__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
__wsum csum;
__sum16 sum;
csum = skb_checksum(skb, 0, skb->len, 0);
sum = csum_fold(csum_add(skb->csum, csum));
/* This check is inverted, because we already knew the hardware
* checksum is invalid before calling this function. So, if the
* re-computed checksum is valid instead, then we have a mismatch
* between the original skb->csum and skb_checksum(). This means either
* the original hardware checksum is incorrect or we screw up skb->csum
* when moving skb->data around.
*/
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
netdev_rx_csum_fault(skb->dev, skb);
}
if (!skb_shared(skb)) {
/* Save full packet checksum */
skb->csum = csum;
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum_complete_sw = 1;
skb->csum_valid = !sum;
}
return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);
/**
* skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
* @from: source buffer
*
* Calculates the amount of linear headroom needed in the 'to' skb passed
* into skb_zerocopy().
*/
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
unsigned int hlen = 0;
if (!from->head_frag ||
skb_headlen(from) < L1_CACHE_BYTES ||
skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
hlen = skb_headlen(from);
if (!hlen)
hlen = from->len;
}
if (skb_has_frag_list(from))
hlen = from->len;
return hlen;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
/**
* skb_zerocopy - Zero copy skb to skb
* @to: destination buffer
* @from: source buffer
* @len: number of bytes to copy from source buffer
* @hlen: size of linear headroom in destination buffer
*
* Copies up to @len bytes from @from to @to by creating references
* to the frags in the source buffer.
*
* The @hlen as calculated by skb_zerocopy_headlen() specifies the
* headroom in the @to buffer.
*
* Return value:
* 0: everything is OK
* -ENOMEM: couldn't orphan frags of @from due to lack of memory
* -EFAULT: skb_copy_bits() found some problem with skb geometry
*/
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
int i, j = 0;
int plen = 0; /* length of skb->head fragment */
int ret;
struct page *page;
unsigned int offset;
BUG_ON(!from->head_frag && !hlen);
/* don't bother with small payloads */
if (len <= skb_tailroom(to))
return skb_copy_bits(from, 0, skb_put(to, len), len);
if (hlen) {
ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
if (unlikely(ret))
return ret;
len -= hlen;
} else {
plen = min_t(int, skb_headlen(from), len);
if (plen) {
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
__skb_fill_netmem_desc(to, 0, page_to_netmem(page),
offset, plen);
get_page(page);
j = 1;
len -= plen;
}
}
skb_len_add(to, len + plen);
if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
skb_tx_error(from);
return -ENOMEM;
}
skb_zerocopy_clone(to, from, GFP_ATOMIC);
for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
int size;
if (!len)
break;
skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
len);
skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
len -= size;
skb_frag_ref(to, j);
j++;
}
skb_shinfo(to)->nr_frags = j;
return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);
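/*
 * Illustrative sketch (not part of the original file): pairing
 * skb_zerocopy_headlen() with skb_zerocopy() to duplicate packet data by
 * reference. The function name is hypothetical and error handling is reduced
 * to the minimum.
 */
static struct sk_buff *example_zerocopy_dup(struct sk_buff *from)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to;

	to = alloc_skb(hlen, GFP_ATOMIC);
	if (!to)
		return NULL;

	if (skb_zerocopy(to, from, from->len, hlen) < 0) {
		kfree_skb(to);
		return NULL;
	}

	return to;
}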
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
__wsum csum;
long csstart;
if (skb->ip_summed == CHECKSUM_PARTIAL)
csstart = skb_checksum_start_offset(skb);
else
csstart = skb_headlen(skb);
BUG_ON(csstart > skb_headlen(skb));
skb_copy_from_linear_data(skb, to, csstart);
csum = 0;
if (csstart != skb->len)
csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
skb->len - csstart);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
long csstuff = csstart + skb->csum_offset;
*((__sum16 *)(to + csstuff)) = csum_fold(csum);
}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);
/**
* skb_dequeue - remove from the head of the queue
* @list: list to dequeue from
*
* Remove the head of the list. The list lock is taken so the function
* may be used safely with other locking list functions. The head item is
* returned or %NULL if the list is empty.
*/
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
unsigned long flags;
struct sk_buff *result;
spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue(list);
spin_unlock_irqrestore(&list->lock, flags);
return result;
}
EXPORT_SYMBOL(skb_dequeue);
/**
* skb_dequeue_tail - remove from the tail of the queue
* @list: list to dequeue from
*
* Remove the tail of the list. The list lock is taken so the function
* may be used safely with other locking list functions. The tail item is
* returned or %NULL if the list is empty.
*/
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
unsigned long flags;
struct sk_buff *result;
spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue_tail(list);
spin_unlock_irqrestore(&list->lock, flags);
return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);
/**
* skb_queue_purge_reason - empty a list
* @list: list to empty
* @reason: drop reason
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function takes the list
* lock and is atomic with respect to other list locking functions.
*/
void skb_queue_purge_reason(struct sk_buff_head *list,
enum skb_drop_reason reason)
{
struct sk_buff_head tmp;
unsigned long flags;
if (skb_queue_empty_lockless(list))
return;
__skb_queue_head_init(&tmp);
spin_lock_irqsave(&list->lock, flags);
skb_queue_splice_init(list, &tmp);
spin_unlock_irqrestore(&list->lock, flags);
__skb_queue_purge_reason(&tmp, reason);
}
EXPORT_SYMBOL(skb_queue_purge_reason);
/**
* skb_rbtree_purge - empty a skb rbtree
* @root: root of the rbtree to empty
* Return value: the sum of truesizes of all purged skbs.
*
* Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
* the list and one reference dropped. This function does not take
* any lock. Synchronization should be handled by the caller (e.g., TCP
* out-of-order queue is protected by the socket lock).
*/
unsigned int skb_rbtree_purge(struct rb_root *root)
{
struct rb_node *p = rb_first(root);
unsigned int sum = 0;
while (p) {
struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
p = rb_next(p);
rb_erase(&skb->rbnode, root);
sum += skb->truesize;
kfree_skb(skb);
}
return sum;
}
void skb_errqueue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb, *next;
struct sk_buff_head kill;
unsigned long flags;
__skb_queue_head_init(&kill);
spin_lock_irqsave(&list->lock, flags);
skb_queue_walk_safe(list, skb, next) {
if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY ||
SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
continue;
__skb_unlink(skb, list);
__skb_queue_tail(&kill, skb);
}
spin_unlock_irqrestore(&list->lock, flags);
__skb_queue_purge(&kill);
}
EXPORT_SYMBOL(skb_errqueue_purge);
/**
* skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the start of the list. This function takes the
* list lock and can be used safely with other locking &sk_buff functions.
*
* A buffer cannot be placed on two lists at the same time.
*/
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_queue_head(list, newsk);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);
/**
* skb_queue_tail - queue a buffer at the list tail
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the tail of the list. This function takes the
* list lock and can be used safely with other locking &sk_buff functions.
*
* A buffer cannot be placed on two lists at the same time.
*/
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_queue_tail(list, newsk);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
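/*
 * Illustrative sketch (not part of the original file): the usual
 * producer/consumer pattern around a locked &sk_buff_head. The queue is
 * assumed to have been set up with skb_queue_head_init(); the function name
 * is hypothetical.
 */
static void example_queue_and_drain(struct sk_buff_head *queue,
				    struct sk_buff *skb)
{
	struct sk_buff *next;

	/* Producer side: append under the queue lock. */
	skb_queue_tail(queue, skb);

	/* Consumer side: drain until the queue is empty. */
	while ((next = skb_dequeue(queue)) != NULL)
		kfree_skb(next);
}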
/**
* skb_unlink - remove a buffer from a list
* @skb: buffer to remove
* @list: list to use
*
* Remove a packet from a list. The list locks are taken and this
* function is atomic with respect to other list locked calls
*
* You must know what list the SKB is on.
*/
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_unlink(skb, list);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
/**
* skb_append - append a buffer
* @old: buffer to insert after
* @newsk: buffer to insert
* @list: list to use
*
* Place a packet after a given packet in a list. The list locks are taken
* and this function is atomic with respect to other list locked calls.
* A buffer cannot be placed on two lists at the same time.
*/
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
__skb_queue_after(list, old, newsk);
spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);
static inline void skb_split_inside_header(struct sk_buff *skb,
struct sk_buff* skb1,
const u32 len, const int pos)
{
int i;
skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
pos - len);
/* And move data appendix as is. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
skb1->unreadable = skb->unreadable;
skb_shinfo(skb)->nr_frags = 0;
skb1->data_len = skb->data_len;
skb1->len += skb1->data_len;
skb->data_len = 0;
skb->len = len;
skb_set_tail_pointer(skb, len);
}
static inline void skb_split_no_header(struct sk_buff *skb,
struct sk_buff* skb1,
const u32 len, int pos)
{
int i, k = 0;
const int nfrags = skb_shinfo(skb)->nr_frags;
skb_shinfo(skb)->nr_frags = 0;
skb1->len = skb1->data_len = skb->len - len;
skb->len = len;
skb->data_len = len - pos;
for (i = 0; i < nfrags; i++) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (pos + size > len) {
skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
if (pos < len) {
/* Split frag.
* We have two variants in this case:
* 1. Move the whole frag to the second
* part, if it is possible. F.e.
* this approach is mandatory for TUX,
* where splitting is expensive.
* 2. Split it accurately. This is what we do.
*/
skb_frag_ref(skb, i);
skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
skb_shinfo(skb)->nr_frags++;
}
k++;
} else
skb_shinfo(skb)->nr_frags++;
pos += size;
}
skb_shinfo(skb1)->nr_frags = k;
skb1->unreadable = skb->unreadable;
}
/**
* skb_split - Split fragmented skb to two parts at length len.
* @skb: the buffer to split
* @skb1: the buffer to receive the second part
* @len: new length for skb
*/
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
int pos = skb_headlen(skb);
const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
skb_zcopy_downgrade_managed(skb);
skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
skb_zerocopy_clone(skb1, skb, 0);
if (len < pos) /* Split line is inside header. */
skb_split_inside_header(skb, skb1, len, pos);
else /* Second chunk has no header, nothing to copy. */
skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
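/*
 * Illustrative sketch (not part of the original file): splitting a packet at
 * a byte offset, in the spirit of how TCP splits an over-sized segment. The
 * second buffer only needs linear room for whatever part of the header area
 * it may receive; the function name is hypothetical.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1;

	if (len >= skb->len)
		return NULL;

	skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!skb1)
		return NULL;

	/* @skb keeps the first @len bytes, @skb1 receives the rest. */
	skb_split(skb, skb1, len);
	return skb1;
}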
/* Shifting from/to a cloned skb is a no-go.
*
* Caller cannot keep skb_shinfo related pointers past calling here!
*/
static int skb_prepare_for_shift(struct sk_buff *skb)
{
return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
}
/**
* skb_shift - Shifts paged data partially from skb to another
* @tgt: buffer into which tail data gets added
* @skb: buffer from which the paged data comes from
* @shiftlen: shift up to this many bytes
*
* Attempts to shift up to @shiftlen worth of bytes, which may be less than
* the length of the skb, from @skb to @tgt. Returns the number of bytes shifted.
* It's up to the caller to free @skb if everything was shifted.
*
* If @tgt runs out of frags, the whole operation is aborted.
*
* @skb cannot include anything other than paged data, while @tgt is allowed
* to have non-paged data as well.
*
* TODO: full sized shift could be optimized but that would need
* specialized skb free'er to handle frags without up-to-date nr_frags.
*/
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
int from, to, merge, todo;
skb_frag_t *fragfrom, *fragto;
BUG_ON(shiftlen > skb->len);
if (skb_headlen(skb))
return 0;
if (skb_zcopy(tgt) || skb_zcopy(skb))
return 0;
DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle);
DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb));
todo = shiftlen;
from = 0;
to = skb_shinfo(tgt)->nr_frags;
fragfrom = &skb_shinfo(skb)->frags[from];
/* Actual merge is delayed until the point when we know we can
* commit all, so that we don't have to undo partial changes
*/
if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
skb_frag_off(fragfrom))) {
merge = -1;
} else {
merge = to - 1;
todo -= skb_frag_size(fragfrom);
if (todo < 0) {
if (skb_prepare_for_shift(skb) ||
skb_prepare_for_shift(tgt))
return 0;
/* All previous frag pointers might be stale! */
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[merge];
skb_frag_size_add(fragto, shiftlen);
skb_frag_size_sub(fragfrom, shiftlen);
skb_frag_off_add(fragfrom, shiftlen);
goto onlymerged;
}
from++;
}
/* Skip full, not-fitting skb to avoid expensive operations */
if ((shiftlen == skb->len) &&
(skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
return 0;
if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
return 0;
while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
if (to == MAX_SKB_FRAGS)
return 0;
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[to];
if (todo >= skb_frag_size(fragfrom)) {
*fragto = *fragfrom;
todo -= skb_frag_size(fragfrom);
from++;
to++;
} else {
__skb_frag_ref(fragfrom);
skb_frag_page_copy(fragto, fragfrom);
skb_frag_off_copy(fragto, fragfrom);
skb_frag_size_set(fragto, todo);
skb_frag_off_add(fragfrom, todo);
skb_frag_size_sub(fragfrom, todo);
todo = 0;
to++;
break;
}
}
/* Ready to "commit" this state change to tgt */
skb_shinfo(tgt)->nr_frags = to;
if (merge >= 0) {
fragfrom = &skb_shinfo(skb)->frags[0];
fragto = &skb_shinfo(tgt)->frags[merge];
skb_frag_size_add(fragto, skb_frag_size(fragfrom));
__skb_frag_unref(fragfrom, skb->pp_recycle);
}
/* Reposition in the original skb */
to = 0;
while (from < skb_shinfo(skb)->nr_frags)
skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
skb_shinfo(skb)->nr_frags = to;
BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
onlymerged:
/* Most likely the tgt won't ever need its checksum anymore, skb on
* the other hand might need it if it needs to be resent
*/
tgt->ip_summed = CHECKSUM_PARTIAL;
skb->ip_summed = CHECKSUM_PARTIAL;
skb_len_add(skb, -shiftlen);
skb_len_add(tgt, shiftlen);
return shiftlen;
}
/**
* skb_prepare_seq_read - Prepare a sequential read of skb data
* @skb: the buffer to read
* @from: lower offset of data to be read
* @to: upper offset of data to be read
* @st: state variable
*
* Initializes the specified state variable. Must be called before
* invoking skb_seq_read() for the first time.
*/
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int to, struct skb_seq_state *st)
{
st->lower_offset = from;
st->upper_offset = to;
st->root_skb = st->cur_skb = skb;
st->frag_idx = st->stepped_offset = 0;
st->frag_data = NULL;
st->frag_off = 0;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
/**
* skb_seq_read - Sequentially read skb data
* @consumed: number of bytes consumed by the caller so far
* @data: destination pointer for data to be returned
* @st: state variable
*
* Reads a block of skb data at @consumed relative to the
* lower offset specified to skb_prepare_seq_read(). Assigns
* the head of the data block to @data and returns the length
* of the block or 0 if the end of the skb data or the upper
* offset has been reached.
*
* The caller is not required to consume all of the data
* returned, i.e. @consumed is typically set to the number
* of bytes already consumed and the next call to
* skb_seq_read() will return the remaining part of the block.
*
* Note 1: The size of each block of data returned can be arbitrary;
* this limitation is the cost of zerocopy sequential
* reads of potentially non-linear data.
*
* Note 2: Fragment lists within fragments are not implemented
* at the moment, state->root_skb could be replaced with
* a stack for this purpose.
*/
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st)
{
unsigned int block_limit, abs_offset = consumed + st->lower_offset;
skb_frag_t *frag;
if (unlikely(abs_offset >= st->upper_offset)) {
if (st->frag_data) {
kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
return 0;
}
next_skb:
block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
if (abs_offset < block_limit && !st->frag_data) {
*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
if (!skb_frags_readable(st->cur_skb))
return 0;
if (st->frag_idx == 0 && !st->frag_data)
st->stepped_offset += skb_headlen(st->cur_skb);
while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
unsigned int pg_idx, pg_off, pg_sz;
frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
pg_idx = 0;
pg_off = skb_frag_off(frag);
pg_sz = skb_frag_size(frag);
if (skb_frag_must_loop(skb_frag_page(frag))) {
pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
pg_off = offset_in_page(pg_off + st->frag_off);
pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
PAGE_SIZE - pg_off);
}
block_limit = pg_sz + st->stepped_offset;
if (abs_offset < block_limit) {
if (!st->frag_data)
st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
*data = (u8 *)st->frag_data + pg_off +
(abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
if (st->frag_data) {
kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
st->stepped_offset += pg_sz;
st->frag_off += pg_sz;
if (st->frag_off == skb_frag_size(frag)) {
st->frag_off = 0;
st->frag_idx++;
}
}
if (st->frag_data) {
kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
} else if (st->cur_skb->next) {
st->cur_skb = st->cur_skb->next;
st->frag_idx = 0;
goto next_skb;
}
return 0;
}
EXPORT_SYMBOL(skb_seq_read);
/**
* skb_abort_seq_read - Abort a sequential read of skb data
* @st: state variable
*
* Must be called if skb_seq_read() was not called until it
* returned 0.
*/
void skb_abort_seq_read(struct skb_seq_state *st)
{
if (st->frag_data)
kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
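/*
 * Illustrative sketch (not part of the original file): walking all of an
 * skb's data with the sequential read API without linearizing it. The
 * function name and the byte sum are hypothetical.
 */
static unsigned int example_sum_bytes(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len, sum = 0;
	const u8 *data;
	unsigned int i;

	skb_prepare_seq_read(skb, 0, skb->len, &st);

	/* skb_seq_read() returns 0 once the upper offset is reached. */
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			sum += data[i];
		consumed += len;
	}

	/* No skb_abort_seq_read() needed: the read ran to completion. */
	return sum;
}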
/**
* skb_copy_seq_read() - copy from a skb_seq_state to a buffer
* @st: source skb_seq_state
* @offset: offset in source
* @to: destination buffer
* @len: number of bytes to copy
*
* Copy @len bytes, starting @offset bytes into the source @st, to the
* destination buffer @to. @offset should increase (or be unchanged) with each
* subsequent call to this function. If @offset needs to decrease from the
* previous use, @st should be reset first.
*
* Return: 0 on success or -EINVAL if the copy ended early
*/
int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len)
{
const u8 *data;
u32 sqlen;
for (;;) {
sqlen = skb_seq_read(offset, &data, st);
if (sqlen == 0)
return -EINVAL;
if (sqlen >= len) {
memcpy(to, data, len);
return 0;
}
memcpy(to, data, sqlen);
to += sqlen;
offset += sqlen;
len -= sqlen;
}
}
EXPORT_SYMBOL(skb_copy_seq_read);
#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
struct ts_config *conf,
struct ts_state *state)
{
return skb_seq_read(offset, text, TS_SKB_CB(state));
}
static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
skb_abort_seq_read(TS_SKB_CB(state));
}
/**
* skb_find_text - Find a text pattern in skb data
* @skb: the buffer to look in
* @from: search offset
* @to: search limit
* @config: textsearch configuration
*
* Finds a pattern in the skb data according to the specified
* textsearch configuration. Use textsearch_next() to retrieve
* subsequent occurrences of the pattern. Returns the offset
* to the first occurrence or UINT_MAX if no match was found.
*/
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config)
{
unsigned int patlen = config->ops->get_pattern_len(config);
struct ts_state state;
unsigned int ret;
BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
config->get_next_block = skb_ts_get_next_block;
config->finish = skb_ts_finish;
skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
ret = textsearch_find(config, &state);
return (ret + patlen <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
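/*
 * Illustrative sketch (not part of the original file): scanning a packet for
 * a byte pattern in the way the xt_string match does. "kmp" is one of the
 * in-kernel textsearch algorithms; the function name is hypothetical.
 */
static bool example_contains_pattern(struct sk_buff *skb,
				     const char *pattern, unsigned int patlen)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, patlen, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);

	return pos != UINT_MAX;
}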
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
int offset, size_t size, size_t max_frags)
{
int i = skb_shinfo(skb)->nr_frags;
if (skb_can_coalesce(skb, i, page, offset)) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
} else if (i < max_frags) {
skb_zcopy_downgrade_managed(skb);
get_page(page);
skb_fill_page_desc_noacc(skb, i, page, offset, size);
} else {
return -EMSGSIZE;
}
return 0;
}
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
* @len: length of data pulled
*
* This function performs an skb_pull on the packet and updates
* the CHECKSUM_COMPLETE checksum. It should be used on
* receive path processing instead of skb_pull unless you know
* that the checksum difference is zero (e.g., a valid IP header)
* or you are setting ip_summed to CHECKSUM_NONE.
*/
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
unsigned char *data = skb->data;
BUG_ON(len > skb->len);
__skb_pull(skb, len);
skb_postpull_rcsum(skb, data, len);
return skb->data;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
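/*
 * Illustrative sketch (not part of the original file): stripping an
 * encapsulation header on the receive path while keeping a CHECKSUM_COMPLETE
 * value consistent. The header length and the function name are hypothetical.
 */
static int example_strip_header(struct sk_buff *skb, unsigned int hdrlen)
{
	if (!pskb_may_pull(skb, hdrlen))
		return -EINVAL;

	skb_pull_rcsum(skb, hdrlen);
	return 0;
}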
static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
{
skb_frag_t head_frag;
struct page *page;
page = virt_to_head_page(frag_skb->head);
skb_frag_fill_page_desc(&head_frag, page, frag_skb->data -
(unsigned char *)page_address(page),
skb_headlen(frag_skb));
return head_frag;
}
struct sk_buff *skb_segment_list(struct sk_buff *skb,
netdev_features_t features,
unsigned int offset)
{
struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
unsigned int tnl_hlen = skb_tnl_header_len(skb);
unsigned int delta_truesize = 0;
unsigned int delta_len = 0;
struct sk_buff *tail = NULL;
struct sk_buff *nskb, *tmp;
int len_diff, err;
skb_push(skb, -skb_network_offset(skb) + offset);
/* Ensure the head is writeable before touching the shared info */
err = skb_unclone(skb, GFP_ATOMIC);
if (err)
goto err_linearize;
skb_shinfo(skb)->frag_list = NULL;
while (list_skb) {
nskb = list_skb;
list_skb = list_skb->next;
err = 0;
delta_truesize += nskb->truesize;
if (skb_shared(nskb)) {
tmp = skb_clone(nskb, GFP_ATOMIC);
if (tmp) {
consume_skb(nskb);
nskb = tmp;
err = skb_unclone(nskb, GFP_ATOMIC);
} else {
err = -ENOMEM;
}
}
if (!tail)
skb->next = nskb;
else
tail->next = nskb;
if (unlikely(err)) {
nskb->next = list_skb;
goto err_linearize;
}
tail = nskb;
delta_len += nskb->len;
skb_push(nskb, -skb_network_offset(nskb) + offset);
skb_release_head_state(nskb);
len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
__copy_skb_header(nskb, skb);
skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
nskb->transport_header += len_diff;
skb_copy_from_linear_data_offset(skb, -tnl_hlen,
nskb->data - tnl_hlen,
offset + tnl_hlen);
if (skb_needs_linearize(nskb, features) &&
__skb_linearize(nskb))
goto err_linearize;
}
skb->truesize = skb->truesize - delta_truesize;
skb->data_len = skb->data_len - delta_len;
skb->len = skb->len - delta_len;
skb_gso_reset(skb);
skb->prev = tail;
if (skb_needs_linearize(skb, features) &&
__skb_linearize(skb))
goto err_linearize;
skb_get(skb);
return skb;
err_linearize:
kfree_skb_list(skb->next);
skb->next = NULL;
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(skb_segment_list);
/**
* skb_segment - Perform protocol segmentation on skb.
* @head_skb: buffer to segment
* @features: features for the output path (see dev->features)
*
* This function performs segmentation on the given skb. It returns
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
struct sk_buff *skb_segment(struct sk_buff *head_skb,
netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
unsigned int mss = skb_shinfo(head_skb)->gso_size;
unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
unsigned int offset = doffset;
unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int partial_segs = 0;
unsigned int headroom;
unsigned int len = head_skb->len;
struct sk_buff *frag_skb;
skb_frag_t *frag;
__be16 proto;
bool csum, sg;
int err = -ENOMEM;
int i = 0;
int nfrags, pos;
if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
struct sk_buff *check_skb;
for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
if (skb_headlen(check_skb) && !check_skb->head_frag) {
/* gso_size is untrusted, and we have a frag_list with
* a linear non head_frag item.
*
* If head_skb's headlen does not fit requested gso_size,
* it means that the frag_list members do NOT terminate
* on exact gso_size boundaries. Hence we cannot perform
* skb_frag_t page sharing. Therefore we must fallback to
* copying the frag_list skbs; we do so by disabling SG.
*/
features &= ~NETIF_F_SG;
break;
}
}
}
__skb_push(head_skb, doffset);
proto = skb_network_protocol(head_skb, NULL);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
sg = !!(features & NETIF_F_SG);
csum = !!can_checksum_protocol(features, proto);
if (sg && csum && (mss != GSO_BY_FRAGS)) {
if (!(features & NETIF_F_GSO_PARTIAL)) {
struct sk_buff *iter;
unsigned int frag_len;
if (!list_skb ||
!net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
goto normal;
/* If we get here then all the required
* GSO features except frag_list are supported.
* Try to split the SKB to multiple GSO SKBs
* with no frag_list.
* Currently we can do that only when the buffers don't
* have a linear part and all the buffers except
* the last are of the same length.
*/
frag_len = list_skb->len;
skb_walk_frags(head_skb, iter) {
if (frag_len != iter->len && iter->next)
goto normal;
if (skb_headlen(iter) && !iter->head_frag)
goto normal;
len -= iter->len;
}
if (len != frag_len)
goto normal;
}
/* GSO partial only requires that we trim off any excess that
* doesn't fit into an MSS sized block, so take care of that
* now.
* Cap len to not accidentally hit GSO_BY_FRAGS.
*/
partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;
if (partial_segs > 1)
mss *= partial_segs;
else
partial_segs = 0;
}
normal:
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
if (skb_orphan_frags(head_skb, GFP_ATOMIC))
return ERR_PTR(-ENOMEM);
nfrags = skb_shinfo(head_skb)->nr_frags;
frag = skb_shinfo(head_skb)->frags;
frag_skb = head_skb;
do {
struct sk_buff *nskb;
skb_frag_t *nskb_frag;
int hsize;
int size;
if (unlikely(mss == GSO_BY_FRAGS)) {
len = list_skb->len;
} else {
len = head_skb->len - offset;
if (len > mss)
len = mss;
}
hsize = skb_headlen(head_skb) - offset;
if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&
(skb_headlen(list_skb) == len || sg)) {
BUG_ON(skb_headlen(list_skb) > len);
nskb = skb_clone(list_skb, GFP_ATOMIC);
if (unlikely(!nskb))
goto err;
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
frag_skb = list_skb;
pos += skb_headlen(list_skb);
while (pos < offset + len) {
BUG_ON(i >= nfrags);
size = skb_frag_size(frag);
if (pos + size > offset + len)
break;
i++;
pos += size;
frag++;
}
list_skb = list_skb->next;
if (unlikely(pskb_trim(nskb, len))) {
kfree_skb(nskb);
goto err;
}
hsize = skb_end_offset(nskb);
if (skb_cow_head(nskb, doffset + headroom)) {
kfree_skb(nskb);
goto err;
}
nskb->truesize += skb_end_offset(nskb) - hsize;
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
if (hsize < 0)
hsize = 0;
if (hsize > len || !sg)
hsize = len;
nskb = __alloc_skb(hsize + doffset + headroom,
GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
NUMA_NO_NODE);
if (unlikely(!nskb))
goto err;
skb_reserve(nskb, headroom);
__skb_put(nskb, doffset);
}
if (segs)
tail->next = nskb;
else
segs = nskb;
tail = nskb;
__copy_skb_header(nskb, head_skb);
skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
skb_reset_mac_len(nskb);
skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
nskb->data - tnl_hlen,
doffset + tnl_hlen);
if (nskb->len == len + doffset)
goto perform_csum_check;
if (!sg) {
if (!csum) {
if (!nskb->remcsum_offload)
nskb->ip_summed = CHECKSUM_NONE;
SKB_GSO_CB(nskb)->csum =
skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb,
len),
len);
SKB_GSO_CB(nskb)->csum_start =
skb_headroom(nskb) + doffset;
} else {
if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
goto err;
}
continue;
}
nskb_frag = skb_shinfo(nskb)->frags;
skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
SKBFL_SHARED_FRAG;
if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
goto err;
while (pos < offset + len) {
if (i >= nfrags) {
if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
skb_zerocopy_clone(nskb, list_skb,
GFP_ATOMIC))
goto err;
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
frag_skb = list_skb;
if (!skb_headlen(list_skb)) {
BUG_ON(!nfrags);
} else {
BUG_ON(!list_skb->head_frag);
/* to make room for head_frag. */
i--;
frag--;
}
list_skb = list_skb->next;
}
if (unlikely(skb_shinfo(nskb)->nr_frags >=
MAX_SKB_FRAGS)) {
net_warn_ratelimited(
"skb_segment: too many frags: %u %u\n",
pos, mss);
err = -EINVAL;
goto err;
}
*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
__skb_frag_ref(nskb_frag);
size = skb_frag_size(nskb_frag);
if (pos < offset) {
skb_frag_off_add(nskb_frag, offset - pos);
skb_frag_size_sub(nskb_frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
if (pos + size <= offset + len) {
i++;
frag++;
pos += size;
} else {
skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
goto skip_fraglist;
}
nskb_frag++;
}
skip_fraglist:
nskb->data_len = len - hsize;
nskb->len += nskb->data_len;
nskb->truesize += nskb->data_len;
perform_csum_check:
if (!csum) {
if (skb_has_shared_frag(nskb) &&
__skb_linearize(nskb))
goto err;
if (!nskb->remcsum_offload)
nskb->ip_summed = CHECKSUM_NONE;
SKB_GSO_CB(nskb)->csum =
skb_checksum(nskb, doffset,
nskb->len - doffset, 0);
SKB_GSO_CB(nskb)->csum_start =
skb_headroom(nskb) + doffset;
}
} while ((offset += len) < head_skb->len);
/* Some callers want to get the end of the list.
* Put it in segs->prev to avoid walking the list.
* (see validate_xmit_skb_list() for example)
*/
segs->prev = tail;
if (partial_segs) {
struct sk_buff *iter;
int type = skb_shinfo(head_skb)->gso_type;
unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
/* Update type to add partial and then remove dodgy if set */
type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
type &= ~SKB_GSO_DODGY;
/* Update GSO info and prepare to start updating headers on
* our way back down the stack of protocols.
*/
for (iter = segs; iter; iter = iter->next) {
skb_shinfo(iter)->gso_size = gso_size;
skb_shinfo(iter)->gso_segs = partial_segs;
skb_shinfo(iter)->gso_type = type;
SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
}
if (tail->len - doffset <= gso_size)
skb_shinfo(tail)->gso_size = 0;
else if (tail != segs)
skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
}
/* Following permits correct backpressure, for protocols
* using skb_set_owner_w().
* The idea is to transfer ownership from head_skb to the last segment.
*/
if (head_skb->destructor == sock_wfree) {
swap(tail->truesize, head_skb->truesize);
swap(tail->destructor, head_skb->destructor);
swap(tail->sk, head_skb->sk);
}
return segs;
err:
kfree_skb_list(segs);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
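/*
 * Illustrative sketch (not part of the original file): software segmentation
 * of a GSO skb, in the spirit of the GSO fallback on the transmit path. The
 * function name is hypothetical, and the header preparation performed by the
 * real callers (e.g. skb_mac_gso_segment()) is assumed to have happened.
 */
static struct sk_buff *example_software_segment(struct sk_buff *gso_skb,
						netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_segment(gso_skb, features);
	if (IS_ERR_OR_NULL(segs))
		return NULL;

	/* The original skb is no longer needed once the segments exist. */
	consume_skb(gso_skb);
	return segs;
}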
#ifdef CONFIG_SKB_EXTENSIONS
#define SKB_EXT_ALIGN_VALUE 8
#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
static const u8 skb_ext_type_len[] = {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
[SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
#endif
#ifdef CONFIG_XFRM
[SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
[TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
#endif
#if IS_ENABLED(CONFIG_MPTCP)
[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
#endif
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
[SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
#endif
#if IS_ENABLED(CONFIG_INET_PSP)
[SKB_EXT_PSP] = SKB_EXT_CHUNKSIZEOF(struct psp_skb_ext),
#endif
};
static __always_inline unsigned int skb_ext_total_length(void)
{
unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext);
int i;
for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++)
l += skb_ext_type_len[i];
return l;
}
static void skb_extensions_init(void)
{
BUILD_BUG_ON(SKB_EXT_NUM >= 8);
#if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
BUILD_BUG_ON(skb_ext_total_length() > 255);
#endif
skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
}
#else
static void skb_extensions_init(void) {}
#endif
/* The SKB kmem_cache slab is critical for network performance. Never
* merge/alias the slab with similar sized objects. This avoids fragmentation
* that hurts performance of kmem_cache_{alloc,free}_bulk APIs.
*/
#ifndef CONFIG_SLUB_TINY
#define FLAG_SKB_NO_MERGE SLAB_NO_MERGE
#else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
#define FLAG_SKB_NO_MERGE 0
#endif
void __init skb_init(void)
{
net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|
FLAG_SKB_NO_MERGE,
offsetof(struct sk_buff, cb),
sizeof_field(struct sk_buff, cb),
NULL);
net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
/* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes.
* struct skb_shared_info is located at the end of skb->head,
* and should not be copied to/from user.
*/
net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
SKB_SMALL_HEAD_CACHE_SIZE,
0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC,
0,
SKB_SMALL_HEAD_HEADROOM,
NULL);
skb_extensions_init();
}
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
unsigned int recursion_level)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int elt = 0;
if (unlikely(recursion_level >= 24))
return -EMSGSIZE;
if (copy > 0) {
if (copy > len)
copy = len;
sg_set_buf(sg, skb->data + offset, copy);
elt++;
if ((len -= copy) == 0)
return elt;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
WARN_ON(start > offset + len);
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (unlikely(elt && sg_is_last(&sg[elt - 1])))
return -EMSGSIZE;
if (copy > len)
copy = len;
sg_set_page(&sg[elt], skb_frag_page(frag), copy,
skb_frag_off(frag) + offset - start);
elt++;
if (!(len -= copy))
return elt;
offset += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end, ret;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
if (unlikely(elt && sg_is_last(&sg[elt - 1])))
return -EMSGSIZE;
if (copy > len)
copy = len;
ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
copy, recursion_level + 1);
if (unlikely(ret < 0))
return ret;
elt += ret;
if ((len -= copy) == 0)
return elt;
offset += copy;
}
start = end;
}
BUG_ON(len);
return elt;
}
/**
* skb_to_sgvec - Fill a scatter-gather list from a socket buffer
* @skb: Socket buffer containing the buffers to be mapped
* @sg: The scatter-gather list to map into
* @offset: The offset into the buffer's contents to start mapping
* @len: Length of buffer space to be mapped
*
* Fill the specified scatter-gather list with mappings/pointers into a
* region of the buffer space attached to a socket buffer. Returns either
* the number of scatterlist items used, or -EMSGSIZE if the contents
* could not fit.
*/
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
if (nsg <= 0)
return nsg;
sg_mark_end(&sg[nsg - 1]);
return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
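/* Example (illustrative sketch, not part of the original source): map a
* whole skb into a caller-provided scatterlist. Sizing the table at
* MAX_SKB_FRAGS + 1 entries is an assumption made for the example; skbs
* with a frag_list may need more entries.
*
*	struct scatterlist sg[MAX_SKB_FRAGS + 1];
*	int nsg;
*
*	sg_init_table(sg, ARRAY_SIZE(sg));
*	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
*	if (nsg < 0)
*		return nsg;
*/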
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
* given sglist without marking the sg entry that contains the last skb data
* as the end. So the caller can manipulate the sg list at will when appending
* new data after the first call, without calling sg_unmark_end to extend it.
*
* Scenario to use skb_to_sgvec_nomark:
* 1. sg_init_table
* 2. skb_to_sgvec_nomark(payload1)
* 3. skb_to_sgvec_nomark(payload2)
*
* This is equivalent to:
* 1. sg_init_table
* 2. skb_to_sgvec(payload1)
* 3. sg_unmark_end
* 4. skb_to_sgvec(payload2)
*
* When mapping multiple payloads conditionally, skb_to_sgvec_nomark
* is preferable.
*/
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len)
{
return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
/**
* skb_cow_data - Check that a socket buffer's data buffers are writable
* @skb: The socket buffer to check.
* @tailbits: Amount of trailing space to be added
* @trailer: Returned pointer to the skb where the @tailbits space begins
*
* Make sure that the data buffers attached to a socket buffer are
* writable. If they are not, private copies are made of the data buffers
* and the socket buffer is set to use these instead.
*
* If @tailbits is given, make sure that there is space to write @tailbits
* bytes of data beyond current end of socket buffer. @trailer will be
* set to point to the skb in which this space begins.
*
* The number of scatterlist elements required to completely map the
* COW'd and extended socket buffer will be returned.
*/
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
int copyflag;
int elt;
struct sk_buff *skb1, **skb_p;
/* If skb is cloned or its head is paged, reallocate
* head pulling out all the pages (pages are considered not writable
* at the moment even if they are anonymous).
*/
if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
!__pskb_pull_tail(skb, __skb_pagelen(skb)))
return -ENOMEM;
/* Easy case. Most of packets will go this way. */
if (!skb_has_frag_list(skb)) {
/* A bit of trouble: not enough space for the trailer.
* This should not happen when the stack is tuned to generate
* good frames. On a miss we reallocate and reserve even more
* space; 128 bytes is fair. */
if (skb_tailroom(skb) < tailbits &&
pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
return -ENOMEM;
/* Voila! */
*trailer = skb;
return 1;
}
/* The hard case: we are in trouble and have to mince the fragments... */
elt = 1;
skb_p = &skb_shinfo(skb)->frag_list;
copyflag = 0;
while ((skb1 = *skb_p) != NULL) {
int ntail = 0;
/* The fragment is partially pulled by someone,
* this can happen on input. Copy it and everything
* after it. */
if (skb_shared(skb1))
copyflag = 1;
/* If the skb is the last, worry about trailer. */
if (skb1->next == NULL && tailbits) {
if (skb_shinfo(skb1)->nr_frags ||
skb_has_frag_list(skb1) ||
skb_tailroom(skb1) < tailbits)
ntail = tailbits + 128;
}
if (copyflag ||
skb_cloned(skb1) ||
ntail ||
skb_shinfo(skb1)->nr_frags ||
skb_has_frag_list(skb1)) {
struct sk_buff *skb2;
/* Worst case: this fragment must be copied (and possibly expanded). */
if (ntail == 0)
skb2 = skb_copy(skb1, GFP_ATOMIC);
else
skb2 = skb_copy_expand(skb1,
skb_headroom(skb1),
ntail,
GFP_ATOMIC);
if (unlikely(skb2 == NULL))
return -ENOMEM;
if (skb1->sk)
skb_set_owner_w(skb2, skb1->sk);
/* Link the new skb and drop the old one. */
skb2->next = skb1->next;
*skb_p = skb2;
kfree_skb(skb1);
skb1 = skb2;
}
elt++;
*trailer = skb1;
skb_p = &skb1->next;
}
return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
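/* Example (illustrative sketch, assuming an ESP-like transform that needs
* writable buffers plus trailer space; trailer_len is an assumed variable):
*
*	struct sk_buff *trailer;
*	int nfrags;
*
*	nfrags = skb_cow_data(skb, trailer_len, &trailer);
*	if (nfrags < 0)
*		return nfrags;
*	pskb_put(skb, trailer, trailer_len);
*
* The returned nfrags can then be used to size a scatterlist for
* skb_to_sgvec().
*/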
static void sock_rmem_free(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
static void skb_set_err_queue(struct sk_buff *skb)
{
/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
* So, it is safe to (mis)use it to mark skbs on the error queue.
*/
skb->pkt_type = PACKET_OUTGOING;
BUILD_BUG_ON(PACKET_OUTGOING == 0);
}
/*
* Note: we don't mem charge error packets (no sk_forward_alloc changes)
*/
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)READ_ONCE(sk->sk_rcvbuf))
return -ENOMEM;
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_rmem_free;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
skb_set_err_queue(skb);
/* before exiting rcu section, make sure dst is refcounted */
skb_dst_force(skb);
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
static bool is_icmp_err_skb(const struct sk_buff *skb)
{
return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
}
struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
{
struct sk_buff_head *q = &sk->sk_error_queue;
struct sk_buff *skb, *skb_next = NULL;
bool icmp_next = false;
unsigned long flags;
if (skb_queue_empty_lockless(q))
return NULL;
spin_lock_irqsave(&q->lock, flags);
skb = __skb_dequeue(q);
if (skb && (skb_next = skb_peek(q))) {
icmp_next = is_icmp_err_skb(skb_next);
if (icmp_next)
sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
}
spin_unlock_irqrestore(&q->lock, flags);
if (is_icmp_err_skb(skb) && !icmp_next)
sk->sk_err = 0;
if (skb_next)
sk_error_report(sk);
return skb;
}
EXPORT_SYMBOL(sock_dequeue_err_skb);
/**
* skb_clone_sk - create clone of skb, and take reference to socket
* @skb: the skb to clone
*
* This function creates a clone of a buffer that holds a reference on
* sk_refcnt. Buffers created via this function are meant to be
* returned using sock_queue_err_skb, or freed via kfree_skb.
*
* When passing buffers allocated with this function to sock_queue_err_skb
* it is necessary to wrap the call with sock_hold/sock_put in order to
* prevent the socket from being released prior to being enqueued on
* the sk_error_queue.
*/
struct sk_buff *skb_clone_sk(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct sk_buff *clone;
if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;
clone = skb_clone(skb, GFP_ATOMIC);
if (!clone) {
sock_put(sk);
return NULL;
}
clone->sk = sk;
clone->destructor = sock_efree;
return clone;
}
EXPORT_SYMBOL(skb_clone_sk);
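/* Example (illustrative sketch): queue a clone on the error queue while
* keeping the socket alive, following the sock_hold/sock_put rule described
* above. Error paths are condensed for brevity.
*
*	struct sk_buff *clone = skb_clone_sk(skb);
*	struct sock *sk;
*
*	if (clone) {
*		sk = clone->sk;
*		sock_hold(sk);
*		if (sock_queue_err_skb(sk, clone))
*			kfree_skb(clone);
*		sock_put(sk);
*	}
*/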
static void __skb_complete_tx_timestamp(struct sk_buff *skb,
struct sock *sk,
int tstype,
bool opt_stats)
{
struct sock_exterr_skb *serr;
int err;
BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = tstype;
serr->opt_stats = opt_stats;
serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk_is_tcp(sk))
serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
}
err = sock_queue_err_skb(sk, skb);
if (err)
kfree_skb(skb);
}
static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
bool ret;
if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data)))
return true;
read_lock_bh(&sk->sk_callback_lock);
ret = sk->sk_socket && sk->sk_socket->file &&
file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
read_unlock_bh(&sk->sk_callback_lock);
return ret;
}
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps)
{
struct sock *sk = skb->sk;
if (!skb_may_tx_timestamp(sk, false))
goto err;
/* Take a reference to prevent skb_orphan() from freeing the socket,
* but only if the socket refcount is not zero.
*/
if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
*skb_hwtstamps(skb) = *hwtstamps;
__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
sock_put(sk);
return;
}
err:
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps,
int tstype)
{
switch (tstype) {
case SCM_TSTAMP_SCHED:
return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP;
case SCM_TSTAMP_SND:
return skb_shinfo(skb)->tx_flags & (hwtstamps ? SKBTX_HW_TSTAMP_NOBPF :
SKBTX_SW_TSTAMP);
case SCM_TSTAMP_ACK:
return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK;
case SCM_TSTAMP_COMPLETION:
return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP;
}
return false;
}
static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk,
int tstype)
{
int op;
switch (tstype) {
case SCM_TSTAMP_SCHED:
op = BPF_SOCK_OPS_TSTAMP_SCHED_CB;
break;
case SCM_TSTAMP_SND:
if (hwtstamps) {
op = BPF_SOCK_OPS_TSTAMP_SND_HW_CB;
*skb_hwtstamps(skb) = *hwtstamps;
} else {
op = BPF_SOCK_OPS_TSTAMP_SND_SW_CB;
}
break;
case SCM_TSTAMP_ACK:
op = BPF_SOCK_OPS_TSTAMP_ACK_CB;
break;
default:
return;
}
bpf_skops_tx_timestamping(sk, skb, op);
}
void __skb_tstamp_tx(struct sk_buff *orig_skb,
const struct sk_buff *ack_skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype)
{
struct sk_buff *skb;
bool tsonly, opt_stats = false;
u32 tsflags;
if (!sk)
return;
if (skb_shinfo(orig_skb)->tx_flags & SKBTX_BPF)
skb_tstamp_tx_report_bpf_timestamping(orig_skb, hwtstamps,
sk, tstype);
if (!skb_tstamp_tx_report_so_timestamping(orig_skb, hwtstamps, tstype))
return;
tsflags = READ_ONCE(sk->sk_tsflags);
if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
return;
tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
if (!skb_may_tx_timestamp(sk, tsonly))
return;
if (tsonly) {
#ifdef CONFIG_INET
if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
sk_is_tcp(sk)) {
skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
ack_skb);
opt_stats = true;
} else
#endif
skb = alloc_skb(0, GFP_ATOMIC);
} else {
skb = skb_clone(orig_skb, GFP_ATOMIC);
if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
kfree_skb(skb);
return;
}
}
if (!skb)
return;
if (tsonly) {
skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
SKBTX_ANY_TSTAMP;
skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
}
if (hwtstamps)
*skb_hwtstamps(skb) = *hwtstamps;
else
__net_timestamp(skb);
__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps)
{
return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
SCM_TSTAMP_SND);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
#ifdef CONFIG_WIRELESS
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
struct sock *sk = skb->sk;
struct sock_exterr_skb *serr;
int err = 1;
skb->wifi_acked_valid = 1;
skb->wifi_acked = acked;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
/* Take a reference to prevent skb_orphan() from freeing the socket,
* but only if the socket refcount is not zero.
*/
if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
err = sock_queue_err_skb(sk, skb);
sock_put(sk);
}
if (err)
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
#endif /* CONFIG_WIRELESS */
/**
* skb_partial_csum_set - set up and verify partial csum values for packet
* @skb: the skb to set
* @start: the number of bytes after skb->data to start checksumming.
* @off: the offset from start to place the checksum.
*
* For untrusted partially-checksummed packets, we need to make sure the values
* for skb->csum_start and skb->csum_offset are valid so we don't oops.
*
* This function checks and sets those values and skb->ip_summed: if this
* returns false you should drop the packet.
*/
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
u32 csum_start = skb_headroom(skb) + (u32)start;
if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
start, off, skb_headroom(skb), skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = csum_start;
skb->csum_offset = off;
skb->transport_header = csum_start;
return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
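/* Example (illustrative sketch, assuming untrusted checksum metadata taken
* from a device- or guest-supplied header; csum_start and csum_offset are
* assumed local variables):
*
*	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
*		kfree_skb(skb);
*		return -EINVAL;
*	}
*/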
static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
unsigned int max)
{
if (skb_headlen(skb) >= len)
return 0;
/* If we need to pull up, then pull up to the max, so we
* won't need to do it again.
*/
if (max > skb->len)
max = skb->len;
if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
return -ENOMEM;
if (skb_headlen(skb) < len)
return -EPROTO;
return 0;
}
#define MAX_TCP_HDR_LEN (15 * 4)
static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
typeof(IPPROTO_IP) proto,
unsigned int off)
{
int err;
switch (proto) {
case IPPROTO_TCP:
err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
off + MAX_TCP_HDR_LEN);
if (!err && !skb_partial_csum_set(skb, off,
offsetof(struct tcphdr,
check)))
err = -EPROTO;
return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
case IPPROTO_UDP:
err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
off + sizeof(struct udphdr));
if (!err && !skb_partial_csum_set(skb, off,
offsetof(struct udphdr,
check)))
err = -EPROTO;
return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
}
return ERR_PTR(-EPROTO);
}
/* This value should be large enough to cover a tagged ethernet header plus
* maximally sized IP and TCP or UDP headers.
*/
#define MAX_IP_HDR_LEN 128
static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
{
unsigned int off;
bool fragment;
__sum16 *csum;
int err;
fragment = false;
err = skb_maybe_pull_tail(skb,
sizeof(struct iphdr),
MAX_IP_HDR_LEN);
if (err < 0)
goto out;
if (ip_is_fragment(ip_hdr(skb)))
fragment = true;
off = ip_hdrlen(skb);
err = -EPROTO;
if (fragment)
goto out;
csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
if (IS_ERR(csum))
return PTR_ERR(csum);
if (recalculate)
*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - off,
ip_hdr(skb)->protocol, 0);
err = 0;
out:
return err;
}
/* This value should be large enough to cover a tagged ethernet header plus
* an IPv6 header, all options, and a maximal TCP or UDP header.
*/
#define MAX_IPV6_HDR_LEN 256
#define OPT_HDR(type, skb, off) \
(type *)(skb_network_header(skb) + (off))
static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
int err;
u8 nexthdr;
unsigned int off;
unsigned int len;
bool fragment;
bool done;
__sum16 *csum;
fragment = false;
done = false;
off = sizeof(struct ipv6hdr);
err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
nexthdr = ipv6_hdr(skb)->nexthdr;
len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
while (off <= len && !done) {
switch (nexthdr) {
case IPPROTO_DSTOPTS:
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING: {
struct ipv6_opt_hdr *hp;
err = skb_maybe_pull_tail(skb,
off +
sizeof(struct ipv6_opt_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
nexthdr = hp->nexthdr;
off += ipv6_optlen(hp);
break;
}
case IPPROTO_AH: {
struct ip_auth_hdr *hp;
err = skb_maybe_pull_tail(skb,
off +
sizeof(struct ip_auth_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct ip_auth_hdr, skb, off);
nexthdr = hp->nexthdr;
off += ipv6_authlen(hp);
break;
}
case IPPROTO_FRAGMENT: {
struct frag_hdr *hp;
err = skb_maybe_pull_tail(skb,
off +
sizeof(struct frag_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct frag_hdr, skb, off);
if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
fragment = true;
nexthdr = hp->nexthdr;
off += sizeof(struct frag_hdr);
break;
}
default:
done = true;
break;
}
}
err = -EPROTO;
if (!done || fragment)
goto out;
csum = skb_checksum_setup_ip(skb, nexthdr, off);
if (IS_ERR(csum))
return PTR_ERR(csum);
if (recalculate)
*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - off, nexthdr, 0);
err = 0;
out:
return err;
}
/**
* skb_checksum_setup - set up partial checksum offset
* @skb: the skb to set up
* @recalculate: if true the pseudo-header checksum will be recalculated
*/
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
int err;
switch (skb->protocol) {
case htons(ETH_P_IP):
err = skb_checksum_setup_ipv4(skb, recalculate);
break;
case htons(ETH_P_IPV6):
err = skb_checksum_setup_ipv6(skb, recalculate);
break;
default:
err = -EPROTO;
break;
}
return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
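/* Example (illustrative sketch): fix up an untrusted, partially-checksummed
* packet from a virtual interface before handing it up the stack, asking for
* the pseudo-header checksum to be recalculated.
*
*	err = skb_checksum_setup(skb, true);
*	if (err)
*		goto drop;
*/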
/**
* skb_checksum_maybe_trim - maybe trims the given skb
* @skb: the skb to check
* @transport_len: the data length beyond the network header
*
* Checks whether the given skb has data beyond the given transport length.
* If so, returns a cloned skb trimmed to this transport length.
* Otherwise returns the provided skb. Returns NULL in error cases
* (e.g. transport_len exceeds skb length or out-of-memory).
*
* Caller needs to set the skb transport header and free any returned skb if it
* differs from the provided skb.
*/
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int transport_len)
{
struct sk_buff *skb_chk;
unsigned int len = skb_transport_offset(skb) + transport_len;
int ret;
if (skb->len < len)
return NULL;
else if (skb->len == len)
return skb;
skb_chk = skb_clone(skb, GFP_ATOMIC);
if (!skb_chk)
return NULL;
ret = pskb_trim_rcsum(skb_chk, len);
if (ret) {
kfree_skb(skb_chk);
return NULL;
}
return skb_chk;
}
/**
* skb_checksum_trimmed - validate checksum of an skb
* @skb: the skb to check
* @transport_len: the data length beyond the network header
* @skb_chkf: checksum function to use
*
* Applies the given checksum function skb_chkf to the provided skb.
* Returns a checked and maybe trimmed skb. Returns NULL on error.
*
* If the skb has data beyond the given transport length, then a
* trimmed & cloned skb is checked and returned.
*
* Caller needs to set the skb transport header and free any returned skb if it
* differs from the provided skb.
*/
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
__sum16(*skb_chkf)(struct sk_buff *skb))
{
struct sk_buff *skb_chk;
unsigned int offset = skb_transport_offset(skb);
__sum16 ret;
skb_chk = skb_checksum_maybe_trim(skb, transport_len);
if (!skb_chk)
goto err;
if (!pskb_may_pull(skb_chk, offset))
goto err;
skb_pull_rcsum(skb_chk, offset);
ret = skb_chkf(skb_chk);
skb_push_rcsum(skb_chk, offset);
if (ret)
goto err;
return skb_chk;
err:
if (skb_chk && skb_chk != skb)
kfree_skb(skb_chk);
return NULL;
}
EXPORT_SYMBOL(skb_checksum_trimmed);
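/* Example (illustrative sketch, assuming a validator my_chkf() with the
* __sum16 (*)(struct sk_buff *) signature that returns 0 on success):
*
*	struct sk_buff *skb_chk;
*
*	skb_chk = skb_checksum_trimmed(skb, transport_len, my_chkf);
*	if (!skb_chk)
*		goto drop;
*	... use skb_chk ...
*	if (skb_chk != skb)
*		kfree_skb(skb_chk);
*/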
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
kmem_cache_free(net_hotdata.skbuff_cache, skb);
} else {
__kfree_skb(skb);
}
}
EXPORT_SYMBOL(kfree_skb_partial);
/**
* skb_try_coalesce - try to merge skb to prior one
* @to: prior buffer
* @from: buffer to add
* @fragstolen: pointer to boolean
* @delta_truesize: how much more was allocated than was requested
*/
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
bool *fragstolen, int *delta_truesize)
{
struct skb_shared_info *to_shinfo, *from_shinfo;
int i, delta, len = from->len;
*fragstolen = false;
if (skb_cloned(to))
return false;
/* In general, avoid mixing page_pool and non-page_pool allocated
* pages within the same SKB. In theory we could take full
* references if @from is cloned and !@to->pp_recycle but it's
* tricky (due to potential race with the clone disappearing) and
* rare, so not worth dealing with.
*/
if (to->pp_recycle != from->pp_recycle)
return false;
if (skb_frags_readable(from) != skb_frags_readable(to))
return false;
if (len <= skb_tailroom(to) && skb_frags_readable(from)) {
if (len)
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
*delta_truesize = 0;
return true;
}
to_shinfo = skb_shinfo(to);
from_shinfo = skb_shinfo(from);
if (to_shinfo->frag_list || from_shinfo->frag_list)
return false;
if (skb_zcopy(to) || skb_zcopy(from))
return false;
if (skb_headlen(from) != 0) {
struct page *page;
unsigned int offset;
if (to_shinfo->nr_frags +
from_shinfo->nr_frags >= MAX_SKB_FRAGS)
return false;
if (skb_head_is_locked(from))
return false;
delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
skb_fill_page_desc(to, to_shinfo->nr_frags,
page, offset, skb_headlen(from));
*fragstolen = true;
} else {
if (to_shinfo->nr_frags +
from_shinfo->nr_frags > MAX_SKB_FRAGS)
return false;
delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
}
WARN_ON_ONCE(delta < len);
memcpy(to_shinfo->frags + to_shinfo->nr_frags,
from_shinfo->frags,
from_shinfo->nr_frags * sizeof(skb_frag_t));
to_shinfo->nr_frags += from_shinfo->nr_frags;
if (!skb_cloned(from))
from_shinfo->nr_frags = 0;
/* if the skb is not cloned this does nothing
* since we set nr_frags to 0.
*/
if (skb_pp_frag_ref(from)) {
for (i = 0; i < from_shinfo->nr_frags; i++)
__skb_frag_ref(&from_shinfo->frags[i]);
}
to->truesize += delta;
to->len += len;
to->data_len += len;
*delta_truesize = delta;
return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
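/* Example (illustrative sketch): merge a freshly received buffer into the
* tail of a receive queue; the sk_mem_charge() accounting step is an
* assumption modelled on typical callers.
*
*	bool fragstolen;
*	int delta;
*
*	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
*		sk_mem_charge(sk, delta);
*		kfree_skb_partial(skb, fragstolen);
*	}
*/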
/**
* skb_scrub_packet - scrub an skb
*
* @skb: buffer to clean
* @xnet: packet is crossing netns
*
* skb_scrub_packet can be used after encapsulating or decapsulating a packet
* into/from a tunnel. Some information has to be cleared during these
* operations.
* skb_scrub_packet can also be used to clean a skb before injecting it in
* another namespace (@xnet == true). We have to clear all information in the
* skb that could impact namespace isolation.
*/
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->ignore_df = 0;
skb_dst_drop(skb);
skb_ext_reset(skb);
nf_reset_ct(skb);
nf_reset_trace(skb);
#ifdef CONFIG_NET_SWITCHDEV
skb->offload_fwd_mark = 0;
skb->offload_l3_fwd_mark = 0;
#endif
ipvs_reset(skb);
if (!xnet)
return;
skb->mark = 0;
skb_clear_tstamp(skb);
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
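/* Example (illustrative sketch): scrub a decapsulated packet before
* forwarding it to a device that may live in another network namespace;
* from_dev and to_dev are assumed variables.
*
*	skb_scrub_packet(skb, !net_eq(dev_net(from_dev), dev_net(to_dev)));
*/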
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
int mac_len, meta_len;
void *meta;
if (skb_cow(skb, skb_headroom(skb)) < 0) {
kfree_skb(skb);
return NULL;
}
mac_len = skb->data - skb_mac_header(skb);
if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
mac_len - VLAN_HLEN - ETH_TLEN);
}
meta_len = skb_metadata_len(skb);
if (meta_len) {
meta = skb_metadata_end(skb) - meta_len;
memmove(meta + VLAN_HLEN, meta, meta_len);
}
skb->mac_header += VLAN_HLEN;
return skb;
}
struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
struct vlan_hdr *vhdr;
u16 vlan_tci;
if (unlikely(skb_vlan_tag_present(skb))) {
/* vlan_tci is already set-up so leave this for another time */
return skb;
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto err_free;
/* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
goto err_free;
vhdr = (struct vlan_hdr *)skb->data;
vlan_tci = ntohs(vhdr->h_vlan_TCI);
__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
skb_pull_rcsum(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vhdr);
skb = skb_reorder_vlan_header(skb);
if (unlikely(!skb))
goto err_free;
skb_reset_network_header(skb);
if (!skb_transport_header_was_set(skb))
skb_reset_transport_header(skb);
skb_reset_mac_len(skb);
return skb;
err_free:
kfree_skb(skb);
return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);
int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
{
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev)
{
int needed_headroom = dev->needed_headroom;
int needed_tailroom = dev->needed_tailroom;
/* For tail taggers, we need to pad short frames ourselves, to ensure
* that the tail tag does not fail at its role of being at the end of
* the packet, once the conduit interface pads the frame. Account for
* that pad length here, and pad later.
*/
if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
needed_tailroom += ETH_ZLEN - skb->len;
/* skb_headroom() returns unsigned int... */
needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
/* No reallocation needed, yay! */
return 0;
return pskb_expand_head(skb, needed_headroom, needed_tailroom,
GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable_head_tail);
/* Remove the VLAN header from a packet and update the csum accordingly.
* Expects a non-skb_vlan_tag_present skb with a VLAN tag payload.
*/
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
int offset = skb->data - skb_mac_header(skb);
int err;
if (WARN_ONCE(offset,
"__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
offset)) {
return -EINVAL;
}
err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
if (unlikely(err))
return err;
skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
vlan_remove_tag(skb, vlan_tci);
skb->mac_header += VLAN_HLEN;
if (skb_network_offset(skb) < ETH_HLEN)
skb_set_network_header(skb, ETH_HLEN);
skb_reset_mac_len(skb);
return err;
}
EXPORT_SYMBOL(__skb_vlan_pop);
/* Pop a vlan tag either from hwaccel or from payload.
* Expects skb->data at mac header.
*/
int skb_vlan_pop(struct sk_buff *skb)
{
u16 vlan_tci;
__be16 vlan_proto;
int err;
if (likely(skb_vlan_tag_present(skb))) {
__vlan_hwaccel_clear_tag(skb);
} else {
if (unlikely(!eth_type_vlan(skb->protocol)))
return 0;
err = __skb_vlan_pop(skb, &vlan_tci);
if (err)
return err;
}
/* move next vlan tag to hw accel tag */
if (likely(!eth_type_vlan(skb->protocol)))
return 0;
vlan_proto = skb->protocol;
err = __skb_vlan_pop(skb, &vlan_tci);
if (unlikely(err))
return err;
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);
/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
* Expects skb->data at mac header.
*/
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
if (skb_vlan_tag_present(skb)) {
int offset = skb->data - skb_mac_header(skb);
int err;
if (WARN_ONCE(offset,
"skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
offset)) {
return -EINVAL;
}
err = __vlan_insert_tag(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (err)
return err;
skb->protocol = skb->vlan_proto;
skb->network_header -= VLAN_HLEN;
skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
}
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
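/* Example (illustrative sketch): tag a packet with an 802.1Q header; the
* VLAN ID of 10 is an arbitrary value chosen for the example.
*
*	err = skb_vlan_push(skb, htons(ETH_P_8021Q), 10);
*	if (err)
*		return err;
*/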
/**
* skb_eth_pop() - Drop the Ethernet header at the head of a packet
*
* @skb: Socket buffer to modify
*
* Drop the Ethernet header of @skb.
*
* Expects that skb->data points to the mac header and that no VLAN tags are
* present.
*
* Returns 0 on success, -errno otherwise.
*/
int skb_eth_pop(struct sk_buff *skb)
{
if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
skb_network_offset(skb) < ETH_HLEN)
return -EPROTO;
skb_pull_rcsum(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
return 0;
}
EXPORT_SYMBOL(skb_eth_pop);
/**
* skb_eth_push() - Add a new Ethernet header at the head of a packet
*
* @skb: Socket buffer to modify
* @dst: Destination MAC address of the new header
* @src: Source MAC address of the new header
*
* Prepend @skb with a new Ethernet header.
*
* Expects that skb->data points to the mac header, which must be empty.
*
* Returns 0 on success, -errno otherwise.
*/
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
const unsigned char *src)
{
struct ethhdr *eth;
int err;
if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
return -EPROTO;
err = skb_cow_head(skb, sizeof(*eth));
if (err < 0)
return err;
skb_push(skb, sizeof(*eth));
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
eth = eth_hdr(skb);
ether_addr_copy(eth->h_dest, dst);
ether_addr_copy(eth->h_source, src);
eth->h_proto = skb->protocol;
skb_postpush_rcsum(skb, eth, sizeof(*eth));
return 0;
}
EXPORT_SYMBOL(skb_eth_push);
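/* Example (illustrative sketch): re-add an Ethernet header that was removed
* earlier with skb_eth_pop(); dst_mac and src_mac are assumed buffers.
*
*	err = skb_eth_push(skb, dst_mac, src_mac);
*	if (err)
*		return err;
*/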
/* Update the ethertype of hdr and the skb csum value if required. */
static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
__be16 ethertype)
{
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be16 diff[] = { ~hdr->h_proto, ethertype };
skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
}
hdr->h_proto = ethertype;
}
/**
* skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
* the packet
*
* @skb: buffer
* @mpls_lse: MPLS label stack entry to push
* @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
* @mac_len: length of the MAC header
* @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
* ethernet
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
int mac_len, bool ethernet)
{
struct mpls_shim_hdr *lse;
int err;
if (unlikely(!eth_p_mpls(mpls_proto)))
return -EINVAL;
/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
if (skb->encapsulation)
return -EINVAL;
err = skb_cow_head(skb, MPLS_HLEN);
if (unlikely(err))
return err;
if (!skb->inner_protocol) {
skb_set_inner_network_header(skb, skb_network_offset(skb));
skb_set_inner_protocol(skb, skb->protocol);
}
skb_push(skb, MPLS_HLEN);
memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
mac_len);
skb_reset_mac_header(skb);
skb_set_network_header(skb, mac_len);
skb_reset_mac_len(skb);
lse = mpls_hdr(skb);
lse->label_stack_entry = mpls_lse;
skb_postpush_rcsum(skb, lse, MPLS_HLEN);
if (ethernet && mac_len >= ETH_HLEN)
skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
skb->protocol = mpls_proto;
return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_push);
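/* Example (illustrative sketch): push a single bottom-of-stack MPLS label.
* The label value 100 and TTL 64 are arbitrary choices for the example; the
* MPLS_LS_* masks and shifts come from the UAPI MPLS header.
*
*	u32 lse = (100 << MPLS_LS_LABEL_SHIFT) |
*		  (64 << MPLS_LS_TTL_SHIFT) |
*		  MPLS_LS_S_MASK;
*
*	err = skb_mpls_push(skb, cpu_to_be32(lse), htons(ETH_P_MPLS_UC),
*			    skb->mac_len, true);
*/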
/**
* skb_mpls_pop() - pop the outermost MPLS header
*
* @skb: buffer
* @next_proto: ethertype of header after popped MPLS header
* @mac_len: length of the MAC header
* @ethernet: flag to indicate if the packet is ethernet
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
bool ethernet)
{
int err;
if (unlikely(!eth_p_mpls(skb->protocol)))
return 0;
err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
mac_len);
__skb_pull(skb, MPLS_HLEN);
skb_reset_mac_header(skb);
skb_set_network_header(skb, mac_len);
if (ethernet && mac_len >= ETH_HLEN) {
struct ethhdr *hdr;
/* use mpls_hdr() to get ethertype to account for VLANs. */
hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
skb_mod_eth_type(skb, hdr, next_proto);
}
skb->protocol = next_proto;
return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_pop);
/**
* skb_mpls_update_lse() - modify outermost MPLS header and update csum
*
* @skb: buffer
* @mpls_lse: new MPLS label stack entry to update to
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
{
int err;
if (unlikely(!eth_p_mpls(skb->protocol)))
return -EINVAL;
err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
}
mpls_hdr(skb)->label_stack_entry = mpls_lse;
return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
/**
* skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
*
* @skb: buffer
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
int skb_mpls_dec_ttl(struct sk_buff *skb)
{
u32 lse;
u8 ttl;
if (unlikely(!eth_p_mpls(skb->protocol)))
return -EINVAL;
if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
return -ENOMEM;
lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
if (!--ttl)
return -EINVAL;
lse &= ~MPLS_LS_TTL_MASK;
lse |= ttl << MPLS_LS_TTL_SHIFT;
return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}
EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
/**
* alloc_skb_with_frags - allocate skb with page frags
*
* @header_len: size of linear part
* @data_len: needed length in frags
* @order: max page order desired.
* @errcode: pointer to error code if any
* @gfp_mask: allocation mask
*
* This can be used to allocate a paged skb, given a maximal order for frags.
*/
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
unsigned long data_len,
int order,
int *errcode,
gfp_t gfp_mask)
{
unsigned long chunk;
struct sk_buff *skb;
struct page *page;
int nr_frags = 0;
*errcode = -EMSGSIZE;
if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
return NULL;
*errcode = -ENOBUFS;
skb = alloc_skb(header_len, gfp_mask);
if (!skb)
return NULL;
while (data_len) {
if (nr_frags == MAX_SKB_FRAGS)
goto failure;
while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
order--;
if (order) {
page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
__GFP_COMP |
__GFP_NOWARN,
order);
if (!page) {
order--;
continue;
}
} else {
page = alloc_page(gfp_mask);
if (!page)
goto failure;
}
chunk = min_t(unsigned long, data_len,
PAGE_SIZE << order);
skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
nr_frags++;
skb->truesize += (PAGE_SIZE << order);
data_len -= chunk;
}
return skb;
failure:
kfree_skb(skb);
return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
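/* Example (illustrative sketch): allocate a paged skb for a large datagram;
* the header_len/data_len split and the page order are assumptions for the
* example.
*
*	int errcode;
*	struct sk_buff *skb;
*
*	skb = alloc_skb_with_frags(header_len, data_len,
*				   PAGE_ALLOC_COSTLY_ORDER, &errcode,
*				   GFP_KERNEL);
*	if (!skb)
*		return ERR_PTR(errcode);
*/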
/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
const int headlen, gfp_t gfp_mask)
{
int i;
unsigned int size = skb_end_offset(skb);
int new_hlen = headlen - off;
u8 *data;
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
size = SKB_WITH_OVERHEAD(size);
/* Copy real data, and all frags */
skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
skb->len -= off;
memcpy((struct skb_shared_info *)(data + size),
skb_shinfo(skb),
offsetof(struct skb_shared_info,
frags[skb_shinfo(skb)->nr_frags]));
if (skb_cloned(skb)) {
/* drop the old head gracefully */
if (skb_orphan_frags(skb, gfp_mask)) {
skb_kfree_head(data, size);
return -ENOMEM;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
skb_release_data(skb, SKB_CONSUMED);
} else {
/* we can reuse the existing refcount - all we did was
* relocate values
*/
skb_free_head(skb);
}
skb->head = data;
skb->data = data;
skb->head_frag = 0;
skb_set_end_offset(skb, size);
skb_set_tail_pointer(skb, skb_headlen(skb));
skb_headers_offset_update(skb, 0);
skb->cloned = 0;
skb->hdr_len = 0;
skb->nohdr = 0;
atomic_set(&skb_shinfo(skb)->dataref, 1);
return 0;
}
static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
/* carve out the first eat bytes from skb's frag_list. May recurse into
* pskb_carve()
*/
static int pskb_carve_frag_list(struct skb_shared_info *shinfo, int eat,
gfp_t gfp_mask)
{
struct sk_buff *list = shinfo->frag_list;
struct sk_buff *clone = NULL;
struct sk_buff *insp = NULL;
do {
if (!list) {
pr_err("Not enough bytes to eat. Want %d\n", eat);
return -EFAULT;
}
if (list->len <= eat) {
/* Eaten as whole. */
eat -= list->len;
list = list->next;
insp = list;
} else {
/* Eaten partially. */
if (skb_shared(list)) {
clone = skb_clone(list, gfp_mask);
if (!clone)
return -ENOMEM;
insp = list->next;
list = clone;
} else {
/* This may be pulled without problems. */
insp = list;
}
if (pskb_carve(list, eat, gfp_mask) < 0) {
kfree_skb(clone);
return -ENOMEM;
}
break;
}
} while (eat);
/* Free pulled out fragments. */
while ((list = shinfo->frag_list) != insp) {
shinfo->frag_list = list->next;
consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {
clone->next = list;
shinfo->frag_list = clone;
}
return 0;
}
/* carve off first len bytes from skb. Split line (off) is in the
* non-linear part of skb
*/
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
int pos, gfp_t gfp_mask)
{
int i, k = 0;
unsigned int size = skb_end_offset(skb);
u8 *data;
const int nfrags = skb_shinfo(skb)->nr_frags;
struct skb_shared_info *shinfo;
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
size = SKB_WITH_OVERHEAD(size);
memcpy((struct skb_shared_info *)(data + size),
skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
if (skb_orphan_frags(skb, gfp_mask)) {
skb_kfree_head(data, size);
return -ENOMEM;
}
shinfo = (struct skb_shared_info *)(data + size);
for (i = 0; i < nfrags; i++) {
int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (pos + fsize > off) {
shinfo->frags[k] = skb_shinfo(skb)->frags[i];
if (pos < off) {
/* Split the frag.
* We have two options in this case:
* 1. Move the whole frag to the second
* part, if possible. E.g. this approach
* is mandatory for TUX, where splitting
* is expensive.
* 2. Split accurately; this is what we do.
*/
skb_frag_off_add(&shinfo->frags[0], off - pos);
skb_frag_size_sub(&shinfo->frags[0], off - pos);
}
skb_frag_ref(skb, i);
k++;
}
pos += fsize;
}
shinfo->nr_frags = k;
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
/* split line is in frag list */
if (k == 0 && pskb_carve_frag_list(shinfo, off - pos, gfp_mask)) {
/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
if (skb_has_frag_list(skb))
kfree_skb_list(skb_shinfo(skb)->frag_list);
skb_kfree_head(data, size);
return -ENOMEM;
}
skb_release_data(skb, SKB_CONSUMED);
skb->head = data;
skb->head_frag = 0;
skb->data = data;
skb_set_end_offset(skb, size);
skb_reset_tail_pointer(skb);
skb_headers_offset_update(skb, 0);
skb->cloned = 0;
skb->hdr_len = 0;
skb->nohdr = 0;
skb->len -= off;
skb->data_len = skb->len;
atomic_set(&skb_shinfo(skb)->dataref, 1);
return 0;
}
/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
int headlen = skb_headlen(skb);
if (len < headlen)
return pskb_carve_inside_header(skb, len, headlen, gfp);
else
return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}
/* Extract to_copy bytes starting at off from skb, and return this in
* a new skb
*/
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
int to_copy, gfp_t gfp)
{
struct sk_buff *clone = skb_clone(skb, gfp);
if (!clone)
return NULL;
if (pskb_carve(clone, off, gfp) < 0 ||
pskb_trim(clone, to_copy)) {
kfree_skb(clone);
return NULL;
}
return clone;
}
EXPORT_SYMBOL(pskb_extract);
/**
* skb_condense - try to get rid of fragments/frag_list if possible
* @skb: buffer
*
* Can be used to save memory before skb is added to a busy queue.
* If packet has bytes in frags and enough tail room in skb->head,
* pull all of them, so that we can free the frags right now and adjust
* truesize.
* Notes:
* We do not reallocate skb->head thus can not fail.
* Caller must re-evaluate skb->truesize if needed.
*/
void skb_condense(struct sk_buff *skb)
{
if (skb->data_len) {
if (skb->data_len > skb->end - skb->tail ||
skb_cloned(skb) || !skb_frags_readable(skb))
return;
/* Nice, we can free page frag(s) right now */
__pskb_pull_tail(skb, skb->data_len);
}
/* At this point, skb->truesize might be overestimated,
* because skb had a fragment, and fragments do not tell
* their truesize.
* When we pulled its content into skb->head, fragment
* was freed, but __pskb_pull_tail() could not possibly
* adjust skb->truesize, not knowing the frag truesize.
*/
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
EXPORT_SYMBOL(skb_condense);
#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}
/**
* __skb_ext_alloc - allocate a new skb extensions storage
*
* @flags: See kmalloc().
*
* Returns the newly allocated pointer. The pointer can later be attached to an
* skb via __skb_ext_set().
* Note: the caller must handle the skb_ext as opaque data.
*/
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
if (new) {
memset(new->offset, 0, sizeof(new->offset));
refcount_set(&new->refcnt, 1);
}
return new;
}
static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
unsigned int old_active)
{
struct skb_ext *new;
if (refcount_read(&old->refcnt) == 1)
return old;
new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
if (!new)
return NULL;
memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
refcount_set(&new->refcnt, 1);
#ifdef CONFIG_XFRM
if (old_active & (1 << SKB_EXT_SEC_PATH)) {
struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
unsigned int i;
for (i = 0; i < sp->len; i++)
xfrm_state_hold(sp->xvec[i]);
}
#endif
#ifdef CONFIG_MCTP_FLOWS
if (old_active & (1 << SKB_EXT_MCTP)) {
struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
if (flow->key)
refcount_inc(&flow->key->refs);
}
#endif
__skb_ext_put(old);
return new;
}
/**
* __skb_ext_set - attach the specified extension storage to this skb
* @skb: buffer
* @id: extension id
* @ext: extension storage previously allocated via __skb_ext_alloc()
*
* Existing extensions, if any, are cleared.
*
* Returns the pointer to the extension.
*/
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
struct skb_ext *ext)
{
unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
skb_ext_put(skb);
newlen = newoff + skb_ext_type_len[id];
ext->chunks = newlen;
ext->offset[id] = newoff;
skb->extensions = ext;
skb->active_extensions = 1 << id;
return skb_ext_get_ptr(ext, id);
}
EXPORT_SYMBOL_NS_GPL(__skb_ext_set, "NETDEV_INTERNAL");
/**
* skb_ext_add - allocate space for given extension, COW if needed
* @skb: buffer
* @id: extension to allocate space for
*
* Allocates enough space for the given extension.
* If the extension is already present, a pointer to that extension
* is returned.
*
* If the skb was cloned, COW applies and the returned memory can be
* modified without changing the extension space of cloned buffers.
*
* Returns pointer to the extension or NULL on allocation failure.
*/
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
struct skb_ext *new, *old = NULL;
unsigned int newlen, newoff;
if (skb->active_extensions) {
old = skb->extensions;
new = skb_ext_maybe_cow(old, skb->active_extensions);
if (!new)
return NULL;
if (__skb_ext_exist(new, id))
goto set_active;
newoff = new->chunks;
} else {
newoff = SKB_EXT_CHUNKSIZEOF(*new);
new = __skb_ext_alloc(GFP_ATOMIC);
if (!new)
return NULL;
}
newlen = newoff + skb_ext_type_len[id];
new->chunks = newlen;
new->offset[id] = newoff;
set_active:
skb->slow_gro = 1;
skb->extensions = new;
skb->active_extensions |= 1 << id;
return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);
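/* Example (illustrative sketch, assuming CONFIG_XFRM): attach a sec_path
* extension, roughly what secpath_set() does.
*
*	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
*
*	if (!sp)
*		return -ENOMEM;
*	sp->len = 0;
*/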
#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
unsigned int i;
for (i = 0; i < sp->len; i++)
xfrm_state_put(sp->xvec[i]);
}
#endif
#ifdef CONFIG_MCTP_FLOWS
static void skb_ext_put_mctp(struct mctp_flow *flow)
{
if (flow->key)
mctp_key_unref(flow->key);
}
#endif
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
struct skb_ext *ext = skb->extensions;
skb->active_extensions &= ~(1 << id);
if (skb->active_extensions == 0) {
skb->extensions = NULL;
__skb_ext_put(ext);
#ifdef CONFIG_XFRM
} else if (id == SKB_EXT_SEC_PATH &&
refcount_read(&ext->refcnt) == 1) {
struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
skb_ext_put_sp(sp);
sp->len = 0;
#endif
}
}
EXPORT_SYMBOL(__skb_ext_del);
void __skb_ext_put(struct skb_ext *ext)
{
/* If this is the last reference, nothing can increment
* it after the check passes. This avoids one atomic op.
*/
if (refcount_read(&ext->refcnt) == 1)
goto free_now;
if (!refcount_dec_and_test(&ext->refcnt))
return;
free_now:
#ifdef CONFIG_XFRM
if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif
#ifdef CONFIG_MCTP_FLOWS
if (__skb_ext_exist(ext, SKB_EXT_MCTP))
skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
#endif
kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */
static void kfree_skb_napi_cache(struct sk_buff *skb)
{
/* if SKB is a clone, don't handle this case */
if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
__kfree_skb(skb);
return;
}
local_bh_disable();
__napi_kfree_skb(skb, SKB_CONSUMED);
local_bh_enable();
}
/**
* skb_attempt_defer_free - queue skb for remote freeing
* @skb: buffer
*
* Put @skb in a per-cpu list, using the cpu which
* allocated the skb/pages to reduce false sharing
* and memory zone spinlock contention.
*/
void skb_attempt_defer_free(struct sk_buff *skb)
{
struct skb_defer_node *sdn;
unsigned long defer_count;
int cpu = skb->alloc_cpu;
unsigned int defer_max;
bool kick;
if (cpu == raw_smp_processor_id() ||
WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
!cpu_online(cpu)) {
nodefer: kfree_skb_napi_cache(skb);
return;
}
DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
DEBUG_NET_WARN_ON_ONCE(skb->destructor);
DEBUG_NET_WARN_ON_ONCE(skb_nfct(skb));
sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();
defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
defer_count = atomic_long_inc_return(&sdn->defer_count);
if (defer_count >= defer_max)
goto nodefer;
llist_add(&skb->ll_node, &sdn->defer_list);
/* Send an IPI every time the queue reaches half capacity. */
kick = (defer_count - 1) == (defer_max >> 1);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
if (unlikely(kick))
kick_defer_list_purge(cpu);
}
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
size_t offset, size_t len)
{
const char *kaddr;
__wsum csum;
kaddr = kmap_local_page(page);
csum = csum_partial(kaddr + offset, len, 0);
kunmap_local(kaddr);
skb->csum = csum_block_add(skb->csum, csum, skb->len);
}
/**
* skb_splice_from_iter - Splice (or copy) pages to skbuff
* @skb: The buffer to add pages to
* @iter: Iterator representing the pages to be added
* @maxsize: Maximum amount of data to be added
*
* This is a common helper function for supporting MSG_SPLICE_PAGES. It
* extracts pages from an iterator and adds them to the socket buffer if
* possible, copying them to fragments if not possible (such as if they're slab
* pages).
*
* Returns the amount of data spliced/copied or -EMSGSIZE if there's
* insufficient space in the buffer to transfer anything.
*/
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
ssize_t maxsize)
{
size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags);
struct page *pages[8], **ppages = pages;
ssize_t spliced = 0, ret = 0;
unsigned int i;
while (iter->count > 0) {
ssize_t space, nr, len;
size_t off;
ret = -EMSGSIZE;
space = frag_limit - skb_shinfo(skb)->nr_frags;
if (space < 0)
break;
/* We might be able to coalesce without increasing nr_frags */
nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages));
len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off);
if (len <= 0) {
ret = len ?: -EIO;
break;
}
i = 0;
do {
struct page *page = pages[i++];
size_t part = min_t(size_t, PAGE_SIZE - off, len);
ret = -EIO;
if (WARN_ON_ONCE(!sendpage_ok(page)))
goto out;
ret = skb_append_pagefrags(skb, page, off, part,
frag_limit);
if (ret < 0) {
iov_iter_revert(iter, len);
goto out;
}
if (skb->ip_summed == CHECKSUM_NONE)
skb_splice_csum_page(skb, page, off, part);
off = 0;
spliced += part;
maxsize -= part;
len -= part;
} while (len > 0);
if (maxsize <= 0)
break;
}
out:
skb_len_add(skb, spliced);
return spliced ?: ret;
}
EXPORT_SYMBOL(skb_splice_from_iter);
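/* Example (illustrative sketch): a sendmsg() path honouring MSG_SPLICE_PAGES,
* loosely modelled on how datagram protocols use this helper; msg, skb and
* len are assumed variables.
*
*	if (msg->msg_flags & MSG_SPLICE_PAGES) {
*		ssize_t copied;
*
*		copied = skb_splice_from_iter(skb, &msg->msg_iter, len);
*		if (copied < 0)
*			goto error;
*	}
*/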
static __always_inline
size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
__wsum *csum = priv2;
__wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);
*csum = csum_block_add(*csum, next, progress);
return 0;
}
static __always_inline
size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
__wsum next, *csum = priv2;
next = csum_and_copy_from_user(iter_from, to + progress, len);
*csum = csum_block_add(*csum, next, progress);
return next ? 0 : len;
}
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
__wsum *csum, struct iov_iter *i)
{
size_t copied;
if (WARN_ON_ONCE(!i->data_source))
return false;
copied = iterate_and_advance2(i, bytes, addr, csum,
copy_from_user_iter_csum,
memcpy_from_iter_csum);
if (likely(copied == bytes))
return true;
iov_iter_revert(i, copied);
return false;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
void get_netmem(netmem_ref netmem)
{
struct net_iov *niov;
if (netmem_is_net_iov(netmem)) {
niov = netmem_to_net_iov(netmem);
if (net_is_devmem_iov(niov))
net_devmem_get_net_iov(netmem_to_net_iov(netmem));
return;
}
get_page(netmem_to_page(netmem));
}
EXPORT_SYMBOL(get_netmem);
void put_netmem(netmem_ref netmem)
{
struct net_iov *niov;
if (netmem_is_net_iov(netmem)) {
niov = netmem_to_net_iov(netmem);
if (net_is_devmem_iov(niov))
net_devmem_put_net_iov(netmem_to_net_iov(netmem));
return;
}
put_page(netmem_to_page(netmem));
}
EXPORT_SYMBOL(put_netmem);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
* cgroup interface
*
* Copyright (C) 2003 BULL SA
* Copyright (C) 2004-2006 Silicon Graphics, Inc.
*
*/
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup-defs.h>
#include <linux/cgroup_namespace.h>
struct kernel_clone_args;
/*
* All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
#ifdef CONFIG_CGROUPS
enum css_task_iter_flags {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
};
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
unsigned int flags;
struct list_head *cset_pos;
struct list_head *cset_head;
struct list_head *tcset_pos;
struct list_head *tcset_head;
struct list_head *task_pos;
struct list_head *cur_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
struct task_struct *cur_task;
struct list_head iters_node; /* css_set->task_iters */
};
enum cgroup_lifetime_events {
CGROUP_LIFETIME_ONLINE,
CGROUP_LIFETIME_OFFLINE,
};
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
extern struct blocking_notifier_head cgroup_lifetime_notifier;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS
#define SUBSYS(_x) \
extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS
/**
* cgroup_subsys_enabled - fast test on whether a subsys is enabled
* @ss: subsystem in question
*/
#define cgroup_subsys_enabled(ss) \
static_branch_likely(&ss ## _enabled_key)
/**
* cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
* @ss: subsystem in question
*/
#define cgroup_subsys_on_dfl(ss) \
static_branch_likely(&ss ## _on_dfl_key)
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
/*
* Iteration helpers and macros.
*/
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
/**
* css_for_each_child - iterate through children of a css
* @pos: the css * to use as the loop cursor
* @parent: css whose children to walk
*
* Walk @parent's children. Must be called under rcu_read_lock().
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
*
* It is allowed to temporarily drop RCU read lock during iteration. The
* caller is responsible for ensuring that @pos remains accessible until
* the start of the next iteration by, for example, bumping the css refcnt.
*/
#define css_for_each_child(pos, parent) \
for ((pos) = css_next_child(NULL, (parent)); (pos); \
(pos) = css_next_child((pos), (parent)))
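/*
* Example (illustrative only, not an in-tree user): counting the online
* children of a css with css_for_each_child(). The helper name below is
* hypothetical; the walk must run under rcu_read_lock().
*
*	static int count_online_children(struct cgroup_subsys_state *parent)
*	{
*		struct cgroup_subsys_state *child;
*		int n = 0;
*
*		rcu_read_lock();
*		css_for_each_child(child, parent)
*			if (child->flags & CSS_ONLINE)
*				n++;
*		rcu_read_unlock();
*		return n;
*	}
*/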
/**
* css_for_each_descendant_pre - pre-order walk of a css's descendants
* @pos: the css * to use as the loop cursor
* @root: css whose descendants to walk
*
* Walk @root's descendants. @root is included in the iteration and the
* first node to be visited. Must be called under rcu_read_lock().
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
*
* For example, the following guarantees that a descendant can't escape
* state updates of its ancestors.
*
* my_online(@css)
* {
* Lock @css's parent and @css;
* Inherit state from the parent;
* Unlock both.
* }
*
* my_update_state(@css)
* {
* css_for_each_descendant_pre(@pos, @css) {
* Lock @pos;
* if (@pos == @css)
* Update @css's state;
* else
* Verify @pos is alive and inherit state from its parent;
* Unlock @pos;
* }
* }
*
* As long as the inheriting step, including checking the parent state, is
* enclosed inside @pos locking, double-locking the parent isn't necessary
* while inheriting. The state update to the parent is guaranteed to be
* visible by walking order and, as long as inheriting operations to the
* same @pos are atomic to each other, multiple updates racing each other
* still result in the correct state. It's guaranteed that at least one
* inheritance happens for any css after the latest update to its parent.
*
* If checking parent's state requires locking the parent, each inheriting
* iteration should lock and unlock both @pos->parent and @pos.
*
* Alternatively, a subsystem may choose to use a single global lock to
* synchronize ->css_online() and ->css_offline() against tree-walking
* operations.
*
* It is allowed to temporarily drop RCU read lock during iteration. The
* caller is responsible for ensuring that @pos remains accessible until
* the start of the next iteration by, for example, bumping the css refcnt.
*/
#define css_for_each_descendant_pre(pos, css) \
for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
(pos) = css_next_descendant_pre((pos), (css)))
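/*
* A minimal C rendering of the my_update_state() pattern above (illustrative
* only; "to_my()" is a hypothetical container_of() wrapper and "lock"/"state"
* are hypothetical per-css fields):
*
*	rcu_read_lock();
*	css_for_each_descendant_pre(pos, css) {
*		spin_lock(&to_my(pos)->lock);
*		if (pos == css)
*			to_my(pos)->state = new_state;
*		else if (css_is_online(pos))
*			to_my(pos)->state = to_my(pos->parent)->state;
*		spin_unlock(&to_my(pos)->lock);
*	}
*	rcu_read_unlock();
*/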
/**
* css_for_each_descendant_post - post-order walk of a css's descendants
* @pos: the css * to use as the loop cursor
* @css: css whose descendants to walk
*
* Similar to css_for_each_descendant_pre() but performs post-order
* traversal instead. @root is included in the iteration and the last
* node to be visited.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
*
* Note that the walk visibility guarantee example described in pre-order
* walk doesn't apply to post-order walks in the same way.
*/
#define css_for_each_descendant_post(pos, css) \
for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
(pos) = css_next_descendant_post((pos), (css)))
/**
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
* @dst_css: the destination css
* @tset: taskset to iterate
*
* @tset may contain multiple tasks and they may belong to multiple
* processes.
*
* On the v2 hierarchy, there may be tasks from multiple processes and they
* may not share the source or destination csses.
*
* On traditional hierarchies, when there are multiple tasks in @tset, if a
* task of a process is in @tset, all tasks of the process are in @tset.
* Also, all are guaranteed to share the same source and destination csses.
*
* Iteration is not in any specific order.
*/
#define cgroup_taskset_for_each(task, dst_css, tset) \
for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
(task); \
(task) = cgroup_taskset_next((tset), &(dst_css)))
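/*
* Example ->attach() sketch (illustrative, not taken from an in-tree
* controller): "my_css()" and "my_move_task()" are hypothetical helpers of
* the controller using this iterator.
*
*	static void my_attach(struct cgroup_taskset *tset)
*	{
*		struct cgroup_subsys_state *dst_css;
*		struct task_struct *task;
*
*		cgroup_taskset_for_each(task, dst_css, tset)
*			my_move_task(task, my_css(dst_css));
*	}
*/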
/**
* cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
* @leader: the loop cursor
* @dst_css: the destination css
* @tset: taskset to iterate
*
* Iterate threadgroup leaders of @tset. For single-task migrations, @tset
* may not contain any.
*/
#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
(leader); \
(leader) = cgroup_taskset_next((tset), &(dst_css))) \
if ((leader) != (leader)->group_leader) \
; \
else
/*
* Inline functions.
*/
#ifdef CONFIG_DEBUG_CGROUP_REF
void css_get(struct cgroup_subsys_state *css);
void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
bool css_tryget(struct cgroup_subsys_state *css);
bool css_tryget_online(struct cgroup_subsys_state *css);
void css_put(struct cgroup_subsys_state *css);
void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
#else
#define CGROUP_REF_FN_ATTRS static inline
#define CGROUP_REF_EXPORT(fn)
#include <linux/cgroup_refcnt.h>
#endif
static inline u64 cgroup_id(const struct cgroup *cgrp)
{
return cgrp->kn->id;
}
/**
* css_is_dying - test whether the specified css is dying
* @css: target css
*
* Test whether @css is in the process of offlining or already offline. In
* most cases, ->css_online() and ->css_offline() callbacks should be
* enough; however, the actual offline operations are RCU delayed and this
* test returns %true also when @css is scheduled to be offlined.
*
* This is useful, for example, when the use case requires synchronous
* behavior with respect to cgroup removal. cgroup removal schedules css
* offlining but the css can seem alive while the operation is being
* delayed. If the delay affects user visible semantics, this test can be
* used to resolve the situation.
*/
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
return css->flags & CSS_DYING;
}
static inline bool css_is_online(struct cgroup_subsys_state *css)
{
return css->flags & CSS_ONLINE;
}
static inline bool css_is_self(struct cgroup_subsys_state *css)
{
if (css == &css->cgroup->self) {
/* cgroup::self should not have subsystem association */
WARN_ON(css->ss != NULL);
return true;
}
return false;
}
static inline void cgroup_get(struct cgroup *cgrp)
{
css_get(&cgrp->self);
}
static inline bool cgroup_tryget(struct cgroup *cgrp)
{
return css_tryget(&cgrp->self);
}
static inline void cgroup_put(struct cgroup *cgrp)
{
css_put(&cgrp->self);
}
extern struct mutex cgroup_mutex;
static inline void cgroup_lock(void)
{
mutex_lock(&cgroup_mutex);
}
static inline void cgroup_unlock(void)
{
mutex_unlock(&cgroup_mutex);
}
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
* @__c: extra condition expression to be passed to rcu_dereference_check()
*
* A task's css_set is RCU protected, initialized and exited while holding
* task_lock(), and can only be modified while holding both cgroup_mutex
* and task_lock() while the task is alive. This macro verifies that the
* caller is inside proper critical section and returns @task's css_set.
*
* The caller can also specify additional allowed conditions via @__c, such
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
rcu_read_lock_sched_held() || \
lockdep_is_held(&cgroup_mutex) || \
lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
rcu_dereference((task)->cgroups)
#endif
/**
* task_css_check - obtain css for (task, subsys) w/ extra access conds
* @task: the target task
* @subsys_id: the target subsystem ID
* @__c: extra condition expression to be passed to rcu_dereference_check()
*
* Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
* synchronization rules are the same as task_css_set_check().
*/
#define task_css_check(task, subsys_id, __c) \
task_css_set_check((task), (__c))->subsys[(subsys_id)]
/**
* task_css_set - obtain a task's css_set
* @task: the task to obtain css_set for
*
* See task_css_set_check().
*/
static inline struct css_set *task_css_set(struct task_struct *task)
{
return task_css_set_check(task, false);
}
/**
* task_css - obtain css for (task, subsys)
* @task: the target task
* @subsys_id: the target subsystem ID
*
* See task_css_check().
*/
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
int subsys_id)
{
return task_css_check(task, subsys_id, false);
}
/**
* task_get_css - find and get the css for (task, subsys)
* @task: the target task
* @subsys_id: the target subsystem ID
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
* valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
struct cgroup_subsys_state *css;
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
/*
* Can't use css_tryget_online() here. A task which has
* PF_EXITING set may stay associated with an offline css.
* If such task calls this function, css_tryget_online()
* will keep failing.
*/
if (likely(css_tryget(css)))
break;
cpu_relax();
}
rcu_read_unlock();
return css;
}
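/*
* Usage sketch (illustrative only): the reference returned by task_get_css()
* must be dropped with css_put() when done. The subsystem id below assumes
* the cpu controller is built in.
*
*	css = task_get_css(task, cpu_cgrp_id);
*	... use css; it cannot be freed underneath us ...
*	css_put(css);
*/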
/**
* task_css_is_root - test whether a task belongs to the root css
* @task: the target task
* @subsys_id: the target subsystem ID
*
* Test whether @task belongs to the root css on the specified subsystem.
* May be invoked in any context.
*/
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
return task_css_check(task, subsys_id, true) ==
init_css_set.subsys[subsys_id];
}
static inline struct cgroup *task_cgroup(struct task_struct *task,
int subsys_id)
{
return task_css(task, subsys_id)->cgroup;
}
static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
return task_css_set(task)->dfl_cgrp;
}
static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
struct cgroup_subsys_state *parent_css = cgrp->self.parent;
if (parent_css)
return container_of(parent_css, struct cgroup, self);
return NULL;
}
/**
* cgroup_is_descendant - test ancestry
* @cgrp: the cgroup to be tested
* @ancestor: possible ancestor of @cgrp
*
* Test whether @cgrp is a descendant of @ancestor. It also returns %true
* if @cgrp == @ancestor. This function is safe to call as long as @cgrp
* and @ancestor are accessible.
*/
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
struct cgroup *ancestor)
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
return cgrp->ancestors[ancestor->level] == ancestor;
}
/**
* cgroup_ancestor - find ancestor of cgroup
* @cgrp: cgroup to find ancestor of
* @ancestor_level: level of ancestor to find starting from root
*
* Find ancestor of cgroup at specified level starting from root if it exists
* and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
* @ancestor_level.
*
* This function is safe to call as long as @cgrp is accessible.
*/
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
if (ancestor_level < 0 || ancestor_level > cgrp->level)
return NULL;
return cgrp->ancestors[ancestor_level];
}
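/*
* Worked example (informational): for a cgroup at /a/b/c, cgrp->level is 3
* and cgrp->ancestors[] holds { root, a, b, c } indexed by level. Thus
* cgroup_ancestor(cgrp, 1) returns "a", and cgroup_is_descendant(cgrp, a)
* is true because cgrp->ancestors[a->level] == a.
*/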
/**
* task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
* @task: the task to be tested
* @ancestor: possible ancestor of @task's cgroup
*
* Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
* It follows all the same rules as cgroup_is_descendant, and only applies
* to the default hierarchy.
*/
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
struct css_set *cset = task_css_set(task);
return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
cgrp->nr_populated_threaded_children;
}
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
return kernfs_ino(cgrp->kn);
}
/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
return of->kn->priv;
}
struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
return of_cft(seq->private);
}
static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
return of_css(seq->private);
}
/*
* Name / path handling functions. All are thin wrappers around the kernfs
* counterparts and can be called under any context.
*/
static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
return kernfs_name(cgrp->kn, buf, buflen);
}
static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
return kernfs_path(cgrp->kn, buf, buflen);
}
static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
pr_cont_kernfs_name(cgrp->kn);
}
static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
pr_cont_kernfs_path(cgrp->kn);
}
bool cgroup_psi_enabled(void);
static inline void cgroup_init_kthreadd(void)
{
/*
* kthreadd is inherited by all kthreads, keep it in the root so
* that the new kthreads are guaranteed to stay in the root until
* initialization is finished.
*/
current->no_cgroup_migration = 1;
}
static inline void cgroup_kthread_ready(void)
{
/*
* This kthread finished initialization. The creator should have
* set PF_NO_SETAFFINITY if this kthread should stay in the root.
*/
current->no_cgroup_migration = 0;
}
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *__cgroup_get_from_id(u64 id);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
struct cgroup;
static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
return NULL;
}
static inline bool cgroup_psi_enabled(void)
{
return false;
}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
return true;
}
static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS
/*
* cgroup scalable recursive statistics.
*/
void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
void css_rstat_flush(struct cgroup_subsys_state *css);
/*
* Basic resource stats.
*/
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
u64 val) {}
#endif
void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
enum cpu_usage_stat index, u64 delta_exec);
static inline void cgroup_account_cputime(struct task_struct *task,
u64 delta_exec)
{
struct cgroup *cgrp;
cpuacct_charge(task, delta_exec);
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime(cgrp, delta_exec);
}
static inline void cgroup_account_cputime_field(struct task_struct *task,
enum cpu_usage_stat index,
u64 delta_exec)
{
struct cgroup *cgrp;
cpuacct_account_field(task, index, delta_exec);
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime_field(cgrp, index, delta_exec);
}
#else /* CONFIG_CGROUPS */
static inline void cgroup_account_cputime(struct task_struct *task,
u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
enum cpu_usage_stat index,
u64 delta_exec) {}
#endif /* CONFIG_CGROUPS */
/*
* sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
* definition in cgroup-defs.h.
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
return skcd->cgroup;
}
#else /* !CONFIG_SOCK_CGROUP_DATA */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_SOCK_CGROUP_DATA */
#ifdef CONFIG_CGROUPS
void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
struct cgroup *dst);
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return task->frozen;
}
#else /* !CONFIG_CGROUPS */
static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return false;
}
#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
percpu_ref_get(&cgrp->bpf.refcnt);
}
static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
percpu_ref_put(&cgrp->bpf.refcnt);
}
#else /* CONFIG_CGROUP_BPF */
static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);
#endif /* _LINUX_CGROUP_H */
/*
* Non-physical true random number generator based on timing jitter --
* Jitter RNG standalone code.
*
* Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023
*
* Design
* ======
*
* See https://www.chronox.de/jent.html
*
* License
* =======
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL2 are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* This Jitterentropy RNG is based on the jitterentropy library
* version 3.4.0 provided at https://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
#error "The CPU Jitter random number generator must not be compiled with optimizations. See documentation. Use the compiler switch -O0 for compiling jitterentropy.c."
#endif
typedef unsigned long long __u64;
typedef long long __s64;
typedef unsigned int __u32;
typedef unsigned char u8;
#define NULL ((void *) 0)
/* The entropy pool */
struct rand_data {
/* SHA3-256 is used as conditioner */
#define DATA_SIZE_BITS 256
/* all data values that are vital to maintain the security
* of the RNG are marked as SENSITIVE. A user must not
* access that information while the RNG executes its loops to
* calculate the next random value. */
void *hash_state; /* SENSITIVE hash state entropy pool */
__u64 prev_time; /* SENSITIVE Previous time stamp */
__u64 last_delta; /* SENSITIVE stuck test */
__s64 last_delta2; /* SENSITIVE stuck test */
unsigned int flags; /* Flags used to initialize */
unsigned int osr; /* Oversample rate */
#define JENT_MEMORY_ACCESSLOOPS 128
#define JENT_MEMORY_SIZE \
(CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS * \
CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE)
unsigned char *mem; /* Memory access location with size of
* memblocks * memblocksize */
unsigned int memlocation; /* Pointer to byte in *mem */
unsigned int memblocks; /* Number of memory blocks in *mem */
unsigned int memblocksize; /* Size of one memory block in bytes */
unsigned int memaccessloops; /* Number of memory accesses per random
* bit generation */
/* Repetition Count Test */
unsigned int rct_count; /* Number of stuck values */
/* Adaptive Proportion Test cutoff values */
unsigned int apt_cutoff; /* Intermittent health test failure */
unsigned int apt_cutoff_permanent; /* Permanent health test failure */
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
#define JENT_APT_WORD_MASK (JENT_APT_LSB - 1)
unsigned int apt_observations; /* Number of collected observations */
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
unsigned int health_failure; /* Record health failure */
unsigned int apt_base_set:1; /* APT base reference set? */
};
/* Flags that can be used to initialize the RNG */
#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more
* entropy, saves MEMORY_SIZE RAM for
* entropy collector */
/* -- error codes for init function -- */
#define JENT_ENOTIME 1 /* Timer service not available */
#define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */
#define JENT_ENOMONOTONIC 3 /* Timer is not monotonically increasing */
#define JENT_EVARVAR 5 /* Timer does not produce variations of
* variations (2nd derivative of time is
* zero). */
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
#define JENT_EHEALTH 9 /* Health test failed during initialization */
#define JENT_ERCT 10 /* RCT failed during initialization */
#define JENT_EHASH 11 /* Hash self test failed */
#define JENT_EMEM 12 /* Can't allocate memory for initialization */
#define JENT_RCT_FAILURE 1 /* Failure in RCT health test. */
#define JENT_APT_FAILURE 2 /* Failure in APT health test. */
#define JENT_PERMANENT_FAILURE_SHIFT 16
#define JENT_PERMANENT_FAILURE(x) (x << JENT_PERMANENT_FAILURE_SHIFT)
#define JENT_RCT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_RCT_FAILURE)
#define JENT_APT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_APT_FAILURE)
/*
* The n output bits can receive more than n bits of min entropy, of course,
* but the fixed output of the conditioning function can only asymptotically
* approach the output size bits of min entropy, not attain that bound. Random
* maps will tend to have output collisions, which reduces the creditable
* output entropy (that is what SP 800-90B Section 3.1.5.1.2 attempts to bound).
*
* The value "64" is justified in Appendix A.4 of the current 90C draft,
* and aligns with NIST's definition of "epsilon" in that document, which is
* that a string can be considered "full entropy" if you can bound the min
* entropy in each bit of output to at least 1-epsilon, where epsilon is
* required to be <= 2^(-32).
*/
#define JENT_ENTROPY_SAFETY_FACTOR 64
#include <linux/array_size.h>
#include <linux/fips.h>
#include <linux/minmax.h>
#include "jitterentropy.h"
/***************************************************************************
* Adaptive Proportion Test
*
* This test complies with SP800-90B section 4.4.2.
***************************************************************************/
/*
* See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B
* APT.
* https://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf
* In the syntax of R, this is C = 2 + qbinom(1 - 2^(-30), 511, 2^(-1/osr)).
* (The original formula wasn't correct because the first symbol must
* necessarily have been observed, so there is no chance of observing 0 of these
* symbols.)
*
* For the alpha < 2^-53, R cannot be used as it uses a float data type without
* arbitrary precision. A SageMath script is used to calculate those cutoff
* values.
*
* For any value above 14, this yields the maximal allowable value of 512
* (by FIPS 140-2 IG 7.19 Resolution # 16, we cannot choose a cutoff value that
* renders the test unable to fail).
*/
static const unsigned int jent_apt_cutoff_lookup[15] = {
325, 422, 459, 477, 488, 494, 499, 502,
505, 507, 508, 509, 510, 511, 512 };
static const unsigned int jent_apt_cutoff_permanent_lookup[15] = {
355, 447, 479, 494, 502, 507, 510, 512,
512, 512, 512, 512, 512, 512, 512 };
static void jent_apt_init(struct rand_data *ec, unsigned int osr)
{
/*
* Establish the apt_cutoff based on the presumed entropy rate of
* 1/osr.
*/
if (osr >= ARRAY_SIZE(jent_apt_cutoff_lookup)) {
ec->apt_cutoff = jent_apt_cutoff_lookup[
ARRAY_SIZE(jent_apt_cutoff_lookup) - 1];
ec->apt_cutoff_permanent = jent_apt_cutoff_permanent_lookup[
ARRAY_SIZE(jent_apt_cutoff_permanent_lookup) - 1];
} else {
ec->apt_cutoff = jent_apt_cutoff_lookup[osr - 1];
ec->apt_cutoff_permanent =
jent_apt_cutoff_permanent_lookup[osr - 1];
}
}
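/*
* Worked example (informational): for the default osr = 1, the presumed
* entropy rate is 1 per delta, so jent_apt_cutoff_lookup[0] = 325 repeats
* of the APT base value within the 512-observation window signal an
* intermittent failure and 355 a permanent one. For osr >= 15 both cutoffs
* saturate at the window size of 512.
*/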
/*
* Reset the APT counter
*
* @ec [in] Reference to entropy collector
*/
static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked)
{
/* Reset APT counter */
ec->apt_count = 0;
ec->apt_base = delta_masked;
ec->apt_observations = 0;
}
/*
* Insert a new entropy event into APT
*
* @ec [in] Reference to entropy collector
* @delta_masked [in] Masked time delta to process
*/
static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
{
/* Initialize the base reference */
if (!ec->apt_base_set) {
ec->apt_base = delta_masked;
ec->apt_base_set = 1;
return;
}
if (delta_masked == ec->apt_base) {
ec->apt_count++;
/* Note, ec->apt_count starts with one. */
if (ec->apt_count >= ec->apt_cutoff_permanent)
ec->health_failure |= JENT_APT_FAILURE_PERMANENT;
else if (ec->apt_count >= ec->apt_cutoff)
ec->health_failure |= JENT_APT_FAILURE;
}
ec->apt_observations++;
if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
jent_apt_reset(ec, delta_masked);
}
/***************************************************************************
* Stuck Test and its use as Repetition Count Test
*
* The Jitter RNG uses an enhanced version of the Repetition Count Test
* (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical
* back-to-back values, the input to the RCT is the counting of the stuck
* values during the generation of one Jitter RNG output block.
*
* The RCT is applied with an alpha of 2^{-30} compliant to FIPS 140-2 IG 9.8.
*
* During the counting operation, the Jitter RNG always calculates the RCT
* cut-off value of C. If that value exceeds the allowed cut-off value,
* the Jitter RNG output block will be calculated completely but discarded at
* the end. The caller of the Jitter RNG is informed with an error code.
***************************************************************************/
/*
* Repetition Count Test as defined in SP800-90B section 4.4.1
*
* @ec [in] Reference to entropy collector
* @stuck [in] Indicator whether the value is stuck
*/
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
if (stuck) {
ec->rct_count++;
/*
* The cutoff value is based on the following consideration:
* alpha = 2^-30 or 2^-60 as recommended in SP800-90B.
* In addition, we require an entropy value H of 1/osr as this
* is the minimum entropy required to provide full entropy.
* Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr
* deltas for inserting them into the entropy pool which should
* then have (close to) DATA_SIZE_BITS bits of entropy in the
* conditioned output.
*
* Note, ec->rct_count (which equals to value B in the pseudo
* code of SP800-90B section 4.4.1) starts with zero. Hence
* we need to subtract one from the cutoff value as calculated
* following SP800-90B. Thus C = ceil(-log_2(alpha)/H) = 30*osr
* or 60*osr.
*/
if ((unsigned int)ec->rct_count >= (60 * ec->osr)) {
ec->rct_count = -1;
ec->health_failure |= JENT_RCT_FAILURE_PERMANENT;
} else if ((unsigned int)ec->rct_count >= (30 * ec->osr)) {
ec->rct_count = -1;
ec->health_failure |= JENT_RCT_FAILURE;
}
} else {
/* Reset RCT */
ec->rct_count = 0;
}
}
static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
return (prev < next) ? (next - prev) :
(JENT_UINT64_MAX - prev + 1 + next);
}
/*
* Stuck test by checking the:
* 1st derivative of the jitter measurement (time delta)
* 2nd derivative of the jitter measurement (delta of time deltas)
* 3rd derivative of the jitter measurement (delta of delta of time deltas)
*
* All values must always be non-zero.
*
* @ec [in] Reference to entropy collector
* @current_delta [in] Jitter time delta
*
* @return
* 0 jitter measurement not stuck (good bit)
* 1 jitter measurement stuck (reject bit)
*/
static int jent_stuck(struct rand_data *ec, __u64 current_delta)
{
__u64 delta2 = jent_delta(ec->last_delta, current_delta);
__u64 delta3 = jent_delta(ec->last_delta2, delta2);
ec->last_delta = current_delta;
ec->last_delta2 = delta2;
/*
* Insert the result of the comparison of two back-to-back time
* deltas.
*/
jent_apt_insert(ec, current_delta);
if (!current_delta || !delta2 || !delta3) {
/* RCT with a stuck bit */
jent_rct_insert(ec, 1);
return 1;
}
/* RCT with a non-stuck bit */
jent_rct_insert(ec, 0);
return 0;
}
/*
* Report any health test failures
*
* @ec [in] Reference to entropy collector
*
* @return a bitmask indicating which tests failed
* 0 No health test failure
* 1 RCT failure
* 2 APT failure
* 1<<JENT_PERMANENT_FAILURE_SHIFT RCT permanent failure
* 2<<JENT_PERMANENT_FAILURE_SHIFT APT permanent failure
*/
static unsigned int jent_health_failure(struct rand_data *ec)
{
/* Test is only enabled in FIPS mode */
if (!fips_enabled)
return 0;
return ec->health_failure;
}
/***************************************************************************
* Noise sources
***************************************************************************/
/*
* Update of the loop count used for the next round of
* an entropy collection.
*
* Input:
* @bits is the number of low bits of the timer to consider
* @min defines the guaranteed minimum: 1 << @min is added to the folded
* timer value at the end so the returned loop count never drops below it
*
* @return Newly calculated loop counter
*/
static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min)
{
__u64 time = 0;
__u64 shuffle = 0;
unsigned int i = 0;
unsigned int mask = (1<<bits) - 1;
jent_get_nstime(&time);
/*
* We fold the time value as much as possible to ensure that as many
* bits of the time stamp are included as possible.
*/
for (i = 0; ((DATA_SIZE_BITS + bits - 1) / bits) > i; i++) {
shuffle ^= time & mask;
time = time >> bits;
}
/*
* We add a lower boundary value to ensure we have a minimum
* RNG loop count.
*/
return (shuffle + (1<<min));
}
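/*
* Worked example (informational): with bits = 7 and min = 0, mask is 0x7f,
* the 64-bit time stamp is folded in 7-bit chunks into a value between 0 and
* 127, and the lower bound 1 << 0 = 1 is added, yielding a loop count in the
* range [1, 128]. These are the parameters jent_memaccess() passes via
* MAX_ACC_LOOP_BIT and MIN_ACC_LOOP_BIT.
*/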
/*
* CPU Jitter noise source -- this is the noise source based on the CPU
* execution time jitter
*
* This function injects the individual bits of the time value into the
* entropy pool using a hash.
*
* ec [in] entropy collector
* time [in] time stamp to be injected
* stuck [in] Is the time stamp identified as stuck?
*
* Output:
* updated hash context in the entropy collector or error code
*/
static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
{
#define SHA3_HASH_LOOP (1<<3)
struct {
int rct_count;
unsigned int apt_observations;
unsigned int apt_count;
unsigned int apt_base;
} addtl = {
ec->rct_count,
ec->apt_observations,
ec->apt_count,
ec->apt_base
};
return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl),
SHA3_HASH_LOOP, stuck);
}
/*
* Memory Access noise source -- this is a noise source based on variations in
* memory access times
*
* This function performs memory accesses which will add to the timing
* variations due to an unknown amount of CPU wait states that need to be
* added when accessing memory. The memory size should be larger than the L1
* caches as outlined in the documentation and the associated testing.
*
* The L1 cache has a very high bandwidth, albeit its access rate is usually
* slower than accessing CPU registers. Therefore, L1 accesses only add minimal
* variations as the CPU hardly has to wait. Starting with L2, significant
* variations are added because L2 typically does not belong to the CPU any more
* and therefore a wider range of CPU wait states is necessary for accesses.
* L3 and real memory accesses have even a wider range of wait states. However,
* to reliably access either L3 or memory, the ec->mem memory must be quite
* large which is usually not desirable.
*
* @ec [in] Reference to the entropy collector with the memory access data -- if
* the reference to the memory block to be accessed is NULL, this noise
* source is disabled
* @loop_cnt [in] if a value not equal to 0 is set, use the given value as the
* number of memory access loops to perform
*/
static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
{
unsigned int wrap = 0;
__u64 i = 0;
#define MAX_ACC_LOOP_BIT 7
#define MIN_ACC_LOOP_BIT 0
__u64 acc_loop_cnt =
jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
if (NULL == ec || NULL == ec->mem)
return;
wrap = ec->memblocksize * ec->memblocks;
/*
* testing purposes -- allow test app to set the counter, not
* needed during runtime
*/
if (loop_cnt)
acc_loop_cnt = loop_cnt;
for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) {
unsigned char *tmpval = ec->mem + ec->memlocation;
/*
* memory access: just add 1 to one byte,
* wrap at 255 -- memory access implies read
* from and write to memory location
*/
*tmpval = (*tmpval + 1) & 0xff;
/*
* Addition of memblocksize - 1 to pointer
* with wrap around logic to ensure that every
* memory location is hit evenly
*/
ec->memlocation = ec->memlocation + ec->memblocksize - 1;
ec->memlocation = ec->memlocation % wrap;
}
}
/***************************************************************************
* Start of entropy processing logic
***************************************************************************/
/*
* This is the heart of the entropy generation: calculate time deltas and
* use the CPU jitter in the time deltas. The jitter is injected into the
* entropy pool.
*
* WARNING: ensure that ->prev_time is primed before using the output
* of this function! This can be done by calling this function
* and not using its result.
*
* @ec [in] Reference to entropy collector
*
* @return result of stuck test
*/
static int jent_measure_jitter(struct rand_data *ec, __u64 *ret_current_delta)
{
__u64 time = 0;
__u64 current_delta = 0;
int stuck;
/* Invoke one noise source before time measurement to add variations */
jent_memaccess(ec, 0);
/*
* Get time stamp and calculate time delta to previous
* invocation to measure the timing variations
*/
jent_get_nstime(&time);
current_delta = jent_delta(ec->prev_time, time);
ec->prev_time = time;
/* Check whether we have a stuck measurement. */
stuck = jent_stuck(ec, current_delta);
/* Now call the next noise sources which also injects the data */
if (jent_condition_data(ec, current_delta, stuck))
stuck = 1;
/* return the raw entropy value */
if (ret_current_delta)
*ret_current_delta = current_delta;
return stuck;
}
/*
* Generator of one 64 bit random number
* Function fills rand_data->hash_state
*
* @ec [in] Reference to entropy collector
*/
static void jent_gen_entropy(struct rand_data *ec)
{
unsigned int k = 0, safety_factor = 0;
if (fips_enabled)
safety_factor = JENT_ENTROPY_SAFETY_FACTOR;
/* priming of the ->prev_time value */
jent_measure_jitter(ec, NULL);
while (!jent_health_failure(ec)) {
/* If a stuck measurement is received, repeat measurement */
if (jent_measure_jitter(ec, NULL))
continue;
/*
* We multiply the loop value with ->osr to obtain the
* oversampling rate requested by the caller
*/
if (++k >= ((DATA_SIZE_BITS + safety_factor) * ec->osr))
break;
}
}
/*
* Entry function: Obtain entropy for the caller.
*
* This function invokes the entropy gathering logic as often as needed to
* generate as many bytes as requested by the caller. The entropy gathering
* logic creates 64 bits per invocation.
*
* This function truncates the last 64 bit entropy value output to the exact
* size specified by the caller.
*
* @ec [in] Reference to entropy collector
* @data [in] pointer to buffer for storing random data -- buffer must already
* exist
* @len [in] size of the buffer, specifying also the requested number of random
* bytes
*
* @return 0 when the request is fulfilled, or a negative error code
*
* The following error codes can occur:
* -1 entropy_collector is NULL or the generation failed
* -2 Intermittent health failure
* -3 Permanent health failure
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
{
unsigned char *p = data;
if (!ec)
return -1;
while (len > 0) {
unsigned int tocopy, health_test_result;
jent_gen_entropy(ec);
health_test_result = jent_health_failure(ec);
if (health_test_result > JENT_PERMANENT_FAILURE_SHIFT) {
/*
* At this point, the Jitter RNG instance is considered
* as a failed instance. There is no rerun of the
* startup test any more, because the caller
* is assumed to not further use this instance.
*/
return -3;
} else if (health_test_result) {
/*
* Perform startup health tests and return permanent
* error if it fails.
*/
if (jent_entropy_init(0, 0, NULL, ec)) {
/* Mark the permanent error */
ec->health_failure &=
JENT_RCT_FAILURE_PERMANENT |
JENT_APT_FAILURE_PERMANENT;
return -3;
}
return -2;
}
tocopy = min(DATA_SIZE_BITS / 8, len);
if (jent_read_random_block(ec->hash_state, p, tocopy))
return -1;
len -= tocopy;
p += tocopy;
}
return 0;
}
/***************************************************************************
* Initialization logic
***************************************************************************/
struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
unsigned int flags,
void *hash_state)
{
struct rand_data *entropy_collector;
entropy_collector = jent_zalloc(sizeof(struct rand_data));
if (!entropy_collector)
return NULL;
if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) {
/* Allocate memory for adding variations based on memory
* access
*/
entropy_collector->mem = jent_kvzalloc(JENT_MEMORY_SIZE);
if (!entropy_collector->mem) {
jent_zfree(entropy_collector);
return NULL;
}
entropy_collector->memblocksize =
CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE;
entropy_collector->memblocks =
CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS;
entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS;
}
/* verify and set the oversampling rate */
if (osr == 0)
osr = 1; /* H_submitter = 1 / osr */
entropy_collector->osr = osr;
entropy_collector->flags = flags;
entropy_collector->hash_state = hash_state;
/* Initialize the APT */
jent_apt_init(entropy_collector, osr);
/* fill the data pad with non-zero values */
jent_gen_entropy(entropy_collector);
return entropy_collector;
}
void jent_entropy_collector_free(struct rand_data *entropy_collector)
{
jent_kvzfree(entropy_collector->mem, JENT_MEMORY_SIZE);
entropy_collector->mem = NULL;
jent_zfree(entropy_collector);
}
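/*
* Typical call sequence (illustrative sketch only; in the kernel this is
* driven by the crypto API wrapper, and "hash_state" stands for an opaque
* SHA3-256 state prepared by that wrapper):
*
*	struct rand_data *ec;
*	unsigned char buf[32];
*
*	if (jent_entropy_init(0, 0, hash_state, NULL))
*		bail out;	(power-up timer/health test failed)
*	ec = jent_entropy_collector_alloc(1, 0, hash_state);
*	if (!ec)
*		bail out;	(out of memory)
*	if (jent_read_entropy(ec, buf, sizeof(buf)) < 0)
*		handle the intermittent (-2) or permanent (-3) failure;
*	jent_entropy_collector_free(ec);
*/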
int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state,
struct rand_data *p_ec)
{
/*
* If the caller provides an allocated ec, reuse it, which implies that the
* health test entropy data is used to further stir the available
* entropy pool.
*/
struct rand_data *ec = p_ec;
int i, time_backwards = 0, ret = 0, ec_free = 0;
unsigned int health_test_result;
if (!ec) {
ec = jent_entropy_collector_alloc(osr, flags, hash_state);
if (!ec)
return JENT_EMEM;
ec_free = 1;
} else {
/* Reset the APT */
jent_apt_reset(ec, 0);
/* Ensure that a new APT base is obtained */
ec->apt_base_set = 0;
/* Reset the RCT */
ec->rct_count = 0;
/* Reset intermittent, leave permanent health test result */
ec->health_failure &= (~JENT_RCT_FAILURE);
ec->health_failure &= (~JENT_APT_FAILURE);
}
/* We could perform statistical tests here, but the problem is
* that we only have a few loop counts to do testing. These
* loop counts may show some slight skew and we produce
* false positives.
*
* Moreover, only old systems show potentially problematic
* jitter entropy that could be caught here. But the RNG is
* intended for hardware that is available or widely used, not
* for old systems that are long out of favor. Thus, no
* statistical tests.
*/
/*
* We could add a check for system capabilities such as clock_getres or
* check for CONFIG_X86_TSC, but it does not make much sense as the
* following sanity checks verify that we have a high-resolution
* timer.
*/
/*
* TESTLOOPCOUNT needs some loops to identify edge systems. 100 is
* definitely too little.
*
* SP800-90B requires at least 1024 initial test cycles.
*/
#define TESTLOOPCOUNT 1024
#define CLEARCACHE 100
for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
__u64 start_time = 0, end_time = 0, delta = 0;
/* Invoke core entropy collection logic */
jent_measure_jitter(ec, &delta);
end_time = ec->prev_time;
start_time = ec->prev_time - delta;
/* test whether timer works */
if (!start_time || !end_time) {
ret = JENT_ENOTIME;
goto out;
}
/*
* test whether timer is fine grained enough to provide
* delta even when called shortly after each other -- this
* implies that we also have a high resolution timer
*/
if (!delta || (end_time == start_time)) {
ret = JENT_ECOARSETIME;
goto out;
}
/*
* up to here we did not modify any variable that will be
* evaluated later, but we already performed some work. Thus we
* already have had an impact on the caches, branch prediction,
* etc. with the goal to clear it to get the worst case
* measurements.
*/
if (i < CLEARCACHE)
continue;
/* test whether we have an increasing timer */
if (!(end_time > start_time))
time_backwards++;
}
/*
* We allow the time to run backwards up to three times.
* CLOCK_REALTIME is affected by adjtime and NTP operations. Thus,
* if such an operation just happens to interfere with our test, it
* should not fail. The value of 3 should cover the NTP case being
* performed during our test run.
*/
if (time_backwards > 3) {
ret = JENT_ENOMONOTONIC;
goto out;
}
/* Did we encounter a health test failure? */
health_test_result = jent_health_failure(ec);
if (health_test_result) {
ret = (health_test_result & JENT_RCT_FAILURE) ? JENT_ERCT :
JENT_EHEALTH;
goto out;
}
out:
if (ec_free)
jent_entropy_collector_free(ec);
return ret;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/bitmap.c
* Helper functions for bitmap.h.
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
/**
* DOC: bitmap introduction
*
* bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
*
* The possible unused bits in the last, partially used word
* of a bitmap are 'don't care'. The implementation makes
* no particular effort to keep them zero. It ensures that
* their value will not affect the results of any operation.
* The bitmap operations that return Boolean (bitmap_empty,
* for example) or scalar (bitmap_weight, for example) results
* carefully filter out these unused bits from impacting their
* results.
*
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
* for the best explanations of this ordering.
*/
bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return false;
return true;
}
EXPORT_SYMBOL(__bitmap_equal);
bool __bitmap_or_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2,
const unsigned long *bitmap3,
unsigned int bits)
{
unsigned int k, lim = bits / BITS_PER_LONG;
unsigned long tmp;
for (k = 0; k < lim; ++k) {
if ((bitmap1[k] | bitmap2[k]) != bitmap3[k])
return false;
}
if (!(bits % BITS_PER_LONG))
return true;
tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k];
return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0;
}
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
unsigned int k, lim = BITS_TO_LONGS(bits);
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @nbits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned shift, unsigned nbits)
{
unsigned k, lim = BITS_TO_LONGS(nbits);
unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take lower rem bits of
* word above and make them the top rem bits of result.
*/
if (!rem || off + k + 1 >= lim)
upper = 0;
else {
upper = src[off + k + 1];
if (off + k + 1 == lim - 1)
upper &= mask;
upper <<= (BITS_PER_LONG - rem);
}
lower = src[off + k];
if (off + k == lim - 1)
lower &= mask;
lower >>= rem;
dst[k] = lower | upper;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);
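/*
* Worked example (informational): right-shifting the 8-bit bitmap 0b10110010
* by 2 with nbits = 8 yields 0b00101100 -- bits 0 and 1 fall off the bottom,
* zeros enter at the top, and bits at or beyond nbits in the last word are
* masked off via BITMAP_LAST_WORD_MASK().
*/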
/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @nbits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
* and those MS bits shifted off the top are lost.
*/
void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
int k;
unsigned int lim = BITS_TO_LONGS(nbits);
unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take upper rem bits of
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
lower = src[k - 1] >> (BITS_PER_LONG - rem);
else
lower = 0;
upper = src[k] << rem;
dst[k + off] = lower | upper;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);
/**
* bitmap_cut() - remove bit region from bitmap and right shift remaining bits
* @dst: destination bitmap, might overlap with src
* @src: source bitmap
* @first: start bit of region to be removed
* @cut: number of bits to remove
* @nbits: bitmap size, in bits
*
* Set the n-th bit of @dst iff the n-th bit of @src is set and
* n is less than @first, or the m-th bit of @src is set for any
* m such that @first <= n < nbits, and m = n + @cut.
*
* In pictures, example for a big-endian 32-bit architecture:
*
* The @src bitmap is::
*
* 31 63
* | |
* 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101
* | | | |
* 16 14 0 32
*
* if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is::
*
* 31 63
* | |
* 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010
* | | |
* 14 (bit 17 0 32
* from @src)
*
* Note that @dst and @src might overlap partially or entirely.
*
* This is implemented in the obvious way, with a shift and carry
* step for each moved bit. Optimisation is left as an exercise
* for the compiler.
*/
void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits);
unsigned long keep = 0, carry;
int i;
if (first % BITS_PER_LONG) {
keep = src[first / BITS_PER_LONG] &
(~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
}
memmove(dst, src, len * sizeof(*dst));
while (cut--) {
for (i = first / BITS_PER_LONG; i < len; i++) {
if (i < len - 1)
carry = dst[i + 1] & 1UL;
else
carry = 0;
dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1));
}
}
dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG);
dst[first / BITS_PER_LONG] |= keep;
}
EXPORT_SYMBOL(bitmap_cut);
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
if (bits % BITS_PER_LONG)
result |= (dst[k] = bitmap1[k] & bitmap2[k] &
BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
if (bits % BITS_PER_LONG)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);
void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
const unsigned long *mask, unsigned int nbits)
{
unsigned int k;
unsigned int nr = BITS_TO_LONGS(nbits);
for (k = 0; k < nr; k++)
dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
}
EXPORT_SYMBOL(__bitmap_replace);
bool __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return true;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return true;
return false;
}
EXPORT_SYMBOL(__bitmap_intersects);
bool __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return false;
return true;
}
EXPORT_SYMBOL(__bitmap_subset);
#define BITMAP_WEIGHT(FETCH, bits) \
({ \
unsigned int __bits = (bits), idx, w = 0; \
\
for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \
w += hweight_long(FETCH); \
\
if (__bits % BITS_PER_LONG) \
w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \
\
w; \
})
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
return BITMAP_WEIGHT(bitmap[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight);
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight_and);
unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight_andnot);
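/*
* Worked example (informational): for bitmap1 = 0b1110 and bitmap2 = 0b0111
* with bits = 4, __bitmap_weight(bitmap1, 4) is 3, __bitmap_weight_and() is 2
* (bits 1 and 2 are common) and __bitmap_weight_andnot() is 1 (only bit 3 of
* bitmap1 is absent from bitmap2).
*/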
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned int size = start + len;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (len - bits_to_set >= 0) {
*p |= mask_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (len) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(__bitmap_set);
void __bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned int size = start + len;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (len - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (len) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
EXPORT_SYMBOL(__bitmap_clear);
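/*
 * Illustrative sketch (not part of the original source): __bitmap_set() and
 * __bitmap_clear() operate on an arbitrary bit region that may straddle word
 * boundaries. Assuming a caller-owned 128-bit map, marking and then releasing
 * part of a region could look like:
 *
 *	DECLARE_BITMAP(map, 128);
 *
 *	bitmap_zero(map, 128);
 *	__bitmap_set(map, 60, 10);	// sets bits 60..69, crossing word 0/1
 *	__bitmap_clear(map, 64, 4);	// clears bits 64..67 again
 *
 * In practice callers normally go through the bitmap_set()/bitmap_clear()
 * wrappers, which have fast paths for simple cases.
 */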
/**
* bitmap_find_next_zero_area_off - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
* @align_offset: Alignment offset for zero area.
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offset of all zero areas this function finds plus @align_offset
* is a multiple of that power of 2.
*/
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
end = index + nr;
if (end > size)
return end;
i = find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto again;
}
return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
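/*
 * Illustrative sketch (not part of the original source): a typical caller
 * treats the bitmap as an allocation map and claims the area it found.
 * Assuming a 256-bit map and a request for 8 bits aligned to 4:
 *
 *	unsigned long pos;
 *
 *	pos = bitmap_find_next_zero_area(map, 256, 0, 8, 3);
 *	if (pos >= 256)
 *		return -ENOMEM;		// no free run of that size
 *	bitmap_set(map, pos, 8);	// claim it
 *
 * bitmap_find_next_zero_area() is the common wrapper that passes
 * @align_offset == 0 to this function.
 */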
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @nbits)
* @nbits: number of valid bit positions in @buf
*
* Map the bit at position @pos in @buf (of length @nbits) to the
* ordinal of which set bit it is. If it is not set or if @pos
* is not a valid bit position, map to -1.
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
* and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
* The bit positions 0 through @nbits - 1 are valid positions in @buf.
*/
static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits)
{
if (pos >= nbits || !test_bit(pos, buf))
return -1;
return bitmap_weight(buf, pos);
}
/**
* bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
* @dst: remapped result
* @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
* @nbits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* If either of the @old and @new bitmaps are empty, or if @src and
* @dst point to the same location, then this routine copies @src
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
* (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
*
* For example, let's say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if say @src comes into this routine
* with bits 1, 5 and 7 set, then @dst should leave with bits 1,
* 13 and 15 set.
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
unsigned int nbits)
{
unsigned int oldbit, w;
if (dst == src) /* following doesn't handle inplace remaps */
return;
bitmap_zero(dst, nbits);
w = bitmap_weight(new, nbits);
for_each_set_bit(oldbit, src, nbits) {
int n = bitmap_pos_to_ord(old, oldbit, nbits);
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
set_bit(find_nth_bit(new, nbits, n % w), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
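/*
 * Illustrative sketch (not part of the original source), restating the
 * example from the comment above in code form:
 *
 *	DECLARE_BITMAP(old, 16);
 *	DECLARE_BITMAP(new, 16);
 *	DECLARE_BITMAP(src, 16);
 *	DECLARE_BITMAP(dst, 16);
 *
 *	bitmap_zero(old, 16); bitmap_set(old, 4, 4);	// old = {4..7}
 *	bitmap_zero(new, 16); bitmap_set(new, 12, 4);	// new = {12..15}
 *	bitmap_zero(src, 16);
 *	set_bit(1, src); set_bit(5, src); set_bit(7, src);
 *	bitmap_remap(dst, src, old, new, 16);
 *	// dst now has bits 1 (identity map), 13 and 15 set
 */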
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
* @oldbit: bit position to be mapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
* (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
*
* For example, let's say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if say @oldbit is 5, then this routine
* returns 13.
*/
int bitmap_bitremap(int oldbit, const unsigned long *old,
const unsigned long *new, int bits)
{
int w = bitmap_weight(new, bits);
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
return oldbit;
else
return find_nth_bit(new, bits, n % w);
}
EXPORT_SYMBOL(bitmap_bitremap);
#ifdef CONFIG_NUMA
/**
* bitmap_onto - translate one bitmap relative to another
* @dst: resulting translated bitmap
* @orig: original untranslated bitmap
* @relmap: bitmap relative to which translated
* @bits: number of bits in each of these bitmaps
*
* Set the n-th bit of @dst iff there exists some m such that the
* n-th bit of @relmap is set, the m-th bit of @orig is set, and
* the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
* (If you understood the previous sentence the first time you
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
* using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
* weight of (number of set bits in) @relmap are mapped nowhere.
* In particular, if for all bits m set in @orig, m >= W, then
* @dst will end up empty. In situations where the possibility
* of such an empty result is not desired, one way to avoid it is
* to use the bitmap_fold() operator, below, to first fold the
* @orig bitmap over itself so that all its set bits x are in the
* range 0 <= x < W. The bitmap_fold() operator does this by
* setting the bit (m % W) in @dst, for each bit (m) set in @orig.
*
* Example [1] for bitmap_onto():
* Let's say @relmap has bits 30-39 set, and @orig has bits
* 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
* @dst will have bits 31, 33, 35, 37 and 39 set.
*
* When bit 0 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the first bit (if any)
* that is turned on in @relmap. Since bit 0 was off in the
* above example, we leave off that bit (bit 30) in @dst.
*
* When bit 1 is set in @orig (as in the above example), it
* means turn on the bit in @dst corresponding to whatever
* is the second bit that is turned on in @relmap. The second
* bit in @relmap that was turned on in the above example was
* bit 31, so we turned on bit 31 in @dst.
*
* Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
* because they were the 4th, 6th, 8th and 10th set bits
* set in @relmap, and the 4th, 6th, 8th and 10th bits of
* @orig (i.e. bits 3, 5, 7 and 9) were also set.
*
* When bit 11 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the twelfth bit that is
* turned on in @relmap. In the above example, there were
* only ten bits turned on in @relmap (30..39), so the fact that bit
* 11 was set in @orig had no effect on @dst.
*
* Example [2] for bitmap_fold() + bitmap_onto():
* Let's say @relmap has these ten bits set::
*
* 40 41 42 43 45 48 53 61 74 95
*
* (for the curious, that's 40 plus the first ten terms of the
* Fibonacci sequence.)
*
* Further, let's say we use the following code, invoking
* bitmap_fold() then bitmap_onto(), as suggested above to
* avoid the possibility of an empty @dst result::
*
* unsigned long *tmp; // a temporary bitmap's bits
*
* bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
* bitmap_onto(dst, tmp, relmap, bits);
*
* Then this table shows what various values of @dst would be, for
* various @orig's. I list the zero-based positions of each set bit.
* The tmp column shows the intermediate result, as computed by
* using bitmap_fold() to fold the @orig bitmap modulo ten
* (the weight of @relmap):
*
* =============== ============== =================
* @orig tmp @dst
* 0 0 40
* 1 1 41
* 9 9 95
* 10 0 40 [#f1]_
* 1 3 5 7 1 3 5 7 41 43 48 61
* 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
* 0 9 18 27 0 9 8 7 40 61 74 95
* 0 10 20 30 0 40
* 0 11 22 33 0 1 2 3 40 41 42 43
* 0 12 24 36 0 2 4 6 40 42 45 53
* 78 102 211 1 2 8 41 42 74 [#f1]_
* =============== ============== =================
*
* .. [#f1]
*
* For these marked lines, if we hadn't first done bitmap_fold()
* into tmp, then the @dst result would have been empty.
*
* If either of @orig or @relmap is empty (no set bits), then @dst
* will be returned empty.
*
* If (as explained above) the only set bits in @orig are in positions
* m where m >= W, (where W is the weight of @relmap) then @dst will
* once again be returned empty.
*
* All bits in @dst not set by the above rule are cleared.
*/
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits)
{
unsigned int n, m; /* same meaning as in above comment */
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, bits);
/*
* The following code is a more efficient, but less
* obvious, equivalent to the loop:
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
* n = find_nth_bit(relmap, bits, m);
* if (test_bit(m, orig))
* set_bit(n, dst);
* }
*/
m = 0;
for_each_set_bit(n, relmap, bits) {
/* m == bitmap_pos_to_ord(relmap, n, bits) */
if (test_bit(m, orig))
set_bit(n, dst);
m++;
}
}
/**
* bitmap_fold - fold larger bitmap into smaller, modulo specified size
* @dst: resulting smaller bitmap
* @orig: original larger bitmap
* @sz: specified size
* @nbits: number of bits in each of these bitmaps
*
* For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
* Clear all other bits in @dst. See further the comment and
* Example [2] for bitmap_onto() for why and how to use this.
*/
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits)
{
unsigned int oldbit;
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, nbits);
for_each_set_bit(oldbit, orig, nbits)
set_bit(oldbit % sz, dst);
}
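/*
 * Illustrative sketch (not part of the original source): using the @relmap
 * from Example [2] above (bits 40 41 42 43 45 48 53 61 74 95 set), the
 * table row "@orig = 1 3 5 7" corresponds to:
 *
 *	unsigned int w = bitmap_weight(relmap, bits);	// w == 10
 *
 *	bitmap_fold(tmp, orig, w, bits);	// tmp = {1, 3, 5, 7}
 *	bitmap_onto(dst, tmp, relmap, bits);	// dst = {41, 43, 48, 61}
 *
 * i.e. the 2nd, 4th, 6th and 8th set bits of @relmap are selected.
 */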
#endif /* CONFIG_NUMA */
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
flags);
}
EXPORT_SYMBOL(bitmap_alloc);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
return bitmap_alloc(nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(bitmap_zalloc);
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
{
return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long),
flags, node);
}
EXPORT_SYMBOL(bitmap_alloc_node);
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
{
return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(bitmap_zalloc_node);
void bitmap_free(const unsigned long *bitmap)
{
kfree(bitmap);
}
EXPORT_SYMBOL(bitmap_free);
static void devm_bitmap_free(void *data)
{
unsigned long *bitmap = data;
bitmap_free(bitmap);
}
unsigned long *devm_bitmap_alloc(struct device *dev,
unsigned int nbits, gfp_t flags)
{
unsigned long *bitmap;
int ret;
bitmap = bitmap_alloc(nbits, flags);
if (!bitmap)
return NULL;
ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap);
if (ret)
return NULL;
return bitmap;
}
EXPORT_SYMBOL_GPL(devm_bitmap_alloc);
unsigned long *devm_bitmap_zalloc(struct device *dev,
unsigned int nbits, gfp_t flags)
{
return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL_GPL(devm_bitmap_zalloc);
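/*
 * Illustrative sketch (not part of the original source): the plain allocators
 * pair with bitmap_free(), while the devm_ variants are released
 * automatically when the device is unbound. In a hypothetical driver:
 *
 *	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);
 *
 *	if (!map)
 *		return -ENOMEM;
 *	...
 *	bitmap_free(map);
 *
 * or, tied to a struct device *dev:
 *
 *	map = devm_bitmap_zalloc(dev, nbits, GFP_KERNEL);
 *	if (!map)
 *		return -ENOMEM;		// freed automatically on driver detach
 */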
#if BITS_PER_LONG == 64
/**
* bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
* @bitmap: array of unsigned longs, the destination bitmap
* @buf: array of u32 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
{
unsigned int i, halfwords;
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
bitmap[i/2] = (unsigned long) buf[i];
if (++i < halfwords)
bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
}
/* Clear tail bits in last word beyond nbits. */
if (nbits % BITS_PER_LONG)
bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr32);
/**
* bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
* @buf: array of u32 (in host byte order), the dest bitmap
* @bitmap: array of unsigned longs, the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
unsigned int i, halfwords;
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
if (++i < halfwords)
buf[i] = (u32) (bitmap[i/2] >> 32);
}
/* Clear tail bits in last element of array beyond nbits. */
if (nbits % BITS_PER_LONG)
buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
EXPORT_SYMBOL(bitmap_to_arr32);
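/*
 * Illustrative sketch (not part of the original source): these helpers are
 * typically used at ABI boundaries where bitmaps are exchanged as u32 arrays
 * regardless of BITS_PER_LONG. For a hypothetical compile-time constant
 * NBITS, a round trip might look like:
 *
 *	u32 buf[DIV_ROUND_UP(NBITS, 32)];
 *
 *	bitmap_to_arr32(buf, map, NBITS);	// export to u32 layout
 *	bitmap_from_arr32(map, buf, NBITS);	// import it back
 *
 * On 32-bit kernels both calls degenerate to plain copies in bitmap.h,
 * which is why this file only builds them for BITS_PER_LONG == 64.
 */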
#endif
#if BITS_PER_LONG == 32
/**
* bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
* @bitmap: array of unsigned longs, the destination bitmap
* @buf: array of u64 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
{
int n;
for (n = nbits; n > 0; n -= 64) {
u64 val = *buf++;
*bitmap++ = val;
if (n > 32)
*bitmap++ = val >> 32;
}
/*
* Clear tail bits in the last word beyond nbits.
*
* Negative index is OK because here we point to the word next
* to the last word of the bitmap, except for nbits == 0, which
* is tested implicitly.
*/
if (nbits % BITS_PER_LONG)
bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr64);
/**
* bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
* @buf: array of u64 (in host byte order), the dest bitmap
* @bitmap: array of unsigned longs, the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
{
const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);
while (bitmap < end) {
*buf = *bitmap++;
if (bitmap < end)
*buf |= (u64)(*bitmap++) << 32;
buf++;
}
/* Clear tail bits in the last element of array beyond nbits. */
if (nbits % 64)
buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
}
EXPORT_SYMBOL(bitmap_to_arr64);
#endif
/* +++ deflate.c */
/* deflate.c -- compress data using the deflation algorithm
* Copyright (C) 1995-1996 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/*
* ALGORITHM
*
* The "deflation" process depends on being able to identify portions
* of the input text which are identical to earlier input (within a
* sliding window trailing behind the input currently being processed).
*
* The most straightforward technique turns out to be the fastest for
* most input files: try all possible matches and select the longest.
* The key feature of this algorithm is that insertions into the string
* dictionary are very simple and thus fast, and deletions are avoided
* completely. Insertions are performed at each input character, whereas
* string matches are performed only when the previous match ends. So it
* is preferable to spend more time in matches to allow very fast string
* insertions and avoid deletions. The matching algorithm for small
* strings is inspired from that of Rabin & Karp. A brute force approach
* is used to find longer strings when a small match has been found.
* A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
* (by Leonid Broukhis).
* A previous version of this file used a more sophisticated algorithm
* (by Fiala and Greene) which is guaranteed to run in linear amortized
* time, but has a larger average cost, uses more memory and is patented.
* However the F&G algorithm may be faster for some highly redundant
* files if the parameter max_chain_length (described below) is too large.
*
* ACKNOWLEDGEMENTS
*
* The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
* I found it in 'freeze' written by Leonid Broukhis.
* Thanks to many people for bug reports and testing.
*
* REFERENCES
*
* Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
* Available in ftp://ds.internic.net/rfc/rfc1951.txt
*
* A description of the Rabin and Karp algorithm is given in the book
* "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
*
* Fiala,E.R., and Greene,D.H.
* Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
*
*/
#include <linux/module.h>
#include <linux/zutil.h>
#include "defutil.h"
/* architecture-specific bits */
#ifdef CONFIG_ZLIB_DFLTCC
# include "../zlib_dfltcc/dfltcc_deflate.h"
#else
#define DEFLATE_RESET_HOOK(strm) do {} while (0)
#define DEFLATE_HOOK(strm, flush, bstate) 0
#define DEFLATE_NEED_CHECKSUM(strm) 1
#define DEFLATE_DFLTCC_ENABLED() 0
#endif
/* ===========================================================================
* Function prototypes.
*/
typedef block_state (*compress_func) (deflate_state *s, int flush);
/* Compression function. Returns the block state after the call. */
static void fill_window (deflate_state *s);
static block_state deflate_stored (deflate_state *s, int flush);
static block_state deflate_fast (deflate_state *s, int flush);
static block_state deflate_slow (deflate_state *s, int flush);
static void lm_init (deflate_state *s);
static void putShortMSB (deflate_state *s, uInt b);
static int read_buf (z_streamp strm, Byte *buf, unsigned size);
static uInt longest_match (deflate_state *s, IPos cur_match);
#ifdef DEBUG_ZLIB
static void check_match (deflate_state *s, IPos start, IPos match,
int length);
#endif
/* ===========================================================================
* Local data
*/
#define NIL 0
/* Tail of hash chains */
#ifndef TOO_FAR
# define TOO_FAR 4096
#endif
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
* See deflate.c for comments about the MIN_MATCH+1.
*/
/* Workspace to be allocated for deflate processing */
typedef struct deflate_workspace {
/* State memory for the deflator */
deflate_state deflate_memory;
#ifdef CONFIG_ZLIB_DFLTCC
/* State memory for s390 hardware deflate */
struct dfltcc_deflate_state dfltcc_memory;
#endif
Byte *window_memory;
Pos *prev_memory;
Pos *head_memory;
char *overlay_memory;
} deflate_workspace;
#ifdef CONFIG_ZLIB_DFLTCC
/* dfltcc_state must be doubleword aligned for DFLTCC call */
static_assert(offsetof(struct deflate_workspace, dfltcc_memory) % 8 == 0);
#endif
/* Values for max_lazy_match, good_match and max_chain_length, depending on
* the desired pack level (0..9). The values given below have been tuned to
* exclude worst case performance for pathological files. Better values may be
* found for specific files.
*/
typedef struct config_s {
ush good_length; /* reduce lazy search above this match length */
ush max_lazy; /* do not perform lazy search above this match length */
ush nice_length; /* quit search above this match length */
ush max_chain;
compress_func func;
} config;
static const config configuration_table[10] = {
/* good lazy nice chain */
/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
/* 2 */ {4, 5, 16, 8, deflate_fast},
/* 3 */ {4, 6, 32, 32, deflate_fast},
/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
/* 5 */ {8, 16, 32, 32, deflate_slow},
/* 6 */ {8, 16, 128, 128, deflate_slow},
/* 7 */ {8, 32, 128, 256, deflate_slow},
/* 8 */ {32, 128, 258, 1024, deflate_slow},
/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
* For deflate_fast() (levels <= 3) good is ignored and lazy has a different
* meaning.
*/
/* ===========================================================================
* Update a hash value with the given input byte
* IN assertion: all calls to UPDATE_HASH are made with consecutive
* input characters, so that a running hash key can be computed from the
* previous key instead of complete recalculation each time.
*/
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
/* ===========================================================================
* Insert string str in the dictionary and set match_head to the previous head
* of the hash chain (the most recent string with same hash key). Return
* the previous length of the hash chain.
* IN assertion: all calls to INSERT_STRING are made with consecutive
* input characters and the first MIN_MATCH bytes of str are valid
* (except for the last MIN_MATCH-1 bytes of the input file).
*/
#define INSERT_STRING(s, str, match_head) \
(UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
s->head[s->ins_h] = (Pos)(str))
/* ===========================================================================
* Initialize the hash table (avoiding 64K overflow for 16 bit systems).
* prev[] will be initialized on the fly.
*/
#define CLEAR_HASH(s) \
s->head[s->hash_size-1] = NIL; \
memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
/* ========================================================================= */
int zlib_deflateInit2(
z_streamp strm,
int level,
int method,
int windowBits,
int memLevel,
int strategy
)
{
deflate_state *s;
int noheader = 0;
deflate_workspace *mem;
char *next;
ush *overlay;
/* We overlay pending_buf and d_buf+l_buf. This works since the average
* output size for (length,distance) codes is <= 24 bits.
*/
if (strm == NULL) return Z_STREAM_ERROR;
strm->msg = NULL;
if (level == Z_DEFAULT_COMPRESSION) level = 6;
mem = (deflate_workspace *) strm->workspace;
if (windowBits < 0) { /* undocumented feature: suppress zlib header */
noheader = 1;
windowBits = -windowBits;
}
if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
return Z_STREAM_ERROR;
}
/*
* Direct the workspace's pointers to the chunks that were allocated
* along with the deflate_workspace struct.
*/
next = (char *) mem;
next += sizeof(*mem);
#ifdef CONFIG_ZLIB_DFLTCC
/*
* DFLTCC requires the window to be page aligned.
* Thus, we overallocate and take the aligned portion of the buffer.
*/
mem->window_memory = (Byte *) PTR_ALIGN(next, PAGE_SIZE);
#else
mem->window_memory = (Byte *) next;
#endif
next += zlib_deflate_window_memsize(windowBits);
mem->prev_memory = (Pos *) next;
next += zlib_deflate_prev_memsize(windowBits);
mem->head_memory = (Pos *) next;
next += zlib_deflate_head_memsize(memLevel);
mem->overlay_memory = next;
s = (deflate_state *) &(mem->deflate_memory);
strm->state = (struct internal_state *)s;
s->strm = strm;
s->noheader = noheader;
s->w_bits = windowBits;
s->w_size = 1 << s->w_bits;
s->w_mask = s->w_size - 1;
s->hash_bits = memLevel + 7;
s->hash_size = 1 << s->hash_bits;
s->hash_mask = s->hash_size - 1;
s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
s->window = (Byte *) mem->window_memory;
s->prev = (Pos *) mem->prev_memory;
s->head = (Pos *) mem->head_memory;
s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
overlay = (ush *) mem->overlay_memory;
s->pending_buf = (uch *) overlay;
s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
s->level = level;
s->strategy = strategy;
s->method = (Byte)method;
return zlib_deflateReset(strm);
}
/* ========================================================================= */
int zlib_deflateReset(
z_streamp strm
)
{
deflate_state *s;
if (strm == NULL || strm->state == NULL)
return Z_STREAM_ERROR;
strm->total_in = strm->total_out = 0;
strm->msg = NULL;
strm->data_type = Z_UNKNOWN;
s = (deflate_state *)strm->state;
s->pending = 0;
s->pending_out = s->pending_buf;
if (s->noheader < 0) {
s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
}
s->status = s->noheader ? BUSY_STATE : INIT_STATE;
strm->adler = 1;
s->last_flush = Z_NO_FLUSH;
zlib_tr_init(s);
lm_init(s);
DEFLATE_RESET_HOOK(strm);
return Z_OK;
}
/* =========================================================================
* Put a short in the pending buffer. The 16-bit value is put in MSB order.
* IN assertion: the stream state is correct and there is enough room in
* pending_buf.
*/
static void putShortMSB(
deflate_state *s,
uInt b
)
{
put_byte(s, (Byte)(b >> 8));
put_byte(s, (Byte)(b & 0xff));
}
/* ========================================================================= */
int zlib_deflate(
z_streamp strm,
int flush
)
{
int old_flush; /* value of flush param for previous deflate call */
deflate_state *s;
if (strm == NULL || strm->state == NULL ||
flush > Z_FINISH || flush < 0) {
return Z_STREAM_ERROR;
}
s = (deflate_state *) strm->state;
if ((strm->next_in == NULL && strm->avail_in != 0) ||
(s->status == FINISH_STATE && flush != Z_FINISH)) {
return Z_STREAM_ERROR;
}
if (strm->avail_out == 0) return Z_BUF_ERROR;
s->strm = strm; /* just in case */
old_flush = s->last_flush;
s->last_flush = flush;
/* Write the zlib header */
if (s->status == INIT_STATE) {
uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
uInt level_flags = (s->level-1) >> 1;
if (level_flags > 3) level_flags = 3;
header |= (level_flags << 6);
if (s->strstart != 0) header |= PRESET_DICT;
header += 31 - (header % 31);
s->status = BUSY_STATE;
putShortMSB(s, header);
/* Save the adler32 of the preset dictionary: */
if (s->strstart != 0) {
putShortMSB(s, (uInt)(strm->adler >> 16));
putShortMSB(s, (uInt)(strm->adler & 0xffff));
}
strm->adler = 1L;
}
/* Flush as much pending output as possible */
if (s->pending != 0) {
flush_pending(strm);
if (strm->avail_out == 0) {
/* Since avail_out is 0, deflate will be called again with
* more output space, but possibly with both pending and
* avail_in equal to zero. There won't be anything to do,
* but this is not an error situation so make sure we
* return OK instead of BUF_ERROR at next call of deflate:
*/
s->last_flush = -1;
return Z_OK;
}
/* Make sure there is something to do and avoid duplicate consecutive
* flushes. For repeated and useless calls with Z_FINISH, we keep
* returning Z_STREAM_END instead of Z_BUF_ERROR.
*/
} else if (strm->avail_in == 0 && flush <= old_flush &&
flush != Z_FINISH) {
return Z_BUF_ERROR;
}
/* User must not provide more input after the first FINISH: */
if (s->status == FINISH_STATE && strm->avail_in != 0) {
return Z_BUF_ERROR;
}
/* Start a new block or continue the current one.
*/
if (strm->avail_in != 0 || s->lookahead != 0 ||
(flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
block_state bstate;
bstate = DEFLATE_HOOK(strm, flush, &bstate) ? bstate :
(*(configuration_table[s->level].func))(s, flush);
if (bstate == finish_started || bstate == finish_done) {
s->status = FINISH_STATE;
}
if (bstate == need_more || bstate == finish_started) {
if (strm->avail_out == 0) {
s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
}
return Z_OK;
/* If flush != Z_NO_FLUSH && avail_out == 0, the next call
* of deflate should use the same flush parameter to make sure
* that the flush is complete. So we don't have to output an
* empty block here, this will be done at next call. This also
* ensures that for a very small output buffer, we emit at most
* one empty block.
*/
}
if (bstate == block_done) {
if (flush == Z_PARTIAL_FLUSH) {
zlib_tr_align(s);
} else if (flush == Z_PACKET_FLUSH) {
/* Output just the 3-bit `stored' block type value,
but not a zero length. */
zlib_tr_stored_type_only(s);
} else { /* FULL_FLUSH or SYNC_FLUSH */
zlib_tr_stored_block(s, (char*)0, 0L, 0);
/* For a full flush, this empty block will be recognized
* as a special marker by inflate_sync().
*/
if (flush == Z_FULL_FLUSH) {
CLEAR_HASH(s); /* forget history */
}
}
flush_pending(strm);
if (strm->avail_out == 0) {
s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
return Z_OK;
}
}
}
Assert(strm->avail_out > 0, "bug2");
if (flush != Z_FINISH) return Z_OK;
if (!s->noheader) {
/* Write zlib trailer (adler32) */
putShortMSB(s, (uInt)(strm->adler >> 16));
putShortMSB(s, (uInt)(strm->adler & 0xffff));
}
flush_pending(strm);
/* If avail_out is zero, the application will call deflate again
* to flush the rest.
*/
if (!s->noheader) {
s->noheader = -1; /* write the trailer only once! */
}
if (s->pending == 0) {
Assert(s->bi_valid == 0, "bi_buf not flushed");
return Z_STREAM_END;
}
return Z_OK;
}
/* ========================================================================= */
int zlib_deflateEnd(
z_streamp strm
)
{
int status;
deflate_state *s;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
s = (deflate_state *) strm->state;
status = s->status;
if (status != INIT_STATE && status != BUSY_STATE &&
status != FINISH_STATE) {
return Z_STREAM_ERROR;
}
strm->state = NULL;
return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}
/* ===========================================================================
* Read a new buffer from the current input stream, update the adler32
* and total number of bytes read. All deflate() input goes through
* this function so some applications may wish to modify it to avoid
* allocating a large strm->next_in buffer and copying from it.
* (See also flush_pending()).
*/
static int read_buf(
z_streamp strm,
Byte *buf,
unsigned size
)
{
unsigned len = strm->avail_in;
if (len > size) len = size;
if (len == 0) return 0;
strm->avail_in -= len;
if (!DEFLATE_NEED_CHECKSUM(strm)) {}
else if (!((deflate_state *)(strm->state))->noheader) {
strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
}
memcpy(buf, strm->next_in, len);
strm->next_in += len;
strm->total_in += len;
return (int)len;
}
/* ===========================================================================
* Initialize the "longest match" routines for a new zlib stream
*/
static void lm_init(
deflate_state *s
)
{
s->window_size = (ulg)2L*s->w_size;
CLEAR_HASH(s);
/* Set the default configuration parameters:
*/
s->max_lazy_match = configuration_table[s->level].max_lazy;
s->good_match = configuration_table[s->level].good_length;
s->nice_match = configuration_table[s->level].nice_length;
s->max_chain_length = configuration_table[s->level].max_chain;
s->strstart = 0;
s->block_start = 0L;
s->lookahead = 0;
s->match_length = s->prev_length = MIN_MATCH-1;
s->match_available = 0;
s->ins_h = 0;
}
/* ===========================================================================
* Set match_start to the longest match starting at the given string and
* return its length. Matches shorter or equal to prev_length are discarded,
* in which case the result is equal to prev_length and match_start is
* garbage.
* IN assertions: cur_match is the head of the hash chain for the current
* string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
* OUT assertion: the match length is not greater than s->lookahead.
*/
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
* match.S. The code will be functionally equivalent.
*/
static uInt longest_match(
deflate_state *s,
IPos cur_match /* current match */
)
{
unsigned chain_length = s->max_chain_length;/* max hash chain length */
register Byte *scan = s->window + s->strstart; /* current string */
register Byte *match; /* matched string */
register int len; /* length of current match */
int best_len = s->prev_length; /* best match length so far */
int nice_match = s->nice_match; /* stop if match long enough */
IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
s->strstart - (IPos)MAX_DIST(s) : NIL;
/* Stop when cur_match becomes <= limit. To simplify the code,
* we prevent matches with the string of window index 0.
*/
Pos *prev = s->prev;
uInt wmask = s->w_mask;
#ifdef UNALIGNED_OK
/* Compare two bytes at a time. Note: this is not always beneficial.
* Try with and without -DUNALIGNED_OK to check.
*/
register Byte *strend = s->window + s->strstart + MAX_MATCH - 1;
register ush scan_start = *(ush*)scan;
register ush scan_end = *(ush*)(scan+best_len-1);
#else
register Byte *strend = s->window + s->strstart + MAX_MATCH;
register Byte scan_end1 = scan[best_len-1];
register Byte scan_end = scan[best_len];
#endif
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
* It is easy to get rid of this optimization if necessary.
*/
Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
/* Do not waste too much time if we already have a good match: */
if (s->prev_length >= s->good_match) {
chain_length >>= 2;
}
/* Do not look for matches beyond the end of the input. This is necessary
* to make deflate deterministic.
*/
if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
do {
Assert(cur_match < s->strstart, "no future");
match = s->window + cur_match;
/* Skip to next match if the match length cannot increase
* or if the match length is less than 2:
*/
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
/* This code assumes sizeof(unsigned short) == 2. Do not use
* UNALIGNED_OK if your compiler uses a different size.
*/
if (*(ush*)(match+best_len-1) != scan_end ||
*(ush*)match != scan_start) continue;
/* It is not necessary to compare scan[2] and match[2] since they are
* always equal when the other bytes match, given that the hash keys
* are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
* strstart+3, +5, ... up to strstart+257. We check for insufficient
* lookahead only every 4th comparison; the 128th check will be made
* at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
* necessary to put more guard bytes at the end of the window, or
* to check more often for insufficient lookahead.
*/
Assert(scan[2] == match[2], "scan[2]?");
scan++, match++;
do {
} while (*(ush*)(scan+=2) == *(ush*)(match+=2) &&
*(ush*)(scan+=2) == *(ush*)(match+=2) &&
*(ush*)(scan+=2) == *(ush*)(match+=2) &&
*(ush*)(scan+=2) == *(ush*)(match+=2) &&
scan < strend);
/* The funny "do {}" generates better code on most compilers */
/* Here, scan <= window+strstart+257 */
Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
if (*scan == *match) scan++;
len = (MAX_MATCH - 1) - (int)(strend-scan);
scan = strend - (MAX_MATCH-1);
#else /* UNALIGNED_OK */
if (match[best_len] != scan_end ||
match[best_len-1] != scan_end1 ||
*match != *scan ||
*++match != scan[1]) continue;
/* The check at best_len-1 can be removed because it will be made
* again later. (This heuristic is not always a win.)
* It is not necessary to compare scan[2] and match[2] since they
* are always equal when the other bytes match, given that
* the hash keys are equal and that HASH_BITS >= 8.
*/
scan += 2, match++;
Assert(*scan == *match, "match[2]?");
/* We check for insufficient lookahead only every 8th comparison;
* the 256th check will be made at strstart+258.
*/
do {
} while (*++scan == *++match && *++scan == *++match &&
*++scan == *++match && *++scan == *++match &&
*++scan == *++match && *++scan == *++match &&
*++scan == *++match && *++scan == *++match &&
scan < strend);
Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
len = MAX_MATCH - (int)(strend - scan);
scan = strend - MAX_MATCH;
#endif /* UNALIGNED_OK */
if (len > best_len) {
s->match_start = cur_match;
best_len = len;
if (len >= nice_match) break;
#ifdef UNALIGNED_OK
scan_end = *(ush*)(scan+best_len-1);
#else
scan_end1 = scan[best_len-1];
scan_end = scan[best_len];
#endif
}
} while ((cur_match = prev[cur_match & wmask]) > limit
&& --chain_length != 0);
if ((uInt)best_len <= s->lookahead) return best_len;
return s->lookahead;
}
#ifdef DEBUG_ZLIB
/* ===========================================================================
* Check that the match at match_start is indeed a match.
*/
static void check_match(
deflate_state *s,
IPos start,
IPos match,
int length
)
{
/* check that the match is indeed a match */
if (memcmp((char *)s->window + match, (char *)s->window + start, length)) {
fprintf(stderr, " start %u, match %u, length %d\n",
start, match, length);
do {
fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
} while (--length != 0);
z_error("invalid match");
}
if (z_verbose > 1) {
fprintf(stderr,"\\[%d,%d]", start-match, length);
do { putc(s->window[start++], stderr); } while (--length != 0);
}
}
#else
# define check_match(s, start, match, length)
#endif
/* ===========================================================================
* Fill the window when the lookahead becomes insufficient.
* Updates strstart and lookahead.
*
* IN assertion: lookahead < MIN_LOOKAHEAD
* OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
* At least one byte has been read, or avail_in == 0; reads are
* performed for at least two bytes (required for the zip translate_eol
* option -- not supported here).
*/
static void fill_window(
deflate_state *s
)
{
register unsigned n, m;
register Pos *p;
unsigned more; /* Amount of free space at the end of the window. */
uInt wsize = s->w_size;
do {
more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
/* Deal with !@#$% 64K limit: */
if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
more = wsize;
} else if (more == (unsigned)(-1)) {
/* Very unlikely, but possible on 16 bit machine if strstart == 0
* and lookahead == 1 (input done one byte at time)
*/
more--;
/* If the window is almost full and there is insufficient lookahead,
* move the upper half to the lower one to make room in the upper half.
*/
} else if (s->strstart >= wsize+MAX_DIST(s)) {
memcpy((char *)s->window, (char *)s->window+wsize,
(unsigned)wsize);
s->match_start -= wsize;
s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
s->block_start -= (long) wsize;
/* Slide the hash table (could be avoided with 32 bit values
at the expense of memory usage). We slide even when level == 0
to keep the hash table consistent if we switch back to level > 0
later. (Using level 0 permanently is not an optimal usage of
zlib, so we don't care about this pathological case.)
*/
n = s->hash_size;
p = &s->head[n];
do {
m = *--p;
*p = (Pos)(m >= wsize ? m-wsize : NIL);
} while (--n);
n = wsize;
p = &s->prev[n];
do {
m = *--p;
*p = (Pos)(m >= wsize ? m-wsize : NIL);
/* If n is not on any hash chain, prev[n] is garbage but
* its value will never be used.
*/
} while (--n);
more += wsize;
}
if (s->strm->avail_in == 0) return;
/* If there was no sliding:
* strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
* more == window_size - lookahead - strstart
* => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
* => more >= window_size - 2*WSIZE + 2
* In the BIG_MEM or MMAP case (not yet supported),
* window_size == input_size + MIN_LOOKAHEAD &&
* strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
* Otherwise, window_size == 2*WSIZE so more >= 2.
* If there was sliding, more >= WSIZE. So in all cases, more >= 2.
*/
Assert(more >= 2, "more < 2");
n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
s->lookahead += n;
/* Initialize the hash value now that we have some input: */
if (s->lookahead >= MIN_MATCH) {
s->ins_h = s->window[s->strstart];
UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
}
/* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
* but this is not important since only literal bytes will be emitted.
*/
} while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
}
/* ===========================================================================
* Flush the current block, with given end-of-file flag.
* IN assertion: strstart is set to the end of the current match.
*/
#define FLUSH_BLOCK_ONLY(s, eof) { \
zlib_tr_flush_block(s, (s->block_start >= 0L ? \
(char *)&s->window[(unsigned)s->block_start] : \
NULL), \
(ulg)((long)s->strstart - s->block_start), \
(eof)); \
s->block_start = s->strstart; \
flush_pending(s->strm); \
Tracev((stderr,"[FLUSH]")); \
}
/* Same but force premature exit if necessary. */
#define FLUSH_BLOCK(s, eof) { \
FLUSH_BLOCK_ONLY(s, eof); \
if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
}
/* ===========================================================================
* Copy without compression as much as possible from the input stream, return
* the current block state.
* This function does not insert new strings in the dictionary since
* uncompressible data is probably not useful. This function is used
* only for the level=0 compression option.
* NOTE: this function should be optimized to avoid extra copying from
* window to pending_buf.
*/
static block_state deflate_stored(
deflate_state *s,
int flush
)
{
/* Stored blocks are limited to 0xffff bytes, pending_buf is limited
* to pending_buf_size, and each stored block has a 5 byte header:
*/
ulg max_block_size = 0xffff;
ulg max_start;
if (max_block_size > s->pending_buf_size - 5) {
max_block_size = s->pending_buf_size - 5;
}
/* Copy as much as possible from input to output: */
for (;;) {
/* Fill the window as much as possible: */
if (s->lookahead <= 1) {
Assert(s->strstart < s->w_size+MAX_DIST(s) ||
s->block_start >= (long)s->w_size, "slide too late");
fill_window(s);
if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
if (s->lookahead == 0) break; /* flush the current block */
}
Assert(s->block_start >= 0L, "block gone");
s->strstart += s->lookahead;
s->lookahead = 0;
/* Emit a stored block if pending_buf will be full: */
max_start = s->block_start + max_block_size;
if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
/* strstart == 0 is possible when wraparound on 16-bit machine */
s->lookahead = (uInt)(s->strstart - max_start);
s->strstart = (uInt)max_start;
FLUSH_BLOCK(s, 0);
}
/* Flush if we may have to slide, otherwise block_start may become
* negative and the data will be gone:
*/
if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
FLUSH_BLOCK(s, 0);
}
}
FLUSH_BLOCK(s, flush == Z_FINISH);
return flush == Z_FINISH ? finish_done : block_done;
}
/* ===========================================================================
* Compress as much as possible from the input stream, return the current
* block state.
* This function does not perform lazy evaluation of matches and inserts
* new strings in the dictionary only for unmatched strings or for short
* matches. It is used only for the fast compression options.
*/
static block_state deflate_fast(
deflate_state *s,
int flush
)
{
IPos hash_head = NIL; /* head of the hash chain */
int bflush; /* set if current block must be flushed */
for (;;) {
/* Make sure that we always have enough lookahead, except
* at the end of the input file. We need MAX_MATCH bytes
* for the next match, plus MIN_MATCH bytes to insert the
* string following the next match.
*/
if (s->lookahead < MIN_LOOKAHEAD) {
fill_window(s);
if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
return need_more;
}
if (s->lookahead == 0) break; /* flush the current block */
}
/* Insert the string window[strstart .. strstart+2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
if (s->lookahead >= MIN_MATCH) {
INSERT_STRING(s, s->strstart, hash_head);
}
/* Find the longest match, discarding those <= prev_length.
* At this point we have always match_length < MIN_MATCH
*/
if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
/* To simplify the code, we prevent matches with the string
* of window index 0 (in particular we have to avoid a match
* of the string with itself at the start of the input file).
*/
if (s->strategy != Z_HUFFMAN_ONLY) {
s->match_length = longest_match (s, hash_head);
}
/* longest_match() sets match_start */
}
if (s->match_length >= MIN_MATCH) {
check_match(s, s->strstart, s->match_start, s->match_length);
bflush = zlib_tr_tally(s, s->strstart - s->match_start,
s->match_length - MIN_MATCH);
s->lookahead -= s->match_length;
/* Insert new strings in the hash table only if the match length
* is not too large. This saves time but degrades compression.
*/
if (s->match_length <= s->max_insert_length &&
s->lookahead >= MIN_MATCH) {
s->match_length--; /* string at strstart already in hash table */
do {
s->strstart++;
INSERT_STRING(s, s->strstart, hash_head);
/* strstart never exceeds WSIZE-MAX_MATCH, so there are
* always MIN_MATCH bytes ahead.
*/
} while (--s->match_length != 0);
s->strstart++;
} else {
s->strstart += s->match_length;
s->match_length = 0;
s->ins_h = s->window[s->strstart];
UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
/* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
* matter since it will be recomputed at next deflate call.
*/
}
} else {
/* No match, output a literal byte */
Tracevv((stderr,"%c", s->window[s->strstart]));
bflush = zlib_tr_tally (s, 0, s->window[s->strstart]);
s->lookahead--;
s->strstart++;
}
if (bflush) FLUSH_BLOCK(s, 0);
}
FLUSH_BLOCK(s, flush == Z_FINISH);
return flush == Z_FINISH ? finish_done : block_done;
}
/* ===========================================================================
* Same as above, but achieves better compression. We use a lazy
* evaluation for matches: a match is finally adopted only if there is
* no better match at the next window position.
*/
static block_state deflate_slow(
deflate_state *s,
int flush
)
{
IPos hash_head = NIL; /* head of hash chain */
int bflush; /* set if current block must be flushed */
/* Process the input block. */
for (;;) {
/* Make sure that we always have enough lookahead, except
* at the end of the input file. We need MAX_MATCH bytes
* for the next match, plus MIN_MATCH bytes to insert the
* string following the next match.
*/
if (s->lookahead < MIN_LOOKAHEAD) {
fill_window(s);
if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
return need_more;
}
if (s->lookahead == 0) break; /* flush the current block */
}
/* Insert the string window[strstart .. strstart+2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
if (s->lookahead >= MIN_MATCH) {
INSERT_STRING(s, s->strstart, hash_head);
}
/* Find the longest match, discarding those <= prev_length.
*/
s->prev_length = s->match_length, s->prev_match = s->match_start;
s->match_length = MIN_MATCH-1;
if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
s->strstart - hash_head <= MAX_DIST(s)) {
/* To simplify the code, we prevent matches with the string
* of window index 0 (in particular we have to avoid a match
* of the string with itself at the start of the input file).
*/
if (s->strategy != Z_HUFFMAN_ONLY) {
s->match_length = longest_match (s, hash_head);
}
/* longest_match() sets match_start */
if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
(s->match_length == MIN_MATCH &&
s->strstart - s->match_start > TOO_FAR))) {
/* If prev_match is also MIN_MATCH, match_start is garbage
* but we will ignore the current match anyway.
*/
s->match_length = MIN_MATCH-1;
}
}
/* If there was a match at the previous step and the current
* match is not better, output the previous match:
*/
if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
/* Do not insert strings in hash table beyond this. */
check_match(s, s->strstart-1, s->prev_match, s->prev_length);
bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match,
s->prev_length - MIN_MATCH);
/* Insert in hash table all strings up to the end of the match.
* strstart-1 and strstart are already inserted. If there is not
* enough lookahead, the last two strings are not inserted in
* the hash table.
*/
s->lookahead -= s->prev_length-1;
s->prev_length -= 2;
do {
if (++s->strstart <= max_insert) {
INSERT_STRING(s, s->strstart, hash_head);
}
} while (--s->prev_length != 0);
s->match_available = 0;
s->match_length = MIN_MATCH-1;
s->strstart++;
if (bflush) FLUSH_BLOCK(s, 0);
} else if (s->match_available) {
/* If there was no match at the previous position, output a
* single literal. If there was a match but the current match
* is longer, truncate the previous match to a single literal.
*/
Tracevv((stderr,"%c", s->window[s->strstart-1]));
if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) {
FLUSH_BLOCK_ONLY(s, 0);
}
s->strstart++;
s->lookahead--;
if (s->strm->avail_out == 0) return need_more;
} else {
/* There is no previous match to compare with, wait for
* the next step to decide.
*/
s->match_available = 1;
s->strstart++;
s->lookahead--;
}
}
Assert (flush != Z_NO_FLUSH, "no flush?");
if (s->match_available) {
Tracevv((stderr,"%c", s->window[s->strstart-1]));
zlib_tr_tally (s, 0, s->window[s->strstart-1]);
s->match_available = 0;
}
FLUSH_BLOCK(s, flush == Z_FINISH);
return flush == Z_FINISH ? finish_done : block_done;
}
int zlib_deflate_workspacesize(int windowBits, int memLevel)
{
if (windowBits < 0) /* undocumented feature: suppress zlib header */
windowBits = -windowBits;
/* Since the return value is typically passed to vmalloc() unchecked... */
BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 ||
windowBits > 15);
return sizeof(deflate_workspace)
+ zlib_deflate_window_memsize(windowBits)
+ zlib_deflate_prev_memsize(windowBits)
+ zlib_deflate_head_memsize(memLevel)
+ zlib_deflate_overlay_memsize(memLevel);
}
int zlib_deflate_dfltcc_enabled(void)
{
return DEFLATE_DFLTCC_ENABLED();
}
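/*
 * Illustrative sketch (not part of the original source): unlike userspace
 * zlib, this in-kernel deflate has no allocator callbacks; the caller sizes
 * and provides the workspace itself. A minimal one-shot compression, with
 * error checks omitted and default windowBits/memLevel assumed, could look
 * like:
 *
 *	z_stream strm = {};
 *
 *	strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
 *							    DEF_MEM_LEVEL));
 *	if (!strm.workspace)
 *		return -ENOMEM;
 *	zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
 *			  MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
 *	strm.next_in = src;   strm.avail_in = src_len;
 *	strm.next_out = dst;  strm.avail_out = dst_len;
 *	zlib_deflate(&strm, Z_FINISH);	// returns Z_STREAM_END when done
 *	zlib_deflateEnd(&strm);
 *	vfree(strm.workspace);
 */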
// SPDX-License-Identifier: GPL-2.0
/*
* NETLINK Netlink attributes
*
* Authors: Thomas Graf <tgraf@suug.ch>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/nospec.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/netlink.h>
/* For these data types, attribute length should be exactly the given
* size. However, to maintain compatibility with broken commands, if the
* attribute length does not match the expected size a warning is emitted
* to the user that the command is sending invalid data and needs to be fixed.
*/
static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
[NLA_U8] = sizeof(u8),
[NLA_U16] = sizeof(u16),
[NLA_U32] = sizeof(u32),
[NLA_U64] = sizeof(u64),
[NLA_S8] = sizeof(s8),
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
[NLA_BE16] = sizeof(__be16),
[NLA_BE32] = sizeof(__be32),
};
static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_U8] = sizeof(u8),
[NLA_U16] = sizeof(u16),
[NLA_U32] = sizeof(u32),
[NLA_U64] = sizeof(u64),
[NLA_MSECS] = sizeof(u64),
[NLA_NESTED] = NLA_HDRLEN,
[NLA_S8] = sizeof(s8),
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
[NLA_BE16] = sizeof(__be16),
[NLA_BE32] = sizeof(__be32),
};
/*
* Nested policies might refer back to the original
* policy in some cases, and userspace could try to
* abuse that and recurse by nesting in the right
* ways. Limit recursion to avoid this problem.
*/
#define MAX_POLICY_RECURSION_DEPTH 10
static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack,
struct nlattr **tb, unsigned int depth);
static int validate_nla_bitfield32(const struct nlattr *nla,
const u32 valid_flags_mask)
{
const struct nla_bitfield32 *bf = nla_data(nla);
if (!valid_flags_mask)
return -EINVAL;
/*disallow invalid bit selector */
if (bf->selector & ~valid_flags_mask)
return -EINVAL;
/*disallow invalid bit values */
if (bf->value & ~valid_flags_mask)
return -EINVAL;
/*disallow valid bit values that are not selected*/
if (bf->value & ~bf->selector)
return -EINVAL;
return 0;
}
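/*
 * Illustrative sketch (not part of the original source): a struct
 * nla_bitfield32 carries both the bits being changed (selector) and their
 * new values (value). With a policy valid mask of 0x3, for example:
 *
 *	{ .value = 0x1, .selector = 0x3 }	-> valid (clear bit 1, set bit 0)
 *	{ .value = 0x1, .selector = 0x1 }	-> valid (bit 1 untouched)
 *	{ .value = 0x2, .selector = 0x1 }	-> -EINVAL (value bit not selected)
 *	{ .value = 0x4, .selector = 0x4 }	-> -EINVAL (outside valid mask)
 */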
static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack,
unsigned int validate, unsigned int depth)
{
const struct nlattr *entry;
int rem;
nla_for_each_attr(entry, head, len, rem) {
int ret;
if (nla_len(entry) == 0) continue; if (nla_len(entry) < NLA_HDRLEN) { NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy,
"Array element too short");
return -ERANGE;
}
ret = __nla_validate_parse(nla_data(entry), nla_len(entry),
maxtype, policy, validate, extack,
NULL, depth + 1);
if (ret < 0)
return ret;
}
return 0;
}
void nla_get_range_unsigned(const struct nla_policy *pt,
struct netlink_range_validation *range)
{
WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR &&
(pt->min < 0 || pt->max < 0));
range->min = 0; switch (pt->type) {
case NLA_U8:
range->max = U8_MAX;
break;
case NLA_U16:
case NLA_BE16:
case NLA_BINARY:
range->max = U16_MAX;
break;
case NLA_U32:
case NLA_BE32:
range->max = U32_MAX;
break;
case NLA_U64:
case NLA_UINT:
case NLA_MSECS:
range->max = U64_MAX;
break;
default:
WARN_ON_ONCE(1);
return;
}
switch (pt->validation_type) {
case NLA_VALIDATE_RANGE:
case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
range->min = pt->min;
range->max = pt->max;
break;
case NLA_VALIDATE_RANGE_PTR:
*range = *pt->range;
break;
case NLA_VALIDATE_MIN:
range->min = pt->min;
break;
case NLA_VALIDATE_MAX:
range->max = pt->max;
break;
default:
break;
}
}
static int nla_validate_range_unsigned(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack,
unsigned int validate)
{
struct netlink_range_validation range;
u64 value;
switch (pt->type) {
case NLA_U8:
value = nla_get_u8(nla);
break;
case NLA_U16:
value = nla_get_u16(nla);
break;
case NLA_U32:
value = nla_get_u32(nla);
break;
case NLA_U64:
value = nla_get_u64(nla);
break;
case NLA_UINT:
value = nla_get_uint(nla); break;
case NLA_MSECS:
value = nla_get_u64(nla);
break;
case NLA_BINARY:
value = nla_len(nla);
break;
case NLA_BE16:
value = ntohs(nla_get_be16(nla));
break;
case NLA_BE32:
value = ntohl(nla_get_be32(nla));
break;
default:
return -EINVAL;
}
nla_get_range_unsigned(pt, &range); if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG && pt->type == NLA_BINARY && value > range.max) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, pt->type);
if (validate & NL_VALIDATE_STRICT_ATTRS) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"invalid attribute length");
return -EINVAL;
}
/* this assumes min <= max (don't validate against min) */
return 0;
}
if (value < range.min || value > range.max) { bool binary = pt->type == NLA_BINARY; if (binary) NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"binary attribute size out of range");
else
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"integer out of range");
return -ERANGE;
}
return 0;
}
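/*
 * Illustrative sketch (not part of the original source): the range checks
 * above are driven by how the attribute was declared in its policy, e.g.
 * (hypothetical attribute names):
 *
 *	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
 *		[MY_ATTR_PORT]  = NLA_POLICY_RANGE(NLA_U16, 1, 65535),
 *		[MY_ATTR_COUNT] = NLA_POLICY_MIN(NLA_U32, 1),
 *	};
 *
 * NLA_POLICY_RANGE() sets validation_type to NLA_VALIDATE_RANGE, so
 * nla_get_range_unsigned() reports [1, 65535] for MY_ATTR_PORT.
 */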
void nla_get_range_signed(const struct nla_policy *pt,
struct netlink_range_validation_signed *range)
{
switch (pt->type) {
case NLA_S8:
range->min = S8_MIN;
range->max = S8_MAX;
break;
case NLA_S16:
range->min = S16_MIN;
range->max = S16_MAX;
break;
case NLA_S32:
range->min = S32_MIN;
range->max = S32_MAX;
break;
case NLA_S64:
case NLA_SINT:
range->min = S64_MIN;
range->max = S64_MAX;
break;
default:
WARN_ON_ONCE(1);
return;
}
switch (pt->validation_type) {
case NLA_VALIDATE_RANGE:
range->min = pt->min;
range->max = pt->max;
break;
case NLA_VALIDATE_RANGE_PTR:
*range = *pt->range_signed;
break;
case NLA_VALIDATE_MIN:
range->min = pt->min;
break;
case NLA_VALIDATE_MAX:
range->max = pt->max;
break;
default:
break;
}
}
static int nla_validate_int_range_signed(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct netlink_range_validation_signed range;
s64 value;
switch (pt->type) {
case NLA_S8:
value = nla_get_s8(nla);
break;
case NLA_S16:
value = nla_get_s16(nla);
break;
case NLA_S32:
value = nla_get_s32(nla);
break;
case NLA_S64:
value = nla_get_s64(nla);
break;
case NLA_SINT:
value = nla_get_sint(nla);
break;
default:
return -EINVAL;
}
nla_get_range_signed(pt, &range);
if (value < range.min || value > range.max) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"integer out of range");
return -ERANGE;
}
return 0;
}
static int nla_validate_int_range(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack,
unsigned int validate)
{
switch (pt->type) {
case NLA_U8:
case NLA_U16:
case NLA_U32:
case NLA_U64:
case NLA_UINT:
case NLA_MSECS:
case NLA_BINARY:
case NLA_BE16:
case NLA_BE32:
return nla_validate_range_unsigned(pt, nla, extack, validate);
case NLA_S8:
case NLA_S16:
case NLA_S32:
case NLA_S64:
case NLA_SINT:
return nla_validate_int_range_signed(pt, nla, extack);
default:
WARN_ON(1);
return -EINVAL;
}
}
static int nla_validate_mask(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
u64 value;
switch (pt->type) {
case NLA_U8:
value = nla_get_u8(nla);
break;
case NLA_U16:
value = nla_get_u16(nla);
break;
case NLA_U32:
value = nla_get_u32(nla);
break;
case NLA_U64:
value = nla_get_u64(nla);
break;
case NLA_UINT:
value = nla_get_uint(nla);
break;
case NLA_BE16:
value = ntohs(nla_get_be16(nla));
break;
case NLA_BE32:
value = ntohl(nla_get_be32(nla));
break;
default:
return -EINVAL;
}
if (value & ~(u64)pt->mask) {
NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set");
return -EINVAL;
}
return 0;
}
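/*
 * Illustrative sketch (hypothetical, not part of this file): a policy entry
 * that lets nla_validate_mask() above reject reserved flag bits.
 */
static const struct nla_policy example_mask_policy[] = {
	[1] = NLA_POLICY_MASK(NLA_U32, 0x0000000f),	/* only bits 0-3 valid */
};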
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack, unsigned int depth)
{
u16 strict_start_type = policy[0].strict_start_type;
const struct nla_policy *pt;
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
int err = -ERANGE;
if (strict_start_type && type >= strict_start_type)
validate |= NL_VALIDATE_STRICT;
if (type <= 0 || type > maxtype)
return 0;
type = array_index_nospec(type, maxtype + 1);
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"invalid attribute length");
return -EINVAL;
}
}
if (validate & NL_VALIDATE_NESTED) {
if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
!(nla->nla_type & NLA_F_NESTED)) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"NLA_F_NESTED is missing");
return -EINVAL;
}
if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
    pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"NLA_F_NESTED not expected");
return -EINVAL;
}
}
switch (pt->type) {
case NLA_REJECT:
if (extack && pt->reject_message) {
NL_SET_BAD_ATTR(extack, nla);
extack->_msg = pt->reject_message;
return -EINVAL;
}
err = -EINVAL;
goto out_err;
case NLA_FLAG:
if (attrlen > 0)
goto out_err;
break;
case NLA_SINT:
case NLA_UINT:
if (attrlen != sizeof(u32) && attrlen != sizeof(u64)) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"invalid attribute length");
return -EINVAL;
}
break;
case NLA_BITFIELD32:
if (attrlen != sizeof(struct nla_bitfield32))
goto out_err;
err = validate_nla_bitfield32(nla, pt->bitfield32_valid);
if (err)
goto out_err;
break;
case NLA_NUL_STRING:
if (pt->len)
minlen = min_t(int, attrlen, pt->len + 1);
else
minlen = attrlen;
if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
err = -EINVAL;
goto out_err;
}
fallthrough;
case NLA_STRING:
if (attrlen < 1)
goto out_err;
if (pt->len) {
char *buf = nla_data(nla);
if (buf[attrlen - 1] == '\0')
attrlen--;
if (attrlen > pt->len)
goto out_err;
}
break;
case NLA_BINARY:
if (pt->len && attrlen > pt->len)
goto out_err;
break;
case NLA_NESTED:
/* a nested attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
if (pt->nested_policy) {
err = __nla_validate_parse(nla_data(nla), nla_len(nla),
pt->len, pt->nested_policy,
validate, extack, NULL,
depth + 1);
if (err < 0) {
/*
* return directly to preserve the inner
* error message/attribute pointer
*/
return err;
}
}
break;
case NLA_NESTED_ARRAY:
/* a nested array attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
if (pt->nested_policy) {
int err;
err = nla_validate_array(nla_data(nla), nla_len(nla),
pt->len, pt->nested_policy,
extack, validate, depth);
if (err < 0) {
/*
* return directly to preserve the inner
* error message/attribute pointer
*/
return err;
}
}
break;
case NLA_UNSPEC:
if (validate & NL_VALIDATE_UNSPEC) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Unsupported attribute");
return -EINVAL;
}
if (attrlen < pt->len)
goto out_err;
break;
default:
if (pt->len)
minlen = pt->len;
else
minlen = nla_attr_minlen[pt->type];
if (attrlen < minlen)
goto out_err;
}
/* further validation */
switch (pt->validation_type) {
case NLA_VALIDATE_NONE:
/* nothing to do */
break;
case NLA_VALIDATE_RANGE_PTR:
case NLA_VALIDATE_RANGE:
case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
case NLA_VALIDATE_MIN:
case NLA_VALIDATE_MAX:
err = nla_validate_int_range(pt, nla, extack, validate);
if (err)
return err;
break;
case NLA_VALIDATE_MASK:
err = nla_validate_mask(pt, nla, extack);
if (err)
return err;
break;
case NLA_VALIDATE_FUNCTION:
if (pt->validate) {
err = pt->validate(nla, extack);
if (err)
return err;
}
break;
}
return 0;
out_err:
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"Attribute failed policy validation");
return err;
}
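/*
 * Illustrative sketch (hypothetical): a nested policy entry. For such an
 * entry, validate_nla() above recurses into the inner attributes through
 * __nla_validate_parse() with depth + 1.
 */
static const struct nla_policy example_inner_policy[] = {
	[1] = { .type = NLA_U8 },
};
static const struct nla_policy example_outer_policy[] = {
	[1] = NLA_POLICY_NESTED(example_inner_policy),
};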
static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack,
struct nlattr **tb, unsigned int depth)
{
const struct nlattr *nla;
int rem;
if (depth >= MAX_POLICY_RECURSION_DEPTH) {
NL_SET_ERR_MSG(extack,
"allowed policy recursion depth exceeded");
return -EINVAL;
}
if (tb)
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
u16 type = nla_type(nla);
if (type == 0 || type > maxtype) {
if (validate & NL_VALIDATE_MAXTYPE) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Unknown attribute type");
return -EINVAL;
}
continue;
}
type = array_index_nospec(type, maxtype + 1);
if (policy) {
int err = validate_nla(nla, maxtype, policy,
validate, extack, depth);
if (err < 0)
return err;
}
if (tb)
tb[type] = (struct nlattr *)nla;
}
if (unlikely(rem > 0)) {
pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
rem, current->comm);
NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes"); if (validate & NL_VALIDATE_TRAILING)
return -EINVAL;
}
return 0;
}
/**
* __nla_validate - Validate a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation depends on the validate flags passed, see
* &enum netlink_validation for more details on that.
* See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
int __nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
extack, NULL, 0);
}
EXPORT_SYMBOL(__nla_validate);
/**
* nla_policy_len - Determine the max. length of a policy
* @p: policy to use
* @n: number of policies
*
* Determines the max. length of the policy. It is currently used
* to allocate Netlink buffers roughly the size of the actual
* message.
*
* Returns the maximum total length a message following this policy may have.
*/
int
nla_policy_len(const struct nla_policy *p, int n)
{
int i, len = 0;
for (i = 0; i < n; i++, p++) {
if (p->len)
len += nla_total_size(p->len);
else if (nla_attr_len[p->type])
len += nla_total_size(nla_attr_len[p->type]);
else if (nla_attr_minlen[p->type])
len += nla_total_size(nla_attr_minlen[p->type]);
}
return len;
}
EXPORT_SYMBOL(nla_policy_len);
/**
* __nla_parse - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
* @validate: validation strictness
* @extack: extended ACK pointer
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessible via the attribute type.
* Validation is controlled by the @validate parameter.
*
* Returns 0 on success or a negative error code.
*/
int __nla_parse(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
extack, tb, 0);
}
EXPORT_SYMBOL(__nla_parse);
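/*
 * Illustrative sketch reusing the hypothetical example_policy above: parse
 * an attribute stream into tb[] and read one attribute.
 */
static int example_parse(const struct nlattr *head, int len,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, EXAMPLE_ATTR_MAX, head, len,
			example_policy, extack);
	if (err)
		return err;
	if (tb[EXAMPLE_ATTR_MTU])
		pr_debug("mtu: %u\n", nla_get_u32(tb[EXAMPLE_ATTR_MTU]));
	return 0;
}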
/**
* nla_find - Find a specific attribute in a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @attrtype: type of attribute to look for
*
* Returns the first attribute in the stream matching the specified type.
*/
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
{
const struct nlattr *nla;
int rem;
nla_for_each_attr(nla, head, len, rem)
if (nla_type(nla) == attrtype)
return (struct nlattr *)nla;
return NULL;
}
EXPORT_SYMBOL(nla_find);
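/*
 * Illustrative sketch: locate a single attribute without a tb[] array,
 * using the hypothetical EXAMPLE_ATTR_MTU from the sketch above.
 */
static u32 example_find_mtu(const struct nlattr *head, int len)
{
	struct nlattr *nla = nla_find(head, len, EXAMPLE_ATTR_MTU);

	return nla ? nla_get_u32(nla) : 0;
}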
/**
* nla_strscpy - Copy string attribute payload into a sized buffer
* @dst: Where to copy the string to.
* @nla: Attribute to copy the string from.
* @dstsize: Size of destination buffer.
*
* Copies at most dstsize - 1 bytes into the destination buffer.
* Unlike strscpy() the destination buffer is always padded out.
*
* Return:
* * srclen - Returns @nla length (not including the trailing %NUL).
* * -E2BIG - If @dstsize is 0 or greater than U16_MAX or @nla length greater
* than @dstsize.
*/
ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla);
ssize_t ret;
size_t len;
if (dstsize == 0 || WARN_ON_ONCE(dstsize > U16_MAX))
return -E2BIG;
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
if (srclen >= dstsize) {
len = dstsize - 1;
ret = -E2BIG;
} else {
len = srclen;
ret = len;
}
memcpy(dst, src, len);
/* Zero pad end of dst. */
memset(dst + len, 0, dstsize - len);
return ret;
}
EXPORT_SYMBOL(nla_strscpy);
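/*
 * Illustrative sketch: copy a string attribute into a fixed-size buffer;
 * the destination is always NUL-terminated and zero padded.
 */
static int example_copy_name(const struct nlattr *nla)
{
	char name[16];
	ssize_t ret;

	ret = nla_strscpy(name, nla, sizeof(name));
	if (ret < 0)
		return ret;	/* -E2BIG: attribute longer than the buffer */
	return 0;
}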
/**
* nla_strdup - Copy string attribute payload into a newly allocated buffer
* @nla: attribute to copy the string from
* @flags: the type of memory to allocate (see kmalloc).
*
* Returns a pointer to the allocated buffer or NULL on error.
*/
char *nla_strdup(const struct nlattr *nla, gfp_t flags)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla), *dst;
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
dst = kmalloc(srclen + 1, flags);
if (dst != NULL) {
memcpy(dst, src, srclen);
dst[srclen] = '\0';
}
return dst;
}
EXPORT_SYMBOL(nla_strdup);
/**
* nla_memcpy - Copy a netlink attribute into another memory area
* @dest: where to copy to
* @src: netlink attribute to copy from
* @count: size of the destination area
*
* Note: The number of bytes copied is limited by the length of
* attribute's payload.
*
* Returns the number of bytes copied.
*/
int nla_memcpy(void *dest, const struct nlattr *src, int count)
{
int minlen = min_t(int, count, nla_len(src));
memcpy(dest, nla_data(src), minlen);
if (count > minlen)
memset(dest + minlen, 0, count - minlen);
return minlen;
}
EXPORT_SYMBOL(nla_memcpy);
/**
* nla_memcmp - Compare an attribute with sized memory area
* @nla: netlink attribute
* @data: memory area
* @size: size of memory area
*/
int nla_memcmp(const struct nlattr *nla, const void *data,
size_t size)
{
int d = nla_len(nla) - size;
if (d == 0)
d = memcmp(nla_data(nla), data, size);
return d;
}
EXPORT_SYMBOL(nla_memcmp);
/**
* nla_strcmp - Compare a string attribute against a string
* @nla: netlink string attribute
* @str: another string
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
int len = strlen(str);
char *buf = nla_data(nla);
int attrlen = nla_len(nla);
int d;
while (attrlen > 0 && buf[attrlen - 1] == '\0')
attrlen--;
d = attrlen - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
return d;
}
EXPORT_SYMBOL(nla_strcmp);
#ifdef CONFIG_NET
/**
* __nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct nlattr *nla;
nla = skb_put(skb, nla_total_size(attrlen));
nla->nla_type = attrtype;
nla->nla_len = nla_attr_size(attrlen);
memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
return nla;
}
EXPORT_SYMBOL(__nla_reserve);
/**
* __nla_reserve_64bit - reserve room for attribute on the skb and align it
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @padattr: attribute type for the padding
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it. It also ensures that this
* attribute will have a 64-bit aligned nla_data() area.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr)
{
nla_align_64bit(skb, padattr);
return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(__nla_reserve_64bit);
/**
* __nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the payload.
*/
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
return skb_put_zero(skb, NLA_ALIGN(attrlen));
}
EXPORT_SYMBOL(__nla_reserve_nohdr);
/**
* nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return NULL;
return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(nla_reserve);
/**
* nla_reserve_64bit - reserve room for attribute on the skb and align it
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @padattr: attribute type for the padding
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it. It also ensures that this
* attribute will have a 64-bit aligned nla_data() area.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
int padattr)
{
size_t len;
if (nla_need_padding_for_64bit(skb))
len = nla_total_size_64bit(attrlen);
else
len = nla_total_size(attrlen);
if (unlikely(skb_tailroom(skb) < len))
return NULL;
return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
}
EXPORT_SYMBOL(nla_reserve_64bit);
/**
* nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return NULL;
return __nla_reserve_nohdr(skb, attrlen);
}
EXPORT_SYMBOL(nla_reserve_nohdr);
/**
* __nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data)
{
struct nlattr *nla;
nla = __nla_reserve(skb, attrtype, attrlen);
memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put);
/**
* __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
* @padattr: attribute type for the padding
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr)
{
struct nlattr *nla;
nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put_64bit);
/**
* __nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute payload.
*/
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
void *start;
start = __nla_reserve_nohdr(skb, attrlen);
memcpy(start, data, attrlen);
}
EXPORT_SYMBOL(__nla_put_nohdr);
/**
* nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return -EMSGSIZE;
__nla_put(skb, attrtype, attrlen, data);
return 0;
}
EXPORT_SYMBOL(nla_put);
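/*
 * Illustrative sketch reusing the hypothetical attribute names above: emit
 * attributes into an skb, propagating -EMSGSIZE on insufficient tailroom.
 */
static int example_fill(struct sk_buff *skb)
{
	if (nla_put_u32(skb, EXAMPLE_ATTR_MTU, 1500) ||
	    nla_put_string(skb, EXAMPLE_ATTR_NAME, "eth0"))
		return -EMSGSIZE;
	return 0;
}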
/**
* nla_put_64bit - Add a netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
* @padattr: attribute type for the padding
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr)
{
size_t len;
if (nla_need_padding_for_64bit(skb))
len = nla_total_size_64bit(attrlen);
else
len = nla_total_size(attrlen);
if (unlikely(skb_tailroom(skb) < len))
return -EMSGSIZE;
__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
return 0;
}
EXPORT_SYMBOL(nla_put_64bit);
/**
* nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
__nla_put_nohdr(skb, attrlen, data);
return 0;
}
EXPORT_SYMBOL(nla_put_nohdr);
/**
* nla_append - Add a netlink attribute without header or padding
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_append(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
skb_put_data(skb, data, attrlen);
return 0;
}
EXPORT_SYMBOL(nla_append);
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* x86 APERF/MPERF KHz calculation for
* /sys/.../cpufreq/scaling_cur_freq
*
* Copyright (C) 2017 Intel Corp.
* Author: Len Brown <len.brown@intel.com>
*/
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/sched/isolation.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include "cpu.h"
struct aperfmperf {
seqcount_t seq;
unsigned long last_update;
u64 acnt;
u64 mcnt;
u64 aperf;
u64 mperf;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct aperfmperf, cpu_samples) = {
.seq = SEQCNT_ZERO(cpu_samples.seq)
};
static void init_counter_refs(void)
{
u64 aperf, mperf;
rdmsrq(MSR_IA32_APERF, aperf);
rdmsrq(MSR_IA32_MPERF, mperf);
this_cpu_write(cpu_samples.aperf, aperf);
this_cpu_write(cpu_samples.mperf, mperf);
}
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
/*
* APERF/MPERF frequency ratio computation.
*
* The scheduler wants to do frequency invariant accounting and needs a <1
* ratio to account for the 'current' frequency, corresponding to
* freq_curr / freq_max.
*
* Since the frequency freq_curr on x86 is controlled by micro-controller and
* our P-state setting is little more than a request/hint, we need to observe
* the effective frequency 'BusyMHz', i.e. the average frequency over a time
* interval after discarding idle time. This is given by:
*
* BusyMHz = delta_APERF / delta_MPERF * freq_base
*
* where freq_base is the max non-turbo P-state.
*
* The freq_max term has to be set to a somewhat arbitrary value, because we
* can't know which turbo states will be available at a given point in time:
* it all depends on the thermal headroom of the entire package. We set it to
* the turbo level with 4 cores active.
*
* Benchmarks show that's a good compromise between the 1C turbo ratio
* (freq_curr/freq_max would rarely reach 1) and something close to freq_base,
* which would ignore the entire turbo range (a conspicuous part, making
* freq_curr/freq_max always maxed out).
*
* An exception to the heuristic above is the Atom uarch, where we choose the
* highest turbo level for freq_max since Atoms are generally oriented towards
* power efficiency.
*
* Setting freq_max to anything less than the 1C turbo ratio makes the ratio
* freq_curr / freq_max to eventually grow >1, in which case we clip it to 1.
*/
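/*
 * Illustrative sketch (not used by this file): the BusyMHz arithmetic from
 * the comment above, for hypothetical per-tick APERF/MPERF deltas. Assumes
 * delta_aperf * base_khz does not overflow 64 bits.
 */
static inline u64 example_busy_khz(u64 delta_aperf, u64 delta_mperf,
				   u64 base_khz)
{
	if (!delta_mperf)
		return 0;
	return div64_u64(delta_aperf * base_khz, delta_mperf);
}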
DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key);
static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
void arch_set_max_freq_ratio(bool turbo_disabled)
{
arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
arch_turbo_freq_ratio;
}
EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
static bool __init turbo_disabled(void)
{
u64 misc_en;
int err;
err = rdmsrq_safe(MSR_IA32_MISC_ENABLE, &misc_en);
if (err)
return false;
return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
{
int err;
err = rdmsrq_safe(MSR_ATOM_CORE_RATIOS, base_freq);
if (err)
return false;
err = rdmsrq_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
if (err)
return false;
*base_freq = (*base_freq >> 16) & 0x3F; /* max P state */
*turbo_freq = *turbo_freq & 0x3F; /* 1C turbo */
return true;
}
#define X86_MATCH(vfm) \
X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, NULL)
static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = {
X86_MATCH(INTEL_XEON_PHI_KNL),
X86_MATCH(INTEL_XEON_PHI_KNM),
{}
};
static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = {
X86_MATCH(INTEL_SKYLAKE_X),
{}
};
static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = {
X86_MATCH(INTEL_ATOM_GOLDMONT),
X86_MATCH(INTEL_ATOM_GOLDMONT_D),
X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS),
{}
};
static bool __init knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
int num_delta_fratio)
{
int fratio, delta_fratio, found;
int err, i;
u64 msr;
err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
if (err)
return false;
*base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
if (err)
return false;
fratio = (msr >> 8) & 0xFF;
i = 16;
found = 0;
do {
if (found >= num_delta_fratio) {
*turbo_freq = fratio;
return true;
}
delta_fratio = (msr >> (i + 5)) & 0x7;
if (delta_fratio) {
found += 1;
fratio -= delta_fratio;
}
i += 8;
} while (i < 64);
return true;
}
static bool __init skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
{
u64 ratios, counts;
u32 group_size;
int err, i;
err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
if (err)
return false;
*base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
if (err)
return false;
err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
if (err)
return false;
for (i = 0; i < 64; i += 8) {
group_size = (counts >> i) & 0xFF;
if (group_size >= size) {
*turbo_freq = (ratios >> i) & 0xFF;
return true;
}
}
return false;
}
static bool __init core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
{
u64 msr;
int err;
err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq);
if (err)
return false;
err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr);
if (err)
return false;
*base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
*turbo_freq = (msr >> 24) & 0xFF; /* 4C turbo */
/* The CPU may have less than 4 cores */
if (!*turbo_freq)
*turbo_freq = msr & 0xFF; /* 1C turbo */
return true;
}
static bool __init intel_set_max_freq_ratio(void)
{
u64 base_freq, turbo_freq;
u64 turbo_ratio;
if (slv_set_max_freq_ratio(&base_freq, &turbo_freq))
goto out;
if (x86_match_cpu(has_glm_turbo_ratio_limits) &&
skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
goto out;
if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
goto out;
if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
skx_set_max_freq_ratio(&base_freq, &turbo_freq, 4))
goto out;
if (core_set_max_freq_ratio(&base_freq, &turbo_freq))
goto out;
return false;
out:
/*
* Some hypervisors advertise X86_FEATURE_APERFMPERF
* but then fill all MSRs with zeroes.
* Some CPUs have turbo boost but don't declare any turbo ratio
* in MSR_TURBO_RATIO_LIMIT.
*/
if (!base_freq || !turbo_freq) {
pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n");
return false;
}
turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
if (!turbo_ratio) {
pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n");
return false;
}
arch_turbo_freq_ratio = turbo_ratio;
arch_set_max_freq_ratio(turbo_disabled());
return true;
}
#ifdef CONFIG_PM_SLEEP
static struct syscore_ops freq_invariance_syscore_ops = {
.resume = init_counter_refs,
};
static void register_freq_invariance_syscore_ops(void)
{
register_syscore_ops(&freq_invariance_syscore_ops);
}
#else
static inline void register_freq_invariance_syscore_ops(void) {}
#endif
static void freq_invariance_enable(void)
{
if (static_branch_unlikely(&arch_scale_freq_key)) {
WARN_ON_ONCE(1);
return;
}
static_branch_enable_cpuslocked(&arch_scale_freq_key);
register_freq_invariance_syscore_ops();
pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
}
void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled)
{
arch_turbo_freq_ratio = ratio;
arch_set_max_freq_ratio(turbo_disabled);
freq_invariance_enable();
}
static void __init bp_init_freq_invariance(void)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
if (intel_set_max_freq_ratio()) {
guard(cpus_read_lock)();
freq_invariance_enable();
}
}
static void disable_freq_invariance_workfn(struct work_struct *work)
{
int cpu;
static_branch_disable(&arch_scale_freq_key);
/*
* Set arch_freq_scale to a default value on all cpus
* This negates the effect of scaling
*/
for_each_possible_cpu(cpu)
per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
}
static DECLARE_WORK(disable_freq_invariance_work,
disable_freq_invariance_workfn);
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
static DEFINE_STATIC_KEY_FALSE(arch_hybrid_cap_scale_key);
struct arch_hybrid_cpu_scale {
unsigned long capacity;
unsigned long freq_ratio;
};
static struct arch_hybrid_cpu_scale __percpu *arch_cpu_scale;
/**
* arch_enable_hybrid_capacity_scale() - Enable hybrid CPU capacity scaling
*
* Allocate memory for per-CPU data used by hybrid CPU capacity scaling,
* initialize it and set the static key controlling its code paths.
*
* Must be called before arch_set_cpu_capacity().
*/
bool arch_enable_hybrid_capacity_scale(void)
{
int cpu;
if (static_branch_unlikely(&arch_hybrid_cap_scale_key)) {
WARN_ONCE(1, "Hybrid CPU capacity scaling already enabled");
return true;
}
arch_cpu_scale = alloc_percpu(struct arch_hybrid_cpu_scale);
if (!arch_cpu_scale)
return false;
for_each_possible_cpu(cpu) {
per_cpu_ptr(arch_cpu_scale, cpu)->capacity = SCHED_CAPACITY_SCALE;
per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio = arch_max_freq_ratio;
}
static_branch_enable(&arch_hybrid_cap_scale_key);
pr_info("Hybrid CPU capacity scaling enabled\n");
return true;
}
/**
* arch_set_cpu_capacity() - Set scale-invariance parameters for a CPU
* @cpu: Target CPU.
* @cap: Capacity of @cpu at its maximum frequency, relative to @max_cap.
* @max_cap: System-wide maximum CPU capacity.
* @cap_freq: Frequency of @cpu corresponding to @cap.
* @base_freq: Frequency of @cpu at which MPERF counts.
*
* The units in which @cap and @max_cap are expressed do not matter, so long
* as they are consistent, because the former is effectively divided by the
* latter. Analogously for @cap_freq and @base_freq.
*
* After calling this function for all CPUs, call arch_rebuild_sched_domains()
* to let the scheduler know that capacity-aware scheduling can be used going
* forward.
*/
void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap,
unsigned long cap_freq, unsigned long base_freq)
{
if (static_branch_likely(&arch_hybrid_cap_scale_key)) {
WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity,
div_u64(cap << SCHED_CAPACITY_SHIFT, max_cap));
WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio,
div_u64(cap_freq << SCHED_CAPACITY_SHIFT, base_freq));
} else {
WARN_ONCE(1, "Hybrid CPU capacity scaling not enabled");
}
}
unsigned long arch_scale_cpu_capacity(int cpu)
{
if (static_branch_unlikely(&arch_hybrid_cap_scale_key))
return READ_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity);
return SCHED_CAPACITY_SCALE;
}
EXPORT_SYMBOL_GPL(arch_scale_cpu_capacity);
static void scale_freq_tick(u64 acnt, u64 mcnt)
{
u64 freq_scale, freq_ratio;
if (!arch_scale_freq_invariant())
return;
if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
goto error;
if (static_branch_unlikely(&arch_hybrid_cap_scale_key))
freq_ratio = READ_ONCE(this_cpu_ptr(arch_cpu_scale)->freq_ratio);
else
freq_ratio = arch_max_freq_ratio;
if (check_mul_overflow(mcnt, freq_ratio, &mcnt) || !mcnt)
goto error;
freq_scale = div64_u64(acnt, mcnt);
if (!freq_scale)
goto error;
if (freq_scale > SCHED_CAPACITY_SCALE)
freq_scale = SCHED_CAPACITY_SCALE;
this_cpu_write(arch_freq_scale, freq_scale);
return;
error:
pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
schedule_work(&disable_freq_invariance_work);
}
#else
static inline void bp_init_freq_invariance(void) { }
static inline void scale_freq_tick(u64 acnt, u64 mcnt) { }
#endif /* CONFIG_X86_64 && CONFIG_SMP */
void arch_scale_freq_tick(void)
{
struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
u64 acnt, mcnt, aperf, mperf;
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
return;
rdmsrq(MSR_IA32_APERF, aperf);
rdmsrq(MSR_IA32_MPERF, mperf);
acnt = aperf - s->aperf;
mcnt = mperf - s->mperf;
s->aperf = aperf;
s->mperf = mperf;
raw_write_seqcount_begin(&s->seq);
s->last_update = jiffies;
s->acnt = acnt;
s->mcnt = mcnt;
raw_write_seqcount_end(&s->seq);
scale_freq_tick(acnt, mcnt);
}
/*
* Discard samples older than the defined maximum sample age of 20ms. There
* is no point in sending IPIs in such a case. If the scheduler tick was
* not running then the CPU is either idle or isolated.
*/
#define MAX_SAMPLE_AGE ((unsigned long)HZ / 50)
int arch_freq_get_on_cpu(int cpu)
{
struct aperfmperf *s = per_cpu_ptr(&cpu_samples, cpu);
unsigned int seq, freq;
unsigned long last;
u64 acnt, mcnt;
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
goto fallback;
do {
seq = raw_read_seqcount_begin(&s->seq);
last = s->last_update;
acnt = s->acnt;
mcnt = s->mcnt;
} while (read_seqcount_retry(&s->seq, seq));
/*
* Bail on invalid count and when the last update was too long ago,
* which covers idle and NOHZ full CPUs.
*/
if (!mcnt || (jiffies - last) > MAX_SAMPLE_AGE)
goto fallback;
return div64_u64((cpu_khz * acnt), mcnt);
fallback:
freq = cpufreq_quick_get(cpu);
return freq ? freq : cpu_khz;
}
static int __init bp_init_aperfmperf(void)
{
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
return 0;
init_counter_refs();
bp_init_freq_invariance();
return 0;
}
early_initcall(bp_init_aperfmperf);
void ap_init_aperfmperf(void)
{
if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
init_counter_refs();
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/mm/swapfile.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
* Swap reorganised 29.12.95, Stephen Tweedie
*/
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blk-cgroup.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/sort.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/zswap.h>
#include <linux/plist.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include "swap_table.h"
#include "internal.h"
#include "swap.h"
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static void swap_entries_free(struct swap_info_struct *si,
struct swap_cluster_info *ci,
swp_entry_t entry, unsigned int nr_pages);
static void swap_range_alloc(struct swap_info_struct *si,
unsigned int nr_entries);
static bool folio_swapcache_freeable(struct folio *folio);
static void move_cluster(struct swap_info_struct *si,
struct swap_cluster_info *ci, struct list_head *list,
enum swap_cluster_flags new_flags);
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
* Some modules use swappable objects and may try to swap them out under
* memory pressure (via the shrinker). Before doing so, they may wish to
* check to see if any swap space is available.
*/
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;
unsigned long swapfile_maximum_size;
#ifdef CONFIG_MIGRATION
bool swap_migration_ad_supported;
#endif /* CONFIG_MIGRATION */
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";
/*
* all active swap_info_structs
* protected with swap_lock, and ordered by priority.
*/
static PLIST_HEAD(swap_active_head);
/*
* all available (active, not full) swap_info_structs
* protected with swap_avail_lock, ordered by priority.
* This is used by folio_alloc_swap() instead of swap_active_head
* because swap_active_head includes all swap_info_structs,
* but folio_alloc_swap() doesn't need to look at full ones.
* This uses its own lock instead of swap_lock because when a
* swap_info_struct changes between not-full/full, it needs to
* add/remove itself to/from this list, but the swap_info_struct->lock
* is held and the locking order requires swap_lock to be taken
* before any swap_info_struct->lock.
*/
static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);
struct swap_info_struct *swap_info[MAX_SWAPFILES];
static struct kmem_cache *swap_table_cachep;
static DEFINE_MUTEX(swapon_mutex);
static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);
atomic_t nr_rotate_swap = ATOMIC_INIT(0);
struct percpu_swap_cluster {
struct swap_info_struct *si[SWAP_NR_ORDERS];
unsigned long offset[SWAP_NR_ORDERS];
local_lock_t lock;
};
static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = {
.si = { NULL },
.offset = { SWAP_ENTRY_INVALID },
.lock = INIT_LOCAL_LOCK(),
};
/* May return NULL on invalid type, caller must check for NULL return */
static struct swap_info_struct *swap_type_to_info(int type)
{
if (type >= MAX_SWAPFILES)
return NULL;
return READ_ONCE(swap_info[type]); /* rcu_dereference() */
}
/* May return NULL on invalid entry, caller must check for NULL return */
static struct swap_info_struct *swap_entry_to_info(swp_entry_t entry)
{
return swap_type_to_info(swp_type(entry));
}
static inline unsigned char swap_count(unsigned char ent)
{
return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
}
/*
* Use the second highest bit of inuse_pages counter as the indicator
* if one swap device is on the available plist, so the atomic can
* still be updated arithmetically while having special data embedded.
*
* inuse_pages counter is the only thing indicating if a device should
* be on avail_lists or not (except swapon / swapoff). By embedding the
* off-list bit in the atomic counter, updates no longer need any lock
* to check the list status.
*
* This bit will be set if the device is not on the plist and not
* usable, will be cleared if the device is on the plist.
*/
#define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2))
#define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT)
static long swap_usage_in_pages(struct swap_info_struct *si)
{
return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK;
}
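/*
 * Illustrative sketch (hypothetical helper, not used in this file): testing
 * the embedded off-list bit directly, which swap_usage_in_pages() masks out.
 */
static inline bool example_swap_dev_is_offlist(struct swap_info_struct *si)
{
	return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT;
}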
/* Reclaim the swap entry anyway if possible */
#define TTRS_ANYWAY 0x1
/*
* Reclaim the swap entry if there are no more mappings of the
* corresponding page
*/
#define TTRS_UNMAPPED 0x2
/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL 0x4
static bool swap_only_has_cache(struct swap_info_struct *si,
unsigned long offset, int nr_pages)
{
unsigned char *map = si->swap_map + offset;
unsigned char *map_end = map + nr_pages;
do {
VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
if (*map != SWAP_HAS_CACHE)
return false;
} while (++map < map_end);
return true;
}
static bool swap_is_last_map(struct swap_info_struct *si,
unsigned long offset, int nr_pages, bool *has_cache)
{
unsigned char *map = si->swap_map + offset;
unsigned char *map_end = map + nr_pages;
unsigned char count = *map;
if (swap_count(count) != 1 && swap_count(count) != SWAP_MAP_SHMEM)
return false;
while (++map < map_end) {
if (*map != count)
return false;
}
*has_cache = !!(count & SWAP_HAS_CACHE);
return true;
}
/*
* returns number of pages in the folio that backs the swap entry. If positive,
* the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no
* folio was associated with the swap entry.
*/
static int __try_to_reclaim_swap(struct swap_info_struct *si,
unsigned long offset, unsigned long flags)
{
const swp_entry_t entry = swp_entry(si->type, offset);
struct swap_cluster_info *ci;
struct folio *folio;
int ret, nr_pages;
bool need_reclaim;
again:
folio = swap_cache_get_folio(entry);
if (!folio)
return 0;
nr_pages = folio_nr_pages(folio);
ret = -nr_pages;
/*
* This function may be called from scan_swap_map_slots(), which itself is
* invoked while vmscan.c is reclaiming folios, so a folio lock may already
* be held. We have to use trylock to avoid deadlock. This is a special
* case; in usual operations use folio_free_swap() with an explicit
* folio_lock().
*/
if (!folio_trylock(folio))
goto out;
/*
* Offset could point to the middle of a large folio, or folio
* may no longer point to the expected offset before it's locked.
*/
if (!folio_matches_swap_entry(folio, entry)) {
folio_unlock(folio);
folio_put(folio);
goto again;
}
offset = swp_offset(folio->swap);
need_reclaim = ((flags & TTRS_ANYWAY) ||
((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
if (!need_reclaim || !folio_swapcache_freeable(folio))
goto out_unlock;
/*
* It's safe to delete the folio from swap cache only if the folio's
* swap_map is HAS_CACHE only, which means the slots have no page table
* reference or pending writeback, and can't be allocated to others.
*/
ci = swap_cluster_lock(si, offset);
need_reclaim = swap_only_has_cache(si, offset, nr_pages);
swap_cluster_unlock(ci);
if (!need_reclaim)
goto out_unlock;
swap_cache_del_folio(folio);
folio_set_dirty(folio);
ret = nr_pages;
out_unlock:
folio_unlock(folio);
out:
folio_put(folio);
return ret;
}
static inline struct swap_extent *first_se(struct swap_info_struct *sis)
{
struct rb_node *rb = rb_first(&sis->swap_extent_root);
return rb_entry(rb, struct swap_extent, rb_node);
}
static inline struct swap_extent *next_se(struct swap_extent *se)
{
struct rb_node *rb = rb_next(&se->rb_node);
return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
}
/*
* swapon tells the device that all the old swap contents can be discarded,
* to allow the swap device to optimize its wear-levelling.
*/
static int discard_swap(struct swap_info_struct *si)
{
struct swap_extent *se;
sector_t start_block;
sector_t nr_blocks;
int err = 0;
/* Do not discard the swap header page! */
se = first_se(si);
start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
if (nr_blocks) {
err = blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_KERNEL);
if (err)
return err;
cond_resched();
}
for (se = next_se(se); se; se = next_se(se)) {
start_block = se->start_block << (PAGE_SHIFT - 9);
nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_KERNEL);
if (err)
break;
cond_resched();
}
return err; /* That will often be -EOPNOTSUPP */
}
static struct swap_extent *
offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
{
struct swap_extent *se;
struct rb_node *rb;
rb = sis->swap_extent_root.rb_node;
while (rb) {
se = rb_entry(rb, struct swap_extent, rb_node);
if (offset < se->start_page)
rb = rb->rb_left;
else if (offset >= se->start_page + se->nr_pages)
rb = rb->rb_right;
else
return se;
}
/* It *must* be present */
BUG();
}
sector_t swap_folio_sector(struct folio *folio)
{
struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
struct swap_extent *se;
sector_t sector;
pgoff_t offset;
offset = swp_offset(folio->swap);
se = offset_to_swap_extent(sis, offset);
sector = se->start_block + (offset - se->start_page);
return sector << (PAGE_SHIFT - 9);
}
/*
* swap allocation tells the device that a cluster of swap can now be discarded,
* to allow the swap device to optimize its wear-levelling.
*/
static void discard_swap_cluster(struct swap_info_struct *si,
pgoff_t start_page, pgoff_t nr_pages)
{
struct swap_extent *se = offset_to_swap_extent(si, start_page);
while (nr_pages) {
pgoff_t offset = start_page - se->start_page;
sector_t start_block = se->start_block + offset;
sector_t nr_blocks = se->nr_pages - offset;
if (nr_blocks > nr_pages)
nr_blocks = nr_pages;
start_page += nr_blocks;
nr_pages -= nr_blocks;
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_NOIO))
break;
se = next_se(se);
}
}
#define LATENCY_LIMIT 256
static inline bool cluster_is_empty(struct swap_cluster_info *info)
{
return info->count == 0;
}
static inline bool cluster_is_discard(struct swap_cluster_info *info)
{
return info->flags == CLUSTER_FLAG_DISCARD;
}
static inline bool cluster_table_is_alloced(struct swap_cluster_info *ci)
{
return rcu_dereference_protected(ci->table, lockdep_is_held(&ci->lock));
}
static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order)
{
if (unlikely(ci->flags > CLUSTER_FLAG_USABLE))
return false;
if (!cluster_table_is_alloced(ci))
return false;
if (!order)
return true;
return cluster_is_empty(ci) || order == ci->order;
}
static inline unsigned int cluster_index(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
return ci - si->cluster_info;
}
static inline unsigned int cluster_offset(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
return cluster_index(si, ci) * SWAPFILE_CLUSTER;
}
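/*
 * Illustrative sketch (hypothetical helper): the inverse of cluster_offset()
 * above, mapping a swap offset back to its cluster descriptor.
 */
static inline struct swap_cluster_info *
example_offset_to_cluster(struct swap_info_struct *si, unsigned long offset)
{
	return &si->cluster_info[offset / SWAPFILE_CLUSTER];
}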
static struct swap_table *swap_table_alloc(gfp_t gfp)
{
struct folio *folio;
if (!SWP_TABLE_USE_PAGE)
return kmem_cache_zalloc(swap_table_cachep, gfp);
folio = folio_alloc(gfp | __GFP_ZERO, 0);
if (folio)
return folio_address(folio);
return NULL;
}
static void swap_table_free_folio_rcu_cb(struct rcu_head *head)
{
struct folio *folio;
folio = page_folio(container_of(head, struct page, rcu_head));
folio_put(folio);
}
static void swap_table_free(struct swap_table *table)
{
if (!SWP_TABLE_USE_PAGE) {
kmem_cache_free(swap_table_cachep, table);
return;
}
call_rcu(&(folio_page(virt_to_folio(table), 0)->rcu_head),
swap_table_free_folio_rcu_cb);
}
static void swap_cluster_free_table(struct swap_cluster_info *ci)
{
unsigned int ci_off;
struct swap_table *table;
/* Only an empty cluster's table is allowed to be freed */
lockdep_assert_held(&ci->lock);
VM_WARN_ON_ONCE(!cluster_is_empty(ci));
for (ci_off = 0; ci_off < SWAPFILE_CLUSTER; ci_off++)
VM_WARN_ON_ONCE(!swp_tb_is_null(__swap_table_get(ci, ci_off)));
table = (void *)rcu_dereference_protected(ci->table, true);
rcu_assign_pointer(ci->table, NULL);
swap_table_free(table);
}
/*
* Allocate swap table for one cluster. Attempt an atomic allocation first,
* then fallback to sleeping allocation.
*/
static struct swap_cluster_info *
swap_cluster_alloc_table(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
struct swap_table *table;
/*
* Only cluster isolation from the allocator does table allocation.
* Swap allocator uses percpu clusters and holds the local lock.
*/
lockdep_assert_held(&ci->lock);
lockdep_assert_held(&this_cpu_ptr(&percpu_swap_cluster)->lock);
/* The cluster must be free and was just isolated from the free list. */
VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci));
table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (table) {
rcu_assign_pointer(ci->table, table);
return ci;
}
/*
* Try a sleep allocation. Each isolated free cluster may cause
* a sleep allocation, but there is a limited number of them, so
* the potential recursive allocation is limited.
*/
spin_unlock(&ci->lock);
if (!(si->flags & SWP_SOLIDSTATE))
spin_unlock(&si->global_cluster_lock);
local_unlock(&percpu_swap_cluster.lock);
table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | GFP_KERNEL);
/*
* Back to atomic context. We might have migrated to a new CPU with a
* usable percpu cluster. But just keep using the isolated cluster to
* make things easier. Migration indicates a slight change of workload
* so using a new free cluster might not be a bad idea, and the worst
* that could happen from ignoring the percpu cluster is fragmentation,
* which is acceptable since this fallback and race are rare.
*/
local_lock(&percpu_swap_cluster.lock);
if (!(si->flags & SWP_SOLIDSTATE))
spin_lock(&si->global_cluster_lock);
spin_lock(&ci->lock);
/* Nothing except this helper should touch a dangling empty cluster. */
if (WARN_ON_ONCE(cluster_table_is_alloced(ci))) {
if (table)
swap_table_free(table);
return ci;
}
if (!table) {
move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
spin_unlock(&ci->lock);
return NULL;
}
rcu_assign_pointer(ci->table, table);
return ci;
}
static void move_cluster(struct swap_info_struct *si,
struct swap_cluster_info *ci, struct list_head *list,
enum swap_cluster_flags new_flags)
{
VM_WARN_ON(ci->flags == new_flags);
BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
lockdep_assert_held(&ci->lock);
spin_lock(&si->lock);
if (ci->flags == CLUSTER_FLAG_NONE)
list_add_tail(&ci->list, list);
else
list_move_tail(&ci->list, list);
spin_unlock(&si->lock);
ci->flags = new_flags;
}
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
schedule_work(&si->discard_work);
}
static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
swap_cluster_free_table(ci);
move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
ci->order = 0;
}
/*
* Isolate and lock the first cluster on a list that is not contended,
* clearing its flag before taking it off-list. The cluster flag must be
* kept in sync with the list status, so cluster updaters can always know
* the cluster list status without touching the si lock.
*
* Note it's possible that all clusters on a list are contended, so
* this may return NULL for a non-empty list.
*/
static struct swap_cluster_info *isolate_lock_cluster(
struct swap_info_struct *si, struct list_head *list, int order)
{
struct swap_cluster_info *ci, *found = NULL;
spin_lock(&si->lock);
list_for_each_entry(ci, list, list) {
if (!spin_trylock(&ci->lock))
continue;
/* We may only isolate and clear flags of following lists */
VM_BUG_ON(!ci->flags);
VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE &&
ci->flags != CLUSTER_FLAG_FULL);
list_del(&ci->list);
ci->flags = CLUSTER_FLAG_NONE;
found = ci;
break;
}
spin_unlock(&si->lock);
if (found && !cluster_table_is_alloced(found)) {
/* Only an empty free cluster's swap table can be freed. */
VM_WARN_ON_ONCE(list != &si->free_clusters);
VM_WARN_ON_ONCE(!cluster_is_empty(found));
return swap_cluster_alloc_table(si, found);
}
return found;
}
/*
* Perform the actual discard. After a cluster discard is finished, the
* cluster will be added to the free cluster list. Discard clusters are a
* bit special as they don't participate in allocation or reclaim, so
* clusters marked as CLUSTER_FLAG_DISCARD must remain off-list or on the
* discard list.
*/
static bool swap_do_scheduled_discard(struct swap_info_struct *si)
{
struct swap_cluster_info *ci;
bool ret = false;
unsigned int idx;
spin_lock(&si->lock);
while (!list_empty(&si->discard_clusters)) {
ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
/*
* Delete the cluster from list to prepare for discard, but keep
* the CLUSTER_FLAG_DISCARD flag, percpu_swap_cluster could be
* pointing to it, or ran into by relocate_cluster.
*/
list_del(&ci->list);
idx = cluster_index(si, ci);
spin_unlock(&si->lock);
discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
SWAPFILE_CLUSTER);
spin_lock(&ci->lock);
/*
* Discard is done, clear its flags as it's off-list, then
* return the cluster to allocation list.
*/
ci->flags = CLUSTER_FLAG_NONE;
__free_cluster(si, ci);
spin_unlock(&ci->lock);
ret = true;
spin_lock(&si->lock);
}
spin_unlock(&si->lock);
return ret;
}
static void swap_discard_work(struct work_struct *work)
{
struct swap_info_struct *si;
si = container_of(work, struct swap_info_struct, discard_work);
swap_do_scheduled_discard(si);
}
static void swap_users_ref_free(struct percpu_ref *ref)
{
struct swap_info_struct *si;
si = container_of(ref, struct swap_info_struct, users);
complete(&si->comp);
}
/*
* Must be called after freeing if ci->count == 0, moves the cluster to free
* or discard list.
*/
static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
VM_BUG_ON(ci->count != 0);
VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
lockdep_assert_held(&ci->lock);
/*
* If the swap is discardable, prepare to discard the cluster
* instead of freeing it immediately. The cluster will be freed
* after discard.
*/
if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
(SWP_WRITEOK | SWP_PAGE_DISCARD)) {
swap_cluster_schedule_discard(si, ci);
return;
}
__free_cluster(si, ci);
}
/*
* Must be called after freeing if ci->count != 0, moves the cluster to
* nonfull list.
*/
static void partial_free_cluster(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER);
lockdep_assert_held(&ci->lock);
if (ci->flags != CLUSTER_FLAG_NONFULL)
move_cluster(si, ci, &si->nonfull_clusters[ci->order],
CLUSTER_FLAG_NONFULL);
}
/*
* Must be called after allocation, moves the cluster to full or frag list.
* Note: allocation doesn't acquire si lock, and may drop the ci lock for
* reclaim, so the cluster could be anywhere when called.
*/
static void relocate_cluster(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
lockdep_assert_held(&ci->lock);
/* Discard cluster must remain off-list or on discard list */
if (cluster_is_discard(ci))
return;
if (!ci->count) {
if (ci->flags != CLUSTER_FLAG_FREE)
free_cluster(si, ci);
} else if (ci->count != SWAPFILE_CLUSTER) {
if (ci->flags != CLUSTER_FLAG_FRAG)
move_cluster(si, ci, &si->frag_clusters[ci->order],
CLUSTER_FLAG_FRAG);
} else {
if (ci->flags != CLUSTER_FLAG_FULL)
move_cluster(si, ci, &si->full_clusters,
CLUSTER_FLAG_FULL);
}
}
/*
* The cluster corresponding to page_nr will be used. The cluster will not be
* added to free cluster list and its usage counter will be increased by 1.
* Only used for initialization.
*/
static int inc_cluster_info_page(struct swap_info_struct *si,
struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
unsigned long idx = page_nr / SWAPFILE_CLUSTER;
struct swap_table *table;
struct swap_cluster_info *ci;
ci = cluster_info + idx;
if (!ci->table) {
table = swap_table_alloc(GFP_KERNEL);
if (!table)
return -ENOMEM;
rcu_assign_pointer(ci->table, table);
}
ci->count++;
VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
VM_BUG_ON(ci->flags);
return 0;
}
static bool cluster_reclaim_range(struct swap_info_struct *si,
struct swap_cluster_info *ci,
unsigned long start, unsigned long end)
{
unsigned char *map = si->swap_map;
unsigned long offset = start;
int nr_reclaim;
spin_unlock(&ci->lock);
do {
switch (READ_ONCE(map[offset])) {
case 0:
offset++;
break;
case SWAP_HAS_CACHE:
nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
if (nr_reclaim > 0)
offset += nr_reclaim;
else
goto out;
break;
default:
goto out;
}
} while (offset < end);
out:
spin_lock(&ci->lock);
/*
* Recheck the range whether or not reclaim succeeded; the slot
* could have been freed while we were not holding the lock.
*/
for (offset = start; offset < end; offset++)
if (READ_ONCE(map[offset]))
return false;
return true;
}
static bool cluster_scan_range(struct swap_info_struct *si,
struct swap_cluster_info *ci,
unsigned long start, unsigned int nr_pages,
bool *need_reclaim)
{
unsigned long offset, end = start + nr_pages;
unsigned char *map = si->swap_map;
if (cluster_is_empty(ci))
return true;
for (offset = start; offset < end; offset++) {
switch (READ_ONCE(map[offset])) {
case 0:
continue;
case SWAP_HAS_CACHE:
if (!vm_swap_full())
return false;
*need_reclaim = true;
continue;
default:
return false;
}
}
return true;
}
/*
* Currently, the swap table is not used for count tracking, just
* do a sanity check here to ensure nothing leaked, so the swap
* table should be empty upon freeing.
*/
static void swap_cluster_assert_table_empty(struct swap_cluster_info *ci,
unsigned int start, unsigned int nr)
{
unsigned int ci_off = start % SWAPFILE_CLUSTER;
unsigned int ci_end = ci_off + nr;
unsigned long swp_tb;
if (IS_ENABLED(CONFIG_DEBUG_VM)) {
do {
swp_tb = __swap_table_get(ci, ci_off);
VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb));
} while (++ci_off < ci_end);
}
}
static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
unsigned int start, unsigned char usage,
unsigned int order)
{
unsigned int nr_pages = 1 << order;
lockdep_assert_held(&ci->lock);
if (!(si->flags & SWP_WRITEOK))
return false;
/*
* The first allocation in a cluster makes the
* cluster exclusive to this order
*/
if (cluster_is_empty(ci))
ci->order = order;
memset(si->swap_map + start, usage, nr_pages);
swap_cluster_assert_table_empty(ci, start, nr_pages);
swap_range_alloc(si, nr_pages);
ci->count += nr_pages;
return true;
}
/* Try to use a new cluster for the current CPU and allocate from it. */
static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
struct swap_cluster_info *ci,
unsigned long offset,
unsigned int order,
unsigned char usage)
{
unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
unsigned int nr_pages = 1 << order;
bool need_reclaim, ret;
lockdep_assert_held(&ci->lock);
if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER)
goto out;
for (end -= nr_pages; offset <= end; offset += nr_pages) {
need_reclaim = false;
if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
continue;
if (need_reclaim) {
ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
/*
* Reclaim drops ci->lock and the cluster could be used
* by another order. Don't check the flag, as an off-list
* cluster has no flag set, and a change of list
* won't cause fragmentation.
*/
if (!cluster_is_usable(ci, order))
goto out;
if (cluster_is_empty(ci))
offset = start;
/* Reclaim failed but cluster is usable, try next */
if (!ret)
continue;
}
if (!cluster_alloc_range(si, ci, offset, usage, order))
break;
found = offset;
offset += nr_pages;
if (ci->count < SWAPFILE_CLUSTER && offset <= end)
next = offset;
break;
}
out:
relocate_cluster(si, ci);
swap_cluster_unlock(ci);
if (si->flags & SWP_SOLIDSTATE) {
this_cpu_write(percpu_swap_cluster.offset[order], next);
this_cpu_write(percpu_swap_cluster.si[order], si);
} else {
si->global_cluster->next[order] = next;
}
return found;
}
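/*
* Isolate and lock a cluster from @list and try to allocate from it. With
* @scan_all, keep trying clusters until one yields an entry or the list is
* drained. Returns the allocated offset or SWAP_ENTRY_INVALID.
*/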
static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
struct list_head *list,
unsigned int order,
unsigned char usage,
bool scan_all)
{
unsigned int found = SWAP_ENTRY_INVALID;
do {
struct swap_cluster_info *ci = isolate_lock_cluster(si, list, order);
unsigned long offset;
if (!ci)
break;
offset = cluster_offset(si, ci);
found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
if (found)
break;
} while (scan_all);
return found;
}
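/*
* Walk the full clusters list and reclaim swap-cache-only slots. When
* @force is set, scan a number of clusters proportional to the current
* swap usage; otherwise scan only one cluster.
*/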
static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
{
long to_scan = 1;
unsigned long offset, end;
struct swap_cluster_info *ci;
unsigned char *map = si->swap_map;
int nr_reclaim;
if (force)
to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER;
while ((ci = isolate_lock_cluster(si, &si->full_clusters, 0))) {
offset = cluster_offset(si, ci);
end = min(si->max, offset + SWAPFILE_CLUSTER);
to_scan--;
while (offset < end) {
if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
spin_unlock(&ci->lock);
nr_reclaim = __try_to_reclaim_swap(si, offset,
TTRS_ANYWAY);
spin_lock(&ci->lock);
if (nr_reclaim) {
offset += abs(nr_reclaim);
continue;
}
}
offset++;
}
/* in case no swap cache is reclaimed */
if (ci->flags == CLUSTER_FLAG_NONE)
relocate_cluster(si, ci);
swap_cluster_unlock(ci);
if (to_scan <= 0)
break;
}
}
static void swap_reclaim_work(struct work_struct *work)
{
struct swap_info_struct *si;
si = container_of(work, struct swap_info_struct, reclaim_work);
swap_reclaim_full_clusters(si, true);
}
/*
* Try to allocate swap entries with the specified order and try to set a new
* cluster for the current CPU too.
*/
static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
unsigned char usage)
{
struct swap_cluster_info *ci;
unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
/*
* If the swapfile is not a block device, we are unable
* to allocate large entries.
*/
if (order && !(si->flags & SWP_BLKDEV))
return 0;
if (!(si->flags & SWP_SOLIDSTATE)) {
/* Serialize HDD SWAP allocation for each device. */
spin_lock(&si->global_cluster_lock);
offset = si->global_cluster->next[order];
if (offset == SWAP_ENTRY_INVALID)
goto new_cluster;
ci = swap_cluster_lock(si, offset);
/* Cluster could have been used by another order */
if (cluster_is_usable(ci, order)) {
if (cluster_is_empty(ci))
offset = cluster_offset(si, ci);
found = alloc_swap_scan_cluster(si, ci, offset,
order, usage);
} else {
swap_cluster_unlock(ci);
}
if (found)
goto done;
}
new_cluster:
/*
* If the device needs discard, prefer a new cluster over nonfull
* clusters to spread out the writes.
*/
if (si->flags & SWP_PAGE_DISCARD) {
found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
false);
if (found)
goto done;
}
if (order < PMD_ORDER) {
found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
order, usage, true);
if (found)
goto done;
}
if (!(si->flags & SWP_PAGE_DISCARD)) {
found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
false);
if (found)
goto done;
}
/* Try reclaim full clusters if free and nonfull lists are drained */
if (vm_swap_full())
swap_reclaim_full_clusters(si, false);
if (order < PMD_ORDER) {
/*
* Scanning only one fragment cluster is good enough. Order 0
* allocation will surely succeed, and large allocation
* failure is not critical. Scanning one cluster still
* keeps the list rotated and reclaimed (for HAS_CACHE).
*/
found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
usage, false);
if (found)
goto done;
}
/*
* We don't have free cluster but have some clusters in discarding,
* do discard now and reclaim them.
*/
if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si))
goto new_cluster;
if (order)
goto done;
/* Order 0 stealing from higher order */
for (int o = 1; o < SWAP_NR_ORDERS; o++) {
/*
* Clusters here have at least one usable slot and can't fail order 0
* allocation, but reclaim may drop si->lock and race with another user.
*/
found = alloc_swap_scan_list(si, &si->frag_clusters[o],
0, usage, true);
if (found)
goto done;
found = alloc_swap_scan_list(si, &si->nonfull_clusters[o],
0, usage, true);
if (found)
goto done;
}
done:
if (!(si->flags & SWP_SOLIDSTATE))
spin_unlock(&si->global_cluster_lock);
return found;
}
/* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */
static void del_from_avail_list(struct swap_info_struct *si, bool swapoff)
{
int nid;
unsigned long pages;
spin_lock(&swap_avail_lock);
if (swapoff) {
/*
* Forcefully remove it. Clear the SWP_WRITEOK flags for
* swapoff here so it's synchronized by both si->lock and
* swap_avail_lock, to ensure the result can be seen by
* add_to_avail_list.
*/
lockdep_assert_held(&si->lock);
si->flags &= ~SWP_WRITEOK;
atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
} else {
/*
* If not called by swapoff, take it off-list only if it's
* full and SWAP_USAGE_OFFLIST_BIT is not set (strictly,
* si->inuse_pages == pages). Any concurrent slot freeing,
* or the device having already been removed from the plist
* by someone else, will make this cmpxchg fail.
*/
pages = si->pages;
if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
pages | SWAP_USAGE_OFFLIST_BIT))
goto skip;
}
for_each_node(nid)
plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
skip:
spin_unlock(&swap_avail_lock);
}
/* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */
static void add_to_avail_list(struct swap_info_struct *si, bool swapon)
{
int nid;
long val;
unsigned long pages;
spin_lock(&swap_avail_lock);
/* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */
if (swapon) {
lockdep_assert_held(&si->lock);
si->flags |= SWP_WRITEOK;
} else {
if (!(READ_ONCE(si->flags) & SWP_WRITEOK))
goto skip;
}
if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT))
goto skip;
val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
/*
* When device is full and device is on the plist, only one updater will
* see (inuse_pages == si->pages) and will call del_from_avail_list. If
* that updater happens to be here, just skip adding.
*/
pages = si->pages;
if (val == pages) {
/* Just like the cmpxchg in del_from_avail_list */
if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
pages | SWAP_USAGE_OFFLIST_BIT))
goto skip;
}
for_each_node(nid)
plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
skip:
spin_unlock(&swap_avail_lock);
}
/*
* swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock
* within each cluster, so the total contribution to the global counter should
* always be positive and cannot exceed the total number of usable slots.
*/
static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries)
{
long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages);
/*
* If device is full, and SWAP_USAGE_OFFLIST_BIT is not set,
* remove it from the plist.
*/
if (unlikely(val == si->pages)) {
del_from_avail_list(si, false);
return true;
}
return false;
}
static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries)
{
long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages);
/*
* If device is not full, and SWAP_USAGE_OFFLIST_BIT is set,
* add it to the plist.
*/
if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
add_to_avail_list(si, false);
}
static void swap_range_alloc(struct swap_info_struct *si,
unsigned int nr_entries)
{
if (swap_usage_add(si, nr_entries)) {
if (vm_swap_full())
schedule_work(&si->reclaim_work);
}
atomic_long_sub(nr_entries, &nr_swap_pages);
}
static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
unsigned int nr_entries)
{
unsigned long begin = offset;
unsigned long end = offset + nr_entries - 1;
void (*swap_slot_free_notify)(struct block_device *, unsigned long);
unsigned int i;
/*
* Use atomic clear_bit operations only on zeromap instead of non-atomic
* bitmap_clear to prevent adjacent bit corruption due to simultaneous writes.
*/
for (i = 0; i < nr_entries; i++) {
clear_bit(offset + i, si->zeromap);
zswap_invalidate(swp_entry(si->type, offset + i));
}
if (si->flags & SWP_BLKDEV)
swap_slot_free_notify =
si->bdev->bd_disk->fops->swap_slot_free_notify;
else
swap_slot_free_notify = NULL;
while (offset <= end) {
arch_swap_invalidate_page(si->type, offset);
if (swap_slot_free_notify)
swap_slot_free_notify(si->bdev, offset);
offset++;
}
__swap_cache_clear_shadow(swp_entry(si->type, begin), nr_entries);
/*
* Make sure that try_to_unuse() observes si->inuse_pages reaching 0
* only after the above cleanups are done.
*/
smp_wmb();
atomic_long_add(nr_entries, &nr_swap_pages);
swap_usage_sub(si, nr_entries);
}
static bool get_swap_device_info(struct swap_info_struct *si)
{
if (!percpu_ref_tryget_live(&si->users))
return false;
/*
* Guarantee that si->users is checked before accessing other
* fields of swap_info_struct, and that si->flags (SWP_WRITEOK) is
* up to date.
*
* Paired with the spin_unlock() after setup_swap_info() in
* enable_swap_info(), and smp_wmb() in swapoff.
*/
smp_rmb();
return true;
}
/*
* Fast path: try to get swap entries with the specified order from the current
* CPU's swap entry pool (a cluster).
*/
static bool swap_alloc_fast(swp_entry_t *entry,
int order)
{
struct swap_cluster_info *ci;
struct swap_info_struct *si;
unsigned int offset, found = SWAP_ENTRY_INVALID;
/*
* Once allocated, swap_info_struct will never be completely freed,
* so checking its liveness with get_swap_device_info is enough.
*/
si = this_cpu_read(percpu_swap_cluster.si[order]);
offset = this_cpu_read(percpu_swap_cluster.offset[order]);
if (!si || !offset || !get_swap_device_info(si))
return false;
ci = swap_cluster_lock(si, offset);
if (cluster_is_usable(ci, order)) {
if (cluster_is_empty(ci))
offset = cluster_offset(si, ci);
found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE);
if (found)
*entry = swp_entry(si->type, found);
} else {
swap_cluster_unlock(ci);
}
put_swap_device(si);
return !!found;
}
/* Rotate the device and switch to a new cluster */
static bool swap_alloc_slow(swp_entry_t *entry,
int order)
{
int node;
unsigned long offset;
struct swap_info_struct *si, *next;
node = numa_node_id();
spin_lock(&swap_avail_lock);
start_over:
plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
/* Rotate the device and switch to a new cluster */
plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
spin_unlock(&swap_avail_lock);
if (get_swap_device_info(si)) {
offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE);
put_swap_device(si);
if (offset) {
*entry = swp_entry(si->type, offset);
return true;
}
if (order)
return false;
}
spin_lock(&swap_avail_lock);
/*
* if we got here, it's likely that si was almost full before,
* and since scan_swap_map_slots() can drop the si->lock,
* multiple callers probably all tried to get a page from the
* same si and it filled up before we could get one; or, the si
* filled up between us dropping swap_avail_lock and taking
* si->lock. Since we dropped the swap_avail_lock, the
* swap_avail_head list may have been modified; so if next is
* still in the swap_avail_head list then try it, otherwise
* start over if we have not gotten any slots.
*/
if (plist_node_empty(&next->avail_lists[node]))
goto start_over;
}
spin_unlock(&swap_avail_lock);
return false;
}
/**
* folio_alloc_swap - allocate swap space for a folio
* @folio: folio we want to move to swap
* @gfp: gfp mask for shadow nodes
*
* Allocate swap space for the folio and add the folio to the
* swap cache.
*
* Context: Caller needs to hold the folio lock.
* Return: Whether the folio was added to the swap cache.
*/
int folio_alloc_swap(struct folio *folio, gfp_t gfp)
{
unsigned int order = folio_order(folio);
unsigned int size = 1 << order;
swp_entry_t entry = {};
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
if (order) {
/*
* Reject large allocations when THP_SWAP is disabled;
* the caller should split the folio and try again.
*/
if (!IS_ENABLED(CONFIG_THP_SWAP))
return -EAGAIN;
/*
* Allocation size should never exceed cluster size
* (HPAGE_PMD_SIZE).
*/
if (size > SWAPFILE_CLUSTER) {
VM_WARN_ON_ONCE(1);
return -EINVAL;
}
}
local_lock(&percpu_swap_cluster.lock);
if (!swap_alloc_fast(&entry, order))
swap_alloc_slow(&entry, order);
local_unlock(&percpu_swap_cluster.lock);
/* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */
if (mem_cgroup_try_charge_swap(folio, entry))
goto out_free;
if (!entry.val)
return -ENOMEM;
swap_cache_add_folio(folio, entry, NULL);
return 0;
out_free:
put_swap_folio(folio, entry);
return -ENOMEM;
}
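/*
* Look up the swap device backing @entry with basic sanity checks: the
* device must be in use, the offset in range and the slot not free.
* Prints an error and returns NULL otherwise. No reference is taken, so
* the caller must prevent swapoff by other means.
*/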
static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
struct swap_info_struct *si;
unsigned long offset;
if (!entry.val)
goto out;
si = swap_entry_to_info(entry);
if (!si)
goto bad_nofile;
if (data_race(!(si->flags & SWP_USED)))
goto bad_device;
offset = swp_offset(entry);
if (offset >= si->max)
goto bad_offset;
if (data_race(!si->swap_map[swp_offset(entry)]))
goto bad_free;
return si;
bad_free:
pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
goto out;
bad_offset:
pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
goto out;
bad_device:
pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
goto out;
bad_nofile:
pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
return NULL;
}
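/*
* Drop one reference of type @usage from a single slot, with ci->lock held.
* Handles the swap cache bit, SWAP_MAP_SHMEM and count continuation, frees
* the entry when nothing references it anymore, and returns the remaining
* map value.
*/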
static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
struct swap_cluster_info *ci,
swp_entry_t entry,
unsigned char usage)
{
unsigned long offset = swp_offset(entry);
unsigned char count;
unsigned char has_cache;
count = si->swap_map[offset];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
if (usage == SWAP_HAS_CACHE) {
VM_BUG_ON(!has_cache);
has_cache = 0;
} else if (count == SWAP_MAP_SHMEM) {
/*
* Or we could insist on shmem.c using a special
* swap_shmem_free() and free_shmem_swap_and_cache()...
*/
count = 0;
} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
if (count == COUNT_CONTINUED) {
if (swap_count_continued(si, offset, count))
count = SWAP_MAP_MAX | COUNT_CONTINUED;
else
count = SWAP_MAP_MAX;
} else
count--;
}
usage = count | has_cache;
if (usage)
WRITE_ONCE(si->swap_map[offset], usage);
else
swap_entries_free(si, ci, entry, 1);
return usage;
}
/*
* When we get a swap entry, if there isn't some other way to
* prevent swapoff, such as the folio in the swap cache being locked, the
* RCU reader side being locked, etc., the swap entry may become invalid
* because of swapoff. Then, we need to enclose all swap related
* functions with get_swap_device() and put_swap_device(), unless the
* swap functions call get/put_swap_device() by themselves.
*
* RCU reader side lock (including any spinlock) is sufficient to
* prevent swapoff, because synchronize_rcu() is called in swapoff()
* before freeing data structures.
*
* Check whether swap entry is valid in the swap device. If so,
* return pointer to swap_info_struct, and keep the swap entry valid
* via preventing the swap device from being swapoff, until
* put_swap_device() is called. Otherwise return NULL.
*
* Notice that swapoff or swapoff+swapon can still happen before the
* percpu_ref_tryget_live() in get_swap_device() or after the
* percpu_ref_put() in put_swap_device() if there isn't any other way
* to prevent swapoff. The caller must be prepared for that. For
* example, the following situation is possible.
*
* CPU1 CPU2
* do_swap_page()
* ... swapoff+swapon
* __read_swap_cache_async()
* swapcache_prepare()
* __swap_duplicate()
* // check swap_map
* // verify PTE not changed
*
* In __swap_duplicate(), the swap_map needs to be checked before
* changing partly because the specified swap entry may be for another
* swap device which has been swapoff. And in do_swap_page(), after
* the page is read from the swap device, the PTE is verified not
* changed with the page table locked to check whether the swap device
* has been swapoff or swapoff+swapon.
*/
struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
struct swap_info_struct *si;
unsigned long offset;
if (!entry.val)
goto out;
si = swap_entry_to_info(entry);
if (!si)
goto bad_nofile;
if (!get_swap_device_info(si))
goto out;
offset = swp_offset(entry);
if (offset >= si->max)
goto put_out;
return si;
bad_nofile:
pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
return NULL;
put_out:
pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
percpu_ref_put(&si->users);
return NULL;
}
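/*
* Drop the SWAP_HAS_CACHE reference of @nr contiguous entries within one
* cluster. If the cache is the only remaining reference for the whole
* range, free all entries at once; otherwise drop the cache bit per entry.
*/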
static void swap_entries_put_cache(struct swap_info_struct *si,
swp_entry_t entry, int nr)
{
unsigned long offset = swp_offset(entry);
struct swap_cluster_info *ci;
ci = swap_cluster_lock(si, offset);
if (swap_only_has_cache(si, offset, nr)) {
swap_entries_free(si, ci, entry, nr);
} else {
for (int i = 0; i < nr; i++, entry.val++)
swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
}
swap_cluster_unlock(ci);
}
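/*
* Drop one map reference of @nr contiguous entries within one cluster.
* The batched path frees the whole range at once when this is the last map
* reference; otherwise fall back to per-entry swap_entry_put_locked().
* Returns true if any entry is left with only the swap cache reference.
*/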
static bool swap_entries_put_map(struct swap_info_struct *si,
swp_entry_t entry, int nr)
{
unsigned long offset = swp_offset(entry);
struct swap_cluster_info *ci;
bool has_cache = false;
unsigned char count;
int i;
if (nr <= 1)
goto fallback;
count = swap_count(data_race(si->swap_map[offset]));
if (count != 1 && count != SWAP_MAP_SHMEM)
goto fallback;
ci = swap_cluster_lock(si, offset);
if (!swap_is_last_map(si, offset, nr, &has_cache)) {
goto locked_fallback;
}
if (!has_cache)
swap_entries_free(si, ci, entry, nr);
else
for (i = 0; i < nr; i++)
WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
swap_cluster_unlock(ci);
return has_cache;
fallback:
ci = swap_cluster_lock(si, offset);
locked_fallback:
for (i = 0; i < nr; i++, entry.val++) {
count = swap_entry_put_locked(si, ci, entry, 1);
if (count == SWAP_HAS_CACHE)
has_cache = true;
}
swap_cluster_unlock(ci);
return has_cache;
}
/*
* Only functions with the "_nr" suffix are able to free entries spanning
* across multiple clusters, so ensure the range is within a single cluster
* when freeing entries with functions without the "_nr" suffix.
*/
static bool swap_entries_put_map_nr(struct swap_info_struct *si,
swp_entry_t entry, int nr)
{
int cluster_nr, cluster_rest;
unsigned long offset = swp_offset(entry);
bool has_cache = false;
cluster_rest = SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER;
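/*
* Illustrative example: if the first entry is three slots before a cluster
* boundary and nr is eight, the first iteration puts three entries and the
* second puts the remaining five in the next cluster.
*/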
while (nr) {
cluster_nr = min(nr, cluster_rest);
has_cache |= swap_entries_put_map(si, entry, cluster_nr);
cluster_rest = SWAPFILE_CLUSTER;
nr -= cluster_nr;
entry.val += cluster_nr;
}
return has_cache;
}
/*
* Check if it's the last ref of a swap entry in the freeing path.
* Qualified values are 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
*/
static inline bool __maybe_unused swap_is_last_ref(unsigned char count)
{
return (count == SWAP_HAS_CACHE) || (count == 1) ||
(count == SWAP_MAP_SHMEM);
}
/*
* Drop the last ref of swap entries, the caller has to ensure all entries
* belong to the same cgroup and cluster.
*/
static void swap_entries_free(struct swap_info_struct *si,
struct swap_cluster_info *ci,
swp_entry_t entry, unsigned int nr_pages)
{
unsigned long offset = swp_offset(entry);
unsigned char *map = si->swap_map + offset;
unsigned char *map_end = map + nr_pages;
/* It should never free entries across different clusters */
VM_BUG_ON(ci != __swap_offset_to_cluster(si, offset + nr_pages - 1));
VM_BUG_ON(cluster_is_empty(ci));
VM_BUG_ON(ci->count < nr_pages);
ci->count -= nr_pages;
do {
VM_BUG_ON(!swap_is_last_ref(*map));
*map = 0;
} while (++map < map_end);
mem_cgroup_uncharge_swap(entry, nr_pages);
swap_range_free(si, offset, nr_pages);
swap_cluster_assert_table_empty(ci, offset, nr_pages);
if (!ci->count)
free_cluster(si, ci);
else
partial_free_cluster(si, ci);
}
/*
* Caller has made sure that the swap device corresponding to entry
* is still around or has not been recycled.
*/
void swap_free_nr(swp_entry_t entry, int nr_pages)
{
int nr;
struct swap_info_struct *sis;
unsigned long offset = swp_offset(entry);
sis = _swap_info_get(entry);
if (!sis)
return;
while (nr_pages) {
nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
swap_entries_put_map(sis, swp_entry(sis->type, offset), nr);
offset += nr;
nr_pages -= nr;
}
}
/*
* Called after dropping swapcache to decrease refcnt to swap entries.
*/
void put_swap_folio(struct folio *folio, swp_entry_t entry)
{
struct swap_info_struct *si;
int size = 1 << swap_entry_order(folio_order(folio));
si = _swap_info_get(entry);
if (!si)
return;
swap_entries_put_cache(si, entry, size);
}
int __swap_count(swp_entry_t entry)
{
struct swap_info_struct *si = __swap_entry_to_info(entry);
pgoff_t offset = swp_offset(entry);
return swap_count(si->swap_map[offset]);
}
/*
* Check whether @entry is currently swapped out, i.e. whether it has any
* references left besides the swap cache. The exact count (including
* COUNT_CONTINUED) is not needed here; non-zero means it is still swapped.
*/
bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
pgoff_t offset = swp_offset(entry);
struct swap_cluster_info *ci;
int count;
ci = swap_cluster_lock(si, offset);
count = swap_count(si->swap_map[offset]);
swap_cluster_unlock(ci);
return !!count;
}
/*
* How many references to @entry are currently swapped out?
* This considers COUNT_CONTINUED so it returns exact answer.
*/
int swp_swapcount(swp_entry_t entry)
{
int count, tmp_count, n;
struct swap_info_struct *si;
struct swap_cluster_info *ci;
struct page *page;
pgoff_t offset;
unsigned char *map;
si = _swap_info_get(entry);
if (!si)
return 0;
offset = swp_offset(entry);
ci = swap_cluster_lock(si, offset);
count = swap_count(si->swap_map[offset]);
if (!(count & COUNT_CONTINUED))
goto out;
count &= ~COUNT_CONTINUED;
n = SWAP_MAP_MAX + 1;
page = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
VM_BUG_ON(page_private(page) != SWP_CONTINUED);
do {
page = list_next_entry(page, lru);
map = kmap_local_page(page);
tmp_count = map[offset];
kunmap_local(map);
count += (tmp_count & ~COUNT_CONTINUED) * n;
n *= (SWAP_CONT_MAX + 1);
} while (tmp_count & COUNT_CONTINUED);
out:
swap_cluster_unlock(ci);
return count;
}
static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
swp_entry_t entry, int order)
{
struct swap_cluster_info *ci;
unsigned char *map = si->swap_map;
unsigned int nr_pages = 1 << order;
unsigned long roffset = swp_offset(entry);
unsigned long offset = round_down(roffset, nr_pages);
int i;
bool ret = false;
ci = swap_cluster_lock(si, offset);
if (nr_pages == 1) {
if (swap_count(map[roffset]))
ret = true;
goto unlock_out;
}
for (i = 0; i < nr_pages; i++) {
if (swap_count(map[offset + i])) {
ret = true;
break;
}
}
unlock_out:
swap_cluster_unlock(ci);
return ret;
}
static bool folio_swapped(struct folio *folio)
{
swp_entry_t entry = folio->swap;
struct swap_info_struct *si = _swap_info_get(entry);
if (!si)
return false;
if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
return swap_entry_swapped(si, entry);
return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
}
static bool folio_swapcache_freeable(struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
if (!folio_test_swapcache(folio))
return false;
if (folio_test_writeback(folio))
return false;
/*
* Once hibernation has begun to create its image of memory,
* there's a danger that one of the calls to folio_free_swap()
* - most probably a call from __try_to_reclaim_swap() while
* hibernation is allocating its own swap pages for the image,
* but conceivably even a call from memory reclaim - will free
* the swap from a folio which has already been recorded in the
* image as a clean swapcache folio, and then reuse its swap for
* another page of the image. On waking from hibernation, the
* original folio might be freed under memory pressure, then
* later read back in from swap, now with the wrong data.
*
* Hibernation suspends storage while it is writing the image
* to disk so check that here.
*/
if (pm_suspended_storage())
return false;
return true;
}
/**
* folio_free_swap() - Free the swap space used for this folio.
* @folio: The folio to remove.
*
* If swap is getting full, or if there are no more mappings of this folio,
* then call folio_free_swap to free its swap space.
*
* Return: true if we were able to release the swap space.
*/
bool folio_free_swap(struct folio *folio)
{
if (!folio_swapcache_freeable(folio))
return false;
if (folio_swapped(folio))
return false;
swap_cache_del_folio(folio);
folio_set_dirty(folio);
return true;
}
/**
* free_swap_and_cache_nr() - Release reference on range of swap entries and
* reclaim their cache if no more references remain.
* @entry: First entry of range.
* @nr: Number of entries in range.
*
* For each swap entry in the contiguous range, release a reference. If any swap
* entries become free, try to reclaim their underlying folios, if present. The
* offset range is defined by [entry.offset, entry.offset + nr).
*/
void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
const unsigned long start_offset = swp_offset(entry);
const unsigned long end_offset = start_offset + nr;
struct swap_info_struct *si;
bool any_only_cache = false;
unsigned long offset;
si = get_swap_device(entry);
if (!si)
return;
if (WARN_ON(end_offset > si->max))
goto out;
/*
* First free all entries in the range.
*/
any_only_cache = swap_entries_put_map_nr(si, entry, nr);
/*
* Short-circuit the below loop if none of the entries had their
* reference drop to zero.
*/
if (!any_only_cache)
goto out;
/*
* Now go back over the range trying to reclaim the swap cache.
*/
for (offset = start_offset; offset < end_offset; offset += nr) {
nr = 1;
if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
/*
* Folios are always naturally aligned in swap so
* advance forward to the next boundary. Zero means no
* folio was found for the swap entry, so advance by 1
* in this case. Negative value means folio was found
* but could not be reclaimed. Here we can still advance
* to the next boundary.
*/
nr = __try_to_reclaim_swap(si, offset,
TTRS_UNMAPPED | TTRS_FULL);
if (nr == 0)
nr = 1;
else if (nr < 0)
nr = -nr;
nr = ALIGN(offset + 1, nr) - offset;
}
}
out:
put_swap_device(si);
}
#ifdef CONFIG_HIBERNATION
swp_entry_t get_swap_page_of_type(int type)
{
struct swap_info_struct *si = swap_type_to_info(type);
unsigned long offset;
swp_entry_t entry = {0};
if (!si)
goto fail;
/* This is called for allocating swap entry, not cache */
if (get_swap_device_info(si)) {
if (si->flags & SWP_WRITEOK) {
/*
* Grab the local lock to be compliant
* with swap table allocation.
*/
local_lock(&percpu_swap_cluster.lock);
offset = cluster_alloc_swap_entry(si, 0, 1);
local_unlock(&percpu_swap_cluster.lock);
if (offset)
entry = swp_entry(si->type, offset);
}
put_swap_device(si);
}
fail:
return entry;
}
/*
* Find the swap type that corresponds to given device (if any).
*
* @offset - number of the PAGE_SIZE-sized block of the device, starting
* from 0, in which the swap header is expected to be located.
*
* This is needed for the suspend to disk (aka swsusp).
*/
int swap_type_of(dev_t device, sector_t offset)
{
int type;
if (!device)
return -1;
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
if (device == sis->bdev->bd_dev) {
struct swap_extent *se = first_se(sis);
if (se->start_block == offset) {
spin_unlock(&swap_lock);
return type;
}
}
}
spin_unlock(&swap_lock);
return -ENODEV;
}
int find_first_swap(dev_t *device)
{
int type;
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
*device = sis->bdev->bd_dev;
spin_unlock(&swap_lock);
return type;
}
spin_unlock(&swap_lock);
return -ENODEV;
}
/*
* Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
* corresponding to given index in swap_info (swap type).
*/
sector_t swapdev_block(int type, pgoff_t offset)
{
struct swap_info_struct *si = swap_type_to_info(type);
struct swap_extent *se;
if (!si || !(si->flags & SWP_WRITEOK))
return 0;
se = offset_to_swap_extent(si, offset);
return se->start_block + (offset - se->start_page);
}
/*
* Return either the total number of swap pages of given type, or the number
* of free pages of that type (depending on @free)
*
* This is needed for software suspend
*/
unsigned int count_swap_pages(int type, int free)
{
unsigned int n = 0;
spin_lock(&swap_lock);
if ((unsigned int)type < nr_swapfiles) {
struct swap_info_struct *sis = swap_info[type];
spin_lock(&sis->lock);
if (sis->flags & SWP_WRITEOK) {
n = sis->pages;
if (free)
n -= swap_usage_in_pages(sis);
}
spin_unlock(&sis->lock);
}
spin_unlock(&swap_lock);
return n;
}
#endif /* CONFIG_HIBERNATION */
static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
return pte_same(pte_swp_clear_flags(pte), swp_pte);
}
/*
* No need to decide whether this PTE shares the swap entry with others,
* just let do_wp_page work it out if a write is requested later - to
* force COW, vm_page_prot omits write permission from any private vma.
*/
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct folio *folio)
{
struct page *page;
struct folio *swapcache;
spinlock_t *ptl;
pte_t *pte, new_pte, old_pte;
bool hwpoisoned = false;
int ret = 1;
/*
* If the folio is removed from swap cache by others, continue to
* unuse other PTEs. try_to_unuse may try again if we missed this one.
*/
if (!folio_matches_swap_entry(folio, entry))
return 0;
swapcache = folio;
folio = ksm_might_need_to_copy(folio, vma, addr);
if (unlikely(!folio))
return -ENOMEM;
else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
hwpoisoned = true;
folio = swapcache;
}
page = folio_file_page(folio, swp_offset(entry));
if (PageHWPoison(page))
hwpoisoned = true;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
swp_entry_to_pte(entry)))) {
ret = 0;
goto out;
}
old_pte = ptep_get(pte);
if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
swp_entry_t swp_entry;
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
if (hwpoisoned) {
swp_entry = make_hwpoison_entry(page);
} else {
swp_entry = make_poisoned_swp_entry();
}
new_pte = swp_entry_to_pte(swp_entry);
ret = 0;
goto setpte;
}
/*
* Some architectures may have to restore extra metadata to the page
* when reading from swap. This metadata may be indexed by swap entry
* so this must be called before swap_free().
*/
arch_swap_restore(folio_swap(entry, folio), folio);
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
folio_get(folio);
if (folio == swapcache) {
rmap_t rmap_flags = RMAP_NONE;
/*
* See do_swap_page(): writeback would be problematic.
* However, we do a folio_wait_writeback() just before this
* call and have the folio locked.
*/
VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
if (pte_swp_exclusive(old_pte))
rmap_flags |= RMAP_EXCLUSIVE;
/*
* We currently only expect small !anon folios, which are either
* fully exclusive or fully shared. If we ever get large folios
* here, we have to be careful.
*/
if (!folio_test_anon(folio)) {
VM_WARN_ON_ONCE(folio_test_large(folio));
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
} else {
folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
}
} else { /* ksm created a completely new copy */
folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
}
new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
if (pte_swp_soft_dirty(old_pte))
new_pte = pte_mksoft_dirty(new_pte);
if (pte_swp_uffd_wp(old_pte))
new_pte = pte_mkuffd_wp(new_pte);
setpte:
set_pte_at(vma->vm_mm, addr, pte, new_pte);
swap_free(entry);
out:
if (pte)
pte_unmap_unlock(pte, ptl);
if (folio != swapcache) {
folio_unlock(folio);
folio_put(folio);
}
return ret;
}
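/*
* Walk the PTEs in [addr, end) under @pmd looking for swap entries of
* @type. For each one, look up or swap in the folio, then call unuse_pte()
* to restore the mapping and drop the swap reference. Returns 0 on success
* or a negative error.
*/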
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned int type)
{
pte_t *pte = NULL;
struct swap_info_struct *si;
si = swap_info[type];
do {
struct folio *folio;
unsigned long offset;
unsigned char swp_count;
swp_entry_t entry;
int ret;
pte_t ptent;
if (!pte++) {
pte = pte_offset_map(pmd, addr);
if (!pte)
break;
}
ptent = ptep_get_lockless(pte);
if (!is_swap_pte(ptent))
continue;
entry = pte_to_swp_entry(ptent);
if (swp_type(entry) != type)
continue;
offset = swp_offset(entry);
pte_unmap(pte);
pte = NULL;
folio = swap_cache_get_folio(entry);
if (!folio) {
struct vm_fault vmf = {
.vma = vma,
.address = addr,
.real_address = addr,
.pmd = pmd,
};
folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
&vmf);
}
if (!folio) {
swp_count = READ_ONCE(si->swap_map[offset]);
if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
continue;
return -ENOMEM;
}
folio_lock(folio);
folio_wait_writeback(folio);
ret = unuse_pte(vma, pmd, addr, entry, folio);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
return ret;
}
folio_free_swap(folio);
folio_unlock(folio);
folio_put(folio);
} while (addr += PAGE_SIZE, addr != end);
if (pte)
pte_unmap(pte);
return 0;
}
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned int type)
{
pmd_t *pmd;
unsigned long next;
int ret;
pmd = pmd_offset(pud, addr);
do {
cond_resched();
next = pmd_addr_end(addr, end);
ret = unuse_pte_range(vma, pmd, addr, next, type);
if (ret)
return ret;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned int type)
{
pud_t *pud;
unsigned long next;
int ret;
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
ret = unuse_pmd_range(vma, pud, addr, next, type);
if (ret)
return ret;
} while (pud++, addr = next, addr != end);
return 0;
}
static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned int type)
{
p4d_t *p4d;
unsigned long next;
int ret;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d))
continue;
ret = unuse_pud_range(vma, p4d, addr, next, type);
if (ret)
return ret;
} while (p4d++, addr = next, addr != end);
return 0;
}
static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
{
pgd_t *pgd;
unsigned long addr, end, next;
int ret;
addr = vma->vm_start;
end = vma->vm_end;
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
ret = unuse_p4d_range(vma, pgd, addr, next, type);
if (ret)
return ret;
} while (pgd++, addr = next, addr != end);
return 0;
}
static int unuse_mm(struct mm_struct *mm, unsigned int type)
{
struct vm_area_struct *vma;
int ret = 0;
VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
if (check_stable_address_space(mm))
goto unlock;
for_each_vma(vmi, vma) {
if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
ret = unuse_vma(vma, type);
if (ret)
break;
}
cond_resched();
}
unlock:
mmap_read_unlock(mm);
return ret;
}
/*
* Scan swap_map from current position to next entry still in use.
* Return 0 if there are no inuse entries after prev till end of
* the map.
*/
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
unsigned int prev)
{
unsigned int i;
unsigned char count;
/*
* No need for swap_lock here: we're just looking
* for whether an entry is in use, not modifying it; false
* hits are okay, and sys_swapoff() has already prevented new
* allocations from this area (while holding swap_lock).
*/
for (i = prev + 1; i < si->max; i++) {
count = READ_ONCE(si->swap_map[i]);
if (count && swap_count(count) != SWAP_MAP_BAD)
break;
if ((i % LATENCY_LIMIT) == 0)
cond_resched();
}
if (i == si->max)
i = 0;
return i;
}
static int try_to_unuse(unsigned int type)
{
struct mm_struct *prev_mm;
struct mm_struct *mm;
struct list_head *p;
int retval = 0;
struct swap_info_struct *si = swap_info[type];
struct folio *folio;
swp_entry_t entry;
unsigned int i;
if (!swap_usage_in_pages(si))
goto success;
retry:
retval = shmem_unuse(type);
if (retval)
return retval;
prev_mm = &init_mm;
mmget(prev_mm);
spin_lock(&mmlist_lock);
p = &init_mm.mmlist;
while (swap_usage_in_pages(si) &&
!signal_pending(current) &&
(p = p->next) != &init_mm.mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!mmget_not_zero(mm))
continue;
spin_unlock(&mmlist_lock);
mmput(prev_mm);
prev_mm = mm;
retval = unuse_mm(mm, type);
if (retval) {
mmput(prev_mm);
return retval;
}
/*
* Make sure that we aren't completely killing
* interactive performance.
*/
cond_resched();
spin_lock(&mmlist_lock);
}
spin_unlock(&mmlist_lock);
mmput(prev_mm);
i = 0;
while (swap_usage_in_pages(si) &&
!signal_pending(current) &&
(i = find_next_to_unuse(si, i)) != 0) {
entry = swp_entry(type, i);
folio = swap_cache_get_folio(entry);
if (!folio)
continue;
/*
* It is conceivable that a racing task removed this folio from
* swap cache just before we acquired the page lock. The folio
* might even be back in swap cache on another swap area. But
* that is okay, folio_free_swap() only removes stale folios.
*/
folio_lock(folio);
folio_wait_writeback(folio);
folio_free_swap(folio);
folio_unlock(folio);
folio_put(folio);
}
/*
* Let's check again to see if there are still swap entries in the map.
* If yes, we need to retry the unuse logic.
* Under global memory pressure, swap entries can be reinserted back
* into process space after the mmlist loop above passes over them.
*
* Limit the number of retries? No: when mmget_not_zero()
* above fails, that mm is likely to be freeing swap from
* exit_mmap(), which proceeds at its own independent pace;
* and even shmem_writeout() could have been preempted after
* folio_alloc_swap(), temporarily hiding that swap. It's easy
* and robust (though cpu-intensive) just to keep retrying.
*/
if (swap_usage_in_pages(si)) {
if (!signal_pending(current))
goto retry;
return -EINTR;
}
success:
/*
* Make sure that further cleanups after try_to_unuse() returns happen
* after swap_range_free() reduces si->inuse_pages to 0.
*/
smp_mb();
return 0;
}
/*
* After a successful try_to_unuse, if no swap is now in use, we know
* we can empty the mmlist. swap_lock must be held on entry and exit.
* Note that mmlist_lock nests inside swap_lock, and an mm must be
* added to the mmlist just after page_duplicate - before would be racy.
*/
static void drain_mmlist(void)
{
struct list_head *p, *next;
unsigned int type;
for (type = 0; type < nr_swapfiles; type++)
if (swap_usage_in_pages(swap_info[type]))
return;
spin_lock(&mmlist_lock);
list_for_each_safe(p, next, &init_mm.mmlist)
list_del_init(p);
spin_unlock(&mmlist_lock);
}
/*
* Free all of a swapdev's extent information
*/
static void destroy_swap_extents(struct swap_info_struct *sis)
{
while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
struct rb_node *rb = sis->swap_extent_root.rb_node;
struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
rb_erase(rb, &sis->swap_extent_root);
kfree(se);
}
if (sis->flags & SWP_ACTIVATED) {
struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
sis->flags &= ~SWP_ACTIVATED;
if (mapping->a_ops->swap_deactivate)
mapping->a_ops->swap_deactivate(swap_file);
}
}
/*
* Add a block range (and the corresponding page range) into this swapdev's
* extent tree.
*
* This function rather assumes that it is called in ascending page order.
*/
int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
{
struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
struct swap_extent *se;
struct swap_extent *new_se;
/*
* Place the new node at the rightmost position since the
* function is called in ascending page order.
*/
while (*link) {
parent = *link;
link = &parent->rb_right;
}
if (parent) {
se = rb_entry(parent, struct swap_extent, rb_node);
BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
/* Merge it */
se->nr_pages += nr_pages;
return 0;
}
}
/* No merge, insert a new extent. */
new_se = kmalloc(sizeof(*se), GFP_KERNEL);
if (new_se == NULL)
return -ENOMEM;
new_se->start_page = start_page;
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
rb_link_node(&new_se->rb_node, parent, link);
rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
return 1;
}
EXPORT_SYMBOL_GPL(add_swap_extent);
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
* onto a contiguous range of disk blocks. A rbtree of swap extents is
* built at swapon time and is then used at swap_writepage/swap_read_folio
* time for locating where on disk a page belongs.
*
* If the swapfile is an S_ISBLK block device, a single extent is installed.
* This is done so that the main operating code can treat S_ISBLK and S_ISREG
* swap files identically.
*
* Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
* extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
* swapfiles are handled *identically* after swapon time.
*
* For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
* and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray
* blocks are found which do not fall within the PAGE_SIZE alignment
* requirements, they are simply tossed out - we will never use those blocks
* for swapping.
*
* For all swap devices we set S_SWAPFILE across the life of the swapon. This
* prevents users from writing to the swap device, which will corrupt memory.
*
* The amount of disk space which a single swap extent represents varies.
* Typically it is in the 1-4 megabyte range. So we can have hundreds of
* extents in the rbtree. - akpm.
*/
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
struct inode *inode = mapping->host;
int ret;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
return ret;
}
if (mapping->a_ops->swap_activate) {
ret = mapping->a_ops->swap_activate(sis, swap_file, span);
if (ret < 0)
return ret;
sis->flags |= SWP_ACTIVATED;
if ((sis->flags & SWP_FS_OPS) &&
sio_pool_init() != 0) {
destroy_swap_extents(sis);
return -ENOMEM;
}
return ret;
}
return generic_swapfile_activate(sis, swap_file, span);
}
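/* Return the NUMA node of the block device backing the swap area, if any. */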
static int swap_node(struct swap_info_struct *si)
{
struct block_device *bdev;
if (si->bdev)
bdev = si->bdev;
else
bdev = si->swap_file->f_inode->i_sb->s_bdev;
return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
}
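/*
* Install the swap_map, cluster_info and zeromap, and set up the device's
* priority. With an auto-assigned (negative) priority, the device's own
* NUMA node gets the most preferred per-node priority.
*/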
static void setup_swap_info(struct swap_info_struct *si, int prio,
unsigned char *swap_map,
struct swap_cluster_info *cluster_info,
unsigned long *zeromap)
{
int i;
if (prio >= 0)
si->prio = prio;
else
si->prio = --least_priority;
/*
* the plist prio is negated because plist ordering is
* low-to-high, while swap ordering is high-to-low
*/
si->list.prio = -si->prio;
for_each_node(i) {
if (si->prio >= 0)
si->avail_lists[i].prio = -si->prio;
else {
if (swap_node(si) == i)
si->avail_lists[i].prio = 1;
else
si->avail_lists[i].prio = -si->prio;
}
}
si->swap_map = swap_map;
si->cluster_info = cluster_info;
si->zeromap = zeromap;
}
static void _enable_swap_info(struct swap_info_struct *si)
{
atomic_long_add(si->pages, &nr_swap_pages);
total_swap_pages += si->pages;
assert_spin_locked(&swap_lock);
/*
* both lists are plists, and thus priority ordered.
* swap_active_head needs to be priority ordered for swapoff(),
* which on removal of any swap_info_struct with an auto-assigned
* (i.e. negative) priority increments the auto-assigned priority
* of any lower-priority swap_info_structs.
* swap_avail_head needs to be priority ordered for folio_alloc_swap(),
* which allocates swap pages from the highest available priority
* swap_info_struct.
*/
plist_add(&si->list, &swap_active_head);
/* Add back to available list */
add_to_avail_list(si, true);
}
static void enable_swap_info(struct swap_info_struct *si, int prio,
unsigned char *swap_map,
struct swap_cluster_info *cluster_info,
unsigned long *zeromap)
{
spin_lock(&swap_lock);
spin_lock(&si->lock);
setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
spin_unlock(&si->lock);
spin_unlock(&swap_lock);
/*
* Finished initializing swap device, now it's safe to reference it.
*/
percpu_ref_resurrect(&si->users);
spin_lock(&swap_lock);
spin_lock(&si->lock);
_enable_swap_info(si);
spin_unlock(&si->lock);
spin_unlock(&swap_lock);
}
static void reinsert_swap_info(struct swap_info_struct *si)
{
spin_lock(&swap_lock);
spin_lock(&si->lock);
setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
_enable_swap_info(si);
spin_unlock(&si->lock);
spin_unlock(&swap_lock);
}
/*
* Called after clearing SWP_WRITEOK, ensures cluster_alloc_range
* sees the updated flags, so there will be no more allocations.
*/
static void wait_for_allocation(struct swap_info_struct *si)
{
unsigned long offset;
unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER);
struct swap_cluster_info *ci;
BUG_ON(si->flags & SWP_WRITEOK);
for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {
ci = swap_cluster_lock(si, offset);
swap_cluster_unlock(ci);
}
}
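/* Free the per-cluster swap tables (if still present) and the cluster_info array. */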
static void free_cluster_info(struct swap_cluster_info *cluster_info,
unsigned long maxpages)
{
struct swap_cluster_info *ci;
int i, nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
if (!cluster_info)
return;
for (i = 0; i < nr_clusters; i++) {
ci = cluster_info + i;
/* Clusters with bad page marks counted will still have a remaining table */
spin_lock(&ci->lock);
if (rcu_dereference_protected(ci->table, true)) {
ci->count = 0;
swap_cluster_free_table(ci);
}
spin_unlock(&ci->lock);
}
kvfree(cluster_info);
}
/*
* Called after swap device's reference count is dead, so
* neither scan nor allocation will use it.
*/
static void flush_percpu_swap_cluster(struct swap_info_struct *si)
{
int cpu, i;
struct swap_info_struct **pcp_si;
for_each_possible_cpu(cpu) {
pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu);
/*
* Invalidate the percpu swap cluster cache. si->users
* is dead, so no new user will point to it; just flush
* any existing user.
*/
for (i = 0; i < SWAP_NR_ORDERS; i++)
cmpxchg(&pcp_si[i], si, NULL);
}
}
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
unsigned char *swap_map;
unsigned long *zeromap;
struct swap_cluster_info *cluster_info;
struct file *swap_file, *victim;
struct address_space *mapping;
struct inode *inode;
struct filename *pathname;
unsigned int maxpages;
int err, found = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
BUG_ON(!current->mm);
pathname = getname(specialfile);
if (IS_ERR(pathname))
return PTR_ERR(pathname);
victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
err = PTR_ERR(victim);
if (IS_ERR(victim))
goto out;
mapping = victim->f_mapping;
spin_lock(&swap_lock);
plist_for_each_entry(p, &swap_active_head, list) {
if (p->flags & SWP_WRITEOK) {
if (p->swap_file->f_mapping == mapping) {
found = 1;
break;
}
}
}
if (!found) {
err = -EINVAL;
spin_unlock(&swap_lock);
goto out_dput;
}
if (!security_vm_enough_memory_mm(current->mm, p->pages))
vm_unacct_memory(p->pages);
else {
err = -ENOMEM;
spin_unlock(&swap_lock);
goto out_dput;
}
spin_lock(&p->lock);
del_from_avail_list(p, true);
if (p->prio < 0) {
struct swap_info_struct *si = p;
int nid;
plist_for_each_entry_continue(si, &swap_active_head, list) {
si->prio++;
si->list.prio--;
for_each_node(nid) {
if (si->avail_lists[nid].prio != 1)
si->avail_lists[nid].prio--;
}
}
least_priority++;
}
plist_del(&p->list, &swap_active_head);
atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
wait_for_allocation(p);
set_current_oom_origin();
err = try_to_unuse(p->type);
clear_current_oom_origin();
if (err) {
/* re-insert swap space back into swap_list */
reinsert_swap_info(p);
goto out_dput;
}
/*
* Wait for swap operations protected by get/put_swap_device()
* to complete. Because of synchronize_rcu() here, all swap
* operations protected by RCU reader side lock (including any
* spinlock) will be waited too. This makes it easy to
* prevent folio_test_swapcache() and the following swap cache
* operations from racing with swapoff.
*/
percpu_ref_kill(&p->users);
synchronize_rcu();
wait_for_completion(&p->comp);
flush_work(&p->discard_work);
flush_work(&p->reclaim_work);
flush_percpu_swap_cluster(p);
destroy_swap_extents(p);
if (p->flags & SWP_CONTINUED)
free_swap_count_continuations(p);
if (!p->bdev || !bdev_nonrot(p->bdev))
atomic_dec(&nr_rotate_swap);
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
spin_lock(&p->lock);
drain_mmlist();
swap_file = p->swap_file;
p->swap_file = NULL;
swap_map = p->swap_map;
p->swap_map = NULL;
zeromap = p->zeromap;
p->zeromap = NULL;
maxpages = p->max;
cluster_info = p->cluster_info;
p->max = 0;
p->cluster_info = NULL;
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
arch_swap_invalidate_area(p->type);
zswap_swapoff(p->type);
mutex_unlock(&swapon_mutex);
kfree(p->global_cluster);
p->global_cluster = NULL;
vfree(swap_map);
kvfree(zeromap);
free_cluster_info(cluster_info, maxpages);
/* Destroy swap account information */
swap_cgroup_swapoff(p->type);
inode = mapping->host;
inode_lock(inode);
inode->i_flags &= ~S_SWAPFILE;
inode_unlock(inode);
filp_close(swap_file, NULL);
/*
* Clear the SWP_USED flag after all resources are freed so that swapon
* can reuse this swap_info in alloc_swap_info() safely. It is ok to
* not hold p->lock after we cleared its SWP_WRITEOK.
*/
spin_lock(&swap_lock);
p->flags = 0;
spin_unlock(&swap_lock);
err = 0;
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
out_dput:
filp_close(victim, NULL);
out:
putname(pathname);
return err;
}
#ifdef CONFIG_PROC_FS
static __poll_t swaps_poll(struct file *file, poll_table *wait)
{
struct seq_file *seq = file->private_data;
poll_wait(file, &proc_poll_wait, wait);
if (seq->poll_event != atomic_read(&proc_poll_event)) {
seq->poll_event = atomic_read(&proc_poll_event);
return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
}
return EPOLLIN | EPOLLRDNORM;
}
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
struct swap_info_struct *si;
int type;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
if (!l)
return SEQ_START_TOKEN;
for (type = 0; (si = swap_type_to_info(type)); type++) {
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
return si;
}
return NULL;
}
static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
struct swap_info_struct *si = v;
int type;
if (v == SEQ_START_TOKEN)
type = 0;
else
type = si->type + 1;
++(*pos);
for (; (si = swap_type_to_info(type)); type++) {
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
return si;
}
return NULL;
}
static void swap_stop(struct seq_file *swap, void *v)
{
mutex_unlock(&swapon_mutex);
}
static int swap_show(struct seq_file *swap, void *v)
{
struct swap_info_struct *si = v;
struct file *file;
int len;
unsigned long bytes, inuse;
if (si == SEQ_START_TOKEN) {
seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
return 0;
}
bytes = K(si->pages);
inuse = K(swap_usage_in_pages(si));
file = si->swap_file;
len = seq_file_path(swap, file, " \t\n\\");
seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file_inode(file)->i_mode) ?
"partition" : "file\t",
bytes, bytes < 10000000 ? "\t" : "",
inuse, inuse < 10000000 ? "\t" : "",
si->prio);
return 0;
}
static const struct seq_operations swaps_op = {
.start = swap_start,
.next = swap_next,
.stop = swap_stop,
.show = swap_show
};
static int swaps_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int ret;
ret = seq_open(file, &swaps_op);
if (ret)
return ret;
seq = file->private_data;
seq->poll_event = atomic_read(&proc_poll_event);
return 0;
}
static const struct proc_ops swaps_proc_ops = {
.proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = swaps_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
.proc_poll = swaps_poll,
};
static int __init procswaps_init(void)
{
proc_create("swaps", 0, NULL, &swaps_proc_ops);
return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */
#ifdef MAX_SWAPFILES_CHECK
static int __init max_swapfiles_check(void)
{
MAX_SWAPFILES_CHECK();
return 0;
}
late_initcall(max_swapfiles_check);
#endif
static struct swap_info_struct *alloc_swap_info(void)
{
struct swap_info_struct *p;
struct swap_info_struct *defer = NULL;
unsigned int type;
int i;
p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
if (percpu_ref_init(&p->users, swap_users_ref_free,
PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
kvfree(p);
return ERR_PTR(-ENOMEM);
}
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
if (!(swap_info[type]->flags & SWP_USED))
break;
}
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
percpu_ref_exit(&p->users);
kvfree(p);
return ERR_PTR(-EPERM);
}
if (type >= nr_swapfiles) {
p->type = type;
/*
* Publish the swap_info_struct after initializing it.
* Note that kvzalloc() above zeroes all its fields.
*/
smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
nr_swapfiles++;
} else {
defer = p;
p = swap_info[type];
/*
* Do not memset this entry: a racing procfs swap_next()
* would be relying on p->type to remain valid.
*/
}
p->swap_extent_root = RB_ROOT;
plist_node_init(&p->list, 0);
for_each_node(i)
plist_node_init(&p->avail_lists[i], 0);
p->flags = SWP_USED;
spin_unlock(&swap_lock);
if (defer) {
percpu_ref_exit(&defer->users);
kvfree(defer);
}
spin_lock_init(&p->lock);
spin_lock_init(&p->cont_lock);
atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT);
init_completion(&p->comp);
return p;
}
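/*
* Resolve and record the block device backing the swap file: the device
* itself for S_ISBLK, or the filesystem's backing device for S_ISREG.
*/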
static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
{
if (S_ISBLK(inode->i_mode)) {
si->bdev = I_BDEV(inode);
/*
* Zoned block devices contain zones that have a sequential
* write only restriction. Hence zoned block devices are not
* suitable for swapping. Disallow them here.
*/
if (bdev_is_zoned(si->bdev))
return -EINVAL;
si->flags |= SWP_BLKDEV;
} else if (S_ISREG(inode->i_mode)) {
si->bdev = inode->i_sb->s_bdev;
}
return 0;
}
/*
* Find out how many pages are allowed for a single swap device. There
* are two limiting factors:
* 1) the number of bits for the swap offset in the swp_entry_t type, and
* 2) the number of bits in the swap pte, as defined by the different
* architectures.
*
* In order to find the largest possible bit mask, a swap entry with
* swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
* decoded to a swp_entry_t again, and finally the swap offset is
* extracted.
*
* This will mask all the bits from the initial ~0UL mask that can't
* be encoded in either the swp_entry_t or the architecture definition
* of a swap pte.
*/
unsigned long generic_max_swapfile_size(void)
{
return swp_offset(pte_to_swp_entry(
swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
}
/* Can be overridden by an architecture for additional checks. */
__weak unsigned long arch_max_swapfile_size(void)
{
return generic_max_swapfile_size();
}
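/*
* Illustrative sketch (not part of this source): an architecture that needs
* extra restrictions could provide its own arch_max_swapfile_size() and clamp
* the generic limit, e.g. when an erratum steals swap pte bits:
*
*	unsigned long arch_max_swapfile_size(void)
*	{
*		unsigned long pages = generic_max_swapfile_size();
*
*		if (arch_has_swap_pte_erratum())	// hypothetical helper
*			pages = min(pages, 1UL << 30);	// example clamp only
*		return pages;
*	}
*/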
static unsigned long read_swap_header(struct swap_info_struct *si,
union swap_header *swap_header,
struct inode *inode)
{
int i;
unsigned long maxpages;
unsigned long swapfilepages;
unsigned long last_page;
if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
pr_err("Unable to find swap-space signature\n");
return 0;
}
/* swap partition endianness hack... */
if (swab32(swap_header->info.version) == 1) {
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
return 0;
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
/* Check the swap header's sub-version */
if (swap_header->info.version != 1) {
pr_warn("Unable to handle swap header version %d\n",
swap_header->info.version);
return 0;
}
maxpages = swapfile_maximum_size;
last_page = swap_header->info.last_page;
if (!last_page) {
pr_warn("Empty swap-file\n");
return 0;
}
if (last_page > maxpages) {
pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
K(maxpages), K(last_page));
}
if (maxpages > last_page) {
maxpages = last_page + 1;
/* p->max is an unsigned int: don't overflow it */
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
}
if (!maxpages)
return 0;
swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
if (swapfilepages && maxpages > swapfilepages) {
pr_warn("Swap area shorter than signature indicates\n");
return 0;
}
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
return 0;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
return 0;
return maxpages;
}
static int setup_swap_map(struct swap_info_struct *si,
union swap_header *swap_header,
unsigned char *swap_map,
unsigned long maxpages)
{
unsigned long i;
swap_map[0] = SWAP_MAP_BAD; /* omit header page */
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
return -EINVAL;
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
si->pages--;
}
}
if (!si->pages) {
pr_warn("Empty swap-file\n");
return -EINVAL;
}
return 0;
}
static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
union swap_header *swap_header,
unsigned long maxpages)
{
unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
struct swap_cluster_info *cluster_info;
int err = -ENOMEM;
unsigned long i;
cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
if (!cluster_info)
goto err;
for (i = 0; i < nr_clusters; i++)
spin_lock_init(&cluster_info[i].lock);
if (!(si->flags & SWP_SOLIDSTATE)) {
si->global_cluster = kmalloc(sizeof(*si->global_cluster),
GFP_KERNEL);
if (!si->global_cluster)
goto err_free;
for (i = 0; i < SWAP_NR_ORDERS; i++)
si->global_cluster->next[i] = SWAP_ENTRY_INVALID;
spin_lock_init(&si->global_cluster_lock);
}
/*
* Mark unusable pages as unavailable. The clusters aren't
* marked free yet, so no list operations are involved yet.
*
* See setup_swap_map(): header page, bad pages,
* and the EOF part of the last cluster.
*/
err = inc_cluster_info_page(si, cluster_info, 0);
if (err)
goto err;
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr >= maxpages)
continue;
err = inc_cluster_info_page(si, cluster_info, page_nr);
if (err)
goto err;
}
for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) {
err = inc_cluster_info_page(si, cluster_info, i);
if (err)
goto err;
}
INIT_LIST_HEAD(&si->free_clusters);
INIT_LIST_HEAD(&si->full_clusters);
INIT_LIST_HEAD(&si->discard_clusters);
for (i = 0; i < SWAP_NR_ORDERS; i++) {
INIT_LIST_HEAD(&si->nonfull_clusters[i]);
INIT_LIST_HEAD(&si->frag_clusters[i]);
}
for (i = 0; i < nr_clusters; i++) {
struct swap_cluster_info *ci = &cluster_info[i];
if (ci->count) {
ci->flags = CLUSTER_FLAG_NONFULL;
list_add_tail(&ci->list, &si->nonfull_clusters[0]);
} else {
ci->flags = CLUSTER_FLAG_FREE;
list_add_tail(&ci->list, &si->free_clusters);
}
}
return cluster_info;
err_free:
free_cluster_info(cluster_info, maxpages);
err:
return ERR_PTR(err);
}
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
struct swap_info_struct *si;
struct filename *name;
struct file *swap_file = NULL;
struct address_space *mapping;
struct dentry *dentry;
int prio;
int error;
union swap_header *swap_header;
int nr_extents;
sector_t span;
unsigned long maxpages;
unsigned char *swap_map = NULL;
unsigned long *zeromap = NULL;
struct swap_cluster_info *cluster_info = NULL;
struct folio *folio = NULL;
struct inode *inode = NULL;
bool inced_nr_rotate_swap = false;
if (swap_flags & ~SWAP_FLAGS_VALID)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!swap_avail_heads)
return -ENOMEM;
si = alloc_swap_info();
if (IS_ERR(si))
return PTR_ERR(si);
INIT_WORK(&si->discard_work, swap_discard_work);
INIT_WORK(&si->reclaim_work, swap_reclaim_work);
name = getname(specialfile);
if (IS_ERR(name)) {
error = PTR_ERR(name);
name = NULL;
goto bad_swap;
}
swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
if (IS_ERR(swap_file)) {
error = PTR_ERR(swap_file);
swap_file = NULL;
goto bad_swap;
}
si->swap_file = swap_file;
mapping = swap_file->f_mapping;
dentry = swap_file->f_path.dentry;
inode = mapping->host;
error = claim_swapfile(si, inode);
if (unlikely(error))
goto bad_swap;
inode_lock(inode);
if (d_unlinked(dentry) || cant_mount(dentry)) {
error = -ENOENT;
goto bad_swap_unlock_inode;
}
if (IS_SWAPFILE(inode)) {
error = -EBUSY;
goto bad_swap_unlock_inode;
}
/*
* The swap subsystem needs a major overhaul to support this.
* It doesn't work yet so just disable it for now.
*/
if (mapping_min_folio_order(mapping) > 0) {
error = -EINVAL;
goto bad_swap_unlock_inode;
}
/*
* Read the swap header.
*/
if (!mapping->a_ops->read_folio) {
error = -EINVAL;
goto bad_swap_unlock_inode;
}
folio = read_mapping_folio(mapping, 0, swap_file);
if (IS_ERR(folio)) {
error = PTR_ERR(folio);
goto bad_swap_unlock_inode;
}
swap_header = kmap_local_folio(folio, 0);
maxpages = read_swap_header(si, swap_header, inode);
if (unlikely(!maxpages)) {
error = -EINVAL;
goto bad_swap_unlock_inode;
}
si->max = maxpages;
si->pages = maxpages - 1;
nr_extents = setup_swap_extents(si, &span);
if (nr_extents < 0) {
error = nr_extents;
goto bad_swap_unlock_inode;
}
if (si->pages != si->max - 1) {
pr_err("swap:%u != (max:%u - 1)\n", si->pages, si->max);
error = -EINVAL;
goto bad_swap_unlock_inode;
}
maxpages = si->max;
/* OK, set up the swap map and apply the bad block list */
swap_map = vzalloc(maxpages);
if (!swap_map) {
error = -ENOMEM;
goto bad_swap_unlock_inode;
}
error = swap_cgroup_swapon(si->type, maxpages);
if (error)
goto bad_swap_unlock_inode;
error = setup_swap_map(si, swap_header, swap_map, maxpages);
if (error)
goto bad_swap_unlock_inode;
/*
* Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
* be above MAX_PAGE_ORDER in case of a large swap file.
*/
zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
GFP_KERNEL | __GFP_ZERO);
if (!zeromap) {
error = -ENOMEM;
goto bad_swap_unlock_inode;
}
if (si->bdev && bdev_stable_writes(si->bdev))
si->flags |= SWP_STABLE_WRITES;
if (si->bdev && bdev_synchronous(si->bdev))
si->flags |= SWP_SYNCHRONOUS_IO;
if (si->bdev && bdev_nonrot(si->bdev)) {
si->flags |= SWP_SOLIDSTATE;
} else {
atomic_inc(&nr_rotate_swap);
inced_nr_rotate_swap = true;
}
cluster_info = setup_clusters(si, swap_header, maxpages);
if (IS_ERR(cluster_info)) {
error = PTR_ERR(cluster_info);
cluster_info = NULL;
goto bad_swap_unlock_inode;
}
if ((swap_flags & SWAP_FLAG_DISCARD) &&
si->bdev && bdev_max_discard_sectors(si->bdev)) {
/*
* When discard is enabled for swap with no particular
* policy flagged, we set all swap discard flags here in
* order to sustain backward compatibility with older
* swapon(8) releases.
*/
si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
SWP_PAGE_DISCARD);
/*
* By flagging sys_swapon, a sysadmin can tell us to
* either do single-time area discards only, or to just
* perform discards for released swap page-clusters.
* Now it's time to adjust the p->flags accordingly.
*/
if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
si->flags &= ~SWP_PAGE_DISCARD;
else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
si->flags &= ~SWP_AREA_DISCARD;
/* issue a swapon-time discard if it's still required */
if (si->flags & SWP_AREA_DISCARD) {
int err = discard_swap(si);
if (unlikely(err))
pr_err("swapon: discard_swap(%p): %d\n",
si, err);
}
}
error = zswap_swapon(si->type, maxpages);
if (error)
goto bad_swap_unlock_inode;
/*
* Flush any pending IO and dirty mappings before we start using this
* swap device.
*/
inode->i_flags |= S_SWAPFILE;
error = inode_drain_writes(inode);
if (error) {
inode->i_flags &= ~S_SWAPFILE;
goto free_swap_zswap;
}
mutex_lock(&swapon_mutex);
prio = -1;
if (swap_flags & SWAP_FLAG_PREFER)
prio = swap_flags & SWAP_FLAG_PRIO_MASK;
enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
K(si->pages), name->name, si->prio, nr_extents,
K((unsigned long long)span),
(si->flags & SWP_SOLIDSTATE) ? "SS" : "",
(si->flags & SWP_DISCARDABLE) ? "D" : "",
(si->flags & SWP_AREA_DISCARD) ? "s" : "",
(si->flags & SWP_PAGE_DISCARD) ? "c" : "");
mutex_unlock(&swapon_mutex);
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
error = 0;
goto out;
free_swap_zswap:
zswap_swapoff(si->type);
bad_swap_unlock_inode:
inode_unlock(inode);
bad_swap:
kfree(si->global_cluster);
si->global_cluster = NULL;
inode = NULL;
destroy_swap_extents(si);
swap_cgroup_swapoff(si->type);
spin_lock(&swap_lock);
si->swap_file = NULL;
si->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
kvfree(zeromap);
if (cluster_info)
free_cluster_info(cluster_info, maxpages);
if (inced_nr_rotate_swap)
atomic_dec(&nr_rotate_swap);
if (swap_file)
filp_close(swap_file, NULL);
out:
if (!IS_ERR_OR_NULL(folio))
folio_release_kmap(folio, swap_header);
if (name)
putname(name);
if (inode)
inode_unlock(inode);
return error;
}
void si_swapinfo(struct sysinfo *val)
{
unsigned int type;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *si = swap_info[type];
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
nr_to_be_unused += swap_usage_in_pages(si);
}
val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
/*
* Verify that nr swap entries are valid and increment their swap map counts.
*
* Returns error code in following case.
* - success -> 0
* - swp_entry is invalid -> EINVAL
* - swap-cache reference is requested but there is already one. -> EEXIST
* - swap-cache reference is requested but the entry is not used. -> ENOENT
* - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
{
struct swap_info_struct *si;
struct swap_cluster_info *ci;
unsigned long offset;
unsigned char count;
unsigned char has_cache;
int err, i;
si = swap_entry_to_info(entry);
if (WARN_ON_ONCE(!si)) {
pr_err("%s%08lx\n", Bad_file, entry.val);
return -EINVAL;
}
offset = swp_offset(entry);
VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
VM_WARN_ON(usage == 1 && nr > 1);
ci = swap_cluster_lock(si, offset);
err = 0;
for (i = 0; i < nr; i++) {
count = si->swap_map[offset + i];
/*
* swapin_readahead() doesn't check if a swap entry is valid, so the
* swap entry could be SWAP_MAP_BAD. Check here with lock held.
*/
if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
err = -ENOENT;
goto unlock_out;
}
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
if (!count && !has_cache) {
err = -ENOENT;
} else if (usage == SWAP_HAS_CACHE) {
if (has_cache)
err = -EEXIST;
} else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
err = -EINVAL;
}
if (err)
goto unlock_out;
}
for (i = 0; i < nr; i++) {
count = si->swap_map[offset + i];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
if (usage == SWAP_HAS_CACHE)
has_cache = SWAP_HAS_CACHE;
else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
count += usage;
else if (swap_count_continued(si, offset + i, count))
count = COUNT_CONTINUED;
else {
/*
* Don't need to rollback changes, because if
* usage == 1, there must be nr == 1.
*/
err = -ENOMEM;
goto unlock_out;
}
WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
}
unlock_out:
swap_cluster_unlock(ci);
return err;
}
/*
* Help swapoff by noting that swap entry belongs to shmem/tmpfs
* (in which case its reference count is never incremented).
*/
void swap_shmem_alloc(swp_entry_t entry, int nr)
{
__swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
}
/*
* Increase reference count of swap entry by 1.
* Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
* but could not be atomically allocated. Returns 0, just as if it succeeded,
* if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
* might occur if a page table entry has got corrupted.
*/
int swap_duplicate(swp_entry_t entry)
{
int err = 0;
while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
err = add_swap_count_continuation(entry, GFP_ATOMIC);
return err;
}
/*
* @entry: first swap entry from which we allocate nr swap cache.
*
* Called when allocating swap cache for existing swap entries,
* This can return error codes. Returns 0 at success.
* -EEXIST means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
int swapcache_prepare(swp_entry_t entry, int nr)
{
return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
}
/*
* Caller should ensure entries belong to the same folio so
* the entries won't cross a cluster boundary.
*/
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
swap_entries_put_cache(si, entry, nr);
}
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
* page of the original vmalloc'ed swap_map, to hold the continuation count
* (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
* again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
*
* These continuation pages are seldom referenced: the common paths all work
* on the original swap_map, only referring to a continuation page when the
* low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
*
* add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
* page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
* can be called after dropping locks.
*/
int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
{
struct swap_info_struct *si;
struct swap_cluster_info *ci;
struct page *head;
struct page *page;
struct page *list_page;
pgoff_t offset;
unsigned char count;
int ret = 0;
/*
* When debugging, it's easier to use __GFP_ZERO here; but it's better
* for latency not to zero a page while GFP_ATOMIC and holding locks.
*/
page = alloc_page(gfp_mask | __GFP_HIGHMEM);
si = get_swap_device(entry);
if (!si) {
/*
* An acceptable race has occurred since the failing
* __swap_duplicate(): the swap device may have been swapped off.
*/
goto outer;
}
offset = swp_offset(entry);
ci = swap_cluster_lock(si, offset);
count = swap_count(si->swap_map[offset]);
if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
/*
* The higher the swap count, the more likely it is that tasks
* will race to add swap count continuation: we need to avoid
* over-provisioning.
*/
goto out;
}
if (!page) {
ret = -ENOMEM;
goto out;
}
head = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
spin_lock(&si->cont_lock);
/*
* Page allocation does not initialize the page's lru field,
* but it does always reset its private field.
*/
if (!page_private(head)) {
BUG_ON(count & COUNT_CONTINUED);
INIT_LIST_HEAD(&head->lru);
set_page_private(head, SWP_CONTINUED);
si->flags |= SWP_CONTINUED;
}
list_for_each_entry(list_page, &head->lru, lru) {
unsigned char *map;
/*
* If the previous map said no continuation, but we've found
* a continuation page, free our allocation and use this one.
*/
if (!(count & COUNT_CONTINUED))
goto out_unlock_cont;
map = kmap_local_page(list_page) + offset;
count = *map;
kunmap_local(map);
/*
* If this continuation count now has some space in it,
* free our allocation and use this one.
*/
if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
goto out_unlock_cont;
}
list_add_tail(&page->lru, &head->lru);
page = NULL; /* now it's attached, don't free it */
out_unlock_cont:
spin_unlock(&si->cont_lock);
out:
swap_cluster_unlock(ci);
put_swap_device(si);
outer:
if (page)
__free_page(page);
return ret;
}
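/*
* Illustrative caller pattern (sketch only, not taken verbatim from any
* caller): with a page table lock held, rely on the GFP_ATOMIC attempt made
* inside swap_duplicate(); only if that reports -ENOMEM, drop the lock and
* retry the continuation allocation with GFP_KERNEL:
*
*	if (swap_duplicate(entry) == -ENOMEM) {
*		pte_unmap_unlock(pte, ptl);
*		if (add_swap_count_continuation(entry, GFP_KERNEL))
*			return -ENOMEM;
*		// re-take the lock and redo the duplication step
*	}
*/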
/*
* swap_count_continued - when the original swap_map count is incremented
* from SWAP_MAP_MAX, check if there is already a continuation page to carry
* into, carry if so, or else fail until a new continuation page is allocated;
* when the original swap_map count is decremented from 0 with continuation,
* borrow from the continuation and report whether it still holds more.
* Called while __swap_duplicate() or caller of swap_entry_put_locked()
* holds cluster lock.
*/
static bool swap_count_continued(struct swap_info_struct *si,
pgoff_t offset, unsigned char count)
{
struct page *head;
struct page *page;
unsigned char *map;
bool ret;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head) != SWP_CONTINUED) {
BUG_ON(count & COUNT_CONTINUED);
return false; /* need to add count continuation */
}
spin_lock(&si->cont_lock);
offset &= ~PAGE_MASK;
page = list_next_entry(head, lru);
map = kmap_local_page(page) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
/*
* Think of how you add 1 to 999
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
kunmap_local(map);
page = list_next_entry(page, lru);
BUG_ON(page == head);
map = kmap_local_page(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
kunmap_local(map);
page = list_next_entry(page, lru);
if (page == head) {
ret = false; /* add count continuation */
goto out;
}
map = kmap_local_page(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
kunmap_local(map);
while ((page = list_prev_entry(page, lru)) != head) {
map = kmap_local_page(page) + offset;
*map = COUNT_CONTINUED;
kunmap_local(map);
}
ret = true; /* incremented */
} else { /* decrementing */
/*
* Think of how you subtract 1 from 1000
*/
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
kunmap_local(map);
page = list_next_entry(page, lru);
BUG_ON(page == head);
map = kmap_local_page(page) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
kunmap_local(map);
while ((page = list_prev_entry(page, lru)) != head) {
map = kmap_local_page(page) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
kunmap_local(map);
}
ret = count == COUNT_CONTINUED;
}
out:
spin_unlock(&si->cont_lock);
return ret;
}
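/*
* Worked example of the carry/borrow above (illustrative numbers only):
* treating swap_map's SWAP_MAP_MAX as the low "digit" and each continuation
* byte as a higher digit of base SWAP_CONT_MAX, incrementing a count of
* SWAP_MAP_MAX carries into the first continuation byte (init_map sets it to
* 1), just as 999 + 1 = 1000 creates a new digit; decrementing a count of
* COUNT_CONTINUED borrows back from the continuation pages, just as
* 1000 - 1 = 999 consumes the carried digit.
*/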
/*
* free_swap_count_continuations - swapoff free all the continuation pages
* appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
*/
static void free_swap_count_continuations(struct swap_info_struct *si)
{
pgoff_t offset;
for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
struct page *head;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head)) {
struct page *page, *next;
list_for_each_entry_safe(page, next, &head->lru, lru) {
list_del(&page->lru);
__free_page(page);
}
}
}
}
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static bool __has_usable_swap(void)
{
return !plist_head_empty(&swap_active_head);
}
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
struct swap_info_struct *si, *next;
int nid = folio_nid(folio);
if (!(gfp & __GFP_IO))
return;
if (!__has_usable_swap())
return;
if (!blk_cgroup_congested())
return;
/*
* We've already scheduled a throttle, avoid taking the global swap
* lock.
*/
if (current->throttle_disk)
return;
spin_lock(&swap_avail_lock);
plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
avail_lists[nid]) {
if (si->bdev) {
blkcg_schedule_throttle(si->bdev->bd_disk, true);
break;
}
}
spin_unlock(&swap_avail_lock);
}
#endif
static int __init swapfile_init(void)
{
int nid;
swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
GFP_KERNEL);
if (!swap_avail_heads) {
pr_emerg("Not enough memory for swap heads, swap is disabled\n");
return -ENOMEM;
}
for_each_node(nid)
plist_head_init(&swap_avail_heads[nid]);
swapfile_maximum_size = arch_max_swapfile_size();
/*
* Once a cluster is freed, its swap table content is read
* only, and all swap cache readers (swap_cache_*) verify
* the content before use. So it's safe to use RCU slab here.
*/
if (!SWP_TABLE_USE_PAGE)
swap_table_cachep = kmem_cache_create("swap_table",
sizeof(struct swap_table),
0, SLAB_PANIC | SLAB_TYPESAFE_BY_RCU, NULL);
#ifdef CONFIG_MIGRATION
if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
swap_migration_ad_supported = true;
#endif /* CONFIG_MIGRATION */
return 0;
}
subsys_initcall(swapfile_init);
// SPDX-License-Identifier: GPL-2.0+
/*
* Maple Tree implementation
* Copyright (c) 2018-2022 Oracle Corporation
* Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
* Matthew Wilcox <willy@infradead.org>
* Copyright (c) 2023 ByteDance
* Author: Peng Zhang <zhangpeng.00@bytedance.com>
*/
/*
* DOC: Interesting implementation details of the Maple Tree
*
* Each node type has a number of slots for entries and a number of slots for
* pivots. In the case of dense nodes, the pivots are implied by the position
* and are simply the slot index + the minimum of the node.
*
* In regular B-Tree terms, pivots are called keys. The term pivot is used to
* indicate that the tree is specifying ranges. Pivots may appear in the
* subtree with an entry attached to the value whereas keys are unique to a
* specific position of a B-tree. Pivot values are inclusive of the slot with
* the same index.
*
*
* The following illustrates the layout of a range64 node's slots and pivots.
*
*
* Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
* ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬
* │ │ │ │ │ │ │ │ └─ Implied maximum
* │ │ │ │ │ │ │ └─ Pivot 14
* │ │ │ │ │ │ └─ Pivot 13
* │ │ │ │ │ └─ Pivot 12
* │ │ │ │ └─ Pivot 11
* │ │ │ └─ Pivot 2
* │ │ └─ Pivot 1
* │ └─ Pivot 0
* └─ Implied minimum
*
* Slot contents:
* Internal (non-leaf) nodes contain pointers to other nodes.
* Leaf nodes contain entries.
*
* The location of interest is often referred to as an offset. All offsets have
* a slot, but the last offset has an implied pivot from the node above (or
* UINT_MAX for the root node).
*
* Ranges complicate certain write activities. When modifying any of
* the B-tree variants, it is known that one entry will either be added or
* deleted. When modifying the Maple Tree, one store operation may overwrite
* the entire data set, or one half of the tree, or the middle half of the tree.
*
*/
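/*
* Illustrative example of the range encoding above (not part of the code):
* in a leaf with implied minimum 0, pivots {10, 20} and slots {A, B, C},
* entry A covers [0, 10], B covers [11, 20] and C covers [21, implied
* maximum], because each pivot is inclusive of the slot with the same index
* and the last slot runs to the implied maximum from the node above.
*/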
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>
#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>
#define TP_FCT tracepoint_string(__func__)
/*
* Kernel pointer hashing renders much of the maple tree dump useless as tagged
* pointers get hashed to arbitrary values.
*
* If CONFIG_DEBUG_VM_MAPLE_TREE is set we are in a debug mode where it is
* permissible to bypass this. Otherwise remain cautious and retain the hashing.
*
* Userland doesn't know about %px so also use %p there.
*/
#if defined(__KERNEL__) && defined(CONFIG_DEBUG_VM_MAPLE_TREE)
#define PTR_FMT "%px"
#else
#define PTR_FMT "%p"
#endif
#define MA_ROOT_PARENT 1
/*
* Maple state flags
* * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
*/
#define MA_STATE_PREALLOC 1
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;
#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
[maple_dense] = MAPLE_NODE_SLOTS,
[maple_leaf_64] = ULONG_MAX,
[maple_range_64] = ULONG_MAX,
[maple_arange_64] = ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif
static const unsigned char mt_slots[] = {
[maple_dense] = MAPLE_NODE_SLOTS,
[maple_leaf_64] = MAPLE_RANGE64_SLOTS,
[maple_range_64] = MAPLE_RANGE64_SLOTS,
[maple_arange_64] = MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]
static const unsigned char mt_pivots[] = {
[maple_dense] = 0,
[maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
[maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
[maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
static const unsigned char mt_min_slots[] = {
[maple_dense] = MAPLE_NODE_SLOTS / 2,
[maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
struct maple_big_node {
unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
union {
struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
struct {
unsigned long padding[MAPLE_BIG_NODE_GAPS];
unsigned long gap[MAPLE_BIG_NODE_GAPS];
};
};
unsigned char b_end;
enum maple_type type;
};
/*
* The maple_subtree_state is used to build a tree to replace a segment of an
* existing tree in a more atomic way. Any walkers of the older tree will hit a
* dead node and restart on updates.
*/
struct maple_subtree_state {
struct ma_state *orig_l; /* Original left side of subtree */
struct ma_state *orig_r; /* Original right side of subtree */
struct ma_state *l; /* New left side of subtree */
struct ma_state *m; /* New middle of subtree (rare) */
struct ma_state *r; /* New right side of subtree */
struct ma_topiary *free; /* nodes to be freed */
struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
struct maple_big_node *bn;
};
#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif
/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
return kmem_cache_alloc(maple_node_cache, gfp);
}
static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}
static void mt_return_sheaf(struct slab_sheaf *sheaf)
{
kmem_cache_return_sheaf(maple_node_cache, GFP_NOWAIT, sheaf);
}
static struct slab_sheaf *mt_get_sheaf(gfp_t gfp, int count)
{
return kmem_cache_prefill_sheaf(maple_node_cache, gfp, count);
}
static int mt_refill_sheaf(gfp_t gfp, struct slab_sheaf **sheaf,
unsigned int size)
{
return kmem_cache_refill_sheaf(maple_node_cache, gfp, sheaf, size);
}
/*
* ma_free_rcu() - Use rcu callback to free a maple node
* @node: The node to free
*
* The maple tree uses the parent pointer to indicate this node is no longer in
* use and will be freed.
*/
static void ma_free_rcu(struct maple_node *node)
{
WARN_ON(node->parent != ma_parent_ptr(node));
kfree_rcu(node, rcu);
}
static void mt_set_height(struct maple_tree *mt, unsigned char height)
{
unsigned int new_flags = mt->ma_flags;
new_flags &= ~MT_FLAGS_HEIGHT_MASK;
MT_BUG_ON(mt, height > MAPLE_HEIGHT_MAX);
new_flags |= height << MT_FLAGS_HEIGHT_OFFSET;
mt->ma_flags = new_flags;
}
static unsigned int mas_mt_height(struct ma_state *mas)
{
return mt_height(mas->tree);
}
static inline unsigned int mt_attr(struct maple_tree *mt)
{
return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
}
static __always_inline enum maple_type mte_node_type(
const struct maple_enode *entry)
{
return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
MAPLE_NODE_TYPE_MASK;
}
static __always_inline bool ma_is_dense(const enum maple_type type)
{
return type < maple_leaf_64;
}
static __always_inline bool ma_is_leaf(const enum maple_type type)
{
return type < maple_range_64;
}
static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
return ma_is_leaf(mte_node_type(entry));
}
/*
* We also reserve values with the bottom two bits set to '10' which are
* below 4096
*/
static __always_inline bool mt_is_reserved(const void *entry)
{
return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
xa_is_internal(entry);
}
static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
mas->node = MA_ERROR(err);
mas->status = ma_error;
}
static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
return mas->status == ma_root;
}
static __always_inline bool mas_is_start(const struct ma_state *mas)
{
return mas->status == ma_start;
}
static __always_inline bool mas_is_none(const struct ma_state *mas)
{
return mas->status == ma_none;
}
static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
return mas->status == ma_pause;
}
static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
return mas->status == ma_overflow;
}
static inline bool mas_is_underflow(struct ma_state *mas)
{
return mas->status == ma_underflow;
}
static __always_inline struct maple_node *mte_to_node(
const struct maple_enode *entry)
{
return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}
/*
* mte_to_mat() - Convert a maple encoded node to a maple topiary node.
* @entry: The maple encoded node
*
* Return: a maple topiary pointer
*/
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
return (struct maple_topiary *)
((unsigned long)entry & ~MAPLE_NODE_MASK);
}
/*
* mas_mn() - Get the maple state node.
* @mas: The maple state
*
* Return: the maple node (not encoded - bare pointer).
*/
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
return mte_to_node(mas->node);
}
/*
* mte_set_node_dead() - Set a maple encoded node as dead.
* @mn: The maple encoded node.
*/
static inline void mte_set_node_dead(struct maple_enode *mn)
{
mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
smp_wmb(); /* Needed for RCU */
}
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE 0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT 0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL 0x04
static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
enum maple_type type)
{
return (void *)((unsigned long)node | (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}
static inline void *mte_mk_root(const struct maple_enode *node)
{
return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}
static inline void *mte_safe_root(const struct maple_enode *node)
{
return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}
static inline void __maybe_unused *mte_set_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}
static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}
static inline bool __maybe_unused mte_has_null(const struct maple_enode *node)
{
return (unsigned long)node & MAPLE_ENODE_NULL;
}
static __always_inline bool ma_is_root(struct maple_node *node)
{
return ((unsigned long)node->parent & MA_ROOT_PARENT);
}
static __always_inline bool mte_is_root(const struct maple_enode *node)
{
return ma_is_root(mte_to_node(node));
}
static inline bool mas_is_root_limits(const struct ma_state *mas)
{
return !mas->min && mas->max == ULONG_MAX;
}
static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
/*
* The Parent Pointer
* Excluding root, the parent pointer is 256B aligned like all other tree nodes.
* When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
* bit values need an extra bit to store the offset. This extra bit comes from
* a reuse of the last bit in the node type. This is possible by using bit 1 to
* indicate if bit 2 is part of the type or the slot.
*
* Node types:
* 0b??1 = Root
* 0b?00 = 16 bit nodes
* 0b010 = 32 bit nodes
* 0b110 = 64 bit nodes
*
* Slot size and alignment
* 0b??1 : Root
* 0b?00 : 16 bit values, type in 0-1, slot in 2-7
* 0b010 : 32 bit values, type in 0-2, slot in 3-7
* 0b110 : 64 bit values, type in 0-2, slot in 3-7
*/
#define MAPLE_PARENT_ROOT 0x01
#define MAPLE_PARENT_SLOT_SHIFT 0x03
#define MAPLE_PARENT_SLOT_MASK 0xF8
#define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
#define MAPLE_PARENT_16B_SLOT_MASK 0xFC
#define MAPLE_PARENT_RANGE64 0x06
#define MAPLE_PARENT_RANGE32 0x02
#define MAPLE_PARENT_NOT_RANGE16 0x02
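/*
* Illustrative decoding of the layout above (sketch only): a parent value of
* (node | MAPLE_PARENT_RANGE64 | (3 << MAPLE_PARENT_SLOT_SHIFT)) names a
* range64/arange64 parent at @node with the child in slot 3.
* mte_parent_shift() and mte_parent_slot_mask() below recover the slot bits,
* and mte_parent() masks off MAPLE_NODE_MASK to recover the node pointer.
*/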
/*
* mte_parent_shift() - Get the parent shift for the slot storage.
* @parent: The parent pointer cast as an unsigned long
* Return: The shift into that pointer to the start of the slot
*/
static inline unsigned long mte_parent_shift(unsigned long parent)
{
/* Note bit 1 == 0 means 16B */
if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
return MAPLE_PARENT_SLOT_SHIFT;
return MAPLE_PARENT_16B_SLOT_SHIFT;
}
/*
* mte_parent_slot_mask() - Get the slot mask for the parent.
* @parent: The parent pointer cast as an unsigned long.
* Return: The slot mask for that parent.
*/
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
/* Note bit 1 == 0 means 16B */
if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
return MAPLE_PARENT_SLOT_MASK;
return MAPLE_PARENT_16B_SLOT_MASK;
}
/*
* mas_parent_type() - Return the maple_type of the parent from the stored
* parent type.
* @mas: The maple state
* @enode: The maple_enode to extract the parent's enum
* Return: The node->parent maple_type
*/
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
unsigned long p_type;
p_type = (unsigned long)mte_to_node(enode)->parent;
if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
return 0;
p_type &= MAPLE_NODE_MASK;
p_type &= ~mte_parent_slot_mask(p_type);
switch (p_type) {
case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
if (mt_is_alloc(mas->tree))
return maple_arange_64;
return maple_range_64;
}
return 0;
}
/*
* mas_set_parent() - Set the parent node and encode the slot
* @mas: The maple state
* @enode: The encoded maple node.
* @parent: The encoded maple node that is the parent of @enode.
* @slot: The slot that @enode resides in @parent.
*
* Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
* parent type.
*/
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
const struct maple_enode *parent, unsigned char slot)
{
unsigned long val = (unsigned long)parent;
unsigned long shift;
unsigned long type;
enum maple_type p_type = mte_node_type(parent);
MAS_BUG_ON(mas, p_type == maple_dense);
MAS_BUG_ON(mas, p_type == maple_leaf_64);
switch (p_type) {
case maple_range_64:
case maple_arange_64:
shift = MAPLE_PARENT_SLOT_SHIFT;
type = MAPLE_PARENT_RANGE64;
break;
default:
case maple_dense:
case maple_leaf_64:
shift = type = 0;
break;
}
val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
val |= (slot << shift) | type;
mte_to_node(enode)->parent = ma_parent_ptr(val);
}
/*
* mte_parent_slot() - get the parent slot of @enode.
* @enode: The encoded maple node.
*
* Return: The slot in the parent node where @enode resides.
*/
static __always_inline
unsigned int mte_parent_slot(const struct maple_enode *enode)
{
unsigned long val = (unsigned long)mte_to_node(enode)->parent;
if (unlikely(val & MA_ROOT_PARENT))
return 0;
/*
* Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
* by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
*/
return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}
/*
* mte_parent() - Get the parent of @node.
* @enode: The encoded maple node.
*
* Return: The parent maple node.
*/
static __always_inline
struct maple_node *mte_parent(const struct maple_enode *enode)
{
return (void *)((unsigned long)
(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}
/*
* ma_dead_node() - check if the @node is dead.
* @node: The maple node
*
* Return: true if dead, false otherwise.
*/
static __always_inline bool ma_dead_node(const struct maple_node *node)
{
struct maple_node *parent;
/* Do not reorder reads from the node prior to the parent check */
smp_rmb();
parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
return (parent == node);
}
/*
* mte_dead_node() - check if the @enode is dead.
* @enode: The encoded maple node
*
* Return: true if dead, false otherwise.
*/
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
struct maple_node *node;
node = mte_to_node(enode);
return ma_dead_node(node);
}
/*
* ma_pivots() - Get a pointer to the maple node pivots.
* @node: the maple node
* @type: the node type
*
* In the event of a dead node, this array may be %NULL
*
* Return: A pointer to the maple node pivots
*/
static inline unsigned long *ma_pivots(struct maple_node *node,
enum maple_type type)
{
switch (type) {
case maple_arange_64:
return node->ma64.pivot;
case maple_range_64:
case maple_leaf_64:
return node->mr64.pivot;
case maple_dense:
return NULL;
}
return NULL;
}
/*
* ma_gaps() - Get a pointer to the maple node gaps.
* @node: the maple node
* @type: the node type
*
* Return: A pointer to the maple node gaps
*/
static inline unsigned long *ma_gaps(struct maple_node *node,
enum maple_type type)
{
switch (type) {
case maple_arange_64:
return node->ma64.gap;
case maple_range_64:
case maple_leaf_64:
case maple_dense:
return NULL;
}
return NULL;
}
/*
* mas_safe_pivot() - get the pivot at @piv or mas->max.
* @mas: The maple state
* @pivots: The pointer to the maple node pivots
* @piv: The pivot to fetch
* @type: The maple node type
*
* Return: The pivot at @piv within the limit of the @pivots array, @mas->max
* otherwise.
*/
static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
unsigned char piv, enum maple_type type)
{
if (piv >= mt_pivots[type])
return mas->max;
return pivots[piv];
}
/*
* mas_safe_min() - Return the minimum for a given offset.
* @mas: The maple state
* @pivots: The pointer to the maple node pivots
* @offset: The offset into the pivot array
*
* Return: The minimum range value that is contained in @offset.
*/
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
if (likely(offset))
return pivots[offset - 1] + 1;
return mas->min;
}
/*
* mte_set_pivot() - Set a pivot to a value in an encoded maple node.
* @mn: The encoded maple node
* @piv: The pivot offset
* @val: The value of the pivot
*/
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
unsigned long val)
{
struct maple_node *node = mte_to_node(mn);
enum maple_type type = mte_node_type(mn);
BUG_ON(piv >= mt_pivots[type]);
switch (type) {
case maple_range_64:
case maple_leaf_64:
node->mr64.pivot[piv] = val;
break;
case maple_arange_64:
node->ma64.pivot[piv] = val;
break;
case maple_dense:
break;
}
}
/*
* ma_slots() - Get a pointer to the maple node slots.
* @mn: The maple node
* @mt: The maple node type
*
* Return: A pointer to the maple node slots
*/
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
switch (mt) {
case maple_arange_64:
return mn->ma64.slot;
case maple_range_64:
case maple_leaf_64:
return mn->mr64.slot;
case maple_dense:
return mn->slot;
}
return NULL;
}
static inline bool mt_write_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
lockdep_is_held(&mt->ma_lock);
}
static __always_inline bool mt_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_lock_is_held(mt) :
lockdep_is_held(&mt->ma_lock);
}
static __always_inline void *mt_slot(const struct maple_tree *mt,
void __rcu **slots, unsigned char offset)
{
return rcu_dereference_check(slots[offset], mt_locked(mt));
}
static __always_inline void *mt_slot_locked(struct maple_tree *mt,
void __rcu **slots, unsigned char offset)
{
return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}
/*
* mas_slot_locked() - Get the slot value when holding the maple tree lock.
* @mas: The maple state
* @slots: The pointer to the slots
* @offset: The offset into the slots array to fetch
*
* Return: The entry stored in @slots at the @offset.
*/
static __always_inline void *mas_slot_locked(struct ma_state *mas,
void __rcu **slots, unsigned char offset)
{
return mt_slot_locked(mas->tree, slots, offset);
}
/*
* mas_slot() - Get the slot value when not holding the maple tree lock.
* @mas: The maple state
* @slots: The pointer to the slots
* @offset: The offset into the slots array to fetch
*
* Return: The entry stored in @slots at the @offset
*/
static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
unsigned char offset)
{
return mt_slot(mas->tree, slots, offset);
}
/*
* mas_root() - Get the maple tree root.
* @mas: The maple state.
*
* Return: The pointer to the root of the tree
*/
static __always_inline void *mas_root(struct ma_state *mas)
{
return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}
static inline void *mt_root_locked(struct maple_tree *mt)
{
return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}
/*
* mas_root_locked() - Get the maple tree root when holding the maple tree lock.
* @mas: The maple state.
*
* Return: The pointer to the root of the tree
*/
static inline void *mas_root_locked(struct ma_state *mas)
{
return mt_root_locked(mas->tree);
}
static inline struct maple_metadata *ma_meta(struct maple_node *mn,
enum maple_type mt)
{
switch (mt) {
case maple_arange_64:
return &mn->ma64.meta;
default:
return &mn->mr64.meta;
}
}
/*
* ma_set_meta() - Set the metadata information of a node.
* @mn: The maple node
* @mt: The maple node type
* @offset: The offset of the highest sub-gap in this node.
* @end: The end of the data in this node.
*/
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
unsigned char offset, unsigned char end)
{
struct maple_metadata *meta = ma_meta(mn, mt);
meta->gap = offset;
meta->end = end;
}
/*
* mt_clear_meta() - clear the metadata information of a node, if it exists
* @mt: The maple tree
* @mn: The maple node
* @type: The maple node type
*/
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
enum maple_type type)
{
struct maple_metadata *meta;
unsigned long *pivots;
void __rcu **slots;
void *next;
switch (type) {
case maple_range_64:
pivots = mn->mr64.pivot;
if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
slots = mn->mr64.slot;
next = mt_slot_locked(mt, slots,
MAPLE_RANGE64_SLOTS - 1);
if (unlikely((mte_to_node(next) &&
mte_node_type(next))))
return; /* no metadata, could be node */
}
fallthrough;
case maple_arange_64:
meta = ma_meta(mn, type);
break;
default:
return;
}
meta->gap = 0;
meta->end = 0;
}
/*
* ma_meta_end() - Get the data end of a node from the metadata
* @mn: The maple node
* @mt: The maple node type
*/
static inline unsigned char ma_meta_end(struct maple_node *mn,
enum maple_type mt)
{
struct maple_metadata *meta = ma_meta(mn, mt);
return meta->end;
}
/*
* ma_meta_gap() - Get the largest gap location of a node from the metadata
* @mn: The maple node
*/
static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
return mn->ma64.meta.gap;
}
/*
* ma_set_meta_gap() - Set the largest gap location in a node's metadata
* @mn: The maple node
* @mt: The maple node type
* @offset: The location of the largest gap.
*/
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
unsigned char offset)
{
struct maple_metadata *meta = ma_meta(mn, mt);
meta->gap = offset;
}
/*
* mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
* @mat: the ma_topiary, a linked list of dead nodes.
* @dead_enode: the node to be marked as dead and added to the tail of the list
*
* Add the @dead_enode to the linked list in @mat.
*/
static inline void mat_add(struct ma_topiary *mat,
struct maple_enode *dead_enode)
{
mte_set_node_dead(dead_enode);
mte_to_mat(dead_enode)->next = NULL;
if (!mat->tail) {
mat->tail = mat->head = dead_enode;
return;
}
mte_to_mat(mat->tail)->next = dead_enode;
mat->tail = dead_enode;
}
static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free);
/*
* mas_mat_destroy() - Free all nodes and subtrees in a dead list.
* @mas: the maple state
* @mat: the ma_topiary linked list of dead nodes to free.
*
* Destroy walk a dead list.
*/
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
struct maple_enode *next;
struct maple_node *node;
bool in_rcu = mt_in_rcu(mas->tree);
while (mat->head) {
next = mte_to_mat(mat->head)->next;
node = mte_to_node(mat->head);
mt_destroy_walk(mat->head, mas->tree, !in_rcu);
if (in_rcu)
call_rcu(&node->rcu, mt_free_walk);
mat->head = next;
}
}
/*
* mas_descend() - Descend into the slot stored in the ma_state.
* @mas: the maple state.
*
* Note: Not RCU safe, only use in write side or debug code.
*/
static inline void mas_descend(struct ma_state *mas)
{
enum maple_type type;
unsigned long *pivots;
struct maple_node *node;
void __rcu **slots;
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
if (mas->offset)
mas->min = pivots[mas->offset - 1] + 1;
mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
mas->node = mas_slot(mas, slots, mas->offset);
}
/*
* mas_ascend() - Walk up a level of the tree.
* @mas: The maple state
*
* Sets the @mas->max and @mas->min for the parent node of mas->node. This
* may cause several levels of walking up to find the correct min and max.
* May find a dead node which will cause a premature return.
* Return: 1 on dead node, 0 otherwise
*/
static int mas_ascend(struct ma_state *mas)
{
struct maple_enode *p_enode; /* parent enode. */
struct maple_enode *a_enode; /* ancestor enode. */
struct maple_node *a_node; /* ancestor node. */
struct maple_node *p_node; /* parent node. */
unsigned char a_slot;
enum maple_type a_type;
unsigned long min, max;
unsigned long *pivots;
bool set_max = false, set_min = false;
a_node = mas_mn(mas);
if (ma_is_root(a_node)) {
mas->offset = 0;
return 0;
}
p_node = mte_parent(mas->node);
if (unlikely(a_node == p_node))
return 1;
a_type = mas_parent_type(mas, mas->node);
mas->offset = mte_parent_slot(mas->node);
a_enode = mt_mk_node(p_node, a_type);
/* Check to make sure all parent information is still accurate */
if (p_node != mte_parent(mas->node))
return 1;
mas->node = a_enode;
if (mte_is_root(a_enode)) {
mas->max = ULONG_MAX;
mas->min = 0;
return 0;
}
min = 0;
max = ULONG_MAX;
/*
* !mas->offset implies that parent node min == mas->min.
* mas->offset > 0 implies that we need to walk up to find the
* implied pivot min.
*/
if (!mas->offset) {
min = mas->min;
set_min = true;
}
if (mas->max == ULONG_MAX)
set_max = true;
do {
p_enode = a_enode;
a_type = mas_parent_type(mas, p_enode);
a_node = mte_parent(p_enode);
a_slot = mte_parent_slot(p_enode);
a_enode = mt_mk_node(a_node, a_type);
pivots = ma_pivots(a_node, a_type);
if (unlikely(ma_dead_node(a_node)))
return 1;
if (!set_min && a_slot) {
set_min = true;
min = pivots[a_slot - 1] + 1;
}
if (!set_max && a_slot < mt_pivots[a_type]) {
set_max = true;
max = pivots[a_slot];
}
if (unlikely(ma_dead_node(a_node)))
return 1;
if (unlikely(ma_is_root(a_node)))
break;
} while (!set_min || !set_max);
mas->max = max;
mas->min = min;
return 0;
}
/*
* mas_pop_node() - Get a previously allocated maple node from the maple state.
* @mas: The maple state
*
* Return: A pointer to a maple node.
*/
static __always_inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
struct maple_node *ret;
if (mas->alloc) {
ret = mas->alloc;
mas->alloc = NULL;
goto out;
}
if (WARN_ON_ONCE(!mas->sheaf))
return NULL;
ret = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT, mas->sheaf);
out:
memset(ret, 0, sizeof(*ret));
return ret;
}
/*
* mas_alloc_nodes() - Allocate nodes into a maple state
* @mas: The maple state
* @gfp: The GFP Flags
*/
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
if (!mas->node_request)
return;
if (mas->node_request == 1) {
if (mas->sheaf)
goto use_sheaf;
if (mas->alloc)
return;
mas->alloc = mt_alloc_one(gfp);
if (!mas->alloc)
goto error;
mas->node_request = 0;
return;
}
use_sheaf:
if (unlikely(mas->alloc)) {
kfree(mas->alloc);
mas->alloc = NULL;
}
if (mas->sheaf) {
unsigned long refill;
refill = mas->node_request;
if (kmem_cache_sheaf_size(mas->sheaf) >= refill) {
mas->node_request = 0;
return;
}
if (mt_refill_sheaf(gfp, &mas->sheaf, refill))
goto error;
mas->node_request = 0;
return;
}
mas->sheaf = mt_get_sheaf(gfp, mas->node_request);
if (likely(mas->sheaf)) {
mas->node_request = 0;
return;
}
error:
mas_set_err(mas, -ENOMEM);
}
static inline void mas_empty_nodes(struct ma_state *mas)
{
mas->node_request = 0;
if (mas->sheaf) {
mt_return_sheaf(mas->sheaf);
mas->sheaf = NULL;
}
if (mas->alloc) {
kfree(mas->alloc);
mas->alloc = NULL;
}
}
/*
* mas_free() - Free an encoded maple node
* @mas: The maple state
* @used: The encoded maple node to free.
*
* Uses rcu free if necessary, pushes @used back on the maple state allocations
* otherwise.
*/
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
ma_free_rcu(mte_to_node(used));
}
/*
* mas_start() - Sets up maple state for operations.
* @mas: The maple state.
*
* If mas->status == ma_start, then set the min, max and depth to
* defaults.
*
* Return:
* - If mas->node is an error or not mas_start, return NULL.
* - If it's an empty tree: NULL & mas->status == ma_none
* - If it's a single entry: The entry & mas->status == ma_root
* - If it's a tree: NULL & mas->status == ma_active
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
if (likely(mas_is_start(mas))) {
struct maple_enode *root;
mas->min = 0;
mas->max = ULONG_MAX;
retry:
mas->depth = 0;
root = mas_root(mas);
/* Tree with nodes */
if (likely(xa_is_node(root))) {
mas->depth = 0;
mas->status = ma_active;
mas->node = mte_safe_root(root);
mas->offset = 0;
if (mte_dead_node(mas->node))
goto retry;
return NULL;
}
mas->node = NULL;
/* empty tree */
if (unlikely(!root)) {
mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
}
/* Single entry tree */
mas->status = ma_root;
mas->offset = MAPLE_NODE_SLOTS;
/* Single entry tree. */
if (mas->index > 0)
return NULL;
return root;
}
return NULL;
}
/*
* ma_data_end() - Find the end of the data in a node.
* @node: The maple node
* @type: The maple node type
* @pivots: The array of pivots in the node
* @max: The maximum value in the node
*
* Uses metadata to find the end of the data when possible.
* Return: The zero indexed last slot with data (may be null).
*/
static __always_inline unsigned char ma_data_end(struct maple_node *node,
enum maple_type type, unsigned long *pivots, unsigned long max)
{
unsigned char offset;
if (!pivots)
return 0;
if (type == maple_arange_64)
return ma_meta_end(node, type);
offset = mt_pivots[type] - 1;
if (likely(!pivots[offset]))
return ma_meta_end(node, type);
if (likely(pivots[offset] == max))
return offset;
return mt_pivots[type];
}
/*
* mas_data_end() - Find the end of the data (slot).
* @mas: the maple state
*
* This method is optimized to check the metadata of a node if the node type
* supports data end metadata.
*
* Return: The zero indexed last slot with data (may be null).
*/
static inline unsigned char mas_data_end(struct ma_state *mas)
{
enum maple_type type;
struct maple_node *node;
unsigned char offset;
unsigned long *pivots;
type = mte_node_type(mas->node);
node = mas_mn(mas);
if (type == maple_arange_64)
return ma_meta_end(node, type);
pivots = ma_pivots(node, type);
if (unlikely(ma_dead_node(node)))
return 0;
offset = mt_pivots[type] - 1;
if (likely(!pivots[offset]))
return ma_meta_end(node, type);
if (likely(pivots[offset] == mas->max))
return offset;
return mt_pivots[type];
}
/*
* mas_leaf_max_gap() - Returns the largest gap in a leaf node
* @mas: the maple state
*
* Return: The maximum gap in the leaf.
*/
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
enum maple_type mt;
unsigned long pstart, gap, max_gap;
struct maple_node *mn;
unsigned long *pivots;
void __rcu **slots;
unsigned char i;
unsigned char max_piv;
mt = mte_node_type(mas->node);
mn = mas_mn(mas);
slots = ma_slots(mn, mt);
max_gap = 0;
if (unlikely(ma_is_dense(mt))) {
gap = 0;
for (i = 0; i < mt_slots[mt]; i++) {
if (slots[i]) {
if (gap > max_gap)
max_gap = gap;
gap = 0;
} else {
gap++;
}
}
if (gap > max_gap)
max_gap = gap;
return max_gap;
}
/*
* Checking the first implied pivot optimizes the loop below, and slot 1 may
* be skipped if there is a gap in slot 0.
*/
pivots = ma_pivots(mn, mt);
if (likely(!slots[0])) {
max_gap = pivots[0] - mas->min + 1;
i = 2;
} else {
i = 1;
}
/* reduce max_piv as the special case is checked before the loop */
max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
/*
* Check end implied pivot which can only be a gap on the right most
* node.
*/
if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
gap = ULONG_MAX - pivots[max_piv];
if (gap > max_gap)
max_gap = gap;
if (max_gap > pivots[max_piv] - mas->min)
return max_gap;
}
for (; i <= max_piv; i++) {
/* data == no gap. */
if (likely(slots[i]))
continue;
pstart = pivots[i - 1];
gap = pivots[i] - pstart;
if (gap > max_gap)
max_gap = gap;
/* There cannot be two gaps in a row. */
i++;
}
return max_gap;
}
/*
* ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
* @node: The maple node
* @gaps: The pointer to the gaps
* @mt: The maple node type
* @off: Pointer to store the offset location of the gap.
*
* Uses the metadata data end to scan backwards across set gaps.
*
* Return: The maximum gap value
*/
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
unsigned char *off)
{
unsigned char offset, i;
unsigned long max_gap = 0;
i = offset = ma_meta_end(node, mt);
do {
if (gaps[i] > max_gap) {
max_gap = gaps[i];
offset = i;
}
} while (i--);
*off = offset;
return max_gap;
}
/*
* mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
* @mas: The maple state.
*
* Return: The gap value.
*/
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
unsigned long *gaps;
unsigned char offset;
enum maple_type mt;
struct maple_node *node;
mt = mte_node_type(mas->node);
if (ma_is_leaf(mt))
return mas_leaf_max_gap(mas);
node = mas_mn(mas);
MAS_BUG_ON(mas, mt != maple_arange_64);
offset = ma_meta_gap(node);
gaps = ma_gaps(node, mt);
return gaps[offset];
}
/*
* mas_parent_gap() - Set the parent gap and any gaps above, as needed
* @mas: The maple state
* @offset: The gap offset in the parent to set
* @new: The new gap value.
*
* Set the parent gap then continue to set the gap upwards, using the metadata
* of the parent to see if it is necessary to check the node above.
*/
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
unsigned long new)
{
unsigned long meta_gap = 0;
struct maple_node *pnode;
struct maple_enode *penode;
unsigned long *pgaps;
unsigned char meta_offset;
enum maple_type pmt;
pnode = mte_parent(mas->node);
pmt = mas_parent_type(mas, mas->node);
penode = mt_mk_node(pnode, pmt);
pgaps = ma_gaps(pnode, pmt);
ascend:
MAS_BUG_ON(mas, pmt != maple_arange_64);
meta_offset = ma_meta_gap(pnode);
meta_gap = pgaps[meta_offset];
pgaps[offset] = new;
if (meta_gap == new)
return;
if (offset != meta_offset) {
if (meta_gap > new)
return;
ma_set_meta_gap(pnode, pmt, offset);
} else if (new < meta_gap) {
new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
ma_set_meta_gap(pnode, pmt, meta_offset);
}
if (ma_is_root(pnode))
return;
/* Go to the parent node. */
pnode = mte_parent(penode);
pmt = mas_parent_type(mas, penode);
pgaps = ma_gaps(pnode, pmt);
offset = mte_parent_slot(penode);
penode = mt_mk_node(pnode, pmt);
goto ascend;
}
/*
* mas_update_gap() - Update a node's gaps and propagate up if necessary.
* @mas: the maple state.
*/
static inline void mas_update_gap(struct ma_state *mas)
{
unsigned char pslot;
unsigned long p_gap;
unsigned long max_gap;
if (!mt_is_alloc(mas->tree))
return;
if (mte_is_root(mas->node))
return;
max_gap = mas_max_gap(mas);
pslot = mte_parent_slot(mas->node);
p_gap = ma_gaps(mte_parent(mas->node),
mas_parent_type(mas, mas->node))[pslot];
if (p_gap != max_gap)
mas_parent_gap(mas, pslot, max_gap);
}
/*
* mas_adopt_children() - Set the parent pointer of all nodes in @parent to
* @parent with the slot encoded.
* @mas: the maple state (for the tree)
* @parent: the maple encoded node containing the children.
*/
static inline void mas_adopt_children(struct ma_state *mas,
struct maple_enode *parent)
{
enum maple_type type = mte_node_type(parent);
struct maple_node *node = mte_to_node(parent);
void __rcu **slots = ma_slots(node, type);
unsigned long *pivots = ma_pivots(node, type);
struct maple_enode *child;
unsigned char offset;
offset = ma_data_end(node, type, pivots, mas->max);
do {
child = mas_slot_locked(mas, slots, offset);
mas_set_parent(mas, child, parent, offset);
} while (offset--);
}
/*
* mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
* node as dead.
* @mas: the maple state with the new node
* @old_enode: The old maple encoded node to replace.
* @new_height: if we are inserting a root node, update the height of the tree
*/
static inline void mas_put_in_tree(struct ma_state *mas,
struct maple_enode *old_enode, char new_height)
__must_hold(mas->tree->ma_lock)
{
unsigned char offset;
void __rcu **slots;
if (mte_is_root(mas->node)) {
mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
mt_set_height(mas->tree, new_height);
} else {
offset = mte_parent_slot(mas->node);
slots = ma_slots(mte_parent(mas->node),
mas_parent_type(mas, mas->node));
rcu_assign_pointer(slots[offset], mas->node);
}
mte_set_node_dead(old_enode);
}
/*
* mas_replace_node() - Replace a node by putting it in the tree, marking it
* dead, and freeing it.
 * Uses the parent encoding to locate the maple node in the tree.
* @mas: the ma_state with @mas->node pointing to the new node.
* @old_enode: The old maple encoded node.
* @new_height: The new height of the tree as a result of the operation
*/
static inline void mas_replace_node(struct ma_state *mas,
struct maple_enode *old_enode, unsigned char new_height)
__must_hold(mas->tree->ma_lock)
{
mas_put_in_tree(mas, old_enode, new_height);
mas_free(mas, old_enode);
}
/*
 * mas_find_child() - Find a child whose parent is @mas->node.
* @mas: the maple state with the parent.
* @child: the maple state to store the child.
*/
static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
__must_hold(mas->tree->ma_lock)
{
enum maple_type mt;
unsigned char offset;
unsigned char end;
unsigned long *pivots;
struct maple_enode *entry;
struct maple_node *node;
void __rcu **slots;
mt = mte_node_type(mas->node);
node = mas_mn(mas);
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
end = ma_data_end(node, mt, pivots, mas->max);
for (offset = mas->offset; offset <= end; offset++) {
entry = mas_slot_locked(mas, slots, offset);
if (mte_parent(entry) == node) {
*child = *mas;
mas->offset = offset + 1;
child->offset = offset;
mas_descend(child);
child->offset = 0;
return true;
}
}
return false;
}
/*
* mab_shift_right() - Shift the data in mab right. Note, does not clean out the
* old data or set b_node->b_end.
* @b_node: the maple_big_node
* @shift: the shift count
*/
static inline void mab_shift_right(struct maple_big_node *b_node,
unsigned char shift)
{
unsigned long size = b_node->b_end * sizeof(unsigned long);
memmove(b_node->pivot + shift, b_node->pivot, size);
memmove(b_node->slot + shift, b_node->slot, size);
if (b_node->type == maple_arange_64)
memmove(b_node->gap + shift, b_node->gap, size);
}
/*
* mab_middle_node() - Check if a middle node is needed (unlikely)
* @b_node: the maple_big_node that contains the data.
* @split: the potential split location
* @slot_count: the size that can be stored in a single node being considered.
*
* Return: true if a middle node is required.
*/
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
unsigned char slot_count)
{
unsigned char size = b_node->b_end;
if (size >= 2 * slot_count)
return true;
if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
return true;
return false;
}
/*
* mab_no_null_split() - ensure the split doesn't fall on a NULL
* @b_node: the maple_big_node with the data
* @split: the suggested split location
* @slot_count: the number of slots in the node being considered.
*
* Return: the split location.
*/
static inline int mab_no_null_split(struct maple_big_node *b_node,
unsigned char split, unsigned char slot_count)
{
if (!b_node->slot[split]) {
/*
* If the split is less than the max slot && the right side will
* still be sufficient, then increment the split on NULL.
*/
if ((split < slot_count - 1) &&
(b_node->b_end - split) > (mt_min_slots[b_node->type]))
split++;
else
split--;
}
return split;
}
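/*
 * Example: if the suggested split lands on a NULL slot, it is nudged one
 * position to the right provided the right-hand portion would still hold
 * more than mt_min_slots[] entries; otherwise it is nudged one position to
 * the left, so that neither resulting node ends on a NULL entry.
 */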
/*
* mab_calc_split() - Calculate the split location and if there needs to be two
* splits.
* @mas: The maple state
* @bn: The maple_big_node with the data
* @mid_split: The second split, if required. 0 otherwise.
*
* Return: The first split location. The middle split is set in @mid_split.
*/
static inline int mab_calc_split(struct ma_state *mas,
struct maple_big_node *bn, unsigned char *mid_split)
{
unsigned char b_end = bn->b_end;
int split = b_end / 2; /* Assume equal split. */
unsigned char slot_count = mt_slots[bn->type];
/*
* To support gap tracking, all NULL entries are kept together and a node cannot
* end on a NULL entry, with the exception of the left-most leaf. The
* limitation means that the split of a node must be checked for this condition
* and be able to put more data in one direction or the other.
*
* Although extremely rare, it is possible to enter what is known as the 3-way
* split scenario. The 3-way split comes about by means of a store of a range
* that overwrites the end and beginning of two full nodes. The result is a set
* of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
* also be located in different parent nodes which are also full. This can
* carry upwards all the way to the root in the worst case.
*/
if (unlikely(mab_middle_node(bn, split, slot_count))) {
split = b_end / 3;
*mid_split = split * 2;
} else {
*mid_split = 0;
}
/* Avoid ending a node on a NULL entry */
split = mab_no_null_split(bn, split, slot_count);
if (unlikely(*mid_split))
*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
return split;
}
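/*
 * Example: if the big node holds 20 entries and the node type stores at most
 * 10 slots, a plain halving would produce two completely full nodes, so
 * mab_middle_node() reports that a middle node is required: split becomes
 * b_end / 3 and mid_split twice that, producing three roughly equal nodes.
 * Both cut points are then moved off any NULL entries by
 * mab_no_null_split().
 */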
/*
* mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
* and set @b_node->b_end to the next free slot.
* @mas: The maple state
* @mas_start: The starting slot to copy
* @mas_end: The end slot to copy (inclusively)
* @b_node: The maple_big_node to place the data
* @mab_start: The starting location in maple_big_node to store the data.
*/
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
unsigned char mas_end, struct maple_big_node *b_node,
unsigned char mab_start)
{
enum maple_type mt;
struct maple_node *node;
void __rcu **slots;
unsigned long *pivots, *gaps;
int i = mas_start, j = mab_start;
unsigned char piv_end;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
if (!i) {
b_node->pivot[j] = pivots[i++];
if (unlikely(i > mas_end))
goto complete;
j++;
}
piv_end = min(mas_end, mt_pivots[mt]);
for (; i < piv_end; i++, j++) {
b_node->pivot[j] = pivots[i];
if (unlikely(!b_node->pivot[j]))
goto complete;
if (unlikely(mas->max == b_node->pivot[j]))
goto complete;
}
b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
complete:
b_node->b_end = ++j;
j -= mab_start;
slots = ma_slots(node, mt);
memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
gaps = ma_gaps(node, mt);
memcpy(b_node->gap + mab_start, gaps + mas_start,
sizeof(unsigned long) * j);
}
}
/*
* mas_leaf_set_meta() - Set the metadata of a leaf if possible.
* @node: The maple node
* @mt: The maple type
* @end: The node end
*/
static inline void mas_leaf_set_meta(struct maple_node *node,
enum maple_type mt, unsigned char end)
{
if (end < mt_slots[mt] - 1)
ma_set_meta(node, mt, 0, end);
}
/*
* mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
* @b_node: the maple_big_node that has the data
* @mab_start: the start location in @b_node.
* @mab_end: The end location in @b_node (inclusively)
* @mas: The maple state with the maple encoded node.
*/
static inline void mab_mas_cp(struct maple_big_node *b_node,
unsigned char mab_start, unsigned char mab_end,
struct ma_state *mas, bool new_max)
{
int i, j = 0;
enum maple_type mt = mte_node_type(mas->node);
struct maple_node *node = mte_to_node(mas->node);
void __rcu **slots = ma_slots(node, mt);
unsigned long *pivots = ma_pivots(node, mt);
unsigned long *gaps = NULL;
unsigned char end;
if (mab_end - mab_start > mt_pivots[mt])
mab_end--;
if (!pivots[mt_pivots[mt] - 1])
slots[mt_pivots[mt]] = NULL;
i = mab_start;
do {
pivots[j++] = b_node->pivot[i++];
} while (i <= mab_end && likely(b_node->pivot[i]));
memcpy(slots, b_node->slot + mab_start,
sizeof(void *) * (i - mab_start));
if (new_max)
mas->max = b_node->pivot[i - 1];
end = j - 1;
if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
unsigned long max_gap = 0;
unsigned char offset = 0;
gaps = ma_gaps(node, mt);
do {
gaps[--j] = b_node->gap[--i];
if (gaps[j] > max_gap) {
offset = j;
max_gap = gaps[j];
}
} while (j);
ma_set_meta(node, mt, offset, end);
} else {
mas_leaf_set_meta(node, mt, end);
}
}
/*
* mas_store_b_node() - Store an @entry into the b_node while also copying the
* data from a maple encoded node.
* @wr_mas: the maple write state
* @b_node: the maple_big_node to fill with data
* @offset_end: the offset to end copying
*
* Return: The actual end of the data stored in @b_node
*/
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
struct maple_big_node *b_node, unsigned char offset_end)
{
unsigned char slot;
unsigned char b_end;
/* Possible underflow of piv will wrap back to 0 before use. */
unsigned long piv;
struct ma_state *mas = wr_mas->mas;
b_node->type = wr_mas->type;
b_end = 0;
slot = mas->offset;
if (slot) {
/* Copy start data up to insert. */
mas_mab_cp(mas, 0, slot - 1, b_node, 0);
b_end = b_node->b_end;
piv = b_node->pivot[b_end - 1];
} else
piv = mas->min - 1;
if (piv + 1 < mas->index) {
/* Handle range starting after old range */
b_node->slot[b_end] = wr_mas->content;
if (!wr_mas->content)
b_node->gap[b_end] = mas->index - 1 - piv;
b_node->pivot[b_end++] = mas->index - 1;
}
/* Store the new entry. */
mas->offset = b_end;
b_node->slot[b_end] = wr_mas->entry;
b_node->pivot[b_end] = mas->last;
/* Appended. */
if (mas->last >= mas->max)
goto b_end;
/* Handle new range ending before old range ends */
piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
if (piv > mas->last) {
if (offset_end != slot)
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
offset_end);
b_node->slot[++b_end] = wr_mas->content;
if (!wr_mas->content)
b_node->gap[b_end] = piv - mas->last + 1;
b_node->pivot[b_end] = piv;
}
slot = offset_end + 1;
if (slot > mas->end)
goto b_end;
/* Copy end data to the end of the node. */
mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
b_node->b_end--;
return;
b_end:
b_node->b_end = b_end;
}
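/*
 * Example: storing X over [15, 20] into a node whose slot holds Y for
 * [10, 30] produces up to three entries in the big node: Y with pivot 14
 * (the preserved head of the old range), X with pivot 20, and Y again with
 * pivot 30 (the preserved tail), followed by a copy of any remaining slots.
 */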
/*
* mas_prev_sibling() - Find the previous node with the same parent.
* @mas: the maple state
*
* Return: True if there is a previous sibling, false otherwise.
*/
static inline bool mas_prev_sibling(struct ma_state *mas)
{
unsigned int p_slot = mte_parent_slot(mas->node);
/* For root node, p_slot is set to 0 by mte_parent_slot(). */
if (!p_slot)
return false;
mas_ascend(mas);
mas->offset = p_slot - 1;
mas_descend(mas);
return true;
}
/*
* mas_next_sibling() - Find the next node with the same parent.
* @mas: the maple state
*
* Return: true if there is a next sibling, false otherwise.
*/
static inline bool mas_next_sibling(struct ma_state *mas)
{
MA_STATE(parent, mas->tree, mas->index, mas->last);
if (mte_is_root(mas->node))
return false;
parent = *mas;
mas_ascend(&parent);
parent.offset = mte_parent_slot(mas->node) + 1;
if (parent.offset > mas_data_end(&parent))
return false;
*mas = parent;
mas_descend(mas);
return true;
}
/*
* mas_node_or_none() - Set the enode and state.
* @mas: the maple state
* @enode: The encoded maple node.
*
* Set the node to the enode and the status.
*/
static inline void mas_node_or_none(struct ma_state *mas,
struct maple_enode *enode)
{
if (enode) {
mas->node = enode;
mas->status = ma_active;
} else {
mas->node = NULL;
mas->status = ma_none;
}
}
/*
* mas_wr_node_walk() - Find the correct offset for the index in the @mas.
* If @mas->index cannot be found within the containing
* node, we traverse to the last entry in the node.
* @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
*/
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char count, offset;
if (unlikely(ma_is_dense(wr_mas->type))) {
wr_mas->r_max = wr_mas->r_min = mas->index;
mas->offset = mas->index = mas->min;
return;
}
	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
				       wr_mas->pivots, mas->max);
	offset = mas->offset;

	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;

	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
}
/*
* mast_rebalance_next() - Rebalance against the next node
* @mast: The maple subtree state
*/
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
unsigned char b_end = mast->bn->b_end;
mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
mast->bn, b_end);
mast->orig_r->last = mast->orig_r->max;
}
/*
* mast_rebalance_prev() - Rebalance against the previous node
* @mast: The maple subtree state
*/
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
unsigned char end = mas_data_end(mast->orig_l) + 1;
unsigned char b_end = mast->bn->b_end;
mab_shift_right(mast->bn, end);
mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
mast->l->min = mast->orig_l->min;
mast->orig_l->index = mast->orig_l->min;
mast->bn->b_end = end + b_end;
mast->l->offset += end;
}
/*
* mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
* the node to the right. Checking the nodes to the right then the left at each
* level upwards until root is reached.
* Data is copied into the @mast->bn.
* @mast: The maple_subtree_state.
*/
static inline
bool mast_spanning_rebalance(struct maple_subtree_state *mast)
{
struct ma_state r_tmp = *mast->orig_r;
struct ma_state l_tmp = *mast->orig_l;
unsigned char depth = 0;
do {
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
depth++;
if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
mast->orig_r->offset++;
do {
mas_descend(mast->orig_r);
mast->orig_r->offset = 0;
} while (--depth);
mast_rebalance_next(mast);
*mast->orig_l = l_tmp;
return true;
} else if (mast->orig_l->offset != 0) {
mast->orig_l->offset--;
do {
mas_descend(mast->orig_l);
mast->orig_l->offset =
mas_data_end(mast->orig_l);
} while (--depth);
mast_rebalance_prev(mast);
*mast->orig_r = r_tmp;
return true;
}
} while (!mte_is_root(mast->orig_r->node));
*mast->orig_r = r_tmp;
*mast->orig_l = l_tmp;
return false;
}
/*
* mast_ascend() - Ascend the original left and right maple states.
* @mast: the maple subtree state.
*
* Ascend the original left and right sides. Set the offsets to point to the
* data already in the new tree (@mast->l and @mast->r).
*/
static inline void mast_ascend(struct maple_subtree_state *mast)
{
MA_WR_STATE(wr_mas, mast->orig_r, NULL);
mas_ascend(mast->orig_l);
mas_ascend(mast->orig_r);
mast->orig_r->offset = 0;
mast->orig_r->index = mast->r->max;
/* last should be larger than or equal to index */
if (mast->orig_r->last < mast->orig_r->index)
mast->orig_r->last = mast->orig_r->index;
wr_mas.type = mte_node_type(mast->orig_r->node);
mas_wr_node_walk(&wr_mas);
/* Set up the left side of things */
mast->orig_l->offset = 0;
mast->orig_l->index = mast->l->min;
wr_mas.mas = mast->orig_l;
wr_mas.type = mte_node_type(mast->orig_l->node);
mas_wr_node_walk(&wr_mas);
mast->bn->type = wr_mas.type;
}
/*
* mas_new_ma_node() - Create and return a new maple node. Helper function.
* @mas: the maple state with the allocations.
* @b_node: the maple_big_node with the type encoding.
*
* Use the node type from the maple_big_node to allocate a new node from the
* ma_state. This function exists mainly for code readability.
*
* Return: A new maple encoded node
*/
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}
/*
* mas_mab_to_node() - Set up right and middle nodes
*
* @mas: the maple state that contains the allocations.
* @b_node: the node which contains the data.
* @left: The pointer which will have the left node
* @right: The pointer which may have the right node
* @middle: the pointer which may have the middle node (rare)
* @mid_split: the split location for the middle node
*
* Return: the split of left.
*/
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
struct maple_big_node *b_node, struct maple_enode **left,
struct maple_enode **right, struct maple_enode **middle,
unsigned char *mid_split)
{
unsigned char split = 0;
unsigned char slot_count = mt_slots[b_node->type];
*left = mas_new_ma_node(mas, b_node);
*right = NULL;
*middle = NULL;
*mid_split = 0;
if (b_node->b_end < slot_count) {
split = b_node->b_end;
} else {
split = mab_calc_split(mas, b_node, mid_split);
*right = mas_new_ma_node(mas, b_node);
}
if (*mid_split)
*middle = mas_new_ma_node(mas, b_node);
return split;
}
/*
* mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
* pointer.
* @b_node: the big node to add the entry
* @mas: the maple state to get the pivot (mas->max)
* @entry: the entry to add, if NULL nothing happens.
*/
static inline void mab_set_b_end(struct maple_big_node *b_node,
struct ma_state *mas,
void *entry)
{
if (!entry)
return;
b_node->slot[b_node->b_end] = entry;
if (mt_is_alloc(mas->tree))
b_node->gap[b_node->b_end] = mas_max_gap(mas);
b_node->pivot[b_node->b_end++] = mas->max;
}
/*
* mas_set_split_parent() - combine_then_separate helper function. Sets the parent
* of @mas->node to either @left or @right, depending on @slot and @split
*
* @mas: the maple state with the node that needs a parent
* @left: possible parent 1
* @right: possible parent 2
* @slot: the slot the mas->node was placed
* @split: the split location between @left and @right
*/
static inline void mas_set_split_parent(struct ma_state *mas,
struct maple_enode *left,
struct maple_enode *right,
unsigned char *slot, unsigned char split)
{
if (mas_is_none(mas))
return;
if ((*slot) <= split)
mas_set_parent(mas, mas->node, left, *slot);
else if (right)
mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
(*slot)++;
}
/*
* mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node; advances to @right once @slot
 *     passes @mid_split.
 * @right: The right-most encoded maple node.
 * @slot: The offset
 * @split: Pointer to the split location; advances to @mid_split when @r does.
 * @mid_split: The middle split.
*/
static inline void mte_mid_split_check(struct maple_enode **l,
struct maple_enode **r,
struct maple_enode *right,
unsigned char slot,
unsigned char *split,
unsigned char mid_split)
{
if (*r == right)
return;
if (slot < mid_split)
return;
*l = *r;
*r = right;
*split = mid_split;
}
/*
 * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
 * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node (may be NULL)
 * @right: the right node
 * @split: the split location.
 * @mid_split: the split location between the middle and right nodes.
*/
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
struct maple_enode *left,
struct maple_enode *middle,
struct maple_enode *right,
unsigned char split,
unsigned char mid_split)
{
unsigned char slot;
struct maple_enode *l = left;
struct maple_enode *r = right;
if (mas_is_none(mast->l))
return;
if (middle)
r = middle;
slot = mast->l->offset;
mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
mas_set_split_parent(mast->l, l, r, &slot, split);
mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
mas_set_split_parent(mast->m, l, r, &slot, split);
mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
mas_set_split_parent(mast->r, l, r, &slot, split);
}
/*
* mas_topiary_node() - Dispose of a single node
 * @mas: The maple state for pushing nodes
 * @tmp_mas: The maple state holding the node to dispose of
 * @in_rcu: If the tree is in rcu mode
*
* The node will either be RCU freed or pushed back on the maple state.
*/
static inline void mas_topiary_node(struct ma_state *mas,
struct ma_state *tmp_mas, bool in_rcu)
{
struct maple_node *tmp;
struct maple_enode *enode;
if (mas_is_none(tmp_mas))
return;
enode = tmp_mas->node;
tmp = mte_to_node(enode);
mte_set_node_dead(enode);
ma_free_rcu(tmp);
}
/*
* mas_topiary_replace() - Replace the data with new data, then repair the
* parent links within the new tree. Iterate over the dead sub-tree and collect
* the dead subtrees and topiary the nodes that are no longer of use.
*
* The new tree will have up to three children with the correct parent. Keep
* track of the new entries as they need to be followed to find the next level
* of new entries.
*
* The old tree will have up to three children with the old parent. Keep track
* of the old entries as they may have more nodes below replaced. Nodes within
* [index, last] are dead subtrees, others need to be freed and followed.
*
* @mas: The maple state pointing at the new data
* @old_enode: The maple encoded node being replaced
* @new_height: The new height of the tree as a result of the operation
*
*/
static inline void mas_topiary_replace(struct ma_state *mas,
struct maple_enode *old_enode, unsigned char new_height)
{
struct ma_state tmp[3], tmp_next[3];
MA_TOPIARY(subtrees, mas->tree);
bool in_rcu;
int i, n;
/* Place data in tree & then mark node as old */
mas_put_in_tree(mas, old_enode, new_height);
/* Update the parent pointers in the tree */
tmp[0] = *mas;
tmp[0].offset = 0;
tmp[1].status = ma_none;
tmp[2].status = ma_none;
while (!mte_is_leaf(tmp[0].node)) {
n = 0;
for (i = 0; i < 3; i++) {
if (mas_is_none(&tmp[i]))
continue;
while (n < 3) {
if (!mas_find_child(&tmp[i], &tmp_next[n]))
break;
n++;
}
mas_adopt_children(&tmp[i], tmp[i].node);
}
if (MAS_WARN_ON(mas, n == 0))
break;
while (n < 3)
tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++)
tmp[i] = tmp_next[i];
}
/* Collect the old nodes that need to be discarded */
if (mte_is_leaf(old_enode))
return mas_free(mas, old_enode);
tmp[0] = *mas;
tmp[0].offset = 0;
tmp[0].node = old_enode;
tmp[1].status = ma_none;
tmp[2].status = ma_none;
in_rcu = mt_in_rcu(mas->tree);
do {
n = 0;
for (i = 0; i < 3; i++) {
if (mas_is_none(&tmp[i]))
continue;
while (n < 3) {
if (!mas_find_child(&tmp[i], &tmp_next[n]))
break;
if ((tmp_next[n].min >= tmp_next->index) &&
(tmp_next[n].max <= tmp_next->last)) {
mat_add(&subtrees, tmp_next[n].node);
tmp_next[n].status = ma_none;
} else {
n++;
}
}
}
if (MAS_WARN_ON(mas, n == 0))
break;
while (n < 3)
tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++) {
mas_topiary_node(mas, &tmp[i], in_rcu);
tmp[i] = tmp_next[i];
}
} while (!mte_is_leaf(tmp[0].node));
for (i = 0; i < 3; i++)
mas_topiary_node(mas, &tmp[i], in_rcu);
mas_mat_destroy(mas, &subtrees);
}
/*
* mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
* @old_enode: The old maple encoded node that is being replaced.
* @new_height: The new height of the tree as a result of the operation
*
* Updates gap as necessary.
*/
static inline void mas_wmb_replace(struct ma_state *mas,
struct maple_enode *old_enode, unsigned char new_height)
{
/* Insert the new data in the tree */
mas_topiary_replace(mas, old_enode, new_height);
if (mte_is_leaf(mas->node))
return;
mas_update_gap(mas);
}
/*
* mast_cp_to_nodes() - Copy data out to nodes.
* @mast: The maple subtree state
* @left: The left encoded maple node
* @middle: The middle encoded maple node
* @right: The right encoded maple node
* @split: The location to split between left and (middle ? middle : right)
* @mid_split: The location to split between middle and right.
*/
static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
struct maple_enode *left, struct maple_enode *middle,
struct maple_enode *right, unsigned char split, unsigned char mid_split)
{
bool new_lmax = true;
mas_node_or_none(mast->l, left);
mas_node_or_none(mast->m, middle);
mas_node_or_none(mast->r, right);
mast->l->min = mast->orig_l->min;
if (split == mast->bn->b_end) {
mast->l->max = mast->orig_r->max;
new_lmax = false;
}
mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
if (middle) {
mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
mast->m->min = mast->bn->pivot[split] + 1;
split = mid_split;
}
mast->r->max = mast->orig_r->max;
if (right) {
mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
mast->r->min = mast->bn->pivot[split] + 1;
}
}
/*
* mast_combine_cp_left - Copy in the original left side of the tree into the
* combined data set in the maple subtree state big node.
* @mast: The maple subtree state
*/
static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
{
unsigned char l_slot = mast->orig_l->offset;
if (!l_slot)
return;
mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
}
/*
* mast_combine_cp_right: Copy in the original right side of the tree into the
* combined data set in the maple subtree state big node.
* @mast: The maple subtree state
*/
static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
{
if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
return;
mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
mt_slot_count(mast->orig_r->node), mast->bn,
mast->bn->b_end);
mast->orig_r->last = mast->orig_r->max;
}
/*
* mast_sufficient: Check if the maple subtree state has enough data in the big
* node to create at least one sufficient node
* @mast: the maple subtree state
*/
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
return true;
return false;
}
/*
* mast_overflow: Check if there is too much data in the subtree state for a
* single node.
* @mast: The maple subtree state
*/
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
if (mast->bn->b_end > mt_slot_count(mast->orig_l->node))
return true;
return false;
}
static inline void *mtree_range_walk(struct ma_state *mas)
{
unsigned long *pivots;
unsigned char offset;
struct maple_node *node;
struct maple_enode *next, *last;
enum maple_type type;
void __rcu **slots;
unsigned char end;
unsigned long max, min;
unsigned long prev_max, prev_min;
next = mas->node;
min = mas->min;
max = mas->max;
do {
last = next;
node = mte_to_node(next);
type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
prev_min = min;
prev_max = max;
if (pivots[0] >= mas->index) {
offset = 0;
max = pivots[0];
goto next;
}
offset = 1;
		while (offset < end) {
			if (pivots[offset] >= mas->index) {
max = pivots[offset];
break;
}
offset++;
}
min = pivots[offset - 1] + 1;
next:
		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
goto dead_node;
} while (!ma_is_leaf(type));
mas->end = end;
mas->offset = offset;
mas->index = min;
mas->last = max;
mas->min = prev_min;
mas->max = prev_max;
mas->node = last;
return (void *)next;
dead_node:
	mas_reset(mas);
	return NULL;
}
/*
* mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
* @mas: The starting maple state
* @mast: The maple_subtree_state, keeps track of 4 maple states.
* @count: The estimated count of iterations needed.
*
* Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
* is hit. First @b_node is split into two entries which are inserted into the
* next iteration of the loop. @b_node is returned populated with the final
* iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
* nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
* to account of what has been copied into the new sub-tree. The update of
* orig_l_mas->last is used in mas_consume to find the slots that will need to
* be either freed or destroyed. orig_l_mas->depth keeps track of the height of
* the new sub-tree in case the sub-tree becomes the full tree.
*/
static void mas_spanning_rebalance(struct ma_state *mas,
struct maple_subtree_state *mast, unsigned char count)
{
unsigned char split, mid_split;
unsigned char slot = 0;
unsigned char new_height = 0; /* used if node is a new root */
struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
struct maple_enode *old_enode;
MA_STATE(l_mas, mas->tree, mas->index, mas->index);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
MA_STATE(m_mas, mas->tree, mas->index, mas->index);
/*
* The tree needs to be rebalanced and leaves need to be kept at the same level.
* Rebalancing is done by use of the ``struct maple_topiary``.
*/
mast->l = &l_mas;
mast->m = &m_mas;
mast->r = &r_mas;
l_mas.status = r_mas.status = m_mas.status = ma_none;
/* Check if this is not root and has sufficient data. */
if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
mast_spanning_rebalance(mast);
/*
* Each level of the tree is examined and balanced, pushing data to the left or
* right, or rebalancing against left or right nodes is employed to avoid
* rippling up the tree to limit the amount of churn. Once a new sub-section of
* the tree is created, there may be a mix of new and old nodes. The old nodes
* will have the incorrect parent pointers and currently be in two trees: the
* original tree and the partially new tree. To remedy the parent pointers in
* the old tree, the new data is swapped into the active tree and a walk down
* the tree is performed and the parent pointers are updated.
* See mas_topiary_replace() for more information.
*/
while (count--) {
mast->bn->b_end--;
mast->bn->type = mte_node_type(mast->orig_l->node);
split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
&mid_split);
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
new_height++;
/*
* Copy data from next level in the tree to mast->bn from next
* iteration
*/
memset(mast->bn, 0, sizeof(struct maple_big_node));
mast->bn->type = mte_node_type(left);
/* Root already stored in l->node. */
if (mas_is_root_limits(mast->l))
goto new_root;
mast_ascend(mast);
mast_combine_cp_left(mast);
l_mas.offset = mast->bn->b_end;
mab_set_b_end(mast->bn, &l_mas, left);
mab_set_b_end(mast->bn, &m_mas, middle);
mab_set_b_end(mast->bn, &r_mas, right);
/* Copy anything necessary out of the right node. */
mast_combine_cp_right(mast);
mast->orig_l->last = mast->orig_l->max;
if (mast_sufficient(mast)) {
if (mast_overflow(mast))
continue;
if (mast->orig_l->node == mast->orig_r->node) {
/*
* The data in b_node should be stored in one
* node and in the tree
*/
slot = mast->l->offset;
break;
}
continue;
}
/* May be a new root stored in mast->bn */
if (mas_is_root_limits(mast->orig_l))
break;
mast_spanning_rebalance(mast);
/* rebalancing from other nodes may require another loop. */
if (!count)
count++;
}
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
mte_node_type(mast->orig_l->node));
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
new_height++;
mas_set_parent(mas, left, l_mas.node, slot);
if (middle)
mas_set_parent(mas, middle, l_mas.node, ++slot);
if (right)
mas_set_parent(mas, right, l_mas.node, ++slot);
if (mas_is_root_limits(mast->l)) {
new_root:
mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
while (!mte_is_root(mast->orig_l->node))
mast_ascend(mast);
} else {
mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
}
old_enode = mast->orig_l->node;
mas->depth = l_mas.depth;
mas->node = l_mas.node;
mas->min = l_mas.min;
mas->max = l_mas.max;
mas->offset = l_mas.offset;
mas_wmb_replace(mas, old_enode, new_height);
mtree_range_walk(mas);
return;
}
/*
* mas_rebalance() - Rebalance a given node.
* @mas: The maple state
* @b_node: The big maple node.
*
* Rebalance two nodes into a single node or two new nodes that are sufficient.
* Continue upwards until tree is sufficient.
*/
static inline void mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
char empty_count = mas_mt_height(mas);
struct maple_subtree_state mast;
unsigned char shift, b_end = ++b_node->b_end;
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
trace_ma_op(TP_FCT, mas);
/*
* Rebalancing occurs if a node is insufficient. Data is rebalanced
* against the node to the right if it exists, otherwise the node to the
* left of this node is rebalanced against this node. If rebalancing
* causes just one node to be produced instead of two, then the parent
* is also examined and rebalanced if it is insufficient. Every level
* tries to combine the data in the same way. If one node contains the
* entire range of the tree, then that node is used as a new root node.
*/
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
mast.bn = b_node;
mast.bn->type = mte_node_type(mas->node);
l_mas = r_mas = *mas;
if (mas_next_sibling(&r_mas)) {
mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
r_mas.last = r_mas.index = r_mas.max;
} else {
mas_prev_sibling(&l_mas);
shift = mas_data_end(&l_mas) + 1;
mab_shift_right(b_node, shift);
mas->offset += shift;
mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
b_node->b_end = shift + b_end;
l_mas.index = l_mas.last = l_mas.min;
}
return mas_spanning_rebalance(mas, &mast, empty_count);
}
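/*
 * Example: when an overwrite leaves a node with fewer than mt_min_slots[]
 * entries, its remaining data is copied into the big node together with the
 * data of its right sibling (or the left sibling when no right sibling
 * exists), and mas_spanning_rebalance() redistributes the combined data into
 * one or two sufficient nodes, repeating the check one level up if a node
 * was eliminated.
 */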
/*
* mas_split_final_node() - Split the final node in a subtree operation.
* @mast: the maple subtree state
* @mas: The maple state
*/
static inline void mas_split_final_node(struct maple_subtree_state *mast,
struct ma_state *mas)
{
struct maple_enode *ancestor;
if (mte_is_root(mas->node)) {
if (mt_is_alloc(mas->tree))
mast->bn->type = maple_arange_64;
else
mast->bn->type = maple_range_64;
}
/*
* Only a single node is used here, could be root.
* The Big_node data should just fit in a single node.
*/
ancestor = mas_new_ma_node(mas, mast->bn);
mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
mast->l->node = ancestor;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
mas->offset = mast->bn->b_end - 1;
}
/*
* mast_fill_bnode() - Copy data into the big node in the subtree state
* @mast: The maple subtree state
* @mas: the maple state
* @skip: The number of entries to skip for new nodes insertion.
*/
static inline void mast_fill_bnode(struct maple_subtree_state *mast,
struct ma_state *mas,
unsigned char skip)
{
bool cp = true;
unsigned char split;
memset(mast->bn, 0, sizeof(struct maple_big_node));
if (mte_is_root(mas->node)) {
cp = false;
} else {
mas_ascend(mas);
mas->offset = mte_parent_slot(mas->node);
}
if (cp && mast->l->offset)
mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
split = mast->bn->b_end;
mab_set_b_end(mast->bn, mast->l, mast->l->node);
mast->r->offset = mast->bn->b_end;
mab_set_b_end(mast->bn, mast->r, mast->r->node);
if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
cp = false;
if (cp)
mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
mast->bn, mast->bn->b_end);
mast->bn->b_end--;
mast->bn->type = mte_node_type(mas->node);
}
/*
* mast_split_data() - Split the data in the subtree state big node into regular
* nodes.
* @mast: The maple subtree state
* @mas: The maple state
* @split: The location to split the big node
*/
static inline void mast_split_data(struct maple_subtree_state *mast,
struct ma_state *mas, unsigned char split)
{
unsigned char p_slot;
mab_mas_cp(mast->bn, 0, split, mast->l, true);
mte_set_pivot(mast->r->node, 0, mast->r->max);
mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
mast->l->offset = mte_parent_slot(mas->node);
mast->l->max = mast->bn->pivot[split];
mast->r->min = mast->l->max + 1;
if (mte_is_leaf(mas->node))
return;
p_slot = mast->orig_l->offset;
mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
&p_slot, split);
mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
&p_slot, split);
}
/*
* mas_push_data() - Instead of splitting a node, it is beneficial to push the
* data to the right or left node if there is room.
* @mas: The maple state
* @mast: The maple subtree state
* @left: Push left or not.
*
* Keeping the height of the tree low means faster lookups.
*
* Return: True if pushed, false otherwise.
*/
static inline bool mas_push_data(struct ma_state *mas,
struct maple_subtree_state *mast, bool left)
{
unsigned char slot_total = mast->bn->b_end;
unsigned char end, space, split;
MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
tmp_mas = *mas;
tmp_mas.depth = mast->l->depth;
if (left && !mas_prev_sibling(&tmp_mas))
return false;
else if (!left && !mas_next_sibling(&tmp_mas))
return false;
end = mas_data_end(&tmp_mas);
slot_total += end;
space = 2 * mt_slot_count(mas->node) - 2;
/* -2 instead of -1 to ensure there isn't a triple split */
if (ma_is_leaf(mast->bn->type))
space--;
if (mas->max == ULONG_MAX)
space--;
if (slot_total >= space)
return false;
/* Get the data; Fill mast->bn */
mast->bn->b_end++;
if (left) {
mab_shift_right(mast->bn, end + 1);
mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
mast->bn->b_end = slot_total + 1;
} else {
mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
}
/* Configure mast for splitting of mast->bn */
split = mt_slots[mast->bn->type] - 2;
if (left) {
/* Switch mas to prev node */
*mas = tmp_mas;
/* Start using mast->l for the left side. */
tmp_mas.node = mast->l->node;
*mast->l = tmp_mas;
} else {
tmp_mas.node = mast->r->node;
*mast->r = tmp_mas;
split = slot_total - split;
}
split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
/* Update parent slot for split calculation. */
if (left)
mast->orig_l->offset += end + 1;
mast_split_data(mast, mas, split);
mast_fill_bnode(mast, mas, 2);
mas_split_final_node(mast, mas);
return true;
}
/*
* mas_split() - Split data that is too big for one node into two.
* @mas: The maple state
* @b_node: The maple big node
*/
static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
struct maple_subtree_state mast;
int height = 0;
unsigned int orig_height = mas_mt_height(mas);
unsigned char mid_split, split = 0;
struct maple_enode *old;
/*
* Splitting is handled differently from any other B-tree; the Maple
* Tree splits upwards. Splitting up means that the split operation
* occurs when the walk of the tree hits the leaves and not on the way
* down. The reason for splitting up is that it is impossible to know
* how much space will be needed until the leaf is (or leaves are)
* reached. Since overwriting data is allowed and a range could
* overwrite more than one range or result in changing one entry into 3
* entries, it is impossible to know if a split is required until the
* data is examined.
*
* Splitting is a balancing act between keeping allocations to a minimum
* and avoiding a 'jitter' event where a tree is expanded to make room
* for an entry followed by a contraction when the entry is removed. To
* accomplish the balance, there are empty slots remaining in both left
* and right nodes after a split.
*/
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
trace_ma_op(TP_FCT, mas);
mast.l = &l_mas;
mast.r = &r_mas;
mast.orig_l = &prev_l_mas;
mast.orig_r = &prev_r_mas;
mast.bn = b_node;
while (height++ <= orig_height) {
if (mt_slots[b_node->type] > b_node->b_end) {
mas_split_final_node(&mast, mas);
break;
}
l_mas = r_mas = *mas;
l_mas.node = mas_new_ma_node(mas, b_node);
r_mas.node = mas_new_ma_node(mas, b_node);
/*
* Another way that 'jitter' is avoided is to terminate a split up early if the
* left or right node has space to spare. This is referred to as "pushing left"
* or "pushing right" and is similar to the B* tree, except the nodes left or
* right can rarely be reused due to RCU, but the ripple upwards is halted which
* is a significant savings.
*/
/* Try to push left. */
if (mas_push_data(mas, &mast, true)) {
height++;
break;
}
/* Try to push right. */
if (mas_push_data(mas, &mast, false)) {
height++;
break;
}
split = mab_calc_split(mas, b_node, &mid_split);
mast_split_data(&mast, mas, split);
/*
* Usually correct, mab_mas_cp in the above call overwrites
* r->max.
*/
mast.r->max = mas->max;
mast_fill_bnode(&mast, mas, 1);
prev_l_mas = *mast.l;
prev_r_mas = *mast.r;
}
/* Set the original node as dead */
old = mas->node;
mas->node = l_mas.node;
mas_wmb_replace(mas, old, height);
mtree_range_walk(mas);
return;
}
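/*
 * Example: an insert into a full leaf whose left sibling still has spare
 * slots is satisfied by mas_push_data() redistributing the combined entries
 * across the two existing siblings, so only the shared parent is rewritten
 * and the split does not ripple any further up the tree.
 */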
/*
* mas_commit_b_node() - Commit the big node into the tree.
* @wr_mas: The maple write state
* @b_node: The maple big node
*/
static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas,
struct maple_big_node *b_node)
{
enum store_type type = wr_mas->mas->store_type;
WARN_ON_ONCE(type != wr_rebalance && type != wr_split_store);
if (type == wr_rebalance)
return mas_rebalance(wr_mas->mas, b_node);
return mas_split(wr_mas->mas, b_node);
}
/*
* mas_root_expand() - Expand a root to a node
* @mas: The maple state
* @entry: The entry to store into the tree
*/
static inline void mas_root_expand(struct ma_state *mas, void *entry)
{
void *contents = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
struct maple_node *node;
void __rcu **slots;
unsigned long *pivots;
int slot = 0;
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
mas->status = ma_active;
if (mas->index) {
if (contents) {
rcu_assign_pointer(slots[slot], contents);
if (likely(mas->index > 1))
slot++;
}
pivots[slot++] = mas->index - 1;
}
rcu_assign_pointer(slots[slot], entry);
mas->offset = slot;
pivots[slot] = mas->last;
if (mas->last != ULONG_MAX)
pivots[++slot] = ULONG_MAX;
mt_set_height(mas->tree, 1);
ma_set_meta(node, maple_leaf_64, 0, slot);
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
return;
}
/*
* mas_store_root() - Storing value into root.
* @mas: The maple state
* @entry: The entry to store.
*
* There is no root node now and we are storing a value into the root - this
* function either assigns the pointer or expands into a node.
*/
static inline void mas_store_root(struct ma_state *mas, void *entry)
{
	if (!entry) {
		if (!mas->index)
			rcu_assign_pointer(mas->tree->ma_root, NULL);
	} else if (likely((mas->last != 0) || (mas->index != 0)))
		mas_root_expand(mas, entry);
	else if (((unsigned long) (entry) & 3) == 2)
mas_root_expand(mas, entry);
else {
rcu_assign_pointer(mas->tree->ma_root, entry);
mas->status = ma_start;
}
}
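/*
 * Example (a sketch using the public API, with hypothetical pointers):
 *
 *	DEFINE_MTREE(mt);
 *
 *	mtree_store(&mt, 0, ptr_a, GFP_KERNEL);	// kept directly in ma_root
 *	mtree_store(&mt, 5, ptr_b, GFP_KERNEL);	// needs a real leaf node
 *
 * A single entry at index 0 with last == 0 takes the final branch above and
 * is placed directly in ma_root; any wider range, or an entry whose low bits
 * would collide with the node encoding, goes through mas_root_expand().
 */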
/*
* mas_is_span_wr() - Check if the write needs to be treated as a write that
* spans the node.
* @wr_mas: The maple write state
*
* Spanning writes are writes that start in one node and end in another OR if
* the write of a %NULL will cause the node to end with a %NULL.
*
* Return: True if this is a spanning write, false otherwise.
*/
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
unsigned long max = wr_mas->r_max;
unsigned long last = wr_mas->mas->last;
enum maple_type type = wr_mas->type;
void *entry = wr_mas->entry;
/* Contained in this pivot, fast path */
if (last < max)
return false;
if (ma_is_leaf(type)) {
max = wr_mas->mas->max;
if (last < max)
return false;
}
if (last == max) {
/*
* The last entry of leaf node cannot be NULL unless it is the
* rightmost node (writing ULONG_MAX), otherwise it spans slots.
*/
if (entry || last == ULONG_MAX)
return false;
}
	trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry);
	return true;
}
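/*
 * Example of a store that ends up here as a spanning write (a sketch using
 * the public API, assuming the ranges have already been spread over more
 * than one leaf):
 *
 *	DEFINE_MTREE(mt);
 *
 *	mtree_store_range(&mt, 0, 100, ptr_a, GFP_KERNEL);
 *	mtree_store_range(&mt, 101, 200, ptr_b, GFP_KERNEL);
 *	...
 *	mtree_store_range(&mt, 90, 110, ptr_c, GFP_KERNEL);
 *
 * The last store starts in the leaf holding index 90 and ends in the leaf
 * holding index 110, so mas_is_span_wr() returns true and the write is
 * handled by mas_wr_spanning_store().
 */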
static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
wr_mas->type = mte_node_type(wr_mas->mas->node);
mas_wr_node_walk(wr_mas);
wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}
static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
wr_mas->mas->max = wr_mas->r_max;
wr_mas->mas->min = wr_mas->r_min;
wr_mas->mas->node = wr_mas->content;
wr_mas->mas->offset = 0;
wr_mas->mas->depth++;
}
/*
* mas_wr_walk() - Walk the tree for a write.
* @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
*
* Return: True if it's contained in a node, false on spanning write.
*/
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
return false;
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
mas->offset);
if (ma_is_leaf(wr_mas->type))
return true;
if (mas->end < mt_slots[wr_mas->type] - 1)
wr_mas->vacant_height = mas->depth + 1;
if (ma_is_root(mas_mn(mas))) {
/* root needs more than 2 entries to be sufficient + 1 */
if (mas->end > 2)
wr_mas->sufficient_height = 1;
		} else if (mas->end > mt_min_slots[wr_mas->type] + 1)
			wr_mas->sufficient_height = mas->depth + 1;

		mas_wr_walk_traverse(wr_mas);
}
return true;
}
static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
while (true) {
mas_wr_walk_descend(wr_mas);
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
mas->offset);
if (ma_is_leaf(wr_mas->type))
return;
mas_wr_walk_traverse(wr_mas);
}
}
/*
* mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
* @l_wr_mas: The left maple write state
* @r_wr_mas: The right maple write state
*/
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
struct ma_wr_state *r_wr_mas)
{
struct ma_state *r_mas = r_wr_mas->mas;
struct ma_state *l_mas = l_wr_mas->mas;
unsigned char l_slot;
l_slot = l_mas->offset;
if (!l_wr_mas->content)
l_mas->index = l_wr_mas->r_min;
if ((l_mas->index == l_wr_mas->r_min) &&
(l_slot &&
!mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
if (l_slot > 1)
l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
else
l_mas->index = l_mas->min;
l_mas->offset = l_slot - 1;
}
if (!r_wr_mas->content) {
if (r_mas->last < r_wr_mas->r_max)
r_mas->last = r_wr_mas->r_max;
r_mas->offset++;
} else if ((r_mas->last == r_wr_mas->r_max) &&
(r_mas->last < r_mas->max) &&
!mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
r_wr_mas->type, r_mas->offset + 1);
r_mas->offset++;
}
}
static inline void *mas_state_walk(struct ma_state *mas)
{
void *entry;
entry = mas_start(mas);
if (mas_is_none(mas))
return NULL;
if (mas_is_ptr(mas))
return entry;
return mtree_range_walk(mas);
}
/*
* mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
* to date.
*
* @mas: The maple state.
*
* Note: Leaves mas in undesirable state.
* Return: The entry for @mas->index or %NULL on dead node.
*/
static inline void *mtree_lookup_walk(struct ma_state *mas)
{
unsigned long *pivots;
unsigned char offset;
struct maple_node *node;
struct maple_enode *next;
enum maple_type type;
void __rcu **slots;
unsigned char end;
next = mas->node;
do {
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
end = mt_pivots[type];
offset = 0;
do {
if (pivots[offset] >= mas->index)
break;
} while (++offset < end);
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
if (unlikely(ma_dead_node(node)))
goto dead_node;
} while (!ma_is_leaf(type));
return (void *)next;
dead_node:
mas_reset(mas);
return NULL;
}
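/*
 * This fast walk is used for quick lookups such as mtree_load(): because the
 * maple state is not kept up to date, a caller that gets %NULL back from a
 * dead node is expected to restart the walk from the top under the RCU read
 * lock.
 */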
static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
/*
* mas_new_root() - Create a new root node that only contains the entry passed
* in.
* @mas: The maple state
* @entry: The entry to store.
*
* Only valid when the index == 0 and the last == ULONG_MAX
*/
static inline void mas_new_root(struct ma_state *mas, void *entry)
{
struct maple_enode *root = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
struct maple_node *node;
void __rcu **slots;
unsigned long *pivots;
WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX);
if (!entry) {
mt_set_height(mas->tree, 0);
rcu_assign_pointer(mas->tree->ma_root, entry);
mas->status = ma_start;
goto done;
}
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
mas->status = ma_active;
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
mt_set_height(mas->tree, 1);
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
done:
if (xa_is_node(root))
mte_destroy_walk(root, mas->tree);
return;
}
/*
* mas_wr_spanning_store() - Create a subtree with the store operation completed
* and new nodes where necessary, then place the sub-tree in the actual tree.
* Note that mas is expected to point to the node which caused the store to
* span.
* @wr_mas: The maple write state
*/
static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
struct maple_subtree_state mast;
struct maple_big_node b_node;
struct ma_state *mas;
unsigned char height;
/* Left and Right side of spanning store */
MA_STATE(l_mas, NULL, 0, 0);
MA_STATE(r_mas, NULL, 0, 0);
MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
/*
* A store operation that spans multiple nodes is called a spanning
* store and is handled early in the store call stack by the function
* mas_is_span_wr(). When a spanning store is identified, the maple
* state is duplicated. The first maple state walks the left tree path
* to ``index``, the duplicate walks the right tree path to ``last``.
* The data in the two nodes are combined into a single node, two nodes,
* or possibly three nodes (see the 3-way split above). A ``NULL``
* written to the last entry of a node is considered a spanning store as
* a rebalance is required for the operation to complete and an overflow
* of data may happen.
*/
mas = wr_mas->mas;
trace_ma_op(TP_FCT, mas);
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);
/*
* Node rebalancing may occur due to this store, so there may be three new
* entries per level plus a new root.
*/
height = mas_mt_height(mas);
/*
* Set up right side. Need to get to the next offset after the spanning
* store to ensure it's not NULL and to combine both the next node and
* the node with the start together.
*/
r_mas = *mas;
/* Avoid overflow, walk to next slot in the tree. */
if (r_mas.last + 1)
r_mas.last++;
r_mas.index = r_mas.last;
mas_wr_walk_index(&r_wr_mas);
r_mas.last = r_mas.index = mas->last;
/* Set up left side. */
l_mas = *mas;
mas_wr_walk_index(&l_wr_mas);
if (!wr_mas->entry) {
mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
mas->offset = l_mas.offset;
mas->index = l_mas.index;
mas->last = l_mas.last = r_mas.last;
}
/* expanding NULLs may make this cover the entire range */
if (!l_mas.index && r_mas.last == ULONG_MAX) {
mas_set_range(mas, 0, ULONG_MAX);
return mas_new_root(mas, wr_mas->entry);
}
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
/* Copy r_mas into b_node if there is anything to copy. */
if (r_mas.max > r_mas.last)
mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
&b_node, b_node.b_end + 1);
else
b_node.b_end++;
/* Stop spanning searches by searching for just index. */
l_mas.index = l_mas.last = mas->index;
mast.bn = &b_node;
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
/* Combine l_mas and r_mas and split them up evenly again. */
return mas_spanning_rebalance(mas, &mast, height + 1);
}
/*
* mas_wr_node_store() - Attempt to store the value in a node
* @wr_mas: The maple write state
*
* Attempts to reuse the node, but may allocate.
*/
static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
struct ma_state *mas = wr_mas->mas;
void __rcu **dst_slots;
unsigned long *dst_pivots;
unsigned char dst_offset, offset_end = wr_mas->offset_end;
struct maple_node reuse, *newnode;
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
unsigned char height = mas_mt_height(mas);
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
/* set up node. */
if (in_rcu) {
newnode = mas_pop_node(mas);
} else {
memset(&reuse, 0, sizeof(struct maple_node));
newnode = &reuse;
}
newnode->parent = mas_mn(mas)->parent;
dst_pivots = ma_pivots(newnode, wr_mas->type);
dst_slots = ma_slots(newnode, wr_mas->type);
/* Copy from start to insert point */
memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
/* Handle insert of new range starting after old range */
if (wr_mas->r_min < mas->index) {
rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
dst_pivots[mas->offset++] = mas->index - 1;
}
/* Store the new entry and range end. */
if (mas->offset < node_pivots)
dst_pivots[mas->offset] = mas->last;
rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
/*
* this range wrote to the end of the node or it overwrote the rest of
* the data
*/
if (offset_end > mas->end)
goto done;
dst_offset = mas->offset + 1;
/* Copy to the end of node if necessary. */
copy_size = mas->end - offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
sizeof(void *) * copy_size);
memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
sizeof(unsigned long) * (copy_size - 1));
if (new_end < node_pivots)
dst_pivots[new_end] = mas->max;
done:
mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
if (in_rcu) {
struct maple_enode *old_enode = mas->node;
mas->node = mt_mk_node(newnode, wr_mas->type);
mas_replace_node(mas, old_enode, height);
} else {
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
mas_update_gap(mas);
mas->end = new_end;
return;
}
/*
* mas_wr_slot_store: Attempt to store a value in a slot.
* @wr_mas: the maple write state
*/
static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char offset = mas->offset;
void __rcu **slots = wr_mas->slots;
bool gap = false;
	gap |= !mt_slot_locked(mas->tree, slots, offset);
	gap |= !mt_slot_locked(mas->tree, slots, offset + 1);

	if (wr_mas->offset_end - offset == 1) {
		if (mas->index == wr_mas->r_min) {
/* Overwriting the range and a part of the next one */
rcu_assign_pointer(slots[offset], wr_mas->entry);
wr_mas->pivots[offset] = mas->last;
} else {
/* Overwriting a part of the range and the next one */
rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
wr_mas->pivots[offset] = mas->index - 1;
mas->offset++; /* Keep mas accurate. */
}
} else {
WARN_ON_ONCE(mt_in_rcu(mas->tree));
/*
* Expand the range, only partially overwriting the previous and
* next ranges
*/
gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
wr_mas->pivots[offset] = mas->index - 1;
wr_mas->pivots[offset + 1] = mas->last;
mas->offset++; /* Keep mas accurate. */
}
trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
/*
* Only update gap when the new entry is empty or there is an empty
* entry in the original two ranges.
*/
	if (!wr_mas->entry || gap)
		mas_update_gap(mas);
return;
}
static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
if (!wr_mas->slots[wr_mas->offset_end]) {
/* If this one is null, the next and prev are not */
mas->last = wr_mas->end_piv;
} else {
/* Check next slot(s) if we are overwriting the end */
if ((mas->last == wr_mas->end_piv) && (mas->end != wr_mas->offset_end) &&
!wr_mas->slots[wr_mas->offset_end + 1]) {
			wr_mas->offset_end++;
			if (wr_mas->offset_end == mas->end)
mas->last = mas->max;
else
mas->last = wr_mas->pivots[wr_mas->offset_end];
wr_mas->end_piv = mas->last;
}
}
if (!wr_mas->content) {
/* If this one is null, the next and prev are not */
mas->index = wr_mas->r_min;
} else {
/* Check prev slot if we are overwriting the start */
if (mas->index == wr_mas->r_min && mas->offset &&
!wr_mas->slots[mas->offset - 1]) {
			mas->offset--;
			wr_mas->r_min = mas->index =
				mas_safe_min(mas, wr_mas->pivots, mas->offset);
wr_mas->r_max = wr_mas->pivots[mas->offset];
}
}
}
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
while ((wr_mas->offset_end < wr_mas->mas->end) &&
(wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
wr_mas->offset_end++;
if (wr_mas->offset_end < wr_mas->mas->end)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
}
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char new_end = mas->end + 2;
new_end -= wr_mas->offset_end - mas->offset;
if (wr_mas->r_min == mas->index)
new_end--;
if (wr_mas->end_piv == mas->last)
new_end--;
return new_end;
}
/*
* mas_wr_append() - Attempt to append
* @wr_mas: the maple write state
* @new_end: The end of the node after the modification
*
* This is currently unsafe in rcu mode since the end of the node may be cached
* by readers while the node contents may be updated which could result in
* inaccurate information.
*/
static inline void mas_wr_append(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
struct ma_state *mas = wr_mas->mas;
void __rcu **slots;
unsigned char end = mas->end;
if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
}
slots = wr_mas->slots;
if (new_end == end + 1) {
if (mas->last == wr_mas->r_max) {
/* Append to end of range */
rcu_assign_pointer(slots[new_end], wr_mas->entry);
wr_mas->pivots[end] = mas->index - 1;
mas->offset = new_end;
} else {
/* Append to start of range */
rcu_assign_pointer(slots[new_end], wr_mas->content);
wr_mas->pivots[end] = mas->last;
rcu_assign_pointer(slots[end], wr_mas->entry);
}
} else {
/* Append to the range without touching any boundaries. */
rcu_assign_pointer(slots[new_end], wr_mas->content);
wr_mas->pivots[end + 1] = mas->last;
rcu_assign_pointer(slots[end + 1], wr_mas->entry);
wr_mas->pivots[end] = mas->index - 1;
mas->offset = end + 1;
}
if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
mas->end = new_end;
trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry);
return;
}
/*
* mas_wr_bnode() - Slow path for a modification.
* @wr_mas: The write maple state
*
* This is where split, rebalance end up.
*/
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
struct maple_big_node b_node;
trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
mas_commit_b_node(wr_mas, &b_node);
}
/*
* mas_wr_store_entry() - Internal call to store a value
* @wr_mas: The maple write state
*/
static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char new_end = mas_wr_new_end(wr_mas);
switch (mas->store_type) {
case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
if (!!wr_mas->entry ^ !!wr_mas->content)
mas_update_gap(mas);
break;
case wr_append:
mas_wr_append(wr_mas, new_end);
break;
case wr_slot_store:
mas_wr_slot_store(wr_mas);
break;
case wr_node_store:
mas_wr_node_store(wr_mas, new_end);
break;
case wr_spanning_store:
mas_wr_spanning_store(wr_mas);
break;
case wr_split_store:
case wr_rebalance:
mas_wr_bnode(wr_mas);
break;
case wr_new_root:
mas_new_root(mas, wr_mas->entry);
break;
case wr_store_root:
mas_store_root(mas, wr_mas->entry);
break;
case wr_invalid:
MT_BUG_ON(mas->tree, 1);
}
return;
}
static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
if (!mas_is_active(mas)) {
if (mas_is_start(mas))
goto set_content;
if (unlikely(mas_is_paused(mas)))
goto reset;
if (unlikely(mas_is_none(mas)))
goto reset;
if (unlikely(mas_is_overflow(mas)))
goto reset;
if (unlikely(mas_is_underflow(mas)))
goto reset;
}
/*
* A less strict version of mas_is_span_wr() where we allow spanning
* writes within this node. This is to stop partial walks in
* mas_prealloc() from being reset.
*/
if (mas->last > mas->max)
goto reset;
if (wr_mas->entry)
goto set_content;
if (mte_is_leaf(mas->node) && mas->last == mas->max)
goto reset;
goto set_content;
reset:
mas_reset(mas);
set_content:
wr_mas->content = mas_start(mas);
}
/**
* mas_prealloc_calc() - Calculate number of nodes needed for a
* given store operation
* @wr_mas: The maple write state
* @entry: The entry to store into the tree
*
* The number of nodes required is stored in @wr_mas->mas->node_request.
*/
static inline void mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry)
{
struct ma_state *mas = wr_mas->mas;
unsigned char height = mas_mt_height(mas);
int ret = height * 3 + 1;
unsigned char delta = height - wr_mas->vacant_height;
switch (mas->store_type) {
case wr_exact_fit:
case wr_append:
case wr_slot_store:
ret = 0;
break;
case wr_spanning_store:
if (wr_mas->sufficient_height < wr_mas->vacant_height)
ret = (height - wr_mas->sufficient_height) * 3 + 1;
else
ret = delta * 3 + 1;
break;
case wr_split_store:
ret = delta * 2 + 1;
break;
case wr_rebalance:
if (wr_mas->sufficient_height < wr_mas->vacant_height)
ret = (height - wr_mas->sufficient_height) * 2 + 1;
else
ret = delta * 2 + 1;
break;
case wr_node_store:
ret = mt_in_rcu(mas->tree) ? 1 : 0;
break;
case wr_new_root:
ret = 1;
break;
case wr_store_root:
if (likely((mas->last != 0) || (mas->index != 0)))
ret = 1;
else if (((unsigned long) (entry) & 3) == 2)
ret = 1;
else
ret = 0;
break;
case wr_invalid:
WARN_ON_ONCE(1);
}
mas->node_request = ret;
}
/*
* mas_wr_store_type() - Determine the store type for a given
* store operation.
* @wr_mas: The maple write state
*
* Return: the type of store needed for the operation
*/
static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char new_end;
if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
return wr_store_root;
if (unlikely(!mas_wr_walk(wr_mas)))
return wr_spanning_store;
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
if (!wr_mas->entry)
mas_wr_extend_null(wr_mas);
if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last))
return wr_exact_fit;
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return wr_new_root;
new_end = mas_wr_new_end(wr_mas);
/* Potential spanning rebalance collapsing a node */
if (new_end < mt_min_slots[wr_mas->type]) {
if (!mte_is_root(mas->node))
return wr_rebalance;
return wr_node_store;
}
if (new_end >= mt_slots[wr_mas->type])
return wr_split_store;
if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end))
return wr_append;
if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
(wr_mas->offset_end - mas->offset == 1)))
return wr_slot_store;
return wr_node_store;
}
/**
* mas_wr_preallocate() - Preallocate enough nodes for a store operation
* @wr_mas: The maple write state
* @entry: The entry that will be stored
*
*/
static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
{
struct ma_state *mas = wr_mas->mas;
mas_wr_prealloc_setup(wr_mas);
mas->store_type = mas_wr_store_type(wr_mas);
mas_prealloc_calc(wr_mas, entry);
if (!mas->node_request)
return;
mas_alloc_nodes(mas, GFP_NOWAIT);
}
/**
* mas_insert() - Internal call to insert a value
* @mas: The maple state
* @entry: The entry to store
*
* Return: %NULL on success, otherwise the contents that already exist at the
* requested index. The maple state needs to be checked for error conditions.
*/
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
/*
* Inserting a new range inserts either 0, 1, or 2 pivots within the
* tree. If the insert fits exactly into an existing gap with a value
* of NULL, then the slot only needs to be written with the new value.
* If the range being inserted is adjacent to another range, then only a
* single pivot needs to be inserted (as well as writing the entry). If
* the new range is within a gap but does not touch any other ranges,
* then two pivots need to be inserted: the start - 1, and the end. As
* usual, the entry must be written. Most operations require a new node
* to be allocated and replace an existing node to ensure RCU safety,
* when in RCU mode. The exception to requiring a newly allocated node
* is when inserting at the end of a node (appending). When done
* carefully, appending can reuse the node in place.
*/
wr_mas.content = mas_start(mas);
if (wr_mas.content)
goto exists;
mas_wr_preallocate(&wr_mas, entry);
if (mas_is_err(mas))
return NULL;
/* spanning writes always overwrite something */
if (mas->store_type == wr_spanning_store)
goto exists;
/* At this point, we are at the leaf node that needs to be altered. */
if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) {
wr_mas.offset_end = mas->offset;
wr_mas.end_piv = wr_mas.r_max;
if (wr_mas.content || (mas->last > wr_mas.r_max))
goto exists;
}
mas_wr_store_entry(&wr_mas);
return wr_mas.content;
exists:
mas_set_err(mas, -EEXIST);
return wr_mas.content;
}
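/*
* Usage sketch (illustrative only; 'my_tree' and 'my_entry' are hypothetical
* names): a caller holding the write lock can rely on the -EEXIST error to
* detect overlap with an existing range.
*
*	MA_STATE(mas, &my_tree, 10, 19);
*	mas_insert(&mas, my_entry);
*	if (mas_is_err(&mas))
*		return xa_err(mas.node);	// -EEXIST if [10, 19] overlaps
*/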
/**
* mas_alloc_cyclic() - Internal call to find somewhere to store an entry
* @mas: The maple state.
* @startp: Pointer to ID.
* @range_lo: Lower bound of range to search.
* @range_hi: Upper bound of range to search.
* @entry: The entry to store.
* @next: Pointer to next ID to allocate.
* @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 if the allocation succeeded without wrapping, 1 if the
* allocation succeeded after wrapping, or -EBUSY if there are no
* free entries.
*/
int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
void *entry, unsigned long range_lo, unsigned long range_hi,
unsigned long *next, gfp_t gfp)
{
unsigned long min = range_lo;
int ret = 0;
range_lo = max(min, *next);
ret = mas_empty_area(mas, range_lo, range_hi, 1);
if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) {
mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED;
ret = 1;
}
if (ret < 0 && range_lo > min) {
mas_reset(mas);
ret = mas_empty_area(mas, min, range_hi, 1);
if (ret == 0)
ret = 1;
}
if (ret < 0)
return ret;
do {
mas_insert(mas, entry);
} while (mas_nomem(mas, gfp));
if (mas_is_err(mas))
return xa_err(mas->node);
*startp = mas->index;
*next = *startp + 1;
if (*next == 0)
mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL(mas_alloc_cyclic);
static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
mas_set(mas, index);
mas_state_walk(mas);
if (mas_is_start(mas))
goto retry;
}
static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
struct maple_node *node, const unsigned long index)
{
if (unlikely(ma_dead_node(node))) {
mas_rewalk(mas, index);
return true;
}
return false;
}
/*
* mas_prev_node() - Find the previous non-null entry at the same level in the
* tree.
* @mas: The maple state
* @min: The lower limit to search
*
* The prev node value will be mas->node[mas->offset] or the status will be
* ma_none.
* Return: 1 if the node is dead, 0 otherwise.
*/
static int mas_prev_node(struct ma_state *mas, unsigned long min)
{
enum maple_type mt;
int offset, level;
void __rcu **slots;
struct maple_node *node;
unsigned long *pivots;
unsigned long max;
node = mas_mn(mas);
if (!mas->min)
goto no_entry;
max = mas->min - 1;
if (max < min)
goto no_entry;
level = 0;
do {
if (ma_is_root(node))
goto no_entry;
/* Walk up. */
if (unlikely(mas_ascend(mas)))
return 1;
offset = mas->offset;
level++;
node = mas_mn(mas);
} while (!offset);
offset--;
mt = mte_node_type(mas->node);
while (level > 1) {
level--;
slots = ma_slots(node, mt);
mas->node = mas_slot(mas, slots, offset);
if (unlikely(ma_dead_node(node)))
return 1;
mt = mte_node_type(mas->node);
node = mas_mn(mas);
pivots = ma_pivots(node, mt);
offset = ma_data_end(node, mt, pivots, max);
if (unlikely(ma_dead_node(node)))
return 1;
}
slots = ma_slots(node, mt);
mas->node = mas_slot(mas, slots, offset);
pivots = ma_pivots(node, mt);
if (unlikely(ma_dead_node(node)))
return 1;
if (likely(offset))
mas->min = pivots[offset - 1] + 1;
mas->max = max;
mas->offset = mas_data_end(mas);
if (unlikely(mte_dead_node(mas->node)))
return 1;
mas->end = mas->offset;
return 0;
no_entry:
if (unlikely(ma_dead_node(node)))
return 1;
mas->status = ma_underflow;
return 0;
}
/*
* mas_prev_slot() - Get the entry in the previous slot
*
* @mas: The maple state
* @min: The minimum starting range
* @empty: Can be empty
*
* Return: The entry in the previous slot which is possibly NULL
*/
static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
void *entry;
void __rcu **slots;
unsigned long pivot;
enum maple_type type;
unsigned long *pivots;
struct maple_node *node;
unsigned long save_point = mas->index;
retry:
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (mas->min <= min) {
pivot = mas_safe_min(mas, pivots, mas->offset);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (pivot <= min)
goto underflow;
}
again:
if (likely(mas->offset)) {
mas->offset--;
mas->last = mas->index - 1;
mas->index = mas_safe_min(mas, pivots, mas->offset);
} else {
if (mas->index <= min)
goto underflow;
if (mas_prev_node(mas, min)) {
mas_rewalk(mas, save_point);
goto retry;
}
if (WARN_ON_ONCE(mas_is_underflow(mas)))
return NULL;
mas->last = mas->max;
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
mas->index = pivots[mas->offset - 1] + 1;
}
slots = ma_slots(node, type);
entry = mas_slot(mas, slots, mas->offset);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (likely(entry))
return entry;
if (!empty) {
if (mas->index <= min)
goto underflow;
goto again;
}
return entry;
underflow:
mas->status = ma_underflow;
return NULL;
}
/*
* mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
* @node: The maple node
* @max: The maximum pivot value to check.
*
* The next value will be mas->node[mas->offset] or the status will have
* overflowed.
* Return: 1 on dead node, 0 otherwise.
*/
static int mas_next_node(struct ma_state *mas, struct maple_node *node,
unsigned long max)
{
unsigned long min;
unsigned long *pivots;
struct maple_enode *enode;
struct maple_node *tmp;
int level = 0;
unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
if (mas->max >= max)
goto overflow;
min = mas->max + 1;
level = 0;
do {
if (ma_is_root(node))
goto overflow;
/* Walk up. */
if (unlikely(mas_ascend(mas)))
return 1;
level++;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
node_end = ma_data_end(node, mt, pivots, mas->max);
if (unlikely(ma_dead_node(node)))
return 1;
} while (unlikely(mas->offset == node_end));
slots = ma_slots(node, mt);
mas->offset++;
enode = mas_slot(mas, slots, mas->offset);
if (unlikely(ma_dead_node(node)))
return 1;
if (level > 1)
mas->offset = 0;
while (unlikely(level > 1)) {
level--;
mas->node = enode;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
slots = ma_slots(node, mt);
enode = mas_slot(mas, slots, 0);
if (unlikely(ma_dead_node(node)))
return 1;
}
if (!mas->offset)
pivots = ma_pivots(node, mt);
mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
tmp = mte_to_node(enode);
mt = mte_node_type(enode);
pivots = ma_pivots(tmp, mt);
mas->end = ma_data_end(tmp, mt, pivots, mas->max);
if (unlikely(ma_dead_node(node)))
return 1;
mas->node = enode;
mas->min = min;
return 0;
overflow:
if (unlikely(ma_dead_node(node)))
return 1;
mas->status = ma_overflow;
return 0;
}
/*
* mas_next_slot() - Get the entry in the next slot
*
* @mas: The maple state
* @max: The maximum starting range
* @empty: Can be empty
*
* Return: The entry in the next slot which is possibly NULL
*/
static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
void __rcu **slots;
unsigned long *pivots;
unsigned long pivot;
enum maple_type type;
struct maple_node *node;
unsigned long save_point = mas->last;
void *entry;
retry:
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (mas->max >= max) {
if (likely(mas->offset < mas->end))
pivot = pivots[mas->offset];
else
pivot = mas->max;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (pivot >= max) { /* Was at the limit, next will extend beyond */
mas->status = ma_overflow;
return NULL;
}
}
if (likely(mas->offset < mas->end)) {
mas->index = pivots[mas->offset] + 1;
again:
mas->offset++;
if (likely(mas->offset < mas->end))
mas->last = pivots[mas->offset];
else
mas->last = mas->max;
} else {
if (mas->last >= max) {
mas->status = ma_overflow;
return NULL;
}
if (mas_next_node(mas, node, max)) {
mas_rewalk(mas, save_point);
goto retry;
}
if (WARN_ON_ONCE(mas_is_overflow(mas)))
return NULL;
mas->offset = 0;
mas->index = mas->min;
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
mas->last = pivots[0];
}
slots = ma_slots(node, type);
entry = mt_slot(mas->tree, slots, mas->offset);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (entry)
return entry;
if (!empty) {
if (mas->last >= max) {
mas->status = ma_overflow;
return NULL;
}
mas->index = mas->last + 1;
goto again;
}
return entry;
}
/*
* mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
* highest gap address of a given size in a given node and descend.
* @mas: The maple state
* @size: The needed size.
*
* Return: True if found in a leaf, false otherwise.
*
*/
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
unsigned long *gap_min, unsigned long *gap_max)
{
enum maple_type type = mte_node_type(mas->node);
struct maple_node *node = mas_mn(mas);
unsigned long *pivots, *gaps;
void __rcu **slots;
unsigned long gap = 0;
unsigned long max, min;
unsigned char offset;
if (unlikely(mas_is_err(mas)))
return true;
if (ma_is_dense(type)) {
/* dense nodes. */
mas->offset = (unsigned char)(mas->index - mas->min);
return true;
}
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
gaps = ma_gaps(node, type);
offset = mas->offset;
min = mas_safe_min(mas, pivots, offset);
/* Skip out of bounds. */
while (mas->last < min)
min = mas_safe_min(mas, pivots, --offset);
max = mas_safe_pivot(mas, pivots, offset, type);
while (mas->index <= max) {
gap = 0;
if (gaps)
gap = gaps[offset];
else if (!mas_slot(mas, slots, offset))
gap = max - min + 1;
if (gap) {
if ((size <= gap) && (size <= mas->last - min + 1))
break;
if (!gaps) {
/* Skip the next slot, it cannot be a gap. */
if (offset < 2)
goto ascend;
offset -= 2;
max = pivots[offset];
min = mas_safe_min(mas, pivots, offset);
continue;
}
}
if (!offset)
goto ascend;
offset--;
max = min - 1;
min = mas_safe_min(mas, pivots, offset);
}
if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
goto no_space;
if (unlikely(ma_is_leaf(type))) {
mas->offset = offset;
*gap_min = min;
*gap_max = min + gap - 1;
return true;
}
/* descend, only happens under lock. */
mas->node = mas_slot(mas, slots, offset);
mas->min = min;
mas->max = max;
mas->offset = mas_data_end(mas);
return false;
ascend:
if (!mte_is_root(mas->node))
return false;
no_space:
mas_set_err(mas, -EBUSY);
return false;
}
static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
enum maple_type type = mte_node_type(mas->node);
unsigned long pivot, min, gap = 0;
unsigned char offset, data_end;
unsigned long *gaps, *pivots;
void __rcu **slots;
struct maple_node *node;
bool found = false;
if (ma_is_dense(type)) {
mas->offset = (unsigned char)(mas->index - mas->min);
return true;
}
node = mas_mn(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
gaps = ma_gaps(node, type);
offset = mas->offset;
min = mas_safe_min(mas, pivots, offset);
data_end = ma_data_end(node, type, pivots, mas->max);
for (; offset <= data_end; offset++) {
pivot = mas_safe_pivot(mas, pivots, offset, type);
/* Not within lower bounds */
if (mas->index > pivot)
goto next_slot;
if (gaps)
gap = gaps[offset];
else if (!mas_slot(mas, slots, offset))
gap = min(pivot, mas->last) - max(mas->index, min) + 1;
else
goto next_slot;
if (gap >= size) {
if (ma_is_leaf(type)) {
found = true;
break;
}
mas->node = mas_slot(mas, slots, offset);
mas->min = min;
mas->max = pivot;
offset = 0;
break;
}
next_slot:
min = pivot + 1;
if (mas->last <= pivot) {
mas_set_err(mas, -EBUSY);
return true;
}
}
mas->offset = offset;
return found;
}
/**
* mas_walk() - Search for @mas->index in the tree.
* @mas: The maple state.
*
* mas->index and mas->last will be set to the range if there is a value. If
* mas->status is ma_none, reset to ma_start
*
* Return: the entry at the location or %NULL.
*/
void *mas_walk(struct ma_state *mas)
{
void *entry;
if (!mas_is_active(mas) && !mas_is_start(mas))
mas->status = ma_start;
retry:
entry = mas_state_walk(mas);
if (mas_is_start(mas)) {
goto retry;
} else if (mas_is_none(mas)) {
mas->index = 0;
mas->last = ULONG_MAX;
} else if (mas_is_ptr(mas)) {
if (!mas->index) {
mas->last = 0;
return entry;
}
mas->index = 1;
mas->last = ULONG_MAX;
mas->status = ma_none;
return NULL;
}
return entry;
}
EXPORT_SYMBOL_GPL(mas_walk);
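/*
* Usage sketch (illustrative; 'tree' is a hypothetical, already populated
* maple tree): look up the range containing index 5 under RCU.
*
*	MA_STATE(mas, &tree, 5, 5);
*	rcu_read_lock();
*	entry = mas_walk(&mas);		// mas.index/mas.last now span the range
*	rcu_read_unlock();
*/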
static inline bool mas_rewind_node(struct ma_state *mas)
{
unsigned char slot;
do {
if (mte_is_root(mas->node)) {
slot = mas->offset;
if (!slot)
return false;
} else {
mas_ascend(mas);
slot = mas->offset;
}
} while (!slot);
mas->offset = --slot;
return true;
}
/*
* mas_skip_node() - Internal function. Skip over a node.
* @mas: The maple state.
*
* Return: true if there is another node, false otherwise.
*/
static inline bool mas_skip_node(struct ma_state *mas)
{
if (mas_is_err(mas))
return false;
do {
if (mte_is_root(mas->node)) {
if (mas->offset >= mas_data_end(mas)) {
mas_set_err(mas, -EBUSY);
return false;
}
} else {
mas_ascend(mas);
}
} while (mas->offset >= mas_data_end(mas));
mas->offset++;
return true;
}
/*
* mas_awalk() - Allocation walk. Search from low address to high, for a gap of
* @size
* @mas: The maple state
* @size: The size of the gap required
*
* Search between @mas->index and @mas->last for a gap of @size.
*/
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
{
struct maple_enode *last = NULL;
/*
* There are 4 options:
* go to child (descend)
* go back to parent (ascend)
* no gap found. (return, error == -EBUSY)
* found the gap. (return)
*/
while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
if (last == mas->node)
mas_skip_node(mas);
else
last = mas->node;
}
}
/*
* mas_sparse_area() - Internal function. Return upper or lower limit when
* searching for a gap in an empty tree.
* @mas: The maple state
* @min: the minimum range
* @max: The maximum range
* @size: The size of the gap
* @fwd: Searching forward or back
*/
static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size, bool fwd)
{
if (!unlikely(mas_is_none(mas)) && min == 0) {
min++;
/*
* min was just increased, so recheck whether the requested size
* still fits.
*/
if (min > max || max - min + 1 < size)
return -EBUSY;
}
/* mas_is_ptr */
if (fwd) {
mas->index = min;
mas->last = min + size - 1;
} else {
mas->last = max;
mas->index = max - size + 1;
}
return 0;
}
/*
* mas_empty_area() - Get the lowest address within the range that is
* sufficient for the size requested.
* @mas: The maple state
* @min: The lowest value of the range
* @max: The highest value of the range
* @size: The size needed
*/
int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size)
{
unsigned char offset;
unsigned long *pivots;
enum maple_type mt;
struct maple_node *node;
if (min > max)
return -EINVAL;
if (size == 0 || max - min < size - 1)
return -EINVAL;
if (mas_is_start(mas))
mas_start(mas);
else if (mas->offset >= 2)
mas->offset -= 2;
else if (!mas_skip_node(mas))
return -EBUSY;
/* Empty set */
if (mas_is_none(mas) || mas_is_ptr(mas))
return mas_sparse_area(mas, min, max, size, true);
/* The start of the window can only be within these values */
mas->index = min;
mas->last = max;
mas_awalk(mas, size);
if (unlikely(mas_is_err(mas)))
return xa_err(mas->node);
offset = mas->offset;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
min = mas_safe_min(mas, pivots, offset);
if (mas->index < min)
mas->index = min;
mas->last = mas->index + size - 1;
mas->end = ma_data_end(node, mt, pivots, mas->max);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);
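/*
* Usage sketch (illustrative; 'tree' is a hypothetical tree created with
* MT_FLAGS_ALLOC_RANGE): find the lowest gap of 16 consecutive indices
* between 1 and 1000 while holding the lock.
*
*	MA_STATE(mas, &tree, 0, 0);
*	mas_lock(&mas);
*	ret = mas_empty_area(&mas, 1, 1000, 16);
*	// on success, mas.index .. mas.last describes the gap found
*	mas_unlock(&mas);
*/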
/*
* mas_empty_area_rev() - Get the highest address within the range that is
* sufficient for the size requested.
* @mas: The maple state
* @min: The lowest value of the range
* @max: The highest value of the range
* @size: The size needed
*/
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size)
{
struct maple_enode *last = mas->node;
if (min > max)
return -EINVAL;
if (size == 0 || max - min < size - 1)
return -EINVAL;
if (mas_is_start(mas))
mas_start(mas);
else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
return -EBUSY;
if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
return mas_sparse_area(mas, min, max, size, false);
else if (mas->offset >= 2)
mas->offset -= 2;
else
mas->offset = mas_data_end(mas);
/* The start of the window can only be within these values. */
mas->index = min;
mas->last = max;
while (!mas_rev_awalk(mas, size, &min, &max)) {
if (last == mas->node) {
if (!mas_rewind_node(mas))
return -EBUSY;
} else {
last = mas->node;
}
}
if (mas_is_err(mas))
return xa_err(mas->node);
if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
return -EBUSY;
/* Trim the upper limit to the max. */
if (max < mas->last)
mas->last = max;
mas->index = mas->last - size + 1;
mas->end = mas_data_end(mas);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
/*
* mte_dead_leaves() - Mark all leaves of a node as dead.
* @enode: the encoded node
* @mt: the maple tree
* @slots: Pointer to the slot array
*
* Must hold the write lock.
*
* Return: The number of leaves marked as dead.
*/
static inline
unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
void __rcu **slots)
{
struct maple_node *node;
enum maple_type type;
void *entry;
int offset;
for (offset = 0; offset < mt_slot_count(enode); offset++) {
entry = mt_slot(mt, slots, offset);
type = mte_node_type(entry);
node = mte_to_node(entry);
/* Use both node and type to catch LE & BE metadata */
if (!node || !type)
break;
mte_set_node_dead(entry);
node->type = type;
rcu_assign_pointer(slots[offset], node);
}
return offset;
}
/**
* mte_dead_walk() - Walk down a dead tree to just before the leaves
* @enode: The maple encoded node
* @offset: The starting offset
*
* Note: This can only be used from the RCU callback context.
*/
static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
{
struct maple_node *node, *next;
void __rcu **slots = NULL;
next = mte_to_node(*enode);
do {
*enode = ma_enode_ptr(next);
node = mte_to_node(*enode);
slots = ma_slots(node, node->type);
next = rcu_dereference_protected(slots[offset],
lock_is_held(&rcu_callback_map));
offset = 0;
} while (!ma_is_leaf(next->type));
return slots;
}
/**
* mt_free_walk() - Walk & free a tree in the RCU callback context
* @head: The RCU head that's within the node.
*
* Note: This can only be used from the RCU callback context.
*/
static void mt_free_walk(struct rcu_head *head)
{
void __rcu **slots;
struct maple_node *node, *start;
struct maple_enode *enode;
unsigned char offset;
enum maple_type type;
node = container_of(head, struct maple_node, rcu);
if (ma_is_leaf(node->type))
goto free_leaf;
start = node;
enode = mt_mk_node(node, node->type);
slots = mte_dead_walk(&enode, 0);
node = mte_to_node(enode);
do {
mt_free_bulk(node->slot_len, slots);
offset = node->parent_slot + 1;
enode = node->piv_parent;
if (mte_to_node(enode) == node)
goto free_leaf;
type = mte_node_type(enode);
slots = ma_slots(mte_to_node(enode), type);
if ((offset < mt_slots[type]) &&
rcu_dereference_protected(slots[offset],
lock_is_held(&rcu_callback_map)))
slots = mte_dead_walk(&enode, offset);
node = mte_to_node(enode);
} while ((node != start) || (node->slot_len < offset));
slots = ma_slots(node, node->type);
mt_free_bulk(node->slot_len, slots);
free_leaf:
kfree(node);
}
static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
{
struct maple_node *node;
struct maple_enode *next = *enode;
void __rcu **slots = NULL;
enum maple_type type;
unsigned char next_offset = 0;
do {
*enode = next;
node = mte_to_node(*enode);
type = mte_node_type(*enode);
slots = ma_slots(node, type);
next = mt_slot_locked(mt, slots, next_offset);
if ((mte_dead_node(next)))
next = mt_slot_locked(mt, slots, ++next_offset);
mte_set_node_dead(*enode);
node->type = type;
node->piv_parent = prev;
node->parent_slot = offset;
offset = next_offset;
next_offset = 0;
prev = *enode;
} while (!mte_is_leaf(next));
return slots;
}
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free)
{
void __rcu **slots;
struct maple_node *node = mte_to_node(enode);
struct maple_enode *start;
if (mte_is_leaf(enode)) {
mte_set_node_dead(enode);
node->type = mte_node_type(enode);
goto free_leaf;
}
start = enode;
slots = mte_destroy_descend(&enode, mt, start, 0);
node = mte_to_node(enode); /* Updated in the above call. */
do {
enum maple_type type;
unsigned char offset;
struct maple_enode *parent, *tmp;
node->slot_len = mte_dead_leaves(enode, mt, slots);
if (free)
mt_free_bulk(node->slot_len, slots);
offset = node->parent_slot + 1;
enode = node->piv_parent;
if (mte_to_node(enode) == node)
goto free_leaf;
type = mte_node_type(enode);
slots = ma_slots(mte_to_node(enode), type);
if (offset >= mt_slots[type])
goto next;
tmp = mt_slot_locked(mt, slots, offset);
if (mte_node_type(tmp) && mte_to_node(tmp)) {
parent = enode;
enode = tmp;
slots = mte_destroy_descend(&enode, mt, parent, offset);
}
next:
node = mte_to_node(enode);
} while (start != enode);
node = mte_to_node(enode);
node->slot_len = mte_dead_leaves(enode, mt, slots);
if (free)
mt_free_bulk(node->slot_len, slots);
free_leaf:
if (free)
kfree(node);
else
mt_clear_meta(mt, node, node->type);
}
/*
* mte_destroy_walk() - Free a tree or sub-tree.
* @enode: the encoded maple node (maple_enode) to start
* @mt: the tree to free - needed for node types.
*
* Must hold the write lock.
*/
static inline void mte_destroy_walk(struct maple_enode *enode,
struct maple_tree *mt)
{
struct maple_node *node = mte_to_node(enode);
if (mt_in_rcu(mt)) {
mt_destroy_walk(enode, mt, false);
call_rcu(&node->rcu, mt_free_walk);
} else {
mt_destroy_walk(enode, mt, true);
}
}
/* Interface */
/**
* mas_store() - Store an @entry.
* @mas: The maple state.
* @entry: The entry to store.
*
* @mas->index and @mas->last are used to set the range for @entry.
*
* Return: the first entry between mas->index and mas->last or %NULL.
*/
void *mas_store(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(TP_FCT, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MAS_WARN_ON(mas, mas->index > mas->last))
pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last,
entry);
if (mas->index > mas->last) {
mas_set_err(mas, -EINVAL);
return NULL;
}
#endif
/*
* Storing is the same operation as insert with the added caveat that it
* can overwrite entries. Although this seems simple enough, one may
* want to examine what happens if a single store operation was to
* overwrite multiple entries within a self-balancing B-Tree.
*/
mas_wr_prealloc_setup(&wr_mas);
mas->store_type = mas_wr_store_type(&wr_mas);
if (mas->mas_flags & MA_STATE_PREALLOC) {
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
return wr_mas.content;
}
mas_prealloc_calc(&wr_mas, entry);
if (!mas->node_request)
goto store;
mas_alloc_nodes(mas, GFP_NOWAIT);
if (mas_is_err(mas))
return NULL;
store:
mas_wr_store_entry(&wr_mas);
mas_destroy(mas);
return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
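/*
* Usage sketch (illustrative; 'tree' and 'ptr' are hypothetical): overwrite
* whatever lies in [12, 15] while holding the spinlock. Allocation here uses
* GFP_NOWAIT, so callers that can sleep usually prefer mas_store_gfp().
*
*	MA_STATE(mas, &tree, 12, 15);
*	mas_lock(&mas);
*	mas_store(&mas, ptr);
*	if (mas_is_err(&mas))
*		// handle allocation failure
*	mas_unlock(&mas);
*/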
/**
* mas_store_gfp() - Store a value into the tree.
* @mas: The maple state
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations if necessary.
*
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
*/
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
unsigned long index = mas->index;
unsigned long last = mas->last;
MA_WR_STATE(wr_mas, mas, entry);
int ret = 0;
retry:
mas_wr_preallocate(&wr_mas, entry);
if (unlikely(mas_nomem(mas, gfp))) {
if (!entry)
__mas_set_range(mas, index, last);
goto retry;
}
if (mas_is_err(mas)) {
ret = xa_err(mas->node);
goto out;
}
mas_wr_store_entry(&wr_mas);
out:
mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
/**
* mas_store_prealloc() - Store a value into the tree using memory
* preallocated in the maple state.
* @mas: The maple state
* @entry: The entry to store.
*/
void mas_store_prealloc(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
if (mas->store_type == wr_store_root) {
mas_wr_prealloc_setup(&wr_mas);
goto store;
}
mas_wr_walk_descend(&wr_mas);
if (mas->store_type != wr_spanning_store) {
/* set wr_mas->content to current slot */
wr_mas.content = mas_slot_locked(mas, wr_mas.slots, mas->offset);
mas_wr_end_piv(&wr_mas);
}
store:
trace_ma_write(TP_FCT, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
mas_destroy(mas);
}
EXPORT_SYMBOL_GPL(mas_store_prealloc);
/**
* mas_preallocate() - Preallocate enough nodes for a store operation
* @mas: The maple state
* @entry: The entry that will be stored
* @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 on success, -ENOMEM if memory could not be allocated.
*/
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
mas_wr_prealloc_setup(&wr_mas);
mas->store_type = mas_wr_store_type(&wr_mas);
mas_prealloc_calc(&wr_mas, entry);
if (!mas->node_request)
goto set_flag;
mas->mas_flags &= ~MA_STATE_PREALLOC;
mas_alloc_nodes(mas, gfp);
if (mas_is_err(mas)) {
int ret = xa_err(mas->node);
mas->node_request = 0;
mas_destroy(mas);
mas_reset(mas);
return ret;
}
set_flag:
mas->mas_flags |= MA_STATE_PREALLOC;
return 0;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
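/*
* Usage sketch (illustrative; 'tree' and 'ptr' are hypothetical): preallocate
* with a sleeping allocation up front, then perform the store at a point where
* failure is no longer acceptable.
*
*	MA_STATE(mas, &tree, 100, 199);
*	if (mas_preallocate(&mas, ptr, GFP_KERNEL))
*		return -ENOMEM;
*	// ... enter the critical section ...
*	mas_store_prealloc(&mas, ptr);	// consumes the preallocated nodes
*/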
/*
* mas_destroy() - destroy a maple state.
* @mas: The maple state
*
* Upon completion, check the left-most node and rebalance against the node to
* the right if necessary. Frees any allocated nodes associated with this maple
* state.
*/
void mas_destroy(struct ma_state *mas)
{
mas->mas_flags &= ~MA_STATE_PREALLOC;
mas_empty_nodes(mas);
}
EXPORT_SYMBOL_GPL(mas_destroy);
static void mas_may_activate(struct ma_state *mas)
{
if (!mas->node) {
mas->status = ma_start;
} else if (mas->index > mas->max || mas->index < mas->min) {
mas->status = ma_start;
} else {
mas->status = ma_active;
}
}
static bool mas_next_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
bool was_none = mas_is_none(mas);
if (unlikely(mas->last >= max)) {
mas->status = ma_overflow;
return true;
}
switch (mas->status) {
case ma_active:
return false;
case ma_none:
fallthrough;
case ma_pause:
mas->status = ma_start;
fallthrough;
case ma_start:
mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
break;
case ma_overflow:
/* Overflowed before, but the max changed */
mas_may_activate(mas);
break;
case ma_underflow:
/* The user expects the mas to be one before where it is */
mas_may_activate(mas);
*entry = mas_walk(mas);
if (*entry)
return true;
break;
case ma_root:
break;
case ma_error:
return true;
}
if (likely(mas_is_active(mas))) /* Fast path */
return false;
if (mas_is_ptr(mas)) {
*entry = NULL;
if (was_none && mas->index == 0) {
mas->index = mas->last = 0;
return true;
}
mas->index = 1;
mas->last = ULONG_MAX;
mas->status = ma_none;
return true;
}
if (mas_is_none(mas))
return true;
return false;
}
/**
* mas_next() - Get the next entry.
* @mas: The maple state
* @max: The maximum index to check.
*
* Returns the next entry after @mas->index.
* Must hold rcu_read_lock or the write lock.
* Can return the zero entry.
*
* Return: The next entry or %NULL
*/
void *mas_next(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
if (mas_next_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);
/**
* mas_next_range() - Advance the maple state to the next range
* @mas: The maple state
* @max: The maximum index to check.
*
* Sets @mas->index and @mas->last to the range.
* Must hold rcu_read_lock or the write lock.
* Can return the zero entry.
*
* Return: The next entry or %NULL
*/
void *mas_next_range(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
if (mas_next_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
/**
* mt_next() - get the next value in the maple tree
* @mt: The maple tree
* @index: The start index
* @max: The maximum index to check
*
* Takes RCU read lock internally to protect the search, which does not
* protect the returned pointer after dropping RCU read lock.
* See also: Documentation/core-api/maple_tree.rst
*
* Return: The entry higher than @index or %NULL if nothing is found.
*/
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
void *entry = NULL;
MA_STATE(mas, mt, index, index);
rcu_read_lock();
entry = mas_next(&mas, max);
rcu_read_unlock();
return entry;
}
EXPORT_SYMBOL_GPL(mt_next);
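/*
* Usage sketch (illustrative; 'tree' is hypothetical): fetch the first entry
* stored above index 100. No external locking is needed, but the returned
* pointer is unprotected once the internal RCU read lock is dropped.
*
*	void *p = mt_next(&tree, 100, ULONG_MAX);
*/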
static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
{
if (unlikely(mas->index <= min)) {
mas->status = ma_underflow;
return true;
}
switch (mas->status) {
case ma_active:
return false;
case ma_start:
break;
case ma_none:
fallthrough;
case ma_pause:
mas->status = ma_start;
break;
case ma_underflow:
/* underflowed before but the min changed */
mas_may_activate(mas);
break;
case ma_overflow:
/* User expects mas to be one after where it is */
mas_may_activate(mas);
*entry = mas_walk(mas);
if (*entry)
return true;
break;
case ma_root:
break;
case ma_error:
return true;
}
if (mas_is_start(mas))
mas_walk(mas);
if (unlikely(mas_is_ptr(mas))) {
if (!mas->index) {
mas->status = ma_none;
return true;
}
mas->index = mas->last = 0;
*entry = mas_root(mas);
return true;
}
if (mas_is_none(mas)) {
if (mas->index) {
/* Walked to out-of-range pointer? */
mas->index = mas->last = 0;
mas->status = ma_root;
*entry = mas_root(mas);
return true;
}
return true;
}
return false;
}
/**
* mas_prev() - Get the previous entry
* @mas: The maple state
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
* Will reset mas to ma_start if the status is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
*/
void *mas_prev(struct ma_state *mas, unsigned long min)
{
void *entry = NULL;
if (mas_prev_setup(mas, min, &entry))
return entry;
return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);
/**
* mas_prev_range() - Advance to the previous range
* @mas: The maple state
* @min: The minimum value to check.
*
* Sets @mas->index and @mas->last to the range.
* Must hold rcu_read_lock or the write lock.
* Will reset mas to ma_start if the node is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
*/
void *mas_prev_range(struct ma_state *mas, unsigned long min)
{
void *entry = NULL;
if (mas_prev_setup(mas, min, &entry))
return entry;
return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
/**
* mt_prev() - get the previous value in the maple tree
* @mt: The maple tree
* @index: The start index
* @min: The minimum index to check
*
* Takes RCU read lock internally to protect the search, which does not
* protect the returned pointer after dropping RCU read lock.
* See also: Documentation/core-api/maple_tree.rst
*
* Return: The entry before @index or %NULL if nothing is found.
*/
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
void *entry = NULL;
MA_STATE(mas, mt, index, index);
rcu_read_lock();
entry = mas_prev(&mas, min);
rcu_read_unlock();
return entry;
}
EXPORT_SYMBOL_GPL(mt_prev);
/**
* mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
* @mas: The maple state to pause
*
* Some users need to pause a walk and drop the lock they're holding in
* order to yield to a higher priority thread or carry out an operation
* on an entry. Those users should call this function before they drop
* the lock. It resets the @mas to be suitable for the next iteration
* of the loop after the user has reacquired the lock. If most entries
* found during a walk require you to call mas_pause(), the mt_for_each()
* iterator may be more appropriate.
*
*/
void mas_pause(struct ma_state *mas)
{
mas->status = ma_pause;
mas->node = NULL;
}
EXPORT_SYMBOL_GPL(mas_pause);
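/*
* Usage sketch (illustrative): drop and retake the lock in the middle of an
* iteration; 'mas' is assumed to be an initialized maple state and the caller
* holds the tree lock.
*
*	mas_for_each(&mas, entry, ULONG_MAX) {
*		if (need_resched()) {
*			mas_pause(&mas);
*			mas_unlock(&mas);
*			cond_resched();
*			mas_lock(&mas);
*		}
*		// process entry
*	}
*/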
/**
* mas_find_setup() - Internal function to set up mas_find*().
* @mas: The maple state
* @max: The maximum index
* @entry: Pointer to the entry
*
* Returns: True if entry is the answer, false otherwise.
*/
static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
{
switch (mas->status) {
case ma_active:
if (mas->last < max)
return false;
return true;
case ma_start:
break;
case ma_pause:
if (unlikely(mas->last >= max))
return true;
mas->index = ++mas->last;
mas->status = ma_start;
break;
case ma_none:
if (unlikely(mas->last >= max))
return true;
mas->index = mas->last;
mas->status = ma_start;
break;
case ma_underflow:
/* mas is pointing at the entry before which it was unable to go lower */
if (unlikely(mas->index >= max)) {
mas->status = ma_overflow;
return true;
}
mas_may_activate(mas);
*entry = mas_walk(mas);
if (*entry)
return true;
break;
case ma_overflow:
if (unlikely(mas->last >= max))
return true;
mas_may_activate(mas);
*entry = mas_walk(mas);
if (*entry)
return true;
break;
case ma_root:
break;
case ma_error:
return true;
}
if (mas_is_start(mas)) {
/* First run or continue */
if (mas->index > max)
return true;
*entry = mas_walk(mas);
if (*entry)
return true;
}
if (unlikely(mas_is_ptr(mas)))
goto ptr_out_of_range;
if (unlikely(mas_is_none(mas)))
return true;
if (mas->index == max)
return true;
return false;
ptr_out_of_range:
mas->status = ma_none;
mas->index = 1;
mas->last = ULONG_MAX;
return true;
}
/**
* mas_find() - On the first call, find the entry at or after mas->index up to
* %max. Otherwise, find the entry after mas->index.
* @mas: The maple state
* @max: The maximum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
void *mas_find(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
if (mas_find_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
entry = mas_next_slot(mas, max, false);
/* Ignore overflow */
mas->status = ma_active;
return entry;
}
EXPORT_SYMBOL_GPL(mas_find);
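/*
* Usage sketch (illustrative; 'tree' is hypothetical): mas_find() is the
* helper behind the mas_for_each() iterator.
*
*	MA_STATE(mas, &tree, 0, 0);
*	void *entry;
*
*	rcu_read_lock();
*	mas_for_each(&mas, entry, ULONG_MAX)
*		pr_debug("range %lu-%lu\n", mas.index, mas.last);
*	rcu_read_unlock();
*/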
/**
* mas_find_range() - On the first call, find the entry at or after
* mas->index up to %max. Otherwise, advance to the next slot after mas->index.
* @mas: The maple state
* @max: The maximum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
void *mas_find_range(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
if (mas_find_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);
/**
* mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
* @mas: The maple state
* @min: The minimum index
* @entry: Pointer to the entry
*
* Returns: True if entry is the answer, false otherwise.
*/
static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
switch (mas->status) {
case ma_active:
goto active;
case ma_start:
break;
case ma_pause:
if (unlikely(mas->index <= min)) {
mas->status = ma_underflow;
return true;
}
mas->last = --mas->index;
mas->status = ma_start;
break;
case ma_none:
if (mas->index <= min)
goto none;
mas->last = mas->index;
mas->status = ma_start;
break;
case ma_overflow: /* user expects the mas to be one after where it is */
if (unlikely(mas->index <= min)) {
mas->status = ma_underflow;
return true;
}
mas->status = ma_active;
break;
case ma_underflow: /* user expects the mas to be one before where it is */
if (unlikely(mas->index <= min))
return true;
mas->status = ma_active;
break;
case ma_root:
break;
case ma_error:
return true;
}
if (mas_is_start(mas)) {
/* First run or continue */
if (mas->index < min)
return true;
*entry = mas_walk(mas);
if (*entry)
return true;
}
if (unlikely(mas_is_ptr(mas)))
goto none;
if (unlikely(mas_is_none(mas))) {
/*
* Walked to the location, and there was nothing so the previous
* location is 0.
*/
mas->last = mas->index = 0;
mas->status = ma_root;
*entry = mas_root(mas);
return true;
}
active:
if (mas->index < min)
return true;
return false;
none:
mas->status = ma_none;
return true;
}
/**
* mas_find_rev() - On the first call, find the first non-null entry at or below
* mas->index down to %min. Otherwise find the first non-null entry below
* mas->index down to %min.
* @mas: The maple state
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
void *entry = NULL;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
/**
* mas_find_range_rev() - On the first call, find the first non-null entry at or
* below mas->index down to %min. Otherwise advance to the previous slot after
* mas->index down to %min.
* @mas: The maple state
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
{
void *entry = NULL;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
/**
* mas_erase() - Find the range in which index resides and erase the entire
* range.
* @mas: The maple state
*
* Must hold the write lock.
* Searches for @mas->index, sets @mas->index and @mas->last to the range and
* erases that range.
*
* Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
*/
void *mas_erase(struct ma_state *mas)
{
void *entry;
unsigned long index = mas->index;
MA_WR_STATE(wr_mas, mas, NULL);
if (!mas_is_active(mas) || !mas_is_start(mas))
mas->status = ma_start;
write_retry:
entry = mas_state_walk(mas);
if (!entry)
return NULL;
/* Must reset to ensure spanning writes of last slot are detected */
mas_reset(mas);
mas_wr_preallocate(&wr_mas, NULL);
if (mas_nomem(mas, GFP_KERNEL)) {
/* in case the range of entry changed when unlocked */
mas->index = mas->last = index;
goto write_retry;
}
if (mas_is_err(mas))
goto out;
mas_wr_store_entry(&wr_mas);
out:
mas_destroy(mas);
return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
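/*
* Usage sketch (illustrative; 'tree' is hypothetical): erase whatever range
* contains index 42 while holding the write lock.
*
*	MA_STATE(mas, &tree, 42, 42);
*	mas_lock(&mas);
*	old = mas_erase(&mas);		// NULL if nothing was stored there
*	mas_unlock(&mas);
*/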
/**
* mas_nomem() - Check if there was an error allocating and do the allocation
* if necessary. If there are allocations, then free them.
* @mas: The maple state
* @gfp: The GFP_FLAGS to use for allocations
* Return: true on allocation, false otherwise.
*/
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
__must_hold(mas->tree->ma_lock)
{
if (likely(mas->node != MA_ERROR(-ENOMEM)))
return false;
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
mas_alloc_nodes(mas, gfp);
mtree_lock(mas->tree);
} else {
mas_alloc_nodes(mas, gfp);
}
if (!mas->sheaf && !mas->alloc)
return false;
mas->status = ma_start;
return true;
}
void __init maple_tree_init(void)
{
struct kmem_cache_args args = {
.align = sizeof(struct maple_node),
.sheaf_capacity = 32,
};
maple_node_cache = kmem_cache_create("maple_node",
sizeof(struct maple_node), &args,
SLAB_PANIC);
}
/**
* mtree_load() - Load a value stored in a maple tree
* @mt: The maple tree
* @index: The index to load
*
* Return: the entry or %NULL
*/
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
MA_STATE(mas, mt, index, index);
void *entry;
trace_ma_read(TP_FCT, &mas);
rcu_read_lock();
retry:
entry = mas_start(&mas);
if (unlikely(mas_is_none(&mas)))
goto unlock;
if (unlikely(mas_is_ptr(&mas))) {
if (index)
entry = NULL;
goto unlock;
}
entry = mtree_lookup_walk(&mas);
if (!entry && unlikely(mas_is_start(&mas)))
goto retry;
unlock:
rcu_read_unlock();
if (xa_is_zero(entry))
return NULL;
return entry;
}
EXPORT_SYMBOL(mtree_load);
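/*
* Usage sketch (illustrative; 'tree' is hypothetical): the simplest read-side
* call, no external locking required. The pointer is not protected once the
* call returns, so copy out whatever is needed.
*
*	void *p = mtree_load(&tree, 7);
*/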
/**
* mtree_store_range() - Store an entry at a given range.
* @mt: The maple tree
* @index: The start of the range
* @last: The end of the range
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations
*
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
*/
int mtree_store_range(struct maple_tree *mt, unsigned long index,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(mas, mt, index, last);
int ret = 0;
trace_ma_write(TP_FCT, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (index > last)
return -EINVAL;
mtree_lock(mt);
ret = mas_store_gfp(&mas, entry, gfp);
mtree_unlock(mt);
return ret;
}
EXPORT_SYMBOL(mtree_store_range);
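/*
* Usage sketch (illustrative; 'tree' and 'ptr' are hypothetical): store one
* pointer across a whole range, overwriting anything already present.
*
*	int err = mtree_store_range(&tree, 0x1000, 0x1fff, ptr, GFP_KERNEL);
*/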
/**
* mtree_store() - Store an entry at a given index.
* @mt: The maple tree
* @index: The index to store the value
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations
*
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
*/
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
gfp_t gfp)
{
return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);
/**
* mtree_insert_range() - Insert an entry at a given range if there is no value.
* @mt: The maple tree
* @first: The start of the range
* @last: The end of the range
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
* request, -ENOMEM if memory could not be allocated.
*/
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(ms, mt, first, last);
int ret = 0;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (first > last)
return -EINVAL;
mtree_lock(mt);
retry:
mas_insert(&ms, entry);
if (mas_nomem(&ms, gfp))
goto retry;
mtree_unlock(mt);
if (mas_is_err(&ms))
ret = xa_err(ms.node);
mas_destroy(&ms);
return ret;
}
EXPORT_SYMBOL(mtree_insert_range);
/**
* mtree_insert() - Insert an entry at a given index if there is no value.
* @mt: The maple tree
* @index : The index to store the value
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
* request, -ENOMEM if memory could not be allocated.
*/
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
gfp_t gfp)
{
return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);
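/*
* Usage sketch (illustrative; 'tree' and 'ptr' are hypothetical): insert
* refuses to overwrite, which makes it a convenient existence check.
*
*	int err = mtree_insert(&tree, 99, ptr, GFP_KERNEL);
*	if (err == -EEXIST)
*		// index 99 was already occupied
*/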
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp)
{
int ret = 0;
MA_STATE(mas, mt, 0, 0);
if (!mt_is_alloc(mt))
return -EINVAL;
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
mtree_lock(mt);
retry:
ret = mas_empty_area(&mas, min, max, size);
if (ret)
goto unlock;
mas_insert(&mas, entry);
/*
* mas_nomem() may release the lock, causing the allocated area
* to be unavailable, so try to allocate a free area again.
*/
if (mas_nomem(&mas, gfp))
goto retry;
if (mas_is_err(&mas))
ret = xa_err(mas.node);
else
*startp = mas.index;
unlock:
mtree_unlock(mt);
mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
/**
* mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
* @mt: The maple tree.
* @startp: Pointer to ID.
* @range_lo: Lower bound of range to search.
* @range_hi: Upper bound of range to search.
* @entry: The entry to store.
* @next: Pointer to next ID to allocate.
* @gfp: The GFP_FLAGS to use for allocations.
*
* Finds an empty entry in @mt after @next, stores the new index into
* the @startp pointer, stores the entry at that index, then updates @next.
*
* @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
*
* Context: Any context. Takes and releases the mt.lock. May sleep if
* the @gfp flags permit.
*
* Return: 0 if the allocation succeeded without wrapping, 1 if the
* allocation succeeded after wrapping, -ENOMEM if memory could not be
* allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
* free entries.
*/
int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long range_lo, unsigned long range_hi,
unsigned long *next, gfp_t gfp)
{
int ret;
MA_STATE(mas, mt, 0, 0);
if (!mt_is_alloc(mt))
return -EINVAL;
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
mtree_lock(mt);
ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
next, gfp);
mtree_unlock(mt);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_cyclic);
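/*
* Usage sketch (illustrative; 'tree', 'ptr' and the persistent 'next' cursor
* are hypothetical): cyclic ID allocation in the style of idr_alloc_cyclic().
* The tree must have been initialized with MT_FLAGS_ALLOC_RANGE.
*
*	unsigned long id;
*	int ret = mtree_alloc_cyclic(&tree, &id, ptr, 1, U32_MAX, &next,
*				     GFP_KERNEL);
*	// ret == 1 means the search wrapped around to range_lo
*/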
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp)
{
int ret = 0;
MA_STATE(mas, mt, 0, 0);
if (!mt_is_alloc(mt))
return -EINVAL;
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
mtree_lock(mt);
retry:
ret = mas_empty_area_rev(&mas, min, max, size);
if (ret)
goto unlock;
mas_insert(&mas, entry);
/*
* mas_nomem() may release the lock, causing the allocated area
* to be unavailable, so try to allocate a free area again.
*/
if (mas_nomem(&mas, gfp))
goto retry;
if (mas_is_err(&mas))
ret = xa_err(mas.node);
else
*startp = mas.index;
unlock:
mtree_unlock(mt);
mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
/**
* mtree_erase() - Find an index and erase the entire range.
* @mt: The maple tree
* @index: The index to erase
*
* Erasing is the same as a walk to an entry then a store of a NULL to that
* ENTIRE range. In fact, it is implemented as such using the advanced API.
*
* Return: The entry stored at the @index or %NULL
*/
void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
void *entry = NULL;
MA_STATE(mas, mt, index, index);
trace_ma_op(TP_FCT, &mas);
mtree_lock(mt);
entry = mas_erase(&mas);
mtree_unlock(mt);
return entry;
}
EXPORT_SYMBOL(mtree_erase);
/*
* mas_dup_free() - Free an incomplete duplication of a tree.
* @mas: The maple state of an incomplete tree.
*
* The parameter @mas->node passed in indicates that the allocation failed on
* this node. This function frees all nodes starting from @mas->node in the
* reverse order of mas_dup_build(). There is no need to hold the source tree
* lock at this time.
*/
static void mas_dup_free(struct ma_state *mas)
{
struct maple_node *node;
enum maple_type type;
void __rcu **slots;
unsigned char count, i;
/* Maybe the first node allocation failed. */
if (mas_is_none(mas))
return;
while (!mte_is_root(mas->node)) {
mas_ascend(mas);
if (mas->offset) {
mas->offset--;
do {
mas_descend(mas);
mas->offset = mas_data_end(mas);
} while (!mte_is_leaf(mas->node));
mas_ascend(mas);
}
node = mte_to_node(mas->node);
type = mte_node_type(mas->node);
slots = ma_slots(node, type);
count = mas_data_end(mas) + 1;
for (i = 0; i < count; i++)
((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
mt_free_bulk(count, slots);
}
node = mte_to_node(mas->node);
kfree(node);
}
/*
* mas_copy_node() - Copy a maple node and replace the parent.
* @mas: The maple state of source tree.
* @new_mas: The maple state of new tree.
* @parent: The parent of the new node.
*
* Copy @mas->node to @new_mas->node, set @parent to be the parent of
* @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
*/
static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
struct maple_pnode *parent)
{
struct maple_node *node = mte_to_node(mas->node);
struct maple_node *new_node = mte_to_node(new_mas->node);
unsigned long val;
/* Copy the node completely. */
memcpy(new_node, node, sizeof(struct maple_node));
/* Update the parent node pointer. */
val = (unsigned long)node->parent & MAPLE_NODE_MASK;
new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
}
/*
* mas_dup_alloc() - Allocate child nodes for a maple node.
* @mas: The maple state of source tree.
* @new_mas: The maple state of new tree.
* @gfp: The GFP_FLAGS to use for allocations.
*
* This function allocates child nodes for @new_mas->node during the duplication
* process. If memory allocation fails, @mas is set to -ENOMEM.
*/
static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
gfp_t gfp)
{
struct maple_node *node = mte_to_node(mas->node);
struct maple_node *new_node = mte_to_node(new_mas->node);
enum maple_type type;
unsigned char count, i;
void __rcu **slots;
void __rcu **new_slots;
unsigned long val;
/* Allocate memory for child nodes. */
type = mte_node_type(mas->node);
new_slots = ma_slots(new_node, type);
count = mas->node_request = mas_data_end(mas) + 1;
mas_alloc_nodes(mas, gfp);
if (unlikely(mas_is_err(mas)))
return;
slots = ma_slots(node, type);
for (i = 0; i < count; i++) {
val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
val &= MAPLE_NODE_MASK;
new_slots[i] = ma_mnode_ptr((unsigned long)mas_pop_node(mas) |
val);
}
}
/*
* mas_dup_build() - Build a new maple tree from a source tree
* @mas: The maple state of the source tree; it needs to be in MAS_START state.
* @new_mas: The maple state of the new tree; it needs to be in MAS_START state.
* @gfp: The GFP_FLAGS to use for allocations.
*
* This function builds a new tree in DFS preorder. If the memory allocation
* fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
* last node. mas_dup_free() will free the incomplete duplication of a tree.
*
* Note that the attributes of the two trees need to be exactly the same, and the
* new tree needs to be empty, otherwise -EINVAL will be set in @mas.
*/
static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
gfp_t gfp)
{
struct maple_node *node;
struct maple_pnode *parent = NULL;
struct maple_enode *root;
enum maple_type type;
if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
unlikely(!mtree_empty(new_mas->tree))) {
mas_set_err(mas, -EINVAL);
return;
}
root = mas_start(mas);
if (mas_is_ptr(mas) || mas_is_none(mas))
goto set_new_tree;
node = mt_alloc_one(gfp);
if (!node) {
new_mas->status = ma_none;
mas_set_err(mas, -ENOMEM);
return;
}
type = mte_node_type(mas->node);
root = mt_mk_node(node, type);
new_mas->node = root;
new_mas->min = 0;
new_mas->max = ULONG_MAX;
root = mte_mk_root(root);
while (1) {
mas_copy_node(mas, new_mas, parent);
if (!mte_is_leaf(mas->node)) {
/* Only allocate child nodes for non-leaf nodes. */
mas_dup_alloc(mas, new_mas, gfp);
if (unlikely(mas_is_err(mas)))
goto empty_mas;
} else {
/*
* This is the last leaf node and duplication is
* completed.
*/
if (mas->max == ULONG_MAX)
goto done;
/* This is not the last leaf node and needs to go up. */
do {
mas_ascend(mas);
mas_ascend(new_mas);
} while (mas->offset == mas_data_end(mas));
/* Move to the next subtree. */
mas->offset++;
new_mas->offset++;
}
mas_descend(mas);
parent = ma_parent_ptr(mte_to_node(new_mas->node));
mas_descend(new_mas);
mas->offset = 0;
new_mas->offset = 0;
}
done:
/* Specially handle the parent of the root node. */
mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
set_new_tree:
/* Make them the same height */
new_mas->tree->ma_flags = mas->tree->ma_flags;
rcu_assign_pointer(new_mas->tree->ma_root, root);
empty_mas:
mas_empty_nodes(mas);
}
/**
* __mt_dup(): Duplicate an entire maple tree
* @mt: The source maple tree
* @new: The new maple tree
* @gfp: The GFP_FLAGS to use for allocations
*
* This function duplicates a maple tree in Depth-First Search (DFS) pre-order
* traversal. It uses memcpy() to copy nodes in the source tree and allocate
* new child nodes in non-leaf nodes. The new node is exactly the same as the
* source node except for all the addresses stored in it. It will be faster than
* traversing all elements in the source tree and inserting them one by one into
* the new tree.
* The user needs to ensure that the attributes of the source tree and the new
* tree are the same, and the new tree needs to be an empty tree, otherwise
* -EINVAL will be returned.
* Note that the user needs to manually lock the source tree and the new tree.
*
* Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
* the attributes of the two trees are different or the new tree is not an empty
* tree.
*/
int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
{
int ret = 0;
MA_STATE(mas, mt, 0, 0);
MA_STATE(new_mas, new, 0, 0);
mas_dup_build(&mas, &new_mas, gfp);
if (unlikely(mas_is_err(&mas))) {
ret = xa_err(mas.node);
if (ret == -ENOMEM)
mas_dup_free(&new_mas);
}
return ret;
}
EXPORT_SYMBOL(__mt_dup);
/**
* mtree_dup(): Duplicate an entire maple tree
* @mt: The source maple tree
* @new: The new maple tree
* @gfp: The GFP_FLAGS to use for allocations
*
* This function duplicates a maple tree in Depth-First Search (DFS) pre-order
* traversal. It uses memcpy() to copy nodes in the source tree and allocate
* new child nodes in non-leaf nodes. The new node is exactly the same as the
* source node except for all the addresses stored in it. It will be faster than
* traversing all elements in the source tree and inserting them one by one into
* the new tree.
* The user needs to ensure that the attributes of the source tree and the new
* tree are the same, and the new tree needs to be an empty tree, otherwise
* -EINVAL will be returned.
*
* Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
* the attributes of the two trees are different or the new tree is not an empty
* tree.
*/
int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
{
int ret = 0;
MA_STATE(mas, mt, 0, 0);
MA_STATE(new_mas, new, 0, 0);
mas_lock(&new_mas);
mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
mas_dup_build(&mas, &new_mas, gfp);
mas_unlock(&mas);
if (unlikely(mas_is_err(&mas))) {
ret = xa_err(mas.node);
if (ret == -ENOMEM)
mas_dup_free(&new_mas);
}
mas_unlock(&new_mas);
return ret;
}
EXPORT_SYMBOL(mtree_dup);
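/*
 * Example (illustrative sketch, not part of the original source):
 * duplicating one tree into another with mtree_dup(), which takes the
 * locks itself; __mt_dup() is the variant for callers that already hold
 * both tree locks. The names src and dst are hypothetical; dst must be
 * initialized with the same flags that were used for src and must still
 * be empty, otherwise -EINVAL is returned.
 *
 *	struct maple_tree dst;
 *	int ret;
 *
 *	mt_init_flags(&dst, MT_FLAGS_ALLOC_RANGE);
 *	ret = mtree_dup(src, &dst, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	mtree_destroy(&dst);
 *
 * MT_FLAGS_ALLOC_RANGE above is only an example value and assumes src was
 * created with the same flag.
 */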
/**
* __mt_destroy() - Walk and free all nodes of a locked maple tree.
* @mt: The maple tree
*
* Note: Does not handle locking.
*/
void __mt_destroy(struct maple_tree *mt)
{
void *root = mt_root_locked(mt);
rcu_assign_pointer(mt->ma_root, NULL);
if (xa_is_node(root))
mte_destroy_walk(root, mt);
mt->ma_flags = mt_attr(mt);
}
EXPORT_SYMBOL_GPL(__mt_destroy);
/**
* mtree_destroy() - Destroy a maple tree
* @mt: The maple tree
*
* Frees all resources used by the tree. Handles locking.
*/
void mtree_destroy(struct maple_tree *mt)
{
mtree_lock(mt);
__mt_destroy(mt);
mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);
/**
* mt_find() - Search from the start up until an entry is found.
* @mt: The maple tree
* @index: Pointer which contains the start location of the search
* @max: The maximum value of the search range
*
* Takes RCU read lock internally to protect the search, which does not
* protect the returned pointer after dropping RCU read lock.
* See also: Documentation/core-api/maple_tree.rst
*
* In case an entry is found, @index is updated to point to the next
* possible entry independent of whether the found entry is occupying a
* single index or a range of indices.
*
* Return: The entry at or after the @index or %NULL
*/
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
MA_STATE(mas, mt, *index, *index);
void *entry;
#ifdef CONFIG_DEBUG_MAPLE_TREE
unsigned long copy = *index;
#endif
trace_ma_read(TP_FCT, &mas);
if ((*index) > max)
return NULL;
rcu_read_lock();
retry:
entry = mas_state_walk(&mas);
if (mas_is_start(&mas))
goto retry;
if (unlikely(xa_is_zero(entry)))
entry = NULL;
if (entry)
goto unlock;
while (mas_is_active(&mas) && (mas.last < max)) {
entry = mas_next_slot(&mas, max, false);
if (likely(entry && !xa_is_zero(entry)))
break;
}
if (unlikely(xa_is_zero(entry)))
entry = NULL;
unlock:
rcu_read_unlock();
if (likely(entry)) {
*index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
pr_err("index not increased! %lx <= %lx\n",
*index, copy);
#endif
}
return entry;
}
EXPORT_SYMBOL(mt_find);
/**
* mt_find_after() - Search from the start up until an entry is found.
* @mt: The maple tree
* @index: Pointer which contains the start location of the search
* @max: The maximum value to check
*
* Same as mt_find() except that it checks @index for 0 before
* searching. If @index == 0, the search is aborted. This covers a wrap
* around of @index to 0 in an iterator loop.
*
* Return: The entry at or after the @index or %NULL
*/
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
unsigned long max)
{
if (!(*index))
return NULL;
return mt_find(mt, index, max);
}
EXPORT_SYMBOL(mt_find_after);
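/*
 * Example (illustrative sketch, not part of the original source): a simple
 * forward scan built from mt_find()/mt_find_after(), in the style of an
 * mt_for_each()-like loop. example_tree is a hypothetical name. The RCU
 * read lock is held around the whole walk because mt_find() only protects
 * each individual lookup, not the returned pointers afterwards.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = mt_find(&example_tree, &index, ULONG_MAX); entry;
 *	     entry = mt_find_after(&example_tree, &index, ULONG_MAX))
 *		pr_info("entry %p ends before index %lu\n", entry, index);
 *	rcu_read_unlock();
 */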
#ifdef CONFIG_DEBUG_MAPLE_TREE
atomic_t maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
atomic_t maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
#ifndef __KERNEL__
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
kmem_cache_set_non_kernel(maple_node_cache, val);
}
extern void kmem_cache_set_callback(struct kmem_cache *cachep,
void (*callback)(void *));
void mt_set_callback(void (*callback)(void *))
{
kmem_cache_set_callback(maple_node_cache, callback);
}
extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private);
void mt_set_private(void *private)
{
kmem_cache_set_private(maple_node_cache, private);
}
extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
return kmem_cache_get_alloc(maple_node_cache);
}
extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
void mt_zero_nr_tallocated(void)
{
kmem_cache_zero_nr_tallocated(maple_node_cache);
}
extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
unsigned int mt_nr_tallocated(void)
{
return kmem_cache_nr_tallocated(maple_node_cache);
}
extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
unsigned int mt_nr_allocated(void)
{
return kmem_cache_nr_allocated(maple_node_cache);
}
void mt_cache_shrink(void)
{
}
#else
/*
* mt_cache_shrink() - For testing, don't use this.
*
* Certain testcases can trigger an OOM when combined with other memory
* debugging configuration options. This function is used to reduce the
* possibility of an out of memory event due to kmem_cache objects remaining
* around for longer than usual.
*/
void mt_cache_shrink(void)
{
kmem_cache_shrink(maple_node_cache);
}
EXPORT_SYMBOL_GPL(mt_cache_shrink);
#endif /* not defined __KERNEL__ */
/*
* mas_get_slot() - Get the entry in the maple state node stored at @offset.
* @mas: The maple state
* @offset: The offset into the slot array to fetch.
*
* Return: The entry stored at @offset.
*/
static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
unsigned char offset)
{
return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
offset);
}
/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
struct maple_enode *p, *mn = mas->node;
unsigned long p_min, p_max;
mas_next_node(mas, mas_mn(mas), max);
if (!mas_is_overflow(mas))
return;
if (mte_is_root(mn))
return;
mas->node = mn;
mas_ascend(mas);
do {
p = mas->node;
p_min = mas->min;
p_max = mas->max;
mas_prev_node(mas, 0);
} while (!mas_is_underflow(mas));
mas->node = p;
mas->max = p_max;
mas->min = p_min;
}
/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format);
static void mt_dump_range(unsigned long min, unsigned long max,
unsigned int depth, enum mt_dump_format format)
{
static const char spaces[] = "                                ";
switch(format) {
case mt_dump_hex:
if (min == max)
pr_info("%.*s%lx: ", depth * 2, spaces, min);
else
pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
break;
case mt_dump_dec:
if (min == max)
pr_info("%.*s%lu: ", depth * 2, spaces, min);
else
pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
}
}
static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
unsigned int depth, enum mt_dump_format format)
{
mt_dump_range(min, max, depth, format);
if (xa_is_value(entry))
pr_cont("value %ld (0x%lx) [" PTR_FMT "]\n", xa_to_value(entry),
xa_to_value(entry), entry);
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
else if (mt_is_reserved(entry))
pr_cont("UNKNOWN ENTRY (" PTR_FMT ")\n", entry);
else
pr_cont(PTR_FMT "\n", entry);
}
static void mt_dump_range64(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format)
{
struct maple_range_64 *node = &mte_to_node(entry)->mr64;
bool leaf = mte_is_leaf(entry);
unsigned long first = min;
int i;
pr_cont(" contents: ");
for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
switch(format) {
case mt_dump_hex:
pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
break;
case mt_dump_dec:
pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
}
}
pr_cont(PTR_FMT "\n", node->slot[i]);
for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
unsigned long last = max;
if (i < (MAPLE_RANGE64_SLOTS - 1))
last = node->pivot[i];
else if (!node->slot[i] && max != mt_node_max(entry))
break;
if (last == 0 && i > 0)
break;
if (leaf)
mt_dump_entry(mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
else if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
switch(format) {
case mt_dump_hex:
pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
case mt_dump_dec:
pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
}
}
first = last + 1;
}
}
static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format)
{
struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
unsigned long first = min;
int i;
pr_cont(" contents: ");
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
switch (format) {
case mt_dump_hex:
pr_cont("%lx ", node->gap[i]);
break;
case mt_dump_dec:
pr_cont("%lu ", node->gap[i]);
}
}
pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
switch (format) {
case mt_dump_hex:
pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
break;
case mt_dump_dec:
pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
}
}
pr_cont(PTR_FMT "\n", node->slot[i]);
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
unsigned long last = max;
if (i < (MAPLE_ARANGE64_SLOTS - 1))
last = node->pivot[i];
else if (!node->slot[i])
break;
if (last == 0 && i > 0)
break;
if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
switch(format) {
case mt_dump_hex:
pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
case mt_dump_dec:
pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
}
}
first = last + 1;
}
}
static void mt_dump_node(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format)
{
struct maple_node *node = mte_to_node(entry);
unsigned int type = mte_node_type(entry);
unsigned int i;
mt_dump_range(min, max, depth, format);
pr_cont("node " PTR_FMT " depth %d type %d parent " PTR_FMT, node,
depth, type, node ? node->parent : NULL);
switch (type) {
case maple_dense:
pr_cont("\n");
for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
if (min + i > max)
pr_cont("OUT OF RANGE: ");
mt_dump_entry(mt_slot(mt, node->slot, i),
min + i, min + i, depth, format);
}
break;
case maple_leaf_64:
case maple_range_64:
mt_dump_range64(mt, entry, min, max, depth, format);
break;
case maple_arange_64:
mt_dump_arange64(mt, entry, min, max, depth, format);
break;
default:
pr_cont(" UNKNOWN TYPE\n");
}
}
void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
{
void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
pr_info("maple_tree(" PTR_FMT ") flags %X, height %u root " PTR_FMT "\n",
mt, mt->ma_flags, mt_height(mt), entry);
if (xa_is_node(entry))
mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
else if (entry)
mt_dump_entry(entry, 0, 0, 0, format);
else
pr_info("(empty)\n");
}
EXPORT_SYMBOL_GPL(mt_dump);
/*
* Calculate the maximum gap in a node and check if that's what is reported in
* the parent (unless root).
*/
static void mas_validate_gaps(struct ma_state *mas)
{
struct maple_enode *mte = mas->node;
struct maple_node *p_mn, *node = mte_to_node(mte);
enum maple_type mt = mte_node_type(mas->node);
unsigned long gap = 0, max_gap = 0;
unsigned long p_end, p_start = mas->min;
unsigned char p_slot, offset;
unsigned long *gaps = NULL;
unsigned long *pivots = ma_pivots(node, mt);
unsigned int i;
if (ma_is_dense(mt)) {
for (i = 0; i < mt_slot_count(mte); i++) {
if (mas_get_slot(mas, i)) {
if (gap > max_gap)
max_gap = gap;
gap = 0;
continue;
}
gap++;
}
goto counted;
}
gaps = ma_gaps(node, mt);
for (i = 0; i < mt_slot_count(mte); i++) {
p_end = mas_safe_pivot(mas, pivots, i, mt);
if (!gaps) {
if (!mas_get_slot(mas, i))
gap = p_end - p_start + 1;
} else {
void *entry = mas_get_slot(mas, i);
gap = gaps[i];
MT_BUG_ON(mas->tree, !entry);
if (gap > p_end - p_start + 1) {
pr_err(PTR_FMT "[%u] %lu >= %lu - %lu + 1 (%lu)\n",
mas_mn(mas), i, gap, p_end, p_start,
p_end - p_start + 1);
MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
}
}
if (gap > max_gap)
max_gap = gap;
p_start = p_end + 1;
if (p_end >= mas->max)
break;
}
counted:
if (mt == maple_arange_64) {
MT_BUG_ON(mas->tree, !gaps);
offset = ma_meta_gap(node);
if (offset > i) {
pr_err("gap offset " PTR_FMT "[%u] is invalid\n", node, offset);
MT_BUG_ON(mas->tree, 1);
}
if (gaps[offset] != max_gap) {
pr_err("gap " PTR_FMT "[%u] is not the largest gap %lu\n",
node, offset, max_gap);
MT_BUG_ON(mas->tree, 1);
}
for (i++ ; i < mt_slot_count(mte); i++) {
if (gaps[i] != 0) {
pr_err("gap " PTR_FMT "[%u] beyond node limit != 0\n",
node, i);
MT_BUG_ON(mas->tree, 1);
}
}
}
if (mte_is_root(mte))
return;
p_slot = mte_parent_slot(mas->node);
p_mn = mte_parent(mte);
MT_BUG_ON(mas->tree, max_gap > mas->max);
if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
pr_err("gap " PTR_FMT "[%u] != %lu\n", p_mn, p_slot, max_gap);
mt_dump(mas->tree, mt_dump_hex);
MT_BUG_ON(mas->tree, 1);
}
}
static void mas_validate_parent_slot(struct ma_state *mas)
{
struct maple_node *parent;
struct maple_enode *node;
enum maple_type p_type;
unsigned char p_slot;
void __rcu **slots;
int i;
if (mte_is_root(mas->node))
return;
p_slot = mte_parent_slot(mas->node);
p_type = mas_parent_type(mas, mas->node);
parent = mte_parent(mas->node);
slots = ma_slots(parent, p_type);
MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
/* Check prev/next parent slot for duplicate node entry */
for (i = 0; i < mt_slots[p_type]; i++) {
node = mas_slot(mas, slots, i);
if (i == p_slot) {
if (node != mas->node)
pr_err("parent " PTR_FMT "[%u] does not have " PTR_FMT "\n",
parent, i, mas_mn(mas));
MT_BUG_ON(mas->tree, node != mas->node);
} else if (node == mas->node) {
pr_err("Invalid child " PTR_FMT " at parent " PTR_FMT "[%u] p_slot %u\n",
mas_mn(mas), parent, i, p_slot);
MT_BUG_ON(mas->tree, node == mas->node);
}
}
}
static void mas_validate_child_slot(struct ma_state *mas)
{
enum maple_type type = mte_node_type(mas->node);
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
struct maple_enode *child;
unsigned char i;
if (mte_is_leaf(mas->node))
return;
for (i = 0; i < mt_slots[type]; i++) {
child = mas_slot(mas, slots, i);
if (!child) {
pr_err("Non-leaf node lacks child at " PTR_FMT "[%u]\n",
mas_mn(mas), i);
MT_BUG_ON(mas->tree, 1);
}
if (mte_parent_slot(child) != i) {
pr_err("Slot error at " PTR_FMT "[%u]: child " PTR_FMT " has pslot %u\n",
mas_mn(mas), i, mte_to_node(child),
mte_parent_slot(child));
MT_BUG_ON(mas->tree, 1);
}
if (mte_parent(child) != mte_to_node(mas->node)) {
pr_err("child " PTR_FMT " has parent " PTR_FMT " not " PTR_FMT "\n",
mte_to_node(child), mte_parent(child),
mte_to_node(mas->node));
MT_BUG_ON(mas->tree, 1);
}
if (i < mt_pivots[type] && pivots[i] == mas->max)
break;
}
}
/*
* Validate all pivots are within mas->min and mas->max, check metadata ends
* where the maximum ends and ensure there are no slots or pivots set outside of
* the end of the data.
*/
static void mas_validate_limits(struct ma_state *mas)
{
int i;
unsigned long prev_piv = 0;
enum maple_type type = mte_node_type(mas->node);
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
for (i = 0; i < mt_slots[type]; i++) {
unsigned long piv;
piv = mas_safe_pivot(mas, pivots, i, type);
if (!piv && (i != 0)) {
pr_err("Missing node limit pivot at " PTR_FMT "[%u]",
mas_mn(mas), i);
MAS_WARN_ON(mas, 1);
}
if (prev_piv > piv) {
pr_err(PTR_FMT "[%u] piv %lu < prev_piv %lu\n",
mas_mn(mas), i, piv, prev_piv);
MAS_WARN_ON(mas, piv < prev_piv);
}
if (piv < mas->min) {
pr_err(PTR_FMT "[%u] %lu < %lu\n", mas_mn(mas), i,
piv, mas->min);
MAS_WARN_ON(mas, piv < mas->min);
}
if (piv > mas->max) {
pr_err(PTR_FMT "[%u] %lu > %lu\n", mas_mn(mas), i,
piv, mas->max);
MAS_WARN_ON(mas, piv > mas->max);
}
prev_piv = piv;
if (piv == mas->max)
break;
}
if (mas_data_end(mas) != i) {
pr_err("node" PTR_FMT ": data_end %u != the last slot offset %u\n",
mas_mn(mas), mas_data_end(mas), i);
MT_BUG_ON(mas->tree, 1);
}
for (i += 1; i < mt_slots[type]; i++) {
void *entry = mas_slot(mas, slots, i);
if (entry && (i != mt_slots[type] - 1)) {
pr_err(PTR_FMT "[%u] should not have entry " PTR_FMT "\n",
mas_mn(mas), i, entry);
MT_BUG_ON(mas->tree, entry != NULL);
}
if (i < mt_pivots[type]) {
unsigned long piv = pivots[i];
if (!piv)
continue;
pr_err(PTR_FMT "[%u] should not have piv %lu\n",
mas_mn(mas), i, piv);
MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
}
}
}
static void mt_validate_nulls(struct maple_tree *mt)
{
void *entry, *last = (void *)1;
unsigned char offset = 0;
void __rcu **slots;
MA_STATE(mas, mt, 0, 0);
mas_start(&mas);
if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
return;
while (!mte_is_leaf(mas.node))
mas_descend(&mas);
slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
do {
entry = mas_slot(&mas, slots, offset);
if (!last && !entry) {
pr_err("Sequential nulls end at " PTR_FMT "[%u]\n",
mas_mn(&mas), offset);
}
MT_BUG_ON(mt, !last && !entry);
last = entry;
if (offset == mas_data_end(&mas)) {
mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
if (mas_is_overflow(&mas))
return;
offset = 0;
slots = ma_slots(mte_to_node(mas.node),
mte_node_type(mas.node));
} else {
offset++;
}
} while (!mas_is_overflow(&mas));
}
/*
* validate a maple tree by checking:
* 1. The limits (pivots are within mas->min to mas->max)
* 2. The gap is correctly set in the parents
*/
void mt_validate(struct maple_tree *mt)
__must_hold(mas->tree->ma_lock)
{
unsigned char end;
MA_STATE(mas, mt, 0, 0);
mas_start(&mas);
if (!mas_is_active(&mas))
return;
while (!mte_is_leaf(mas.node))
mas_descend(&mas);
while (!mas_is_overflow(&mas)) {
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
(!mte_is_root(mas.node)))) {
pr_err("Invalid size %u of " PTR_FMT "\n",
end, mas_mn(&mas));
}
mas_validate_parent_slot(&mas);
mas_validate_limits(&mas);
mas_validate_child_slot(&mas);
if (mt_is_alloc(mt))
mas_validate_gaps(&mas);
mas_dfs_postorder(&mas, ULONG_MAX);
}
mt_validate_nulls(mt);
}
EXPORT_SYMBOL_GPL(mt_validate);
void mas_dump(const struct ma_state *mas)
{
pr_err("MAS: tree=" PTR_FMT " enode=" PTR_FMT " ",
mas->tree, mas->node);
switch (mas->status) {
case ma_active:
pr_err("(ma_active)");
break;
case ma_none:
pr_err("(ma_none)");
break;
case ma_root:
pr_err("(ma_root)");
break;
case ma_start:
pr_err("(ma_start) ");
break;
case ma_pause:
pr_err("(ma_pause) ");
break;
case ma_overflow:
pr_err("(ma_overflow) ");
break;
case ma_underflow:
pr_err("(ma_underflow) ");
break;
case ma_error:
pr_err("(ma_error) ");
break;
}
pr_err("Store Type: ");
switch (mas->store_type) {
case wr_invalid:
pr_err("invalid store type\n");
break;
case wr_new_root:
pr_err("new_root\n");
break;
case wr_store_root:
pr_err("store_root\n");
break;
case wr_exact_fit:
pr_err("exact_fit\n");
break;
case wr_split_store:
pr_err("split_store\n");
break;
case wr_slot_store:
pr_err("slot_store\n");
break;
case wr_append:
pr_err("append\n");
break;
case wr_node_store:
pr_err("node_store\n");
break;
case wr_spanning_store:
pr_err("spanning_store\n");
break;
case wr_rebalance:
pr_err("rebalance\n");
break;
}
pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
mas->index, mas->last);
pr_err(" min=%lx max=%lx sheaf=" PTR_FMT ", request %lu depth=%u, flags=%x\n",
mas->min, mas->max, mas->sheaf, mas->node_request, mas->depth,
mas->mas_flags);
if (mas->index > mas->last)
pr_err("Check index & last\n");
}
EXPORT_SYMBOL_GPL(mas_dump);
void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
pr_err("WR_MAS: node=" PTR_FMT " r_min=%lx r_max=%lx\n",
wr_mas->node, wr_mas->r_min, wr_mas->r_max);
pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);
#endif /* CONFIG_DEBUG_MAPLE_TREE */
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <linux/static_call.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/entry-common.h>
#include <asm/cpu.h>
#include <asm/cpuid/api.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>
#include <asm/unwind.h>
#include <asm/tdx.h>
#include <asm/mmu_context.h>
#include <asm/msr.h>
#include <asm/shstk.h>
#include "process.h"
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
* so they are allowed to end up in the .data..cacheline_aligned
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
.x86_tss = {
/*
* .sp0 is only used when entering ring 0 from a lower
* privilege level. Since the init task never runs anything
* but ring 0 code, there is no need for a valid value here.
* Poison it.
*/
.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
#ifdef CONFIG_X86_32
.sp1 = TOP_OF_INIT_STACK,
.ss0 = __KERNEL_DS,
.ss1 = __KERNEL_CS,
#endif
.io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
* The cache may be in an incoherent state and needs flushing during kexec.
* E.g., on SME/TDX platforms, dirty cacheline aliases with and without
* encryption bit(s) can coexist and the cache needs to be flushed before
* booting to the new kernel to avoid the silent memory corruption due to
* dirty cachelines with different encryption property being written back
* to the memory.
*/
DEFINE_PER_CPU(bool, cache_state_incoherent);
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/* fpu_clone() will initialize the "dst_fpu" memory */
memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(*dst), 0);
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
return 0;
}
#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{
if (fpu_state_size_dynamic() && !(tsk->flags & (PF_KTHREAD | PF_USER_WORKER)))
fpstate_free(x86_task_fpu(tsk));
}
#endif
/*
* Free thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
if (test_thread_flag(TIF_IO_BITMAP))
io_bitmap_exit(tsk);
free_vm86(t);
shstk_free(tsk);
fpu__drop(tsk);
}
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
struct user_desc __user *utls = (struct user_desc __user *)tls;
if (in_ia32_syscall())
return do_set_thread_area(p, -1, utls, 0);
else
return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}
__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
int (*fn)(void *), void *fn_arg)
{
schedule_tail(prev);
/* Is this a kernel thread? */
if (unlikely(fn)) {
fn(fn_arg);
/*
* A kernel thread is allowed to return here after successfully
* calling kernel_execve(). Exit to userspace to complete the
* execve() syscall.
*/
regs->ax = 0;
}
syscall_exit_to_user_mode(regs);
}
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
u64 clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
struct inactive_task_frame *frame;
struct fork_frame *fork_frame;
struct pt_regs *childregs;
unsigned long new_ssp;
int ret = 0;
childregs = task_pt_regs(p);
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
frame->bp = encode_frame_pointer(childregs);
frame->ret_addr = (unsigned long) ret_from_fork_asm;
p->thread.sp = (unsigned long) fork_frame;
p->thread.io_bitmap = NULL;
clear_tsk_thread_flag(p, TIF_IO_BITMAP);
p->thread.iopl_warn = 0;
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
#ifdef CONFIG_X86_64
current_save_fsgs();
p->thread.fsindex = current->thread.fsindex;
p->thread.fsbase = current->thread.fsbase;
p->thread.gsindex = current->thread.gsindex;
p->thread.gsbase = current->thread.gsbase;
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
if (p->mm && (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM)
set_bit(MM_CONTEXT_LOCK_LAM, &p->mm->context.flags);
#else
p->thread.sp0 = (unsigned long) (childregs + 1);
savesegment(gs, p->thread.gs);
/*
* Clear all status flags including IF and set fixed bit. 64bit
* does not have this initialization as the frame does not contain
* flags. The flags consistency (especially vs. AC) is there
* ensured via objtool, which lacks 32bit support.
*/
frame->flags = X86_EFLAGS_FIXED;
#endif
/*
* Allocate a new shadow stack for thread if needed. If shadow stack,
* is disabled, new_ssp will remain 0, and fpu_clone() will know not to
* update it.
*/
new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size);
if (IS_ERR_VALUE(new_ssp))
return PTR_ERR((void *)new_ssp);
fpu_clone(p, clone_flags, args->fn, new_ssp);
/* Kernel thread ? */
if (unlikely(p->flags & PF_KTHREAD)) {
p->thread.pkru = pkru_get_init_value();
memset(childregs, 0, sizeof(struct pt_regs));
kthread_frame_init(frame, args->fn, args->fn_arg);
return 0;
}
/*
* Clone current's PKRU value from hardware. tsk->thread.pkru
* is only valid when scheduled out.
*/
p->thread.pkru = read_pkru();
frame->bx = 0;
*childregs = *current_pt_regs();
childregs->ax = 0;
if (sp)
childregs->sp = sp;
if (unlikely(args->fn)) {
/*
* A user space thread, but it doesn't return to
* ret_after_fork().
*
* In order to indicate that to tools like gdb,
* we reset the stack and instruction pointers.
*
* It does the same kernel frame setup to return to a kernel
* function that a kernel thread does.
*/
childregs->sp = 0;
childregs->ip = 0;
kthread_frame_init(frame, args->fn, args->fn_arg);
return 0;
}
/* Set a new TLS for the child thread? */
if (clone_flags & CLONE_SETTLS)
ret = set_new_tls(p, tls);
if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
io_bitmap_share(p);
return ret;
}
static void pkru_flush_thread(void)
{
/*
* If PKRU is enabled the default PKRU value has to be loaded into
* the hardware right here (similar to context switch).
*/
pkru_write_default();
}
void flush_thread(void)
{
struct task_struct *tsk = current;
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
fpu_flush_thread();
pkru_flush_thread();
}
void disable_TSC(void)
{
preempt_disable();
if (!test_and_set_thread_flag(TIF_NOTSC))
/*
* Must flip the CPU state synchronously with
* TIF_NOTSC in the current running context.
*/
cr4_set_bits(X86_CR4_TSD);
preempt_enable();
}
static void enable_TSC(void)
{
preempt_disable();
if (test_and_clear_thread_flag(TIF_NOTSC))
/*
* Must flip the CPU state synchronously with
* TIF_NOTSC in the current running context.
*/
cr4_clear_bits(X86_CR4_TSD);
preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
unsigned int val;
if (test_thread_flag(TIF_NOTSC))
val = PR_TSC_SIGSEGV;
else
val = PR_TSC_ENABLE;
return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
if (val == PR_TSC_SIGSEGV)
disable_TSC();
else if (val == PR_TSC_ENABLE)
enable_TSC();
else
return -EINVAL;
return 0;
}
DEFINE_PER_CPU(u64, msr_misc_features_shadow);
static void set_cpuid_faulting(bool on)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
u64 msrval;
msrval = this_cpu_read(msr_misc_features_shadow);
msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
this_cpu_write(msr_misc_features_shadow, msrval);
wrmsrq(MSR_MISC_FEATURES_ENABLES, msrval);
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
if (on)
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_CPUID_USER_DIS_BIT);
else
msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_CPUID_USER_DIS_BIT);
}
}
static void disable_cpuid(void)
{
preempt_disable();
if (!test_and_set_thread_flag(TIF_NOCPUID)) {
/*
* Must flip the CPU state synchronously with
* TIF_NOCPUID in the current running context.
*/
set_cpuid_faulting(true);
}
preempt_enable();
}
static void enable_cpuid(void)
{
preempt_disable();
if (test_and_clear_thread_flag(TIF_NOCPUID)) {
/*
* Must flip the CPU state synchronously with
* TIF_NOCPUID in the current running context.
*/
set_cpuid_faulting(false);
}
preempt_enable();
}
static int get_cpuid_mode(void)
{
return !test_thread_flag(TIF_NOCPUID);
}
static int set_cpuid_mode(unsigned long cpuid_enabled)
{
if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
return -ENODEV;
if (cpuid_enabled)
enable_cpuid();
else
disable_cpuid();
return 0;
}
/*
* Called immediately after a successful exec.
*/
void arch_setup_new_exec(void)
{
/* If cpuid was previously disabled for this task, re-enable it. */
if (test_thread_flag(TIF_NOCPUID))
enable_cpuid();
/*
* Don't inherit TIF_SSBD across exec boundary when
* PR_SPEC_DISABLE_NOEXEC is used.
*/
if (test_thread_flag(TIF_SSBD) &&
task_spec_ssb_noexec(current)) {
clear_thread_flag(TIF_SSBD);
task_clear_spec_ssb_disable(current);
task_clear_spec_ssb_noexec(current);
speculation_ctrl_update(read_thread_flags());
}
mm_reset_untag_mask(current->mm);
}
#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
/*
* Invalidate I/O bitmap if the previous task used it. This prevents
* any possible leakage of an active I/O bitmap.
*
* If the next task has an I/O bitmap it will handle it on exit to
* user mode.
*/
if (tifp & _TIF_IO_BITMAP)
tss_invalidate_io_bitmap();
}
static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
/*
* Copy at least the byte range of the incoming tasks bitmap which
* covers the permitted I/O ports.
*
* If the previous task which used an I/O bitmap had more bits
* permitted, then the copy needs to cover those as well so they
* get turned off.
*/
memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
max(tss->io_bitmap.prev_max, iobm->max));
/*
* Store the new max and the sequence number of this bitmap
* and a pointer to the bitmap itself.
*/
tss->io_bitmap.prev_max = iobm->max;
tss->io_bitmap.prev_sequence = iobm->sequence;
}
/**
* native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
*/
void native_tss_update_io_bitmap(void)
{
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
struct thread_struct *t = &current->thread;
u16 *base = &tss->x86_tss.io_bitmap_base;
if (!test_thread_flag(TIF_IO_BITMAP)) {
native_tss_invalidate_io_bitmap();
return;
}
if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
*base = IO_BITMAP_OFFSET_VALID_ALL;
} else {
struct io_bitmap *iobm = t->io_bitmap;
if (WARN_ON_ONCE(!iobm)) {
clear_thread_flag(TIF_IO_BITMAP);
native_tss_invalidate_io_bitmap();
}
/*
* Only copy bitmap data when the sequence number differs. The
* update time is accounted to the incoming task.
*/
if (tss->io_bitmap.prev_sequence != iobm->sequence)
tss_copy_io_bitmap(tss, iobm);
/* Enable the bitmap */
*base = IO_BITMAP_OFFSET_VALID_MAP;
}
/*
* Make sure that the TSS limit is covering the IO bitmap. It might have
* been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
* access from user space to trigger a #GP because the bitmap is outside
* the TSS limit.
*/
refresh_tss_limit();
}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif
#ifdef CONFIG_SMP
struct ssb_state {
struct ssb_state *shared_state;
raw_spinlock_t lock;
unsigned int disable_state;
unsigned long local_state;
};
#define LSTATE_SSB 0
static DEFINE_PER_CPU(struct ssb_state, ssb_state);
void speculative_store_bypass_ht_init(void)
{
struct ssb_state *st = this_cpu_ptr(&ssb_state);
unsigned int this_cpu = smp_processor_id();
unsigned int cpu;
st->local_state = 0;
/*
* Shared state setup happens once on the first bringup
* of the CPU. It's not destroyed on CPU hotunplug.
*/
if (st->shared_state)
return;
raw_spin_lock_init(&st->lock);
/*
* Go over HT siblings and check whether one of them has set up the
* shared state pointer already.
*/
for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
if (cpu == this_cpu)
continue;
if (!per_cpu(ssb_state, cpu).shared_state)
continue;
/* Link it to the state of the sibling: */
st->shared_state = per_cpu(ssb_state, cpu).shared_state;
return;
}
/*
* First HT sibling to come up on the core. Link shared state of
* the first HT sibling to itself. The siblings on the same core
* which come up later will see the shared state pointer and link
* themselves to the state of this CPU.
*/
st->shared_state = st;
}
/*
* Logic is: First HT sibling enables SSBD for both siblings in the core
* and the last sibling to disable it disables it for the whole core. This is how
* MSR_SPEC_CTRL works in "hardware":
*
* CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
*/
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
struct ssb_state *st = this_cpu_ptr(&ssb_state);
u64 msr = x86_amd_ls_cfg_base;
if (!static_cpu_has(X86_FEATURE_ZEN)) {
msr |= ssbd_tif_to_amd_ls_cfg(tifn);
wrmsrq(MSR_AMD64_LS_CFG, msr);
return;
}
if (tifn & _TIF_SSBD) {
/*
* Since this can race with prctl(), block reentry on the
* same CPU.
*/
if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
return;
msr |= x86_amd_ls_cfg_ssbd_mask;
raw_spin_lock(&st->shared_state->lock);
/* First sibling enables SSBD: */
if (!st->shared_state->disable_state)
wrmsrq(MSR_AMD64_LS_CFG, msr);
st->shared_state->disable_state++;
raw_spin_unlock(&st->shared_state->lock);
} else {
if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
return;
raw_spin_lock(&st->shared_state->lock);
st->shared_state->disable_state--;
if (!st->shared_state->disable_state)
wrmsrq(MSR_AMD64_LS_CFG, msr);
raw_spin_unlock(&st->shared_state->lock);
}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
wrmsrq(MSR_AMD64_LS_CFG, msr);
}
#endif
static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
/*
* SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
* so ssbd_tif_to_spec_ctrl() just works.
*/
wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
/*
* Update the MSRs managing speculation control, during context switch.
*
* tifp: Previous task's thread flags
* tifn: Next task's thread flags
*/
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
unsigned long tifn)
{
unsigned long tif_diff = tifp ^ tifn;
u64 msr = x86_spec_ctrl_base;
bool updmsr = false;
lockdep_assert_irqs_disabled();
/* Handle change of TIF_SSBD depending on the mitigation method. */
if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
if (tif_diff & _TIF_SSBD)
amd_set_ssb_virt_state(tifn);
} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
if (tif_diff & _TIF_SSBD)
amd_set_core_ssb_state(tifn);
} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
updmsr |= !!(tif_diff & _TIF_SSBD);
msr |= ssbd_tif_to_spec_ctrl(tifn);
}
/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
if (IS_ENABLED(CONFIG_SMP) &&
static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);
msr |= stibp_tif_to_spec_ctrl(tifn);
}
if (updmsr)
update_spec_ctrl_cond(msr);
}
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
if (task_spec_ssb_disable(tsk))
set_tsk_thread_flag(tsk, TIF_SSBD);
else
clear_tsk_thread_flag(tsk, TIF_SSBD);
if (task_spec_ib_disable(tsk))
set_tsk_thread_flag(tsk, TIF_SPEC_IB);
else
clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
}
/* Return the updated threadinfo flags */
return read_task_thread_flags(tsk);
}
void speculation_ctrl_update(unsigned long tif)
{
unsigned long flags;
/* Forced update. Make sure all relevant TIF flags are different */
local_irq_save(flags);
__speculation_ctrl_update(~tif, tif);
local_irq_restore(flags);
}
/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
preempt_disable();
speculation_ctrl_update(speculation_ctrl_update_tif(current));
preempt_enable();
}
static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
newval = cr4 ^ mask;
if (newval != cr4) {
this_cpu_write(cpu_tlbstate.cr4, newval);
__write_cr4(newval);
}
}
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
unsigned long tifp, tifn;
tifn = read_task_thread_flags(next_p);
tifp = read_task_thread_flags(prev_p);
switch_to_bitmap(tifp);
propagate_user_return_notify(prev_p, next_p);
if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
arch_has_block_step()) {
unsigned long debugctl, msk;
rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl &= ~DEBUGCTLMSR_BTF;
msk = tifn & _TIF_BLOCKSTEP;
debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
}
if ((tifp ^ tifn) & _TIF_NOTSC)
cr4_toggle_bits_irqsoff(X86_CR4_TSD);
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
__speculation_ctrl_update(tifp, tifn);
} else {
speculation_ctrl_update_tif(prev_p);
tifn = speculation_ctrl_update_tif(next_p);
/* Enforce MSR update to ensure consistent state */
__speculation_ctrl_update(~tifn, tifn);
}
}
/*
* Idle related variables and functions
*/
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
/*
* We use this if we don't have any better idle routine..
*/
void __cpuidle default_idle(void)
{
raw_safe_halt();
raw_local_irq_disable();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);
static bool x86_idle_set(void)
{
return !!static_call_query(x86_idle);
}
#ifndef CONFIG_SMP
static inline void __noreturn play_dead(void)
{
BUG();
}
#endif
void arch_cpu_idle_enter(void)
{
tsc_verify_tsc_adjust(false);
local_touch_nmi();
}
void __noreturn arch_cpu_idle_dead(void)
{
play_dead();
}
/*
* Called from the generic idle code.
*/
void __cpuidle arch_cpu_idle(void)
{
static_call(x86_idle)();
}
EXPORT_SYMBOL_GPL(arch_cpu_idle);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
bool ret = x86_idle_set();
static_call_update(x86_idle, default_idle);
return ret;
}
#endif
struct cpumask cpus_stop_mask;
void __noreturn stop_this_cpu(void *dummy)
{
struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
unsigned int cpu = smp_processor_id();
local_irq_disable();
/*
* Remove this CPU from the online mask and disable it
* unconditionally. This might be redundant in case that the reboot
* vector was handled late and stop_other_cpus() sent an NMI.
*
* According to SDM and APM NMIs can be accepted even after soft
* disabling the local APIC.
*/
set_cpu_online(cpu, false);
disable_local_APIC();
mcheck_cpu_clear(c);
if (this_cpu_read(cache_state_incoherent))
wbinvd();
/*
* This brings a cache line back and dirties it, but
* native_stop_other_cpus() will overwrite cpus_stop_mask after it
* observed that all CPUs reported stop. This write will invalidate
* the related cache line on this CPU.
*/
cpumask_clear_cpu(cpu, &cpus_stop_mask);
#ifdef CONFIG_SMP
if (smp_ops.stop_this_cpu) {
smp_ops.stop_this_cpu();
BUG();
}
#endif
for (;;) {
/*
* Use native_halt() so that memory contents don't change
* (stack usage and variables) after possibly issuing the
* wbinvd() above.
*/
native_halt();
}
}
/*
* Prefer MWAIT over HALT if MWAIT is supported, MWAIT_CPUID leaf
* exists and whenever MONITOR/MWAIT extensions are present there is at
* least one C1 substate.
*
* Do not prefer MWAIT if the MONITOR instruction has a bug or idle=nomwait
* is passed on the kernel command line.
*/
static __init bool prefer_mwait_c1_over_halt(void)
{
const struct cpuinfo_x86 *c = &boot_cpu_data;
u32 eax, ebx, ecx, edx;
/* If override is enforced on the command line, fall back to HALT. */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return false;
/* MWAIT is not supported on this platform. Fallback to HALT */
if (!cpu_has(c, X86_FEATURE_MWAIT))
return false;
/* Monitor has a bug or APIC stops in C1E. Fallback to HALT */
if (boot_cpu_has_bug(X86_BUG_MONITOR) || boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
return false;
cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
/*
* If MWAIT extensions are not available, it is safe to use MWAIT
* with EAX=0, ECX=0.
*/
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED))
return true;
/*
* If MWAIT extensions are available, there should be at least one
* MWAIT C1 substate present.
*/
return !!(edx & MWAIT_C1_SUBSTATE_MASK);
}
/*
* MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
* with interrupts enabled and no flags, which is backwards compatible with the
* original MWAIT implementation.
*/
static __cpuidle void mwait_idle(void)
{
if (need_resched())
return;
x86_idle_clear_cpu_buffers();
if (!current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags;
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0);
if (need_resched())
goto out;
__sti_mwait(0, 0);
raw_local_irq_disable();
}
out:
__current_clr_polling();
}
void __init select_idle_routine(void)
{
if (boot_option_idle_override == IDLE_POLL) {
if (IS_ENABLED(CONFIG_SMP) && __max_threads_per_core > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
return;
}
/* Required to guard against xen_set_default_idle() */
if (x86_idle_set())
return;
if (prefer_mwait_c1_over_halt()) {
pr_info("using mwait in idle threads\n");
static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
static_call_update(x86_idle, tdx_halt);
} else {
static_call_update(x86_idle, default_idle);
}
}
void amd_e400_c1e_apic_setup(void)
{
if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
local_irq_disable();
tick_broadcast_force();
local_irq_enable();
}
}
void __init arch_post_acpi_subsys_init(void)
{
u32 lo, hi;
if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
return;
/*
* AMD E400 detection needs to happen after ACPI has been enabled. If
* the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
* MSR_K8_INT_PENDING_MSG.
*/
rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
return;
boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halt in AMD C1E");
if (IS_ENABLED(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE))
static_branch_enable(&arch_needs_tick_broadcast);
pr_info("System has AMD C1E erratum E400. Workaround enabled.\n");
}
static int __init idle_setup(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads\n");
boot_option_idle_override = IDLE_POLL;
cpu_idle_poll_ctrl(true);
} else if (!strcmp(str, "halt")) {
/* 'idle=halt' HALT for idle. C-states are disabled. */
boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) {
/* 'idle=nomwait' disables MWAIT for idle */
boot_option_idle_override = IDLE_NOMWAIT;
} else {
return -EINVAL;
}
return 0;
}
early_param("idle", idle_setup);
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(8192);
return sp & ~0xf;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
if (mmap_is_ia32())
return randomize_page(mm->brk, SZ_32M);
return randomize_page(mm->brk, SZ_1G);
}
/*
* Called from fs/proc with a reference on @p to find the function
* which called into schedule(). This needs to be done carefully
* because the task might wake up and we might look at a stack
* changing under us.
*/
unsigned long __get_wchan(struct task_struct *p)
{
struct unwind_state state;
unsigned long addr = 0;
if (!try_get_task_stack(p))
return 0;
for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr)
break;
if (in_sched_functions(addr))
continue;
break;
}
put_task_stack(p);
return addr;
}
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
switch (option) {
case ARCH_GET_CPUID:
return get_cpuid_mode();
case ARCH_SET_CPUID:
return set_cpuid_mode(arg2);
case ARCH_GET_XCOMP_SUPP:
case ARCH_GET_XCOMP_PERM:
case ARCH_REQ_XCOMP_PERM:
case ARCH_GET_XCOMP_GUEST_PERM:
case ARCH_REQ_XCOMP_GUEST_PERM:
return fpu_xstate_prctl(option, arg2);
}
if (!in_ia32_syscall())
return do_arch_prctl_64(current, option, arg2);
return -EINVAL;
}
SYSCALL_DEFINE0(ni_syscall)
{
return -ENOSYS;
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IEEE802154_CORE_H
#define __IEEE802154_CORE_H
#include <net/cfg802154.h>
struct cfg802154_registered_device {
const struct cfg802154_ops *ops;
struct list_head list;
/* wpan_phy index, internal only */
int wpan_phy_idx;
/* also protected by devlist_mtx */
int opencount;
wait_queue_head_t dev_wait;
/* protected by RTNL only */
int num_running_ifaces;
/* associated wpan interfaces, protected by rtnl or RCU */
struct list_head wpan_dev_list;
int devlist_generation, wpan_dev_id;
/* must be last because of the way we do wpan_phy_priv(),
* and it should at least be aligned to NETDEV_ALIGN
*/
struct wpan_phy wpan_phy __aligned(NETDEV_ALIGN);
};
static inline struct cfg802154_registered_device *
wpan_phy_to_rdev(struct wpan_phy *wpan_phy)
{
BUG_ON(!wpan_phy);
return container_of(wpan_phy, struct cfg802154_registered_device,
wpan_phy);
}
extern struct list_head cfg802154_rdev_list;
extern int cfg802154_rdev_list_generation;
int cfg802154_switch_netns(struct cfg802154_registered_device *rdev,
struct net *net);
/* free object */
void cfg802154_dev_free(struct cfg802154_registered_device *rdev);
struct cfg802154_registered_device *
cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx);
struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx);
#endif /* __IEEE802154_CORE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H
#ifdef __KERNEL__
/*
* RCU-protected list version
*/
#include <linux/list.h>
#include <linux/rcupdate.h>
/*
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
* @list: list to be initialized
*
* You should instead use INIT_LIST_HEAD() for normal initialization and
* cleanup tasks, when readers have no access to the list being initialized.
* However, if the list being initialized is visible to readers, you
* need to keep the compiler from being too mischievous.
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
WRITE_ONCE(list->next, list);
WRITE_ONCE(list->prev, list);
}
/*
* return the ->next pointer of a list_head in an rcu safe
* way, we must not access it directly
*/
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
/*
* Return the ->prev pointer of a list_head in an rcu safe way. Don't
* access it directly.
*
* Any list traversed with list_bidir_prev_rcu() must never use
* list_del_rcu(). Doing so will poison the ->prev pointer that
* list_bidir_prev_rcu() relies on, which will result in segfaults.
* To prevent these segfaults, use list_bidir_del_rcu() instead
* of list_del_rcu().
*/
#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev)))
/**
* list_for_each_rcu - Iterate over a list in an RCU-safe fashion
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
!list_is_head(pos, (head)); \
pos = rcu_dereference(pos->next))
/**
* list_tail_rcu - returns the prev pointer of the head of the list
* @head: the head of the list
*
* Note: This should only be used with the list header, and even then
* only if list_del() and similar primitives are not also used on the
* list header.
*/
#define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev)))
/*
* Check during list traversal that we are within an RCU reader
*/
#define check_arg_count_one(dummy)
#ifdef CONFIG_PROVE_RCU_LIST
#define __list_check_rcu(dummy, cond, extra...) \
({ \
check_arg_count_one(extra); \
RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \
"RCU-list traversed in non-reader section!"); \
})
#define __list_check_srcu(cond) \
({ \
RCU_LOCKDEP_WARN(!(cond), \
"RCU-list traversed without holding the required lock!");\
})
#else
#define __list_check_rcu(dummy, cond, extra...) \
({ check_arg_count_one(extra); })
#define __list_check_srcu(cond) ({ })
#endif
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
if (!__list_add_valid(new, prev, next))
return;
new->next = next;
new->prev = prev;
rcu_assign_pointer(list_next_rcu(prev), new);
next->prev = new;
}
/**
* list_add_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}
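/*
 * Example (illustrative sketch, not part of the original source): a writer
 * adding to an RCU-protected list under a spinlock while readers traverse
 * it with list_for_each_entry_rcu(). struct foo, foo_list, foo_lock and
 * handle() are hypothetical names.
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *	};
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	Writer:
 *		spin_lock(&foo_lock);
 *		list_add_rcu(&new_foo->list, &foo_list);
 *		spin_unlock(&foo_lock);
 *
 *	Reader:
 *		struct foo *f;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(f, &foo_list, list)
 *			handle(f->data);
 *		rcu_read_unlock();
 */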
/**
* list_add_tail_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_tail_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}
/**
* list_del_rcu - deletes entry from list without re-initialization
* @entry: the element to delete from the list.
*
* Note: list_empty() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_del_rcu()
* or list_add_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*
* Note that the caller is not permitted to immediately free
* the newly deleted entry. Instead, either synchronize_rcu()
* or call_rcu() must be used to defer freeing until an RCU
* grace period has elapsed.
*/
static inline void list_del_rcu(struct list_head *entry)
{
__list_del_entry(entry);
entry->prev = LIST_POISON2;
}
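/*
 * Usage sketch (illustrative only, not part of this API): removal must be
 * paired with a deferred free so concurrent readers can finish with the
 * node. This assumes the hypothetical struct foo, foo_list and foo_lock
 * from the list_add_rcu() example, plus a struct rcu_head member named
 * "rcu" embedded in struct foo.
 *
 *	static void foo_remove(struct foo *victim)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&victim->list);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(victim, rcu);	// or: synchronize_rcu(); kfree(victim);
 *	}
 */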
/**
* list_bidir_del_rcu - deletes entry from list without re-initialization
* @entry: the element to delete from the list.
*
* In contrast to list_del_rcu(), this does not poison the prev pointer,
* thus allowing backwards traversal via list_bidir_prev_rcu().
*
* Note: list_empty() on entry does not return true after this because
* the entry is in a special undefined state that permits RCU-based
* lockfree reverse traversal. In particular this means that we can not
* poison the forward and backwards pointers that may still be used for
* walking the list.
*
* The caller must take whatever precautions are necessary (such as
* holding appropriate locks) to avoid racing with another list-mutation
* primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on
* this same list. However, it is perfectly legal to run concurrently
* with the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*
* Note that list_del_rcu() and list_bidir_del_rcu() must not be used on
* the same list.
*
* Note that the caller is not permitted to immediately free
* the newly deleted entry. Instead, either synchronize_rcu()
* or call_rcu() must be used to defer freeing until an RCU
* grace period has elapsed.
*/
static inline void list_bidir_del_rcu(struct list_head *entry)
{
__list_del_entry(entry);
}
/**
* hlist_del_init_rcu - deletes entry from hash list with re-initialization
* @n: the element to delete from the hash list.
*
* Note: list_unhashed() on the node returns true after this. It is
* useful for RCU based read lockfree traversal if the writer side
* must know if the list entry is still hashed or already unhashed.
*
* In particular, it means that we can not poison the forward pointers
* that may still be used for walking the hash list and we can only
* zero the pprev pointer so list_unhashed() will return true after
* this.
*
* The caller must take whatever precautions are necessary (such as
* holding appropriate locks) to avoid racing with another
* list-mutation primitive, such as hlist_add_head_rcu() or
* hlist_del_rcu(), running on this same list. However, it is
* perfectly legal to run concurrently with the _rcu list-traversal
* primitives, such as hlist_for_each_entry_rcu().
*/
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
WRITE_ONCE(n->pprev, NULL);
}
}
/**
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The @old entry will be replaced with the @new entry atomically from
* the perspective of concurrent readers. It is the caller's responsibility
* to synchronize with concurrent updaters, if any.
*
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
rcu_assign_pointer(list_next_rcu(new->prev), new);
new->next->prev = new;
old->prev = LIST_POISON2;
}
/**
* __list_splice_init_rcu - join an RCU-protected list into an existing list.
* @list: the RCU-protected list to splice
* @prev: points to the last element of the existing list
* @next: points to the first element of the existing list
* @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*
* The list pointed to by @prev and @next can be RCU-read traversed
* concurrently with this function.
*
* Note that this function blocks.
*
* Important note: the caller must take whatever action is necessary to prevent
* any other updates to the existing list. In principle, it is possible to
* modify the list as soon as sync() begins execution. If this sort of thing
* becomes necessary, an alternative version based on call_rcu() could be
* created. But only if -really- needed -- there is no shortage of RCU API
* members.
*/
static inline void __list_splice_init_rcu(struct list_head *list,
struct list_head *prev,
struct list_head *next,
void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
/*
* "first" and "last" tracking list, so initialize it. RCU readers
* have access to this list, so we must use INIT_LIST_HEAD_RCU()
* instead of INIT_LIST_HEAD().
*/
INIT_LIST_HEAD_RCU(list);
/*
* At this point, the list body still points to the source list.
* Wait for any readers to finish using the list before splicing
* the list body into the new list. Any new readers will see
* an empty list.
*/
sync();
ASSERT_EXCLUSIVE_ACCESS(*first);
ASSERT_EXCLUSIVE_ACCESS(*last);
/*
* Readers are finished with the source list, so perform splice.
* The order is important if the new list is global and accessible
* to concurrent RCU readers. Note that RCU readers are not
* permitted to traverse the prev pointers without excluding
* this function.
*/
last->next = next;
rcu_assign_pointer(list_next_rcu(prev), first);
first->prev = prev;
next->prev = last;
}
/**
* list_splice_init_rcu - splice an RCU-protected list into an existing list,
* designed for stacks.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
* @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head, head->next, sync);
}
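/*
 * Usage sketch (illustrative only, not part of this API): nodes are
 * gathered on a private list and made visible in one step. Because
 * synchronize_rcu() sleeps, the updater-side lock here must be a
 * sleepable one; foo_list and foo_mutex are assumptions for this example.
 *
 *	LIST_HEAD(batch);
 *
 *	// fill "batch" with list_add_tail(); it is not yet visible
 *	mutex_lock(&foo_mutex);		// exclude other updaters
 *	list_splice_init_rcu(&batch, &foo_list, synchronize_rcu);
 *	mutex_unlock(&foo_mutex);
 */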
/**
* list_splice_tail_init_rcu - splice an RCU-protected list into an existing
* list, designed for queues.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
* @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head->prev, head, sync);
}
/**
* list_entry_rcu - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
container_of(READ_ONCE(ptr), type, member)
/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
*
* They do not exist because they would lead to subtle race conditions:
*
* if (!list_empty_rcu(mylist)) {
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
* do_something(bar);
* }
*
* The list might be non-empty when list_empty_rcu() checks it, but it
* might have become empty by the time that list_first_entry_rcu() rereads
* the ->next pointer, which would result in a SEGV.
*
* When not using RCU, it is OK for list_first_entry() to re-read that
* pointer because both functions should be protected by some lock that
* blocks writers.
*
* When using RCU, list_empty() uses READ_ONCE() to fetch the
* RCU-protected ->next pointer and then compares it to the address of the
* list head. However, it neither dereferences this pointer nor provides
* this pointer to its caller. Thus, READ_ONCE() suffices (that is,
* rcu_dereference() is not needed), which means that list_empty() can be
* used anywhere you would want to use list_empty_rcu(). Just don't
* expect anything useful to happen if you do a subsequent lockless
* call to list_first_entry_rcu()!!!
*
* See list_first_or_null_rcu for an alternative.
*/
/**
* list_first_or_null_rcu - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
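/*
 * Usage sketch (illustrative only, not part of this API): the race
 * described above is avoided by a single lockless fetch of the first
 * entry; struct foo and foo_list are assumptions carried over from the
 * earlier examples.
 *
 *	struct foo *bar;
 *
 *	rcu_read_lock();
 *	bar = list_first_or_null_rcu(&foo_list, struct foo, list);
 *	if (bar)
 *		do_something(bar);
 *	rcu_read_unlock();
 */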
/**
* list_next_or_null_rcu - get the next element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the ptr is at the end of the list, NULL is returned.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
struct list_head *__head = (head); \
struct list_head *__ptr = (ptr); \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__next != __head) ? list_entry_rcu(__next, type, \
member) : NULL; \
})
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
* @cond: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_entry_rcu(pos, head, member, cond...) \
for (__list_check_rcu(dummy, ## cond, 0), \
pos = list_entry_rcu((head)->next, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
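/*
 * Usage sketch (illustrative only, not part of this API): a reader walks
 * the hypothetical foo_list from the earlier examples under
 * rcu_read_lock(); an updater already holding foo_lock can instead pass a
 * lockdep expression as the optional @cond argument.
 *
 *	struct foo *pos;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(pos, &foo_list, list)
 *		do_something(pos);
 *	rcu_read_unlock();
 *
 *	// updater side, foo_lock held, no RCU read-side critical section:
 *	list_for_each_entry_rcu(pos, &foo_list, list,
 *				lockdep_is_held(&foo_lock))
 *		do_something(pos);
 */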
/**
* list_for_each_entry_srcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
* @cond: lockdep expression for the lock required to traverse the list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by srcu_read_lock().
* The lockdep expression srcu_read_lock_held() can be passed as the
* cond argument from read side.
*/
#define list_for_each_entry_srcu(pos, head, member, cond) \
for (__list_check_srcu(cond), \
pos = list_entry_rcu((head)->next, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
* list_entry_lockless - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* This primitive may safely run concurrently with the _rcu
* list-mutation primitives such as list_add_rcu(), but requires some
* implicit RCU read-side guarding. One example is running within a special
* exception-time environment where preemption is disabled and where lockdep
* cannot be invoked. Another example is when items are added to the list,
* but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
container_of((typeof(ptr))READ_ONCE(ptr), type, member)
/**
* list_for_each_entry_lockless - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* This primitive may safely run concurrently with the _rcu
* list-mutation primitives such as list_add_rcu(), but requires some
* implicit RCU read-side guarding. One example is running within a special
* exception-time environment where preemption is disabled and where lockdep
* cannot be invoked. Another example is when items are added to the list,
* but never deleted.
*/
#define list_for_each_entry_lockless(pos, head, member) \
for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_continue_rcu - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position which must have been in the list when the RCU read
* lock was taken.
* This would typically require either that you obtained the node from a
* previous walk of the list in the same RCU read-side critical section, or
* that you held some sort of non-RCU reference (such as a reference count)
* to keep the node alive *and* in the list.
*
* This iterator is similar to list_for_each_entry_from_rcu() except
* this starts after the given position and that one starts at the given
* position.
*/
#define list_for_each_entry_continue_rcu(pos, head, member) \
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_from_rcu - iterate over a list from current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_node within the struct.
*
* Iterate over the tail of a list starting from a given position,
* which must have been in the list when the RCU read lock was taken.
* This would typically require either that you obtained the node from a
* previous walk of the list in the same RCU read-side critical section, or
* that you held some sort of non-RCU reference (such as a reference count)
* to keep the node alive *and* in the list.
*
* This iterator is similar to list_for_each_entry_continue_rcu() except
* this starts from the given position and that one starts from the position
* after the given position.
*/
#define list_for_each_entry_from_rcu(pos, head, member) \
for (; &(pos)->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))
/**
* hlist_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
* Note: list_unhashed() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the hash list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu().
*/
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
* hlist_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The @old entry will be replaced with the @new entry atomically from
* the perspective of concurrent readers. It is the caller's responsibility
* to synchronize with concurrent updaters, if any.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;
new->next = next;
WRITE_ONCE(new->pprev, old->pprev);
rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
if (next)
WRITE_ONCE(new->next->pprev, &new->next);
WRITE_ONCE(old->pprev, LIST_POISON2);
}
/**
* hlists_swap_heads_rcu - swap the lists the hlist heads point to
* @left: The hlist head on the left
* @right: The hlist head on the right
*
* The lists start out as [@left ][node1 ... ] and
* [@right ][node2 ... ]
* The lists end up as [@left ][node2 ... ]
* [@right ][node1 ... ]
*/
static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
struct hlist_node *node1 = left->first;
struct hlist_node *node2 = right->first;
rcu_assign_pointer(left->first, node2);
rcu_assign_pointer(right->first, node1);
WRITE_ONCE(node2->pprev, &left->first);
WRITE_ONCE(node1->pprev, &right->first);
}
/*
* return the first or the next element in an RCU protected hlist
*/
#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
/**
* hlist_add_head_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_first_rcu(h), n);
if (first)
WRITE_ONCE(first->pprev, &n->next);
}
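/*
 * Usage sketch (illustrative only, not part of this API): insertion into
 * a simple RCU-protected hash table; struct obj, obj_table, obj_hash()
 * and obj_lock are assumptions made for this example.
 *
 *	struct obj {
 *		int key;
 *		struct hlist_node node;
 *	};
 *
 *	static void obj_insert(struct obj *obj)
 *	{
 *		spin_lock(&obj_lock);		// exclude other updaters
 *		hlist_add_head_rcu(&obj->node, &obj_table[obj_hash(obj->key)]);
 *		spin_unlock(&obj_lock);
 *	}
 */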
/**
* hlist_add_tail_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_add_tail_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *i, *last = NULL;
/* Note: write side code, so rcu accessors are not needed. */
for (i = h->first; i; i = i->next)
last = i;
if (last) {
n->next = last->next;
WRITE_ONCE(n->pprev, &last->next);
rcu_assign_pointer(hlist_next_rcu(last), n);
} else {
hlist_add_head_rcu(n, h);
}
}
/**
* hlist_add_before_rcu
* @n: the new element to add to the hash list.
* @next: the existing element to add the new element before.
*
* Description:
* Adds the specified element to the specified hlist
* before the specified node while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
WRITE_ONCE(n->pprev, next->pprev);
n->next = next;
rcu_assign_pointer(hlist_pprev_rcu(n), n);
WRITE_ONCE(next->pprev, &n->next);
}
/**
* hlist_add_behind_rcu
* @n: the new element to add to the hash list.
* @prev: the existing element to add the new element after.
*
* Description:
* Adds the specified element to the specified hlist
* after the specified node while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
WRITE_ONCE(n->pprev, &prev->next);
rcu_assign_pointer(hlist_next_rcu(prev), n);
if (n->next)
WRITE_ONCE(n->next->pprev, &n->next);
}
#define __hlist_for_each_rcu(pos, head) \
for (pos = rcu_dereference(hlist_first_rcu(head)); \
pos; \
pos = rcu_dereference(hlist_next_rcu(pos)))
/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
* @cond: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu(pos, head, member, cond...) \
for (__list_check_rcu(dummy, ## cond, 0), \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
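/*
 * Usage sketch (illustrative only, not part of this API): a lockless
 * lookup in the hypothetical hash table from the hlist_add_head_rcu()
 * example. The caller must hold rcu_read_lock() and may only use the
 * returned pointer inside that read-side critical section.
 *
 *	static struct obj *obj_lookup(int key)
 *	{
 *		struct obj *obj;
 *
 *		hlist_for_each_entry_rcu(obj, &obj_table[obj_hash(key)], node)
 *			if (obj->key == key)
 *				return obj;
 *		return NULL;
 *	}
 */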
/**
* hlist_for_each_entry_srcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
* @cond: lockdep expression for the lock required to traverse the list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by srcu_read_lock().
* The lockdep expression srcu_read_lock_held() can be passed as the
* cond argument from read side.
*/
#define hlist_for_each_entry_srcu(pos, head, member, cond) \
for (__list_check_srcu(cond), \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*
* This is the same as hlist_for_each_entry_rcu() except that it does
* not do any RCU debugging or tracing.
*/
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu_bh(pos, head, member) \
for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
* @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
* @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
* @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
#endif /* __KERNEL__ */
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/const.h>
#include <linux/limits.h>
#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
#define __RENAME(x) __asm__(#x)
#define FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r)
#define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r)
#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
FIELD_PREP(GENMASK(7, 1), func))
/* Overridden by KUnit tests. */
#ifndef fortify_panic
# define fortify_panic(func, write, avail, size, retfail) \
__fortify_panic(FORTIFY_REASON(func, write), avail, size)
#endif
#ifndef fortify_warn_once
# define fortify_warn_once(x...) WARN_ONCE(x)
#endif
#define FORTIFY_READ 0
#define FORTIFY_WRITE 1
#define EACH_FORTIFY_FUNC(macro) \
macro(strncpy), \
macro(strnlen), \
macro(strlen), \
macro(strscpy), \
macro(strlcat), \
macro(strcat), \
macro(strncat), \
macro(memset), \
macro(memcpy), \
macro(memmove), \
macro(memscan), \
macro(memcmp), \
macro(memchr), \
macro(memchr_inv), \
macro(kmemdup), \
macro(strcpy), \
macro(UNKNOWN),
#define MAKE_FORTIFY_FUNC(func) FORTIFY_FUNC_##func
enum fortify_func {
EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC)
};
void __fortify_report(const u8 reason, const size_t avail, const size_t size);
void __fortify_panic(const u8 reason, const size_t avail, const size_t size) __cold __noreturn;
void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?");
void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?");
#define __compiletime_strlen(p) \
({ \
char *__p = (char *)(p); \
size_t __ret = SIZE_MAX; \
const size_t __p_size = __member_size(p); \
if (__p_size != SIZE_MAX && \
__builtin_constant_p(*__p)) { \
size_t __p_len = __p_size - 1; \
if (__builtin_constant_p(__p[__p_len]) && \
__p[__p_len] == '\0') \
__ret = __builtin_strlen(__p); \
} \
__ret; \
})
#if defined(__SANITIZE_ADDRESS__)
#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
#elif defined(CONFIG_KASAN_GENERIC)
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
#else /* CONFIG_KASAN_SW_TAGS */
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
#endif
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
#else
#if defined(__SANITIZE_MEMORY__)
/*
* For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
* corresponding __msan_XXX functions.
*/
#include <linux/kmsan_string.h>
#define __underlying_memcpy __msan_memcpy
#define __underlying_memmove __msan_memmove
#define __underlying_memset __msan_memset
#else
#define __underlying_memcpy __builtin_memcpy
#define __underlying_memmove __builtin_memmove
#define __underlying_memset __builtin_memset
#endif
#define __underlying_memchr __builtin_memchr
#define __underlying_memcmp __builtin_memcmp
#define __underlying_strcat __builtin_strcat
#define __underlying_strcpy __builtin_strcpy
#define __underlying_strlen __builtin_strlen
#define __underlying_strncat __builtin_strncat
#define __underlying_strncpy __builtin_strncpy
#endif
/**
* unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking
*
* @dst: Destination memory address to write to
* @src: Source memory address to read from
* @bytes: How many bytes to write to @dst from @src
* @justification: Free-form text or comment describing why the use is needed
*
* This should be used for corner cases where the compiler cannot do the
* right thing, or during transitions between APIs, etc. It should be used
* very rarely, and includes a place for justification detailing where bounds
* checking has happened, and why existing solutions cannot be employed.
*/
#define unsafe_memcpy(dst, src, bytes, justification) \
__underlying_memcpy(dst, src, bytes)
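/*
 * Usage sketch (illustrative only; the field names are hypothetical). The
 * @justification argument is discarded by the macro, so callers typically
 * place a short C comment there; a string literal works as well when a
 * comment cannot be written, as in this sketch:
 *
 *	unsafe_memcpy(pkt->payload, src, len,
 *		      "len already validated against pkt->payload_max");
 */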
/*
* Clang's use of __builtin_*object_size() within inlines needs hinting via
* __pass_*object_size(). The preference is to only ever use type 1 (member
* size, rather than struct size), but there remain some stragglers using
* type 0 that will be converted in the future.
*/
#if __has_builtin(__builtin_dynamic_object_size)
#define POS __pass_dynamic_object_size(1)
#define POS0 __pass_dynamic_object_size(0)
#else
#define POS __pass_object_size(1)
#define POS0 __pass_object_size(0)
#endif
#define __compiletime_lessthan(bounds, length) ( \
__builtin_constant_p((bounds) < (length)) && \
(bounds) < (length) \
)
/**
* strncpy - Copy a string to memory with non-guaranteed NUL padding
*
* @p: pointer to destination of copy
* @q: pointer to NUL-terminated source string to copy
* @size: bytes to write at @p
*
* If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
* and @p will NOT be NUL-terminated.
*
* If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
* will be written to @p until @size total bytes have been written.
*
* Do not use this function. While FORTIFY_SOURCE tries to avoid
* over-reads of @q, it cannot defend against writing unterminated
* results to @p. Using strncpy() remains ambiguous and fragile.
* Instead, please choose an alternative, so that the expectation
* of @p's contents is unambiguous:
*
* +--------------------+--------------------+------------+
* | **p** needs to be: | padded to **size** | not padded |
* +====================+====================+============+
* | NUL-terminated | strscpy_pad() | strscpy() |
* +--------------------+--------------------+------------+
* | not NUL-terminated | strtomem_pad() | strtomem() |
* +--------------------+--------------------+------------+
*
* Note strscpy*()'s differing return values for detecting truncation,
* and strtomem*()'s expectation that the destination is marked with
* __nonstring when it is a character array.
*
*/
__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
{
const size_t p_size = __member_size(p);
if (__compiletime_lessthan(p_size, size))
__write_overflow();
if (p_size < size)
fortify_panic(FORTIFY_FUNC_strncpy, FORTIFY_WRITE, p_size, size, p);
return __underlying_strncpy(p, q, size);
}
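/*
 * Replacement sketch (illustrative only; buffer and source names are
 * hypothetical): per the table above, a NUL-terminated, zero-padded
 * destination is better served by strscpy_pad():
 *
 *	char name[16];
 *
 *	if (strscpy_pad(name, src, sizeof(name)) < 0)
 *		pr_warn("name truncated\n");	// returns -E2BIG on truncation
 */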
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
/**
* strnlen - Return bounded count of characters in a NUL-terminated string
*
* @p: pointer to NUL-terminated string to count.
* @maxlen: maximum number of characters to count.
*
* Returns number of characters in @p (NOT including the final NUL), or
* @maxlen, if no NUL has been found up to there.
*
*/
__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
{
const size_t p_size = __member_size(p);
const size_t p_len = __compiletime_strlen(p);
size_t ret;
/* We can take compile-time actions when maxlen is const. */
if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
/* If p is const, we can use its compile-time-known len. */
if (maxlen >= p_size)
return p_len;
}
/* Do not check characters beyond the end of p. */
ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
if (p_size <= ret && maxlen != ret)
fortify_panic(FORTIFY_FUNC_strnlen, FORTIFY_READ, p_size, ret + 1, ret);
return ret;
}
/*
* Defined after fortified strnlen to reuse it. However, it must still be
* possible for strlen() to be used on compile-time strings for use in
* static initializers (i.e. as a constant expression).
*/
/**
* strlen - Return count of characters in a NUL-terminated string
*
* @p: pointer to NUL-terminated string to count.
*
* Do not use this function unless the string length is known at
* compile-time. When @p is unterminated, this function may crash
* or return unexpected counts that could lead to memory content
* exposures. Prefer strnlen().
*
* Returns number of characters in @p (NOT including the final NUL).
*
*/
#define strlen(p) \
__builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \
__builtin_strlen(p), __fortify_strlen(p))
__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
__kernel_size_t __fortify_strlen(const char * const POS p)
{
const size_t p_size = __member_size(p);
__kernel_size_t ret;
/* Give up if we don't know how large p is. */
if (p_size == SIZE_MAX)
return __underlying_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)
fortify_panic(FORTIFY_FUNC_strlen, FORTIFY_READ, p_size, ret + 1, ret);
return ret;
}
/* Defined after fortified strnlen() to reuse it. */
extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(sized_strscpy);
__FORTIFY_INLINE ssize_t sized_strscpy(char * const POS p, const char * const POS q, size_t size)
{
/* Use string size rather than possible enclosing struct size. */
const size_t p_size = __member_size(p);
const size_t q_size = __member_size(q);
size_t len;
/* If we cannot get size of p and q default to call strscpy. */
if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __real_strscpy(p, q, size);
/*
* If size can be known at compile time and is greater than
* p_size, generate a compile time write overflow error.
*/
if (__compiletime_lessthan(p_size, size))
__write_overflow();
/* Short-circuit for compile-time known-safe lengths. */
if (__compiletime_lessthan(p_size, SIZE_MAX)) {
len = __compiletime_strlen(q);
if (len < SIZE_MAX && __compiletime_lessthan(len, size)) {
__underlying_memcpy(p, q, len + 1);
return len;
}
}
/*
* This call protects from read overflow, because len will default to q
* length if it is smaller than size.
*/
len = strnlen(q, size);
/*
* If len equals size, we will copy only size bytes which leads to
* -E2BIG being returned.
* Otherwise we will copy len + 1 because of the final '\0'.
*/
len = len == size ? size : len + 1;
/*
* Generate a runtime write overflow error if len is greater than
* p_size.
*/
if (p_size < len)
fortify_panic(FORTIFY_FUNC_strscpy, FORTIFY_WRITE, p_size, len, -E2BIG);
/*
* We can now safely call vanilla strscpy because we are protected from:
* 1. Read overflow thanks to call to strnlen().
* 2. Write overflow thanks to above ifs.
*/
return __real_strscpy(p, q, len);
}
/* Defined after fortified strlen() to reuse it. */
extern size_t __real_strlcat(char *p, const char *q, size_t avail) __RENAME(strlcat);
/**
* strlcat - Append a string to an existing string
*
* @p: pointer to %NUL-terminated string to append to
* @q: pointer to %NUL-terminated string to append from
* @avail: Maximum bytes available in @p
*
* Appends %NUL-terminated string @q after the %NUL-terminated
* string at @p, but will not write beyond @avail bytes total,
* potentially truncating the copy from @q. @p will stay
* %NUL-terminated only if a %NUL already existed within
* the @avail bytes of @p. If so, the resulting number of
* bytes copied from @q will be at most "@avail - strlen(@p) - 1".
*
* Do not use this function. While FORTIFY_SOURCE tries to avoid
* read and write overflows, this is only possible when the sizes
* of @p and @q are known to the compiler. Prefer building the
* string with formatting, via scnprintf(), seq_buf, or similar.
*
* Returns total bytes that _would_ have been contained by @p
* regardless of truncation, similar to snprintf(). If return
* value is >= @avail, the string has been truncated.
*
*/
__FORTIFY_INLINE
size_t strlcat(char * const POS p, const char * const POS q, size_t avail)
{
const size_t p_size = __member_size(p);
const size_t q_size = __member_size(q);
size_t p_len, copy_len;
size_t actual, wanted;
/* Give up immediately if both buffer sizes are unknown. */
if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __real_strlcat(p, q, avail);
p_len = strnlen(p, avail);
copy_len = strlen(q);
wanted = actual = p_len + copy_len;
/* Cannot append any more: report truncation. */
if (avail <= p_len)
return wanted;
/* Give up if string is already overflowed. */
if (p_size <= p_len)
fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_READ, p_size, p_len + 1, wanted);
if (actual >= avail) {
copy_len = avail - p_len - 1;
actual = p_len + copy_len;
}
/* Give up if copy will overflow. */
if (p_size <= actual)
fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_WRITE, p_size, actual + 1, wanted);
__underlying_memcpy(p + p_len, q, copy_len);
p[actual] = '\0';
return wanted;
}
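/*
 * Replacement sketch (illustrative only; buffer and values are
 * hypothetical): the append-with-truncation pattern above is usually
 * clearer with scnprintf(), which tracks the remaining space explicitly:
 *
 *	char buf[64];
 *	int len = 0;
 *
 *	len += scnprintf(buf + len, sizeof(buf) - len, "cpu%d", cpu);
 *	len += scnprintf(buf + len, sizeof(buf) - len, " freq=%u", freq);
 */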
/* Defined after fortified strlcat() to reuse it. */
/**
* strcat - Append a string to an existing string
*
* @p: pointer to NUL-terminated string to append to
* @q: pointer to NUL-terminated source string to append from
*
* Do not use this function. While FORTIFY_SOURCE tries to avoid
* read and write overflows, this is only possible when the
* destination buffer size is known to the compiler. Prefer
* building the string with formatting, via scnprintf() or similar.
* At the very least, use strncat().
*
* Returns @p.
*
*/
__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
char *strcat(char * const POS p, const char *q)
{
const size_t p_size = __member_size(p);
const size_t wanted = strlcat(p, q, p_size);
if (p_size <= wanted)
fortify_panic(FORTIFY_FUNC_strcat, FORTIFY_WRITE, p_size, wanted + 1, p);
return p;
}
/**
* strncat - Append a string to an existing string
*
* @p: pointer to NUL-terminated string to append to
* @q: pointer to source string to append from
* @count: Maximum bytes to read from @q
*
* Appends at most @count bytes from @q (stopping at the first
* NUL byte) after the NUL-terminated string at @p. @p will be
* NUL-terminated.
*
* Do not use this function. While FORTIFY_SOURCE tries to avoid
* read and write overflows, this is only possible when the sizes
* of @p and @q are known to the compiler. Prefer building the
* string with formatting, via scnprintf() or similar.
*
* Returns @p.
*
*/
/* Defined after fortified strlen() and strnlen() to reuse them. */
__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
{
const size_t p_size = __member_size(p);
const size_t q_size = __member_size(q);
size_t p_len, copy_len, total;
if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strncat(p, q, count);
p_len = strlen(p);
copy_len = strnlen(q, count);
total = p_len + copy_len + 1;
if (p_size < total)
fortify_panic(FORTIFY_FUNC_strncat, FORTIFY_WRITE, p_size, total, p);
__underlying_memcpy(p + p_len, q, copy_len);
p[p_len + copy_len] = '\0';
return p;
}
__FORTIFY_INLINE bool fortify_memset_chk(__kernel_size_t size,
const size_t p_size,
const size_t p_size_field)
{
if (__builtin_constant_p(size)) {
/*
* Length argument is a constant expression, so we
* can perform compile-time bounds checking where
* buffer sizes are also known at compile time.
*/
/* Error when size is larger than enclosing struct. */
if (__compiletime_lessthan(p_size_field, p_size) &&
__compiletime_lessthan(p_size, size))
__write_overflow();
/* Warn when write size is larger than dest field. */
if (__compiletime_lessthan(p_size_field, size))
__write_overflow_field(p_size_field, size);
}
/*
* At this point, length argument may not be a constant expression,
* so run-time bounds checking can be done where buffer sizes are
* known. (This is not an "else" because the above checks may only
* be compile-time warnings, and we want to still warn for run-time
* overflows.)
*/
/*
* Always stop accesses beyond the struct that contains the
* field, when the buffer's remaining size is known.
* (The SIZE_MAX test is to optimize away checks where the buffer
* lengths are unknown.)
*/
if (p_size != SIZE_MAX && p_size < size)
fortify_panic(FORTIFY_FUNC_memset, FORTIFY_WRITE, p_size, size, true);
return false;
}
#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \
size_t __fortify_size = (size_t)(size); \
fortify_memset_chk(__fortify_size, p_size, p_size_field), \
__underlying_memset(p, c, __fortify_size); \
})
/*
* __struct_size() vs __member_size() must be captured here to avoid
* evaluating argument side-effects further into the macro layers.
*/
#ifndef CONFIG_KMSAN
#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
__struct_size(p), __member_size(p))
#endif
/*
* To make sure the compiler can enforce protection against buffer overflows,
* memcpy(), memmove(), and memset() must not be used beyond individual
* struct members. If you need to copy across multiple members, please use
* struct_group() to create a named mirror of an anonymous struct member.
* (e.g. see struct sk_buff.) Read overflow checking is currently only
* done when a write overflow is also present, or when building with W=1.
*
* Mitigation coverage matrix
* Bounds checking at:
* +-------+-------+-------+-------+
* | Compile time | Run time |
* memcpy() argument sizes: | write | read | write | read |
* dest source length +-------+-------+-------+-------+
* memcpy(known, known, constant) | y | y | n/a | n/a |
* memcpy(known, unknown, constant) | y | n | n/a | V |
* memcpy(known, known, dynamic) | n | n | B | B |
* memcpy(known, unknown, dynamic) | n | n | B | V |
* memcpy(unknown, known, constant) | n | y | V | n/a |
* memcpy(unknown, unknown, constant) | n | n | V | V |
* memcpy(unknown, known, dynamic) | n | n | V | B |
* memcpy(unknown, unknown, dynamic) | n | n | V | V |
* +-------+-------+-------+-------+
*
* y = perform deterministic compile-time bounds checking
* n = cannot perform deterministic compile-time bounds checking
* n/a = no run-time bounds checking needed since compile-time deterministic
* B = can perform run-time bounds checking (currently unimplemented)
* V = vulnerable to run-time overflow (will need refactoring to solve)
*
*/
__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
const size_t p_size,
const size_t q_size,
const size_t p_size_field,
const size_t q_size_field,
const u8 func)
{
if (__builtin_constant_p(size)) {
/*
* Length argument is a constant expression, so we
* can perform compile-time bounds checking where
* buffer sizes are also known at compile time.
*/
/* Error when size is larger than enclosing struct. */
if (__compiletime_lessthan(p_size_field, p_size) &&
__compiletime_lessthan(p_size, size))
__write_overflow();
if (__compiletime_lessthan(q_size_field, q_size) &&
__compiletime_lessthan(q_size, size))
__read_overflow2();
/* Warn when write size argument larger than dest field. */
if (__compiletime_lessthan(p_size_field, size))
__write_overflow_field(p_size_field, size);
/*
* Warn for source field over-read when building with W=1
* or when an over-write happened, so both can be fixed at
* the same time.
*/
if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
__compiletime_lessthan(p_size_field, size)) &&
__compiletime_lessthan(q_size_field, size))
__read_overflow2_field(q_size_field, size);
}
/*
* At this point, length argument may not be a constant expression,
* so run-time bounds checking can be done where buffer sizes are
* known. (This is not an "else" because the above checks may only
* be compile-time warnings, and we want to still warn for run-time
* overflows.)
*/
/*
* Always stop accesses beyond the struct that contains the
* field, when the buffer's remaining size is known.
* (The SIZE_MAX test is to optimize away checks where the buffer
* lengths are unknown.)
*/
if (p_size != SIZE_MAX && p_size < size)
fortify_panic(func, FORTIFY_WRITE, p_size, size, true);
else if (q_size != SIZE_MAX && q_size < size)
fortify_panic(func, FORTIFY_READ, q_size, size, true);
/*
* Warn when writing beyond destination field size.
*
* Note the implementation of __builtin_*object_size() behaves
* like sizeof() when not directly referencing a flexible
* array member, which means there will be many bounds checks
* that will appear at run-time, without a way for them to be
* detected at compile-time (as can be done when the destination
* is specifically the flexible array member).
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
*/
if (p_size_field != SIZE_MAX && p_size != p_size_field && p_size_field < size)
return true;
return false;
}
/*
* To work around what seems to be an optimizer bug, the macro arguments
* need to have const copies or the values end up changed by the time they
* reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture
* __bos() results in const temp vars") for more details.
*/
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
p_size_field, q_size_field, op) ({ \
const size_t __fortify_size = (size_t)(size); \
const size_t __p_size = (p_size); \
const size_t __q_size = (q_size); \
const size_t __p_size_field = (p_size_field); \
const size_t __q_size_field = (q_size_field); \
/* Keep a mutable version of the size for the final copy. */ \
size_t __copy_size = __fortify_size; \
fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \
__q_size, __p_size_field, \
__q_size_field, FORTIFY_FUNC_ ##op), \
#op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
__fortify_size, \
"field \"" #p "\" at " FILE_LINE, \
__p_size_field); \
/* Hide only the run-time size from value range tracking to */ \
/* silence compile-time false positive bounds warnings. */ \
if (!__builtin_constant_p(__copy_size)) \
OPTIMIZER_HIDE_VAR(__copy_size); \
__underlying_##op(p, q, __copy_size); \
})
/*
* Notes about compile-time buffer size detection:
*
* With these types...
*
* struct middle {
* u16 a;
* u8 middle_buf[16];
* int b;
* };
* struct end {
* u16 a;
* u8 end_buf[16];
* };
* struct flex {
* int a;
* u8 flex_buf[];
* };
*
* void func(TYPE *ptr) { ... }
*
* Cases where destination size cannot be currently detected:
* - the size of ptr's object (seemingly by design, gcc & clang fail):
* __builtin_object_size(ptr, 1) == SIZE_MAX
* - the size of flexible arrays in ptr's obj (by design, dynamic size):
* __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
* - the size of ANY array at the end of ptr's obj (gcc and clang bug):
* __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
*
* Cases where destination size is currently detected:
* - the size of non-array members within ptr's object:
* __builtin_object_size(ptr->a, 1) == 2
* - the size of non-flexible-array in the middle of ptr's obj:
* __builtin_object_size(ptr->middle_buf, 1) == 16
*
*/
/*
* __struct_size() vs __member_size() must be captured here to avoid
* evaluating argument side-effects further into the macro layers.
*/
#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
__struct_size(p), __struct_size(q), \
__member_size(p), __member_size(q), \
memcpy)
#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
__struct_size(p), __struct_size(q), \
__member_size(p), __member_size(q), \
memmove)
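/*
 * Usage sketch (illustrative only; struct and member names are
 * hypothetical): when one memcpy() legitimately spans several adjacent
 * members, wrapping them in struct_group() gives FORTIFY a single named
 * destination to bounds-check, as suggested above:
 *
 *	struct packet {
 *		u32 seq;
 *		struct_group(header,
 *			u8 type;
 *			u8 flags;
 *			u16 len;
 *		);
 *		u8 payload[];
 *	};
 *
 *	memcpy(&dst->header, &src->header, sizeof(dst->header));
 */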
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
{
const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(FORTIFY_FUNC_memscan, FORTIFY_READ, p_size, size, NULL);
return __real_memscan(p, c, size);
}
__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
{
const size_t p_size = __struct_size(p);
const size_t q_size = __struct_size(q);
if (__builtin_constant_p(size)) {
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (__compiletime_lessthan(q_size, size))
__read_overflow2();
}
if (p_size < size)
fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, p_size, size, INT_MIN);
else if (q_size < size)
fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, q_size, size, INT_MIN);
return __underlying_memcmp(p, q, size);
}
__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
{
const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(FORTIFY_FUNC_memchr, FORTIFY_READ, p_size, size, NULL);
return __underlying_memchr(p, c, size);
}
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
{
const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(FORTIFY_FUNC_memchr_inv, FORTIFY_READ, p_size, size, NULL);
return __real_memchr_inv(p, c, size);
}
extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof)
__realloc_size(2);
__FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp)
{
const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size,
__real_kmemdup(p, 0, gfp));
return __real_kmemdup(p, size, gfp);
}
#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
/**
* strcpy - Copy a string into another string buffer
*
* @p: pointer to destination of copy
* @q: pointer to NUL-terminated source string to copy
*
* Do not use this function. While FORTIFY_SOURCE tries to avoid
* overflows, this is only possible when the sizes of @q and @p are
* known to the compiler. Prefer strscpy(), though note its different
* return values for detecting truncation.
*
* Returns @p.
*
*/
/* Defined after fortified strlen to reuse it. */
__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
char *strcpy(char * const POS p, const char * const POS q)
{
const size_t p_size = __member_size(p);
const size_t q_size = __member_size(q);
size_t size;
/* If neither buffer size is known, immediately give up. */
if (__builtin_constant_p(p_size) &&
__builtin_constant_p(q_size) &&
p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strcpy(p, q);
size = strlen(q) + 1;
/* Compile-time check for const size overflow. */
if (__compiletime_lessthan(p_size, size))
__write_overflow();
/* Run-time check for dynamic size overflow. */
if (p_size < size)
fortify_panic(FORTIFY_FUNC_strcpy, FORTIFY_WRITE, p_size, size, p);
__underlying_memcpy(p, q, size);
return p;
}
/* Don't use these outside the FORTIFY_SOURCE implementation */
#undef __underlying_memchr
#undef __underlying_memcmp
#undef __underlying_strcat
#undef __underlying_strcpy
#undef __underlying_strlen
#undef __underlying_strncat
#undef __underlying_strncpy
#undef POS
#undef POS0
#endif /* _LINUX_FORTIFY_STRING_H_ */
/*
* include/linux/topology.h
*
* Written by: Matthew Dobson, IBM Corporation
*
* Copyright (C) 2002, IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <colpatch@us.ibm.com>
*/
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H
#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>
#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif
int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
#define REMOTE_DISTANCE 20
#define DISTANCE_BITS 8
#ifndef node_distance
#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
#ifndef RECLAIM_DISTANCE
/*
* If the distance between nodes in a system is larger than RECLAIM_DISTANCE
* (in whatever arch specific measurement units returned by node_distance())
* and node_reclaim_mode is enabled then the VM will only call node_reclaim()
* on nodes within this distance.
*/
#define RECLAIM_DISTANCE 30
#endif
/*
* The following tunable allows platforms to override the default node
* reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are
* sufficiently fast that the default value actually hurts
* performance.
*
* AMD EPYC machines use this because even though the 2-hop distance
* is 32 (3.2x slower than a local memory access) performance actually
* *improves* if allowed to reclaim memory and load balance tasks
* between NUMA nodes 2-hops apart.
*/
extern int __read_mostly node_reclaim_distance;
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS (1)
#endif
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);
#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
return raw_cpu_read(numa_node);
}
#endif
#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
return per_cpu(numa_node, cpu);
}
#endif
#ifndef set_numa_node
static inline void set_numa_node(int node)
{
this_cpu_write(numa_node, node);
}
#endif
#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
per_cpu(numa_node, cpu) = node;
}
#endif
#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
return cpu_to_node(raw_smp_processor_id());
}
#endif
#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
* N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
* It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
*/
DECLARE_PER_CPU(int, _numa_mem_);
#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
this_cpu_write(_numa_mem_, node);
}
#endif
#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
return raw_cpu_read(_numa_mem_);
}
#endif
#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
return per_cpu(_numa_mem_, cpu);
}
#endif
#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
per_cpu(_numa_mem_, cpu) = node;
}
#endif
#else /* !CONFIG_HAVE_MEMORYLESS_NODES */
#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
return numa_node_id();
}
#endif
#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
return cpu_to_node(cpu);
}
#endif
#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
#if defined(topology_die_id) && defined(topology_die_cpumask)
#define TOPOLOGY_DIE_SYSFS
#endif
#if defined(topology_cluster_id) && defined(topology_cluster_cpumask)
#define TOPOLOGY_CLUSTER_SYSFS
#endif
#if defined(topology_book_id) && defined(topology_book_cpumask)
#define TOPOLOGY_BOOK_SYSFS
#endif
#if defined(topology_drawer_id) && defined(topology_drawer_cpumask)
#define TOPOLOGY_DRAWER_SYSFS
#endif
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_die_id
#define topology_die_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_cluster_id
#define topology_cluster_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
#ifndef topology_book_id
#define topology_book_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_drawer_id
#define topology_drawer_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_ppin
#define topology_ppin(cpu) ((void)(cpu), 0ull)
#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_cluster_cpumask
#define topology_cluster_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_die_cpumask
#define topology_die_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_book_cpumask
#define topology_book_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_drawer_cpumask
#define topology_drawer_cpumask(cpu) cpumask_of(cpu)
#endif
#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask)
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
return topology_sibling_cpumask(cpu);
}
#endif
#ifndef topology_is_primary_thread
static inline bool topology_is_primary_thread(unsigned int cpu)
{
/*
* When disabling SMT, the primary thread of the SMT will remain
* enabled/active. Architectures that have a special primary thread
* (e.g. x86) need to override this function. Otherwise the first
* thread in the SMT can be made the primary thread.
*
* The sibling cpumask of an offline CPU always contains the CPU
* itself on architectures using the implementation of
* CONFIG_GENERIC_ARCH_TOPOLOGY for building their topology.
* Other architectures not using CONFIG_GENERIC_ARCH_TOPOLOGY for
* building their topology have to check whether to use this default
* implementation or to override it.
*/
return cpu == cpumask_first(topology_sibling_cpumask(cpu));
}
#define topology_is_primary_thread topology_is_primary_thread
#endif
static inline const struct cpumask *cpu_node_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
}
#ifdef CONFIG_NUMA
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
#else
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
return cpumask_nth_and(cpu, cpus, cpu_online_mask);
}
static inline const struct cpumask *
sched_numa_hop_mask(unsigned int node, unsigned int hops)
{
return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_NUMA */
/**
* for_each_node_numadist() - iterate over nodes in increasing distance
* order, starting from a given node
* @node: the iteration variable and the starting node.
* @unvisited: a nodemask to keep track of the unvisited nodes.
*
* This macro iterates over NUMA node IDs in increasing distance from the
* starting @node and yields MAX_NUMNODES when all the nodes have been
* visited.
*
* Note that by the time the loop completes, the @unvisited nodemask will
* be fully cleared, unless the loop exits early.
*
* The difference between for_each_node() and for_each_node_numadist() is
* that the former iterates over nodes in numerical order, whereas the
* latter iterates over nodes in increasing order of distance.
*
* The complexity of this iterator is O(N^2), where N is the number of
* nodes, as each iteration involves scanning all nodes to find the one
* with the shortest distance.
*
* Requires rcu_lock to be held.
*/
#define for_each_node_numadist(node, unvisited) \
for (int __start = (node), \
(node) = nearest_node_nodemask((__start), &(unvisited)); \
(node) < MAX_NUMNODES; \
node_clear((node), (unvisited)), \
(node) = nearest_node_nodemask((__start), &(unvisited)))
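/*
* Illustrative sketch (not part of the original file) of how the macro
* above might be used: visit nodes in increasing distance from the local
* node. The choice of node_states[N_MEMORY] as the unvisited mask and the
* pr_debug() body are placeholders for whatever the caller needs.
*
*	int node = numa_node_id();
*	nodemask_t unvisited = node_states[N_MEMORY];
*
*	rcu_read_lock();
*	for_each_node_numadist(node, unvisited)
*		pr_debug("visiting node %d\n", node);
*	rcu_read_unlock();
*/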
/**
* for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
* from a given node.
* @mask: the iteration variable.
* @node: the NUMA node to start the search from.
*
* Requires rcu_lock to be held.
*
* Yields cpu_online_mask for @node == NUMA_NO_NODE.
*/
#define for_each_numa_hop_mask(mask, node) \
for (unsigned int __hops = 0; \
mask = (node != NUMA_NO_NODE || __hops) ? \
sched_numa_hop_mask(node, __hops) : \
cpu_online_mask, \
!IS_ERR_OR_NULL(mask); \
__hops++)
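/*
* Illustrative sketch (not part of the original file): visiting CPUs
* grouped by increasing NUMA distance from @node, skipping CPUs already
* seen in closer hops. do_something() stands in for the caller's work.
*
*	const struct cpumask *mask, *prev = cpu_none_mask;
*	int cpu;
*
*	rcu_read_lock();
*	for_each_numa_hop_mask(mask, node) {
*		for_each_cpu_andnot(cpu, mask, prev)
*			do_something(cpu);
*		prev = mask;
*	}
*	rcu_read_unlock();
*/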
DECLARE_PER_CPU(unsigned long, cpu_scale);
static inline unsigned long topology_get_cpu_scale(int cpu)
{
return per_cpu(cpu_scale, cpu);
}
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
#endif /* _LINUX_TOPOLOGY_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Access vector cache interface for object managers.
*
* Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
*/
#ifndef _SELINUX_AVC_H_
#define _SELINUX_AVC_H_
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kdev_t.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/audit.h>
#include <linux/lsm_audit.h>
#include <linux/in6.h>
#include "flask.h"
#include "av_permissions.h"
#include "security.h"
/*
* An entry in the AVC.
*/
struct avc_entry;
struct task_struct;
struct inode;
struct sock;
struct sk_buff;
/*
* AVC statistics
*/
struct avc_cache_stats {
unsigned int lookups;
unsigned int misses;
unsigned int allocations;
unsigned int reclaims;
unsigned int frees;
};
/*
* We only need this data after we have decided to send an audit message.
*/
struct selinux_audit_data {
u32 ssid;
u32 tsid;
u16 tclass;
u32 requested;
u32 audited;
u32 denied;
int result;
} __randomize_layout;
/*
* AVC operations
*/
void __init avc_init(void);
static inline u32 avc_audit_required(u32 requested, struct av_decision *avd,
int result, u32 auditdeny, u32 *deniedp)
{
u32 denied, audited;
if (avd->flags & AVD_FLAGS_NEVERAUDIT)
return 0;
denied = requested & ~avd->allowed;
if (unlikely(denied)) {
audited = denied & avd->auditdeny;
/*
* auditdeny is TRICKY! Setting a bit in
* this field means that ANY denials should NOT be audited if
* the policy contains an explicit dontaudit rule for that
* permission. Take notice that this is unrelated to the
* actual permissions that were denied. As an example lets
* assume:
*
* denied == READ
* avd.auditdeny & ACCESS == 0 (not set means explicit rule)
* auditdeny & ACCESS == 1
*
* We will NOT audit the denial even though the denied
* permission was READ and the auditdeny checks were for
* ACCESS
*/
if (auditdeny && !(auditdeny & avd->auditdeny))
audited = 0;
} else if (result)
audited = denied = requested;
else
audited = requested & avd->auditallow;
*deniedp = denied;
return audited;
}
int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, u32 audited,
u32 denied, int result, struct common_audit_data *a);
/**
* avc_audit - Audit the granting or denial of permissions.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions
* @avd: access vector decisions
* @result: result from avc_has_perm_noaudit
* @a: auxiliary audit data
*
* Audit the granting or denial of permissions in accordance
* with the policy. This function is typically called by
* avc_has_perm() after a permission check, but can also be
* called directly by callers who use avc_has_perm_noaudit()
* in order to separate the permission check from the auditing.
* For example, this separation is useful when the permission check must
* be performed under a lock, to allow the lock to be released
* before calling the auditing code.
*/
static inline int avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
struct av_decision *avd, int result,
struct common_audit_data *a)
{
u32 audited, denied;
audited = avc_audit_required(requested, avd, result, 0, &denied);
if (likely(!audited))
return 0;
return slow_avc_audit(ssid, tsid, tclass, requested, audited, denied,
result, a);
}
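/*
* Illustrative sketch (not part of the original file) of the split
* described above: do the permission check under a lock with
* avc_has_perm_noaudit(), then audit once the lock is dropped. The lock,
* the rc/rc2 variables and the common_audit_data ad are placeholders.
*
*	struct av_decision avd;
*	int rc, rc2;
*
*	spin_lock(&some_lock);
*	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
*	spin_unlock(&some_lock);
*	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, &ad);
*	if (rc2)
*		return rc2;
*	return rc;
*/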
#define AVC_STRICT 1 /* Ignore permissive mode. */
#define AVC_EXTENDED_PERMS 2 /* update extended permissions */
int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
unsigned int flags, struct av_decision *avd);
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,
struct common_audit_data *auditdata);
#define AVC_EXT_IOCTL (1 << 0) /* Cache entry for an ioctl extended permission */
#define AVC_EXT_NLMSG (1 << 1) /* Cache entry for an nlmsg extended permission */
int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
u8 driver, u8 base_perm, u8 perm,
struct common_audit_data *ad);
u32 avc_policy_seqno(void);
#define AVC_CALLBACK_GRANT 1
#define AVC_CALLBACK_TRY_REVOKE 2
#define AVC_CALLBACK_REVOKE 4
#define AVC_CALLBACK_RESET 8
#define AVC_CALLBACK_AUDITALLOW_ENABLE 16
#define AVC_CALLBACK_AUDITALLOW_DISABLE 32
#define AVC_CALLBACK_AUDITDENY_ENABLE 64
#define AVC_CALLBACK_AUDITDENY_DISABLE 128
#define AVC_CALLBACK_ADD_XPERMS 256
int avc_add_callback(int (*callback)(u32 event), u32 events);
/* Exported to selinuxfs */
int avc_get_hash_stats(char *page);
unsigned int avc_get_cache_threshold(void);
void avc_set_cache_threshold(unsigned int cache_threshold);
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
#endif
#endif /* _SELINUX_AVC_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* This code is based in part on work published here:
*
* https://github.com/IAIK/KAISER
*
* The original work was written by and signed off by for the Linux
* kernel by:
*
* Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
* Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
* Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
* Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
*
* Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
* Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
* Andy Lutomirsky <luto@amacapital.net>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>
#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/bugs.h>
#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK 0
#endif
/*
* Define the page-table levels we clone for user-space on 32
* and 64 bit.
*/
#ifdef CONFIG_X86_64
#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PMD
#else
#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PTE
#endif
static void __init pti_print_if_insecure(const char *reason)
{
if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
pr_info("%s\n", reason);
}
static void __init pti_print_if_secure(const char *reason)
{
if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
pr_info("%s\n", reason);
}
/* Assume mode is auto unless overridden via cmdline below. */
static enum pti_mode {
PTI_AUTO = 0,
PTI_FORCE_OFF,
PTI_FORCE_ON
} pti_mode;
void __init pti_check_boottime_disable(void)
{
if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
pti_mode = PTI_FORCE_OFF;
pti_print_if_insecure("disabled on XEN PV.");
return;
}
if (pti_mode == PTI_AUTO &&
!cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
pti_mode = PTI_FORCE_OFF;
if (pti_mode == PTI_FORCE_OFF) {
pti_print_if_insecure("disabled on command line.");
return;
}
if (pti_mode == PTI_FORCE_ON)
pti_print_if_secure("force enabled on command line.");
if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
return;
setup_force_cpu_cap(X86_FEATURE_PTI);
if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
pr_debug("PTI enabled, disabling INVLPGB\n");
setup_clear_cpu_cap(X86_FEATURE_INVLPGB);
}
}
static int __init pti_parse_cmdline(char *arg)
{
if (!strcmp(arg, "off"))
pti_mode = PTI_FORCE_OFF;
else if (!strcmp(arg, "on"))
pti_mode = PTI_FORCE_ON;
else if (!strcmp(arg, "auto"))
pti_mode = PTI_AUTO;
else
return -EINVAL;
return 0;
}
early_param("pti", pti_parse_cmdline);
static int __init pti_parse_cmdline_nopti(char *arg)
{
pti_mode = PTI_FORCE_OFF;
return 0;
}
early_param("nopti", pti_parse_cmdline_nopti);
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
/*
* Changes to the high (kernel) portion of the kernelmode page
* tables are not automatically propagated to the usermode tables.
*
* Users should keep in mind that, unlike the kernelmode tables,
* there is no vmalloc_fault equivalent for the usermode tables.
* Top-level entries added to init_mm's usermode pgd after boot
* will not be automatically propagated to other mms.
*/
if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
return pgd;
/*
* The user page tables get the full PGD, accessible from
* userspace:
*/
kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
/*
* If this is normal user memory, make it NX in the kernel
* pagetables so that, if we somehow screw up and return to
* usermode with the kernel CR3 loaded, we'll get a page fault
* instead of allowing user code to execute with the wrong CR3.
*
* As exceptions, we don't set NX if:
* - _PAGE_USER is not set. This could be an executable
* EFI runtime mapping or something similar, and the kernel
* may execute from it
* - we don't have NX support
* - we're clearing the PGD (i.e. the new pgd is not present).
*/
if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
(__supported_pte_mask & _PAGE_NX))
pgd.pgd |= _PAGE_NX;
/* return the copy of the PGD we want the kernel to use: */
return pgd;
}
/*
* Walk the user copy of the page tables (optionally) trying to allocate
* page table pages on the way down.
*
* Returns a pointer to a P4D on success, or NULL on failure.
*/
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
if (address < PAGE_OFFSET) {
WARN_ONCE(1, "attempt to walk user address\n");
return NULL;
}
if (pgd_none(*pgd)) {
unsigned long new_p4d_page = __get_free_page(gfp);
if (WARN_ON_ONCE(!new_p4d_page))
return NULL;
set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
}
BUILD_BUG_ON(pgd_leaf(*pgd));
return p4d_offset(pgd, address);
}
/*
* Walk the user copy of the page tables (optionally) trying to allocate
* page table pages on the way down.
*
* Returns a pointer to a PMD on success, or NULL on failure.
*/
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
p4d_t *p4d;
pud_t *pud;
p4d = pti_user_pagetable_walk_p4d(address);
if (!p4d)
return NULL;
BUILD_BUG_ON(p4d_leaf(*p4d));
if (p4d_none(*p4d)) {
unsigned long new_pud_page = __get_free_page(gfp);
if (WARN_ON_ONCE(!new_pud_page))
return NULL;
set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
}
pud = pud_offset(p4d, address);
/* The user page tables do not use large mappings: */
if (pud_leaf(*pud)) {
WARN_ON(1);
return NULL;
}
if (pud_none(*pud)) {
unsigned long new_pmd_page = __get_free_page(gfp);
if (WARN_ON_ONCE(!new_pmd_page))
return NULL;
set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
}
return pmd_offset(pud, address);
}
/*
* Walk the shadow copy of the page tables (optionally) trying to allocate
* page table pages on the way down. Does not support large pages.
*
* Note: this is only used when mapping *new* kernel data into the
* user/shadow page tables. It is never used for userspace data.
*
* Returns a pointer to a PTE on success, or NULL on failure.
*/
static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
pmd_t *pmd;
pte_t *pte;
pmd = pti_user_pagetable_walk_pmd(address);
if (!pmd)
return NULL;
/* Large PMD mapping found */
if (pmd_leaf(*pmd)) {
/* Clear the PMD if we hit a large mapping from the first round */
if (late_text) {
set_pmd(pmd, __pmd(0));
} else {
WARN_ON_ONCE(1);
return NULL;
}
}
if (pmd_none(*pmd)) {
unsigned long new_pte_page = __get_free_page(gfp);
if (!new_pte_page)
return NULL;
set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
}
pte = pte_offset_kernel(pmd, address);
if (pte_flags(*pte) & _PAGE_USER) {
WARN_ONCE(1, "attempt to walk to user pte\n");
return NULL;
}
return pte;
}
#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
pte_t *pte, *target_pte;
unsigned int level;
pte = lookup_address(VSYSCALL_ADDR, &level);
if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
return;
target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
if (WARN_ON(!target_pte))
return;
*target_pte = *pte;
set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif
enum pti_clone_level {
PTI_CLONE_PMD,
PTI_CLONE_PTE,
};
static void
pti_clone_pgtable(unsigned long start, unsigned long end,
enum pti_clone_level level, bool late_text)
{
unsigned long addr;
/*
* Clone the populated PMDs which cover start to end. These PMD areas
* can have holes.
*/
for (addr = start; addr < end;) {
pte_t *pte, *target_pte;
pmd_t *pmd, *target_pmd;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
/* Overflow check */
if (addr < start)
break;
pgd = pgd_offset_k(addr);
if (WARN_ON(pgd_none(*pgd)))
return;
p4d = p4d_offset(pgd, addr);
if (WARN_ON(p4d_none(*p4d)))
return;
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
WARN_ON_ONCE(addr & ~PUD_MASK);
addr = round_up(addr + 1, PUD_SIZE);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
WARN_ON_ONCE(addr & ~PMD_MASK);
addr = round_up(addr + 1, PMD_SIZE);
continue;
}
if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
target_pmd = pti_user_pagetable_walk_pmd(addr);
if (WARN_ON(!target_pmd))
return;
/*
* Only clone present PMDs. This ensures only setting
* _PAGE_GLOBAL on present PMDs. This should only be
* called on well-known addresses anyway, so a non-
* present PMD would be a surprise.
*/
if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
return;
/*
* Setting 'target_pmd' below creates a mapping in both
* the user and kernel page tables. It is effectively
* global, so set it as global in both copies. Note:
* the X86_FEATURE_PGE check is not _required_ because
* the CPU ignores _PAGE_GLOBAL when PGE is not
* supported. The check keeps consistency with
* code that only sets this bit when supported.
*/
if (boot_cpu_has(X86_FEATURE_PGE))
*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
/*
* Copy the PMD. That is, the kernelmode and usermode
* tables will share the last-level page tables of this
* address range
*/
*target_pmd = *pmd;
addr = round_up(addr + 1, PMD_SIZE);
} else if (level == PTI_CLONE_PTE) {
/* Walk the page-table down to the pte level */
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
addr = round_up(addr + 1, PAGE_SIZE);
continue;
}
/* Only clone present PTEs */
if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
return;
/* Allocate PTE in the user page-table */
target_pte = pti_user_pagetable_walk_pte(addr, late_text);
if (WARN_ON(!target_pte))
return;
/* Set GLOBAL bit in both PTEs */
if (boot_cpu_has(X86_FEATURE_PGE))
*pte = pte_set_flags(*pte, _PAGE_GLOBAL);
/* Clone the PTE */
*target_pte = *pte;
addr = round_up(addr + 1, PAGE_SIZE);
} else {
BUG();
}
}
}
#ifdef CONFIG_X86_64
/*
* Clone a single p4d (i.e. a top-level entry on 4-level systems and a
* next-level entry on 5-level systems).
*/
static void __init pti_clone_p4d(unsigned long addr)
{
p4d_t *kernel_p4d, *user_p4d;
pgd_t *kernel_pgd;
user_p4d = pti_user_pagetable_walk_p4d(addr);
if (!user_p4d)
return;
kernel_pgd = pgd_offset_k(addr);
kernel_p4d = p4d_offset(kernel_pgd, addr);
*user_p4d = *kernel_p4d;
}
/*
* Clone the CPU_ENTRY_AREA and associated data into the user space visible
* page table.
*/
static void __init pti_clone_user_shared(void)
{
unsigned int cpu;
pti_clone_p4d(CPU_ENTRY_AREA_BASE);
for_each_possible_cpu(cpu) {
/*
* The SYSCALL64 entry code needs one word of scratch space
* in which to spill a register. It lives in the sp2 slot
* of the CPU's TSS.
*
* This is done for all possible CPUs during boot to ensure
* that it's propagated to all mms.
*/
unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
pte_t *target_pte;
target_pte = pti_user_pagetable_walk_pte(va, false);
if (WARN_ON(!target_pte))
return;
*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
}
}
#else /* CONFIG_X86_64 */
/*
* On 32 bit PAE systems with 1GB of Kernel address space there is only
* one pgd/p4d for the whole kernel. Cloning that would map the whole
* address space into the user page-tables, making PTI useless. So clone
* the page-table on the PMD level to prevent that.
*/
static void __init pti_clone_user_shared(void)
{
unsigned long start, end;
start = CPU_ENTRY_AREA_BASE;
end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
}
#endif /* CONFIG_X86_64 */
/*
* Clone the ESPFIX P4D into the user space visible page table
*/
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}
/*
* Clone the populated PMDs of the entry text and force it RO.
*/
static void pti_clone_entry_text(bool late)
{
pti_clone_pgtable((unsigned long) __entry_text_start,
(unsigned long) __entry_text_end,
PTI_LEVEL_KERNEL_IMAGE, late);
}
/*
* Global pages and PCIDs are both ways to make kernel TLB entries
* live longer, reduce TLB misses and improve kernel performance.
* But, leaving all kernel text Global makes it potentially accessible
* to Meltdown-style attacks which make it trivial to find gadgets or
* defeat KASLR.
*
* Only use global pages when it is really worth it.
*/
static inline bool pti_kernel_image_global_ok(void)
{
/*
* Systems with PCIDs get little benefit from global
* kernel text and are not worth the downsides.
*/
if (cpu_feature_enabled(X86_FEATURE_PCID))
return false;
/*
* Only do global kernel image for pti=auto. Do the most
* secure thing (not global) if pti=on specified.
*/
if (pti_mode != PTI_AUTO)
return false;
/*
* K8 may not tolerate the cleared _PAGE_RW on the userspace
* global kernel image pages. Do the safe thing (disable
* global kernel image). This is unlikely to ever be
* noticed because PTI is disabled by default on AMD CPUs.
*/
if (boot_cpu_has(X86_FEATURE_K8))
return false;
/*
* RANDSTRUCT derives its hardening benefits from the
* attacker's lack of knowledge about the layout of kernel
* data structures. Keep the kernel image non-global in
* cases where RANDSTRUCT is in use to help keep the layout a
* secret.
*/
if (IS_ENABLED(CONFIG_RANDSTRUCT))
return false;
return true;
}
/*
* For some configurations, map all of kernel text into the user page
* tables. This reduces TLB misses, especially on non-PCID systems.
*/
static void pti_clone_kernel_text(void)
{
/*
* rodata is part of the kernel image and is normally
* readable on the filesystem or on the web. But, do not
* clone the areas past rodata, they might contain secrets.
*/
unsigned long start = PFN_ALIGN(_text);
unsigned long end_clone = (unsigned long)__end_rodata_aligned;
unsigned long end_global = PFN_ALIGN((unsigned long)_etext);
if (!pti_kernel_image_global_ok())
return;
pr_debug("mapping partial kernel image into user address space\n");
/*
* Note that this will undo _some_ of the work that
* pti_set_kernel_image_nonglobal() did to clear the
* global bit.
*/
pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
/*
* pti_clone_pgtable() will set the global bit in any PMDs
* that it clones, but we also need to get any PTEs in
* the last level for areas that are not huge-page-aligned.
*/
/* Set the global bit for normal non-__init kernel text: */
set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
static void pti_set_kernel_image_nonglobal(void)
{
/*
* The identity map is created with PMDs, regardless of the
* actual length of the kernel. We need to clear
* _PAGE_GLOBAL up to a PMD boundary, not just to the end
* of the image.
*/
unsigned long start = PFN_ALIGN(_text);
unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);
/*
* This clears _PAGE_GLOBAL from the entire kernel image.
* pti_clone_kernel_text() may put _PAGE_GLOBAL back for
* areas that are mapped to userspace.
*/
set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
/*
* Initialize kernel page table isolation
*/
void __init pti_init(void)
{
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
pr_info("enabled\n");
#ifdef CONFIG_X86_32
/*
* We check for X86_FEATURE_PCID here. But the init-code will
* clear the feature flag on 32 bit because the feature is not
* supported on 32 bit anyway. To print the warning we need to
* check with cpuid directly again.
*/
if (cpuid_ecx(0x1) & BIT(17)) {
/* Use printk to work around pr_fmt() */
printk(KERN_WARNING "\n");
printk(KERN_WARNING "************************************************************\n");
printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
printk(KERN_WARNING "** **\n");
printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
printk(KERN_WARNING "** Your performance will increase dramatically if you **\n");
printk(KERN_WARNING "** switch to a 64-bit kernel! **\n");
printk(KERN_WARNING "** **\n");
printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
printk(KERN_WARNING "************************************************************\n");
}
#endif
pti_clone_user_shared();
/* Undo all global bits from the init pagetables in head_64.S: */
pti_set_kernel_image_nonglobal();
/* Replace some of the global bits just for shared entry text: */
/*
* This is very early in boot. Device and Late initcalls can do
* modprobe before free_initmem() and mark_readonly(). This
* pti_clone_entry_text() allows those user-mode-helpers to function,
* but notably the text is still RW.
*/
pti_clone_entry_text(false);
pti_setup_espfix64();
pti_setup_vsyscall();
}
/*
* Finalize the kernel mappings in the userspace page-table. Some of the
* mappings for the kernel image might have changed since pti_init()
* cloned them. This is because parts of the kernel image have been
* mapped RO and/or NX. These changes need to be cloned again to the
* userspace page-table.
*/
void pti_finalize(void)
{
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
/*
* This is after free_initmem() (all initcalls are done) and we've done
* mark_readonly(). Text is now NX which might've split some PMDs
* relative to the early clone.
*/
pti_clone_entry_text(true);
pti_clone_kernel_text();
debug_checkwx_user();
}
// SPDX-License-Identifier: GPL-2.0
/*
* fs/sysfs/group.c - Operations for adding/removing multiple files at once.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2013 Greg Kroah-Hartman
* Copyright (c) 2013 The Linux Foundation
*/
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/err.h>
#include <linux/fs.h>
#include "sysfs.h"
static void remove_files(struct kernfs_node *parent,
const struct attribute_group *grp)
{
struct attribute *const *attr;
const struct bin_attribute *const *bin_attr;
if (grp->attrs)
for (attr = grp->attrs; *attr; attr++)
kernfs_remove_by_name(parent, (*attr)->name);
if (grp->bin_attrs)
for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++)
kernfs_remove_by_name(parent, (*bin_attr)->attr.name);
}
static umode_t __first_visible(const struct attribute_group *grp, struct kobject *kobj)
{
if (grp->attrs && grp->attrs[0] && grp->is_visible)
return grp->is_visible(kobj, grp->attrs[0], 0);
if (grp->bin_attrs && grp->bin_attrs[0] && grp->is_bin_visible)
return grp->is_bin_visible(kobj, grp->bin_attrs[0], 0);
return 0;
}
static int create_files(struct kernfs_node *parent, struct kobject *kobj,
kuid_t uid, kgid_t gid,
const struct attribute_group *grp, int update)
{
struct attribute *const *attr;
const struct bin_attribute *const *bin_attr;
int error = 0, i;
if (grp->attrs) {
for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
umode_t mode = (*attr)->mode;
/*
* In update mode, we're changing the permissions or
* visibility. Do this by first removing then
* re-adding (if required) the file.
*/
if (update)
kernfs_remove_by_name(parent, (*attr)->name);
if (grp->is_visible) {
mode = grp->is_visible(kobj, *attr, i);
mode &= ~SYSFS_GROUP_INVISIBLE;
if (!mode)
continue;
}
WARN(mode & ~(SYSFS_PREALLOC | 0664),
"Attribute %s: Invalid permissions 0%o\n",
(*attr)->name, mode);
mode &= SYSFS_PREALLOC | 0664;
error = sysfs_add_file_mode_ns(parent, *attr, mode, uid,
gid, NULL);
if (unlikely(error))
break;
}
if (error) {
remove_files(parent, grp);
goto exit;
}
}
if (grp->bin_attrs) {
for (i = 0, bin_attr = grp->bin_attrs; *bin_attr; i++, bin_attr++) {
umode_t mode = (*bin_attr)->attr.mode;
size_t size = (*bin_attr)->size;
if (update)
kernfs_remove_by_name(parent, (*bin_attr)->attr.name);
if (grp->is_bin_visible) {
mode = grp->is_bin_visible(kobj, *bin_attr, i);
mode &= ~SYSFS_GROUP_INVISIBLE;
if (!mode)
continue;
}
if (grp->bin_size)
size = grp->bin_size(kobj, *bin_attr, i);
WARN(mode & ~(SYSFS_PREALLOC | 0664),
"Attribute %s: Invalid permissions 0%o\n",
(*bin_attr)->attr.name, mode);
mode &= SYSFS_PREALLOC | 0664;
error = sysfs_add_bin_file_mode_ns(parent, *bin_attr,
mode, size, uid, gid,
NULL);
if (error)
break;
}
if (error)
remove_files(parent, grp);
}
exit:
return error;
}
static int internal_create_group(struct kobject *kobj, int update,
const struct attribute_group *grp)
{
struct kernfs_node *kn;
kuid_t uid;
kgid_t gid;
int error;
if (WARN_ON(!kobj || (!update && !kobj->sd)))
return -EINVAL;
/* Updates may happen before the object has been instantiated */
if (unlikely(update && !kobj->sd))
return -EINVAL;
if (!grp->attrs && !grp->bin_attrs) {
pr_debug("sysfs: (bin_)attrs not set by subsystem for group: %s/%s, skipping\n",
kobj->name, grp->name ?: "");
return 0;
}
kobject_get_ownership(kobj, &uid, &gid);
if (grp->name) {
umode_t mode = __first_visible(grp, kobj);
if (mode & SYSFS_GROUP_INVISIBLE)
mode = 0;
else
mode = S_IRWXU | S_IRUGO | S_IXUGO;
if (update) {
kn = kernfs_find_and_get(kobj->sd, grp->name);
if (!kn) {
pr_debug("attr grp %s/%s not created yet\n",
kobj->name, grp->name);
/* may have been invisible prior to this update */
update = 0;
} else if (!mode) {
sysfs_remove_group(kobj, grp);
kernfs_put(kn);
return 0;
}
}
if (!update) {
if (!mode)
return 0;
kn = kernfs_create_dir_ns(kobj->sd, grp->name, mode,
uid, gid, kobj, NULL);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(kobj->sd, grp->name);
return PTR_ERR(kn);
}
}
} else {
kn = kobj->sd;
}
kernfs_get(kn);
error = create_files(kn, kobj, uid, gid, grp, update);
if (error) {
if (grp->name)
kernfs_remove(kn);
}
kernfs_put(kn);
if (grp->name && update)
kernfs_put(kn);
return error;
}
/**
* sysfs_create_group - given a directory kobject, create an attribute group
* @kobj: The kobject to create the group on
* @grp: The attribute group to create
*
* This function creates a group for the first time. It will explicitly
* warn and error if any of the attribute files being created already exist.
*
* Returns 0 on success or error code on failure.
*/
int sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return internal_create_group(kobj, 0, grp);
}
EXPORT_SYMBOL_GPL(sysfs_create_group);
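/*
* Illustrative sketch (not part of this file): a caller-side pattern for
* sysfs_create_group(). The foo attribute, its show() routine, and the
* "demo" group name are made-up placeholders.
*
*	static ssize_t foo_show(struct kobject *kobj,
*				struct kobj_attribute *attr, char *buf)
*	{
*		return sysfs_emit(buf, "%d\n", 42);
*	}
*	static struct kobj_attribute foo_attr = __ATTR_RO(foo);
*
*	static struct attribute *demo_attrs[] = {
*		&foo_attr.attr,
*		NULL,
*	};
*	static const struct attribute_group demo_group = {
*		.name	= "demo",
*		.attrs	= demo_attrs,
*	};
*
*	ret = sysfs_create_group(kobj, &demo_group);
*	...
*	sysfs_remove_group(kobj, &demo_group);
*/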
static int internal_create_groups(struct kobject *kobj, int update,
const struct attribute_group **groups)
{
int error = 0;
int i;
if (!groups)
return 0;
for (i = 0; groups[i]; i++) {
error = internal_create_group(kobj, update, groups[i]);
if (error) {
while (--i >= 0)
sysfs_remove_group(kobj, groups[i]);
break;
}
}
return error;
}
/**
* sysfs_create_groups - given a directory kobject, create a bunch of attribute groups
* @kobj: The kobject to create the group on
* @groups: The attribute groups to create, NULL terminated
*
* This function creates a bunch of attribute groups. If an error occurs when
* creating a group, all previously created groups will be removed, unwinding
* everything back to the original state when this function was called.
* It will explicitly warn and error if any of the attribute files being
* created already exist.
*
* Returns 0 on success or error code from sysfs_create_group on failure.
*/
int sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups)
{
return internal_create_groups(kobj, 0, groups);
}
EXPORT_SYMBOL_GPL(sysfs_create_groups);
/**
* sysfs_update_groups - given a directory kobject, create a bunch of attribute groups
* @kobj: The kobject to update the group on
* @groups: The attribute groups to update, NULL terminated
*
* This function updates a bunch of attribute groups. If an error occurs when
* updating a group, all previously updated groups will be removed together
* with already existing (not updated) attributes.
*
* Returns 0 on success or error code from sysfs_update_group on failure.
*/
int sysfs_update_groups(struct kobject *kobj,
const struct attribute_group **groups)
{
return internal_create_groups(kobj, 1, groups);
}
EXPORT_SYMBOL_GPL(sysfs_update_groups);
/**
* sysfs_update_group - given a directory kobject, update an attribute group
* @kobj: The kobject to update the group on
* @grp: The attribute group to update
*
* This function updates an attribute group. Unlike
* sysfs_create_group(), it will explicitly not warn or error if any
* of the attribute files being created already exist. Furthermore,
* if the visibility of the files has changed through the is_visible()
* callback, it will update the permissions and add or remove the
* relevant files. Changing a group's name (subdirectory name under
* kobj's directory in sysfs) is not allowed.
*
* The primary use for this function is to call it after making a change
* that affects group visibility.
*
* Returns 0 on success or error code on failure.
*/
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return internal_create_group(kobj, 1, grp);
}
EXPORT_SYMBOL_GPL(sysfs_update_group);
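/*
* Illustrative sketch (not part of this file): toggling visibility through
* an is_visible() callback and re-applying it with sysfs_update_group().
* demo_feature_present, demo_attrs and the callback name are hypothetical.
*
*	static umode_t demo_attr_visible(struct kobject *kobj,
*					 struct attribute *attr, int n)
*	{
*		return demo_feature_present ? attr->mode : 0;
*	}
*	static const struct attribute_group demo_group = {
*		.is_visible	= demo_attr_visible,
*		.attrs		= demo_attrs,
*	};
*
*	After demo_feature_present changes, refresh the files:
*	ret = sysfs_update_group(kobj, &demo_group);
*/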
/**
* sysfs_remove_group: remove a group from a kobject
* @kobj: kobject to remove the group from
* @grp: group to remove
*
* This function removes a group of attributes from a kobject. The attributes
* previously have to have been created for this group, otherwise it will fail.
*/
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp)
{
struct kernfs_node *parent = kobj->sd;
struct kernfs_node *kn;
if (grp->name) {
kn = kernfs_find_and_get(parent, grp->name);
if (!kn) {
pr_debug("sysfs group '%s' not found for kobject '%s'\n",
grp->name, kobject_name(kobj));
return;
}
} else {
kn = parent;
kernfs_get(kn);
}
remove_files(kn, grp);
if (grp->name)
kernfs_remove(kn);
kernfs_put(kn);
}
EXPORT_SYMBOL_GPL(sysfs_remove_group);
/**
* sysfs_remove_groups - remove a list of groups
*
* @kobj: The kobject for the groups to be removed from
* @groups: NULL terminated list of groups to be removed
*
* If groups is not NULL, remove the specified groups from the kobject.
*/
void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups)
{
int i;
if (!groups)
return;
for (i = 0; groups[i]; i++)
sysfs_remove_group(kobj, groups[i]);
}
EXPORT_SYMBOL_GPL(sysfs_remove_groups);
/**
* sysfs_merge_group - merge files into a pre-existing named attribute group.
* @kobj: The kobject containing the group.
* @grp: The files to create and the attribute group they belong to.
*
* This function returns an error if the group doesn't exist, the .name field is
* NULL or any of the files already exist in that group, in which case none of
* the new files are created.
*/
int sysfs_merge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
struct kernfs_node *parent;
kuid_t uid;
kgid_t gid;
int error = 0;
struct attribute *const *attr;
int i;
parent = kernfs_find_and_get(kobj->sd, grp->name);
if (!parent)
return -ENOENT;
kobject_get_ownership(kobj, &uid, &gid);
for ((i = 0, attr = grp->attrs); *attr && !error; (++i, ++attr))
error = sysfs_add_file_mode_ns(parent, *attr, (*attr)->mode,
uid, gid, NULL);
if (error) {
while (--i >= 0)
kernfs_remove_by_name(parent, (*--attr)->name);
}
kernfs_put(parent);
return error;
}
EXPORT_SYMBOL_GPL(sysfs_merge_group);
/**
* sysfs_unmerge_group - remove files from a pre-existing named attribute group.
* @kobj: The kobject containing the group.
* @grp: The files to remove and the attribute group they belong to.
*/
void sysfs_unmerge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
struct kernfs_node *parent;
struct attribute *const *attr;
parent = kernfs_find_and_get(kobj->sd, grp->name);
if (parent) {
for (attr = grp->attrs; *attr; ++attr)
kernfs_remove_by_name(parent, (*attr)->name);
kernfs_put(parent);
}
}
EXPORT_SYMBOL_GPL(sysfs_unmerge_group);
/**
* sysfs_add_link_to_group - add a symlink to an attribute group.
* @kobj: The kobject containing the group.
* @group_name: The name of the group.
* @target: The target kobject of the symlink to create.
* @link_name: The name of the symlink to create.
*/
int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name)
{
struct kernfs_node *parent;
int error = 0;
parent = kernfs_find_and_get(kobj->sd, group_name);
if (!parent)
return -ENOENT;
error = sysfs_create_link_sd(parent, target, link_name);
kernfs_put(parent);
return error;
}
EXPORT_SYMBOL_GPL(sysfs_add_link_to_group);
/**
* sysfs_remove_link_from_group - remove a symlink from an attribute group.
* @kobj: The kobject containing the group.
* @group_name: The name of the group.
* @link_name: The name of the symlink to remove.
*/
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name)
{
struct kernfs_node *parent;
parent = kernfs_find_and_get(kobj->sd, group_name);
if (parent) {
kernfs_remove_by_name(parent, link_name);
kernfs_put(parent);
}
}
EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group);
/**
* compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing
* to a group or an attribute
* @kobj: The kobject containing the group.
* @target_kobj: The target kobject.
* @target_name: The name of the target group or attribute.
* @symlink_name: The name of the symlink file (target_name will be
* considered if symlink_name is NULL).
*/
int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
struct kobject *target_kobj,
const char *target_name,
const char *symlink_name)
{
struct kernfs_node *target;
struct kernfs_node *entry;
struct kernfs_node *link;
/*
* We don't own @target_kobj and it may be removed at any time.
* Synchronize using sysfs_symlink_target_lock. See sysfs_remove_dir()
* for details.
*/
spin_lock(&sysfs_symlink_target_lock);
target = target_kobj->sd;
if (target)
kernfs_get(target);
spin_unlock(&sysfs_symlink_target_lock);
if (!target)
return -ENOENT;
entry = kernfs_find_and_get(target, target_name);
if (!entry) {
kernfs_put(target);
return -ENOENT;
}
if (!symlink_name)
symlink_name = target_name;
link = kernfs_create_link(kobj->sd, symlink_name, entry);
if (PTR_ERR(link) == -EEXIST)
sysfs_warn_dup(kobj->sd, symlink_name);
kernfs_put(entry);
kernfs_put(target);
return PTR_ERR_OR_ZERO(link);
}
EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj);
static int sysfs_group_attrs_change_owner(struct kobject *kobj,
struct kernfs_node *grp_kn,
const struct attribute_group *grp,
struct iattr *newattrs)
{
struct kernfs_node *kn;
int error, i;
umode_t mode;
if (grp->attrs) {
struct attribute *const *attr;
for (i = 0, attr = grp->attrs; *attr; i++, attr++) {
if (grp->is_visible) {
mode = grp->is_visible(kobj, *attr, i);
if (mode & SYSFS_GROUP_INVISIBLE)
break;
if (!mode)
continue;
}
kn = kernfs_find_and_get(grp_kn, (*attr)->name);
if (!kn)
return -ENOENT;
error = kernfs_setattr(kn, newattrs);
kernfs_put(kn);
if (error)
return error;
}
}
if (grp->bin_attrs) {
const struct bin_attribute *const *bin_attr;
for (i = 0, bin_attr = grp->bin_attrs; *bin_attr; i++, bin_attr++) {
if (grp->is_bin_visible) {
mode = grp->is_bin_visible(kobj, *bin_attr, i);
if (mode & SYSFS_GROUP_INVISIBLE)
break;
if (!mode)
continue;
}
kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name);
if (!kn)
return -ENOENT;
error = kernfs_setattr(kn, newattrs);
kernfs_put(kn);
if (error)
return error;
}
}
return 0;
}
/**
* sysfs_group_change_owner - change owner of an attribute group.
* @kobj: The kobject containing the group.
* @grp: The attribute group.
* @kuid: new owner's kuid
* @kgid: new owner's kgid
*
* Returns 0 on success or error code on failure.
*/
int sysfs_group_change_owner(struct kobject *kobj,
const struct attribute_group *grp, kuid_t kuid,
kgid_t kgid)
{
struct kernfs_node *grp_kn;
int error;
struct iattr newattrs = {
.ia_valid = ATTR_UID | ATTR_GID,
.ia_uid = kuid,
.ia_gid = kgid,
};
if (!kobj->state_in_sysfs)
return -EINVAL;
if (grp->name) {
grp_kn = kernfs_find_and_get(kobj->sd, grp->name);
} else {
kernfs_get(kobj->sd);
grp_kn = kobj->sd;
}
if (!grp_kn)
return -ENOENT;
error = kernfs_setattr(grp_kn, &newattrs);
if (!error)
error = sysfs_group_attrs_change_owner(kobj, grp_kn, grp, &newattrs);
kernfs_put(grp_kn);
return error;
}
EXPORT_SYMBOL_GPL(sysfs_group_change_owner);
/**
* sysfs_groups_change_owner - change owner of a set of attribute groups.
* @kobj: The kobject containing the groups.
* @groups: The attribute groups.
* @kuid: new owner's kuid
* @kgid: new owner's kgid
*
* Returns 0 on success or error code on failure.
*/
int sysfs_groups_change_owner(struct kobject *kobj,
const struct attribute_group **groups,
kuid_t kuid, kgid_t kgid)
{
int error = 0, i;
if (!kobj->state_in_sysfs)
return -EINVAL;
if (!groups)
return 0;
for (i = 0; groups[i]; i++) {
error = sysfs_group_change_owner(kobj, groups[i], kuid, kgid);
if (error)
break;
}
return error;
}
EXPORT_SYMBOL_GPL(sysfs_groups_change_owner);
// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"
#include <linux/sched/cputime.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <trace/events/cgroup.h>
static DEFINE_SPINLOCK(rstat_base_lock);
static DEFINE_PER_CPU(struct llist_head, rstat_backlog_list);
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
/*
* Determines whether a given css can participate in rstat.
* css's that are cgroup::self use rstat for base stats.
* Other css's associated with a subsystem use rstat only when
* they define the ss->css_rstat_flush callback.
*/
static inline bool css_uses_rstat(struct cgroup_subsys_state *css)
{
return css_is_self(css) || css->ss->css_rstat_flush != NULL;
}
static struct css_rstat_cpu *css_rstat_cpu(
struct cgroup_subsys_state *css, int cpu)
{
return per_cpu_ptr(css->rstat_cpu, cpu);
}
static struct cgroup_rstat_base_cpu *cgroup_rstat_base_cpu(
struct cgroup *cgrp, int cpu)
{
return per_cpu_ptr(cgrp->rstat_base_cpu, cpu);
}
static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
{
if (ss)
return &ss->rstat_ss_lock;
return &rstat_base_lock;
}
static inline struct llist_head *ss_lhead_cpu(struct cgroup_subsys *ss, int cpu)
{
if (ss)
return per_cpu_ptr(ss->lhead, cpu);
return per_cpu_ptr(&rstat_backlog_list, cpu);
}
/**
* css_rstat_updated - keep track of updated rstat_cpu
* @css: target cgroup subsystem state
* @cpu: cpu on which rstat_cpu was updated
*
* Atomically inserts the css in the ss's llist for the given cpu. This is
* reentrant safe i.e. safe against softirq, hardirq and nmi. The ss's llist
* will be processed at the flush time to create the update tree.
*
* NOTE: if the user needs the guarantee that the updater either add itself in
* the lockless list or the concurrent flusher flushes its updated stats, a
* memory barrier is needed before the call to css_rstat_updated() i.e. a
* barrier after updating the per-cpu stats and before calling
* css_rstat_updated().
*/
__bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
{
struct llist_head *lhead;
struct css_rstat_cpu *rstatc;
struct css_rstat_cpu __percpu *rstatc_pcpu;
struct llist_node *self;
/*
* Since bpf programs can call this function, prevent access to
* uninitialized rstat pointers.
*/
if (!css_uses_rstat(css))
return;
lockdep_assert_preemption_disabled();
/*
* For archs without nmi safe cmpxchg or percpu ops support, ignore
* the requests from nmi context.
*/
if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
!IS_ENABLED(CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS)) && in_nmi())
return;
rstatc = css_rstat_cpu(css, cpu);
/*
* If already on list return. This check is racy and smp_mb() is needed
* to pair it with the smp_mb() in css_process_update_tree() if the
* guarantee that the updated stats are visible to concurrent flusher is
* needed.
*/
if (llist_on_list(&rstatc->lnode))
return;
/*
* This function can be re-entered by irqs and nmis for the same cgroup
* and may try to insert the same per-cpu lnode into the llist. Note
* that llist_add() does not protect against such scenarios.
*
* To protect against such stacked contexts of irqs/nmis, we use the
* fact that lnode points to itself when not on a list and then use
* this_cpu_cmpxchg() to atomically set to NULL to select the winner
* which will call llist_add(). The losers can assume the insertion is
* successful and the winner will eventually add the per-cpu lnode to
* the llist.
*/
self = &rstatc->lnode;
rstatc_pcpu = css->rstat_cpu;
if (this_cpu_cmpxchg(rstatc_pcpu->lnode.next, self, NULL) != self)
return;
lhead = ss_lhead_cpu(css->ss, cpu);
llist_add(&rstatc->lnode, lhead);
}
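/*
* Illustrative sketch (not part of this file) of the ordering note in the
* comment above: an updater that needs the "insert or be flushed"
* guarantee publishes its per-cpu update before calling
* css_rstat_updated(). The my_stats counter is hypothetical.
*
*	this_cpu_add(my_stats->events, 1);
*	smp_mb();	 order the stat update before the on-list check
*	css_rstat_updated(css, smp_processor_id());
*/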
static void __css_process_update_tree(struct cgroup_subsys_state *css, int cpu)
{
/* put @css and all ancestors on the corresponding updated lists */
while (true) {
struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
struct cgroup_subsys_state *parent = css->parent;
struct css_rstat_cpu *prstatc;
/*
* Both additions and removals are bottom-up. If a cgroup
* is already in the tree, all ancestors are.
*/
if (rstatc->updated_next)
break;
/* Root has no parent to link it to, but mark it busy */
if (!parent) {
rstatc->updated_next = css;
break;
}
prstatc = css_rstat_cpu(parent, cpu);
rstatc->updated_next = prstatc->updated_children;
prstatc->updated_children = css;
css = parent;
}
}
static void css_process_update_tree(struct cgroup_subsys *ss, int cpu)
{
struct llist_head *lhead = ss_lhead_cpu(ss, cpu);
struct llist_node *lnode;
while ((lnode = llist_del_first_init(lhead))) {
struct css_rstat_cpu *rstatc;
/*
* smp_mb() is needed here (more specifically in between
* init_llist_node() and per-cpu stats flushing) if the
* guarantee is required by an rstat user where either the
* updater adds itself to the lockless list or the flusher
* flushes the stats updated by an updater that has observed
* it is already on the list. The corresponding barrier pair
* for this one should be placed before css_rstat_updated()
* by the user.
*
* For now there is no such user, so the barrier is not added
* here; if such a use-case arises, please add smp_mb() here.
*/
rstatc = container_of(lnode, struct css_rstat_cpu, lnode);
__css_process_update_tree(rstatc->owner, cpu);
}
}
/**
* css_rstat_push_children - push children css's into the given list
* @head: current head of the list (= subtree root)
* @child: first child of the root
* @cpu: target cpu
* Return: A new singly linked list of css's to be flushed
*
* Iteratively traverse down the css_rstat_cpu updated tree level by
* level and push all the parents first before their next level children
* into a singly linked list via the rstat_flush_next pointer built from the
* tail backward like "pushing" css's into a stack. The root is pushed by
* the caller.
*/
static struct cgroup_subsys_state *css_rstat_push_children(
struct cgroup_subsys_state *head,
struct cgroup_subsys_state *child, int cpu)
{
struct cgroup_subsys_state *cnext = child; /* Next head of child css level */
struct cgroup_subsys_state *ghead = NULL; /* Head of grandchild css level */
struct cgroup_subsys_state *parent, *grandchild;
struct css_rstat_cpu *crstatc;
child->rstat_flush_next = NULL;
/*
* The subsystem rstat lock must be held for the whole duration from
* here as the rstat_flush_next list is being constructed to when
* it is consumed later in css_rstat_flush().
*/
lockdep_assert_held(ss_rstat_lock(head->ss));
/*
* Notation: -> updated_next pointer
* => rstat_flush_next pointer
*
* Assuming the following sample updated_children lists:
* P: C1 -> C2 -> P
* C1: G11 -> G12 -> C1
* C2: G21 -> G22 -> C2
*
* After 1st iteration:
* head => C2 => C1 => NULL
* ghead => G21 => G11 => NULL
*
* After 2nd iteration:
* head => G12 => G11 => G22 => G21 => C2 => C1 => NULL
*/
next_level:
while (cnext) {
child = cnext;
cnext = child->rstat_flush_next;
parent = child->parent;
/* updated_next is parent cgroup terminated if !NULL */
while (child != parent) {
child->rstat_flush_next = head;
head = child;
crstatc = css_rstat_cpu(child, cpu);
grandchild = crstatc->updated_children;
if (grandchild != child) {
/* Push the grand child to the next level */
crstatc->updated_children = child;
grandchild->rstat_flush_next = ghead;
ghead = grandchild;
}
child = crstatc->updated_next;
crstatc->updated_next = NULL;
}
}
if (ghead) {
cnext = ghead;
ghead = NULL;
goto next_level;
}
return head;
}
/**
* css_rstat_updated_list - build a list of updated css's to be flushed
* @root: root of the css subtree to traverse
* @cpu: target cpu
* Return: A singly linked list of css's to be flushed
*
* Walks the updated rstat_cpu tree on @cpu from @root. During traversal,
* each returned css is unlinked from the updated tree.
*
* The only ordering guarantee is that, for a parent and a child pair
* covered by a given traversal, the child is before its parent in
* the list.
*
* Note that updated_children is self terminated and points to a list of
* child css's if not empty. Whereas updated_next is like a sibling link
* within the children list and terminated by the parent css. An exception
* here is the css root whose updated_next can be self terminated.
*/
static struct cgroup_subsys_state *css_rstat_updated_list(
struct cgroup_subsys_state *root, int cpu)
{
struct css_rstat_cpu *rstatc = css_rstat_cpu(root, cpu);
struct cgroup_subsys_state *head = NULL, *parent, *child;
css_process_update_tree(root->ss, cpu);
/* Return NULL if this subtree is not on-list */
if (!rstatc->updated_next)
return NULL;
/*
* Unlink @root from its parent. As the updated_children list is
* singly linked, we have to walk it to find the removal point.
*/
parent = root->parent;
if (parent) {
struct css_rstat_cpu *prstatc;
struct cgroup_subsys_state **nextp;
prstatc = css_rstat_cpu(parent, cpu);
nextp = &prstatc->updated_children;
while (*nextp != root) {
struct css_rstat_cpu *nrstatc;
nrstatc = css_rstat_cpu(*nextp, cpu);
WARN_ON_ONCE(*nextp == parent);
nextp = &nrstatc->updated_next;
}
*nextp = rstatc->updated_next;
}
rstatc->updated_next = NULL;
/* Push @root to the list first before pushing the children */
head = root;
root->rstat_flush_next = NULL;
child = rstatc->updated_children;
rstatc->updated_children = root;
if (child != root)
head = css_rstat_push_children(head, child, cpu);
return head;
}
/*
* A hook for bpf stat collectors to attach to and flush their stats.
* Together with providing bpf kfuncs for css_rstat_updated() and
* css_rstat_flush(), this enables a complete workflow where bpf progs that
* collect cgroup stats can integrate with rstat for efficient flushing.
*
* A static noinline declaration here could cause the compiler to optimize away
* the function. A global noinline declaration will keep the definition, but may
* optimize away the callsite. Therefore, __weak is needed to ensure that the
* call is still emitted, by telling the compiler that we don't know what the
* function might eventually be.
*/
__bpf_hook_start();
__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
struct cgroup *parent, int cpu)
{
}
__bpf_hook_end();
/*
* Helper functions for locking.
*
* This makes it easier to diagnose locking issues and contention in
* production environments. The parameter @cpu_in_loop indicates that the
* lock was released and re-taken when collecting data from the CPUs. The
* value -1 is used when obtaining the main lock else this is the CPU
* number processed last.
*/
static inline void __css_rstat_lock(struct cgroup_subsys_state *css,
int cpu_in_loop)
__acquires(ss_rstat_lock(css->ss))
{
struct cgroup *cgrp = css->cgroup;
spinlock_t *lock;
bool contended;
lock = ss_rstat_lock(css->ss);
contended = !spin_trylock_irq(lock);
if (contended) {
trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
spin_lock_irq(lock);
}
trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
}
static inline void __css_rstat_unlock(struct cgroup_subsys_state *css,
int cpu_in_loop)
__releases(ss_rstat_lock(css->ss))
{
struct cgroup *cgrp = css->cgroup;
spinlock_t *lock;
lock = ss_rstat_lock(css->ss);
trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
spin_unlock_irq(lock);
}
/**
* css_rstat_flush - flush stats in @css's rstat subtree
* @css: target cgroup subsystem state
*
* Collect all per-cpu stats in @css's subtree into the global counters
* and propagate them upwards. After this function returns, all rstat
* nodes in the subtree have up-to-date ->stat.
*
* This also gets all rstat nodes in the subtree including @css off the
* ->updated_children lists.
*
* This function may block.
*/
__bpf_kfunc void css_rstat_flush(struct cgroup_subsys_state *css)
{
int cpu;
bool is_self = css_is_self(css);
/*
* Since bpf programs can call this function, prevent access to
* uninitialized rstat pointers.
*/
if (!css_uses_rstat(css))
return;
might_sleep();
for_each_possible_cpu(cpu) {
struct cgroup_subsys_state *pos;
/* Reacquire for each CPU to avoid disabling IRQs too long */
__css_rstat_lock(css, cpu);
pos = css_rstat_updated_list(css, cpu);
for (; pos; pos = pos->rstat_flush_next) {
if (is_self) {
cgroup_base_stat_flush(pos->cgroup, cpu);
bpf_rstat_flush(pos->cgroup,
cgroup_parent(pos->cgroup), cpu);
} else
pos->ss->css_rstat_flush(pos, cpu);
}
__css_rstat_unlock(css, cpu);
if (!cond_resched())
cpu_relax();
}
}
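/*
* Illustrative sketch (not part of this file): a reader that wants
* up-to-date base stats for a cgroup flushes the cgroup's self css first.
* The locking context required by real callers is omitted here.
*
*	css_rstat_flush(&cgrp->self);
*	then read the aggregated totals from cgrp->bstat
*/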
int css_rstat_init(struct cgroup_subsys_state *css)
{
struct cgroup *cgrp = css->cgroup;
int cpu;
bool is_self = css_is_self(css);
if (is_self) {
/* the root cgrp has rstat_base_cpu preallocated */
if (!cgrp->rstat_base_cpu) {
cgrp->rstat_base_cpu = alloc_percpu(struct cgroup_rstat_base_cpu);
if (!cgrp->rstat_base_cpu)
return -ENOMEM;
}
} else if (css->ss->css_rstat_flush == NULL)
return 0;
/* the root cgrp's self css has rstat_cpu preallocated */
if (!css->rstat_cpu) {
css->rstat_cpu = alloc_percpu(struct css_rstat_cpu);
if (!css->rstat_cpu) {
if (is_self)
free_percpu(cgrp->rstat_base_cpu);
return -ENOMEM;
}
}
/* ->updated_children list is self terminated */
for_each_possible_cpu(cpu) {
struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
rstatc->owner = rstatc->updated_children = css;
init_llist_node(&rstatc->lnode);
if (is_self) {
struct cgroup_rstat_base_cpu *rstatbc;
rstatbc = cgroup_rstat_base_cpu(cgrp, cpu);
u64_stats_init(&rstatbc->bsync);
}
}
return 0;
}
void css_rstat_exit(struct cgroup_subsys_state *css)
{
int cpu;
if (!css_uses_rstat(css))
return;
if (!css->rstat_cpu)
return;
css_rstat_flush(css);
/* sanity check */
for_each_possible_cpu(cpu) {
struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
if (WARN_ON_ONCE(rstatc->updated_children != css) ||
WARN_ON_ONCE(rstatc->updated_next))
return;
}
if (css_is_self(css)) {
struct cgroup *cgrp = css->cgroup;
free_percpu(cgrp->rstat_base_cpu);
cgrp->rstat_base_cpu = NULL;
}
free_percpu(css->rstat_cpu);
css->rstat_cpu = NULL;
}
/**
* ss_rstat_init - subsystem-specific rstat initialization
* @ss: target subsystem
*
* If @ss is NULL, the static locks associated with the base stats
* are initialized. If @ss is non-NULL, the subsystem-specific locks
* are initialized.
*/
int __init ss_rstat_init(struct cgroup_subsys *ss)
{
int cpu;
if (ss) {
ss->lhead = alloc_percpu(struct llist_head);
if (!ss->lhead)
return -ENOMEM;
}
spin_lock_init(ss_rstat_lock(ss));
for_each_possible_cpu(cpu)
init_llist_head(ss_lhead_cpu(ss, cpu));
return 0;
}
/*
* Functions for cgroup basic resource statistics implemented on top of
* rstat.
*/
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
struct cgroup_base_stat *src_bstat)
{
dst_bstat->cputime.utime += src_bstat->cputime.utime;
dst_bstat->cputime.stime += src_bstat->cputime.stime;
dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
#endif
dst_bstat->ntime += src_bstat->ntime;
}
static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
struct cgroup_base_stat *src_bstat)
{
dst_bstat->cputime.utime -= src_bstat->cputime.utime;
dst_bstat->cputime.stime -= src_bstat->cputime.stime;
dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
#endif
dst_bstat->ntime -= src_bstat->ntime;
}
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
struct cgroup_rstat_base_cpu *rstatbc = cgroup_rstat_base_cpu(cgrp, cpu);
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup_rstat_base_cpu *prstatbc;
struct cgroup_base_stat delta;
unsigned seq;
/* Root-level stats are sourced from system-wide CPU stats */
if (!parent)
return;
/* fetch the current per-cpu values */
do {
seq = __u64_stats_fetch_begin(&rstatbc->bsync);
delta = rstatbc->bstat;
} while (__u64_stats_fetch_retry(&rstatbc->bsync, seq));
/* propagate per-cpu delta to cgroup and per-cpu global statistics */
cgroup_base_stat_sub(&delta, &rstatbc->last_bstat);
cgroup_base_stat_add(&cgrp->bstat, &delta);
cgroup_base_stat_add(&rstatbc->last_bstat, &delta);
cgroup_base_stat_add(&rstatbc->subtree_bstat, &delta);
/* propagate cgroup and per-cpu global delta to parent (unless that's root) */
if (cgroup_parent(parent)) {
delta = cgrp->bstat;
cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
cgroup_base_stat_add(&parent->bstat, &delta);
cgroup_base_stat_add(&cgrp->last_bstat, &delta);
delta = rstatbc->subtree_bstat;
prstatbc = cgroup_rstat_base_cpu(parent, cpu);
cgroup_base_stat_sub(&delta, &rstatbc->last_subtree_bstat);
cgroup_base_stat_add(&prstatbc->subtree_bstat, &delta);
cgroup_base_stat_add(&rstatbc->last_subtree_bstat, &delta);
}
}
static struct cgroup_rstat_base_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
{
struct cgroup_rstat_base_cpu *rstatbc;
rstatbc = get_cpu_ptr(cgrp->rstat_base_cpu);
*flags = u64_stats_update_begin_irqsave(&rstatbc->bsync);
return rstatbc;
}
static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
struct cgroup_rstat_base_cpu *rstatbc,
unsigned long flags)
{
u64_stats_update_end_irqrestore(&rstatbc->bsync, flags);
css_rstat_updated(&cgrp->self, smp_processor_id());
put_cpu_ptr(rstatbc);
}
void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
struct cgroup_rstat_base_cpu *rstatbc;
unsigned long flags;
rstatbc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
rstatbc->bstat.cputime.sum_exec_runtime += delta_exec;
cgroup_base_stat_cputime_account_end(cgrp, rstatbc, flags);
}
void __cgroup_account_cputime_field(struct cgroup *cgrp,
enum cpu_usage_stat index, u64 delta_exec)
{
struct cgroup_rstat_base_cpu *rstatbc;
unsigned long flags;
rstatbc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
switch (index) {
case CPUTIME_NICE:
rstatbc->bstat.ntime += delta_exec;
fallthrough;
case CPUTIME_USER:
rstatbc->bstat.cputime.utime += delta_exec;
break;
case CPUTIME_SYSTEM:
case CPUTIME_IRQ:
case CPUTIME_SOFTIRQ:
rstatbc->bstat.cputime.stime += delta_exec;
break;
#ifdef CONFIG_SCHED_CORE
case CPUTIME_FORCEIDLE:
rstatbc->bstat.forceidle_sum += delta_exec;
break;
#endif
default:
break;
}
cgroup_base_stat_cputime_account_end(cgrp, rstatbc, flags);
}
/*
* compute the cputime for the root cgroup by getting the per cpu data
* at a global level, then categorizing the fields in a manner consistent
* with how it is done by __cgroup_account_cputime_field for each bit of
* cpu time attributed to a cgroup.
*/
static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
{
struct task_cputime *cputime = &bstat->cputime;
int i;
memset(bstat, 0, sizeof(*bstat));
for_each_possible_cpu(i) {
struct kernel_cpustat kcpustat;
u64 *cpustat = kcpustat.cpustat;
u64 user = 0;
u64 sys = 0;
kcpustat_cpu_fetch(&kcpustat, i);
user += cpustat[CPUTIME_USER];
user += cpustat[CPUTIME_NICE];
cputime->utime += user;
sys += cpustat[CPUTIME_SYSTEM];
sys += cpustat[CPUTIME_IRQ];
sys += cpustat[CPUTIME_SOFTIRQ];
cputime->stime += sys;
cputime->sum_exec_runtime += user;
cputime->sum_exec_runtime += sys;
#ifdef CONFIG_SCHED_CORE
bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
#endif
bstat->ntime += cpustat[CPUTIME_NICE];
}
}
static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat *bstat)
{
#ifdef CONFIG_SCHED_CORE
u64 forceidle_time = bstat->forceidle_sum;
do_div(forceidle_time, NSEC_PER_USEC);
seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
#endif
}
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
struct cgroup_base_stat bstat;
if (cgroup_parent(cgrp)) {
css_rstat_flush(&cgrp->self);
__css_rstat_lock(&cgrp->self, -1);
bstat = cgrp->bstat;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
&bstat.cputime.utime, &bstat.cputime.stime);
__css_rstat_unlock(&cgrp->self, -1);
} else {
root_cgroup_cputime(&bstat);
}
do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC);
do_div(bstat.cputime.utime, NSEC_PER_USEC);
do_div(bstat.cputime.stime, NSEC_PER_USEC);
do_div(bstat.ntime, NSEC_PER_USEC);
seq_printf(seq, "usage_usec %llu\n"
"user_usec %llu\n"
"system_usec %llu\n"
"nice_usec %llu\n",
bstat.cputime.sum_exec_runtime,
bstat.cputime.utime,
bstat.cputime.stime,
bstat.ntime);
cgroup_force_idle_show(seq, &bstat);
}
/* Add bpf kfuncs for css_rstat_updated() and css_rstat_flush() */
BTF_KFUNCS_START(bpf_rstat_kfunc_ids)
BTF_ID_FLAGS(func, css_rstat_updated)
BTF_ID_FLAGS(func, css_rstat_flush, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_rstat_kfunc_ids)
static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_rstat_kfunc_ids,
};
static int __init bpf_rstat_kfunc_init(void)
{
return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
&bpf_rstat_kfunc_set);
}
late_initcall(bpf_rstat_kfunc_init);
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <linux/bitfield.h>
#include <xen/xen.h>
#include <asm/fpu/api.h>
#include <asm/fred.h>
#include <asm/sev.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
#include <asm/insn-eval.h>
#include <asm/sgx.h>
static inline unsigned long *pt_regs_nr(struct pt_regs *regs, int nr)
{
int reg_offset = pt_regs_offset(regs, nr);
static unsigned long __dummy;
if (WARN_ON_ONCE(reg_offset < 0))
return &__dummy;
return (unsigned long *)((unsigned long)regs + reg_offset);
}
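/* ->fixup holds an offset relative to its own location; resolve it to an absolute address. */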
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}
static bool ex_handler_default(const struct exception_table_entry *e,
struct pt_regs *regs)
{
	if (e->data & EX_FLAG_CLEAR_AX)
		regs->ax = 0;
	if (e->data & EX_FLAG_CLEAR_DX)
		regs->dx = 0;
regs->ip = ex_fixup_addr(e);
return true;
}
/*
* This is the *very* rare case where we do a "load_unaligned_zeropad()"
* and it's a page crosser into a non-existent page.
*
* This happens when we optimistically load a pathname a word-at-a-time
* and the name is less than the full word and the next page is not
* mapped. Typically that only happens for CONFIG_DEBUG_PAGEALLOC.
*
 * NOTE! The faulting instruction is always a 'mov mem,reg' load of
 * size 'long', and the exception fixup must always point to right
* after the instruction.
*/
static bool ex_handler_zeropad(const struct exception_table_entry *e,
struct pt_regs *regs,
unsigned long fault_addr)
{
struct insn insn;
const unsigned long mask = sizeof(long) - 1;
unsigned long offset, addr, next_ip, len;
unsigned long *reg;
next_ip = ex_fixup_addr(e);
len = next_ip - regs->ip;
if (len > MAX_INSN_SIZE)
return false;
if (insn_decode(&insn, (void *) regs->ip, len, INSN_MODE_KERN))
return false;
if (insn.length != len)
return false;
if (insn.opcode.bytes[0] != 0x8b)
return false;
if (insn.opnd_bytes != sizeof(long))
return false;
addr = (unsigned long) insn_get_addr_ref(&insn, regs);
if (addr == ~0ul)
return false;
offset = addr & mask;
addr = addr & ~mask;
if (fault_addr != addr + sizeof(long))
return false;
reg = insn_get_modrm_reg_ptr(&insn, regs);
if (!reg)
return false;
*reg = *(unsigned long *)addr >> (offset * 8);
return ex_handler_default(e, regs);
}
static bool ex_handler_fault(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
regs->ax = trapnr;
return ex_handler_default(fixup, regs);
}
static bool ex_handler_sgx(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
regs->ax = trapnr | SGX_ENCLS_FAULT_FLAG;
return ex_handler_default(fixup, regs);
}
/*
* Handler for when we fail to restore a task's FPU state. We should never get
* here because the FPU state of a task using the FPU (struct fpu::fpstate)
* should always be valid. However, past bugs have allowed userspace to set
* reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
* These caused XRSTOR to fail when switching to the task, leaking the FPU
* registers of the task previously executing on the CPU. Mitigate this class
* of vulnerability by restoring from the initial state (essentially, zeroing
* out all the FPU registers) if we can't restore from the task's FPU state.
*/
static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
struct pt_regs *regs)
{
WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
(void *)instruction_pointer(regs));
	fpu_reset_from_exception_fixup();
	return ex_handler_default(fixup, regs);
}
/*
* On x86-64, we end up being imprecise with 'access_ok()', and allow
* non-canonical user addresses to make the range comparisons simpler,
* and to not have to worry about LAM being enabled.
*
* In fact, we allow up to one page of "slop" at the sign boundary,
* which means that we can do access_ok() by just checking the sign
* of the pointer for the common case of having a small access size.
*/
static bool gp_fault_address_ok(unsigned long fault_address)
{
#ifdef CONFIG_X86_64
/* Is it in the "user space" part of the non-canonical space? */
if (valid_user_address(fault_address))
return true;
/* .. or just above it? */
fault_address -= PAGE_SIZE;
if (valid_user_address(fault_address))
return true;
#endif
return false;
}
static bool ex_handler_uaccess(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long fault_address)
{
	WARN_ONCE(trapnr == X86_TRAP_GP && !gp_fault_address_ok(fault_address),
		  "General protection fault in user access. Non-canonical address?");
	return ex_handler_default(fixup, regs);
}
static bool ex_handler_msr(const struct exception_table_entry *fixup,
struct pt_regs *regs, bool wrmsr, bool safe, int reg)
{
if (__ONCE_LITE_IF(!safe && wrmsr)) {
pr_warn("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
(unsigned int)regs->cx, (unsigned int)regs->dx,
(unsigned int)regs->ax, regs->ip, (void *)regs->ip);
show_stack_regs(regs);
}
if (__ONCE_LITE_IF(!safe && !wrmsr)) {
pr_warn("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
(unsigned int)regs->cx, regs->ip, (void *)regs->ip);
show_stack_regs(regs);
}
if (!wrmsr) {
/* Pretend that the read succeeded and returned 0. */
regs->ax = 0;
regs->dx = 0;
}
if (safe)
*pt_regs_nr(regs, reg) = -EIO;
return ex_handler_default(fixup, regs);
}
static bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
struct pt_regs *regs)
{
	if (static_cpu_has(X86_BUG_NULL_SEG))
		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
	asm volatile ("mov %0, %%fs" : : "rm" (0));
	return ex_handler_default(fixup, regs);
}
static bool ex_handler_imm_reg(const struct exception_table_entry *fixup,
struct pt_regs *regs, int reg, int imm)
{
	*pt_regs_nr(regs, reg) = (long)imm;
	return ex_handler_default(fixup, regs);
}
static bool ex_handler_ucopy_len(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long fault_address,
int reg, int imm)
{
regs->cx = imm * regs->cx + *pt_regs_nr(regs, reg);
return ex_handler_uaccess(fixup, regs, trapnr, fault_address);
}
#ifdef CONFIG_X86_FRED
static bool ex_handler_eretu(const struct exception_table_entry *fixup,
struct pt_regs *regs, unsigned long error_code)
{
struct pt_regs *uregs = (struct pt_regs *)(regs->sp - offsetof(struct pt_regs, orig_ax));
unsigned short ss = uregs->ss;
unsigned short cs = uregs->cs;
/*
* Move the NMI bit from the invalid stack frame, which caused ERETU
* to fault, to the fault handler's stack frame, thus to unblock NMI
* with the fault handler's ERETS instruction ASAP if NMI is blocked.
*/
regs->fred_ss.nmi = uregs->fred_ss.nmi;
/*
* Sync event information to uregs, i.e., the ERETU return frame, but
* is it safe to write to the ERETU return frame which is just above
* current event stack frame?
*
* The RSP used by FRED to push a stack frame is not the value in %rsp,
* it is calculated from %rsp with the following 2 steps:
* 1) RSP = %rsp - (IA32_FRED_CONFIG & 0x1c0) // Reserve N*64 bytes
* 2) RSP = RSP & ~0x3f // Align to a 64-byte cache line
* when an event delivery doesn't trigger a stack level change.
*
* Here is an example with N*64 (N=1) bytes reserved:
*
* 64-byte cache line ==> ______________
* |___Reserved___|
* |__Event_data__|
* |_____SS_______|
* |_____RSP______|
* |_____FLAGS____|
* |_____CS_______|
* |_____IP_______|
* 64-byte cache line ==> |__Error_code__| <== ERETU return frame
* |______________|
* |______________|
* |______________|
* |______________|
* |______________|
* |______________|
* |______________|
* 64-byte cache line ==> |______________| <== RSP after step 1) and 2)
* |___Reserved___|
* |__Event_data__|
* |_____SS_______|
* |_____RSP______|
* |_____FLAGS____|
* |_____CS_______|
* |_____IP_______|
* 64-byte cache line ==> |__Error_code__| <== ERETS return frame
*
* Thus a new FRED stack frame will always be pushed below a previous
* FRED stack frame ((N*64) bytes may be reserved between), and it is
* safe to write to a previous FRED stack frame as they never overlap.
*/
fred_info(uregs)->edata = fred_event_data(regs);
uregs->ssx = regs->ssx;
uregs->fred_ss.ss = ss;
/* The NMI bit was moved away above */
uregs->fred_ss.nmi = 0;
uregs->csx = regs->csx;
uregs->fred_cs.sl = 0;
uregs->fred_cs.wfe = 0;
uregs->cs = cs;
uregs->orig_ax = error_code;
return ex_handler_default(fixup, regs);
}
#endif
int ex_get_fixup_type(unsigned long ip)
{
const struct exception_table_entry *e = search_exception_tables(ip);
return e ? FIELD_GET(EX_DATA_TYPE_MASK, e->data) : EX_TYPE_NONE;
}
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
unsigned long fault_addr)
{
const struct exception_table_entry *e;
int type, reg, imm;
#ifdef CONFIG_PNPBIOS
if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
extern u32 pnp_bios_is_utter_crap;
pnp_bios_is_utter_crap = 1;
printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
__asm__ volatile(
"movl %0, %%esp\n\t"
"jmp *%1\n\t"
: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
panic("do_trap: can't hit this");
}
#endif
e = search_exception_tables(regs->ip);
if (!e)
return 0;
type = FIELD_GET(EX_DATA_TYPE_MASK, e->data);
reg = FIELD_GET(EX_DATA_REG_MASK, e->data);
imm = FIELD_GET(EX_DATA_IMM_MASK, e->data);
switch (type) {
case EX_TYPE_DEFAULT:
case EX_TYPE_DEFAULT_MCE_SAFE:
return ex_handler_default(e, regs);
case EX_TYPE_FAULT:
case EX_TYPE_FAULT_MCE_SAFE:
return ex_handler_fault(e, regs, trapnr);
case EX_TYPE_UACCESS:
return ex_handler_uaccess(e, regs, trapnr, fault_addr);
case EX_TYPE_CLEAR_FS:
return ex_handler_clear_fs(e, regs);
case EX_TYPE_FPU_RESTORE:
return ex_handler_fprestore(e, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(e, regs);
case EX_TYPE_WRMSR:
return ex_handler_msr(e, regs, true, false, reg);
case EX_TYPE_RDMSR:
return ex_handler_msr(e, regs, false, false, reg);
case EX_TYPE_WRMSR_SAFE:
return ex_handler_msr(e, regs, true, true, reg);
case EX_TYPE_RDMSR_SAFE:
return ex_handler_msr(e, regs, false, true, reg);
case EX_TYPE_WRMSR_IN_MCE:
ex_handler_msr_mce(regs, true);
break;
case EX_TYPE_RDMSR_IN_MCE:
ex_handler_msr_mce(regs, false);
break;
case EX_TYPE_POP_REG:
regs->sp += sizeof(long);
fallthrough;
case EX_TYPE_IMM_REG:
return ex_handler_imm_reg(e, regs, reg, imm);
case EX_TYPE_FAULT_SGX:
return ex_handler_sgx(e, regs, trapnr);
case EX_TYPE_UCOPY_LEN:
return ex_handler_ucopy_len(e, regs, trapnr, fault_addr, reg, imm);
case EX_TYPE_ZEROPAD:
return ex_handler_zeropad(e, regs, fault_addr);
#ifdef CONFIG_X86_FRED
case EX_TYPE_ERETU:
return ex_handler_eretu(e, regs, error_code);
#endif
}
BUG();
}
extern unsigned int early_recursion_flag;
/* Restricted version used during very early boot */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
/* Ignore early NMIs. */
if (trapnr == X86_TRAP_NMI)
return;
if (early_recursion_flag > 2)
goto halt_loop;
/*
* Old CPUs leave the high bits of CS on the stack
* undefined. I'm not sure which CPUs do this, but at least
* the 486 DX works this way.
* Xen pv domains are not using the default __KERNEL_CS.
*/
if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
goto fail;
/*
* The full exception fixup machinery is available as soon as
* the early IDT is loaded. This means that it is the
* responsibility of extable users to either function correctly
* when handlers are invoked early or to simply avoid causing
* exceptions before they're ready to handle them.
*
* This is better than filtering which handlers can be used,
* because refusing to call a handler here is guaranteed to
* result in a hard-to-debug panic.
*
* Keep in mind that not all vectors actually get here. Early
* page faults, for example, are special.
*/
if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
return;
if (trapnr == X86_TRAP_UD) {
if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
/* Skip the ud2. */
regs->ip += LEN_UD2;
return;
}
/*
* If this was a BUG and report_bug returns or if this
* was just a normal #UD, we want to continue onward and
* crash.
*/
}
fail:
early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
(unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
regs->orig_ax, read_cr2());
show_regs(regs);
halt_loop:
while (true)
halt();
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/idr.h
*
* 2002-10-18 written by Jim Houston jim.houston@ccur.com
* Copyright (C) 2002 by Concurrent Computer Corporation
*
* Small id to pointer translation service avoiding fixed sized
* tables.
*/
#ifndef __IDR_H__
#define __IDR_H__
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/cleanup.h>
struct idr {
struct radix_tree_root idr_rt;
unsigned int idr_base;
unsigned int idr_next;
};
/*
* The IDR API does not expose the tagging functionality of the radix tree
* to users. Use tag 0 to track whether a node has free space below it.
*/
#define IDR_FREE 0
/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
(1 << (ROOT_TAG_SHIFT + IDR_FREE)))
#define IDR_INIT_BASE(name, base) { \
.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
.idr_base = (base), \
.idr_next = 0, \
}
/**
* IDR_INIT() - Initialise an IDR.
* @name: Name of IDR.
*
* A freshly-initialised IDR contains no IDs.
*/
#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
/**
* DEFINE_IDR() - Define a statically-allocated IDR.
* @name: Name of IDR.
*
* An IDR defined using this macro is ready for use with no additional
* initialisation required. It contains no IDs.
*/
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
/**
* idr_get_cursor - Return the current position of the cyclic allocator
* @idr: idr handle
*
* The value returned is the value that will be next returned from
* idr_alloc_cyclic() if it is free (otherwise the search will start from
* this position).
*/
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
return READ_ONCE(idr->idr_next);
}
/**
* idr_set_cursor - Set the current position of the cyclic allocator
* @idr: idr handle
* @val: new position
*
* The next call to idr_alloc_cyclic() will return @val if it is free
* (otherwise the search will start from this position).
*/
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
WRITE_ONCE(idr->idr_next, val);
}
/**
* DOC: idr sync
* idr synchronization (stolen from radix-tree.h)
*
* idr_find() is able to be called locklessly, using RCU. The caller must
* ensure calls to this function are made within rcu_read_lock() regions.
* Other readers (lock-free or otherwise) and modifications may be running
* concurrently.
*
* It is still required that the caller manage the synchronization and
* lifetimes of the items. So if RCU lock-free lookups are used, typically
* this would mean that the items have their own locks, or are amenable to
* lock-free access; and that the items are freed by RCU (or only freed after
* having been deleted from the idr tree *and* a synchronize_rcu() grace
* period).
*/
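/*
 * Illustrative sketch (caller-side pattern only, names are placeholders): a
 * lock-free lookup as described above typically looks like:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);	// obj must be protected by RCU or its own lock
 *	rcu_read_unlock();
 *
 * "my_idr", "obj" and "use()" stand in for caller-owned state.
 */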
#define idr_lock(idr) xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
xa_unlock_irqrestore(&(idr)->idr_rt, flags)
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
struct __class_idr {
struct idr *idr;
int id;
};
#define idr_null ((struct __class_idr){ NULL, -1 })
#define take_idr_id(id) __get_and_null(id, idr_null)
DEFINE_CLASS(idr_alloc, struct __class_idr,
if (_T.id >= 0) idr_remove(_T.idr, _T.id),
((struct __class_idr){
.idr = idr,
.id = idr_alloc(idr, ptr, start, end, gfp),
}),
struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
/**
* idr_init_base() - Initialise an IDR.
* @idr: IDR handle.
* @base: The base value for the IDR.
*
* This variation of idr_init() creates an IDR which will allocate IDs
* starting at %base.
*/
static inline void idr_init_base(struct idr *idr, int base)
{
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
idr->idr_base = base;
idr->idr_next = 0;
}
/**
* idr_init() - Initialise an IDR.
* @idr: IDR handle.
*
* Initialise a dynamically allocated IDR. To initialise a
* statically allocated IDR, use DEFINE_IDR().
*/
static inline void idr_init(struct idr *idr)
{
idr_init_base(idr, 0);
}
/**
* idr_is_empty() - Are there any IDs allocated?
* @idr: IDR handle.
*
 * Return: %true if no IDs are currently allocated in this IDR.
*/
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}
/**
* idr_preload_end - end preload section started with idr_preload()
*
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
static inline void idr_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}
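/*
 * Illustrative sketch (caller-side, names are hypothetical): the usual
 * allocation pattern brackets an allocation done under the caller's own
 * lock with idr_preload()/idr_preload_end():
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&foo_lock);
 *	id = idr_alloc(&foo_idr, foo, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&foo_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 *
 * "foo_lock", "foo_idr" and "foo" are caller-owned; the GFP split
 * (GFP_KERNEL for the preload, GFP_NOWAIT under the lock) is the intended
 * usage.
 */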
/**
* idr_for_each_entry() - Iterate over an IDR's elements of a given type.
* @idr: IDR handle.
* @entry: The type * to use as cursor
* @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
* after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
#define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
/**
* idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
* @idr: IDR handle.
* @entry: The type * to use as cursor.
* @tmp: A temporary placeholder for ID.
* @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
* after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (tmp = 0, id = 0; \
((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/**
* idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
* @idr: IDR handle.
* @entry: The type * to use as a cursor.
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
*/
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
entry; \
++id, (entry) = idr_get_next((idr), &(id)))
/**
* idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
* @idr: IDR handle.
* @entry: The type * to use as a cursor.
* @tmp: A temporary placeholder for ID.
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
* After normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/*
* IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
struct ida_bitmap {
unsigned long bitmap[IDA_BITMAP_LONGS];
};
struct ida {
struct xarray xa;
};
#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
#define IDA_INIT(name) { \
.xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max);
/**
* ida_alloc() - Allocate an unused ID.
* @ida: IDA handle.
* @gfp: Memory allocation flags.
*
* Allocate an ID between 0 and %INT_MAX, inclusive.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
return ida_alloc_range(ida, 0, ~0, gfp);
}
/**
* ida_alloc_min() - Allocate an unused ID.
* @ida: IDA handle.
* @min: Lowest ID to allocate.
* @gfp: Memory allocation flags.
*
* Allocate an ID between @min and %INT_MAX, inclusive.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
return ida_alloc_range(ida, min, ~0, gfp);
}
/**
* ida_alloc_max() - Allocate an unused ID.
* @ida: IDA handle.
* @max: Highest ID to allocate.
* @gfp: Memory allocation flags.
*
* Allocate an ID between 0 and @max, inclusive.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
return ida_alloc_range(ida, 0, max, gfp);
}
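/*
 * Illustrative sketch (caller-side, names are hypothetical): a driver that
 * only needs unique numbers, not id-to-pointer lookup, would do:
 *
 *	static DEFINE_IDA(foo_minor_ida);
 *
 *	minor = ida_alloc_max(&foo_minor_ida, FOO_MAX_MINOR, GFP_KERNEL);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	ida_free(&foo_minor_ida, minor);
 *
 * "foo_minor_ida" and "FOO_MAX_MINOR" are placeholders.
 */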
static inline void ida_init(struct ida *ida)
{
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
static inline bool ida_is_empty(const struct ida *ida)
{
return xa_empty(&ida->xa);
}
static inline bool ida_exists(struct ida *ida, unsigned int id)
{
return ida_find_first_range(ida, id, id) == id;
}
static inline int ida_find_first(struct ida *ida)
{
return ida_find_first_range(ida, 0, ~0);
}
#endif /* __IDR_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* pm_runtime.h - Device run-time power management helper functions.
*
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
*/
#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>
#include <linux/jiffies.h>
/* Runtime PM flag argument bits */
#define RPM_ASYNC 0x01 /* Request is asynchronous */
#define RPM_NOWAIT 0x02 /* Don't wait for concurrent
state change */
#define RPM_GET_PUT 0x04 /* Increment/decrement the
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
#define RPM_TRANSPARENT 0x10 /* Succeed if runtime PM is disabled */
/*
* Use this for defining a set of PM operations to be used in all situations
* (system suspend, hibernation or runtime PM).
*
* Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
* macro, which uses the provided callbacks for both runtime PM and system
* sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
* and pm_runtime_force_resume() for its system sleep callbacks.
*
* If the underlying dev_pm_ops struct symbol has to be exported, use
* EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
*/
#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
_DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
pm_runtime_force_resume, suspend_fn, \
resume_fn, idle_fn)
#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
EXPORT_DEV_PM_OPS(name) = { \
RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
EXPORT_GPL_DEV_PM_OPS(name) = { \
RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
EXPORT_NS_DEV_PM_OPS(name, ns) = { \
RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
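/*
 * Illustrative sketch (driver-side, names are hypothetical): a typical driver
 * using DEFINE_RUNTIME_DEV_PM_OPS() only provides runtime PM callbacks and
 * lets system sleep be covered by pm_runtime_force_suspend() and
 * pm_runtime_force_resume():
 *
 *	static int foo_runtime_suspend(struct device *dev) { ... }
 *	static int foo_runtime_resume(struct device *dev) { ... }
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *					 foo_runtime_resume, NULL);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.pm = pm_ptr(&foo_pm_ops),
 *		},
 *	};
 */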
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
static inline bool queue_pm_work(struct work_struct *work)
{
return queue_work(pm_wq, work);
}
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_active(struct device *dev);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern bool pm_runtime_block_if_disabled(struct device *dev);
extern void pm_runtime_unblock(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);
int devm_pm_runtime_set_active_enabled(struct device *dev);
extern int devm_pm_runtime_enable(struct device *dev);
int devm_pm_runtime_get_noresume(struct device *dev);
/**
* pm_suspend_ignore_children - Set runtime PM behavior regarding children.
* @dev: Target device.
* @enable: Whether or not to ignore possible dependencies on children.
*
* The dependencies of @dev on its children will not be taken into account by
* the runtime PM framework going forward if @enable is %true, or they will
* be taken into account otherwise.
*/
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
}
/**
* pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
* @dev: Target device.
*/
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
}
/**
* pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
* @dev: Target device.
*
* Decrement the runtime PM usage counter of @dev unless it is 0 already.
*/
static inline void pm_runtime_put_noidle(struct device *dev)
{
atomic_add_unless(&dev->power.usage_count, -1, 0);
}
/**
* pm_runtime_suspended - Check whether or not a device is runtime-suspended.
* @dev: Target device.
*
* Return %true if runtime PM is enabled for @dev and its runtime PM status is
* %RPM_SUSPENDED, or %false otherwise.
*
* Note that the return value of this function can only be trusted if it is
* called under the runtime PM lock of @dev or under conditions in which
* runtime PM cannot be either disabled or enabled for @dev and its runtime PM
* status cannot change.
*/
static inline bool pm_runtime_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED
&& !dev->power.disable_depth;
}
/**
* pm_runtime_active - Check whether or not a device is runtime-active.
* @dev: Target device.
*
* Return %true if runtime PM is disabled for @dev or its runtime PM status is
* %RPM_ACTIVE, or %false otherwise.
*
* Note that the return value of this function can only be trusted if it is
* called under the runtime PM lock of @dev or under conditions in which
* runtime PM cannot be either disabled or enabled for @dev and its runtime PM
* status cannot change.
*/
static inline bool pm_runtime_active(struct device *dev)
{
return dev->power.runtime_status == RPM_ACTIVE
|| dev->power.disable_depth;
}
/**
* pm_runtime_status_suspended - Check if runtime PM status is "suspended".
* @dev: Target device.
*
* Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
* otherwise, regardless of whether or not runtime PM has been enabled for @dev.
*
* Note that the return value of this function can only be trusted if it is
* called under the runtime PM lock of @dev or under conditions in which the
* runtime PM status of @dev cannot change.
*/
static inline bool pm_runtime_status_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED;
}
/**
* pm_runtime_enabled - Check if runtime PM is enabled.
* @dev: Target device.
*
* Return %true if runtime PM is enabled for @dev or %false otherwise.
*
* Note that the return value of this function can only be trusted if it is
* called under the runtime PM lock of @dev or under conditions in which
* runtime PM cannot be either disabled or enabled for @dev.
*/
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
}
/**
* pm_runtime_blocked - Check if runtime PM enabling is blocked.
* @dev: Target device.
*
* Do not call this function outside system suspend/resume code paths.
*/
static inline bool pm_runtime_blocked(struct device *dev)
{
return dev->power.last_status == RPM_BLOCKED;
}
/**
* pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
* @dev: Target device.
*
* Return %true if @dev is a special device without runtime PM callbacks or
* %false otherwise.
*/
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
return dev->power.no_callbacks;
}
/**
* pm_runtime_mark_last_busy - Update the last access time of a device.
* @dev: Target device.
*
* Update the last access time of @dev used by the runtime PM autosuspend
* mechanism to the current time as returned by ktime_get_mono_fast_ns().
*/
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}
/**
* pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
* @dev: Target device.
*
* Return %true if @dev has been marked as an "IRQ-safe" device (with respect
 * to runtime PM), in which case its runtime PM callbacks can be expected to
* work correctly when invoked from interrupt handlers.
*/
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
return dev->power.irq_safe;
}
extern u64 pm_runtime_suspended_time(struct device *dev);
#else /* !CONFIG_PM */
static inline bool queue_pm_work(struct work_struct *work) { return false; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev)
{
return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline bool pm_runtime_block_if_disabled(struct device *dev) { return true; }
static inline void pm_runtime_unblock(struct device *dev) {}
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline bool pm_runtime_blocked(struct device *dev) { return true; }
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}
static inline void pm_runtime_release_supplier(struct device_link *link) {}
#endif /* !CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
bool pm_runtime_need_not_resume(struct device *dev);
int pm_runtime_force_resume(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; }
static inline int pm_runtime_force_resume(struct device *dev) { return -ENXIO; }
#endif /* CONFIG_PM_SLEEP */
/**
* pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
* @dev: Target device.
*
* Invoke the "idle check" callback of @dev and, depending on its return value,
* set up autosuspend of @dev or suspend it (depending on whether or not
* autosuspend has been enabled for it).
*
* Return:
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
* ongoing or device not in %RPM_ACTIVE state.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
* Other values and conditions for the above values are possible as returned by
* Runtime PM idle and suspend callbacks.
*/
static inline int pm_runtime_idle(struct device *dev)
{
return __pm_runtime_idle(dev, 0);
}
/**
* pm_runtime_suspend - Suspend a device synchronously.
* @dev: Target device.
*
* Return:
* * 1: Success; device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
* ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
static inline int pm_runtime_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, 0);
}
/**
* pm_runtime_autosuspend - Update the last access time and set up autosuspend
* of a device.
* @dev: Target device.
*
* First update the last access time, then set up autosuspend of @dev or suspend
* it (depending on whether or not autosuspend is enabled for it) without
* engaging its "idle check" callback.
*
* Return:
* * 1: Success; device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
* ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
static inline int pm_runtime_autosuspend(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_AUTO);
}
/**
* pm_runtime_resume - Resume a device synchronously.
* @dev: Target device.
*/
static inline int pm_runtime_resume(struct device *dev)
{
return __pm_runtime_resume(dev, 0);
}
/**
* pm_request_idle - Queue up "idle check" execution for a device.
* @dev: Target device.
*
* Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
* asynchronously.
*
* Return:
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
* ongoing or device not in %RPM_ACTIVE state.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_request_idle(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_ASYNC);
}
/**
* pm_request_resume - Queue up runtime-resume of a device.
* @dev: Target device.
*/
static inline int pm_request_resume(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_ASYNC);
}
/**
* pm_request_autosuspend - Update the last access time and queue up autosuspend
* of a device.
* @dev: Target device.
*
* Update the last access time of a device and queue up a work item to run an
* equivalent pm_runtime_autosuspend() for @dev asynchronously.
*
* Return:
* * 1: Success; device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
* ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_request_autosuspend(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
/**
* pm_runtime_get - Bump up usage counter and queue up resume of a device.
* @dev: Target device.
*
* Bump up the runtime PM usage counter of @dev and queue up a work item to
* carry out runtime-resume of it.
*/
static inline int pm_runtime_get(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}
/**
* pm_runtime_get_sync - Bump up usage counter of a device and resume it.
* @dev: Target device.
*
* Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
* it synchronously.
*
* The possible return values of this function are the same as for
* pm_runtime_resume() and the runtime PM usage counter of @dev remains
* incremented in all cases, even if it returns an error code.
* Consider using pm_runtime_resume_and_get() instead of it, especially
* if its return value is checked by the caller, as this is likely to result
* in cleaner code.
*/
static inline int pm_runtime_get_sync(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
static inline int pm_runtime_get_active(struct device *dev, int rpmflags)
{
int ret;
ret = __pm_runtime_resume(dev, RPM_GET_PUT | rpmflags);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
return 0;
}
/**
* pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
* @dev: Target device.
*
* Resume @dev synchronously and if that is successful, increment its runtime
* PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
* incremented or a negative error code otherwise.
*/
static inline int pm_runtime_resume_and_get(struct device *dev)
{
return pm_runtime_get_active(dev, 0);
}
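/*
 * Illustrative sketch (caller-side): the error-handling difference noted for
 * pm_runtime_get_sync() is why this helper tends to read more cleanly:
 *
 *	ret = pm_runtime_resume_and_get(dev);
 *	if (ret < 0)
 *		return ret;	// usage counter already dropped on failure
 *	...access the device while it is active...
 *	pm_runtime_put(dev);
 */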
/**
* pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
* @dev: Target device.
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, queue up a work item for @dev like in pm_request_idle().
*
* Return:
* * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
* change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_runtime_put(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}
/**
* __pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
* @dev: Target device.
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
*
* Return:
* * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
* change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int __pm_runtime_put_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
/**
* pm_runtime_put_autosuspend - Update the last access time of a device, drop
* its usage counter and queue autosuspend if the usage counter becomes 0.
* @dev: Target device.
*
* Update the last access time of @dev, decrement runtime PM usage counter of
* @dev and if it turns out to be equal to 0, queue up a work item for @dev like
* in pm_request_autosuspend().
*
* Return:
* * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
* change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
return __pm_runtime_put_autosuspend(dev);
}
DEFINE_GUARD(pm_runtime_noresume, struct device *,
pm_runtime_get_noresume(_T), pm_runtime_put_noidle(_T));
DEFINE_GUARD(pm_runtime_active, struct device *,
pm_runtime_get_sync(_T), pm_runtime_put(_T));
DEFINE_GUARD(pm_runtime_active_auto, struct device *,
pm_runtime_get_sync(_T), pm_runtime_put_autosuspend(_T));
/*
* Use the following guards with ACQUIRE()/ACQUIRE_ERR().
*
* The difference between the "_try" and "_try_enabled" variants is that the
* former do not produce an error when runtime PM is disabled for the given
* device.
*/
DEFINE_GUARD_COND(pm_runtime_active, _try,
pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active, _try_enabled,
pm_runtime_resume_and_get(_T), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active_auto, _try,
pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled,
pm_runtime_resume_and_get(_T), _RET == 0)
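/*
 * Illustrative sketch (caller-side): the unconditional guards defined above
 * follow the usual cleanup.h pattern, e.g.:
 *
 *	guard(pm_runtime_active)(dev);	// get_sync here, put on scope exit
 *	...access the device...
 *
 * The "_try"/"_try_enabled" variants are intended for the conditional
 * ACQUIRE()/ACQUIRE_ERR() helpers mentioned in the comment above.
 */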
/**
* pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
* @dev: Target device.
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, invoke the "idle check" callback of @dev and, depending on its
* return value, set up autosuspend of @dev or suspend it (depending on whether
* or not autosuspend has been enabled for it).
*
* The runtime PM usage counter of @dev remains decremented in all cases, even
* if it returns an error code.
*
* Return:
* * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
* change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT);
}
/**
* pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
* @dev: Target device.
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, carry out runtime-suspend of @dev synchronously.
*
* The runtime PM usage counter of @dev remains decremented in all cases, even
* if it returns an error code.
*
* Return:
* * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
* change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT);
}
/**
* pm_runtime_put_sync_autosuspend - Update the last access time of a device,
* drop device usage counter and autosuspend if 0.
* @dev: Target device.
*
* Update the last access time of @dev, decrement the runtime PM usage counter
* of @dev and if it turns out to be equal to 0, set up autosuspend of @dev or
* suspend it synchronously (depending on whether or not autosuspend has been
* enabled for it).
*
* The runtime PM usage counter of @dev remains decremented in all cases, even
* if it returns an error code.
*
* Return:
* * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
* * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
* change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
/**
* pm_runtime_set_active - Set runtime PM status to "active".
* @dev: Target device.
*
* Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies
* of it will be taken into account.
*
* It is not valid to call this function for devices with runtime PM enabled.
*/
static inline int pm_runtime_set_active(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
/**
* pm_runtime_set_suspended - Set runtime PM status to "suspended".
* @dev: Target device.
*
* Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
* dependencies of it will be taken into account.
*
* It is not valid to call this function for devices with runtime PM enabled.
*/
static inline int pm_runtime_set_suspended(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
/**
* pm_runtime_disable - Disable runtime PM for a device.
* @dev: Target device.
*
* Prevent the runtime PM framework from working with @dev by incrementing its
* "disable" counter.
*
* If the counter is zero when this function runs and there is a pending runtime
* resume request for @dev, it will be resumed. If the counter is still zero at
* that point, all of the pending runtime PM requests for @dev will be canceled
* and all runtime PM operations in progress involving it will be waited for to
* complete.
*
* For each invocation of this function for @dev, there must be a matching
* pm_runtime_enable() call, so that runtime PM is eventually enabled for it
* again.
*/
static inline void pm_runtime_disable(struct device *dev)
{
__pm_runtime_disable(dev, true);
}
/**
* pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
* @dev: Target device.
*
* Allow the runtime PM autosuspend mechanism to be used for @dev whenever
* requested (or "autosuspend" will be handled as direct runtime-suspend for
* it).
*
* NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
* at driver exit time unless your driver initially enabled pm_runtime
* with devm_pm_runtime_enable() (which handles it for you).
*/
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, true);
}
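/*
 * Example (illustrative sketch only): a probe() path opting into
 * autosuspend. foo_probe() and the 2000 ms delay are hypothetical;
 * devm_pm_runtime_enable() undoes use_autosuspend at driver unbind.
 */
#if 0
static int foo_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* stay up 2s after last use */
	pm_runtime_use_autosuspend(dev);
	return devm_pm_runtime_enable(dev);
}
#endif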
/**
* pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
* @dev: Target device.
*
* Prevent the runtime PM autosuspend mechanism from being used for @dev which
* means that "autosuspend" will be handled as direct runtime-suspend for it
* going forward.
*/
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, false);
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H
#include <net/ip_tunnels.h>
#include <net/udp.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ipv6_stubs.h>
#endif
struct udp_port_cfg {
u8 family;
/* Used only for kernel-created sockets */
union {
struct in_addr local_ip;
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr local_ip6;
#endif
};
union {
struct in_addr peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr peer_ip6;
#endif
};
__be16 local_udp_port;
__be16 peer_udp_port;
int bind_ifindex;
unsigned int use_udp_checksums:1,
use_udp6_tx_checksums:1,
use_udp6_rx_checksums:1,
ipv6_v6only:1;
};
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp);
#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
{
return 0;
}
#endif
static inline int udp_sock_create(struct net *net,
struct udp_port_cfg *cfg,
struct socket **sockp)
{
if (cfg->family == AF_INET)
return udp_sock_create4(net, cfg, sockp);
if (cfg->family == AF_INET6)
return udp_sock_create6(net, cfg, sockp);
return -EPFNOSUPPORT;
}
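/*
 * Example (illustrative sketch only): creating a kernel-owned UDP socket
 * for a tunnel. foo_create_tunnel_sock() and the use of the IANA VXLAN
 * port 4789 are hypothetical; local_ip is left zeroed (INADDR_ANY).
 */
#if 0
static int foo_create_tunnel_sock(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg cfg = {
		.family			= AF_INET,
		.local_udp_port		= htons(4789),
		.use_udp_checksums	= 1,
	};

	return udp_sock_create(net, &cfg, sockp);
}
#endif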
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
struct sk_buff *skb);
typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
int nhoff);
struct udp_tunnel_sock_cfg {
void *sk_user_data; /* user data used by encap_rcv call back */
/* Used for setting up udp_sock fields, see udp.h for details */
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_err_lookup_t encap_err_lookup;
udp_tunnel_encap_err_rcv_t encap_err_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
};
/* Setup the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
struct udp_tunnel_sock_cfg *sock_cfg);
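/*
 * Example (illustrative sketch only): turning such a socket into an
 * encapsulation socket. foo_encap_rcv() and foo_setup_encap() are
 * hypothetical; encap_type 1 stands for a generic UDP encapsulation
 * (see the UDP_ENCAP_* values in udp.h).
 */
#if 0
static int foo_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* Returning <= 0 tells UDP the skb has been consumed here. */
	kfree_skb(skb);
	return 0;
}

static void foo_setup_encap(struct net *net, struct socket *sock, void *priv)
{
	struct udp_tunnel_sock_cfg cfg = {
		.sk_user_data	= priv,
		.encap_type	= 1,
		.encap_rcv	= foo_encap_rcv,
	};

	setup_udp_tunnel_sock(net, sock, &cfg);
}
#endif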
/* -- List of parsable UDP tunnel types --
*
* Adding to this list will result in serious debate. The main issue is
* that this list is essentially a list of workarounds for either poorly
* designed tunnels, or poorly designed device offloads.
*
* The parsing supported via these types should really be used for Rx
* traffic only as the network stack will have already inserted offsets for
* the location of the headers in the skb. In addition any ports that are
* pushed should be kept within the namespace without leaking to other
* devices such as VFs or other ports on the same device.
*
* It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
* need to use this for Rx checksum offload. It should not be necessary to
* call this function to perform Tx offloads on outgoing traffic.
*/
enum udp_parsable_tunnel_type {
UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */
UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */
UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
};
struct udp_tunnel_info {
unsigned short type;
sa_family_t sa_family;
__be16 port;
u8 hw_priv;
};
/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
unsigned short type);
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
bool xnet, bool nocheck, u16 ipcb_flags);
void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
struct net_device *dev,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be32 label,
__be16 src_port, __be16 dst_port, bool nocheck,
u16 ip6cb_flags);
void udp_tunnel_sock_release(struct socket *sock);
struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
struct net_device *dev,
struct net *net, int oif,
__be32 *saddr,
const struct ip_tunnel_key *key,
__be16 sport, __be16 dport, u8 tos,
struct dst_cache *dst_cache);
struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
struct net_device *dev,
struct net *net,
struct socket *sock, int oif,
struct in6_addr *saddr,
const struct ip_tunnel_key *key,
__be16 sport, __be16 dport, u8 dsfield,
struct dst_cache *dst_cache);
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
const unsigned long *flags,
__be64 tunnel_id, int md_size);
#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
return iptunnel_handle_offloads(skb, type);
}
#endif
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
#else
static inline void udp_tunnel_update_gro_lookup(struct net *net,
struct sock *sk, bool add) {}
static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
#endif
static inline void udp_tunnel_cleanup_gro(struct sock *sk)
{
udp_tunnel_update_gro_rcv(sk, false);
udp_tunnel_update_gro_lookup(sock_net(sk), sk, false);
}
static inline void udp_tunnel_encap_enable(struct sock *sk)
{
if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
return;
#if IS_ENABLED(CONFIG_IPV6)
if (READ_ONCE(sk->sk_family) == PF_INET6)
ipv6_stub->udpv6_encap_enable();
#endif
udp_encap_enable();
}
#define UDP_TUNNEL_NIC_MAX_TABLES 4
enum udp_tunnel_nic_info_flags {
/* Device only supports offloads when it's open, all ports
* will be removed before close and re-added after open.
*/
UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(0),
/* Device supports only IPv4 tunnels */
UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(1),
/* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
* This port must not be counted towards n_entries of any table.
* Driver will not receive any callback associated with port 4789.
*/
UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(2),
};
struct udp_tunnel_nic;
#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2)
struct udp_tunnel_nic_shared {
struct udp_tunnel_nic *udp_tunnel_nic_info;
struct list_head devices;
};
struct udp_tunnel_nic_shared_node {
struct net_device *dev;
struct list_head list;
};
/**
* struct udp_tunnel_nic_info - driver UDP tunnel offload information
* @set_port: callback for adding a new port
* @unset_port: callback for removing a port
* @sync_table: callback for syncing the entire port table at once
* @shared: reference to device global state (optional)
* @flags: device flags from enum udp_tunnel_nic_info_flags
* @tables: UDP port tables this device has
* @tables.n_entries: number of entries in this table
* @tables.tunnel_types: types of tunnels this table accepts
*
* Drivers are expected to provide either @set_port and @unset_port callbacks
* or the @sync_table callback. Callbacks are invoked with rtnl lock held.
*
* Devices which (misguidedly) share the UDP tunnel port table across multiple
* netdevs should allocate an instance of struct udp_tunnel_nic_shared and
* point @shared at it.
* There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
* sharing a table.
*
* Known limitations:
* - UDP tunnel port notifications are fundamentally best-effort -
* the driver is likely to see both skbs that use a UDP tunnel port without
* actually being tunneled, and tunneled skbs arriving on other ports -
* drivers should only use these ports for non-critical RX-side offloads,
* e.g. the checksum offload;
* - none of the devices care about the socket family at present, so we don't
* track it. Please extend this code if you care.
*/
struct udp_tunnel_nic_info {
/* one-by-one */
int (*set_port)(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti);
int (*unset_port)(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti);
/* all at once */
int (*sync_table)(struct net_device *dev, unsigned int table);
struct udp_tunnel_nic_shared *shared;
unsigned int flags;
struct udp_tunnel_nic_table_info {
unsigned int n_entries;
unsigned int tunnel_types;
} tables[UDP_TUNNEL_NIC_MAX_TABLES];
};
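/*
 * Example (illustrative sketch only): how a NIC driver might describe a
 * single 8-entry port table accepting VXLAN and GENEVE. The foo_*()
 * callbacks and the hardware programming they would do are hypothetical.
 */
#if 0
static int foo_udp_tunnel_set(struct net_device *dev, unsigned int table,
			      unsigned int entry, struct udp_tunnel_info *ti)
{
	/* program ti->port into hardware slot (table, entry) */
	return 0;
}

static int foo_udp_tunnel_unset(struct net_device *dev, unsigned int table,
				unsigned int entry, struct udp_tunnel_info *ti)
{
	/* clear hardware slot (table, entry) */
	return 0;
}

static const struct udp_tunnel_nic_info foo_udp_tunnels = {
	.set_port	= foo_udp_tunnel_set,
	.unset_port	= foo_udp_tunnel_unset,
	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{
			.n_entries	= 8,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
					  UDP_TUNNEL_TYPE_GENEVE,
		},
	},
};
#endif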
/* UDP tunnel module dependencies
*
* Tunnel drivers are expected to have a hard dependency on the udp_tunnel
* module. NIC drivers are not, they just attach their
* struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
* Loading a tunnel driver will cause the udp_tunnel module to be loaded
* and only then will all the required state structures be allocated.
* Since we want a weak dependency from the drivers and the core to udp_tunnel
* we call things through the following stubs.
*/
struct udp_tunnel_nic_ops {
void (*get_port)(struct net_device *dev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
void (*set_port_priv)(struct net_device *dev, unsigned int table,
unsigned int idx, u8 priv);
void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
void (*reset_ntf)(struct net_device *dev);
size_t (*dump_size)(struct net_device *dev, unsigned int table);
int (*dump_write)(struct net_device *dev, unsigned int table,
struct sk_buff *skb);
void (*assert_locked)(struct net_device *dev);
void (*lock)(struct net_device *dev);
void (*unlock)(struct net_device *dev);
};
#ifdef CONFIG_INET
extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
#else
#define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL)
#endif
static inline void
udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
/* This helper is used from .sync_table, we indicate empty entries
* by zero'ed @ti. Drivers which need to know the details of a port
* when it gets deleted should use the .set_port / .unset_port
* callbacks.
* Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
*/
memset(ti, 0, sizeof(*ti));
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
}
static inline void
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
unsigned int idx, u8 priv)
{
if (udp_tunnel_nic_ops) {
udp_tunnel_nic_ops->assert_locked(dev);
udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
}
}
static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
{
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->assert_locked(dev);
}
static inline void udp_tunnel_nic_lock(struct net_device *dev)
{
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->lock(dev);
}
static inline void udp_tunnel_nic_unlock(struct net_device *dev)
{
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->unlock(dev);
}
static inline void
udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
return;
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->add_port(dev, ti);
}
static inline void
udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
return;
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->del_port(dev, ti);
}
/**
* udp_tunnel_nic_reset_ntf() - device-originating reset notification
* @dev: network interface device structure
*
* Called by the driver to inform the core that the entire UDP tunnel port
* state has been lost, usually due to device reset. Core will assume device
* forgot all the ports and issue .set_port and .sync_table callbacks as
* necessary.
*
* This function must be called with rtnl lock held, and will issue all
* the callbacks before returning.
*/
static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
if (udp_tunnel_nic_ops)
udp_tunnel_nic_ops->reset_ntf(dev);
}
static inline size_t
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
size_t ret;
if (!udp_tunnel_nic_ops)
return 0;
udp_tunnel_nic_ops->lock(dev);
ret = udp_tunnel_nic_ops->dump_size(dev, table);
udp_tunnel_nic_ops->unlock(dev);
return ret;
}
static inline int
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
struct sk_buff *skb)
{
int ret;
if (!udp_tunnel_nic_ops)
return 0;
udp_tunnel_nic_ops->lock(dev);
ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
udp_tunnel_nic_ops->unlock(dev);
return ret;
}
static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
ASSERT_RTNL();
if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
return;
udp_tunnel_nic_assert_locked(dev);
call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}
static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
ASSERT_RTNL();
if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
return;
udp_tunnel_nic_assert_locked(dev);
call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM capability
#if !defined(_TRACE_CAPABILITY_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CAPABILITY_H
#include <linux/cred.h>
#include <linux/tracepoint.h>
#include <linux/user_namespace.h>
/**
* cap_capable - called after it's determined if a task has a particular
* effective capability
*
* @cred: The credentials used
* @target_ns: The user namespace of the resource being accessed
* @capable_ns: The user namespace in which the credential provides the
* capability to access the targeted resource.
* This will be NULL if ret is not 0.
* @cap: The capability to check for
* @ret: The return value of the check: 0 if the task has the capability, -ve if not
*
* Allows tracing of calls to cap_capable() in commoncap.c
*/
TRACE_EVENT(cap_capable,
TP_PROTO(const struct cred *cred, struct user_namespace *target_ns,
const struct user_namespace *capable_ns, int cap, int ret),
TP_ARGS(cred, target_ns, capable_ns, cap, ret),
TP_STRUCT__entry(
__field(const struct cred *, cred)
__field(struct user_namespace *, target_ns)
__field(const struct user_namespace *, capable_ns)
__field(int, cap)
__field(int, ret)
),
TP_fast_assign(
__entry->cred = cred;
__entry->target_ns = target_ns;
__entry->capable_ns = ret == 0 ? capable_ns : NULL;
__entry->cap = cap;
__entry->ret = ret;
),
TP_printk("cred %p, target_ns %p, capable_ns %p, cap %d, ret %d",
__entry->cred, __entry->target_ns, __entry->capable_ns, __entry->cap,
__entry->ret)
);
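/*
 * Example (illustrative sketch only): the TRACE_EVENT() above generates a
 * trace_cap_capable() hook which the capability check in
 * security/commoncap.c can emit roughly like this; the variable names are
 * the caller's and are shown for illustration only.
 */
#if 0
	trace_cap_capable(cred, targ_ns, capable_ns, cap, ret);
#endif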
#endif /* _TRACE_CAPABILITY_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file provides wrappers with sanitizer instrumentation for atomic bit
* operations.
*
* To use this functionality, an arch's bitops.h file needs to define each of
* the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
* arch___set_bit(), etc.).
*/
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This is a relaxed atomic operation (no implied memory barriers).
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* This is a relaxed atomic operation (no implied memory barriers).
*/
static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* This is a relaxed atomic operation (no implied memory barriers).
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
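/*
 * Example (illustrative sketch only): test_and_set_bit() makes one-time
 * work race free across CPUs. struct foo and FOO_INIT_DONE are
 * hypothetical.
 */
#if 0
#define FOO_INIT_DONE	0

struct foo {
	unsigned long flags;
};

static void foo_maybe_init(struct foo *foo)
{
	/* Only the caller that atomically flips the bit does the work. */
	if (!test_and_set_bit(FOO_INIT_DONE, &foo->flags))
		pr_info("foo: first user, running one-time init\n");
}
#endif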
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* fs/kernfs/dir.c - kernfs directory implementation
*
* Copyright (c) 2001-3 Patrick Mochel
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
*/
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/hash.h>
#include "kernfs-internal.h"
/*
* Don't use rename_lock to piggyback on pr_cont_buf. We don't want to
* call pr_cont() while holding rename_lock, because pr_cont() may perform
* wakeups when releasing console_sem, and holding rename_lock would
* introduce a deadlock if the scheduler reads kernfs_name in the wakeup path.
*/
static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
static bool __kernfs_active(struct kernfs_node *kn)
{
return atomic_read(&kn->active) >= 0;
}
static bool kernfs_active(struct kernfs_node *kn)
{
lockdep_assert_held(&kernfs_root(kn)->kernfs_rwsem);
return __kernfs_active(kn);
}
static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
return kn->flags & KERNFS_LOCKDEP;
#else
return false;
#endif
}
/* kernfs_node_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
size_t depth = 0;
while (rcu_dereference(to->__parent) && to != from) {
depth++;
to = rcu_dereference(to->__parent);
}
return depth;
}
static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
struct kernfs_node *b)
{
size_t da, db;
struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
if (ra != rb)
return NULL;
da = kernfs_depth(ra->kn, a);
db = kernfs_depth(rb->kn, b);
while (da > db) {
a = rcu_dereference(a->__parent);
da--;
}
while (db > da) {
b = rcu_dereference(b->__parent);
db--;
}
/* worst case b and a will be the same at root */
while (b != a) {
b = rcu_dereference(b->__parent);
a = rcu_dereference(a->__parent);
}
return a;
}
/**
* kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
* where kn_from is treated as root of the path.
* @kn_from: kernfs node which should be treated as root for the path
* @kn_to: kernfs node to which path is needed
* @buf: buffer to copy the path into
* @buflen: size of @buf
*
* We need to handle a couple of scenarios here:
* [1] when @kn_from is an ancestor of @kn_to at some level
* kn_from: /n1/n2/n3
* kn_to: /n1/n2/n3/n4/n5
* result: /n4/n5
*
* [2] when @kn_from is on a different hierarchy and we need to find common
* ancestor between @kn_from and @kn_to.
* kn_from: /n1/n2/n3/n4
* kn_to: /n1/n2/n5
* result: /../../n5
* OR
* kn_from: /n1/n2/n3/n4/n5 [depth=5]
* kn_to: /n1/n2/n3 [depth=3]
* result: /../..
*
* [3] when @kn_to is %NULL result will be "(null)"
*
* Return: the length of the constructed path. If the path would have been
* greater than @buflen, @buf contains the truncated path with the trailing
* '\0'. On error, -errno is returned.
*/
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
struct kernfs_node *kn_from,
char *buf, size_t buflen)
{
struct kernfs_node *kn, *common;
const char parent_str[] = "/..";
size_t depth_from, depth_to, len = 0;
ssize_t copied;
int i, j;
if (!kn_to)
return strscpy(buf, "(null)", buflen);
if (!kn_from)
kn_from = kernfs_root(kn_to)->kn;
if (kn_from == kn_to)
return strscpy(buf, "/", buflen);
common = kernfs_common_ancestor(kn_from, kn_to);
if (WARN_ON(!common))
return -EINVAL;
depth_to = kernfs_depth(common, kn_to);
depth_from = kernfs_depth(common, kn_from);
buf[0] = '\0';
for (i = 0; i < depth_from; i++) {
copied = strscpy(buf + len, parent_str, buflen - len);
if (copied < 0)
return copied;
len += copied;
}
/* Append the path components from the common ancestor down to @kn_to */
for (i = depth_to - 1; i >= 0; i--) {
const char *name;
for (kn = kn_to, j = 0; j < i; j++)
kn = rcu_dereference(kn->__parent);
name = rcu_dereference(kn->name);
len += scnprintf(buf + len, buflen - len, "/%s", name);
}
return len;
}
/**
* kernfs_name - obtain the name of a given node
* @kn: kernfs_node of interest
* @buf: buffer to copy @kn's name into
* @buflen: size of @buf
*
* Copies the name of @kn into @buf of @buflen bytes. The behavior is
* similar to strscpy().
*
* Fills buffer with "(null)" if @kn is %NULL.
*
* Return: the resulting length of @buf. If @buf isn't long enough,
* it's filled up to @buflen-1 and nul terminated, and returns -E2BIG.
*
* This function can be called from any context.
*/
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
struct kernfs_node *kn_parent;
if (!kn)
return strscpy(buf, "(null)", buflen);
guard(rcu)();
/*
* KERNFS_ROOT_INVARIANT_PARENT is ignored here. The name is RCU freed and
* the parent is either existing or not.
*/
kn_parent = rcu_dereference(kn->__parent);
return strscpy(buf, kn_parent ? rcu_dereference(kn->name) : "/", buflen);
}
/**
* kernfs_path_from_node - build path of node @to relative to @from.
* @from: parent kernfs_node relative to which we need to build the path
* @to: kernfs_node of interest
* @buf: buffer to copy @to's path into
* @buflen: size of @buf
*
* Builds @to's path relative to @from in @buf. @from and @to must
* be on the same kernfs-root. If @from is not parent of @to, then a relative
* path (which includes '..'s) as needed to reach from @from to @to is
* returned.
*
* Return: the length of the constructed path. If the path would have been
* greater than @buflen, @buf contains the truncated path with the trailing
* '\0'. On error, -errno is returned.
*/
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
char *buf, size_t buflen)
{
struct kernfs_root *root;
guard(rcu)();
if (to) {
root = kernfs_root(to);
if (!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)) {
guard(read_lock_irqsave)(&root->kernfs_rename_lock);
return kernfs_path_from_node_locked(to, from, buf, buflen);
}
}
return kernfs_path_from_node_locked(to, from, buf, buflen);
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);
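/*
 * Example (illustrative sketch only): building a node's path relative to
 * an ancestor on the same kernfs root. foo_print_rel_path() and the
 * buffer size are hypothetical.
 */
#if 0
static void foo_print_rel_path(struct kernfs_node *kn,
			       struct kernfs_node *ancestor)
{
	char buf[256];

	if (kernfs_path_from_node(kn, ancestor, buf, sizeof(buf)) >= 0)
		pr_info("relative path: %s\n", buf);
}
#endif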
/**
* pr_cont_kernfs_name - pr_cont name of a kernfs_node
* @kn: kernfs_node of interest
*
* This function can be called from any context.
*/
void pr_cont_kernfs_name(struct kernfs_node *kn)
{
unsigned long flags;
spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
pr_cont("%s", kernfs_pr_cont_buf);
spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}
/**
* pr_cont_kernfs_path - pr_cont path of a kernfs_node
* @kn: kernfs_node of interest
*
* This function can be called from any context.
*/
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
unsigned long flags;
int sz;
spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
sizeof(kernfs_pr_cont_buf));
if (sz < 0) {
if (sz == -E2BIG)
pr_cont("(name too long)");
else
pr_cont("(error)");
goto out;
}
pr_cont("%s", kernfs_pr_cont_buf);
out:
spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}
/**
* kernfs_get_parent - determine the parent node and pin it
* @kn: kernfs_node of interest
*
* Determines @kn's parent, pins and returns it. This function can be
* called from any context.
*
* Return: parent node of @kn
*/
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
struct kernfs_node *parent;
struct kernfs_root *root;
unsigned long flags;
root = kernfs_root(kn);
read_lock_irqsave(&root->kernfs_rename_lock, flags);
parent = kernfs_parent(kn);
kernfs_get(parent);
read_unlock_irqrestore(&root->kernfs_rename_lock, flags);
return parent;
}
/**
* kernfs_name_hash - calculate hash of @ns + @name
* @name: Null terminated string to hash
* @ns: Namespace tag to hash
*
* Return: 31-bit hash of ns + name (so it fits in an off_t)
*/
static unsigned int kernfs_name_hash(const char *name, const void *ns)
{
unsigned long hash = init_name_hash(ns);
unsigned int len = strlen(name);
while (len--)
hash = partial_name_hash(*name++, hash);
hash = end_name_hash(hash);
hash &= 0x7fffffffU;
/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
if (hash < 2)
hash += 2;
if (hash >= INT_MAX)
hash = INT_MAX - 1;
return hash;
}
static int kernfs_name_compare(unsigned int hash, const char *name,
const void *ns, const struct kernfs_node *kn)
{
if (hash < kn->hash)
return -1;
if (hash > kn->hash)
return 1;
if (ns < kn->ns)
return -1;
if (ns > kn->ns)
return 1;
return strcmp(name, kernfs_rcu_name(kn));
}
static int kernfs_sd_compare(const struct kernfs_node *left,
const struct kernfs_node *right)
{
return kernfs_name_compare(left->hash, kernfs_rcu_name(left), left->ns, right);
}
/**
* kernfs_link_sibling - link kernfs_node into sibling rbtree
* @kn: kernfs_node of interest
*
* Link @kn into its sibling rbtree which starts from
* @kn->parent->dir.children.
*
* Locking:
* kernfs_rwsem held exclusive
*
* Return:
* %0 on success, -EEXIST on failure.
*/
static int kernfs_link_sibling(struct kernfs_node *kn)
{
struct rb_node *parent = NULL;
struct kernfs_node *kn_parent;
struct rb_node **node;
kn_parent = kernfs_parent(kn);
node = &kn_parent->dir.children.rb_node;
while (*node) {
struct kernfs_node *pos;
int result;
pos = rb_to_kn(*node);
parent = *node;
result = kernfs_sd_compare(kn, pos);
if (result < 0)
node = &pos->rb.rb_left;
else if (result > 0)
node = &pos->rb.rb_right;
else
return -EEXIST;
}
/* add new node and rebalance the tree */
rb_link_node(&kn->rb, parent, node);
rb_insert_color(&kn->rb, &kn_parent->dir.children);
/* successfully added, account subdir number */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
kn_parent->dir.subdirs++;
kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
return 0;
}
/**
* kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
* @kn: kernfs_node of interest
*
* Try to unlink @kn from its sibling rbtree which starts from
* kn->parent->dir.children.
*
* Return: %true if @kn was actually removed,
* %false if @kn wasn't on the rbtree.
*
* Locking:
* kernfs_rwsem held exclusive
*/
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
struct kernfs_node *kn_parent;
if (RB_EMPTY_NODE(&kn->rb))
return false;
kn_parent = kernfs_parent(kn);
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
kn_parent->dir.subdirs--;
kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
rb_erase(&kn->rb, &kn_parent->dir.children);
RB_CLEAR_NODE(&kn->rb);
return true;
}
/**
* kernfs_get_active - get an active reference to kernfs_node
* @kn: kernfs_node to get an active reference to
*
* Get an active reference of @kn. This function is noop if @kn
* is %NULL.
*
* Return:
* Pointer to @kn on success, %NULL on failure.
*/
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
if (unlikely(!kn))
return NULL;
if (!atomic_inc_unless_negative(&kn->active))
return NULL;
if (kernfs_lockdep(kn))
rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
return kn;
}
/**
* kernfs_put_active - put an active reference to kernfs_node
* @kn: kernfs_node to put an active reference to
*
* Put an active reference to @kn. This function is noop if @kn
* is %NULL.
*/
void kernfs_put_active(struct kernfs_node *kn)
{
int v;
if (unlikely(!kn))
return;
if (kernfs_lockdep(kn))
rwsem_release(&kn->dep_map, _RET_IP_);
v = atomic_dec_return(&kn->active);
if (likely(v != KN_DEACTIVATED_BIAS))
return;
wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}
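/*
 * Example (illustrative sketch only): the usual pairing of
 * kernfs_get_active()/kernfs_put_active() around work that needs @kn to
 * stay active. foo_operate_on() is hypothetical.
 */
#if 0
static int foo_operate_on(struct kernfs_node *kn)
{
	int ret;

	if (!kernfs_get_active(kn))
		return -ENODEV;		/* node deactivated or being removed */

	/* ... work that relies on @kn staying active ... */
	ret = 0;

	kernfs_put_active(kn);
	return ret;
}
#endif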
/**
* kernfs_drain - drain kernfs_node
* @kn: kernfs_node to drain
*
* Drain existing usages and nuke all existing mmaps of @kn. Multiple
* removers may invoke this function concurrently on @kn and all will
* return after draining is complete.
*/
static void kernfs_drain(struct kernfs_node *kn)
__releases(&kernfs_root(kn)->kernfs_rwsem)
__acquires(&kernfs_root(kn)->kernfs_rwsem)
{
struct kernfs_root *root = kernfs_root(kn);
lockdep_assert_held_write(&root->kernfs_rwsem);
WARN_ON_ONCE(kernfs_active(kn));
/*
* Skip draining if already fully drained. This avoids draining and its
* lockdep annotations for nodes which have never been activated,
* allowing embedding kernfs_remove() in create error paths without
* worrying about draining.
*/
if (atomic_read(&kn->active) == KN_DEACTIVATED_BIAS &&
!kernfs_should_drain_open_files(kn))
return;
up_write(&root->kernfs_rwsem);
if (kernfs_lockdep(kn)) {
rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
lock_contended(&kn->dep_map, _RET_IP_);
}
wait_event(root->deactivate_waitq,
atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
if (kernfs_lockdep(kn)) {
lock_acquired(&kn->dep_map, _RET_IP_);
rwsem_release(&kn->dep_map, _RET_IP_);
}
if (kernfs_should_drain_open_files(kn))
kernfs_drain_open_files(kn);
down_write(&root->kernfs_rwsem);
}
/**
* kernfs_get - get a reference count on a kernfs_node
* @kn: the target kernfs_node
*/
void kernfs_get(struct kernfs_node *kn)
{
if (kn) {
WARN_ON(!atomic_read(&kn->count));
atomic_inc(&kn->count);
}
}
EXPORT_SYMBOL_GPL(kernfs_get);
static void kernfs_free_rcu(struct rcu_head *rcu)
{
struct kernfs_node *kn = container_of(rcu, struct kernfs_node, rcu);
/* If the whole node goes away, then name can't be used outside */
kfree_const(rcu_access_pointer(kn->name));
if (kn->iattr) {
simple_xattrs_free(&kn->iattr->xattrs, NULL);
kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
}
kmem_cache_free(kernfs_node_cache, kn);
}
/**
* kernfs_put - put a reference count on a kernfs_node
* @kn: the target kernfs_node
*
* Put a reference count of @kn and destroy it if it reached zero.
*/
void kernfs_put(struct kernfs_node *kn)
{
struct kernfs_node *parent;
struct kernfs_root *root;
if (!kn || !atomic_dec_and_test(&kn->count))
return;
root = kernfs_root(kn);
repeat:
/*
* Moving/renaming is always done while holding reference.
* kn->parent won't change beneath us.
*/
parent = kernfs_parent(kn);
WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
"kernfs_put: %s/%s: released with incorrect active_ref %d\n",
parent ? rcu_dereference(parent->name) : "",
rcu_dereference(kn->name), atomic_read(&kn->active));
if (kernfs_type(kn) == KERNFS_LINK)
kernfs_put(kn->symlink.target_kn);
spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
spin_unlock(&root->kernfs_idr_lock);
call_rcu(&kn->rcu, kernfs_free_rcu);
kn = parent;
if (kn) {
if (atomic_dec_and_test(&kn->count))
goto repeat;
} else {
/* just released the root kn, free @root too */
idr_destroy(&root->ino_idr);
kfree_rcu(root, rcu);
}
}
EXPORT_SYMBOL_GPL(kernfs_put);
/**
* kernfs_node_from_dentry - determine kernfs_node associated with a dentry
* @dentry: the dentry in question
*
* Return: the kernfs_node associated with @dentry. If @dentry is not a
* kernfs one, %NULL is returned.
*
* While the returned kernfs_node will stay accessible as long as @dentry
* is accessible, the returned node can be in any state and the caller is
* fully responsible for determining what's accessible.
*/
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
{
if (dentry->d_sb->s_op == &kernfs_sops)
return kernfs_dentry_node(dentry);
return NULL;
}
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
unsigned flags)
{
struct kernfs_node *kn;
u32 id_highbits;
int ret;
name = kstrdup_const(name, GFP_KERNEL);
if (!name)
return NULL;
kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
if (!kn)
goto err_out1;
idr_preload(GFP_KERNEL);
spin_lock(&root->kernfs_idr_lock);
ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
if (ret >= 0 && ret < root->last_id_lowbits)
root->id_highbits++;
id_highbits = root->id_highbits;
root->last_id_lowbits = ret;
spin_unlock(&root->kernfs_idr_lock);
idr_preload_end();
if (ret < 0)
goto err_out2;
kn->id = (u64)id_highbits << 32 | ret;
atomic_set(&kn->count, 1);
atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
RB_CLEAR_NODE(&kn->rb);
rcu_assign_pointer(kn->name, name);
kn->mode = mode;
kn->flags = flags;
if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
struct iattr iattr = {
.ia_valid = ATTR_UID | ATTR_GID,
.ia_uid = uid,
.ia_gid = gid,
};
ret = __kernfs_setattr(kn, &iattr);
if (ret < 0)
goto err_out3;
}
if (parent) {
ret = security_kernfs_init_security(parent, kn);
if (ret)
goto err_out3;
}
return kn;
err_out3:
spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
spin_unlock(&root->kernfs_idr_lock);
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
kfree_const(name);
return NULL;
}
struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
unsigned flags)
{
struct kernfs_node *kn;
if (parent->mode & S_ISGID) {
/* this code block imitates inode_init_owner() for
* kernfs
*/
if (parent->iattr)
gid = parent->iattr->ia_gid;
if (flags & KERNFS_DIR)
mode |= S_ISGID;
}
kn = __kernfs_new_node(kernfs_root(parent), parent,
name, mode, uid, gid, flags);
if (kn) {
kernfs_get(parent);
rcu_assign_pointer(kn->__parent, parent);
}
return kn;
}
/*
* kernfs_find_and_get_node_by_id - get kernfs_node from node id
* @root: the kernfs root
* @id: the target node id
*
* @id's lower 32bits encode ino and upper gen. If the gen portion is
* zero, all generations are matched.
*
* Return: %NULL on failure,
* otherwise a kernfs node with reference counter incremented.
*/
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
u64 id)
{
struct kernfs_node *kn;
ino_t ino = kernfs_id_ino(id);
u32 gen = kernfs_id_gen(id);
rcu_read_lock();
kn = idr_find(&root->ino_idr, (u32)ino);
if (!kn)
goto err_unlock;
if (sizeof(ino_t) >= sizeof(u64)) {
/* we looked up with the low 32bits, compare the whole */
if (kernfs_ino(kn) != ino)
goto err_unlock;
} else {
/* 0 matches all generations */
if (unlikely(gen && kernfs_gen(kn) != gen))
goto err_unlock;
}
/*
* We should fail if @kn has never been activated and guarantee success
* if the caller knows that @kn is active. Both can be achieved by
* __kernfs_active() which tests @kn->active without kernfs_rwsem.
*/
if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
goto err_unlock;
rcu_read_unlock();
return kn;
err_unlock:
rcu_read_unlock();
return NULL;
}
/**
* kernfs_add_one - add kernfs_node to parent without warning
* @kn: kernfs_node to be added
*
* The caller must already have initialized @kn->parent. This
* function increments nlink of the parent's inode if @kn is a
* directory and links it into the children list of the parent.
*
* Return:
* %0 on success, -EEXIST if entry with the given name already
* exists.
*/
int kernfs_add_one(struct kernfs_node *kn)
{
struct kernfs_root *root = kernfs_root(kn);
struct kernfs_iattrs *ps_iattr;
struct kernfs_node *parent;
bool has_ns;
int ret;
down_write(&root->kernfs_rwsem);
parent = kernfs_parent(kn);
ret = -EINVAL;
has_ns = kernfs_ns_enabled(parent);
if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
has_ns ? "required" : "invalid",
kernfs_rcu_name(parent), kernfs_rcu_name(kn)))
goto out_unlock;
if (kernfs_type(parent) != KERNFS_DIR)
goto out_unlock;
ret = -ENOENT;
if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
goto out_unlock;
kn->hash = kernfs_name_hash(kernfs_rcu_name(kn), kn->ns);
ret = kernfs_link_sibling(kn);
if (ret)
goto out_unlock;
/* Update timestamps on the parent */
down_write(&root->kernfs_iattr_rwsem);
ps_iattr = parent->iattr;
if (ps_iattr) {
ktime_get_real_ts64(&ps_iattr->ia_ctime);
ps_iattr->ia_mtime = ps_iattr->ia_ctime;
}
up_write(&root->kernfs_iattr_rwsem);
up_write(&root->kernfs_rwsem);
/*
* Activate the new node unless CREATE_DEACTIVATED is requested.
* If not activated here, the kernfs user is responsible for
* activating the node with kernfs_activate(). A node which hasn't
* been activated is not visible to userland and its removal won't
* trigger deactivation.
*/
if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
kernfs_activate(kn);
return 0;
out_unlock:
up_write(&root->kernfs_rwsem);
return ret;
}
/**
* kernfs_find_ns - find kernfs_node with the given name
* @parent: kernfs_node to search under
* @name: name to look for
* @ns: the namespace tag to use
*
* Look for kernfs_node with name @name under @parent.
*
* Return: pointer to the found kernfs_node on success, %NULL on failure.
*/
static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
const unsigned char *name,
const void *ns)
{
struct rb_node *node = parent->dir.children.rb_node;
bool has_ns = kernfs_ns_enabled(parent);
unsigned int hash;
lockdep_assert_held(&kernfs_root(parent)->kernfs_rwsem);
if (has_ns != (bool)ns) {
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
has_ns ? "required" : "invalid", kernfs_rcu_name(parent), name);
return NULL;
}
hash = kernfs_name_hash(name, ns);
while (node) {
struct kernfs_node *kn;
int result;
kn = rb_to_kn(node);
result = kernfs_name_compare(hash, name, ns, kn);
if (result < 0)
node = node->rb_left;
else if (result > 0)
node = node->rb_right;
else
return kn;
}
return NULL;
}
static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
const unsigned char *path,
const void *ns)
{
ssize_t len;
char *p, *name;
lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);
spin_lock_irq(&kernfs_pr_cont_lock);
len = strscpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
if (len < 0) {
spin_unlock_irq(&kernfs_pr_cont_lock);
return NULL;
}
p = kernfs_pr_cont_buf;
while ((name = strsep(&p, "/")) && parent) {
if (*name == '\0')
continue;
parent = kernfs_find_ns(parent, name, ns);
}
spin_unlock_irq(&kernfs_pr_cont_lock);
return parent;
}
/**
* kernfs_find_and_get_ns - find and get kernfs_node with the given name
* @parent: kernfs_node to search under
* @name: name to look for
* @ns: the namespace tag to use
*
* Look for kernfs_node with name @name under @parent and get a reference
* if found. This function may sleep.
*
* Return: pointer to the found kernfs_node on success, %NULL on failure.
*/
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
const char *name, const void *ns)
{
struct kernfs_node *kn;
struct kernfs_root *root = kernfs_root(parent);
down_read(&root->kernfs_rwsem);
kn = kernfs_find_ns(parent, name, ns);
kernfs_get(kn);
up_read(&root->kernfs_rwsem);
return kn;
}
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
/**
* kernfs_walk_and_get_ns - find and get kernfs_node with the given path
* @parent: kernfs_node to search under
* @path: path to look for
* @ns: the namespace tag to use
*
* Look for kernfs_node with path @path under @parent and get a reference
* if found. This function may sleep.
*
* Return: pointer to the found kernfs_node on success, %NULL on failure.
*/
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
const char *path, const void *ns)
{
struct kernfs_node *kn;
struct kernfs_root *root = kernfs_root(parent);
down_read(&root->kernfs_rwsem);
kn = kernfs_walk_ns(parent, path, ns);
kernfs_get(kn);
up_read(&root->kernfs_rwsem);
return kn;
}
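/*
 * Example (illustrative sketch only): path lookup with the matching
 * reference drop. The "foo/bar" path and foo_lookup_example() are
 * hypothetical.
 */
#if 0
static void foo_lookup_example(struct kernfs_node *parent)
{
	struct kernfs_node *kn;

	kn = kernfs_walk_and_get_ns(parent, "foo/bar", NULL);
	if (!kn)
		return;

	/* ... use kn ... */

	kernfs_put(kn);
}
#endif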
unsigned int kernfs_root_flags(struct kernfs_node *kn)
{
return kernfs_root(kn)->flags;
}
/**
* kernfs_create_root - create a new kernfs hierarchy
* @scops: optional syscall operations for the hierarchy
* @flags: KERNFS_ROOT_* flags
* @priv: opaque data associated with the new directory
*
* Return: the root of the new hierarchy on success, ERR_PTR() value on
* failure.
*/
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv)
{
struct kernfs_root *root;
struct kernfs_node *kn;
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
idr_init(&root->ino_idr);
spin_lock_init(&root->kernfs_idr_lock);
init_rwsem(&root->kernfs_rwsem);
init_rwsem(&root->kernfs_iattr_rwsem);
init_rwsem(&root->kernfs_supers_rwsem);
INIT_LIST_HEAD(&root->supers);
rwlock_init(&root->kernfs_rename_lock);
/*
* On 64bit ino setups, id is ino. On 32bit, low 32bits are ino.
* High bits are generation. The starting value for both ino and
* generation is 1. Initialize upper 32bit allocation
* accordingly.
*/
if (sizeof(ino_t) >= sizeof(u64))
root->id_highbits = 0;
else
root->id_highbits = 1;
kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
KERNFS_DIR);
if (!kn) {
idr_destroy(&root->ino_idr);
kfree(root);
return ERR_PTR(-ENOMEM);
}
kn->priv = priv;
kn->dir.root = root;
root->syscall_ops = scops;
root->flags = flags;
root->kn = kn;
init_waitqueue_head(&root->deactivate_waitq);
if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
kernfs_activate(kn);
return root;
}
/**
* kernfs_destroy_root - destroy a kernfs hierarchy
* @root: root of the hierarchy to destroy
*
* Destroy the hierarchy anchored at @root by removing all existing
* directories and destroying @root.
*/
void kernfs_destroy_root(struct kernfs_root *root)
{
/*
* kernfs_remove holds kernfs_rwsem from the root so the root
* shouldn't be freed during the operation.
*/
kernfs_get(root->kn);
kernfs_remove(root->kn);
kernfs_put(root->kn); /* will also free @root */
}
/**
* kernfs_root_to_node - return the kernfs_node associated with a kernfs_root
* @root: root to use to lookup
*
* Return: @root's kernfs_node
*/
struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root)
{
return root->kn;
}
/**
* kernfs_create_dir_ns - create a directory
* @parent: parent in which to create a new directory
* @name: name of the new directory
* @mode: mode of the new directory
* @uid: uid of the new directory
* @gid: gid of the new directory
* @priv: opaque data associated with the new directory
* @ns: optional namespace tag of the directory
*
* Return: the created node on success, ERR_PTR() value on failure.
*/
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
void *priv, const void *ns)
{
struct kernfs_node *kn;
int rc;
/* allocate */
kn = kernfs_new_node(parent, name, mode | S_IFDIR,
uid, gid, KERNFS_DIR);
if (!kn)
return ERR_PTR(-ENOMEM);
kn->dir.root = parent->dir.root;
kn->ns = ns;
kn->priv = priv;
/* link in */
rc = kernfs_add_one(kn);
if (!rc)
return kn;
kernfs_put(kn);
return ERR_PTR(rc);
}
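/*
 * Example (illustrative sketch only): creating a hierarchy with a single
 * subdirectory. foo_create_hierarchy() and the directory name are
 * hypothetical; error handling is kept minimal.
 */
#if 0
static struct kernfs_root *foo_create_hierarchy(void)
{
	struct kernfs_root *root;
	struct kernfs_node *dir;

	root = kernfs_create_root(NULL, 0, NULL);
	if (IS_ERR(root))
		return root;

	dir = kernfs_create_dir_ns(kernfs_root_to_node(root), "foo",
				   S_IRUGO | S_IXUGO, GLOBAL_ROOT_UID,
				   GLOBAL_ROOT_GID, NULL, NULL);
	if (IS_ERR(dir)) {
		kernfs_destroy_root(root);
		return ERR_CAST(dir);
	}
	return root;
}
#endif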
/**
* kernfs_create_empty_dir - create an always empty directory
* @parent: parent in which to create a new directory
* @name: name of the new directory
*
* Return: the created node on success, ERR_PTR() value on failure.
*/
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name)
{
struct kernfs_node *kn;
int rc;
/* allocate */
kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
if (!kn)
return ERR_PTR(-ENOMEM);
kn->flags |= KERNFS_EMPTY_DIR;
kn->dir.root = parent->dir.root;
kn->ns = NULL;
kn->priv = NULL;
/* link in */
rc = kernfs_add_one(kn);
if (!rc)
return kn;
kernfs_put(kn);
return ERR_PTR(rc);
}
static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *dentry, unsigned int flags)
{
struct kernfs_node *kn, *parent;
struct kernfs_root *root;
if (flags & LOOKUP_RCU)
return -ECHILD;
/* Negative hashed dentry? */
if (d_really_is_negative(dentry)) {
/* If the kernfs parent node has changed discard and
* proceed to ->lookup.
*
* There's nothing special needed here when getting the
* dentry parent, even if a concurrent rename is in
* progress. That's because the dentry is negative so
* it can only be the target of the rename and it will
* be doing a d_move() not a replace. Consequently the
* dentry d_parent won't change over the d_move().
*
* Also kernfs negative dentries transitioning from
* negative to positive during revalidate won't happen
* because they are invalidated on containing directory
* changes and the lookup re-done so that a new positive
* dentry can be properly created.
*/
root = kernfs_root_from_sb(dentry->d_sb);
down_read(&root->kernfs_rwsem);
parent = kernfs_dentry_node(dentry->d_parent);
if (parent) {
if (kernfs_dir_changed(parent, dentry)) {
up_read(&root->kernfs_rwsem);
return 0;
}
}
up_read(&root->kernfs_rwsem);
/* The kernfs parent node hasn't changed, leave the
* dentry negative and return success.
*/
return 1;
}
kn = kernfs_dentry_node(dentry);
root = kernfs_root(kn);
down_read(&root->kernfs_rwsem);
/* The kernfs node has been deactivated */
if (!kernfs_active(kn))
goto out_bad;
parent = kernfs_parent(kn);
/* The kernfs node has been moved? */
if (kernfs_dentry_node(dentry->d_parent) != parent)
goto out_bad;
/* The kernfs node has been renamed */
if (strcmp(dentry->d_name.name, kernfs_rcu_name(kn)) != 0)
goto out_bad;
/* The kernfs node has been moved to a different namespace */
if (parent && kernfs_ns_enabled(parent) &&
kernfs_info(dentry->d_sb)->ns != kn->ns)
goto out_bad;
up_read(&root->kernfs_rwsem);
return 1;
out_bad:
up_read(&root->kernfs_rwsem);
return 0;
}
const struct dentry_operations kernfs_dops = {
.d_revalidate = kernfs_dop_revalidate,
};
static struct dentry *kernfs_iop_lookup(struct inode *dir,
struct dentry *dentry,
unsigned int flags)
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_node *kn;
struct kernfs_root *root;
struct inode *inode = NULL;
const void *ns = NULL;
root = kernfs_root(parent);
down_read(&root->kernfs_rwsem);
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dir->i_sb)->ns;
kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
/* attach dentry and inode */
if (kn) {
/* Inactive nodes are invisible to the VFS so don't
* create a negative.
*/
if (!kernfs_active(kn)) {
up_read(&root->kernfs_rwsem);
return NULL;
}
inode = kernfs_get_inode(dir->i_sb, kn);
if (!inode)
inode = ERR_PTR(-ENOMEM);
}
/*
* Needed for negative dentry validation.
* The negative dentry can be created in kernfs_iop_lookup()
* or transforms from positive dentry in dentry_unlink_inode()
* called from vfs_rmdir().
*/
if (!IS_ERR(inode))
kernfs_set_rev(parent, dentry);
up_read(&root->kernfs_rwsem);
/* instantiate and hash (possibly negative) dentry */
return d_splice_alias(inode, dentry);
}
static struct dentry *kernfs_iop_mkdir(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
umode_t mode)
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
int ret;
if (!scops || !scops->mkdir)
return ERR_PTR(-EPERM);
if (!kernfs_get_active(parent))
return ERR_PTR(-ENODEV);
ret = scops->mkdir(parent, dentry->d_name.name, mode);
kernfs_put_active(parent);
return ERR_PTR(ret);
}
static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
{
struct kernfs_node *kn = kernfs_dentry_node(dentry);
struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
int ret;
if (!scops || !scops->rmdir)
return -EPERM;
if (!kernfs_get_active(kn))
return -ENODEV;
ret = scops->rmdir(kn);
kernfs_put_active(kn);
return ret;
}
static int kernfs_iop_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct kernfs_node *kn = kernfs_dentry_node(old_dentry);
struct kernfs_node *new_parent = new_dir->i_private;
struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
int ret;
if (flags)
return -EINVAL;
if (!scops || !scops->rename)
return -EPERM;
if (!kernfs_get_active(kn))
return -ENODEV;
if (!kernfs_get_active(new_parent)) {
kernfs_put_active(kn);
return -ENODEV;
}
ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
kernfs_put_active(new_parent);
kernfs_put_active(kn);
return ret;
}
const struct inode_operations kernfs_dir_iops = {
.lookup = kernfs_iop_lookup,
.permission = kernfs_iop_permission,
.setattr = kernfs_iop_setattr,
.getattr = kernfs_iop_getattr,
.listxattr = kernfs_iop_listxattr,
.mkdir = kernfs_iop_mkdir,
.rmdir = kernfs_iop_rmdir,
.rename = kernfs_iop_rename,
};
static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
{
struct kernfs_node *last;
while (true) {
struct rb_node *rbn;
last = pos;
if (kernfs_type(pos) != KERNFS_DIR)
break;
rbn = rb_first(&pos->dir.children);
if (!rbn)
break;
pos = rb_to_kn(rbn);
}
return last;
}
/**
* kernfs_next_descendant_post - find the next descendant for post-order walk
* @pos: the current position (%NULL to initiate traversal)
* @root: kernfs_node whose descendants to walk
*
* Find the next descendant to visit for post-order traversal of @root's
* descendants. @root is included in the iteration and the last node to be
* visited.
*
* Return: the next descendant to visit or %NULL when done.
*/
static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
struct kernfs_node *root)
{
struct rb_node *rbn;
lockdep_assert_held_write(&kernfs_root(root)->kernfs_rwsem);
/* if first iteration, visit leftmost descendant which may be root */
if (!pos)
return kernfs_leftmost_descendant(root);
/* if we visited @root, we're done */
if (pos == root)
return NULL;
/* if there's an unvisited sibling, visit its leftmost descendant */
rbn = rb_next(&pos->rb);
if (rbn)
return kernfs_leftmost_descendant(rb_to_kn(rbn));
/* no sibling left, visit parent */
return kernfs_parent(pos);
}
static void kernfs_activate_one(struct kernfs_node *kn)
{
lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
kn->flags |= KERNFS_ACTIVATED;
if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
return;
WARN_ON_ONCE(rcu_access_pointer(kn->__parent) && RB_EMPTY_NODE(&kn->rb));
WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
}
/**
* kernfs_activate - activate a node which started deactivated
* @kn: kernfs_node whose subtree is to be activated
*
* If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
* needs to be explicitly activated. A node which hasn't been activated
* isn't visible to userland and deactivation is skipped during its
* removal. This is useful to construct atomic init sequences where
* creation of multiple nodes should either succeed or fail atomically.
*
* The caller is responsible for ensuring that this function is not called
* after kernfs_remove*() is invoked on @kn.
*/
void kernfs_activate(struct kernfs_node *kn)
{
struct kernfs_node *pos;
struct kernfs_root *root = kernfs_root(kn);
down_write(&root->kernfs_rwsem);
pos = NULL;
while ((pos = kernfs_next_descendant_post(pos, kn)))
kernfs_activate_one(pos);
up_write(&root->kernfs_rwsem);
}
/**
* kernfs_show - show or hide a node
* @kn: kernfs_node to show or hide
* @show: whether to show or hide
*
* If @show is %false, @kn is marked hidden and deactivated. A hidden node is
* ignored in future activations. If %true, the mark is removed and activation
* state is restored. This function won't implicitly activate a new node in a
* %KERNFS_ROOT_CREATE_DEACTIVATED root which hasn't been activated yet.
*
* To avoid recursion complexities, directories aren't supported for now.
*/
void kernfs_show(struct kernfs_node *kn, bool show)
{
struct kernfs_root *root = kernfs_root(kn);
if (WARN_ON_ONCE(kernfs_type(kn) == KERNFS_DIR))
return;
down_write(&root->kernfs_rwsem);
if (show) {
kn->flags &= ~KERNFS_HIDDEN;
if (kn->flags & KERNFS_ACTIVATED)
kernfs_activate_one(kn);
} else {
kn->flags |= KERNFS_HIDDEN;
if (kernfs_active(kn))
atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
kernfs_drain(kn);
}
up_write(&root->kernfs_rwsem);
}
static void __kernfs_remove(struct kernfs_node *kn)
{
struct kernfs_node *pos, *parent;
/* Short-circuit if non-root @kn has already finished removal. */
if (!kn)
return;
lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
/*
* This is for kernfs_remove_self() which plays with active ref
* after removal.
*/
if (kernfs_parent(kn) && RB_EMPTY_NODE(&kn->rb))
return;
pr_debug("kernfs %s: removing\n", kernfs_rcu_name(kn));
/* prevent new usage by marking all nodes removing and deactivating */
pos = NULL;
while ((pos = kernfs_next_descendant_post(pos, kn))) {
pos->flags |= KERNFS_REMOVING;
if (kernfs_active(pos))
atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
}
/* deactivate and unlink the subtree node-by-node */
do {
pos = kernfs_leftmost_descendant(kn);
/*
* kernfs_drain() may drop kernfs_rwsem temporarily and @pos's
* base ref could have been put by someone else by the time
* the function returns. Make sure it doesn't go away
* underneath us.
*/
kernfs_get(pos);
kernfs_drain(pos);
parent = kernfs_parent(pos);
/*
* kernfs_unlink_sibling() succeeds once per node. Use it
* to decide who's responsible for cleanups.
*/
if (!parent || kernfs_unlink_sibling(pos)) {
struct kernfs_iattrs *ps_iattr =
parent ? parent->iattr : NULL;
/* update timestamps on the parent */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (ps_iattr) {
ktime_get_real_ts64(&ps_iattr->ia_ctime);
ps_iattr->ia_mtime = ps_iattr->ia_ctime;
}
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
kernfs_put(pos);
}
kernfs_put(pos);
} while (pos != kn);
}
/**
* kernfs_remove - remove a kernfs_node recursively
* @kn: the kernfs_node to remove
*
* Remove @kn along with all its subdirectories and files.
*/
void kernfs_remove(struct kernfs_node *kn)
{
struct kernfs_root *root;
if (!kn)
return;
root = kernfs_root(kn);
down_write(&root->kernfs_rwsem);
__kernfs_remove(kn);
up_write(&root->kernfs_rwsem);
}
/**
* kernfs_break_active_protection - break out of active protection
* @kn: the self kernfs_node
*
* The caller must be running off of a kernfs operation which is invoked
* with an active reference - e.g. one of kernfs_ops. Each invocation of
* this function must also be matched with an invocation of
* kernfs_unbreak_active_protection().
*
* This function releases the active reference of @kn the caller is
* holding. Once this function is called, @kn may be removed at any point
* and the caller is solely responsible for ensuring that the objects it
* dereferences are accessible.
*/
void kernfs_break_active_protection(struct kernfs_node *kn)
{
/*
* Take ourself out of the active ref dependency chain. If
* we're called without an active ref, lockdep will complain.
*/
kernfs_put_active(kn);
}
/**
* kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
* @kn: the self kernfs_node
*
* If kernfs_break_active_protection() was called, this function must be
* invoked before finishing the kernfs operation. Note that while this
* function restores the active reference, it doesn't and can't actually
* restore the active protection - @kn may already be removed or be in the
* process of being drained and removed. Once kernfs_break_active_protection() is
* invoked, that protection is irreversibly gone for the kernfs operation
* instance.
*
* While this function may be called at any point after
* kernfs_break_active_protection() is invoked, its most useful location
* would be right before the enclosing kernfs operation returns.
*/
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
/*
* @kn->active could be in any state; however, the increment we do
* here will be undone as soon as the enclosing kernfs operation
* finishes and this temporary bump can't break anything. If @kn
* is alive, nothing changes. If @kn is being deactivated, the
* soon-to-follow put will either finish deactivation or restore
* deactivated state. If @kn is already removed, the temporary
* bump is guaranteed to be gone before @kn is released.
*/
atomic_inc(&kn->active);
if (kernfs_lockdep(kn))
rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
/**
* kernfs_remove_self - remove a kernfs_node from its own method
* @kn: the self kernfs_node to remove
*
* The caller must be running off of a kernfs operation which is invoked
* with an active reference - e.g. one of kernfs_ops. This can be used to
* implement a file operation which deletes itself.
*
* For example, the "delete" file for a sysfs device directory can be
* implemented by invoking kernfs_remove_self() on the "delete" file
* itself. This function breaks the circular dependency of trying to
* deactivate self while holding an active ref itself. It isn't necessary
* to modify the usual removal path to use kernfs_remove_self(). The
* "delete" implementation can simply invoke kernfs_remove_self() on self
* before proceeding with the usual removal path. kernfs will ignore later
* kernfs_remove() on self.
*
* kernfs_remove_self() can be called multiple times concurrently on the
* same kernfs_node. Only the first one actually performs removal and
* returns %true. All others will wait until the kernfs operation which
* won self-removal finishes and return %false. Note that the losers wait
* for the completion of not only the winning kernfs_remove_self() but also
* the whole kernfs_ops which won the arbitration. This can be used to
* guarantee, for example, all concurrent writes to a "delete" file to
* finish only after the whole operation is complete.
*
* Return: %true if @kn is removed by this call, otherwise %false.
*/
bool kernfs_remove_self(struct kernfs_node *kn)
{
bool ret;
struct kernfs_root *root = kernfs_root(kn);
down_write(&root->kernfs_rwsem);
kernfs_break_active_protection(kn);
/*
* SUICIDAL is used to arbitrate among competing invocations. Only
* the first one will actually perform removal. When the removal
* is complete, SUICIDED is set and the active ref is restored
* while kernfs_rwsem is held exclusive. The ones which lost
* arbitration wait for SUICIDED && drained which can happen only
* after the enclosing kernfs operation which executed the winning
* instance of kernfs_remove_self() finished.
*/
if (!(kn->flags & KERNFS_SUICIDAL)) {
kn->flags |= KERNFS_SUICIDAL;
__kernfs_remove(kn);
kn->flags |= KERNFS_SUICIDED;
ret = true;
} else {
wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
DEFINE_WAIT(wait);
while (true) {
prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
if ((kn->flags & KERNFS_SUICIDED) &&
atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
break;
up_write(&root->kernfs_rwsem);
schedule();
down_write(&root->kernfs_rwsem);
}
finish_wait(waitq, &wait);
WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
ret = false;
}
/*
* This must be done while kernfs_rwsem held exclusive; otherwise,
* waiting for SUICIDED && deactivated could finish prematurely.
*/
kernfs_unbreak_active_protection(kn);
up_write(&root->kernfs_rwsem);
return ret;
}
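/*
 * A sketch of the "delete" file pattern described above: a write handler
 * removes its own node with kernfs_remove_self() and only the writer that
 * wins the race proceeds with the actual teardown. The example_teardown()
 * helper and the surrounding kernfs_ops wiring are hypothetical.
 *
 *	static ssize_t example_delete_write(struct kernfs_open_file *of,
 *					    char *buf, size_t len, loff_t pos)
 *	{
 *		if (!kernfs_remove_self(of->kn))
 *			return len;	// somebody else won the removal race
 *
 *		example_teardown(of->kn->priv);	// hypothetical cleanup
 *		return len;
 *	}
 */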
/**
* kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
* @parent: parent of the target
* @name: name of the kernfs_node to remove
* @ns: namespace tag of the kernfs_node to remove
*
* Look for the kernfs_node with @name and @ns under @parent and remove it.
*
* Return: %0 on success, -ENOENT if such entry doesn't exist.
*/
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
const void *ns)
{
struct kernfs_node *kn;
struct kernfs_root *root;
if (!parent) {
WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
name);
return -ENOENT;
}
root = kernfs_root(parent);
down_write(&root->kernfs_rwsem);
kn = kernfs_find_ns(parent, name, ns);
if (kn) {
kernfs_get(kn);
__kernfs_remove(kn);
kernfs_put(kn);
}
up_write(&root->kernfs_rwsem);
if (kn)
return 0;
else
return -ENOENT;
}
/**
* kernfs_rename_ns - move and rename a kernfs_node
* @kn: target node
* @new_parent: new parent to put @kn under
* @new_name: new name
* @new_ns: new namespace tag
*
* Return: %0 on success, -errno on failure.
*/
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns)
{
struct kernfs_node *old_parent;
struct kernfs_root *root;
const char *old_name;
int error;
/* can't move or rename root */
if (!rcu_access_pointer(kn->__parent))
return -EINVAL;
root = kernfs_root(kn);
down_write(&root->kernfs_rwsem);
error = -ENOENT;
if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
(new_parent->flags & KERNFS_EMPTY_DIR))
goto out;
old_parent = kernfs_parent(kn);
if (root->flags & KERNFS_ROOT_INVARIANT_PARENT) {
error = -EINVAL;
if (WARN_ON_ONCE(old_parent != new_parent))
goto out;
}
error = 0;
old_name = kernfs_rcu_name(kn);
if (!new_name)
new_name = old_name;
if ((old_parent == new_parent) && (kn->ns == new_ns) &&
(strcmp(old_name, new_name) == 0))
goto out; /* nothing to rename */
error = -EEXIST;
if (kernfs_find_ns(new_parent, new_name, new_ns))
goto out;
/* rename kernfs_node */
if (strcmp(old_name, new_name) != 0) {
error = -ENOMEM;
new_name = kstrdup_const(new_name, GFP_KERNEL);
if (!new_name)
goto out;
} else {
new_name = NULL;
}
/*
* Move to the appropriate place in the appropriate directories rbtree.
*/
kernfs_unlink_sibling(kn);
/* rename_lock protects ->parent accessors */
if (old_parent != new_parent) {
kernfs_get(new_parent);
write_lock_irq(&root->kernfs_rename_lock);
rcu_assign_pointer(kn->__parent, new_parent);
kn->ns = new_ns;
if (new_name)
rcu_assign_pointer(kn->name, new_name);
write_unlock_irq(&root->kernfs_rename_lock);
kernfs_put(old_parent);
} else {
/* name assignment is RCU protected, parent is the same */
kn->ns = new_ns;
if (new_name)
rcu_assign_pointer(kn->name, new_name);
}
kn->hash = kernfs_name_hash(new_name ?: old_name, kn->ns);
kernfs_link_sibling(kn);
if (new_name && !is_kernel_rodata((unsigned long)old_name))
kfree_rcu_mightsleep(old_name);
error = 0;
out:
up_write(&root->kernfs_rwsem);
return error;
}
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
kernfs_put(filp->private_data);
return 0;
}
static struct kernfs_node *kernfs_dir_pos(const void *ns,
struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
if (pos) {
int valid = kernfs_active(pos) &&
rcu_access_pointer(pos->__parent) == parent &&
hash == pos->hash;
kernfs_put(pos);
if (!valid)
pos = NULL;
}
if (!pos && (hash > 1) && (hash < INT_MAX)) {
struct rb_node *node = parent->dir.children.rb_node;
while (node) {
pos = rb_to_kn(node);
if (hash < pos->hash)
node = node->rb_left;
else if (hash > pos->hash)
node = node->rb_right;
else
break;
}
}
/* Skip over entries which are dying/dead or in the wrong namespace */
while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
struct rb_node *node = rb_next(&pos->rb);
if (!node)
pos = NULL;
else
pos = rb_to_kn(node);
}
return pos;
}
static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
{
pos = kernfs_dir_pos(ns, parent, ino, pos);
if (pos) {
do {
struct rb_node *node = rb_next(&pos->rb);
if (!node)
pos = NULL;
else
pos = rb_to_kn(node);
} while (pos && (!kernfs_active(pos) || pos->ns != ns));
}
return pos;
}
static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct kernfs_node *parent = kernfs_dentry_node(dentry);
struct kernfs_node *pos = file->private_data;
struct kernfs_root *root;
const void *ns = NULL;
if (!dir_emit_dots(file, ctx))
return 0;
root = kernfs_root(parent);
down_read(&root->kernfs_rwsem);
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dentry->d_sb)->ns;
for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
pos;
pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
const char *name = kernfs_rcu_name(pos);
unsigned int type = fs_umode_to_dtype(pos->mode);
int len = strlen(name);
ino_t ino = kernfs_ino(pos);
ctx->pos = pos->hash;
file->private_data = pos;
kernfs_get(pos);
if (!dir_emit(ctx, name, len, ino, type)) {
up_read(&root->kernfs_rwsem);
return 0;
}
}
up_read(&root->kernfs_rwsem);
file->private_data = NULL;
ctx->pos = INT_MAX;
return 0;
}
const struct file_operations kernfs_dir_fops = {
.read = generic_read_dir,
.iterate_shared = kernfs_fop_readdir,
.release = kernfs_dir_fop_release,
.llseek = generic_file_llseek,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/* Manage a process's keyrings
*
* Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
#include <linux/init_task.h>
#include <keys/request_key_auth-type.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = REFCOUNT_INIT(3),
.cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
.lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
.uid = GLOBAL_ROOT_UID,
};
/*
* Get or create a user register keyring.
*/
static struct key *get_user_register(struct user_namespace *user_ns)
{
struct key *reg_keyring = READ_ONCE(user_ns->user_keyring_register);
if (reg_keyring)
return reg_keyring;
down_write(&user_ns->keyring_sem);
/* Make sure there's a register keyring. It gets owned by the
* user_namespace's owner.
*/
reg_keyring = user_ns->user_keyring_register;
if (!reg_keyring) {
reg_keyring = keyring_alloc(".user_reg",
user_ns->owner, INVALID_GID,
&init_cred,
KEY_POS_WRITE | KEY_POS_SEARCH |
KEY_USR_VIEW | KEY_USR_READ,
0,
NULL, NULL);
if (!IS_ERR(reg_keyring))
smp_store_release(&user_ns->user_keyring_register,
reg_keyring);
}
up_write(&user_ns->keyring_sem);
/* We don't return a ref since the keyring is pinned by the user_ns */
return reg_keyring;
}
/*
* Look up the user and user session keyrings for the current process's UID,
* creating them if they don't exist.
*/
int look_up_user_keyrings(struct key **_user_keyring,
struct key **_user_session_keyring)
{
const struct cred *cred = current_cred();
struct user_namespace *user_ns = current_user_ns();
struct key *reg_keyring, *uid_keyring, *session_keyring;
key_perm_t user_keyring_perm;
key_ref_t uid_keyring_r, session_keyring_r;
uid_t uid = from_kuid(user_ns, cred->user->uid);
char buf[20];
int ret;
user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
kenter("%u", uid);
reg_keyring = get_user_register(user_ns);
if (IS_ERR(reg_keyring))
return PTR_ERR(reg_keyring);
down_write(&user_ns->keyring_sem);
ret = 0;
/* Get the user keyring. Note that there may be one in existence
* already as it may have been pinned by a session, but the user_struct
* pointing to it may have been destroyed by setuid.
*/
snprintf(buf, sizeof(buf), "_uid.%u", uid);
uid_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
&key_type_keyring, buf, false);
kdebug("_uid %p", uid_keyring_r);
if (uid_keyring_r == ERR_PTR(-EAGAIN)) {
uid_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_UID_KEYRING |
KEY_ALLOC_IN_QUOTA,
NULL, reg_keyring);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
}
} else if (IS_ERR(uid_keyring_r)) {
ret = PTR_ERR(uid_keyring_r);
goto error;
} else {
uid_keyring = key_ref_to_ptr(uid_keyring_r);
}
/* Get a default session keyring (which might also exist already) */
snprintf(buf, sizeof(buf), "_uid_ses.%u", uid);
session_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
&key_type_keyring, buf, false);
kdebug("_uid_ses %p", session_keyring_r);
if (session_keyring_r == ERR_PTR(-EAGAIN)) {
session_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
cred, user_keyring_perm,
KEY_ALLOC_UID_KEYRING |
KEY_ALLOC_IN_QUOTA,
NULL, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
}
/* We install a link from the user session keyring to
* the user keyring.
*/
ret = key_link(session_keyring, uid_keyring);
if (ret < 0)
goto error_release_session;
/* And only then link the user-session keyring to the
* register.
*/
ret = key_link(reg_keyring, session_keyring);
if (ret < 0)
goto error_release_session;
} else if (IS_ERR(session_keyring_r)) {
ret = PTR_ERR(session_keyring_r);
goto error_release;
} else {
session_keyring = key_ref_to_ptr(session_keyring_r);
}
up_write(&user_ns->keyring_sem);
if (_user_session_keyring)
*_user_session_keyring = session_keyring;
else
key_put(session_keyring);
if (_user_keyring)
*_user_keyring = uid_keyring;
else
key_put(uid_keyring);
kleave(" = 0");
return 0;
error_release_session:
key_put(session_keyring);
error_release:
key_put(uid_keyring);
error:
up_write(&user_ns->keyring_sem);
kleave(" = %d", ret);
return ret;
}
/*
* Get the user session keyring if it exists, but don't create it if it
* doesn't.
*/
struct key *get_user_session_keyring_rcu(const struct cred *cred)
{
struct key *reg_keyring = READ_ONCE(cred->user_ns->user_keyring_register);
key_ref_t session_keyring_r;
char buf[20];
struct keyring_search_context ctx = {
.index_key.type = &key_type_keyring,
.index_key.description = buf,
.cred = cred,
.match_data.cmp = key_default_cmp,
.match_data.raw_data = buf,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
if (!reg_keyring)
return NULL;
ctx.index_key.desc_len = snprintf(buf, sizeof(buf), "_uid_ses.%u",
from_kuid(cred->user_ns,
cred->user->uid));
session_keyring_r = keyring_search_rcu(make_key_ref(reg_keyring, true),
&ctx);
if (IS_ERR(session_keyring_r))
return NULL;
return key_ref_to_ptr(session_keyring_r);
}
/*
* Install a thread keyring to the given credentials struct if it didn't have
* one already. This is allowed to overrun the quota.
*
* Return: 0 if a thread keyring is now present; -errno on failure.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->thread_keyring)
return 0;
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->thread_keyring = keyring;
return 0;
}
/*
* Install a thread keyring to the current task if it didn't have one already.
*
* Return: 0 if a thread keyring is now present; -errno on failure.
*/
static int install_thread_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Install a process keyring to the given credentials struct if it didn't have
* one already. This is allowed to overrun the quota.
*
* Return: 0 if a process keyring is now present; -errno on failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
return 0;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->process_keyring = keyring;
return 0;
}
/*
* Install a process keyring to the current task if it didn't have one already.
*
* Return: 0 if a process keyring is now present; -errno on failure.
*/
static int install_process_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Install the given keyring as the session keyring of the given credentials
* struct, replacing the existing one if any. If the given keyring is NULL,
* then install a new anonymous session keyring.
* @cred can not be in use by any task yet.
*
* Return: 0 on success; -errno on failure.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
unsigned long flags;
struct key *old;
might_sleep();
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
flags, NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
__key_get(keyring);
}
/* install the keyring */
old = cred->session_keyring;
cred->session_keyring = keyring;
if (old)
key_put(old);
return 0;
}
/*
* Install the given keyring as the session keyring of the current task,
* replacing the existing one if any. If the given keyring is NULL, then
* install a new anonymous session keyring.
*
* Return: 0 on success; -errno on failure.
*/
static int install_session_keyring(struct key *keyring)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
/*
* Handle the fsuid changing.
*/
void key_fsuid_changed(struct cred *new_cred)
{
/* update the ownership of the thread keyring */
if (new_cred->thread_keyring) {
down_write(&new_cred->thread_keyring->sem);
new_cred->thread_keyring->uid = new_cred->fsuid;
up_write(&new_cred->thread_keyring->sem);
}
}
/*
* Handle the fsgid changing.
*/
void key_fsgid_changed(struct cred *new_cred)
{
/* update the ownership of the thread keyring */
if (new_cred->thread_keyring) {
down_write(&new_cred->thread_keyring->sem);
new_cred->thread_keyring->gid = new_cred->fsgid;
up_write(&new_cred->thread_keyring->sem);
}
}
/*
* Search the process keyrings attached to the supplied cred for the first
* matching key under RCU conditions (the caller must be holding the RCU read
* lock).
*
* The search criteria are the type and the match function. The description is
* given to the match function as a parameter, but doesn't otherwise influence
* the search. Typically the match function will compare the description
* parameter to the key's description.
*
* This can only search keyrings that grant Search permission to the supplied
* credentials. Keyrings linked to searched keyrings will also be searched if
* they grant Search permission too. Keys can only be found if they grant
* Search permission to the credentials.
*
* Returns a pointer to the key with the key usage count incremented if
* successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
* matched negative keys.
*
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx)
{
struct key *user_session;
key_ref_t key_ref, ret, err;
const struct cred *cred = ctx->cred;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
* otherwise we want to return a sample error (probably -EACCES) if
* none of the keyrings were searchable
*
* in terms of priority: success > -ENOKEY > -EAGAIN > other error
*/
key_ref = NULL;
ret = NULL;
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
if (cred->thread_keyring) {
key_ref = keyring_search_rcu(
make_key_ref(cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the process keyring second */
if (cred->process_keyring) {
key_ref = keyring_search_rcu(
make_key_ref(cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
fallthrough;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the session keyring */
if (cred->session_keyring) {
key_ref = keyring_search_rcu(
make_key_ref(cred->session_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
fallthrough;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* or search the user-session keyring */
else if ((user_session = get_user_session_keyring_rcu(cred))) {
key_ref = keyring_search_rcu(make_key_ref(user_session, 1),
ctx);
key_put(user_session);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
fallthrough;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* no key - decide on the error we're going to go for */
key_ref = ret ? ret : err;
found:
return key_ref;
}
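/*
 * A hypothetical sketch of a caller searching the current task's keyrings for
 * a named key of a given type under RCU. The type pointer and description are
 * placeholders; the context fields mirror those used elsewhere in this file
 * (see get_user_session_keyring_rcu()).
 *
 *	static key_ref_t example_find_key(struct key_type *type, const char *desc)
 *	{
 *		key_ref_t ref;
 *		struct keyring_search_context ctx = {
 *			.index_key.type		= type,
 *			.index_key.description	= desc,
 *			.index_key.desc_len	= strlen(desc),
 *			.cred			= current_cred(),
 *			.match_data.cmp		= key_default_cmp,
 *			.match_data.raw_data	= (void *)desc,
 *			.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
 *			.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
 *		};
 *
 *		rcu_read_lock();
 *		ref = search_cred_keyrings_rcu(&ctx);
 *		rcu_read_unlock();
 *		return ref;	// key ref on success, ERR_PTR(-EAGAIN/-ENOKEY/...) otherwise
 *	}
 */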
/*
* Search the process keyrings attached to the supplied cred for the first
* matching key in the manner of search_my_process_keyrings(), but also search
* the keys attached to the assumed authorisation key using its credentials if
* one is available.
*
* The caller must be holding the RCU read lock.
*
* Return same as search_cred_keyrings_rcu().
*/
key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
key_ref = search_cred_keyrings_rcu(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
/* if this process has an instantiation authorisation key, then we also
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
if (ctx->cred->request_key_auth &&
ctx->cred == current_cred() &&
ctx->index_key.type != &key_type_request_key_auth
) {
const struct cred *cred = ctx->cred;
if (key_validate(cred->request_key_auth) == 0) {
rka = ctx->cred->request_key_auth->payload.data[0];
//// was search_process_keyrings() [ie. recursive]
ctx->cred = rka->cred;
key_ref = search_cred_keyrings_rcu(ctx);
ctx->cred = cred;
if (!IS_ERR(key_ref))
goto found;
ret = key_ref;
}
}
/* no key - decide on the error we're going to go for */
if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
key_ref = ERR_PTR(-ENOKEY);
else if (err == ERR_PTR(-EACCES))
key_ref = ret;
else
key_ref = err;
found:
return key_ref;
}
/*
* See if the key we're looking at is the target key.
*/
bool lookup_user_key_possessed(const struct key *key,
const struct key_match_data *match_data)
{
return key == match_data->raw_data;
}
/*
* Look up a key ID given us by userspace with a given permissions mask to get
* the key it refers to.
*
* Flags can be passed to request that special keyrings be created if referred
* to directly, to permit partially constructed keys to be found and to skip
* validity and permission checks on the found key.
*
* Returns a pointer to the key with an incremented usage count if successful;
* -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
* to a key or the best found key was a negative key; -EKEYREVOKED or
* -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
* found key doesn't grant the requested permit or the LSM denied access to it;
* or -ENOMEM if a special keyring couldn't be created.
*
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
enum key_need_perm need_perm)
{
struct keyring_search_context ctx = {
.match_data.cmp = lookup_user_key_possessed,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = (KEYRING_SEARCH_NO_STATE_CHECK |
KEYRING_SEARCH_RECURSE),
};
struct request_key_auth *rka;
struct key *key, *user_session;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
ctx.cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
if (!ctx.cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_thread_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = ctx.cred->thread_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
if (!ctx.cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_process_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = ctx.cred->process_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = look_up_user_keyrings(NULL, &user_session);
if (ret < 0)
goto error;
if (lflags & KEY_LOOKUP_CREATE)
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(user_session);
key_put(user_session);
if (ret < 0)
goto error;
goto reget_creds;
} else if (test_bit(KEY_FLAG_UID_KEYRING,
&ctx.cred->session_keyring->flags) &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
goto error;
goto reget_creds;
}
key = ctx.cred->session_keyring;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
ret = look_up_user_keyrings(&key, NULL);
if (ret < 0)
goto error;
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
ret = look_up_user_keyrings(NULL, &key);
if (ret < 0)
goto error;
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
key_ref = ERR_PTR(-EINVAL);
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
key = ctx.cred->request_key_auth;
if (!key)
goto error;
__key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
if (!ctx.cred->request_key_auth)
goto error;
down_read(&ctx.cred->request_key_auth->sem);
if (test_bit(KEY_FLAG_REVOKED,
&ctx.cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
rka = ctx.cred->request_key_auth->payload.data[0];
key = rka->dest_keyring;
__key_get(key);
}
up_read(&ctx.cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
break;
default:
key_ref = ERR_PTR(-EINVAL);
if (id < 1)
goto error;
key = key_lookup(id);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error;
}
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
ctx.index_key = key->index_key;
ctx.match_data.raw_data = key;
kdebug("check possessed");
rcu_read_lock();
skey_ref = search_process_keyrings_rcu(&ctx);
rcu_read_unlock();
kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
key_put(key);
key_ref = skey_ref;
}
break;
}
/* unlink does not use the nominated key in any way, so can skip all
* the permission checks as it is only concerned with the keyring */
if (need_perm != KEY_NEED_UNLINK) {
if (!(lflags & KEY_LOOKUP_PARTIAL)) {
ret = wait_for_key_construction(key, true);
switch (ret) {
case -ERESTARTSYS:
goto invalid_key;
default:
if (need_perm != KEY_AUTHTOKEN_OVERRIDE &&
need_perm != KEY_DEFER_PERM_CHECK)
goto invalid_key;
break;
case 0:
break;
}
} else if (need_perm != KEY_DEFER_PERM_CHECK) {
ret = key_validate(key);
if (ret < 0)
goto invalid_key;
}
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
key_read_state(key) == KEY_IS_UNINSTANTIATED)
goto invalid_key;
}
/* check the permissions */
ret = key_task_permission(key_ref, ctx.cred, need_perm);
if (ret < 0)
goto invalid_key;
key->last_used_at = ktime_get_real_seconds();
error:
put_cred(ctx.cred);
return key_ref;
invalid_key:
key_ref_put(key_ref);
key_ref = ERR_PTR(ret);
goto error;
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
put_cred(ctx.cred);
goto try_again;
}
EXPORT_SYMBOL(lookup_user_key);
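/*
 * A minimal, hypothetical sketch of how a keyctl-style caller might resolve a
 * key ID passed in from userspace. The KEY_NEED_READ permission and the
 * action taken on the key are illustrative only.
 *
 *	static long example_use_key(key_serial_t id)
 *	{
 *		key_ref_t key_ref;
 *		struct key *key;
 *
 *		key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_READ);
 *		if (IS_ERR(key_ref))
 *			return PTR_ERR(key_ref);
 *
 *		key = key_ref_to_ptr(key_ref);
 *		// ... use @key under the granted permission ...
 *		key_ref_put(key_ref);
 *		return 0;
 *	}
 */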
/*
* Join the named keyring as the session keyring if possible else attempt to
* create a new one of that name and join that.
*
* If the name is NULL, an empty anonymous keyring will be installed as the
* session keyring.
*
* Named session keyrings are joined with a semaphore held to prevent the
* keyrings from going away whilst the attempt is made to join them and also
* to prevent a race in creating compatible session keyrings.
*/
long join_session_keyring(const char *name)
{
const struct cred *old;
struct cred *new;
struct key *keyring;
long ret, serial;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
/* if no name is provided, install an anonymous keyring */
if (!name) {
ret = install_session_keyring_to_cred(new, NULL);
if (ret < 0)
goto error;
serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
goto okay;
}
/* allow the user to join or create a named keyring */
mutex_lock(&key_session_mutex);
/* look for an existing keyring of this name */
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
keyring = keyring_alloc(
name, old->uid, old->gid, old,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
KEY_ALLOC_IN_QUOTA, NULL, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
}
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
} else if (keyring == new->session_keyring) {
ret = 0;
goto error3;
}
/* we've got a keyring - now to install it */
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0)
goto error3;
commit_creds(new);
mutex_unlock(&key_session_mutex);
ret = keyring->serial;
key_put(keyring);
okay:
return ret;
error3:
key_put(keyring);
error2:
mutex_unlock(&key_session_mutex);
error:
abort_creds(new);
return ret;
}
/*
* Replace a process's session keyring on behalf of one of its children when
* the target process is about to resume userspace execution.
*/
void key_change_session_keyring(struct callback_head *twork)
{
const struct cred *old = current_cred();
struct cred *new = container_of(twork, struct cred, rcu);
if (unlikely(current->flags & PF_EXITING)) {
put_cred(new);
return;
}
/* If get_ucounts fails more bits are needed in the refcount */
if (unlikely(!get_ucounts(old->ucounts))) {
WARN_ONCE(1, "In %s get_ucounts failed\n", __func__);
put_cred(new);
return;
}
new->uid = old->uid;
new->euid = old->euid;
new->suid = old->suid;
new->fsuid = old->fsuid;
new->gid = old->gid;
new->egid = old->egid;
new->sgid = old->sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
new->ucounts = old->ucounts;
new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
new->securebits = old->securebits;
new->cap_inheritable = old->cap_inheritable;
new->cap_permitted = old->cap_permitted;
new->cap_effective = old->cap_effective;
new->cap_ambient = old->cap_ambient;
new->cap_bset = old->cap_bset;
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
commit_creds(new);
}
/*
* Make sure that root's user and user-session keyrings exist.
*/
static int __init init_root_keyring(void)
{
return look_up_user_keyrings(NULL, NULL);
}
late_initcall(init_root_keyring);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_SECTIONS_H_
#define _ASM_GENERIC_SECTIONS_H_
/* References to section boundaries */
#include <linux/compiler.h>
#include <linux/types.h>
/*
* Usage guidelines:
* _text, _data: architecture specific, don't use them in arch-independent code
* [_stext, _etext]: contains .text.* sections, may also contain .rodata.*
* and/or .init.* sections
* [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
* and/or .init.* sections.
* [__start_rodata, __end_rodata]: contains .rodata.* sections
* [__start_ro_after_init, __end_ro_after_init]:
* contains .data..ro_after_init section
* [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
* may be out of this range on some architectures.
* [_sinittext, _einittext]: contains .init.text.* sections
* [__bss_start, __bss_stop]: contains BSS sections
*
* Following global variables are optional and may be unavailable on some
* architectures and/or kernel configurations.
* _text, _data
* __kprobes_text_start, __kprobes_text_end
* __entry_text_start, __entry_text_end
* __ctors_start, __ctors_end
* __irqentry_text_start, __irqentry_text_end
* __softirqentry_text_start, __softirqentry_text_end
* __start_opd, __end_opd
*/
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
extern char __start_ro_after_init[], __end_ro_after_init[];
extern char _end[];
extern char __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __entry_text_start[], __entry_text_end[];
extern char __start_rodata[], __end_rodata[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __softirqentry_text_start[], __softirqentry_text_end[];
extern char __start_once[], __end_once[];
/* Start and end of .ctors section - used for constructor calls. */
extern char __ctors_start[], __ctors_end[];
/* Start and end of .opd section - used for function descriptors. */
extern char __start_opd[], __end_opd[];
/* Start and end of instrumentation protected text section */
extern char __noinstr_text_start[], __noinstr_text_end[];
extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
void *dereference_function_descriptor(void *ptr);
void *dereference_kernel_function_descriptor(void *ptr);
#else
#define dereference_function_descriptor(p) ((void *)(p))
#define dereference_kernel_function_descriptor(p) ((void *)(p))
/* An address is simply the address of the function. */
typedef struct {
unsigned long addr;
} func_desc_t;
#endif
static inline bool have_function_descriptors(void)
{
return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS);
}
/**
* memory_contains - checks if an object is contained within a memory region
* @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
*
* Returns: true if the object specified by @virt and @size is entirely
* contained within the memory region defined by @begin and @end, false
* otherwise.
*/
static inline bool memory_contains(void *begin, void *end, void *virt,
size_t size)
{
return virt >= begin && virt + size <= end;
}
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
* @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
*
* Returns: true if an object's memory region, specified by @virt and @size,
* intersects with the region specified by @begin and @end, false otherwise.
*/
static inline bool memory_intersects(void *begin, void *end, void *virt,
size_t size)
{
void *vend = virt + size;
if (virt < end && vend > begin)
return true;
return false;
}
/**
* init_section_contains - checks if an object is contained within the init
* section
* @virt: virtual address of the memory object
* @size: size of the memory object
*
* Returns: true if the object specified by @virt and @size is entirely
* contained within the init section, false otherwise.
*/
static inline bool init_section_contains(void *virt, size_t size)
{
return memory_contains(__init_begin, __init_end, virt, size);
}
/**
* init_section_intersects - checks if the region occupied by an object
* intersects with the init section
* @virt: virtual address of the memory object
* @size: size of the memory object
*
* Returns: true if an object's memory region, specified by @virt and @size,
* intersects with the init section, false otherwise.
*/
static inline bool init_section_intersects(void *virt, size_t size)
{
return memory_intersects(__init_begin, __init_end, virt, size);
}
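/*
 * A small, hypothetical sketch of how these helpers are typically used:
 * rejecting a callback whose code or context lives in the init sections,
 * which are freed after boot. The example_register() wrapper is not a real
 * kernel API.
 *
 *	static int example_register(void (*fn)(void *), void *ctx, size_t ctx_size)
 *	{
 *		if (init_section_contains((void *)fn, 1) ||
 *		    init_section_intersects(ctx, ctx_size))
 *			return -EINVAL;	// would dangle once .init.* is freed
 *		// ... record fn/ctx for later use ...
 *		return 0;
 *	}
 */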
/**
* is_kernel_core_data - checks if the pointer address is located in the
* .data or .bss section
*
* @addr: address to check
*
* Returns: true if the address is located in .data or .bss, false otherwise.
* Note: On some archs it may return true for core RODATA, and false
* for others. But will always be true for core RW data.
*/
static inline bool is_kernel_core_data(unsigned long addr)
{
if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
return true;
if (addr >= (unsigned long)__bss_start &&
addr < (unsigned long)__bss_stop)
return true;
return false;
}
/**
* is_kernel_rodata - checks if the pointer address is located in the
* .rodata section
*
* @addr: address to check
*
* Returns: true if the address is located in .rodata, false otherwise.
*/
static inline bool is_kernel_rodata(unsigned long addr)
{
return addr >= (unsigned long)__start_rodata &&
addr < (unsigned long)__end_rodata;
}
static inline bool is_kernel_ro_after_init(unsigned long addr)
{
return addr >= (unsigned long)__start_ro_after_init &&
addr < (unsigned long)__end_ro_after_init;
}
/**
* is_kernel_inittext - checks if the pointer address is located in the
* .init.text section
*
* @addr: address to check
*
* Returns: true if the address is located in .init.text, false otherwise.
*/
static inline bool is_kernel_inittext(unsigned long addr)
{
return addr >= (unsigned long)_sinittext &&
addr < (unsigned long)_einittext;
}
/**
* __is_kernel_text - checks if the pointer address is located in the
* .text section
*
* @addr: address to check
*
* Returns: true if the address is located in .text, false otherwise.
* Note: an internal helper; it only checks the range from _stext to _etext.
*/
static inline bool __is_kernel_text(unsigned long addr)
{
return addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext;
}
/**
* __is_kernel - checks if the pointer address is located in the kernel range
*
* @addr: address to check
*
* Returns: true if the address is located in the kernel range, false otherwise.
* Note: an internal helper; it checks the range from _stext to _end as well
* as the range from __init_begin to __init_end, which can be outside of the
* _stext to _end range.
*/
static inline bool __is_kernel(unsigned long addr)
{
return ((addr >= (unsigned long)_stext &&
addr < (unsigned long)_end) ||
(addr >= (unsigned long)__init_begin &&
addr < (unsigned long)__init_end));
}
#endif /* _ASM_GENERIC_SECTIONS_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Routines to manage notifier chains for passing status changes to any
* interested routines. We need this instead of hard coded call lists so
* that modules can poke their nose into the innards. The network devices
* needed them so here they are for the rest of you.
*
* Alan Cox <Alan.Cox@linux.org>
*/
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/srcu.h>
/*
* Notifier chains are of four types:
*
* Atomic notifier chains: Chain callbacks run in interrupt/atomic
* context. Callouts are not allowed to block.
* Blocking notifier chains: Chain callbacks run in process context.
* Callouts are allowed to block.
* Raw notifier chains: There are no restrictions on callbacks,
* registration, or unregistration. All locking and protection
* must be provided by the caller.
* SRCU notifier chains: A variant of blocking notifier chains, with
* the same restrictions.
*
* atomic_notifier_chain_register() may be called from an atomic context,
* but blocking_notifier_chain_register() and srcu_notifier_chain_register()
* must be called from a process context. Ditto for the corresponding
* _unregister() routines.
*
* atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(),
* and srcu_notifier_chain_unregister() _must not_ be called from within
* the call chain.
*
* SRCU notifier chains are an alternative form of blocking notifier chains.
* They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for
* protection of the chain links. This means there is _very_ low overhead
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
* often but notifier_blocks will seldom be removed.
*/
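/*
 * A minimal, hypothetical usage sketch for a blocking notifier chain: one
 * module owns the chain head and raises events on it, other modules register
 * callbacks. All names prefixed "example_" and the SOME_ACTION value are
 * placeholders.
 *
 *	static BLOCKING_NOTIFIER_HEAD(example_chain);
 *
 *	static int example_event_cb(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		// react to @action / @data
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_event_cb,
 *	};
 *
 *	// registration (process context):
 *	//	blocking_notifier_chain_register(&example_chain, &example_nb);
 *	// raising an event:
 *	//	blocking_notifier_call_chain(&example_chain, SOME_ACTION, ptr);
 */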
struct notifier_block;
typedef int (*notifier_fn_t)(struct notifier_block *nb,
unsigned long action, void *data);
struct notifier_block {
notifier_fn_t notifier_call;
struct notifier_block __rcu *next;
int priority;
};
struct atomic_notifier_head {
spinlock_t lock;
struct notifier_block __rcu *head;
};
struct blocking_notifier_head {
struct rw_semaphore rwsem;
struct notifier_block __rcu *head;
};
struct raw_notifier_head {
struct notifier_block __rcu *head;
};
struct srcu_notifier_head {
struct mutex mutex;
struct srcu_usage srcuu;
struct srcu_struct srcu;
struct notifier_block __rcu *head;
};
#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
spin_lock_init(&(name)->lock); \
(name)->head = NULL; \
} while (0)
#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \
init_rwsem(&(name)->rwsem); \
(name)->head = NULL; \
} while (0)
#define RAW_INIT_NOTIFIER_HEAD(name) do { \
(name)->head = NULL; \
} while (0)
/* srcu_notifier_heads must be cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
#define ATOMIC_NOTIFIER_INIT(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.head = NULL }
#define BLOCKING_NOTIFIER_INIT(name) { \
.rwsem = __RWSEM_INITIALIZER((name).rwsem), \
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
#define SRCU_NOTIFIER_INIT(name, pcpu) \
{ \
.mutex = __MUTEX_INITIALIZER(name.mutex), \
.head = NULL, \
.srcuu = __SRCU_USAGE_INIT(name.srcuu), \
.srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
}
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
ATOMIC_NOTIFIER_INIT(name)
#define BLOCKING_NOTIFIER_HEAD(name) \
struct blocking_notifier_head name = \
BLOCKING_NOTIFIER_INIT(name)
#define RAW_NOTIFIER_HEAD(name) \
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
#ifdef CONFIG_TREE_SRCU
#define _SRCU_NOTIFIER_HEAD(name, mod) \
static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \
mod struct srcu_notifier_head name = \
SRCU_NOTIFIER_INIT(name, name##_head_srcu_data)
#else
#define _SRCU_NOTIFIER_HEAD(name, mod) \
mod struct srcu_notifier_head name = \
SRCU_NOTIFIER_INIT(name, name)
#endif
#define SRCU_NOTIFIER_HEAD(name) \
_SRCU_NOTIFIER_HEAD(name, /* not static */)
#define SRCU_NOTIFIER_HEAD_STATIC(name) \
_SRCU_NOTIFIER_HEAD(name, static)
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
extern int atomic_notifier_chain_register_unique_prio(
struct atomic_notifier_head *nh, struct notifier_block *nb);
extern int blocking_notifier_chain_register_unique_prio(
struct blocking_notifier_head *nh, struct notifier_block *nb);
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
struct notifier_block *nb);
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
extern bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
/* Bad/Veto action */
/*
* Clean way to return from the notifier and stop further calls.
*/
#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
static inline int notifier_from_errno(int err)
{
if (err)
return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
return NOTIFY_OK;
}
/* Restore (negative) errno value from notify return value. */
static inline int notifier_to_errno(int ret)
{
ret &= ~NOTIFY_STOP_MASK;
return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}
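/*
 * A short, hypothetical sketch of the errno round-trip these helpers provide:
 * a callback encodes an errno into its notifier return value and the chain
 * caller recovers it. The example_* names are placeholders.
 *
 *	static int example_cb(struct notifier_block *nb, unsigned long action,
 *			      void *data)
 *	{
 *		if (!example_can_handle(data))		// hypothetical check
 *			return notifier_from_errno(-ENOMEM);
 *		return NOTIFY_OK;
 *	}
 *
 *	// caller side:
 *	//	ret = blocking_notifier_call_chain(&example_chain, action, data);
 *	//	err = notifier_to_errno(ret);	// yields -ENOMEM if the callback failed
 */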
/*
* Declared notifiers so far. I can imagine quite a few more chains
* over time (eg laptop power reset chains, reboot chain (to clean
* device units up), device [un]mount chain, module load/unload chain,
* low memory chain, screenblank chain (for plug in modular screenblankers)
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
*/
/* CPU notifiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
/* reboot notifiers are defined in include/linux/reboot.h. */
/* Hibernation and suspend events are defined in include/linux/suspend.h. */
/* Virtual Terminal events are defined in include/linux/vt.h. */
#define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */
/* Console keyboard events.
* Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and
* KBD_KEYSYM. */
#define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */
#define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */
#define KBD_UNICODE 0x0003 /* Keyboard unicode */
#define KBD_KEYSYM 0x0004 /* Keyboard keysym */
#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */
#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* fs/kernfs/file.c - kernfs file implementation
*
* Copyright (c) 2001-3 Patrick Mochel
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
*/
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
#include <linux/uio.h>
#include "kernfs-internal.h"
struct kernfs_open_node {
struct rcu_head rcu_head;
atomic_t event;
wait_queue_head_t poll;
struct list_head files; /* goes through kernfs_open_file.list */
unsigned int nr_mmapped;
unsigned int nr_to_release;
};
/*
* kernfs_notify() may be called from any context and bounces notifications
* through a work item. To minimize space overhead in kernfs_node, the
* pending queue is implemented as a singly linked list of kernfs_nodes.
* The list is terminated with the self pointer so that whether a
* kernfs_node is on the list or not can be determined by testing the next
* pointer for %NULL.
*/
#define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list)
static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
{
int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
return &kernfs_locks->open_file_mutex[idx];
}
static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
{
struct mutex *lock;
lock = kernfs_open_file_mutex_ptr(kn);
mutex_lock(lock);
return lock;
}
/**
* of_on - Get the kernfs_open_node of the specified kernfs_open_file
* @of: target kernfs_open_file
*
* Return: the kernfs_open_node of the kernfs_open_file
*/
static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
{
return rcu_dereference_protected(of->kn->attr.open,
!list_empty(&of->list));
}
/* Get active reference to kernfs node for an open file */
static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
{
/* Skip if file was already released */
if (unlikely(of->released))
return NULL;
if (!kernfs_get_active(of->kn))
return NULL;
return of;
}
static void kernfs_put_active_of(struct kernfs_open_file *of)
{
return kernfs_put_active(of->kn);
}
/**
* kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
*
* @kn: target kernfs_node.
*
* Fetch and return ->attr.open of @kn when caller holds the
* kernfs_open_file_mutex_ptr(kn).
*
* Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn). So when
* the caller guarantees that this mutex is being held, other updaters can't
* change ->attr.open and this means that we can safely deref ->attr.open
* outside RCU read-side critical section.
*
* The caller needs to make sure that kernfs_open_file_mutex is held.
*
* Return: @kn->attr.open when kernfs_open_file_mutex is held.
*/
static struct kernfs_open_node *
kernfs_deref_open_node_locked(struct kernfs_node *kn)
{
return rcu_dereference_protected(kn->attr.open,
lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
}
static struct kernfs_open_file *kernfs_of(struct file *file)
{
return ((struct seq_file *)file->private_data)->private;
}
/*
* Determine the kernfs_ops for the given kernfs_node. This function must
* be called while holding an active reference.
*/
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
if (kn->flags & KERNFS_LOCKDEP)
lockdep_assert_held(kn);
return kn->attr.ops;
}
/*
* As kernfs_seq_stop() is also called after kernfs_seq_start() or
* kernfs_seq_next() failure, it needs to distinguish whether it's stopping
* a seq_file iteration which is fully initialized with an active reference
* or an aborted kernfs_seq_start() due to get_active failure. The
* position pointer is the only context for each seq_file iteration and
* thus the stop condition should be encoded in it. As the return value is
* directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
* choice to indicate get_active failure.
*
* Unfortunately, this is complicated due to the optional custom seq_file
* operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop()
* can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
* custom seq_file operations and thus can't decide whether put_active
* should be performed or not only on ERR_PTR(-ENODEV).
*
* This is worked around by factoring out the custom seq_stop() and
* put_active part into kernfs_seq_stop_active(), skipping it from
* kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
* custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
* that kernfs_seq_stop_active() is skipped only after get_active failure.
*/
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
struct kernfs_open_file *of = sf->private;
const struct kernfs_ops *ops = kernfs_ops(of->kn);
if (ops->seq_stop)
ops->seq_stop(sf, v);
kernfs_put_active_of(of);
}
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
struct kernfs_open_file *of = sf->private;
const struct kernfs_ops *ops;
/*
* @of->mutex nests outside active ref and is primarily to ensure that
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active_of(of))
return ERR_PTR(-ENODEV);
ops = kernfs_ops(of->kn);
if (ops->seq_start) {
void *next = ops->seq_start(sf, ppos);
/* see the comment above kernfs_seq_stop_active() */
if (next == ERR_PTR(-ENODEV))
kernfs_seq_stop_active(sf, next);
return next;
}
return single_start(sf, ppos);
}
static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
struct kernfs_open_file *of = sf->private;
const struct kernfs_ops *ops = kernfs_ops(of->kn);
if (ops->seq_next) {
void *next = ops->seq_next(sf, v, ppos);
/* see the comment above kernfs_seq_stop_active() */
if (next == ERR_PTR(-ENODEV))
kernfs_seq_stop_active(sf, next);
return next;
} else {
/*
* The same behavior and code as single_open(), always
* terminate after the initial read.
*/
++*ppos;
return NULL;
}
}
static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
struct kernfs_open_file *of = sf->private;
if (v != ERR_PTR(-ENODEV))
kernfs_seq_stop_active(sf, v);
mutex_unlock(&of->mutex);
}
static int kernfs_seq_show(struct seq_file *sf, void *v)
{
struct kernfs_open_file *of = sf->private;
of->event = atomic_read(&of_on(of)->event);
return of->kn->attr.ops->seq_show(sf, v);
}
static const struct seq_operations kernfs_seq_ops = {
.start = kernfs_seq_start,
.next = kernfs_seq_next,
.stop = kernfs_seq_stop,
.show = kernfs_seq_show,
};
/*
* As reading a bin file can have side-effects, the exact offset and bytes
* specified in read(2) call should be passed to the read callback making
* it difficult to use seq_file. Implement simplistic custom buffering for
* bin files.
*/
static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
const struct kernfs_ops *ops;
char *buf;
buf = of->prealloc_buf;
if (buf)
mutex_lock(&of->prealloc_mutex);
else
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/*
* @of->mutex nests outside active ref and is used to ensure that
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active_of(of)) {
len = -ENODEV;
mutex_unlock(&of->mutex);
goto out_free;
}
of->event = atomic_read(&of_on(of)->event);
ops = kernfs_ops(of->kn);
if (ops->read)
len = ops->read(of, buf, len, iocb->ki_pos);
else
len = -EINVAL;
kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len < 0)
goto out_free;
if (copy_to_iter(buf, len, iter) != len) {
len = -EFAULT;
goto out_free;
}
iocb->ki_pos += len;
out_free:
if (buf == of->prealloc_buf)
mutex_unlock(&of->prealloc_mutex);
else
kfree(buf);
return len;
}
static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
return seq_read_iter(iocb, iter);
return kernfs_file_read_iter(iocb, iter);
}
/*
* Copy data in from userland and pass it to the matching kernfs write
* operation.
*
* There is no easy way for us to know if userspace is only doing a partial
* write, so we don't support them. We expect the entire buffer to come on
* the first write. Hint: if you're writing a value, first read the file,
modify only the value you're changing, then write the entire buffer
* back.
*/
static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
ssize_t len = iov_iter_count(iter);
const struct kernfs_ops *ops;
char *buf;
if (of->atomic_write_len) {
if (len > of->atomic_write_len)
return -E2BIG;
} else {
len = min_t(size_t, len, PAGE_SIZE);
}
buf = of->prealloc_buf;
if (buf)
mutex_lock(&of->prealloc_mutex);
else
buf = kmalloc(len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_iter(buf, len, iter) != len) {
len = -EFAULT;
goto out_free;
}
buf[len] = '\0'; /* guarantee string termination */
/*
* @of->mutex nests outside the active ref and is used to ensure that
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active_of(of)) {
mutex_unlock(&of->mutex);
len = -ENODEV;
goto out_free;
}
ops = kernfs_ops(of->kn);
if (ops->write)
len = ops->write(of, buf, len, iocb->ki_pos);
else
len = -EINVAL;
kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len > 0)
iocb->ki_pos += len;
out_free:
if (buf == of->prealloc_buf)
mutex_unlock(&of->prealloc_mutex);
else
kfree(buf);
return len;
}
static void kernfs_vma_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct kernfs_open_file *of = kernfs_of(file);
if (!of->vm_ops)
return;
if (!kernfs_get_active_of(of))
return;
if (of->vm_ops->open)
of->vm_ops->open(vma);
kernfs_put_active_of(of);
}
static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct kernfs_open_file *of = kernfs_of(file);
vm_fault_t ret;
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
if (of->vm_ops->fault)
ret = of->vm_ops->fault(vmf);
kernfs_put_active_of(of);
return ret;
}
static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct kernfs_open_file *of = kernfs_of(file);
vm_fault_t ret;
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = 0;
if (of->vm_ops->page_mkwrite)
ret = of->vm_ops->page_mkwrite(vmf);
else
file_update_time(file);
kernfs_put_active_of(of);
return ret;
}
static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
struct file *file = vma->vm_file;
struct kernfs_open_file *of = kernfs_of(file);
int ret;
if (!of->vm_ops)
return -EINVAL;
if (!kernfs_get_active_of(of))
return -EINVAL;
ret = -EINVAL;
if (of->vm_ops->access)
ret = of->vm_ops->access(vma, addr, buf, len, write);
kernfs_put_active_of(of);
return ret;
}
static const struct vm_operations_struct kernfs_vm_ops = {
.open = kernfs_vma_open,
.fault = kernfs_vma_fault,
.page_mkwrite = kernfs_vma_page_mkwrite,
.access = kernfs_vma_access,
};
static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
struct kernfs_open_file *of = kernfs_of(file);
const struct kernfs_ops *ops;
int rc;
/*
* mmap path and of->mutex are prone to triggering spurious lockdep
* warnings and we don't want to add spurious locking dependency
* between the two. Check whether mmap is actually implemented
* without grabbing @of->mutex by testing HAS_MMAP flag. See the
* comment in kernfs_fop_open() for more details.
*/
if (!(of->kn->flags & KERNFS_HAS_MMAP))
return -ENODEV;
mutex_lock(&of->mutex);
rc = -ENODEV;
if (!kernfs_get_active_of(of))
goto out_unlock;
ops = kernfs_ops(of->kn);
rc = ops->mmap(of, vma);
if (rc)
goto out_put;
/*
* PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
* to satisfy versions of X which crash if the mmap fails: that
* substitutes a new vm_file, and we don't then want bin_vm_ops.
*/
if (vma->vm_file != file)
goto out_put;
rc = -EINVAL;
if (of->mmapped && of->vm_ops != vma->vm_ops)
goto out_put;
/*
* It is not possible to successfully wrap close.
* So error if someone is trying to use close.
*/
if (vma->vm_ops && vma->vm_ops->close)
goto out_put;
rc = 0;
if (!of->mmapped) {
of->mmapped = true;
of_on(of)->nr_mmapped++;
of->vm_ops = vma->vm_ops;
}
vma->vm_ops = &kernfs_vm_ops;
out_put:
kernfs_put_active_of(of);
out_unlock:
mutex_unlock(&of->mutex);
return rc;
}
/**
* kernfs_get_open_node - get or create kernfs_open_node
* @kn: target kernfs_node
* @of: kernfs_open_file for this instance of open
*
* If @kn->attr.open exists, increment its reference count; otherwise,
* create one. @of is chained to the files list.
*
* Locking:
* Kernel thread context (may sleep).
*
* Return:
* %0 on success, -errno on failure.
*/
static int kernfs_get_open_node(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
struct kernfs_open_node *on;
struct mutex *mutex;
mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_deref_open_node_locked(kn);
if (!on) {
/* not there, initialize a new one */
on = kzalloc(sizeof(*on), GFP_KERNEL);
if (!on) {
mutex_unlock(mutex);
return -ENOMEM;
}
atomic_set(&on->event, 1);
init_waitqueue_head(&on->poll);
INIT_LIST_HEAD(&on->files);
rcu_assign_pointer(kn->attr.open, on);
}
list_add_tail(&of->list, &on->files);
if (kn->flags & KERNFS_HAS_RELEASE)
on->nr_to_release++;
mutex_unlock(mutex);
return 0;
}
/**
* kernfs_unlink_open_file - Unlink @of from @kn.
*
* @kn: target kernfs_node
* @of: associated kernfs_open_file
* @open_failed: ->open() failed, cancel ->release()
*
* Unlink @of from list of @kn's associated open files. If list of
* associated open files becomes empty, disassociate and free
* kernfs_open_node.
*
* LOCKING:
* None.
*/
static void kernfs_unlink_open_file(struct kernfs_node *kn,
struct kernfs_open_file *of,
bool open_failed)
{
struct kernfs_open_node *on;
struct mutex *mutex;
mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_deref_open_node_locked(kn);
if (!on) {
mutex_unlock(mutex);
return;
}
if (of) {
if (kn->flags & KERNFS_HAS_RELEASE) {
WARN_ON_ONCE(of->released == open_failed);
if (open_failed)
on->nr_to_release--;
}
if (of->mmapped)
on->nr_mmapped--;
list_del(&of->list);
}
if (list_empty(&on->files)) {
rcu_assign_pointer(kn->attr.open, NULL);
kfree_rcu(on, rcu_head);
}
mutex_unlock(mutex);
}
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
struct kernfs_node *kn = inode->i_private;
struct kernfs_root *root = kernfs_root(kn);
const struct kernfs_ops *ops;
struct kernfs_open_file *of;
bool has_read, has_write, has_mmap;
int error = -EACCES;
if (!kernfs_get_active(kn))
return -ENODEV;
ops = kernfs_ops(kn);
has_read = ops->seq_show || ops->read || ops->mmap;
has_write = ops->write || ops->mmap;
has_mmap = ops->mmap;
/* see the flag definition for details */
if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
if ((file->f_mode & FMODE_WRITE) &&
(!(inode->i_mode & S_IWUGO) || !has_write))
goto err_out;
if ((file->f_mode & FMODE_READ) &&
(!(inode->i_mode & S_IRUGO) || !has_read))
goto err_out;
}
/* allocate a kernfs_open_file for the file */
error = -ENOMEM;
of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
if (!of)
goto err_out;
/*
* The following is done to give a different lockdep key to
* @of->mutex for files which implement mmap. This is a rather
* crude way to avoid false positive lockdep warning around
* mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and
* reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
* which mm->mmap_lock nests, while holding @of->mutex. As each
* open file has a separate mutex, it's okay as long as those don't
* happen on the same file. At this point, we can't easily give
* each file a separate locking class. Let's differentiate on
* whether the file has mmap or not for now.
*
* For similar reasons, writable and readonly files are given different
* lockdep keys, because the writable file /sys/power/resume may call vfs
* lookup helpers for arbitrary paths and readonly files can be read by
* overlayfs from vfs helpers when sysfs is a lower layer of overlayfs.
*
* All three cases look the same. They're supposed to
* look that way and give @of->mutex different static lockdep keys.
*/
if (has_mmap)
mutex_init(&of->mutex);
else if (file->f_mode & FMODE_WRITE)
mutex_init(&of->mutex);
else
mutex_init(&of->mutex);
of->kn = kn;
of->file = file;
/*
* Write path needs to access atomic_write_len outside the active reference.
* Cache it in open_file. See kernfs_fop_write_iter() for details.
*/
of->atomic_write_len = ops->atomic_write_len;
error = -EINVAL;
/*
* ->seq_show is incompatible with ->prealloc,
* as seq_read does its own allocation.
* ->read must be used instead.
*/
if (ops->prealloc && ops->seq_show)
goto err_free;
if (ops->prealloc) {
int len = of->atomic_write_len ?: PAGE_SIZE;
of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
error = -ENOMEM;
if (!of->prealloc_buf)
goto err_free;
mutex_init(&of->prealloc_mutex);
}
/*
* Always instantiate seq_file even if read access doesn't use
* seq_file or is not requested. This unifies private data access
* and readable regular files are the vast majority anyway.
*/
if (ops->seq_show)
error = seq_open(file, &kernfs_seq_ops);
else
error = seq_open(file, NULL);
if (error)
goto err_free;
of->seq_file = file->private_data;
of->seq_file->private = of;
/* seq_file clears PWRITE unconditionally, restore it if WRITE */
if (file->f_mode & FMODE_WRITE)
file->f_mode |= FMODE_PWRITE;
/* make sure we have open node struct */
error = kernfs_get_open_node(kn, of);
if (error)
goto err_seq_release;
if (ops->open) {
/* nobody has access to @of yet, skip @of->mutex */
error = ops->open(of);
if (error)
goto err_put_node;
}
/* open succeeded, put active references */
kernfs_put_active(kn);
return 0;
err_put_node:
kernfs_unlink_open_file(kn, of, true);
err_seq_release:
seq_release(inode, file);
err_free:
kfree(of->prealloc_buf);
kfree(of);
err_out:
kernfs_put_active(kn);
return error;
}
/* used from release/drain to ensure that ->release() is called exactly once */
static void kernfs_release_file(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
/*
* @of is guaranteed to have no other file operations in flight and
* we just want to synchronize release and drain paths.
* @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used
* here because drain path may be called from places which can
* cause circular dependency.
*/
lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));
if (!of->released) {
/*
* A file is never detached without being released and we
* need to be able to release files which are deactivated
* and being drained. Don't use kernfs_ops().
*/
kn->attr.ops->release(of);
of->released = true;
of_on(of)->nr_to_release--;
}
}
static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
struct kernfs_node *kn = inode->i_private;
struct kernfs_open_file *of = kernfs_of(filp);
if (kn->flags & KERNFS_HAS_RELEASE) {
struct mutex *mutex;
mutex = kernfs_open_file_mutex_lock(kn);
kernfs_release_file(kn, of);
mutex_unlock(mutex);
}
kernfs_unlink_open_file(kn, of, false);
seq_release(inode, filp);
kfree(of->prealloc_buf);
kfree(of);
return 0;
}
bool kernfs_should_drain_open_files(struct kernfs_node *kn)
{
struct kernfs_open_node *on;
bool ret;
/*
* @kn being deactivated guarantees that @kn->attr.open can't change
* beneath us making the lockless test below safe.
* Callers post kernfs_unbreak_active_protection may be counted in
* kn->active by now, do not WARN_ON because of them.
*/
rcu_read_lock();
on = rcu_dereference(kn->attr.open);
ret = on && (on->nr_mmapped || on->nr_to_release);
rcu_read_unlock();
return ret;
}
void kernfs_drain_open_files(struct kernfs_node *kn)
{
struct kernfs_open_node *on;
struct kernfs_open_file *of;
struct mutex *mutex;
mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_deref_open_node_locked(kn);
if (!on) {
mutex_unlock(mutex);
return;
}
list_for_each_entry(of, &on->files, list) {
struct inode *inode = file_inode(of->file);
if (of->mmapped) {
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
of->mmapped = false;
on->nr_mmapped--;
}
if (kn->flags & KERNFS_HAS_RELEASE)
kernfs_release_file(kn, of);
}
WARN_ON_ONCE(on->nr_mmapped || on->nr_to_release);
mutex_unlock(mutex);
}
/*
* Kernfs attribute files are pollable. The idea is that you read
* the content and then you use 'poll' or 'select' to wait for
* the content to change. When the content changes (assuming the
* manager for the kobject supports notification), poll will
* return EPOLLERR|EPOLLPRI, and select will return the fd whether
* it is waiting for read, write, or exceptions.
* Once poll/select indicates that the value has changed, you
* need to close and re-open the file, or seek to 0 and read again.
* Reminder: this only works for attributes which actively support
* it, and it is not possible to test an attribute from userspace
* to see if it supports poll (neither 'poll' nor 'select' returns
* an appropriate error code). When in doubt, set a suitable timeout value.
*/
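/*
 * A minimal userspace consumer of this protocol might look like the
 * following sketch (the attribute path is hypothetical, error handling
 * omitted):
 *
 *	int fd = open("/sys/class/foo/bar/value", O_RDONLY);
 *	char buf[64];
 *	read(fd, buf, sizeof(buf));		// consume the current value
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, 5000);			// use a timeout, see note above
 *	lseek(fd, 0, SEEK_SET);			// rewind ...
 *	read(fd, buf, sizeof(buf));		// ... and read the new value
 */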
__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
{
struct kernfs_open_node *on = of_on(of);
poll_wait(of->file, &on->poll, wait);
if (of->event != atomic_read(&on->event))
return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
return DEFAULT_POLLMASK;
}
static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
{
struct kernfs_open_file *of = kernfs_of(filp);
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
__poll_t ret;
if (!kernfs_get_active_of(of))
return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
if (kn->attr.ops->poll)
ret = kn->attr.ops->poll(of, wait);
else
ret = kernfs_generic_poll(of, wait);
kernfs_put_active_of(of);
return ret;
}
static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
{
struct kernfs_open_file *of = kernfs_of(file);
const struct kernfs_ops *ops;
loff_t ret;
/*
* @of->mutex nests outside active ref and is primarily to ensure that
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active_of(of)) {
mutex_unlock(&of->mutex);
return -ENODEV;
}
ops = kernfs_ops(of->kn);
if (ops->llseek)
ret = ops->llseek(of, offset, whence);
else
ret = generic_file_llseek(file, offset, whence);
kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
return ret;
}
static void kernfs_notify_workfn(struct work_struct *work)
{
struct kernfs_node *kn;
struct kernfs_super_info *info;
struct kernfs_root *root;
repeat:
/* pop one off the notify_list */
spin_lock_irq(&kernfs_notify_lock);
kn = kernfs_notify_list;
if (kn == KERNFS_NOTIFY_EOL) {
spin_unlock_irq(&kernfs_notify_lock);
return;
}
kernfs_notify_list = kn->attr.notify_next;
kn->attr.notify_next = NULL;
spin_unlock_irq(&kernfs_notify_lock);
root = kernfs_root(kn);
/* kick fsnotify */
down_read(&root->kernfs_supers_rwsem);
down_read(&root->kernfs_rwsem);
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
struct kernfs_node *parent;
struct inode *p_inode = NULL;
const char *kn_name;
struct inode *inode;
struct qstr name;
/*
* We want fsnotify_modify() on @kn but as the
* modifications aren't originating from userland don't
* have the matching @file available. Look up the inodes
* and generate the events manually.
*/
inode = ilookup(info->sb, kernfs_ino(kn));
if (!inode)
continue;
kn_name = kernfs_rcu_name(kn);
name = QSTR(kn_name);
parent = kernfs_get_parent(kn);
if (parent) {
p_inode = ilookup(info->sb, kernfs_ino(parent));
if (p_inode) {
fsnotify(FS_MODIFY | FS_EVENT_ON_CHILD,
inode, FSNOTIFY_EVENT_INODE,
p_inode, &name, inode, 0);
iput(p_inode);
}
kernfs_put(parent);
}
if (!p_inode)
fsnotify_inode(inode, FS_MODIFY);
iput(inode);
}
up_read(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
kernfs_put(kn);
goto repeat;
}
/**
* kernfs_notify - notify a kernfs file
* @kn: file to notify
*
* Notify @kn such that poll(2) on @kn wakes up. May be called from any
* context.
*/
void kernfs_notify(struct kernfs_node *kn)
{
static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
unsigned long flags;
struct kernfs_open_node *on;
if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
return;
/* kick poll immediately */
rcu_read_lock();
on = rcu_dereference(kn->attr.open);
if (on) {
atomic_inc(&on->event);
wake_up_interruptible(&on->poll);
}
rcu_read_unlock();
/* schedule work to kick fsnotify */
spin_lock_irqsave(&kernfs_notify_lock, flags);
if (!kn->attr.notify_next) {
kernfs_get(kn);
kn->attr.notify_next = kernfs_notify_list;
kernfs_notify_list = kn;
schedule_work(&kernfs_notify_work);
}
spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
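/*
 * Callers typically hold a pointer to the attribute's kernfs_node and invoke
 * kernfs_notify() after updating the value the attribute reports, e.g. a
 * hypothetical driver helper:
 *
 *	static void foo_update_state(struct foo_dev *foo, int new_state)
 *	{
 *		WRITE_ONCE(foo->state, new_state);
 *		kernfs_notify(foo->state_kn);	// wake poll(2)/select(2) waiters
 *	}
 *
 * sysfs users normally reach this through sysfs_notify() or
 * sysfs_notify_dirent() rather than calling kernfs_notify() directly.
 */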
const struct file_operations kernfs_file_fops = {
.read_iter = kernfs_fop_read_iter,
.write_iter = kernfs_fop_write_iter,
.llseek = kernfs_fop_llseek,
.mmap = kernfs_fop_mmap,
.open = kernfs_fop_open,
.release = kernfs_fop_release,
.poll = kernfs_fop_poll,
.fsync = noop_fsync,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
};
/**
* __kernfs_create_file - kernfs internal function to create a file
* @parent: directory to create the file in
* @name: name of the file
* @mode: mode of the file
* @uid: uid of the file
* @gid: gid of the file
* @size: size of the file
* @ops: kernfs operations for the file
* @priv: private data for the file
* @ns: optional namespace tag of the file
* @key: lockdep key for the file's active_ref, %NULL to disable lockdep
*
* Return: the created node on success, ERR_PTR() value on error.
*/
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
const char *name,
umode_t mode, kuid_t uid, kgid_t gid,
loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key)
{
struct kernfs_node *kn;
unsigned flags;
int rc;
flags = KERNFS_FILE;
kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG,
uid, gid, flags);
if (!kn)
return ERR_PTR(-ENOMEM);
kn->attr.ops = ops;
kn->attr.size = size;
kn->ns = ns;
kn->priv = priv;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (key) {
lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
kn->flags |= KERNFS_LOCKDEP;
}
#endif
/*
* kn->attr.ops is accessible only while holding active ref. We
* need to know whether some ops are implemented outside active
* ref. Cache their existence in flags.
*/
if (ops->seq_show)
kn->flags |= KERNFS_HAS_SEQ_SHOW;
if (ops->mmap)
kn->flags |= KERNFS_HAS_MMAP;
if (ops->release)
kn->flags |= KERNFS_HAS_RELEASE;
rc = kernfs_add_one(kn);
if (rc) {
kernfs_put(kn);
return ERR_PTR(rc);
}
return kn;
}
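/*
 * A caller usually reaches __kernfs_create_file() above through the
 * kernfs_create_file() wrapper in <linux/kernfs.h>. A sketch of exposing a
 * read-only attribute backed by ->seq_show (the foo_* names and the backing
 * integer are hypothetical):
 *
 *	static int foo_seq_show(struct seq_file *sf, void *v)
 *	{
 *		struct kernfs_open_file *of = sf->private;
 *
 *		seq_printf(sf, "%d\n", *(int *)of->kn->priv);
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.seq_show = foo_seq_show,
 *	};
 *
 *	kn = __kernfs_create_file(parent, "foo", 0444, GLOBAL_ROOT_UID,
 *				  GLOBAL_ROOT_GID, 0, &foo_ops, &foo_value,
 *				  NULL, NULL);
 *
 * Because foo_ops only implements ->seq_show, the node gets
 * KERNFS_HAS_SEQ_SHOW and reads go through the seq_file path above.
 */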
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NET3 Protocol independent device support routines.
*
* Derived from the non IP parts of dev.c 1.0.19
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
*
* Additional Authors:
* Florian la Roche <rzsfl@rz.uni-sb.de>
* Alan Cox <gw4pts@gw4pts.ampr.org>
* David Hinds <dahinds@users.sourceforge.net>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
* Adam Sulmicki <adam@cfar.umd.edu>
* Pekka Riikonen <priikone@poesidon.pspt.fi>
*
* Changes:
* D.J. Barrow : Fixed bug where dev->refcnt gets set
* to 2 if register_netdev gets called
* before net_dev_init & also removed a
* few lines of code in the process.
* Alan Cox : device private ioctl copies fields back.
* Alan Cox : Transmit queue code does relevant
* stunts to keep the queue safe.
* Alan Cox : Fixed double lock.
* Alan Cox : Fixed promisc NULL pointer trap
* ???????? : Support the full private ioctl range
* Alan Cox : Moved ioctl permission check into
* drivers
* Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
* Alan Cox : 100 backlog just doesn't cut it when
* you start doing multicast video 8)
* Alan Cox : Rewrote net_bh and list manager.
* Alan Cox : Fix ETH_P_ALL echoback lengths.
* Alan Cox : Took out transmit every packet pass
* Saved a few bytes in the ioctl handler
* Alan Cox : Network driver sets packet type before
* calling netif_rx. Saves a function
* call a packet.
* Alan Cox : Hashed net_bh()
* Richard Kooijman: Timestamp fixes.
* Alan Cox : Wrong field in SIOCGIFDSTADDR
* Alan Cox : Device lock protection.
* Alan Cox : Fixed nasty side effect of device close
* changes.
* Rudi Cilibrasi : Pass the right thing to
* set_mac_address()
* Dave Miller : 32bit quantity for the device lock to
* make it work out on a Sparc.
* Bjorn Ekwall : Added KERNELD hack.
* Alan Cox : Cleaned up the backlog initialise.
* Craig Metz : SIOCGIFCONF fix if space for under
* 1 device.
* Thomas Bogendoerfer : Return ENODEV for dev_open, if there
* is no device open function.
* Andi Kleen : Fix error reporting for SIOCGIFCONF
* Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
* Cyrus Durgin : Cleaned for KMOD
* Adam Sulmicki : Bug Fix : Network Device Unload
* A network device unload needs to purge
* the backlog queue.
* Paul Rusty Russell : SIOCSIFNAME
* Pekka Riikonen : Netdev boot-time settings code
* Andrew Morton : Make unregister_netdevice wait
* indefinitely on dev->refcnt
* J Hadi Salim : - Backlog queue sampling
* - netif_rx() feedback
*/
#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/netdev_queues.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/tcx.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/rps.h>
#include <linux/phy_link_topology.h>
#include "dev.h"
#include "devmem.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack);
static DEFINE_MUTEX(ifalias_mutex);
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static inline void dev_base_seq_inc(struct net *net)
{
unsigned int val = net->dev_base_seq + 1;
WRITE_ONCE(net->dev_base_seq, val ?: 1);
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
#ifndef CONFIG_PREEMPT_RT
static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
static int __init setup_backlog_napi_threads(char *arg)
{
static_branch_enable(&use_backlog_threads_key);
return 0;
}
early_param("thread_backlog_napi", setup_backlog_napi_threads);
static bool use_backlog_threads(void)
{
return static_branch_unlikely(&use_backlog_threads_key);
}
#else
static bool use_backlog_threads(void)
{
return true;
}
#endif
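/*
 * With CONFIG_PREEMPT_RT disabled, backlog NAPI threads are opt-in via the
 * boot parameter registered above, i.e. adding
 *
 *	thread_backlog_napi
 *
 * to the kernel command line; on PREEMPT_RT they are always used.
 */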
static inline void backlog_lock_irq_save(struct softnet_data *sd,
unsigned long *flags)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
else
local_irq_save(*flags);
}
static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irq(&sd->input_pkt_queue.lock);
else
local_irq_disable();
}
static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
unsigned long *flags)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
else
local_irq_restore(*flags);
}
static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irq(&sd->input_pkt_queue.lock);
else
local_irq_enable();
}
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
const char *name)
{
struct netdev_name_node *name_node;
name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
if (!name_node)
return NULL;
INIT_HLIST_NODE(&name_node->hlist);
name_node->dev = dev;
name_node->name = name;
return name_node;
}
static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
struct netdev_name_node *name_node;
name_node = netdev_name_node_alloc(dev, dev->name);
if (!name_node)
return NULL;
INIT_LIST_HEAD(&name_node->list);
return name_node;
}
static void netdev_name_node_free(struct netdev_name_node *name_node)
{
kfree(name_node);
}
static void netdev_name_node_add(struct net *net,
struct netdev_name_node *name_node)
{
hlist_add_head_rcu(&name_node->hlist,
dev_name_hash(net, name_node->name));
}
static void netdev_name_node_del(struct netdev_name_node *name_node)
{
hlist_del_rcu(&name_node->hlist);
}
static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
const char *name)
{
struct hlist_head *head = dev_name_hash(net, name);
struct netdev_name_node *name_node;
hlist_for_each_entry(name_node, head, hlist)
if (!strcmp(name_node->name, name))
return name_node;
return NULL;
}
static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
const char *name)
{
struct hlist_head *head = dev_name_hash(net, name);
struct netdev_name_node *name_node;
hlist_for_each_entry_rcu(name_node, head, hlist)
if (!strcmp(name_node->name, name))
return name_node;
return NULL;
}
bool netdev_name_in_use(struct net *net, const char *name)
{
return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
name_node = netdev_name_node_lookup(net, name);
if (name_node)
return -EEXIST;
name_node = netdev_name_node_alloc(dev, name);
if (!name_node)
return -ENOMEM;
netdev_name_node_add(net, name_node);
/* The node that holds dev->name acts as a head of per-device list. */
list_add_tail_rcu(&name_node->list, &dev->name_node->list);
return 0;
}
static void netdev_name_node_alt_free(struct rcu_head *head)
{
struct netdev_name_node *name_node =
container_of(head, struct netdev_name_node, rcu);
kfree(name_node->name);
netdev_name_node_free(name_node);
}
static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
netdev_name_node_del(name_node);
list_del(&name_node->list);
call_rcu(&name_node->rcu, netdev_name_node_alt_free);
}
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
name_node = netdev_name_node_lookup(net, name);
if (!name_node)
return -ENOENT;
/* lookup might have found our primary name or a name belonging
* to another device.
*/
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
__netdev_name_node_alt_destroy(name_node);
return 0;
}
static void netdev_name_node_alt_flush(struct net_device *dev)
{
struct netdev_name_node *name_node, *tmp;
list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
list_del(&name_node->list);
netdev_name_node_alt_free(&name_node->rcu);
}
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
netdev_name_node_add(net, dev->name_node);
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
netdev_for_each_altname(dev, name_node)
netdev_name_node_add(net, name_node);
/* We reserved the ifindex, this can't fail */
WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
dev_base_seq_inc(net);
}
/* Device list removal
* caller must respect a RCU grace period before freeing/reusing dev
*/
static void unlist_netdevice(struct net_device *dev)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
xa_erase(&net->dev_by_index, dev->ifindex);
netdev_for_each_altname(dev, name_node)
netdev_name_node_del(name_node);
/* Unlink dev from the device chain */
list_del_rcu(&dev->dev_list);
netdev_name_node_del(dev->name_node);
hlist_del_rcu(&dev->index_hlist);
dev_base_seq_inc(dev_net(dev));
}
/*
* Our notifier list
*/
static RAW_NOTIFIER_HEAD(netdev_chain);
/*
* Device drivers call our routines to queue packets here. We empty the
* queue in the local softnet handler.
*/
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
};
EXPORT_PER_CPU_SYMBOL(softnet_data);
/* Page_pool has a lockless array/stack to alloc/recycle pages.
* PP consumers must pay attention to run APIs in the appropriate context
* (e.g. NAPI context).
*/
DEFINE_PER_CPU(struct page_pool_bh, system_page_pool) = {
.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
* according to dev->type
*/
static const unsigned short netdev_lock_type[] = {
ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
static const char *const netdev_lock_name[] = {
"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
int i;
for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
if (netdev_lock_type[i] == dev_type)
return i;
/* the last key is used by default */
return ARRAY_SIZE(netdev_lock_type) - 1;
}
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{
int i;
i = netdev_lock_pos(dev_type);
lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
netdev_lock_name[i]);
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
int i;
i = netdev_lock_pos(dev->type);
lockdep_set_class_and_name(&dev->addr_list_lock,
&netdev_addr_lock_key[i],
netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
*
* Protocol management and registration routines
*
*******************************************************************************/
/*
* Add a protocol ID to the list. Now that the input handler is
* smarter we can dispense with all the messy stuff that used to be
* here.
*
* BEWARE!!! Protocol handlers, mangling input packets,
* MUST BE last in hash buckets and checking protocol handlers
* MUST start from promiscuous ptype_all chain in net_bh.
* It is true now, do not change it.
* Explanation follows: if protocol handler, mangling packet, will
* be the first on list, it is not able to sense, that packet
* is cloned and should be copied-on-write, so that it will
* change it and subsequent readers will get broken packet.
* --ANK (980803)
*/
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL)) {
if (!pt->af_packet_net && !pt->dev)
return NULL;
return pt->dev ? &pt->dev->ptype_all :
&pt->af_packet_net->ptype_all;
}
if (pt->dev)
return &pt->dev->ptype_specific;
return pt->af_packet_net ? &pt->af_packet_net->ptype_specific :
&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
* dev_add_pack - add packet handler
* @pt: packet type declaration
*
* Add a protocol handler to the networking stack. The passed &packet_type
* is linked into kernel lists and may not be freed until it has been
* removed from the kernel lists.
*
* This call does not sleep, therefore it cannot
* guarantee that all CPUs that are in the middle of receiving packets
* will see the new packet type (until the next received packet).
*/
void dev_add_pack(struct packet_type *pt)
{
struct list_head *head = ptype_head(pt);
if (WARN_ON_ONCE(!head))
return;
spin_lock(&ptype_lock);
list_add_rcu(&pt->list, head);
spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
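/*
 * A protocol module typically registers a handler like the following sketch
 * (the handler body and the ETH_P_FOO ethertype are hypothetical):
 *
 *	static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
 *			   struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// process skb, then consume or kfree_skb() it
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.func = foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);		// on module init
 *	dev_remove_pack(&foo_packet_type);	// on module exit
 */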
/**
* __dev_remove_pack - remove packet handler
* @pt: packet type declaration
*
* Remove a protocol handler that was previously added to the kernel
* protocol handlers by dev_add_pack(). The passed &packet_type is removed
* from the kernel lists and can be freed or reused once this function
* returns.
*
* The packet type might still be in use by receivers
* and must not be freed until after all CPUs have gone
* through a quiescent state.
*/
void __dev_remove_pack(struct packet_type *pt)
{
struct list_head *head = ptype_head(pt);
struct packet_type *pt1;
if (!head)
return;
spin_lock(&ptype_lock);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
list_del_rcu(&pt->list);
goto out;
}
}
pr_warn("dev_remove_pack: %p not found\n", pt);
out:
spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
* dev_remove_pack - remove packet handler
* @pt: packet type declaration
*
* Remove a protocol handler that was previously added to the kernel
* protocol handlers by dev_add_pack(). The passed &packet_type is removed
* from the kernel lists and can be freed or reused once this function
* returns.
*
* This call sleeps to guarantee that no CPU is looking at the packet
* type after return.
*/
void dev_remove_pack(struct packet_type *pt)
{
__dev_remove_pack(pt);
synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/*******************************************************************************
*
* Device Interface Subroutines
*
*******************************************************************************/
/**
* dev_get_iflink - get 'iflink' value of an interface
* @dev: targeted interface
*
* Indicates the ifindex the interface is linked to.
* Physical interfaces have the same 'ifindex' and 'iflink' values.
*/
int dev_get_iflink(const struct net_device *dev)
{
if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
return dev->netdev_ops->ndo_get_iflink(dev);
return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);
/**
* dev_fill_metadata_dst - Retrieve tunnel egress information.
* @dev: targeted interface
* @skb: The packet.
*
* For better visibility of tunnel traffic, OVS needs to retrieve
* egress tunnel information for a packet. The following API allows
* the user to get this info.
*/
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info;
if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
return -EINVAL;
info = skb_tunnel_info_unclone(skb);
if (!info)
return -ENOMEM;
if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
return -EINVAL;
return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
int k = stack->num_paths++;
if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
return NULL;
return &stack->path[k];
}
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
struct net_device_path_stack *stack)
{
const struct net_device *last_dev;
struct net_device_path_ctx ctx = {
.dev = dev,
};
struct net_device_path *path;
int ret = 0;
memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
stack->num_paths = 0;
while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
last_dev = ctx.dev;
path = dev_fwd_path(stack);
if (!path)
return -1;
memset(path, 0, sizeof(struct net_device_path));
ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
if (ret < 0)
return -1;
if (WARN_ON_ONCE(last_dev == ctx.dev))
return -1;
}
if (!ctx.dev)
return ret;
path = dev_fwd_path(stack);
if (!path)
return -1;
path->type = DEV_PATH_ETHERNET;
path->dev = ctx.dev;
return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);
/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
unsigned int hash = napi_id % HASH_SIZE(napi_hash);
struct napi_struct *napi;
hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
if (napi->napi_id == napi_id)
return napi;
return NULL;
}
/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *
netdev_napi_by_id(struct net *net, unsigned int napi_id)
{
struct napi_struct *napi;
napi = napi_by_id(napi_id);
if (!napi)
return NULL;
if (WARN_ON_ONCE(!napi->dev))
return NULL;
if (!net_eq(net, dev_net(napi->dev)))
return NULL;
return napi;
}
/**
* netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
* @net: the applicable net namespace
* @napi_id: ID of a NAPI of a target device
*
* Find a NAPI instance with @napi_id. Lock its device.
* The device must be in %NETREG_REGISTERED state for lookup to succeed.
* netdev_unlock() must be called to release it.
*
* Return: pointer to the NAPI instance with its device lock held, or NULL if not found.
*/
struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id)
{
struct napi_struct *napi;
struct net_device *dev;
rcu_read_lock();
napi = netdev_napi_by_id(net, napi_id);
if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) {
rcu_read_unlock();
return NULL;
}
dev = napi->dev;
dev_hold(dev);
rcu_read_unlock();
dev = __netdev_put_lock(dev, net);
if (!dev)
return NULL;
rcu_read_lock();
napi = netdev_napi_by_id(net, napi_id);
if (napi && napi->dev != dev)
napi = NULL;
rcu_read_unlock();
if (!napi)
netdev_unlock(dev);
return napi;
}
/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name. Must be called under RTNL semaphore.
* If the name is found a pointer to the device is returned.
* If the name is not found then %NULL is returned. The
* reference counters are not incremented so the caller must be
* careful with locks.
*/
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
struct netdev_name_node *node_name;
node_name = netdev_name_node_lookup(net, name);
return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
* dev_get_by_name_rcu - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name.
* If the name is found a pointer to the device is returned.
* If the name is not found then %NULL is returned.
* The reference counters are not incremented so the caller must be
* careful with locks. The caller must hold RCU lock.
*/
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
struct netdev_name_node *node_name;
node_name = netdev_name_node_lookup_rcu(net, name);
return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
dev_hold(dev);
rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
/**
* netdev_get_by_name() - find a device by its name
* @net: the applicable net namespace
* @name: name to find
* @tracker: tracking object for the acquired reference
* @gfp: allocation flags for the tracker
*
* Find an interface by name. This can be called from any
* context and does its own locking. The returned handle has
* the usage count incremented and the caller must use netdev_put() to
* release it when it is no longer needed. %NULL is returned if no
* matching device is found.
*/
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp)
{
struct net_device *dev;
dev = dev_get_by_name(net, name);
if (dev)
netdev_tracker_alloc(dev, tracker, gfp);
return dev;
}
EXPORT_SYMBOL(netdev_get_by_name);
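/*
 * Typical usage pairs the lookup with netdev_put() once the reference is no
 * longer needed, e.g. (the device name below is hypothetical):
 *
 *	netdevice_tracker tracker;
 *	struct net_device *dev;
 *
 *	dev = netdev_get_by_name(&init_net, "eth0", &tracker, GFP_KERNEL);
 *	if (dev) {
 *		// use dev ...
 *		netdev_put(dev, &tracker);
 *	}
 */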
/**
* __dev_get_by_index - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
* about locking. The caller must hold the RTNL semaphore.
*/
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
hlist_for_each_entry(dev, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
* dev_get_by_index_rcu - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
* about locking. The caller must hold RCU lock.
*/
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
hlist_for_each_entry_rcu(dev, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
dev_hold(dev);
rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
* netdev_get_by_index() - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
* @tracker: tracking object for the acquired reference
* @gfp: allocation flags for the tracker
*
* Search for an interface by index. Returns NULL if the device
* is not found or a pointer to the device. The device returned has
* had a reference added and the pointer is safe until the user calls
* netdev_put() to indicate they have finished with it.
*/
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
netdevice_tracker *tracker, gfp_t gfp)
{
struct net_device *dev;
dev = dev_get_by_index(net, ifindex);
if (dev)
netdev_tracker_alloc(dev, tracker, gfp);
return dev;
}
EXPORT_SYMBOL(netdev_get_by_index);
/**
* dev_get_by_napi_id - find a device by napi_id
* @napi_id: ID of the NAPI struct
*
* Search for an interface by NAPI ID. Returns %NULL if the device
* is not found or a pointer to the device. The device has not had
* its reference counter increased so the caller must be careful
* about locking. The caller must hold RCU lock.
*/
struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
struct napi_struct *napi;
WARN_ON_ONCE(!rcu_read_lock_held());
if (!napi_id_valid(napi_id))
return NULL;
napi = napi_by_id(napi_id);
return napi ? napi->dev : NULL;
}
/* Release the held reference on the net_device, and if the net_device
* is still registered try to lock the instance lock. If the device is being
* unregistered, NULL is returned (but the reference has been released
* either way!).
*
* This helper is intended for locking net_device after it has been looked up
* using a lockless lookup helper. Lock prevents the instance from going away.
*/
struct net_device *__netdev_put_lock(struct net_device *dev, struct net *net)
{
netdev_lock(dev);
if (dev->reg_state > NETREG_REGISTERED ||
dev->moving_ns || !net_eq(dev_net(dev), net)) {
netdev_unlock(dev);
dev_put(dev);
return NULL;
}
dev_put(dev);
return dev;
}
static struct net_device *
__netdev_put_lock_ops_compat(struct net_device *dev, struct net *net)
{
netdev_lock_ops_compat(dev);
if (dev->reg_state > NETREG_REGISTERED ||
dev->moving_ns || !net_eq(dev_net(dev), net)) {
netdev_unlock_ops_compat(dev);
dev_put(dev);
return NULL;
}
dev_put(dev);
return dev;
}
/**
* netdev_get_by_index_lock() - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. If a valid device
* with @ifindex is found it will be returned with netdev->lock held.
* netdev_unlock() must be called to release it.
*
* Return: pointer to a device with lock held, NULL if not found.
*/
struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex)
{
struct net_device *dev;
dev = dev_get_by_index(net, ifindex);
if (!dev)
return NULL;
return __netdev_put_lock(dev, net);
}
struct net_device *
netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex)
{
struct net_device *dev;
dev = dev_get_by_index(net, ifindex);
if (!dev)
return NULL;
return __netdev_put_lock_ops_compat(dev, net);
}
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
unsigned long *index)
{
if (dev)
netdev_unlock(dev);
do {
rcu_read_lock();
dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT);
if (!dev) {
rcu_read_unlock();
return NULL;
}
dev_hold(dev);
rcu_read_unlock();
dev = __netdev_put_lock(dev, net);
if (dev)
return dev;
(*index)++;
} while (true);
}
struct net_device *
netdev_xa_find_lock_ops_compat(struct net *net, struct net_device *dev,
unsigned long *index)
{
if (dev)
netdev_unlock_ops_compat(dev);
do {
rcu_read_lock();
dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT);
if (!dev) {
rcu_read_unlock();
return NULL;
}
dev_hold(dev);
rcu_read_unlock();
dev = __netdev_put_lock_ops_compat(dev, net);
if (dev)
return dev;
(*index)++;
} while (true);
}
static DEFINE_SEQLOCK(netdev_rename_lock);
void netdev_copy_name(struct net_device *dev, char *name)
{
unsigned int seq;
do {
seq = read_seqbegin(&netdev_rename_lock);
strscpy(name, dev->name, IFNAMSIZ);
} while (read_seqretry(&netdev_rename_lock, seq));
}
/**
* netdev_get_name - get a netdevice name, knowing its ifindex.
* @net: network namespace
* @name: a pointer to the buffer where the name will be stored.
* @ifindex: the ifindex of the interface to get the name from.
*/
int netdev_get_name(struct net *net, char *name, int ifindex)
{
struct net_device *dev;
int ret;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (!dev) {
ret = -ENODEV;
goto out;
}
netdev_copy_name(dev, name);
ret = 0;
out:
rcu_read_unlock();
return ret;
}
static bool dev_addr_cmp(struct net_device *dev, unsigned short type,
const char *ha)
{
return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len);
}
/**
* dev_getbyhwaddr_rcu - find a device by its hardware address
* @net: the applicable net namespace
* @type: media type of device
* @ha: hardware address
*
* Search for an interface by MAC address. Returns NULL if the device
* is not found or a pointer to the device.
* The caller must hold RCU.
* The returned device has not had its ref count increased
* and the caller must therefore be careful about locking
*
*/
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *ha)
{
struct net_device *dev;
for_each_netdev_rcu(net, dev)
if (dev_addr_cmp(dev, type, ha))
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
/**
* dev_getbyhwaddr() - find a device by its hardware address
* @net: the applicable net namespace
* @type: media type of device
* @ha: hardware address
*
* Similar to dev_getbyhwaddr_rcu(), but the owner needs to hold
* rtnl_lock.
*
* Context: rtnl_lock() must be held.
* Return: pointer to the net_device, or NULL if not found
*/
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
const char *ha)
{
struct net_device *dev;
ASSERT_RTNL();
for_each_netdev(net, dev)
if (dev_addr_cmp(dev, type, ha))
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
struct net_device *dev, *ret = NULL;
rcu_read_lock();
for_each_netdev_rcu(net, dev)
if (dev->type == type) {
dev_hold(dev);
ret = dev;
break;
}
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
* netdev_get_by_flags_rcu - find any device with given flags
* @net: the applicable net namespace
* @tracker: tracking object for the acquired reference
* @if_flags: IFF_* values
* @mask: bitmask of bits in if_flags to check
*
* Search for any interface with the given flags.
*
* Context: rcu_read_lock() must be held.
* Returns: NULL if a device is not found or a pointer to the device.
*/
struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
unsigned short if_flags, unsigned short mask)
{
struct net_device *dev;
for_each_netdev_rcu(net, dev) {
if (((READ_ONCE(dev->flags) ^ if_flags) & mask) == 0) {
netdev_hold(dev, tracker, GFP_ATOMIC);
return dev;
}
}
return NULL;
}
EXPORT_IPV6_MOD(netdev_get_by_flags_rcu);
/**
* dev_valid_name - check if name is okay for network device
* @name: name string
*
* Network device names need to be valid file names to
* allow sysfs to work. We also disallow any kind of
* whitespace.
*/
bool dev_valid_name(const char *name)
{
if (*name == '\0')
return false;
if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
return false;
if (!strcmp(name, ".") || !strcmp(name, ".."))
return false;
while (*name) {
if (*name == '/' || *name == ':' || isspace(*name))
return false;
name++;
}
return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
* __dev_alloc_name - allocate a name for a device
* @net: network namespace to allocate the device name in
* @name: name format string
* @res: result name string
*
* Passed a format string - eg "lt%d" it will try and find a suitable
* id. It scans list of devices to build up a free map, then chooses
* the first empty slot. The caller must hold the dev_base or rtnl lock
* while allocating the name and adding the device in order to avoid
* duplicates.
* Limited to bits_per_byte * page size devices (ie 32K on most platforms).
* Returns the number of the unit assigned or a negative errno code.
*/
static int __dev_alloc_name(struct net *net, const char *name, char *res)
{
int i = 0;
const char *p;
const int max_netdevices = 8*PAGE_SIZE;
unsigned long *inuse;
struct net_device *d;
char buf[IFNAMSIZ];
/* Verify the string as this thing may have come from the user.
* There must be one "%d" and no other "%" characters.
*/
p = strchr(name, '%');
if (!p || p[1] != 'd' || strchr(p + 2, '%'))
return -EINVAL;
/* Use one page as a bit array of possible slots */
inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
if (!inuse)
return -ENOMEM;
for_each_netdev(net, d) {
struct netdev_name_node *name_node;
netdev_for_each_altname(d, name_node) {
if (!sscanf(name_node->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
continue;
/* avoid cases where sscanf is not exact inverse of printf */
snprintf(buf, IFNAMSIZ, name, i);
if (!strncmp(buf, name_node->name, IFNAMSIZ))
__set_bit(i, inuse);
}
if (!sscanf(d->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
continue;
/* avoid cases where sscanf is not exact inverse of printf */
snprintf(buf, IFNAMSIZ, name, i);
if (!strncmp(buf, d->name, IFNAMSIZ))
__set_bit(i, inuse);
}
i = find_first_zero_bit(inuse, max_netdevices);
bitmap_free(inuse);
if (i == max_netdevices)
return -ENFILE;
/* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
strscpy(buf, name, IFNAMSIZ);
snprintf(res, IFNAMSIZ, buf, i);
return i;
}
/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
const char *want_name, char *out_name,
int dup_errno)
{
if (!dev_valid_name(want_name))
return -EINVAL;
if (strchr(want_name, '%'))
return __dev_alloc_name(net, want_name, out_name);
if (netdev_name_in_use(net, want_name))
return -dup_errno;
if (out_name != want_name)
strscpy(out_name, want_name, IFNAMSIZ);
return 0;
}
/**
* dev_alloc_name - allocate a name for a device
* @dev: device
* @name: name format string
*
* Passed a format string - eg "lt%d" it will try and find a suitable
* id. It scans list of devices to build up a free map, then chooses
* the first empty slot. The caller must hold the dev_base or rtnl lock
* while allocating the name and adding the device in order to avoid
* duplicates.
* Limited to bits_per_byte * page size devices (ie 32K on most platforms).
* Returns the number of the unit assigned or a negative errno code.
*/
int dev_alloc_name(struct net_device *dev, const char *name)
{
return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
}
EXPORT_SYMBOL(dev_alloc_name);
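/*
 * For instance, a driver registering devices named "foo%d" (a hypothetical
 * prefix) would call:
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		return err;	// invalid format or no free unit
 *	// dev->name now holds e.g. "foo0" and err is the assigned unit number
 */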
static int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
int ret;
ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
return ret < 0 ? ret : 0;
}
int netif_change_name(struct net_device *dev, const char *newname)
{
struct net *net = dev_net(dev);
unsigned char old_assign_type;
char oldname[IFNAMSIZ];
int err = 0;
int ret;
ASSERT_RTNL_NET(net);
if (!strncmp(newname, dev->name, IFNAMSIZ))
return 0;
memcpy(oldname, dev->name, IFNAMSIZ);
write_seqlock_bh(&netdev_rename_lock);
err = dev_get_valid_name(net, dev, newname);
write_sequnlock_bh(&netdev_rename_lock);
if (err < 0)
return err;
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s%s\n", oldname,
dev->flags & IFF_UP ? " (while UP)" : "");
old_assign_type = dev->name_assign_type;
WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
write_seqlock_bh(&netdev_rename_lock);
memcpy(dev->name, oldname, IFNAMSIZ);
write_sequnlock_bh(&netdev_rename_lock);
WRITE_ONCE(dev->name_assign_type, old_assign_type);
return ret;
}
netdev_adjacent_rename_links(dev, oldname);
netdev_name_node_del(dev->name_node);
synchronize_net();
netdev_name_node_add(net, dev->name_node);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
if (ret) {
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
write_seqlock_bh(&netdev_rename_lock);
memcpy(dev->name, oldname, IFNAMSIZ);
write_sequnlock_bh(&netdev_rename_lock);
memcpy(oldname, newname, IFNAMSIZ);
WRITE_ONCE(dev->name_assign_type, old_assign_type);
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
netdev_err(dev, "name change rollback failed: %d\n",
ret);
}
}
return err;
}
int netif_set_alias(struct net_device *dev, const char *alias, size_t len)
{
struct dev_ifalias *new_alias = NULL;
if (len >= IFALIASZ)
return -EINVAL;
if (len) {
new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
if (!new_alias)
return -ENOMEM;
memcpy(new_alias->ifalias, alias, len);
new_alias->ifalias[len] = 0;
}
mutex_lock(&ifalias_mutex);
new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
mutex_is_locked(&ifalias_mutex));
mutex_unlock(&ifalias_mutex);
if (new_alias)
kfree_rcu(new_alias, rcuhead);
return len;
}
/**
* dev_get_alias - get ifalias of a device
* @dev: device
* @name: buffer to store name of ifalias
* @len: size of buffer
*
* get ifalias for a device. Caller must make sure dev cannot go
* away, e.g. rcu read lock or own a reference count to device.
*/
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
const struct dev_ifalias *alias;
int ret = 0;
rcu_read_lock();
alias = rcu_dereference(dev->ifalias);
if (alias)
ret = snprintf(name, len, "%s", alias->ifalias);
rcu_read_unlock();
return ret;
}
/**
* netdev_features_change - device changes features
* @dev: device to cause notification
*
* Called to indicate a device has changed features.
*/
void netdev_features_change(struct net_device *dev)
{
call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
void netif_state_change(struct net_device *dev)
{
netdev_ops_assert_locked_or_invisible(dev);
if (dev->flags & IFF_UP) {
struct netdev_notifier_change_info change_info = {
.info.dev = dev,
};
call_netdevice_notifiers_info(NETDEV_CHANGE,
&change_info.info);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
}
}
/**
* __netdev_notify_peers - notify network peers about existence of @dev,
* to be called when rtnl lock is already held.
* @dev: network device
*
* Generate traffic such that interested network peers are aware of
* @dev, such as by generating a gratuitous ARP. This may be used when
* a device wants to inform the rest of the network about some sort of
* reconfiguration such as a failover event or virtual machine
* migration.
*/
void __netdev_notify_peers(struct net_device *dev)
{
ASSERT_RTNL();
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);
/**
* netdev_notify_peers - notify network peers about existence of @dev
* @dev: network device
*
* Generate traffic such that interested network peers are aware of
* @dev, such as by generating a gratuitous ARP. This may be used when
* a device wants to inform the rest of the network about some sort of
* reconfiguration such as a failover event or virtual machine
* migration.
*/
void netdev_notify_peers(struct net_device *dev)
{
rtnl_lock();
__netdev_notify_peers(dev);
rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int napi_threaded_poll(void *data);
static int napi_kthread_create(struct napi_struct *n)
{
int err = 0;
/* Create and wake up the kthread once to put it in
* TASK_INTERRUPTIBLE mode to avoid the blocked task
* warning and work with loadavg.
*/
n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
n->dev->name, n->napi_id);
if (IS_ERR(n->thread)) {
err = PTR_ERR(n->thread);
pr_err("kthread_run failed with err %d\n", err);
n->thread = NULL;
}
return err;
}
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
int ret;
ASSERT_RTNL();
dev_addr_check(dev);
if (!netif_device_present(dev)) {
/* may be detached because parent is runtime-suspended */
if (dev->dev.parent)
pm_runtime_resume(dev->dev.parent);
if (!netif_device_present(dev))
return -ENODEV;
}
/* Block netpoll from trying to do any rx path servicing.
* If we don't do this there is a chance ndo_poll_controller
* or ndo_poll may be running while we open the device
*/
netpoll_poll_disable(dev);
ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
ret = notifier_to_errno(ret);
if (ret)
return ret;
set_bit(__LINK_STATE_START, &dev->state);
netdev_ops_assert_locked(dev);
if (ops->ndo_validate_addr)
ret = ops->ndo_validate_addr(dev);
if (!ret && ops->ndo_open)
ret = ops->ndo_open(dev);
netpoll_poll_enable(dev);
if (ret)
clear_bit(__LINK_STATE_START, &dev->state);
else {
netif_set_up(dev, true);
dev_set_rx_mode(dev);
dev_activate(dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
}
return ret;
}
int netif_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
int ret;
if (dev->flags & IFF_UP)
return 0;
ret = __dev_open(dev, extack);
if (ret < 0)
return ret;
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
call_netdevice_notifiers(NETDEV_UP, dev);
return ret;
}
static void __dev_close_many(struct list_head *head)
{
struct net_device *dev;
ASSERT_RTNL();
might_sleep();
list_for_each_entry(dev, head, close_list) {
/* Temporarily disable netpoll until the interface is down */
netpoll_poll_disable(dev);
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
clear_bit(__LINK_STATE_START, &dev->state);
/* Synchronize to scheduled poll. We cannot touch poll list, it
* can be even on different cpu. So just clear netif_running().
*
* dev->stop() will invoke napi_disable() on all of its
* napi_struct instances on this device.
*/
smp_mb__after_atomic(); /* Commit netif_running(). */
}
dev_deactivate_many(head);
list_for_each_entry(dev, head, close_list) {
const struct net_device_ops *ops = dev->netdev_ops;
/*
* Call the device specific close. This cannot fail.
* Only if device is UP
*
* We allow it to be called even after a DETACH hot-plug
* event.
*/
netdev_ops_assert_locked(dev);
if (ops->ndo_stop)
ops->ndo_stop(dev);
netif_set_up(dev, false);
netpoll_poll_enable(dev);
}
}
static void __dev_close(struct net_device *dev)
{
LIST_HEAD(single);
list_add(&dev->close_list, &single);
__dev_close_many(&single);
list_del(&single);
}
void netif_close_many(struct list_head *head, bool unlink)
{
struct net_device *dev, *tmp;
/* Remove the devices that don't need to be closed */
list_for_each_entry_safe(dev, tmp, head, close_list)
if (!(dev->flags & IFF_UP))
list_del_init(&dev->close_list);
__dev_close_many(head);
list_for_each_entry_safe(dev, tmp, head, close_list) {
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
call_netdevice_notifiers(NETDEV_DOWN, dev);
if (unlink)
list_del_init(&dev->close_list);
}
}
EXPORT_SYMBOL_NS_GPL(netif_close_many, "NETDEV_INTERNAL");
void netif_close(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
LIST_HEAD(single);
list_add(&dev->close_list, &single);
netif_close_many(&single, true);
list_del(&single);
}
}
EXPORT_SYMBOL(netif_close);
void netif_disable_lro(struct net_device *dev)
{
struct net_device *lower_dev;
struct list_head *iter;
dev->wanted_features &= ~NETIF_F_LRO;
netdev_update_features(dev);
if (unlikely(dev->features & NETIF_F_LRO))
netdev_WARN(dev, "failed to disable LRO!\n");
netdev_for_each_lower_dev(dev, lower_dev, iter) {
netdev_lock_ops(lower_dev);
netif_disable_lro(lower_dev);
netdev_unlock_ops(lower_dev);
}
}
EXPORT_IPV6_MOD(netif_disable_lro);
/**
* dev_disable_gro_hw - disable HW Generic Receive Offload on a device
* @dev: device
*
* Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
* called under RTNL. This is needed if Generic XDP is installed on
* the device.
*/
static void dev_disable_gro_hw(struct net_device *dev)
{
dev->wanted_features &= ~NETIF_F_GRO_HW;
netdev_update_features(dev);
if (unlikely(dev->features & NETIF_F_GRO_HW))
netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) \
case NETDEV_##val: \
return "NETDEV_" __stringify(val);
switch (cmd) {
N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
N(XDP_FEAT_CHANGE)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
struct net_device *dev)
{
struct netdev_notifier_info info = {
.dev = dev,
};
return nb->notifier_call(nb, val, &info);
}
static int call_netdevice_register_notifiers(struct notifier_block *nb,
struct net_device *dev)
{
int err;
err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
err = notifier_to_errno(err);
if (err)
return err;
if (!(dev->flags & IFF_UP))
return 0;
call_netdevice_notifier(nb, NETDEV_UP, dev);
return 0;
}
static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
struct net_device *dev)
{
if (dev->flags & IFF_UP) {
call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
dev);
call_netdevice_notifier(nb, NETDEV_DOWN, dev);
}
call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}
static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
struct net *net)
{
struct net_device *dev;
int err;
for_each_netdev(net, dev) {
netdev_lock_ops(dev);
err = call_netdevice_register_notifiers(nb, dev);
netdev_unlock_ops(dev);
if (err)
goto rollback;
}
return 0;
rollback:
for_each_netdev_continue_reverse(net, dev)
call_netdevice_unregister_notifiers(nb, dev);
return err;
}
static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
struct net *net)
{
struct net_device *dev;
for_each_netdev(net, dev)
call_netdevice_unregister_notifiers(nb, dev);
}
static int dev_boot_phase = 1;
/**
* register_netdevice_notifier - register a network notifier block
* @nb: notifier
*
* Register a notifier to be called when network device events occur.
* The notifier passed is linked into the kernel structures and must
* not be reused until it has been unregistered. A negative errno code
* is returned on a failure.
*
* When registered, all registration and up events are replayed
* to the new notifier to allow it a race-free view of the
* network device list.
*/
int register_netdevice_notifier(struct notifier_block *nb)
{
struct net *net;
int err;
/* Close race with setup_net() and cleanup_net() */
down_write(&pernet_ops_rwsem);
/* When RTNL is removed, we need protection for netdev_chain. */
rtnl_lock();
err = raw_notifier_chain_register(&netdev_chain, nb);
if (err)
goto unlock;
if (dev_boot_phase)
goto unlock;
for_each_net(net) {
__rtnl_net_lock(net);
err = call_netdevice_register_net_notifiers(nb, net);
__rtnl_net_unlock(net);
if (err)
goto rollback;
}
unlock:
rtnl_unlock();
up_write(&pernet_ops_rwsem);
return err;
rollback:
for_each_net_continue_reverse(net) {
__rtnl_net_lock(net);
call_netdevice_unregister_net_notifiers(nb, net);
__rtnl_net_unlock(net);
}
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
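/*
 * Illustrative sketch (hypothetical module, names invented for the example):
 * a subsystem that wants to react to interfaces coming up typically
 * registers a notifier block such as:
 *
 *        static int my_netdev_event(struct notifier_block *nb,
 *                                   unsigned long event, void *ptr)
 *        {
 *                struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *                if (event == NETDEV_UP)
 *                        pr_info("%s is up\n", dev->name);
 *                return NOTIFY_DONE;
 *        }
 *
 *        static struct notifier_block my_nb = {
 *                .notifier_call = my_netdev_event,
 *        };
 *
 *        err = register_netdevice_notifier(&my_nb);
 *
 * Because REGISTER/UP events are replayed at registration time, the callback
 * must be prepared to run immediately for devices that already exist.
 */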
/**
* unregister_netdevice_notifier - unregister a network notifier block
* @nb: notifier
*
* Unregister a notifier previously registered by
* register_netdevice_notifier(). The notifier is unlinked from the
* kernel structures and may then be reused. A negative errno code
* is returned on a failure.
*
* After unregistering unregister and down device events are synthesized
* for all devices on the device list to the removed notifier to remove
* the need for special case cleanup code.
*/
int unregister_netdevice_notifier(struct notifier_block *nb)
{
struct net *net;
int err;
/* Close race with setup_net() and cleanup_net() */
down_write(&pernet_ops_rwsem);
rtnl_lock();
err = raw_notifier_chain_unregister(&netdev_chain, nb);
if (err)
goto unlock;
for_each_net(net) {
__rtnl_net_lock(net);
call_netdevice_unregister_net_notifiers(nb, net);
__rtnl_net_unlock(net);
}
unlock:
rtnl_unlock();
up_write(&pernet_ops_rwsem);
return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
static int __register_netdevice_notifier_net(struct net *net,
struct notifier_block *nb,
bool ignore_call_fail)
{
int err;
err = raw_notifier_chain_register(&net->netdev_chain, nb);
if (err)
return err;
if (dev_boot_phase)
return 0;
err = call_netdevice_register_net_notifiers(nb, net);
if (err && !ignore_call_fail)
goto chain_unregister;
return 0;
chain_unregister:
raw_notifier_chain_unregister(&net->netdev_chain, nb);
return err;
}
static int __unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb)
{
int err;
err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
if (err)
return err;
call_netdevice_unregister_net_notifiers(nb, net);
return 0;
}
/**
* register_netdevice_notifier_net - register a per-netns network notifier block
* @net: network namespace
* @nb: notifier
*
* Register a notifier to be called when network device events occur.
* The notifier passed is linked into the kernel structures and must
* not be reused until it has been unregistered. A negative errno code
* is returned on a failure.
*
* When registered, all registration and up events are replayed
* to the new notifier to allow it a race-free view of the
* network device list.
*/
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
int err;
rtnl_net_lock(net);
err = __register_netdevice_notifier_net(net, nb, false);
rtnl_net_unlock(net);
return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);
/**
* unregister_netdevice_notifier_net - unregister a per-netns
* network notifier block
* @net: network namespace
* @nb: notifier
*
* Unregister a notifier previously registered by
* register_netdevice_notifier_net(). The notifier is unlinked from the
* kernel structures and may then be reused. A negative errno code
* is returned on a failure.
*
* After unregistering unregister and down device events are synthesized
* for all devices on the device list to the removed notifier to remove
* the need for special case cleanup code.
*/
int unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb)
{
int err;
rtnl_net_lock(net);
err = __unregister_netdevice_notifier_net(net, nb);
rtnl_net_unlock(net);
return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);
static void __move_netdevice_notifier_net(struct net *src_net,
struct net *dst_net,
struct notifier_block *nb)
{
__unregister_netdevice_notifier_net(src_net, nb);
__register_netdevice_notifier_net(dst_net, nb, true);
}
static void rtnl_net_dev_lock(struct net_device *dev)
{
bool again;
do {
struct net *net;
again = false;
/* netns might be being dismantled. */
rcu_read_lock();
net = dev_net_rcu(dev);
net_passive_inc(net);
rcu_read_unlock();
rtnl_net_lock(net);
#ifdef CONFIG_NET_NS
/* dev might have been moved to another netns. */
if (!net_eq(net, rcu_access_pointer(dev->nd_net.net))) {
rtnl_net_unlock(net);
net_passive_dec(net);
again = true;
}
#endif
} while (again);
}
static void rtnl_net_dev_unlock(struct net_device *dev)
{
struct net *net = dev_net(dev);
rtnl_net_unlock(net);
net_passive_dec(net);
}
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
{
int err;
rtnl_net_dev_lock(dev);
err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
if (!err) {
nn->nb = nb;
list_add(&nn->list, &dev->net_notifier_list);
}
rtnl_net_dev_unlock(dev);
return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
int unregister_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
{
int err;
rtnl_net_dev_lock(dev);
list_del(&nn->list);
err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
rtnl_net_dev_unlock(dev);
return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
static void move_netdevice_notifiers_dev_net(struct net_device *dev,
struct net *net)
{
struct netdev_net_notifier *nn;
list_for_each_entry(nn, &dev->net_notifier_list, list)
__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
}
/**
* call_netdevice_notifiers_info - call all network notifier blocks
* @val: value passed unmodified to notifier function
* @info: notifier information data
*
* Call all network notifier blocks. Parameters and return value
* are as for raw_notifier_call_chain().
*/
int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info)
{
struct net *net = dev_net(info->dev);
int ret;
ASSERT_RTNL();
/* Run per-netns notifier block chain first, then run the global one.
* Hopefully, one day, the global one is going to be removed after
* all notifier block registrators get converted to be per-netns.
*/
ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
if (ret & NOTIFY_STOP_MASK)
return ret;
return raw_notifier_call_chain(&netdev_chain, val, info);
}
/**
* call_netdevice_notifiers_info_robust - call per-netns notifier blocks
* and roll back on error
* @val_up: value passed unmodified to notifier function
* @val_down: value passed unmodified to the notifier function when
* recovering from an error on @val_up
* @info: notifier information data
*
* Call all per-netns network notifier blocks, but not notifier blocks on
* the global notifier chain. Parameters and return value are as for
* raw_notifier_call_chain_robust().
*/
static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
unsigned long val_down,
struct netdev_notifier_info *info)
{
struct net *net = dev_net(info->dev);
ASSERT_RTNL();
return raw_notifier_call_chain_robust(&net->netdev_chain,
val_up, val_down, info);
}
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct netdev_notifier_info info = {
.dev = dev,
.extack = extack,
};
return call_netdevice_notifiers_info(val, &info);
}
/**
* call_netdevice_notifiers - call all network notifier blocks
* @val: value passed unmodified to notifier function
* @dev: net_device pointer passed unmodified to notifier function
*
* Call all network notifier blocks. Parameters and return value
* are as for raw_notifier_call_chain().
*/
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
/**
* call_netdevice_notifiers_mtu - call all network notifier blocks
* @val: value passed unmodified to notifier function
* @dev: net_device pointer passed unmodified to notifier function
* @arg: additional u32 argument passed to the notifier function
*
* Call all network notifier blocks. Parameters and return value
* are as for raw_notifier_call_chain().
*/
static int call_netdevice_notifiers_mtu(unsigned long val,
struct net_device *dev, u32 arg)
{
struct netdev_notifier_info_ext info = {
.info.dev = dev,
.ext.mtu = arg,
};
BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
void net_inc_ingress_queue(void)
{
static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
void net_dec_ingress_queue(void)
{
static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif
#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
void net_inc_egress_queue(void)
{
static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);
void net_dec_egress_queue(void)
{
static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
EXPORT_SYMBOL(tcf_sw_enabled_key);
#endif
DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
int wanted;
wanted = atomic_add_return(deferred, &netstamp_wanted);
if (wanted > 0)
static_branch_enable(&netstamp_needed_key);
else
static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif
void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
int wanted = atomic_read(&netstamp_wanted);
while (wanted > 0) {
if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
return;
}
atomic_inc(&netstamp_needed_deferred);
schedule_work(&netstamp_work);
#else
static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
int wanted = atomic_read(&netstamp_wanted);
while (wanted > 1) {
if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
return;
}
atomic_dec(&netstamp_needed_deferred);
schedule_work(&netstamp_work);
#else
static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
skb->tstamp = 0;
skb->tstamp_type = SKB_CLOCK_REALTIME;
if (static_branch_unlikely(&netstamp_needed_key))
skb->tstamp = ktime_get_real();
}
#define net_timestamp_check(COND, SKB) \
if (static_branch_unlikely(&netstamp_needed_key)) { \
if ((COND) && !(SKB)->tstamp) \
(SKB)->tstamp = ktime_get_real(); \
} \
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
return __is_skb_forwardable(dev, skb, true);
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
bool check_mtu)
{
int ret = ____dev_forward_skb(dev, skb, check_mtu);
if (likely(!ret)) {
skb->protocol = eth_type_trans(skb, dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
return ret;
}
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
return __dev_forward_skb2(dev, skb, true);
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
* dev_forward_skb - loopback an skb to another netif
*
* @dev: destination network device
* @skb: buffer to forward
*
* return values:
* NET_RX_SUCCESS (no congestion)
* NET_RX_DROP (packet was dropped, but freed)
*
* dev_forward_skb can be used for injecting an skb from the
* start_xmit function of one device into the receive queue
* of another device.
*
* The receiving device may be in another namespace, so
* we have to clear all information in the skb that could
* impact namespace isolation.
*/
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
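/*
 * Illustrative sketch (loosely modelled on pair devices such as veth; "peer"
 * and "priv->dropped" are hypothetical names): an ndo_start_xmit() handler
 * can inject the skb into the peer's receive path with:
 *
 *        unsigned int len = skb->len;
 *
 *        if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
 *                dev_sw_netstats_tx_add(dev, 1, len);
 *        else
 *                atomic64_inc(&priv->dropped);
 *
 * In both cases the skb has been consumed (delivered or freed), so the
 * caller must not touch it afterwards; that is why len is sampled first.
 */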
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
{
return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
}
static inline int deliver_skb(struct sk_buff *skb,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{
if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
return -ENOMEM;
refcount_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
static inline void deliver_ptype_list_skb(struct sk_buff *skb,
struct packet_type **pt,
struct net_device *orig_dev,
__be16 type,
struct list_head *ptype_list)
{
struct packet_type *ptype, *pt_prev = *pt;
list_for_each_entry_rcu(ptype, ptype_list, list) {
if (ptype->type != type)
continue;
if (pt_prev)
deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
*pt = pt_prev;
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
if (!ptype->af_packet_priv || !skb->sk)
return false;
if (ptype->id_match)
return ptype->id_match(ptype, skb->sk);
else if ((struct sock *)ptype->af_packet_priv == skb->sk)
return true;
return false;
}
/**
* dev_nit_active_rcu - return true if any network interface taps are in use
*
* The caller must hold the RCU lock
*
* @dev: network device to check for the presence of taps
*/
bool dev_nit_active_rcu(const struct net_device *dev)
{
/* Callers may hold either RCU or RCU BH lock */
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
return !list_empty(&dev_net(dev)->ptype_all) ||
!list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active_rcu);
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
*/
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype, *pt_prev = NULL;
struct list_head *ptype_list;
struct sk_buff *skb2 = NULL;
rcu_read_lock();
ptype_list = &dev_net_rcu(dev)->ptype_all;
again:
list_for_each_entry_rcu(ptype, ptype_list, list) {
if (READ_ONCE(ptype->ignore_outgoing))
continue;
/* Never send packets back to the socket
* they originated from - MvS (miquels@drinkel.ow.org)
*/
if (skb_loop_sk(ptype, skb))
continue;
if (pt_prev) {
deliver_skb(skb2, pt_prev, skb->dev);
pt_prev = ptype;
continue;
}
/* need to clone skb, done only once */
skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2)
goto out_unlock;
net_timestamp_set(skb2);
/* skb->nh should be correctly
* set by sender, so that the second statement is
* just protection against buggy protocols.
*/
skb_reset_mac_header(skb2);
if (skb_network_header(skb2) < skb2->data ||
skb_network_header(skb2) > skb_tail_pointer(skb2)) {
net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
ntohs(skb2->protocol),
dev->name);
skb_reset_network_header(skb2);
}
skb2->transport_header = skb2->network_header;
skb2->pkt_type = PACKET_OUTGOING;
pt_prev = ptype;
}
if (ptype_list != &dev->ptype_all) {
ptype_list = &dev->ptype_all;
goto again;
}
out_unlock:
if (pt_prev) {
if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
else
kfree_skb(skb2);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
* @dev: Network device
* @txq: number of queues available
*
* If real_num_tx_queues is changed the tc mappings may no longer be
* valid. To resolve this verify the tc mapping remains valid and if
* not reset the mapping to zero. With no priorities mapping to an
* offset/count pair it will no longer be used. In the worst case, if
* TC0 is invalid, nothing can be done, so priority mappings are
* disabled entirely. It is expected that drivers will fix this mapping
* if they can before calling netif_set_real_num_tx_queues.
*/
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
int i;
struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
/* Invalidated prio to tc mappings set to TC0 */
for (i = 1; i < TC_BITMASK + 1; i++) {
int q = netdev_get_prio_tc_map(dev, i);
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
if (dev->num_tc) {
struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
int i;
/* walk through the TCs and see if it falls into any of them */
for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
if ((txq - tc->offset) < tc->count)
return i;
}
/* didn't find it, just return -1 to indicate no match */
return -1;
}
return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
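/*
 * Example of the mapping above (hypothetical configuration): with
 * dev->num_tc = 2, tc_to_txq[0] = { .offset = 0, .count = 4 } and
 * tc_to_txq[1] = { .offset = 4, .count = 4 }, a call to
 * netdev_txq_to_tc(dev, 5) returns 1 because queue 5 falls in the
 * range [4, 8) owned by traffic class 1.
 */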
#ifdef CONFIG_XPS
static struct static_key xps_needed __read_mostly;
static struct static_key xps_rxqs_needed __read_mostly;
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P) \
rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
struct xps_dev_maps *old_maps, int tci, u16 index)
{
struct xps_map *map = NULL;
int pos;
map = xmap_dereference(dev_maps->attr_map[tci]);
if (!map)
return false;
for (pos = map->len; pos--;) {
if (map->queues[pos] != index)
continue;
if (map->len > 1) {
map->queues[pos] = map->queues[--map->len];
break;
}
if (old_maps)
RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
kfree_rcu(map, rcu);
return false;
}
return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
struct xps_dev_maps *dev_maps,
int cpu, u16 offset, u16 count)
{
int num_tc = dev_maps->num_tc;
bool active = false;
int tci;
for (tci = cpu * num_tc; num_tc--; tci++) {
int i, j;
for (i = count, j = offset; i--; j++) {
if (!remove_xps_queue(dev_maps, NULL, tci, j))
break;
}
active |= i < 0;
}
return active;
}
static void reset_xps_maps(struct net_device *dev,
struct xps_dev_maps *dev_maps,
enum xps_map_type type)
{
static_key_slow_dec_cpuslocked(&xps_needed);
if (type == XPS_RXQS)
static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
RCU_INIT_POINTER(dev->xps_maps[type], NULL);
kfree_rcu(dev_maps, rcu);
}
static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
u16 offset, u16 count)
{
struct xps_dev_maps *dev_maps;
bool active = false;
int i, j;
dev_maps = xmap_dereference(dev->xps_maps[type]);
if (!dev_maps)
return;
for (j = 0; j < dev_maps->nr_ids; j++)
active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
if (!active)
reset_xps_maps(dev, dev_maps, type);
if (type == XPS_CPUS) {
for (i = offset + (count - 1); count--; i--)
netdev_queue_numa_node_write(
netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
}
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
u16 count)
{
if (!static_key_false(&xps_needed))
return;
cpus_read_lock();
mutex_lock(&xps_map_mutex);
if (static_key_false(&xps_rxqs_needed))
clean_xps_maps(dev, XPS_RXQS, offset, count);
clean_xps_maps(dev, XPS_CPUS, offset, count);
mutex_unlock(&xps_map_mutex);
cpus_read_unlock();
}
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
u16 index, bool is_rxqs_map)
{
struct xps_map *new_map;
int alloc_len = XPS_MIN_MAP_ALLOC;
int i, pos;
for (pos = 0; map && pos < map->len; pos++) {
if (map->queues[pos] != index)
continue;
return map;
}
/* Need to add tx-queue to this CPU's/rx-queue's existing map */
if (map) {
if (pos < map->alloc_len)
return map;
alloc_len = map->alloc_len * 2;
}
/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
* map
*/
if (is_rxqs_map)
new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
else
new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
cpu_to_node(attr_index));
if (!new_map)
return NULL;
for (i = 0; i < pos; i++)
new_map->queues[i] = map->queues[i];
new_map->alloc_len = alloc_len;
new_map->len = pos;
return new_map;
}
/* Copy xps maps at a given index */
static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
struct xps_dev_maps *new_dev_maps, int index,
int tc, bool skip_tc)
{
int i, tci = index * dev_maps->num_tc;
struct xps_map *map;
/* copy maps belonging to foreign traffic classes */
for (i = 0; i < dev_maps->num_tc; i++, tci++) {
if (i == tc && skip_tc)
continue;
/* fill in the new device map from the old device map */
map = xmap_dereference(dev_maps->attr_map[tci]);
RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
}
}
/* Must be called under cpus_read_lock */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
u16 index, enum xps_map_type type)
{
struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
const unsigned long *online_mask = NULL;
bool active = false, copy = false;
int i, j, tci, numa_node_id = -2;
int maps_sz, num_tc = 1, tc = 0;
struct xps_map *map, *new_map;
unsigned int nr_ids;
WARN_ON_ONCE(index >= dev->num_tx_queues);
if (dev->num_tc) {
/* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
if (num_tc < 0)
return -EINVAL;
/* If queue belongs to subordinate dev use its map */
dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
}
mutex_lock(&xps_map_mutex);
dev_maps = xmap_dereference(dev->xps_maps[type]);
if (type == XPS_RXQS) {
maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
nr_ids = dev->num_rx_queues;
} else {
maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
if (num_possible_cpus() > 1)
online_mask = cpumask_bits(cpu_online_mask);
nr_ids = nr_cpu_ids;
}
if (maps_sz < L1_CACHE_BYTES)
maps_sz = L1_CACHE_BYTES;
/* The old dev_maps could be larger or smaller than the one we're
* setting up now, as dev->num_tc or nr_ids could have been updated in
* between. We could try to be smart, but let's be safe instead and only
* copy foreign traffic classes if the two map sizes match.
*/
if (dev_maps &&
dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
copy = true;
/* allocate memory for queue storage */
for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
j < nr_ids;) {
if (!new_dev_maps) {
new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
if (!new_dev_maps) {
mutex_unlock(&xps_map_mutex);
return -ENOMEM;
}
new_dev_maps->nr_ids = nr_ids;
new_dev_maps->num_tc = num_tc;
}
tci = j * num_tc + tc;
map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
map = expand_xps_map(map, j, index, type == XPS_RXQS);
if (!map)
goto error;
RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
}
if (!new_dev_maps)
goto out_no_new_maps;
if (!dev_maps) {
/* Increment static keys at most once per type */
static_key_slow_inc_cpuslocked(&xps_needed);
if (type == XPS_RXQS)
static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
}
for (j = 0; j < nr_ids; j++) {
bool skip_tc = false;
tci = j * num_tc + tc;
if (netif_attr_test_mask(j, mask, nr_ids) &&
netif_attr_test_online(j, online_mask, nr_ids)) {
/* add tx-queue to CPU/rx-queue maps */
int pos = 0;
skip_tc = true;
map = xmap_dereference(new_dev_maps->attr_map[tci]);
while ((pos < map->len) && (map->queues[pos] != index))
pos++;
if (pos == map->len)
map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
if (type == XPS_CPUS) {
if (numa_node_id == -2)
numa_node_id = cpu_to_node(j);
else if (numa_node_id != cpu_to_node(j))
numa_node_id = -1;
}
#endif
}
if (copy)
xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
skip_tc);
}
rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
/* Cleanup old maps */
if (!dev_maps)
goto out_no_old_maps;
for (j = 0; j < dev_maps->nr_ids; j++) {
for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
map = xmap_dereference(dev_maps->attr_map[tci]);
if (!map)
continue;
if (copy) {
new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
if (map == new_map)
continue;
}
RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
kfree_rcu(map, rcu);
}
}
old_dev_maps = dev_maps;
out_no_old_maps:
dev_maps = new_dev_maps;
active = true;
out_no_new_maps:
if (type == XPS_CPUS)
/* update Tx queue numa node */
netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
(numa_node_id >= 0) ?
numa_node_id : NUMA_NO_NODE);
if (!dev_maps)
goto out_no_maps;
/* removes tx-queue from unused CPUs/rx-queues */
for (j = 0; j < dev_maps->nr_ids; j++) {
tci = j * dev_maps->num_tc;
for (i = 0; i < dev_maps->num_tc; i++, tci++) {
if (i == tc &&
netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
continue;
active |= remove_xps_queue(dev_maps,
copy ? old_dev_maps : NULL,
tci, index);
}
}
if (old_dev_maps)
kfree_rcu(old_dev_maps, rcu);
/* free map if not active */
if (!active)
reset_xps_maps(dev, dev_maps, type);
out_no_maps:
mutex_unlock(&xps_map_mutex);
return 0;
error:
/* remove any maps that we added */
for (j = 0; j < nr_ids; j++) {
for (i = num_tc, tci = j * num_tc; i--; tci++) {
new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
map = copy ?
xmap_dereference(dev_maps->attr_map[tci]) :
NULL;
if (new_map && new_map != map)
kfree(new_map);
}
}
mutex_unlock(&xps_map_mutex);
kfree(new_dev_maps);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index)
{
int ret;
cpus_read_lock();
ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif
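/*
 * Illustrative sketch (hypothetical driver; real drivers usually derive the
 * mask from IRQ affinity): with CONFIG_XPS enabled, steer transmissions on
 * TX queue 0 to CPUs 0 and 1:
 *
 *        cpumask_var_t mask;
 *
 *        if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *                cpumask_set_cpu(0, mask);
 *                cpumask_set_cpu(1, mask);
 *                netif_set_xps_queue(dev, mask, 0);
 *                free_cpumask_var(mask);
 *        }
 */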
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
/* Unbind any subordinate channels */
while (txq-- != &dev->_tx[0]) {
if (txq->sb_dev)
netdev_unbind_sb_channel(dev, txq->sb_dev);
}
}
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, 0);
#endif
netdev_unbind_all_sb_channels(dev);
/* Reset TC configuration of device */
dev->num_tc = 0;
memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
if (tc >= dev->num_tc)
return -EINVAL;
#ifdef CONFIG_XPS
netif_reset_xps_queues(dev, offset, count);
#endif
dev->tc_to_txq[tc].count = count;
dev->tc_to_txq[tc].offset = offset;
return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
if (num_tc > TC_MAX_QUEUE)
return -EINVAL;
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, 0);
#endif
netdev_unbind_all_sb_channels(dev);
dev->num_tc = num_tc;
return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
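/*
 * Illustrative sketch (hypothetical mqprio-style setup): a driver exposing
 * two traffic classes of four queues each could configure the tc_to_txq
 * table as:
 *
 *        netdev_set_num_tc(dev, 2);
 *        netdev_set_tc_queue(dev, 0, 4, 0);
 *        netdev_set_tc_queue(dev, 1, 4, 4);
 *
 * i.e. queues 0-3 serve TC0 and queues 4-7 serve TC1.
 */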
void netdev_unbind_sb_channel(struct net_device *dev,
struct net_device *sb_dev)
{
struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(sb_dev, 0);
#endif
memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
while (txq-- != &dev->_tx[0]) {
if (txq->sb_dev == sb_dev)
txq->sb_dev = NULL;
}
}
EXPORT_SYMBOL(netdev_unbind_sb_channel);
int netdev_bind_sb_channel_queue(struct net_device *dev,
struct net_device *sb_dev,
u8 tc, u16 count, u16 offset)
{
/* Make certain the sb_dev and dev are already configured */
if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
return -EINVAL;
/* We cannot hand out queues we don't have */
if ((offset + count) > dev->real_num_tx_queues)
return -EINVAL;
/* Record the mapping */
sb_dev->tc_to_txq[tc].count = count;
sb_dev->tc_to_txq[tc].offset = offset;
/* Provide a way for Tx queue to find the tc_to_txq map or
* XPS map for itself.
*/
while (count--)
netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
return 0;
}
EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
/* Do not use a multiqueue device to represent a subordinate channel */
if (netif_is_multiqueue(dev))
return -ENODEV;
/* We allow channels 1 - 32767 to be used for subordinate channels.
* Channel 0 is meant to be "native" mode and used only to represent
* the main root device. We allow writing 0 to reset the device back
* to normal mode after being used as a subordinate channel.
*/
if (channel > S16_MAX)
return -EINVAL;
dev->num_tc = -channel;
return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater than or equal to the new real_num_tx_queues, stale skbs on the
* qdisc must be flushed.
*/
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
bool disabling;
int rc;
disabling = txq < dev->real_num_tx_queues;
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED ||
dev->reg_state == NETREG_UNREGISTERING) {
netdev_ops_assert_locked(dev);
rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
txq);
if (rc)
return rc;
if (dev->num_tc)
netif_setup_tc(dev, txq);
net_shaper_set_real_num_tx_queues(dev, txq);
dev_qdisc_change_real_num_tx(dev, txq);
dev->real_num_tx_queues = txq;
if (disabling) {
synchronize_net();
qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, txq);
#endif
}
} else {
dev->real_num_tx_queues = txq;
}
return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
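/*
 * Illustrative sketch: a driver that allocated eight queues up front, e.g.
 * with alloc_etherdev_mq(sizeof(*priv), 8), but only provisioned four rings
 * after probing the hardware would shrink the active set with:
 *
 *        err = netif_set_real_num_tx_queues(dev, 4);
 *
 * (sizeof(*priv) and the ring counts are hypothetical values.)
 */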
/**
* netif_set_real_num_rx_queues - set actual number of RX queues used
* @dev: Network device
* @rxq: Actual number of RX queues
*
* This must be called either with the rtnl_lock held or before
* registration of the net device. Returns 0 on success, or a
* negative error code. If called before registration, it always
* succeeds.
*/
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
int rc;
if (rxq < 1 || rxq > dev->num_rx_queues)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED) {
netdev_ops_assert_locked(dev);
rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
rxq);
if (rc)
return rc;
}
dev->real_num_rx_queues = rxq;
return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
* netif_set_real_num_queues - set actual number of RX and TX queues used
* @dev: Network device
* @txq: Actual number of TX queues
* @rxq: Actual number of RX queues
*
* Set the real number of both TX and RX queues.
* Does nothing if the number of queues is already correct.
*/
int netif_set_real_num_queues(struct net_device *dev,
unsigned int txq, unsigned int rxq)
{
unsigned int old_rxq = dev->real_num_rx_queues;
int err;
if (txq < 1 || txq > dev->num_tx_queues ||
rxq < 1 || rxq > dev->num_rx_queues)
return -EINVAL;
/* Start from increases, so the error path only does decreases -
* decreases can't fail.
*/
if (rxq > dev->real_num_rx_queues) {
err = netif_set_real_num_rx_queues(dev, rxq);
if (err)
return err;
}
if (txq > dev->real_num_tx_queues) {
err = netif_set_real_num_tx_queues(dev, txq);
if (err)
goto undo_rx;
}
if (rxq < dev->real_num_rx_queues)
WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
if (txq < dev->real_num_tx_queues)
WARN_ON(netif_set_real_num_tx_queues(dev, txq));
return 0;
undo_rx:
WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
return err;
}
EXPORT_SYMBOL(netif_set_real_num_queues);
/**
* netif_set_tso_max_size() - set the max size of TSO frames supported
* @dev: netdev to update
* @size: max skb->len of a TSO frame
*
* Set the limit on the size of TSO super-frames the device can handle.
* Unless explicitly set the stack will assume the value of
* %GSO_LEGACY_MAX_SIZE.
*/
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{
dev->tso_max_size = min(GSO_MAX_SIZE, size);
if (size < READ_ONCE(dev->gso_max_size))
netif_set_gso_max_size(dev, size);
if (size < READ_ONCE(dev->gso_ipv4_max_size))
netif_set_gso_ipv4_max_size(dev, size);
}
EXPORT_SYMBOL(netif_set_tso_max_size);
/**
* netif_set_tso_max_segs() - set the max number of segs supported for TSO
* @dev: netdev to update
* @segs: max number of TCP segments
*
* Set the limit on the number of TCP segments the device can generate from
* a single TSO super-frame.
* Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
*/
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
{
dev->tso_max_segs = segs;
if (segs < READ_ONCE(dev->gso_max_segs))
netif_set_gso_max_segs(dev, segs);
}
EXPORT_SYMBOL(netif_set_tso_max_segs);
/**
* netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
* @to: netdev to update
* @from: netdev from which to copy the limits
*/
void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
{
netif_set_tso_max_size(to, from->tso_max_size);
netif_set_tso_max_segs(to, from->tso_max_segs);
}
EXPORT_SYMBOL(netif_inherit_tso_max);
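/*
 * Illustrative sketch: stacking drivers (bonding, bridge and the like)
 * typically propagate the limits of a newly attached lower device to the
 * upper device, roughly:
 *
 *        netif_inherit_tso_max(upper_dev, lower_dev);
 *
 * upper_dev/lower_dev are placeholder names for the stacked pair.
 */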
/**
* netif_get_num_default_rss_queues - default number of RSS queues
*
* Default value is the number of physical cores if there are only 1 or 2, or
* divided by 2 if there are more.
*/
int netif_get_num_default_rss_queues(void)
{
cpumask_var_t cpus;
int cpu, count = 0;
if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
return 1;
cpumask_copy(cpus, cpu_online_mask);
for_each_cpu(cpu, cpus) {
++count;
cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
}
free_cpumask_var(cpus);
return count > 2 ? DIV_ROUND_UP(count, 2) : count;
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
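/*
 * Illustrative sketch: drivers commonly clamp this default against what the
 * hardware supports when sizing their channels, e.g.
 *
 *        nq = min_t(int, netif_get_num_default_rss_queues(), hw_max_queues);
 *
 * where hw_max_queues is a hypothetical per-device limit.
 */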
static void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
unsigned long flags;
local_irq_save(flags);
sd = this_cpu_ptr(&softnet_data);
q->next_sched = NULL;
*sd->output_queue_tailp = q;
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
void __netif_schedule(struct Qdisc *q)
{
if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
enum skb_drop_reason reason;
};
static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
return (struct dev_kfree_skb_cb *)skb->cb;
}
void netif_schedule_queue(struct netdev_queue *txq)
{
rcu_read_lock();
if (!netif_xmit_stopped(txq)) {
struct Qdisc *q = rcu_dereference(txq->qdisc);
__netif_schedule(q);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);
void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
struct Qdisc *q;
rcu_read_lock();
q = rcu_dereference(dev_queue->qdisc);
__netif_schedule(q);
rcu_read_unlock();
}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
unsigned long flags;
if (unlikely(!skb))
return;
if (likely(refcount_read(&skb->users) == 1)) {
smp_rmb();
refcount_set(&skb->users, 0);
} else if (likely(!refcount_dec_and_test(&skb->users))) {
return;
}
get_kfree_skb_cb(skb)->reason = reason;
local_irq_save(flags);
skb->next = __this_cpu_read(softnet_data.completion_queue);
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
if (in_hardirq() || irqs_disabled())
dev_kfree_skb_irq_reason(skb, reason);
else
kfree_skb_reason(skb, reason);
}
EXPORT_SYMBOL(dev_kfree_skb_any_reason);
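/*
 * Illustrative sketch: a driver dropping a packet from a context that may be
 * hard IRQ (for instance a TX error path in an interrupt handler) can use
 * the _any variant so the correct free path is chosen automatically:
 *
 *        dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
 */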
/**
* netif_device_detach - mark device as removed
* @dev: network device
*
* Mark device as removed from system and therefore no longer available.
*/
void netif_device_detach(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_tx_stop_all_queues(dev);
}
}
EXPORT_SYMBOL(netif_device_detach);
/**
* netif_device_attach - mark device as attached
* @dev: network device
*
* Mark device as attached to the system and restart if needed.
*/
void netif_device_attach(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_tx_wake_all_queues(dev);
netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_device_attach);
/*
* Returns a Tx hash based on the given packet descriptor and the number of
* Tx queues to be used as a distribution range.
*/
static u16 skb_tx_hash(const struct net_device *dev,
const struct net_device *sb_dev,
struct sk_buff *skb)
{
u32 hash;
u16 qoffset = 0;
u16 qcount = dev->real_num_tx_queues;
if (dev->num_tc) {
u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
qoffset = sb_dev->tc_to_txq[tc].offset;
qcount = sb_dev->tc_to_txq[tc].count;
if (unlikely(!qcount)) {
net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
sb_dev->name, qoffset, tc);
qoffset = 0;
qcount = dev->real_num_tx_queues;
}
}
if (skb_rx_queue_recorded(skb)) {
DEBUG_NET_WARN_ON_ONCE(qcount == 0);
hash = skb_get_rx_queue(skb);
if (hash >= qoffset)
hash -= qoffset;
while (unlikely(hash >= qcount))
hash -= qcount;
return hash + qoffset;
}
return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
void skb_warn_bad_offload(const struct sk_buff *skb)
{
static const netdev_features_t null_features;
struct net_device *dev = skb->dev;
const char *name = "";
if (!net_ratelimit())
return;
if (dev) {
if (dev->dev.parent)
name = dev_driver_string(dev->dev.parent);
else
name = netdev_name(dev);
}
skb_dump(KERN_WARNING, skb, false);
WARN(1, "%s: caps=(%pNF, %pNF)\n",
name, dev ? &dev->features : &null_features,
skb->sk ? &skb->sk->sk_route_caps : &null_features);
}
/*
* Invalidate hardware checksum when packet is to be mangled, and
* complete checksum manually on outgoing path.
*/
int skb_checksum_help(struct sk_buff *skb)
{
__wsum csum;
int ret = 0, offset;
if (skb->ip_summed == CHECKSUM_COMPLETE)
goto out_set_summed;
if (unlikely(skb_is_gso(skb))) {
skb_warn_bad_offload(skb);
return -EINVAL;
}
if (!skb_frags_readable(skb)) {
return -EFAULT;
}
/* Before computing a checksum, we should make sure no frag could
* be modified by an external entity : checksum could be wrong.
*/
if (skb_has_shared_frag(skb)) {
ret = __skb_linearize(skb);
if (ret)
goto out;
}
offset = skb_checksum_start_offset(skb);
ret = -EINVAL;
if (unlikely(offset >= skb_headlen(skb))) {
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
offset, skb_headlen(skb));
goto out;
}
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
offset + sizeof(__sum16), skb_headlen(skb));
goto out;
}
ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
if (ret)
goto out;
*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
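/*
 * Illustrative sketch: a driver whose hardware cannot checksum a given
 * packet can fall back to this helper before handing the skb to the
 * hardware:
 *
 *        if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *            skb_checksum_help(skb))
 *                goto drop;
 *
 * (the drop label is hypothetical.)
 */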
#ifdef CONFIG_NET_CRC32C
int skb_crc32c_csum_help(struct sk_buff *skb)
{
u32 crc;
int ret = 0, offset, start;
if (skb->ip_summed != CHECKSUM_PARTIAL)
goto out;
if (unlikely(skb_is_gso(skb)))
goto out;
/* Before computing a checksum, we should make sure no frag could
* be modified by an external entity : checksum could be wrong.
*/
if (unlikely(skb_has_shared_frag(skb))) {
ret = __skb_linearize(skb);
if (ret)
goto out;
}
start = skb_checksum_start_offset(skb);
offset = start + offsetof(struct sctphdr, checksum);
if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
ret = -EINVAL;
goto out;
}
ret = skb_ensure_writable(skb, offset + sizeof(__le32));
if (ret)
goto out;
crc = ~skb_crc32c(skb, start, skb->len - start, ~0);
*(__le32 *)(skb->data + offset) = cpu_to_le32(crc);
skb_reset_csum_not_inet(skb);
out:
return ret;
}
EXPORT_SYMBOL(skb_crc32c_csum_help);
#endif /* CONFIG_NET_CRC32C */
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
__be16 type = skb->protocol;
/* Tunnel gso handlers can set protocol to ethernet. */
if (type == htons(ETH_P_TEB)) {
struct ethhdr *eth;
if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
return 0;
eth = (struct ethhdr *)skb->data;
type = eth->h_proto;
}
return vlan_get_protocol_and_depth(skb, type, depth);
}
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
netdev_err(dev, "hw csum failure\n");
skb_dump(KERN_ERR, skb, true);
dump_stack();
}
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = skb_frag_page(frag);
if (page && PageHighMem(page))
return 1;
}
}
#endif
return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
* instead of standard features for the netdev.
*/
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{
if (eth_p_mpls(type))
features &= skb->dev->mpls_features;
return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{
return features;
}
#endif
static netdev_features_t harmonize_features(struct sk_buff *skb,
netdev_features_t features)
{
__be16 type;
type = skb_network_protocol(skb, NULL);
features = net_mpls_features(skb, features, type);
if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, type)) {
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
if (illegal_highdma(skb->dev, skb))
features &= ~NETIF_F_SG;
return features;
}
netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
return features;
}
EXPORT_SYMBOL(passthru_features_check);
static netdev_features_t dflt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
return vlan_features_check(skb, features);
}
static netdev_features_t gso_features_check(const struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
u16 gso_segs = skb_shinfo(skb)->gso_segs;
if (gso_segs > READ_ONCE(dev->gso_max_segs))
return features & ~NETIF_F_GSO_MASK;
if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
return features & ~NETIF_F_GSO_MASK;
if (!skb_shinfo(skb)->gso_type) {
skb_warn_bad_offload(skb);
return features & ~NETIF_F_GSO_MASK;
}
/* Support for GSO partial features requires software
* intervention before we can actually process the packets
* so we need to strip support for any partial features now
* and we can pull them back in after we have partially
* segmented the frame.
*/
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
features &= ~dev->gso_partial_features;
/* Make sure to clear the IPv4 ID mangling feature if the IPv4 header
* has the potential to be fragmented so that TSO does not generate
* segments with the same ID. For encapsulated packets, the ID mangling
* feature is guaranteed not to use the same ID for the outer IPv4
* headers of the generated segments if the headers have the potential
* to be fragmented, so there is no need to clear the IPv4 ID mangling
* feature (see the section about NETIF_F_TSO_MANGLEID in
* segmentation-offloads.rst).
*/
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
struct iphdr *iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
if (!(iph->frag_off & htons(IP_DF)))
features &= ~NETIF_F_TSO_MANGLEID;
}
/* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
* so neither does TSO that depends on it.
*/
if (features & NETIF_F_IPV6_CSUM &&
(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
skb_transport_header_was_set(skb) &&
skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
!ipv6_has_hopopt_jumbo(skb))
features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
netdev_features_t features = dev->features;
if (skb_is_gso(skb))
features = gso_features_check(skb, dev, features);
/* If encapsulation offload request, verify we are testing
* hardware encapsulation features instead of standard
* features for the netdev
*/
if (skb->encapsulation)
features &= dev->hw_enc_features;
if (skb_vlan_tagged(skb))
features = netdev_intersect_features(features,
dev->vlan_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
if (dev->netdev_ops->ndo_features_check)
features &= dev->netdev_ops->ndo_features_check(skb, dev,
features);
else
features &= dflt_features_check(skb, dev, features);
return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{
unsigned int len;
int rc;
if (dev_nit_active_rcu(dev))
dev_queue_xmit_nit(skb, dev);
len = skb->len;
trace_net_dev_start_xmit(skb, dev);
rc = netdev_start_xmit(skb, dev, txq, more);
trace_net_dev_xmit(skb, rc, dev, len);
return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
struct netdev_queue *txq, int *ret)
{
struct sk_buff *skb = first;
int rc = NETDEV_TX_OK;
while (skb) {
struct sk_buff *next = skb->next;
skb_mark_not_on_list(skb);
rc = xmit_one(skb, dev, txq, next != NULL);
if (unlikely(!dev_xmit_complete(rc))) {
skb->next = next;
goto out;
}
skb = next;
if (netif_tx_queue_stopped(txq) && skb) {
rc = NETDEV_TX_BUSY;
break;
}
}
out:
*ret = rc;
return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto))
skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features)
{
if (unlikely(skb_csum_is_sctp(skb)))
return !!(features & NETIF_F_SCTP_CRC) ? 0 :
skb_crc32c_csum_help(skb);
if (features & NETIF_F_HW_CSUM)
return 0;
if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
!ipv6_has_hopopt_jumbo(skb))
goto sw_checksum;
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
case offsetof(struct udphdr, check):
return 0;
}
}
sw_checksum:
return skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
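/*
 * Usage note: skb_csum_hwoffload_help() is meant to run on the transmit
 * path once the effective device features for this skb are known (see
 * validate_xmit_skb() below). A minimal, illustrative call site, assuming
 * a local "drop" error label, might look like:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_csum_hwoffload_help(skb, features))
 *		goto drop;
 */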
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
*/
static struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
struct net_device *dev)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
struct sk_buff *(*sk_validate)(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
struct sock *sk = skb->sk;
sk_validate = NULL;
if (sk) {
if (sk_fullsock(sk))
sk_validate = sk->sk_validate_xmit_skb;
else if (sk_is_inet(sk) && sk->sk_state == TCP_TIME_WAIT)
sk_validate = inet_twsk(sk)->tw_validate_xmit_skb;
}
if (sk_validate) {
skb = sk_validate(sk, dev, skb);
} else if (unlikely(skb_is_decrypted(skb))) {
pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
kfree_skb(skb);
skb = NULL;
}
#endif
return skb;
}
static struct sk_buff *validate_xmit_unreadable_skb(struct sk_buff *skb,
struct net_device *dev)
{
struct skb_shared_info *shinfo;
struct net_iov *niov;
if (likely(skb_frags_readable(skb)))
goto out;
if (!dev->netmem_tx)
goto out_free;
shinfo = skb_shinfo(skb);
if (shinfo->nr_frags > 0) {
niov = netmem_to_net_iov(skb_frag_netmem(&shinfo->frags[0]));
if (net_is_devmem_iov(niov) && net_devmem_iov_binding(niov)->dev != dev)
goto out_free;
}
out:
return skb;
out_free:
kfree_skb(skb);
return NULL;
}
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
netdev_features_t features;
skb = validate_xmit_unreadable_skb(skb, dev);
if (unlikely(!skb))
goto out_null;
features = netif_skb_features(skb);
skb = validate_xmit_vlan(skb, features);
if (unlikely(!skb))
goto out_null;
skb = sk_validate_xmit_skb(skb, dev);
if (unlikely(!skb))
goto out_null;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs)) {
goto out_kfree_skb;
} else if (segs) {
consume_skb(skb);
skb = segs;
}
} else {
if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
goto out_kfree_skb;
/* If packet is not checksummed and device does not
* support checksumming for this protocol, complete
* checksumming here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->encapsulation)
skb_set_inner_transport_header(skb,
skb_checksum_start_offset(skb));
else
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
if (skb_csum_hwoffload_help(skb, features))
goto out_kfree_skb;
}
}
skb = validate_xmit_xfrm(skb, features, again);
return skb;
out_kfree_skb:
kfree_skb(skb);
out_null:
dev_core_stats_tx_dropped_inc(dev);
return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
struct sk_buff *next, *head = NULL, *tail;
for (; skb != NULL; skb = next) {
next = skb->next;
skb_mark_not_on_list(skb);
/* in case skb won't be segmented, point to itself */
skb->prev = skb;
skb = validate_xmit_skb(skb, dev, again);
if (!skb)
continue;
if (!head)
head = skb;
else
tail->next = skb;
/* If skb was segmented, skb->prev points to
* the last segment. If not, it still contains skb.
*/
tail = skb->prev;
}
return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
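/*
 * Usage note: callers that bypass the qdisc layer (for example
 * __dev_direct_xmit() further below) run their skb list through
 * validate_xmit_skb_list() first, so GSO segmentation, VLAN insertion and
 * checksum fallback still happen before the driver sees the packets.
 * Illustrative sketch, assuming an already prepared skb list:
 *
 *	skb = validate_xmit_skb_list(skb, dev, &again);
 *	if (!skb)
 *		return NET_XMIT_DROP;
 */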
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
qdisc_skb_cb(skb)->pkt_len = skb->len;
/* To get more precise estimation of bytes sent on wire,
* we add to pkt_len the headers size of all segments
*/
if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
u16 gso_segs = shinfo->gso_segs;
unsigned int hdr_len;
/* mac layer + network layer */
if (!skb->encapsulation)
hdr_len = skb_transport_offset(skb);
else
hdr_len = skb_inner_transport_offset(skb);
/* + transport layer */
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
const struct tcphdr *th;
struct tcphdr _tcphdr;
th = skb_header_pointer(skb, hdr_len,
sizeof(_tcphdr), &_tcphdr);
if (likely(th))
hdr_len += __tcp_hdrlen(th);
} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
struct udphdr _udphdr;
if (skb_header_pointer(skb, hdr_len,
sizeof(_udphdr), &_udphdr))
hdr_len += sizeof(struct udphdr);
}
if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
int payload = skb->len - hdr_len;
/* Malicious packet. */
if (payload <= 0)
return;
gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
}
qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
}
}
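/*
 * Worked example (illustrative numbers): a TSO skb with skb->len = 14546,
 * gso_size = 1448 and a 66 byte MAC+IP+TCP header resolves to
 * gso_segs = 10, so pkt_len becomes 14546 + (10 - 1) * 66 = 15140, i.e.
 * the estimated on-wire bytes including the headers replicated into
 * every generated segment.
 */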
static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
struct sk_buff **to_free,
struct netdev_queue *txq)
{
int rc;
rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
if (rc == NET_XMIT_SUCCESS)
trace_qdisc_enqueue(q, txq, skb);
return rc;
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
struct sk_buff *to_free = NULL;
bool contended;
int rc;
qdisc_calculate_pkt_len(skb, q);
tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP);
if (q->flags & TCQ_F_NOLOCK) {
if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && qdisc_run_begin(q)) {
/* Retest nolock_qdisc_is_empty() within the protection
* of q->seqlock to protect from racing with requeuing.
*/
if (unlikely(!nolock_qdisc_is_empty(q))) {
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
__qdisc_run(q);
qdisc_run_end(q);
goto no_lock_out;
}
qdisc_bstats_cpu_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
!nolock_qdisc_is_empty(q))
__qdisc_run(q);
qdisc_run_end(q);
return NET_XMIT_SUCCESS;
}
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
qdisc_run(q);
no_lock_out:
if (unlikely(to_free))
kfree_skb_list_reason(to_free,
tcf_get_drop_reason(to_free));
return rc;
}
if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
return NET_XMIT_DROP;
}
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
* On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
* and then other tasks will only enqueue packets. The packets will be
* sent after the qdisc owner is scheduled again. To prevent this
* scenario the task always serialize on the lock.
*/
contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
if (unlikely(contended))
spin_lock(&q->busylock);
spin_lock(root_lock);
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
__qdisc_drop(skb, &to_free);
rc = NET_XMIT_DROP;
} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
qdisc_run_begin(q)) {
/*
* This is a work-conserving queue; there are no old skbs
* waiting to be sent out; and the qdisc is not running -
* xmit the skb directly.
*/
qdisc_bstats_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
contended = false;
}
__qdisc_run(q);
}
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
WRITE_ONCE(q->owner, smp_processor_id());
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
WRITE_ONCE(q->owner, -1);
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
contended = false;
}
__qdisc_run(q);
qdisc_run_end(q);
}
}
spin_unlock(root_lock);
if (unlikely(to_free))
kfree_skb_list_reason(to_free,
tcf_get_drop_reason(to_free));
if (unlikely(contended))
spin_unlock(&q->busylock);
return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
const struct netprio_map *map;
const struct sock *sk;
unsigned int prioidx;
if (skb->priority)
return;
map = rcu_dereference_bh(skb->dev->priomap);
if (!map)
return;
sk = skb_to_full_sk(skb);
if (!sk)
return;
prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
if (prioidx < map->priomap_len)
skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif
/**
* dev_loopback_xmit - loop back @skb
* @net: network namespace this loopback is happening in
* @sk: sk needed to be a netfilter okfn
* @skb: buffer to transmit
*/
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
skb->pkt_type = PACKET_LOOPBACK;
if (skb->ip_summed == CHECKSUM_NONE)
skb->ip_summed = CHECKSUM_UNNECESSARY;
DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
skb_dst_force(skb);
netif_rx(skb);
return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_NET_EGRESS
static struct netdev_queue *
netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
{
int qm = skb_get_queue_mapping(skb);
return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
}
#ifndef CONFIG_PREEMPT_RT
static bool netdev_xmit_txqueue_skipped(void)
{
return __this_cpu_read(softnet_data.xmit.skip_txqueue);
}
void netdev_xmit_skip_txqueue(bool skip)
{
__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#else
static bool netdev_xmit_txqueue_skipped(void)
{
return current->net_xmit.skip_txqueue;
}
void netdev_xmit_skip_txqueue(bool skip)
{
current->net_xmit.skip_txqueue = skip;
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#endif
#endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_NET_XGRESS
static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
enum skb_drop_reason *drop_reason)
{
int ret = TC_ACT_UNSPEC;
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
struct tcf_result res;
if (!miniq)
return ret;
/* Global bypass */
if (!static_branch_likely(&tcf_sw_enabled_key))
return ret;
/* Block-wise bypass */
if (tcf_block_bypass_sw(miniq->block))
return ret;
tc_skb_cb(skb)->mru = 0;
tc_skb_cb(skb)->post_ct = false;
tcf_set_drop_reason(skb, *drop_reason);
mini_qdisc_bstats_cpu_update(miniq, skb);
ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
/* Only tcf related quirks below. */
switch (ret) {
case TC_ACT_SHOT:
*drop_reason = tcf_get_drop_reason(skb);
mini_qdisc_qstats_cpu_drop(miniq);
break;
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(res.classid);
break;
}
#endif /* CONFIG_NET_CLS_ACT */
return ret;
}
static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
void tcx_inc(void)
{
static_branch_inc(&tcx_needed_key);
}
void tcx_dec(void)
{
static_branch_dec(&tcx_needed_key);
}
static __always_inline enum tcx_action_base
tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
const bool needs_mac)
{
const struct bpf_mprog_fp *fp;
const struct bpf_prog *prog;
int ret = TCX_NEXT;
if (needs_mac)
__skb_push(skb, skb->mac_len);
bpf_mprog_foreach_prog(entry, fp, prog) {
bpf_compute_data_pointers(skb);
ret = bpf_prog_run(prog, skb);
if (ret != TCX_NEXT)
break;
}
if (needs_mac)
__skb_pull(skb, skb->mac_len);
return tcx_action_code(skb, ret);
}
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
struct net_device *orig_dev, bool *another)
{
struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int sch_ret;
if (!entry)
return skb;
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
qdisc_skb_cb(skb)->pkt_len = skb->len;
tcx_set_ingress(skb, true);
if (static_branch_unlikely(&tcx_needed_key)) {
sch_ret = tcx_run(entry, skb, true);
if (sch_ret != TC_ACT_UNSPEC)
goto ingress_verdict;
}
sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
ingress_verdict:
switch (sch_ret) {
case TC_ACT_REDIRECT:
/* skb_mac_header check was done by BPF, so we can safely
* push the L2 header back before redirecting to another
* netdev.
*/
__skb_push(skb, skb->mac_len);
if (skb_do_redirect(skb) == -EAGAIN) {
__skb_pull(skb, skb->mac_len);
*another = true;
break;
}
*ret = NET_RX_SUCCESS;
bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
case TC_ACT_SHOT:
kfree_skb_reason(skb, drop_reason);
*ret = NET_RX_DROP;
bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
/* used by tc_run */
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
consume_skb(skb);
fallthrough;
case TC_ACT_CONSUMED:
*ret = NET_RX_SUCCESS;
bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
}
bpf_net_ctx_clear(bpf_net_ctx);
return skb;
}
static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int sch_ret;
if (!entry)
return skb;
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
/* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
* already set by the caller.
*/
if (static_branch_unlikely(&tcx_needed_key)) {
sch_ret = tcx_run(entry, skb, false);
if (sch_ret != TC_ACT_UNSPEC)
goto egress_verdict;
}
sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
egress_verdict:
switch (sch_ret) {
case TC_ACT_REDIRECT:
/* No need to push/pop skb's mac_header here on egress! */
skb_do_redirect(skb);
*ret = NET_XMIT_SUCCESS;
bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
case TC_ACT_SHOT:
kfree_skb_reason(skb, drop_reason);
*ret = NET_XMIT_DROP;
bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
/* used by tc_run */
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
consume_skb(skb);
fallthrough;
case TC_ACT_CONSUMED:
*ret = NET_XMIT_SUCCESS;
bpf_net_ctx_clear(bpf_net_ctx);
return NULL;
}
bpf_net_ctx_clear(bpf_net_ctx);
return skb;
}
#else
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
struct net_device *orig_dev, bool *another)
{
return skb;
}
static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
return skb;
}
#endif /* CONFIG_NET_XGRESS */
#ifdef CONFIG_XPS
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
struct xps_dev_maps *dev_maps, unsigned int tci)
{
int tc = netdev_get_prio_tc_map(dev, skb->priority);
struct xps_map *map;
int queue_index = -1;
if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
return queue_index;
tci *= dev_maps->num_tc;
tci += tc;
map = rcu_dereference(dev_maps->attr_map[tci]);
if (map) {
if (map->len == 1)
queue_index = map->queues[0];
else
queue_index = map->queues[reciprocal_scale(
skb_get_hash(skb), map->len)];
if (unlikely(queue_index >= dev->real_num_tx_queues))
queue_index = -1;
}
return queue_index;
}
#endif
static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
struct sk_buff *skb)
{
#ifdef CONFIG_XPS
struct xps_dev_maps *dev_maps;
struct sock *sk = skb->sk;
int queue_index = -1;
if (!static_key_false(&xps_needed))
return -1;
rcu_read_lock();
if (!static_key_false(&xps_rxqs_needed))
goto get_cpus_map;
dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
if (dev_maps) {
int tci = sk_rx_queue_get(sk);
if (tci >= 0)
queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
tci);
}
get_cpus_map:
if (queue_index < 0) {
dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
if (dev_maps) {
unsigned int tci = skb->sender_cpu - 1;
queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
tci);
}
}
rcu_read_unlock();
return queue_index;
#else
return -1;
#endif
}
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
return 0;
}
EXPORT_SYMBOL(dev_pick_tx_zero);
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
sb_dev = sb_dev ? : dev;
if (queue_index < 0 || skb->ooo_okay ||
queue_index >= dev->real_num_tx_queues) {
int new_index = get_xps_queue(dev, sb_dev, skb);
if (new_index < 0)
new_index = skb_tx_hash(dev, sb_dev, skb);
if (queue_index != new_index && sk &&
sk_fullsock(sk) &&
rcu_access_pointer(sk->sk_dst_cache))
sk_tx_queue_set(sk, new_index);
queue_index = new_index;
}
return queue_index;
}
EXPORT_SYMBOL(netdev_pick_tx);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev)
{
int queue_index = 0;
#ifdef CONFIG_XPS
u32 sender_cpu = skb->sender_cpu - 1;
if (sender_cpu >= (u32)NR_CPUS)
skb->sender_cpu = raw_smp_processor_id() + 1;
#endif
if (dev->real_num_tx_queues != 1) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
else
queue_index = netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
skb_set_queue_mapping(skb, queue_index);
return netdev_get_tx_queue(dev, queue_index);
}
/**
* __dev_queue_xmit() - transmit a buffer
* @skb: buffer to transmit
* @sb_dev: subordinate device used for L2 forwarding offload
*
* Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling
* this function. The function can be called from an interrupt.
*
* When calling this method, interrupts MUST be enabled. This is because
* the BH enable code must have IRQs enabled so that it will not deadlock.
*
* Regardless of the return value, the skb is consumed, so it is currently
* difficult to retry a send to this method. (You can bump the ref count
* before sending to hold a reference for retry if you are careful.)
*
* Return:
* * 0 - buffer successfully transmitted
* * positive qdisc return code - NET_XMIT_DROP etc.
* * negative errno - other errors
*/
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq = NULL;
struct Qdisc *q;
int rc = -ENOMEM;
bool again = false;
skb_reset_mac_header(skb);
skb_assert_len(skb);
if (unlikely(skb_shinfo(skb)->tx_flags &
(SKBTX_SCHED_TSTAMP | SKBTX_BPF)))
__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
rcu_read_lock_bh();
skb_update_prio(skb);
qdisc_pkt_len_init(skb);
tcx_set_ingress(skb, false);
#ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {
if (nf_hook_egress_active()) {
skb = nf_hook_egress(skb, &rc, dev);
if (!skb)
goto out;
}
netdev_xmit_skip_txqueue(false);
nf_skip_egress(skb, true);
skb = sch_handle_egress(skb, &rc, dev);
if (!skb)
goto out;
nf_skip_egress(skb, false);
if (netdev_xmit_txqueue_skipped())
txq = netdev_tx_queue_mapping(dev, skb);
}
#endif
/* If device/qdisc don't need skb->dst, release it right now while
* it's hot in this cpu cache.
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
else
skb_dst_force(skb);
if (!txq)
txq = netdev_core_pick_tx(dev, skb, sb_dev);
q = rcu_dereference_bh(txq->qdisc);
trace_net_dev_queue(skb);
if (q->enqueue) {
rc = __dev_xmit_skb(skb, q, dev, txq);
goto out;
}
/* The device has no queue. Common case for software devices:
* loopback, all the sorts of tunnels...
* Really, it is unlikely that netif_tx_lock protection is necessary
* here. (f.e. loopback and IP tunnels are clean ignoring statistics
* counters.)
* However, it is possible, that they rely on protection
* made by us here.
* Check this and take the lock. It is not prone to deadlocks.
* Either that, or use the noqueue qdisc, which is even simpler 8)
*/
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
/* Other cpus might concurrently change txq->xmit_lock_owner
* to -1 or to their cpu id, but not to our id.
*/
if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
if (dev_xmit_recursion())
goto recursion_alert;
skb = validate_xmit_skb(skb, dev, &again);
if (!skb)
goto out;
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
dev_xmit_recursion_inc();
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
dev_xmit_recursion_dec();
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
}
}
HARD_TX_UNLOCK(dev, txq);
net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
dev->name);
} else {
/* Recursion is detected! It is possible,
* unfortunately
*/
recursion_alert:
net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
dev->name);
}
}
rc = -ENETDOWN;
rcu_read_unlock_bh();
dev_core_stats_tx_dropped_inc(dev);
kfree_skb_list(skb);
return rc;
out:
rcu_read_unlock_bh();
return rc;
}
EXPORT_SYMBOL(__dev_queue_xmit);
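/*
 * Usage note: most callers reach this through the dev_queue_xmit(skb)
 * wrapper, which passes a NULL @sb_dev. A minimal, illustrative transmit
 * sequence from a protocol layer (with a caller-chosen priority) might be:
 *
 *	skb->dev = dev;
 *	skb->priority = prio;
 *	err = dev_queue_xmit(skb);
 *
 * Regardless of @err, the skb must not be touched afterwards.
 */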
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
struct net_device *dev = skb->dev;
struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
bool again = false;
if (unlikely(!netif_running(dev) ||
!netif_carrier_ok(dev)))
goto drop;
skb = validate_xmit_skb_list(skb, dev, &again);
if (skb != orig_skb)
goto drop;
skb_set_queue_mapping(skb, queue_id);
txq = skb_get_tx_queue(dev, skb);
local_bh_disable();
dev_xmit_recursion_inc();
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_drv_stopped(txq))
ret = netdev_start_xmit(skb, dev, txq, false);
HARD_TX_UNLOCK(dev, txq);
dev_xmit_recursion_dec();
local_bh_enable();
return ret;
drop:
dev_core_stats_tx_dropped_inc(dev);
kfree_skb_list(skb);
return NET_XMIT_DROP;
}
EXPORT_SYMBOL(__dev_direct_xmit);
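/*
 * Usage note: __dev_direct_xmit() bypasses the qdisc layer and targets one
 * specific TX queue; callers that manage their own queueing (AF_XDP, for
 * example) typically go through the dev_direct_xmit() wrapper. Illustrative
 * sketch, assuming @queue_id was reserved for this caller:
 *
 *	ret = dev_direct_xmit(skb, queue_id);
 *	if (!dev_xmit_complete(ret))
 *		(the packet was not accepted by the driver)
 */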
/*************************************************************************
* Receiver routines
*************************************************************************/
static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
struct napi_struct *napi)
{
struct task_struct *thread;
lockdep_assert_irqs_disabled();
if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
/* Paired with smp_mb__before_atomic() in
* napi_enable()/netif_set_threaded().
* Use READ_ONCE() to guarantee a complete
* read on napi->thread. Only call
* wake_up_process() when it's not NULL.
*/
thread = READ_ONCE(napi->thread);
if (thread) {
if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
goto use_local_napi;
set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
wake_up_process(thread);
return;
}
}
use_local_napi:
DEBUG_NET_WARN_ON_ONCE(!list_empty(&napi->poll_list));
list_add_tail(&napi->poll_list, &sd->poll_list);
WRITE_ONCE(napi->list_owner, smp_processor_id());
/* If not called from net_rx_action()
* we have to raise NET_RX_SOFTIRQ.
*/
if (!sd->in_net_rx_action)
raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
static u32 rfs_slot(u32 hash, const struct rps_dev_flow_table *flow_table)
{
return hash_32(hash, flow_table->log);
}
#ifdef CONFIG_RFS_ACCEL
/**
* rps_flow_is_active - check whether the flow is recently active.
* @rflow: Specific flow to check activity.
* @flow_table: per-queue flowtable that @rflow belongs to.
* @cpu: CPU saved in @rflow.
*
* If the CPU has processed many packets since the flow's last activity
* (beyond 10 times the table size), the flow is considered stale.
*
* Return: true if flow was recently active.
*/
static bool rps_flow_is_active(struct rps_dev_flow *rflow,
struct rps_dev_flow_table *flow_table,
unsigned int cpu)
{
unsigned int flow_last_active;
unsigned int sd_input_head;
if (cpu >= nr_cpu_ids)
return false;
sd_input_head = READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head);
flow_last_active = READ_ONCE(rflow->last_qtail);
return (int)(sd_input_head - flow_last_active) <
(int)(10 << flow_table->log);
}
#endif
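/*
 * Worked example (illustrative numbers): with a per-queue flow table of
 * log = 8 (256 entries), a flow counts as recently active as long as the
 * saved CPU has dequeued fewer than 10 << 8 = 2560 packets since the
 * flow's last_qtail was recorded.
 */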
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu, u32 hash,
u32 flow_id)
{
if (next_cpu < nr_cpu_ids) {
u32 head;
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *old_rflow;
struct rps_dev_flow *tmp_rflow;
unsigned int tmp_cpu;
u16 rxq_index;
int rc;
/* Should we steer this flow to a different hardware queue? */
if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
!(dev->features & NETIF_F_NTUPLE))
goto out;
rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
if (rxq_index == skb_get_rx_queue(skb))
goto out;
rxqueue = dev->_rx + rxq_index;
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (!flow_table)
goto out;
tmp_rflow = &flow_table->flows[flow_id];
tmp_cpu = READ_ONCE(tmp_rflow->cpu);
if (READ_ONCE(tmp_rflow->filter) != RPS_NO_FILTER) {
if (rps_flow_is_active(tmp_rflow, flow_table,
tmp_cpu)) {
if (hash != READ_ONCE(tmp_rflow->hash) ||
next_cpu == tmp_cpu)
goto out;
}
}
rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
rxq_index, flow_id);
if (rc < 0)
goto out;
old_rflow = rflow;
rflow = tmp_rflow;
WRITE_ONCE(rflow->filter, rc);
WRITE_ONCE(rflow->hash, hash);
if (old_rflow->filter == rc)
WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER);
out:
#endif
head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
rps_input_queue_tail_save(&rflow->last_qtail, head);
}
WRITE_ONCE(rflow->cpu, next_cpu);
return rflow;
}
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
* rcu_read_lock must be held on entry.
*/
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
const struct rps_sock_flow_table *sock_flow_table;
struct netdev_rx_queue *rxqueue = dev->_rx;
struct rps_dev_flow_table *flow_table;
struct rps_map *map;
int cpu = -1;
u32 flow_id;
u32 tcpu;
u32 hash;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->real_num_rx_queues)) {
WARN_ONCE(dev->real_num_rx_queues > 1,
"%s received packet on queue %u, but number "
"of RX queues is %u\n",
dev->name, index, dev->real_num_rx_queues);
goto done;
}
rxqueue += index;
}
/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
flow_table = rcu_dereference(rxqueue->rps_flow_table);
map = rcu_dereference(rxqueue->rps_map);
if (!flow_table && !map)
goto done;
skb_reset_network_header(skb);
hash = skb_get_hash(skb);
if (!hash)
goto done;
sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
u32 ident;
/* First check into global flow table if there is a match.
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;
next_cpu = ident & net_hotdata.rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
*/
flow_id = rfs_slot(hash, flow_table);
rflow = &flow_table->flows[flow_id];
tcpu = rflow->cpu;
/*
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
* - Current CPU is unset (>= nr_cpu_ids).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
* This guarantees that all previous packets for the flow
* have been dequeued, thus preserving in-order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
(tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash,
flow_id);
}
if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
}
}
try_rps:
if (map) {
tcpu = map->cpus[reciprocal_scale(hash, map->len)];
if (cpu_online(tcpu)) {
cpu = tcpu;
goto done;
}
}
done:
return cpu;
}
#ifdef CONFIG_RFS_ACCEL
/**
* rps_may_expire_flow - check whether an RFS hardware filter may be removed
* @dev: Device on which the filter was set
* @rxq_index: RX queue index
* @flow_id: Flow ID passed to ndo_rx_flow_steer()
* @filter_id: Filter ID returned by ndo_rx_flow_steer()
*
* Drivers that implement ndo_rx_flow_steer() should periodically call
* this function for each installed filter and remove the filters for
* which it returns %true.
*/
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
u32 flow_id, u16 filter_id)
{
struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *rflow;
bool expire = true;
rcu_read_lock();
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id < (1UL << flow_table->log)) {
unsigned int cpu;
rflow = &flow_table->flows[flow_id];
cpu = READ_ONCE(rflow->cpu);
if (READ_ONCE(rflow->filter) == filter_id &&
rps_flow_is_active(rflow, flow_table, cpu))
expire = false;
}
rcu_read_unlock();
return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
#endif /* CONFIG_RFS_ACCEL */
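/*
 * Illustrative sketch only: a driver implementing ndo_rx_flow_steer()
 * would typically scan its installed filters from a periodic work item
 * and remove the stale ones. The names below (priv, foo_filter,
 * foo_remove_filter) are hypothetical:
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct foo_filter *f = &priv->filters[i];
 *
 *		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			foo_remove_filter(priv, f);
 *	}
 */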
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
struct softnet_data *sd = data;
____napi_schedule(sd, &sd->backlog);
/* Pairs with READ_ONCE() in softnet_seq_show() */
WRITE_ONCE(sd->received_rps, sd->received_rps + 1);
}
#endif /* CONFIG_RPS */
/* Called from hardirq (IPI) context */
static void trigger_rx_softirq(void *data)
{
struct softnet_data *sd = data;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
smp_store_release(&sd->defer_ipi_scheduled, 0);
}
/*
* After we queued a packet into sd->input_pkt_queue,
* we need to make sure this queue is serviced soon.
*
* - If this is another cpu queue, link it to our rps_ipi_list,
* and make sure we will process rps_ipi_list from net_rx_action().
*
* - If this is our own queue, NAPI schedule our backlog.
* Note that this also raises NET_RX_SOFTIRQ.
*/
static void napi_schedule_rps(struct softnet_data *sd)
{
struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
#ifdef CONFIG_RPS
if (sd != mysd) {
if (use_backlog_threads()) {
__napi_schedule_irqoff(&sd->backlog);
return;
}
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
/* If not called from net_rx_action() or napi_threaded_poll()
* we have to raise NET_RX_SOFTIRQ.
*/
if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
return;
}
#endif /* CONFIG_RPS */
__napi_schedule_irqoff(&mysd->backlog);
}
void kick_defer_list_purge(unsigned int cpu)
{
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
unsigned long flags;
if (use_backlog_threads()) {
backlog_lock_irq_save(sd, &flags);
if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
__napi_schedule_irqoff(&sd->backlog);
backlog_unlock_irq_restore(sd, &flags);
} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
smp_call_function_single_async(cpu, &sd->defer_csd);
}
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif
static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit *fl;
struct softnet_data *sd;
unsigned int old_flow, new_flow;
if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
return false;
sd = this_cpu_ptr(&softnet_data);
rcu_read_lock();
fl = rcu_dereference(sd->flow_limit);
if (fl) {
new_flow = hash_32(skb_get_hash(skb), fl->log_buckets);
old_flow = fl->history[fl->history_head];
fl->history[fl->history_head] = new_flow;
fl->history_head++;
fl->history_head &= FLOW_LIMIT_HISTORY - 1;
if (likely(fl->buckets[old_flow]))
fl->buckets[old_flow]--;
if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
/* Pairs with READ_ONCE() in softnet_seq_show() */
WRITE_ONCE(fl->count, fl->count + 1);
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
#endif
return false;
}
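/*
 * Worked example, assuming the usual FLOW_LIMIT_HISTORY of 128: a single
 * flow starts being dropped once it owns more than 64 of the last 128
 * packets enqueued on this CPU, and only while the backlog is already at
 * least half full (see the max_backlog >> 1 check above).
 */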
/*
* enqueue_to_backlog is called to queue an skb to a per CPU backlog
* queue (may be a remote CPU queue).
*/
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
unsigned int *qtail)
{
enum skb_drop_reason reason;
struct softnet_data *sd;
unsigned long flags;
unsigned int qlen;
int max_backlog;
u32 tail;
reason = SKB_DROP_REASON_DEV_READY;
if (!netif_running(skb->dev))
goto bad_dev;
reason = SKB_DROP_REASON_CPU_BACKLOG;
sd = &per_cpu(softnet_data, cpu);
qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
max_backlog = READ_ONCE(net_hotdata.max_backlog);
if (unlikely(qlen > max_backlog))
goto cpu_backlog_drop;
backlog_lock_irq_save(sd, &flags);
qlen = skb_queue_len(&sd->input_pkt_queue);
if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
if (!qlen) {
/* Schedule NAPI for backlog device. We can use
* non atomic operation as we own the queue lock.
*/
if (!__test_and_set_bit(NAPI_STATE_SCHED,
&sd->backlog.state))
napi_schedule_rps(sd);
}
__skb_queue_tail(&sd->input_pkt_queue, skb);
tail = rps_input_queue_tail_incr(sd);
backlog_unlock_irq_restore(sd, &flags);
/* save the tail outside of the critical section */
rps_input_queue_tail_save(qtail, tail);
return NET_RX_SUCCESS;
}
backlog_unlock_irq_restore(sd, &flags);
cpu_backlog_drop:
numa_drop_add(&sd->drop_counters, 1);
bad_dev:
dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb_reason(skb, reason);
return NET_RX_DROP;
}
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct netdev_rx_queue *rxqueue;
rxqueue = dev->_rx;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->real_num_rx_queues)) {
WARN_ONCE(dev->real_num_rx_queues > 1,
"%s received packet on queue %u, but number "
"of RX queues is %u\n",
dev->name, index, dev->real_num_rx_queues);
return rxqueue; /* Return first rxqueue */
}
rxqueue += index;
}
return rxqueue;
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
const struct bpf_prog *xdp_prog)
{
void *orig_data, *orig_data_end, *hard_start;
struct netdev_rx_queue *rxqueue;
bool orig_bcast, orig_host;
u32 mac_len, frame_sz;
__be16 orig_eth_type;
struct ethhdr *eth;
u32 metalen, act;
int off;
/* The XDP program wants to see the packet starting at the MAC
* header.
*/
mac_len = skb->data - skb_mac_header(skb);
hard_start = skb->data - skb_headroom(skb);
/* SKB "head" area always have tailroom for skb_shared_info */
frame_sz = (void *)skb_end_pointer(skb) - hard_start;
frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
rxqueue = netif_get_rxqueue(skb);
xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
skb_headlen(skb) + mac_len, true);
if (skb_is_nonlinear(skb)) {
skb_shinfo(skb)->xdp_frags_size = skb->data_len;
xdp_buff_set_frags_flag(xdp);
} else {
xdp_buff_clear_frags_flag(xdp);
}
orig_data_end = xdp->data_end;
orig_data = xdp->data;
eth = (struct ethhdr *)xdp->data;
orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
orig_eth_type = eth->h_proto;
act = bpf_prog_run_xdp(xdp_prog, xdp);
/* check if bpf_xdp_adjust_head was used */
off = xdp->data - orig_data;
if (off) {
if (off > 0)
__skb_pull(skb, off);
else if (off < 0)
__skb_push(skb, -off);
skb->mac_header += off;
skb_reset_network_header(skb);
}
/* check if bpf_xdp_adjust_tail was used */
off = xdp->data_end - orig_data_end;
if (off != 0) {
skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
skb->len += off; /* positive on grow, negative on shrink */
}
/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
* (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
*/
if (xdp_buff_has_frags(xdp))
skb->data_len = skb_shinfo(skb)->xdp_frags_size;
else
skb->data_len = 0;
/* check if XDP changed the eth hdr such that the SKB needs an update */
eth = (struct ethhdr *)xdp->data;
if ((orig_eth_type != eth->h_proto) ||
(orig_host != ether_addr_equal_64bits(eth->h_dest,
skb->dev->dev_addr)) ||
(orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
__skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
}
/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
* before calling us again on redirect path. We do not call do_redirect
* as we leave that up to the caller.
*
* Caller is responsible for managing lifetime of skb (i.e. calling
* kfree_skb in response to actions it cannot handle/XDP_DROP).
*/
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
__skb_push(skb, mac_len);
break;
case XDP_PASS:
metalen = xdp->data - xdp->data_meta;
if (metalen)
skb_metadata_set(skb, metalen);
break;
}
return act;
}
static int
netif_skb_check_for_xdp(struct sk_buff **pskb, const struct bpf_prog *prog)
{
struct sk_buff *skb = *pskb;
int err, hroom, troom;
local_lock_nested_bh(&system_page_pool.bh_lock);
err = skb_cow_data_for_xdp(this_cpu_read(system_page_pool.pool), pskb, prog);
local_unlock_nested_bh(&system_page_pool.bh_lock);
if (!err)
return 0;
/* In case we have to go down the path and also linearize,
* then lets do the pskb_expand_head() work just once here.
*/
hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
troom = skb->tail + skb->data_len - skb->end;
err = pskb_expand_head(skb,
hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
if (err)
return err;
return skb_linearize(skb);
}
static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
const struct bpf_prog *xdp_prog)
{
struct sk_buff *skb = *pskb;
u32 mac_len, act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
if (skb_is_redirected(skb))
return XDP_PASS;
/* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
* bytes. This is the guarantee that also native XDP provides,
* thus we need to do it here as well.
*/
mac_len = skb->data - skb_mac_header(skb);
__skb_push(skb, mac_len);
if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
if (netif_skb_check_for_xdp(pskb, xdp_prog))
goto do_drop;
}
__skb_pull(*pskb, mac_len);
act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
case XDP_PASS:
break;
default:
bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
do_drop:
kfree_skb(*pskb);
break;
}
return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
* network taps in order to match in-driver-XDP behavior. This also means
* that XDP packets are able to starve other packets going through a qdisc,
* and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
* queues, so they do not have this starvation issue.
*/
void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
bool free_skb = true;
int cpu, rc;
txq = netdev_core_pick_tx(dev, skb, NULL);
cpu = smp_processor_id();
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_frozen_or_drv_stopped(txq)) {
rc = netdev_start_xmit(skb, dev, txq, 0);
if (dev_xmit_complete(rc))
free_skb = false;
}
HARD_TX_UNLOCK(dev, txq);
if (free_skb) {
trace_xdp_exception(dev, xdp_prog, XDP_TX);
dev_core_stats_tx_dropped_inc(dev);
kfree_skb(skb);
}
}
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
&xdp, xdp_prog);
if (err)
goto out_redir;
break;
case XDP_TX:
generic_xdp_tx(*pskb, xdp_prog);
break;
}
bpf_net_ctx_clear(bpf_net_ctx);
return XDP_DROP;
}
bpf_net_ctx_clear(bpf_net_ctx);
}
return XDP_PASS;
out_redir:
bpf_net_ctx_clear(bpf_net_ctx);
kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
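/*
 * Usage note: generic XDP callers (such as the core receive path further
 * below) pass the per-device program and a pointer to the skb pointer,
 * because the skb may be reallocated or consumed. Illustrative sketch:
 *
 *	ret = do_xdp_generic(rcu_dereference(dev->xdp_prog), &skb);
 *	if (ret != XDP_PASS)
 *		return;		(skb was already consumed)
 */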
static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_rx(skb);
#ifdef CONFIG_RPS
if (static_branch_unlikely(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu < 0)
cpu = smp_processor_id();
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
} else
#endif
{
unsigned int qtail;
ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
}
return ret;
}
/**
* __netif_rx - Slightly optimized version of netif_rx
* @skb: buffer to post
*
* This behaves as netif_rx except that it does not disable bottom halves.
* As a result this function may only be invoked from the interrupt context
* (either hard or soft interrupt).
*/
int __netif_rx(struct sk_buff *skb)
{
int ret;
lockdep_assert_once(hardirq_count() | softirq_count());
trace_netif_rx_entry(skb);
ret = netif_rx_internal(skb);
trace_netif_rx_exit(ret);
return ret;
}
EXPORT_SYMBOL(__netif_rx);
/**
* netif_rx - post buffer to the network code
* @skb: buffer to post
*
* This function receives a packet from a device driver and queues it for
* the upper (protocol) levels to process via the backlog NAPI device. It
* always succeeds. The buffer may be dropped during processing for
* congestion control or by the protocol layers.
* The network buffer is passed via the backlog NAPI device. Modern NIC
* drivers should use NAPI and GRO.
* This function can be used from interrupt and from process context. The
* caller from process context must not disable interrupts before invoking
* this function.
*
* return values:
* NET_RX_SUCCESS (no congestion)
* NET_RX_DROP (packet was dropped)
*
*/
int netif_rx(struct sk_buff *skb)
{
bool need_bh_off = !(hardirq_count() | softirq_count());
int ret;
if (need_bh_off)
local_bh_disable();
trace_netif_rx_entry(skb);
ret = netif_rx_internal(skb);
trace_netif_rx_exit(ret);
if (need_bh_off)
local_bh_enable();
return ret;
}
EXPORT_SYMBOL(netif_rx);
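/*
 * Illustrative sketch only: a legacy (non-NAPI) driver receive interrupt
 * would allocate an skb, copy the frame, set its protocol and hand it to
 * netif_rx(). The buffer and length names are generic, not from any
 * particular driver:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (!skb)
 *		return;		(count the drop)
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */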
static __latent_entropy void net_tx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
if (sd->completion_queue) {
struct sk_buff *clist;
local_irq_disable();
clist = sd->completion_queue;
sd->completion_queue = NULL;
local_irq_enable();
while (clist) {
struct sk_buff *skb = clist;
clist = clist->next;
WARN_ON(refcount_read(&skb->users));
if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
trace_consume_skb(skb, net_tx_action);
else
trace_kfree_skb(skb, net_tx_action,
get_kfree_skb_cb(skb)->reason, NULL);
if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
__kfree_skb(skb);
else
__napi_kfree_skb(skb,
get_kfree_skb_cb(skb)->reason);
}
}
if (sd->output_queue) {
struct Qdisc *head;
local_irq_disable();
head = sd->output_queue;
sd->output_queue = NULL;
sd->output_queue_tailp = &sd->output_queue;
local_irq_enable();
rcu_read_lock();
while (head) {
struct Qdisc *q = head;
spinlock_t *root_lock = NULL;
head = head->next_sched;
/* We need to make sure head->next_sched is read
* before clearing __QDISC_STATE_SCHED
*/
smp_mb__before_atomic();
if (!(q->flags & TCQ_F_NOLOCK)) {
root_lock = qdisc_lock(q);
spin_lock(root_lock);
} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
&q->state))) {
/* There is a synchronize_net() between
* STATE_DEACTIVATED flag being set and
* qdisc_reset()/some_qdisc_is_busy() in
* dev_deactivate(), so we can safely bail out
* early here to avoid data race between
* qdisc_deactivate() and some_qdisc_is_busy()
* for lockless qdisc.
*/
clear_bit(__QDISC_STATE_SCHED, &q->state);
continue;
}
clear_bit(__QDISC_STATE_SCHED, &q->state);
qdisc_run(q);
if (root_lock)
spin_unlock(root_lock);
}
rcu_read_unlock();
}
xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
/**
* netdev_is_rx_handler_busy - check if receive handler is registered
* @dev: device to check
*
* Check if a receive handler is already registered for a given device.
* Return true if there is one.
*
* The caller must hold the rtnl_mutex.
*/
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
ASSERT_RTNL();
return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
/**
* netdev_rx_handler_register - register receive handler
* @dev: device to register a handler for
* @rx_handler: receive handler to register
* @rx_handler_data: data pointer that is used by rx handler
*
* Register a receive handler for a device. This handler will then be
* called from __netif_receive_skb. A negative errno code is returned
* on a failure.
*
* The caller must hold the rtnl_mutex.
*
* For a general description of rx_handler, see enum rx_handler_result.
*/
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data)
{
if (netdev_is_rx_handler_busy(dev))
return -EBUSY;
if (dev->priv_flags & IFF_NO_RX_HANDLER)
return -EINVAL;
/* Note: rx_handler_data must be set before rx_handler */
rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
rcu_assign_pointer(dev->rx_handler, rx_handler);
return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
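/*
 * Illustrative sketch only: upper devices such as bridge, bonding or
 * macvlan attach themselves to a lower device roughly like this, under
 * rtnl_lock(). The handler and helper names are hypothetical:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (!foo_wants(skb))
 *			return RX_HANDLER_PASS;
 *		foo_receive(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	err = netdev_rx_handler_register(lower_dev, foo_handle_frame, priv);
 */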
/**
* netdev_rx_handler_unregister - unregister receive handler
* @dev: device to unregister a handler from
*
* Unregister a receive handler from a device.
*
* The caller must hold the rtnl_mutex.
*/
void netdev_rx_handler_unregister(struct net_device *dev)
{
ASSERT_RTNL();
RCU_INIT_POINTER(dev->rx_handler, NULL);
/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
* section has a guarantee to see a non NULL rx_handler_data
* as well.
*/
synchronize_net();
RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
/*
* Limit the use of PFMEMALLOC reserves to those protocols that implement
* the special handling of PFMEMALLOC skbs.
*/
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
switch (skb->protocol) {
case htons(ETH_P_ARP):
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_8021Q):
case htons(ETH_P_8021AD):
return true;
default:
return false;
}
}
static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
if (nf_hook_ingress_active(skb)) {
int ingress_retval;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
rcu_read_lock();
ingress_retval = nf_hook_ingress(skb);
rcu_read_unlock();
return ingress_retval;
}
return 0;
}
static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
struct packet_type **ppt_prev)
{
enum skb_drop_reason drop_reason = SKB_DROP_REASON_UNHANDLED_PROTO;
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
struct sk_buff *skb = *pskb;
struct net_device *orig_dev;
bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
orig_dev = skb->dev;
skb_reset_network_header(skb);
#if !defined(CONFIG_DEBUG_NET)
/* We plan to no longer reset the transport header here.
* Give some time to fuzzers and dev build to catch bugs
* in network stacks.
*/
if (!skb_transport_header_was_set(skb))
skb_reset_transport_header(skb);
#endif
skb_reset_mac_len(skb);
pt_prev = NULL;
another_round:
skb->skb_iif = skb->dev->ifindex;
__this_cpu_inc(softnet_data.processed);
if (static_branch_unlikely(&generic_xdp_needed_key)) {
int ret2;
migrate_disable();
ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
&skb);
migrate_enable();
if (ret2 != XDP_PASS) {
ret = NET_RX_DROP;
goto out;
}
}
if (eth_type_vlan(skb->protocol)) {
skb = skb_vlan_untag(skb);
if (unlikely(!skb))
goto out;
}
if (skb_skip_tc_classify(skb))
goto skip_classify;
if (pfmemalloc)
goto skip_taps;
list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all,
list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
skip_taps:
#ifdef CONFIG_NET_INGRESS
if (static_branch_unlikely(&ingress_needed_key)) {
bool another = false;
nf_skip_egress(skb, true);
skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
&another);
if (another)
goto another_round;
if (!skb)
goto out;
nf_skip_egress(skb, false);
if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
goto out;
}
#endif
skb_reset_redirect(skb);
skip_classify:
if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) {
drop_reason = SKB_DROP_REASON_PFMEMALLOC;
goto drop;
}
if (skb_vlan_tag_present(skb)) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
if (vlan_do_receive(&skb))
goto another_round;
else if (unlikely(!skb))
goto out;
}
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (rx_handler) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
switch (rx_handler(&skb)) {
case RX_HANDLER_CONSUMED:
ret = NET_RX_SUCCESS;
goto out;
case RX_HANDLER_ANOTHER:
goto another_round;
case RX_HANDLER_EXACT:
deliver_exact = true;
break;
case RX_HANDLER_PASS:
break;
default:
BUG();
}
}
if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
check_vlan_id:
if (skb_vlan_tag_get_id(skb)) {
/* Vlan id is non 0 and vlan_do_receive() above couldn't
* find vlan device.
*/
skb->pkt_type = PACKET_OTHERHOST;
} else if (eth_type_vlan(skb->protocol)) {
/* Outer header is 802.1P with vlan 0, inner header is
* 802.1Q or 802.1AD and vlan_do_receive() above could
* not find vlan dev for vlan id 0.
*/
__vlan_hwaccel_clear_tag(skb);
skb = skb_vlan_untag(skb);
if (unlikely(!skb))
goto out;
if (vlan_do_receive(&skb))
/* After stripping off 802.1P header with vlan 0
* vlan dev is found for inner header.
*/
goto another_round;
else if (unlikely(!skb))
goto out;
else
/* We have stripped outer 802.1P vlan 0 header.
* But could not find vlan dev.
* check again for vlan id to set OTHERHOST.
*/
goto check_vlan_id;
}
/* Note: we might in the future use prio bits
* and set skb->priority like in vlan_do_receive()
* For the time being, just ignore Priority Code Point
*/
__vlan_hwaccel_clear_tag(skb);
}
type = skb->protocol;
/* deliver only exact match when indicated */
if (likely(!deliver_exact)) {
deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
&ptype_base[ntohs(type) &
PTYPE_HASH_MASK]);
/* orig_dev and skb->dev could belong to different netns;
* Even in such case we need to traverse only the list
* coming from skb->dev, as the ptype owner (packet socket)
* will use dev_net(skb->dev) to do namespace filtering.
*/
deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
&dev_net_rcu(skb->dev)->ptype_specific);
}
deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
&orig_dev->ptype_specific);
if (unlikely(skb->dev != orig_dev)) {
deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
&skb->dev->ptype_specific);
}
if (pt_prev) {
*ppt_prev = pt_prev;
} else {
drop:
if (!deliver_exact)
dev_core_stats_rx_dropped_inc(skb->dev);
else
dev_core_stats_rx_nohandler_inc(skb->dev);
kfree_skb_reason(skb, drop_reason);
/* Jamal, now you will not be able to escape explaining
* to me how you were going to use this. :-)
*/
ret = NET_RX_DROP;
}
out:
/* The invariant here is that if *ppt_prev is not NULL
* then skb should also be non-NULL.
*
* Apparently *ppt_prev assignment above holds this invariant due to
* skb dereferencing near it.
*/
*pskb = skb;
return ret;
}
static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{
struct net_device *orig_dev = skb->dev;
struct packet_type *pt_prev = NULL;
int ret;
ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
if (pt_prev)
ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
skb->dev, pt_prev, orig_dev);
return ret;
}
/**
* netif_receive_skb_core - special purpose version of netif_receive_skb
* @skb: buffer to process
*
* More direct receive version of netif_receive_skb(). It should
* only be used by callers that have a need to skip RPS and Generic XDP.
* Caller must also take care of handling if ``(page_is_)pfmemalloc``.
*
* This function may only be called from softirq context and interrupts
* should be enabled.
*
* Return values (usually ignored):
* NET_RX_SUCCESS: no congestion
* NET_RX_DROP: packet was dropped
*/
int netif_receive_skb_core(struct sk_buff *skb)
{
int ret;
rcu_read_lock();
ret = __netif_receive_skb_one_core(skb, false);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);
static inline void __netif_receive_skb_list_ptype(struct list_head *head,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{
struct sk_buff *skb, *next;
if (!pt_prev)
return;
if (list_empty(head))
return;
if (pt_prev->list_func != NULL)
INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
ip_list_rcv, head, pt_prev, orig_dev);
else
list_for_each_entry_safe(skb, next, head, list) {
skb_list_del_init(skb);
pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
}
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{
/* Fast-path assumptions:
* - There is no RX handler.
* - Only one packet_type matches.
* If either of these fails, we will end up doing some per-packet
* processing in-line, then handling the 'last ptype' for the whole
* sublist. This can't cause out-of-order delivery to any single ptype,
* because the 'last ptype' must be constant across the sublist, and all
* other ptypes are handled per-packet.
*/
/* Current (common) ptype of sublist */
struct packet_type *pt_curr = NULL;
/* Current (common) orig_dev of sublist */
struct net_device *od_curr = NULL;
struct sk_buff *skb, *next;
LIST_HEAD(sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *orig_dev = skb->dev;
struct packet_type *pt_prev = NULL;
skb_list_del_init(skb);
__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
if (!pt_prev)
continue;
if (pt_curr != pt_prev || od_curr != orig_dev) {
/* dispatch old sublist */
__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
/* start new sublist */
INIT_LIST_HEAD(&sublist);
pt_curr = pt_prev;
od_curr = orig_dev;
}
list_add_tail(&skb->list, &sublist);
}
/* dispatch final sublist */
__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
}
static int __netif_receive_skb(struct sk_buff *skb)
{
int ret;
if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
unsigned int noreclaim_flag;
/*
* PFMEMALLOC skbs are special, they should
* - be delivered to SOCK_MEMALLOC sockets only
* - stay away from userspace
* - have bounded memory usage
*
* Use PF_MEMALLOC as this saves us from propagating the allocation
* context down to all allocation sites.
*/
noreclaim_flag = memalloc_noreclaim_save();
ret = __netif_receive_skb_one_core(skb, true);
memalloc_noreclaim_restore(noreclaim_flag);
} else
ret = __netif_receive_skb_one_core(skb, false);
return ret;
}
static void __netif_receive_skb_list(struct list_head *head)
{
unsigned long noreclaim_flag = 0;
struct sk_buff *skb, *next;
bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
list_for_each_entry_safe(skb, next, head, list) {
if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
struct list_head sublist;
/* Handle the previous sublist */
list_cut_before(&sublist, head, &skb->list);
if (!list_empty(&sublist))
__netif_receive_skb_list_core(&sublist, pfmemalloc);
pfmemalloc = !pfmemalloc;
/* See comments in __netif_receive_skb */
if (pfmemalloc)
noreclaim_flag = memalloc_noreclaim_save();
else
memalloc_noreclaim_restore(noreclaim_flag);
}
}
/* Handle the remaining sublist */
if (!list_empty(head))
__netif_receive_skb_list_core(head, pfmemalloc);
/* Restore pflags */
if (pfmemalloc)
memalloc_noreclaim_restore(noreclaim_flag);
}
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
struct bpf_prog *new = xdp->prog;
int ret = 0;
switch (xdp->command) {
case XDP_SETUP_PROG:
rcu_assign_pointer(dev->xdp_prog, new);
if (old)
bpf_prog_put(old);
if (old && !new) {
static_branch_dec(&generic_xdp_needed_key);
} else if (new && !old) {
static_branch_inc(&generic_xdp_needed_key);
netif_disable_lro(dev);
dev_disable_gro_hw(dev);
}
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
rcu_read_lock();
#ifdef CONFIG_RPS
if (static_branch_unlikely(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu >= 0) {
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
return ret;
}
}
#endif
ret = __netif_receive_skb(skb);
rcu_read_unlock();
return ret;
}
void netif_receive_skb_list_internal(struct list_head *head)
{
struct sk_buff *skb, *next;
LIST_HEAD(sublist);
list_for_each_entry_safe(skb, next, head, list) {
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
}
list_splice_init(&sublist, head);
rcu_read_lock();
#ifdef CONFIG_RPS
if (static_branch_unlikely(&rps_needed)) {
list_for_each_entry_safe(skb, next, head, list) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu >= 0) {
/* Will be handled, remove from list */
skb_list_del_init(skb);
enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
}
}
}
#endif
__netif_receive_skb_list(head);
rcu_read_unlock();
}
/**
* netif_receive_skb - process receive buffer from network
* @skb: buffer to process
*
* netif_receive_skb() is the main receive data processing function.
* It always succeeds. The buffer may be dropped during processing
* for congestion control or by the protocol layers.
*
* This function may only be called from softirq context and interrupts
* should be enabled.
*
* Return values (usually ignored):
* NET_RX_SUCCESS: no congestion
* NET_RX_DROP: packet was dropped
*/
int netif_receive_skb(struct sk_buff *skb)
{
int ret;
trace_netif_receive_skb_entry(skb);
ret = netif_receive_skb_internal(skb);
trace_netif_receive_skb_exit(ret);
return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
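/* Usage sketch (illustrative assumption, not code from this file): a
* non-NAPI driver delivering one received buffer from BH context.
* my_rx_one() and the buffer handling are hypothetical; netif_receive_skb(),
* netdev_alloc_skb(), skb_put_data() and eth_type_trans() are the real APIs.
*
*	static void my_rx_one(struct net_device *dev, void *data, unsigned int len)
*	{
*		struct sk_buff *skb = netdev_alloc_skb(dev, len);
*
*		if (!skb)
*			return;
*		skb_put_data(skb, data, len);
*		skb->protocol = eth_type_trans(skb, dev);
*		netif_receive_skb(skb);
*	}
*/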
/**
* netif_receive_skb_list - process many receive buffers from network
* @head: list of skbs to process.
*
* Since the return value of netif_receive_skb() is normally ignored, and
* wouldn't be meaningful for a list, this function returns void.
*
* This function may only be called from softirq context and interrupts
* should be enabled.
*/
void netif_receive_skb_list(struct list_head *head)
{
struct sk_buff *skb;
if (list_empty(head))
return;
if (trace_netif_receive_skb_list_entry_enabled()) {
list_for_each_entry(skb, head, list)
trace_netif_receive_skb_list_entry(skb);
}
netif_receive_skb_list_internal(head);
trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
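/* Usage sketch (illustrative assumption): batching received skbs and
* handing them over in one call. The rx_list construction and
* my_poll_one_descriptor() are hypothetical; list_add_tail() on skb->list
* matches how the sublists above are built.
*
*	LIST_HEAD(rx_list);
*	struct sk_buff *skb;
*
*	while ((skb = my_poll_one_descriptor(ring)) != NULL)
*		list_add_tail(&skb->list, &rx_list);
*	netif_receive_skb_list(&rx_list);
*/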
/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
struct sk_buff *skb, *tmp;
struct sk_buff_head list;
struct softnet_data *sd;
__skb_queue_head_init(&list);
local_bh_disable();
sd = this_cpu_ptr(&softnet_data);
backlog_lock_irq_disable(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
__skb_queue_tail(&list, skb);
rps_input_queue_head_incr(sd);
}
}
backlog_unlock_irq_enable(sd);
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
__skb_queue_tail(&list, skb);
rps_input_queue_head_incr(sd);
}
}
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
local_bh_enable();
__skb_queue_purge_reason(&list, SKB_DROP_REASON_DEV_READY);
}
static bool flush_required(int cpu)
{
#if IS_ENABLED(CONFIG_RPS)
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
bool do_flush;
backlog_lock_irq_disable(sd);
/* as insertion into process_queue happens with the rps lock held,
* process_queue access may race only with dequeue
*/
do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
!skb_queue_empty_lockless(&sd->process_queue);
backlog_unlock_irq_enable(sd);
return do_flush;
#endif
/* without RPS we can't safely check input_pkt_queue: during a
* concurrent remote skb_queue_splice() we can detect as empty both
* input_pkt_queue and process_queue even if the latter could end-up
* containing a lot of packets.
*/
return true;
}
struct flush_backlogs {
cpumask_t flush_cpus;
struct work_struct w[];
};
static struct flush_backlogs *flush_backlogs_alloc(void)
{
return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids),
GFP_KERNEL);
}
static struct flush_backlogs *flush_backlogs_fallback;
static DEFINE_MUTEX(flush_backlogs_mutex);
static void flush_all_backlogs(void)
{
struct flush_backlogs *ptr = flush_backlogs_alloc();
unsigned int cpu;
if (!ptr) {
mutex_lock(&flush_backlogs_mutex);
ptr = flush_backlogs_fallback;
}
cpumask_clear(&ptr->flush_cpus);
cpus_read_lock();
for_each_online_cpu(cpu) {
if (flush_required(cpu)) {
INIT_WORK(&ptr->w[cpu], flush_backlog);
queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]);
__cpumask_set_cpu(cpu, &ptr->flush_cpus);
}
}
/* we can have in flight packet[s] on the cpus we are not flushing,
* synchronize_net() in unregister_netdevice_many() will take care of
* them.
*/
for_each_cpu(cpu, &ptr->flush_cpus)
flush_work(&ptr->w[cpu]);
cpus_read_unlock();
if (ptr != flush_backlogs_fallback)
kfree(ptr);
else
mutex_unlock(&flush_backlogs_mutex);
}
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
while (remsd) {
struct softnet_data *next = remsd->rps_ipi_next;
if (cpu_online(remsd->cpu))
smp_call_function_single_async(remsd->cpu, &remsd->csd);
remsd = next;
}
#endif
}
/*
* net_rps_action_and_irq_enable sends any pending IPI's for rps.
* Note: called with local irq disabled, but exits with local irq enabled.
*/
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
struct softnet_data *remsd = sd->rps_ipi_list;
if (!use_backlog_threads() && remsd) {
sd->rps_ipi_list = NULL;
local_irq_enable();
/* Send pending IPI's to kick RPS processing on remote cpus. */
net_rps_send_ipi(remsd);
} else
#endif
local_irq_enable();
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
return !use_backlog_threads() && sd->rps_ipi_list;
#else
return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
bool again = true;
int work = 0;
/* Check if we have pending IPIs; it's better to send them now
* than to wait for net_rx_action() to end.
*/
if (sd_has_rps_ipi_waiting(sd)) {
local_irq_disable();
net_rps_action_and_irq_enable(sd);
}
napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
while (again) {
struct sk_buff *skb;
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
while ((skb = __skb_dequeue(&sd->process_queue))) {
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
if (++work >= quota) {
rps_input_queue_head_add(sd, work);
return work;
}
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
}
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
backlog_lock_irq_disable(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
* Only the current cpu owns and manipulates this napi,
* and NAPI_STATE_SCHED is the only possible flag set
* on backlog.
* We can use a plain write instead of clear_bit(),
* and we don't need an smp_mb() memory barrier.
*/
napi->state &= NAPIF_STATE_THREADED;
again = false;
} else {
local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
}
backlog_unlock_irq_enable(sd);
}
if (work)
rps_input_queue_head_add(sd, work);
return work;
}
/**
* __napi_schedule - schedule for receive
* @n: entry to schedule
*
* The entry's receive function will be scheduled to run.
* Consider using __napi_schedule_irqoff() if hard irqs are masked.
*/
void __napi_schedule(struct napi_struct *n)
{
unsigned long flags;
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
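/* Usage sketch (illustrative assumption, not code from this file): the
* typical driver hard-irq handler defers RX work to NAPI. my_priv and
* my_hw_mask_irqs() are hypothetical; napi_schedule() is the real helper
* built on napi_schedule_prep()/__napi_schedule().
*
*	static irqreturn_t my_irq_handler(int irq, void *dev_id)
*	{
*		struct my_priv *priv = dev_id;
*
*		my_hw_mask_irqs(priv);
*		napi_schedule(&priv->napi);
*		return IRQ_HANDLED;
*	}
*/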
/**
* napi_schedule_prep - check if napi can be scheduled
* @n: napi context
*
* Test if NAPI routine is already running, and if not mark
* it as running. This is used as a condition variable to
* ensure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
bool napi_schedule_prep(struct napi_struct *n)
{
unsigned long new, val = READ_ONCE(n->state);
do {
if (unlikely(val & NAPIF_STATE_DISABLE))
return false;
new = val | NAPIF_STATE_SCHED;
/* Sets STATE_MISSED bit if STATE_SCHED was already set
* This was suggested by Alexander Duyck, as compiler
* emits better code than :
* if (val & NAPIF_STATE_SCHED)
* new |= NAPIF_STATE_MISSED;
*/
new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
NAPIF_STATE_MISSED;
} while (!try_cmpxchg(&n->state, &val, new));
return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
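/* Pairing sketch (illustrative assumption): callers that cannot use
* napi_schedule() directly follow the same two-step pattern it is built
* from:
*
*	if (napi_schedule_prep(n))
*		__napi_schedule(n);
*
* Worked example of the branchless update above: when val already has
* NAPIF_STATE_SCHED set, (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED is 1,
* so NAPIF_STATE_MISSED is ORed into new; otherwise the term is 0 and new is
* left unchanged.
*/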
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
*
* Variant of __napi_schedule() assuming hard irqs are masked.
*
* On PREEMPT_RT enabled kernels this maps to __napi_schedule()
* because the interrupt disabled assumption might not be true
* due to force-threaded interrupts and spinlock substitution.
*/
void __napi_schedule_irqoff(struct napi_struct *n)
{
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
____napi_schedule(this_cpu_ptr(&softnet_data), n);
else
__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
unsigned long flags, val, new, timeout = 0;
bool ret = true;
/*
* 1) Don't let napi dequeue from the cpu poll list
* just in case it's running on a different cpu.
* 2) If we are busy polling, do nothing here; we have
* the guarantee we will be called later.
*/
if (unlikely(n->state & (NAPIF_STATE_NPSVC |
NAPIF_STATE_IN_BUSY_POLL)))
return false;
if (work_done) {
if (n->gro.bitmask)
timeout = napi_get_gro_flush_timeout(n);
n->defer_hard_irqs_count = napi_get_defer_hard_irqs(n);
}
if (n->defer_hard_irqs_count > 0) {
n->defer_hard_irqs_count--;
timeout = napi_get_gro_flush_timeout(n);
if (timeout)
ret = false;
}
/*
* When the NAPI instance uses a timeout and keeps postponing
* it, we need to bound somehow the time packets are kept in
* the GRO layer.
*/
gro_flush_normal(&n->gro, !!timeout);
if (unlikely(!list_empty(&n->poll_list))) {
/* If n->poll_list is not empty, we need to mask irqs */
local_irq_save(flags);
list_del_init(&n->poll_list);
local_irq_restore(flags);
}
WRITE_ONCE(n->list_owner, -1);
val = READ_ONCE(n->state);
do {
WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
NAPIF_STATE_SCHED_THREADED |
NAPIF_STATE_PREFER_BUSY_POLL);
/* If STATE_MISSED was set, leave STATE_SCHED set,
* because we will call napi->poll() one more time.
* This C code was suggested by Alexander Duyck to help gcc.
*/
new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
NAPIF_STATE_SCHED;
} while (!try_cmpxchg(&n->state, &val, new));
if (unlikely(val & NAPIF_STATE_MISSED)) {
__napi_schedule(n);
return false;
}
if (timeout)
hrtimer_start(&n->timer, ns_to_ktime(timeout),
HRTIMER_MODE_REL_PINNED);
return ret;
}
EXPORT_SYMBOL(napi_complete_done);
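/* Usage sketch (illustrative assumption, not code from this file): the
* canonical driver poll() shape that pairs with napi_complete_done().
* my_priv, my_clean_rx() and my_hw_unmask_irqs() are hypothetical; the
* budget/work contract matches __napi_poll() below.
*
*	static int my_poll(struct napi_struct *napi, int budget)
*	{
*		struct my_priv *priv = container_of(napi, struct my_priv, napi);
*		int work = my_clean_rx(priv, budget);
*
*		if (work < budget && napi_complete_done(napi, work))
*			my_hw_unmask_irqs(priv);
*		return work;
*	}
*/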
static void skb_defer_free_flush(void)
{
struct llist_node *free_list;
struct sk_buff *skb, *next;
struct skb_defer_node *sdn;
int node;
for_each_node(node) {
sdn = this_cpu_ptr(net_hotdata.skb_defer_nodes) + node;
if (llist_empty(&sdn->defer_list))
continue;
atomic_long_set(&sdn->defer_count, 0);
free_list = llist_del_all(&sdn->defer_list);
llist_for_each_entry_safe(skb, next, free_list, ll_node) {
napi_consume_skb(skb, 1);
}
}
}
#if defined(CONFIG_NET_RX_BUSY_POLL)
static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
{
if (!skip_schedule) {
gro_normal_list(&napi->gro);
__napi_schedule(napi);
return;
}
/* Flush too old packets. If HZ < 1000, flush all packets */
gro_flush_normal(&napi->gro, HZ >= 1000);
clear_bit(NAPI_STATE_SCHED, &napi->state);
}
enum {
NAPI_F_PREFER_BUSY_POLL = 1,
NAPI_F_END_ON_RESCHED = 2,
};
static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
unsigned flags, u16 budget)
{
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
bool skip_schedule = false;
unsigned long timeout;
int rc;
/* Busy polling means there is a high chance device driver hard irq
* could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
* set in napi_schedule_prep().
* Since we are about to call napi->poll() once more, we can safely
* clear NAPI_STATE_MISSED.
*
* Note: x86 could use a single "lock and ..." instruction
* to perform these two clear_bit()
*/
clear_bit(NAPI_STATE_MISSED, &napi->state);
clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
local_bh_disable();
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (flags & NAPI_F_PREFER_BUSY_POLL) {
napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi);
timeout = napi_get_gro_flush_timeout(napi);
if (napi->defer_hard_irqs_count && timeout) {
hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
skip_schedule = true;
}
}
/* All we really want here is to re-enable device interrupts.
* Ideally, a new ndo_busy_poll_stop() could avoid another round.
*/
rc = napi->poll(napi, budget);
/* We can't gro_normal_list() here, because napi->poll() might have
* rearmed the napi (napi_complete_done()) in which case it could
* already be running on another CPU.
*/
trace_napi_poll(napi, rc, budget);
netpoll_poll_unlock(have_poll_lock);
if (rc == budget)
__busy_poll_stop(napi, skip_schedule);
bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
}
static void __napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, unsigned flags, u16 budget)
{
unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
int (*napi_poll)(struct napi_struct *napi, int budget);
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
void *have_poll_lock = NULL;
struct napi_struct *napi;
WARN_ON_ONCE(!rcu_read_lock_held());
restart:
napi_poll = NULL;
napi = napi_by_id(napi_id);
if (!napi)
return;
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_disable();
for (;;) {
int work = 0;
local_bh_disable();
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (!napi_poll) {
unsigned long val = READ_ONCE(napi->state);
/* If multiple threads are competing for this napi,
* we avoid dirtying napi->state as much as we can.
*/
if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
NAPIF_STATE_IN_BUSY_POLL)) {
if (flags & NAPI_F_PREFER_BUSY_POLL)
set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
}
if (cmpxchg(&napi->state, val,
val | NAPIF_STATE_IN_BUSY_POLL |
NAPIF_STATE_SCHED) != val) {
if (flags & NAPI_F_PREFER_BUSY_POLL)
set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
}
have_poll_lock = netpoll_poll_lock(napi);
napi_poll = napi->poll;
}
work = napi_poll(napi, budget);
trace_napi_poll(napi, work, budget);
gro_normal_list(&napi->gro);
count:
if (work > 0)
__NET_ADD_STATS(dev_net(napi->dev),
LINUX_MIB_BUSYPOLLRXPACKETS, work);
skb_defer_free_flush();
bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
if (!loop_end || loop_end(loop_end_arg, start_time))
break;
if (unlikely(need_resched())) {
if (flags & NAPI_F_END_ON_RESCHED)
break;
if (napi_poll)
busy_poll_stop(napi, have_poll_lock, flags, budget);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
rcu_read_unlock();
cond_resched();
rcu_read_lock();
if (loop_end(loop_end_arg, start_time))
return;
goto restart;
}
cpu_relax();
}
if (napi_poll)
busy_poll_stop(napi, have_poll_lock, flags, budget);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
}
void napi_busy_loop_rcu(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{
unsigned flags = NAPI_F_END_ON_RESCHED;
if (prefer_busy_poll)
flags |= NAPI_F_PREFER_BUSY_POLL;
__napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
}
void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{
unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;
rcu_read_lock();
__napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);
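/* Usage sketch (illustrative assumption): busy-poll a napi until either the
* caller's own condition is met or its deadline passes. The loop_end
* callback gets loop_end_arg plus the start time recorded above; my_ctx and
* my_loop_end() are hypothetical, busy_loop_timeout() is the helper from
* net/busy_poll.h.
*
*	static bool my_loop_end(void *arg, unsigned long start_time)
*	{
*		struct my_ctx *ctx = arg;
*
*		return ctx->data_ready || busy_loop_timeout(start_time);
*	}
*
*	napi_busy_loop(napi_id, my_loop_end, ctx, false, 64);
*/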
void napi_suspend_irqs(unsigned int napi_id)
{
struct napi_struct *napi;
rcu_read_lock();
napi = napi_by_id(napi_id);
if (napi) {
unsigned long timeout = napi_get_irq_suspend_timeout(napi);
if (timeout)
hrtimer_start(&napi->timer, ns_to_ktime(timeout),
HRTIMER_MODE_REL_PINNED);
}
rcu_read_unlock();
}
void napi_resume_irqs(unsigned int napi_id)
{
struct napi_struct *napi;
rcu_read_lock();
napi = napi_by_id(napi_id);
if (napi) {
/* If irq_suspend_timeout is set to 0 between the call to
* napi_suspend_irqs and now, the original value still
* determines the safety timeout as intended and napi_watchdog
* will resume irq processing.
*/
if (napi_get_irq_suspend_timeout(napi)) {
local_bh_disable();
napi_schedule(napi);
local_bh_enable();
}
}
rcu_read_unlock();
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
static void __napi_hash_add_with_id(struct napi_struct *napi,
unsigned int napi_id)
{
napi->gro.cached_napi_id = napi_id;
WRITE_ONCE(napi->napi_id, napi_id);
hlist_add_head_rcu(&napi->napi_hash_node,
&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
}
static void napi_hash_add_with_id(struct napi_struct *napi,
unsigned int napi_id)
{
unsigned long flags;
spin_lock_irqsave(&napi_hash_lock, flags);
WARN_ON_ONCE(napi_by_id(napi_id));
__napi_hash_add_with_id(napi, napi_id);
spin_unlock_irqrestore(&napi_hash_lock, flags);
}
static void napi_hash_add(struct napi_struct *napi)
{
unsigned long flags;
if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
return;
spin_lock_irqsave(&napi_hash_lock, flags);
/* 0..NR_CPUS range is reserved for sender_cpu use */
do {
if (unlikely(!napi_id_valid(++napi_gen_id)))
napi_gen_id = MIN_NAPI_ID;
} while (napi_by_id(napi_gen_id));
__napi_hash_add_with_id(napi, napi_gen_id);
spin_unlock_irqrestore(&napi_hash_lock, flags);
}
/* Warning: the caller is responsible for making sure an rcu grace period
* is respected before freeing the memory containing @napi
*/
static void napi_hash_del(struct napi_struct *napi)
{
unsigned long flags;
spin_lock_irqsave(&napi_hash_lock, flags);
hlist_del_init_rcu(&napi->napi_hash_node);
spin_unlock_irqrestore(&napi_hash_lock, flags);
}
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
struct napi_struct *napi;
napi = container_of(timer, struct napi_struct, timer);
/* Note: we use a relaxed variant of napi_schedule_prep() not setting
* NAPI_STATE_MISSED, since we do not react to a device IRQ.
*/
if (!napi_disable_pending(napi) &&
!test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
__napi_schedule_irqoff(napi);
}
return HRTIMER_NORESTART;
}
static void napi_stop_kthread(struct napi_struct *napi)
{
unsigned long val, new;
/* Wait until the napi STATE_THREADED is unset. */
while (true) {
val = READ_ONCE(napi->state);
/* If the napi kthread owns this napi or the napi is idle,
* STATE_THREADED can be unset here.
*/
if ((val & NAPIF_STATE_SCHED_THREADED) ||
!(val & NAPIF_STATE_SCHED)) {
new = val & (~NAPIF_STATE_THREADED);
} else {
msleep(20);
continue;
}
if (try_cmpxchg(&napi->state, &val, new))
break;
}
/* Once STATE_THREADED is unset, wait for SCHED_THREADED to be unset by
* the kthread.
*/
while (true) {
if (!test_bit(NAPI_STATE_SCHED_THREADED, &napi->state))
break;
msleep(20);
}
kthread_stop(napi->thread);
napi->thread = NULL;
}
int napi_set_threaded(struct napi_struct *napi,
enum netdev_napi_threaded threaded)
{
if (threaded) {
if (!napi->thread) {
int err = napi_kthread_create(napi);
if (err)
return err;
}
}
if (napi->config)
napi->config->threaded = threaded;
/* Setting/unsetting threaded mode on a napi might not immediately
* take effect, if the current napi instance is actively being
* polled. In this case, the switch between threaded mode and
* softirq mode will happen in the next round of napi_schedule().
* This should not cause hiccups/stalls to the live traffic.
*/
if (!threaded && napi->thread) {
napi_stop_kthread(napi);
} else {
/* Make sure kthread is created before THREADED bit is set. */
smp_mb__before_atomic();
assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
}
return 0;
}
int netif_set_threaded(struct net_device *dev,
enum netdev_napi_threaded threaded)
{
struct napi_struct *napi;
int i, err = 0;
netdev_assert_locked_or_invisible(dev);
if (threaded) {
list_for_each_entry(napi, &dev->napi_list, dev_list) {
if (!napi->thread) {
err = napi_kthread_create(napi);
if (err) {
threaded = NETDEV_NAPI_THREADED_DISABLED;
break;
}
}
}
}
WRITE_ONCE(dev->threaded, threaded);
/* The error should not occur as the kthreads are already created. */
list_for_each_entry(napi, &dev->napi_list, dev_list)
WARN_ON_ONCE(napi_set_threaded(napi, threaded));
/* Override the config for all NAPIs even if currently not listed */
for (i = 0; i < dev->num_napi_configs; i++)
dev->napi_config[i].threaded = threaded;
return err;
}
/**
* netif_threaded_enable() - enable threaded NAPIs
* @dev: net_device instance
*
* Enable threaded mode for the NAPI instances of the device. This may be useful
* for devices where multiple NAPI instances get scheduled by a single
* interrupt. Threaded NAPI allows moving the NAPI processing to cores other
* than the core where IRQ is mapped.
*
* This function should be called before @dev is registered.
*/
void netif_threaded_enable(struct net_device *dev)
{
WARN_ON_ONCE(netif_set_threaded(dev, NETDEV_NAPI_THREADED_ENABLED));
}
EXPORT_SYMBOL(netif_threaded_enable);
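/* Usage sketch (illustrative assumption): a driver whose single interrupt
* schedules several NAPI instances opts into threaded mode before
* registration, per the comment above.
*
*	netif_threaded_enable(dev);
*	err = register_netdev(dev);
*/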
/**
* netif_queue_set_napi - Associate queue with the napi
* @dev: device to which NAPI and queue belong
* @queue_index: Index of queue
* @type: queue type as RX or TX
* @napi: NAPI context, pass NULL to clear previously set NAPI
*
* Set queue with its corresponding napi context. This should be done after
* registering the NAPI handler for the queue-vector and the queues have been
* mapped to the corresponding interrupt vector.
*/
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type, struct napi_struct *napi)
{
struct netdev_rx_queue *rxq;
struct netdev_queue *txq;
if (WARN_ON_ONCE(napi && !napi->dev))
return;
netdev_ops_assert_locked_or_invisible(dev);
switch (type) {
case NETDEV_QUEUE_TYPE_RX:
rxq = __netif_get_rx_queue(dev, queue_index);
rxq->napi = napi;
return;
case NETDEV_QUEUE_TYPE_TX:
txq = netdev_get_tx_queue(dev, queue_index);
txq->napi = napi;
return;
default:
return;
}
}
EXPORT_SYMBOL(netif_queue_set_napi);
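/* Usage sketch (illustrative assumption): after adding the NAPI and mapping
* the interrupt, a driver ties queue 0 in both directions to its combined
* NAPI; passing NULL later clears the association. priv is hypothetical.
*
*	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_RX, &priv->napi);
*	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_TX, &priv->napi);
*/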
static void
netif_napi_irq_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct napi_struct *napi =
container_of(notify, struct napi_struct, notify);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap;
int err;
#endif
if (napi->config && napi->dev->irq_affinity_auto)
cpumask_copy(&napi->config->affinity_mask, mask);
#ifdef CONFIG_RFS_ACCEL
if (napi->dev->rx_cpu_rmap_auto) {
err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask);
if (err)
netdev_warn(napi->dev, "RMAP update failed (%d)\n",
err);
}
#endif
}
#ifdef CONFIG_RFS_ACCEL
static void netif_napi_affinity_release(struct kref *ref)
{
struct napi_struct *napi =
container_of(ref, struct napi_struct, notify.kref);
struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap;
netdev_assert_locked(napi->dev);
WARN_ON(test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER,
&napi->state));
if (!napi->dev->rx_cpu_rmap_auto)
return;
rmap->obj[napi->napi_rmap_idx] = NULL;
napi->napi_rmap_idx = -1;
cpu_rmap_put(rmap);
}
int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs)
{
if (dev->rx_cpu_rmap_auto)
return 0;
dev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_irqs);
if (!dev->rx_cpu_rmap)
return -ENOMEM;
dev->rx_cpu_rmap_auto = true;
return 0;
}
EXPORT_SYMBOL(netif_enable_cpu_rmap);
static void netif_del_cpu_rmap(struct net_device *dev)
{
struct cpu_rmap *rmap = dev->rx_cpu_rmap;
if (!dev->rx_cpu_rmap_auto)
return;
/* Free the rmap */
cpu_rmap_put(rmap);
dev->rx_cpu_rmap = NULL;
dev->rx_cpu_rmap_auto = false;
}
#else
static void netif_napi_affinity_release(struct kref *ref)
{
}
int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs)
{
return 0;
}
EXPORT_SYMBOL(netif_enable_cpu_rmap);
static void netif_del_cpu_rmap(struct net_device *dev)
{
}
#endif
void netif_set_affinity_auto(struct net_device *dev)
{
unsigned int i, maxqs, numa;
maxqs = max(dev->num_tx_queues, dev->num_rx_queues);
numa = dev_to_node(&dev->dev);
for (i = 0; i < maxqs; i++)
cpumask_set_cpu(cpumask_local_spread(i, numa),
&dev->napi_config[i].affinity_mask);
dev->irq_affinity_auto = true;
}
EXPORT_SYMBOL(netif_set_affinity_auto);
void netif_napi_set_irq_locked(struct napi_struct *napi, int irq)
{
int rc;
netdev_assert_locked_or_invisible(napi->dev);
if (napi->irq == irq)
return;
/* Remove existing resources */
if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state))
irq_set_affinity_notifier(napi->irq, NULL);
napi->irq = irq;
if (irq < 0 ||
(!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto))
return;
/* Abort for buggy drivers */
if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config))
return;
#ifdef CONFIG_RFS_ACCEL
if (napi->dev->rx_cpu_rmap_auto) {
rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi);
if (rc < 0)
return;
cpu_rmap_get(napi->dev->rx_cpu_rmap);
napi->napi_rmap_idx = rc;
}
#endif
/* Use core IRQ notifier */
napi->notify.notify = netif_napi_irq_notify;
napi->notify.release = netif_napi_affinity_release;
rc = irq_set_affinity_notifier(irq, &napi->notify);
if (rc) {
netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n",
rc);
goto put_rmap;
}
set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state);
return;
put_rmap:
#ifdef CONFIG_RFS_ACCEL
if (napi->dev->rx_cpu_rmap_auto) {
napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL;
cpu_rmap_put(napi->dev->rx_cpu_rmap);
napi->napi_rmap_idx = -1;
}
#endif
napi->notify.notify = NULL;
napi->notify.release = NULL;
}
EXPORT_SYMBOL(netif_napi_set_irq_locked);
static void napi_restore_config(struct napi_struct *n)
{
n->defer_hard_irqs = n->config->defer_hard_irqs;
n->gro_flush_timeout = n->config->gro_flush_timeout;
n->irq_suspend_timeout = n->config->irq_suspend_timeout;
if (n->dev->irq_affinity_auto &&
test_bit(NAPI_STATE_HAS_NOTIFIER, &n->state))
irq_set_affinity(n->irq, &n->config->affinity_mask);
/* A NAPI ID might be stored in the config; if so, use it. If not, use
* napi_hash_add to generate one for us.
*/
if (n->config->napi_id) {
napi_hash_add_with_id(n, n->config->napi_id);
} else {
napi_hash_add(n);
n->config->napi_id = n->napi_id;
}
WARN_ON_ONCE(napi_set_threaded(n, n->config->threaded));
}
static void napi_save_config(struct napi_struct *n)
{
n->config->defer_hard_irqs = n->defer_hard_irqs;
n->config->gro_flush_timeout = n->gro_flush_timeout;
n->config->irq_suspend_timeout = n->irq_suspend_timeout;
napi_hash_del(n);
}
/* Netlink wants the NAPI list to be sorted by ID; if adding a NAPI which will
* inherit an existing ID, try to insert it at the right position.
*/
static void
netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
{
unsigned int new_id, pos_id;
struct list_head *higher;
struct napi_struct *pos;
new_id = UINT_MAX;
if (napi->config && napi->config->napi_id)
new_id = napi->config->napi_id;
higher = &dev->napi_list;
list_for_each_entry(pos, &dev->napi_list, dev_list) {
if (napi_id_valid(pos->napi_id))
pos_id = pos->napi_id;
else if (pos->config)
pos_id = pos->config->napi_id;
else
pos_id = UINT_MAX;
if (pos_id <= new_id)
break;
higher = &pos->dev_list;
}
list_add_rcu(&napi->dev_list, higher); /* adds after higher */
}
/* Double check that napi_get_frags() allocates skbs with
* skb->head being backed by slab, not a page fragment.
* This is to make sure the bug fixed in 3226b158e67c
* ("net: avoid 32 x truesize under-estimation for tiny skbs")
* does not accidentally come back.
*/
static void napi_get_frags_check(struct napi_struct *napi)
{
struct sk_buff *skb;
local_bh_disable();
skb = napi_get_frags(napi);
WARN_ON_ONCE(skb && skb->head_frag);
napi_free_frags(napi);
local_bh_enable();
}
void netif_napi_add_weight_locked(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight)
{
netdev_assert_locked(dev);
if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
return;
INIT_LIST_HEAD(&napi->poll_list);
INIT_HLIST_NODE(&napi->napi_hash_node);
hrtimer_setup(&napi->timer, napi_watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
gro_init(&napi->gro);
napi->skb = NULL;
napi->poll = poll;
if (weight > NAPI_POLL_WEIGHT)
netdev_err_once(dev, "%s() called with weight %d\n", __func__,
weight);
napi->weight = weight;
napi->dev = dev;
#ifdef CONFIG_NETPOLL
napi->poll_owner = -1;
#endif
napi->list_owner = -1;
set_bit(NAPI_STATE_SCHED, &napi->state);
set_bit(NAPI_STATE_NPSVC, &napi->state);
netif_napi_dev_list_add(dev, napi);
/* default settings from sysfs are applied to all NAPIs. any per-NAPI
* configuration will be loaded in napi_enable
*/
napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs));
napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout));
napi_get_frags_check(napi);
/* Create kthread for this napi if dev->threaded is set.
* Clear dev->threaded if kthread creation failed so that
* threaded mode will not be enabled in napi_enable().
*/
if (napi_get_threaded_config(dev, napi))
if (napi_kthread_create(napi))
dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
netif_napi_set_irq_locked(napi, -1);
}
EXPORT_SYMBOL(netif_napi_add_weight_locked);
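/* Usage sketch (illustrative assumption): registering a NAPI with an
* explicit weight while holding the instance lock, as this variant
* requires. priv and my_poll() are hypothetical.
*
*	netdev_lock(dev);
*	netif_napi_add_weight_locked(dev, &priv->napi, my_poll,
*				     NAPI_POLL_WEIGHT);
*	netdev_unlock(dev);
*/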
void napi_disable_locked(struct napi_struct *n)
{
unsigned long val, new;
might_sleep();
netdev_assert_locked(n->dev);
set_bit(NAPI_STATE_DISABLE, &n->state);
val = READ_ONCE(n->state);
do {
while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
usleep_range(20, 200);
val = READ_ONCE(n->state);
}
new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
} while (!try_cmpxchg(&n->state, &val, new));
hrtimer_cancel(&n->timer);
if (n->config)
napi_save_config(n);
else
napi_hash_del(n);
clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable_locked);
/**
* napi_disable() - prevent NAPI from scheduling
* @n: NAPI context
*
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
* Takes netdev_lock() for associated net_device.
*/
void napi_disable(struct napi_struct *n)
{
netdev_lock(n->dev);
napi_disable_locked(n);
netdev_unlock(n->dev);
}
EXPORT_SYMBOL(napi_disable);
void napi_enable_locked(struct napi_struct *n)
{
unsigned long new, val = READ_ONCE(n->state);
if (n->config)
napi_restore_config(n);
else
napi_hash_add(n);
do {
BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
if (n->dev->threaded && n->thread)
new |= NAPIF_STATE_THREADED;
} while (!try_cmpxchg(&n->state, &val, new));
}
EXPORT_SYMBOL(napi_enable_locked);
/**
* napi_enable() - enable NAPI scheduling
* @n: NAPI context
*
* Enable scheduling of a NAPI instance.
* Must be paired with napi_disable().
* Takes netdev_lock() for associated net_device.
*/
void napi_enable(struct napi_struct *n)
{
netdev_lock(n->dev);
napi_enable_locked(n);
netdev_unlock(n->dev);
}
EXPORT_SYMBOL(napi_enable);
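/* Pairing sketch (illustrative assumption): napi_enable()/napi_disable()
* typically bracket the device's open and stop paths. my_priv is
* hypothetical.
*
*	static int my_open(struct net_device *dev)
*	{
*		struct my_priv *priv = netdev_priv(dev);
*
*		napi_enable(&priv->napi);
*		return 0;
*	}
*
*	static int my_stop(struct net_device *dev)
*	{
*		struct my_priv *priv = netdev_priv(dev);
*
*		napi_disable(&priv->napi);
*		return 0;
*	}
*/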
/* Must be called in process context */
void __netif_napi_del_locked(struct napi_struct *napi)
{
netdev_assert_locked(napi->dev);
if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
return;
/* Make sure NAPI is disabled (or was never enabled). */
WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state))
irq_set_affinity_notifier(napi->irq, NULL);
if (napi->config) {
napi->index = -1;
napi->config = NULL;
}
list_del_rcu(&napi->dev_list);
napi_free_frags(napi);
gro_cleanup(&napi->gro);
if (napi->thread) {
kthread_stop(napi->thread);
napi->thread = NULL;
}
}
EXPORT_SYMBOL(__netif_napi_del_locked);
static int __napi_poll(struct napi_struct *n, bool *repoll)
{
int work, weight;
weight = n->weight;
/* This NAPI_STATE_SCHED test is for avoiding a race
* with netpoll's poll_napi(). Only the entity which
* obtains the lock and sees NAPI_STATE_SCHED set will
* actually make the ->poll() call. Therefore we avoid
* accidentally calling ->poll() when NAPI is not scheduled.
*/
work = 0;
if (napi_is_scheduled(n)) {
work = n->poll(n, weight);
trace_napi_poll(n, work, weight);
xdp_do_check_flushed(n);
}
if (unlikely(work > weight))
netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
n->poll, work, weight);
if (likely(work < weight))
return work;
/* Drivers must not modify the NAPI state if they
* consume the entire weight. In such cases this code
* still "owns" the NAPI instance and therefore can
* move the instance around on the list at-will.
*/
if (unlikely(napi_disable_pending(n))) {
napi_complete(n);
return work;
}
/* The NAPI context has more processing work, but busy-polling
* is preferred. Exit early.
*/
if (napi_prefer_busy_poll(n)) {
if (napi_complete_done(n, work)) {
/* If timeout is not set, we need to make sure
* that the NAPI is re-scheduled.
*/
napi_schedule(n);
}
return work;
}
/* Flush too old packets. If HZ < 1000, flush all packets */
gro_flush_normal(&n->gro, HZ >= 1000);
/* Some drivers may have called napi_schedule
* prior to exhausting their budget.
*/
if (unlikely(!list_empty(&n->poll_list))) {
pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
n->dev ? n->dev->name : "backlog");
return work;
}
*repoll = true;
return work;
}
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
bool do_repoll = false;
void *have;
int work;
list_del_init(&n->poll_list);
have = netpoll_poll_lock(n);
work = __napi_poll(n, &do_repoll);
if (do_repoll) {
#if defined(CONFIG_DEBUG_NET)
if (unlikely(!napi_is_scheduled(n)))
pr_crit("repoll requested for device %s %ps but napi is not scheduled.\n",
n->dev->name, n->poll);
#endif
list_add_tail(&n->poll_list, repoll);
}
netpoll_poll_unlock(have);
return work;
}
static int napi_thread_wait(struct napi_struct *napi)
{
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
/* Testing SCHED_THREADED bit here to make sure the current
* kthread owns this napi and could poll on this napi.
* Testing SCHED bit is not enough because SCHED bit might be
* set by some other busy poll thread or by napi_disable().
*/
if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
WARN_ON(!list_empty(&napi->poll_list));
__set_current_state(TASK_RUNNING);
return 0;
}
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
return -1;
}
static void napi_threaded_poll_loop(struct napi_struct *napi)
{
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct softnet_data *sd;
unsigned long last_qs = jiffies;
for (;;) {
bool repoll = false;
void *have;
local_bh_disable();
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
sd = this_cpu_ptr(&softnet_data);
sd->in_napi_threaded_poll = true;
have = netpoll_poll_lock(napi);
__napi_poll(napi, &repoll);
netpoll_poll_unlock(have);
sd->in_napi_threaded_poll = false;
barrier();
if (sd_has_rps_ipi_waiting(sd)) {
local_irq_disable();
net_rps_action_and_irq_enable(sd);
}
skb_defer_free_flush();
bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
if (!repoll)
break;
rcu_softirq_qs_periodic(last_qs);
cond_resched();
}
}
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
while (!napi_thread_wait(napi))
napi_threaded_poll_loop(napi);
return 0;
}
static __latent_entropy void net_rx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
int budget = READ_ONCE(net_hotdata.netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
start:
sd->in_net_rx_action = true;
local_irq_disable();
list_splice_init(&sd->poll_list, &list);
local_irq_enable();
for (;;) {
struct napi_struct *n;
skb_defer_free_flush();
if (list_empty(&list)) {
if (list_empty(&repoll)) {
sd->in_net_rx_action = false;
barrier();
/* We need to check if ____napi_schedule()
* had refilled poll_list while
* sd->in_net_rx_action was true.
*/
if (!list_empty(&sd->poll_list))
goto start;
if (!sd_has_rps_ipi_waiting(sd))
goto end;
}
break;
}
n = list_first_entry(&list, struct napi_struct, poll_list);
budget -= napi_poll(n, &repoll);
/* If softirq window is exhausted then punt.
* Allow this to run for 2 jiffies, which allows
* an average latency of 1.5/HZ.
*/
if (unlikely(budget <= 0 ||
time_after_eq(jiffies, time_limit))) {
/* Pairs with READ_ONCE() in softnet_seq_show() */
WRITE_ONCE(sd->time_squeeze, sd->time_squeeze + 1);
break;
}
}
local_irq_disable();
list_splice_tail_init(&sd->poll_list, &list);
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
else
sd->in_net_rx_action = false;
net_rps_action_and_irq_enable(sd);
end:
bpf_net_ctx_clear(bpf_net_ctx);
}
struct netdev_adjacent {
struct net_device *dev;
netdevice_tracker dev_tracker;
/* upper master flag, there can only be one master device per list */
bool master;
/* lookup ignore flag */
bool ignore;
/* counter for the number of times this device was added to us */
u16 ref_nr;
/* private field for the users */
void *private;
struct list_head list;
struct rcu_head rcu;
};
static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
struct list_head *adj_list)
{
struct netdev_adjacent *adj;
list_for_each_entry(adj, adj_list, list) {
if (adj->dev == adj_dev)
return adj;
}
return NULL;
}
static int ____netdev_has_upper_dev(struct net_device *upper_dev,
struct netdev_nested_priv *priv)
{
struct net_device *dev = (struct net_device *)priv->data;
return upper_dev == dev;
}
/**
* netdev_has_upper_dev - Check if device is linked to an upper device
* @dev: device
* @upper_dev: upper device to check
*
* Find out if a device is linked to specified upper device and return true
* in case it is. Note that this checks only immediate upper device,
* not through a complete stack of devices. The caller must hold the RTNL lock.
*/
bool netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev)
{
struct netdev_nested_priv priv = {
.data = (void *)upper_dev,
};
ASSERT_RTNL();
return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
&priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
/**
* netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
* @dev: device
* @upper_dev: upper device to check
*
* Find out if a device is linked to specified upper device and return true
* in case it is. Note that this checks the entire upper device chain.
* The caller must hold rcu lock.
*/
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev)
{
struct netdev_nested_priv priv = {
.data = (void *)upper_dev,
};
return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
&priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
/**
* netdev_has_any_upper_dev - Check if device is linked to some device
* @dev: device
*
* Find out if a device is linked to an upper device and return true in case
* it is. The caller must hold the RTNL lock.
*/
bool netdev_has_any_upper_dev(struct net_device *dev)
{
ASSERT_RTNL();
return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
* netdev_master_upper_dev_get - Get master upper device
* @dev: device
*
* Find a master upper device and return pointer to it or NULL in case
* it's not there. The caller must hold the RTNL lock.
*/
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
struct netdev_adjacent *upper;
ASSERT_RTNL();
if (list_empty(&dev->adj_list.upper))
return NULL;
upper = list_first_entry(&dev->adj_list.upper,
struct netdev_adjacent, list);
if (likely(upper->master))
return upper->dev;
return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{
struct netdev_adjacent *upper;
ASSERT_RTNL();
if (list_empty(&dev->adj_list.upper))
return NULL;
upper = list_first_entry(&dev->adj_list.upper,
struct netdev_adjacent, list);
if (likely(upper->master) && !upper->ignore)
return upper->dev;
return NULL;
}
/**
* netdev_has_any_lower_dev - Check if device is linked to some device
* @dev: device
*
* Find out if a device is linked to a lower device and return true in case
* it is. The caller must hold the RTNL lock.
*/
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
ASSERT_RTNL();
return !list_empty(&dev->adj_list.lower);
}
void *netdev_adjacent_get_private(struct list_head *adj_list)
{
struct netdev_adjacent *adj;
adj = list_entry(adj_list, struct netdev_adjacent, list);
return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
/**
* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next device from the dev's upper list, starting from iter
* position. The caller must hold RCU read lock.
*/
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *upper;
WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
if (&upper->list == &dev->adj_list.upper)
return NULL;
*iter = &upper->list;
return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
struct list_head **iter,
bool *ignore)
{
struct netdev_adjacent *upper;
upper = list_entry((*iter)->next, struct netdev_adjacent, list);
if (&upper->list == &dev->adj_list.upper)
return NULL;
*iter = &upper->list;
*ignore = upper->ignore;
return upper->dev;
}
static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *upper;
WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
if (&upper->list == &dev->adj_list.upper)
return NULL;
*iter = &upper->list;
return upper->dev;
}
static int __netdev_walk_all_upper_dev(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{
struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
int ret, cur = 0;
bool ignore;
now = dev;
iter = &dev->adj_list.upper;
while (1) {
if (now != dev) {
ret = fn(now, priv);
if (ret)
return ret;
}
next = NULL;
while (1) {
udev = __netdev_next_upper_dev(now, &iter, &ignore);
if (!udev)
break;
if (ignore)
continue;
next = udev;
niter = &udev->adj_list.upper;
dev_stack[cur] = now;
iter_stack[cur++] = iter;
break;
}
if (!next) {
if (!cur)
return 0;
next = dev_stack[--cur];
niter = iter_stack[cur];
}
now = next;
iter = niter;
}
return 0;
}
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{
struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
int ret, cur = 0;
now = dev;
iter = &dev->adj_list.upper;
while (1) {
if (now != dev) {
ret = fn(now, priv);
if (ret)
return ret;
}
next = NULL;
while (1) {
udev = netdev_next_upper_dev_rcu(now, &iter);
if (!udev)
break;
next = udev;
niter = &udev->adj_list.upper;
dev_stack[cur] = now;
iter_stack[cur++] = iter;
break;
}
if (!next) {
if (!cur)
return 0;
next = dev_stack[--cur];
niter = iter_stack[cur];
}
now = next;
iter = niter;
}
return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
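/* Usage sketch (illustrative assumption): a caller-defined walk over the
* whole upper graph under RCU; ____netdev_has_upper_dev() above is the
* in-tree callback this mirrors. my_count_uppers() is hypothetical, and a
* non-zero return from the callback stops the walk.
*
*	static int my_count_uppers(struct net_device *dev,
*				   struct netdev_nested_priv *priv)
*	{
*		(*(int *)priv->data)++;
*		return 0;
*	}
*
*	int n = 0;
*	struct netdev_nested_priv priv = { .data = &n };
*
*	rcu_read_lock();
*	netdev_walk_all_upper_dev_rcu(dev, my_count_uppers, &priv);
*	rcu_read_unlock();
*/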
static bool __netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev)
{
struct netdev_nested_priv priv = {
.flags = 0,
.data = (void *)upper_dev,
};
ASSERT_RTNL();
return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
&priv);
}
/**
* netdev_lower_get_next_private - Get the next ->private from the
* lower neighbour list
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next netdev_adjacent->private from the dev's lower neighbour
* list, starting from iter position. The caller must either hold the
* RTNL lock or its own locking that guarantees that the neighbour lower
* list will remain unchanged.
*/
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *lower;
lower = list_entry(*iter, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = lower->list.next;
return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);
/**
* netdev_lower_get_next_private_rcu - Get the next ->private from the
* lower neighbour list, RCU
* variant
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next netdev_adjacent->private from the dev's lower neighbour
* list, starting from iter position. The caller must hold RCU read lock.
*/
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *lower;
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = &lower->list;
return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
* netdev_lower_get_next - Get the next device from the lower neighbour
* list
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next netdev_adjacent from the dev's lower neighbour
* list, starting from iter position. The caller must hold RTNL lock or
* its own locking that guarantees that the neighbour lower
* list will remain unchanged.
*/
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
struct netdev_adjacent *lower;
lower = list_entry(*iter, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = lower->list.next;
return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);
static struct net_device *netdev_next_lower_dev(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *lower;
lower = list_entry((*iter)->next, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = &lower->list;
return lower->dev;
}
static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
struct list_head **iter,
bool *ignore)
{
struct netdev_adjacent *lower;
lower = list_entry((*iter)->next, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = &lower->list;
*ignore = lower->ignore;
return lower->dev;
}
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{
struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
int ret, cur = 0;
now = dev;
iter = &dev->adj_list.lower;
while (1) {
if (now != dev) {
ret = fn(now, priv);
if (ret)
return ret;
}
next = NULL;
while (1) {
ldev = netdev_next_lower_dev(now, &iter);
if (!ldev)
break;
next = ldev;
niter = &ldev->adj_list.lower;
dev_stack[cur] = now;
iter_stack[cur++] = iter;
break;
}
if (!next) {
if (!cur)
return 0;
next = dev_stack[--cur];
niter = iter_stack[cur];
}
now = next;
iter = niter;
}
return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
static int __netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{
struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
int ret, cur = 0;
bool ignore;
now = dev;
iter = &dev->adj_list.lower;
while (1) {
if (now != dev) {
ret = fn(now, priv);
if (ret)
return ret;
}
next = NULL;
while (1) {
ldev = __netdev_next_lower_dev(now, &iter, &ignore);
if (!ldev)
break;
if (ignore)
continue;
next = ldev;
niter = &ldev->adj_list.lower;
dev_stack[cur] = now;
iter_stack[cur++] = iter;
break;
}
if (!next) {
if (!cur)
return 0;
next = dev_stack[--cur];
niter = iter_stack[cur];
}
now = next;
iter = niter;
}
return 0;
}
struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *lower;
lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
*iter = &lower->list;
return lower->dev;
}
EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
static u8 __netdev_upper_depth(struct net_device *dev)
{
struct net_device *udev;
struct list_head *iter;
u8 max_depth = 0;
bool ignore;
for (iter = &dev->adj_list.upper,
udev = __netdev_next_upper_dev(dev, &iter, &ignore);
udev;
udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
if (ignore)
continue;
if (max_depth < udev->upper_level)
max_depth = udev->upper_level;
}
return max_depth;
}
static u8 __netdev_lower_depth(struct net_device *dev)
{
struct net_device *ldev;
struct list_head *iter;
u8 max_depth = 0;
bool ignore;
for (iter = &dev->adj_list.lower,
ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
ldev;
ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
if (ignore)
continue;
if (max_depth < ldev->lower_level)
max_depth = ldev->lower_level;
}
return max_depth;
}
static int __netdev_update_upper_level(struct net_device *dev,
struct netdev_nested_priv *__unused)
{
dev->upper_level = __netdev_upper_depth(dev) + 1;
return 0;
}
#ifdef CONFIG_LOCKDEP
static LIST_HEAD(net_unlink_list);
static void net_unlink_todo(struct net_device *dev)
{
if (list_empty(&dev->unlink_list))
list_add_tail(&dev->unlink_list, &net_unlink_list);
}
#endif
static int __netdev_update_lower_level(struct net_device *dev,
struct netdev_nested_priv *priv)
{
dev->lower_level = __netdev_lower_depth(dev) + 1;
#ifdef CONFIG_LOCKDEP
if (!priv)
return 0;
if (priv->flags & NESTED_SYNC_IMM)
dev->nested_level = dev->lower_level - 1;
if (priv->flags & NESTED_SYNC_TODO)
net_unlink_todo(dev);
#endif
return 0;
}
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{
struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
int ret, cur = 0;
now = dev;
iter = &dev->adj_list.lower;
while (1) {
if (now != dev) {
ret = fn(now, priv);
if (ret)
return ret;
}
next = NULL;
while (1) {
ldev = netdev_next_lower_dev_rcu(now, &iter);
if (!ldev)
break;
next = ldev;
niter = &ldev->adj_list.lower;
dev_stack[cur] = now;
iter_stack[cur++] = iter;
break;
}
if (!next) {
if (!cur)
return 0;
next = dev_stack[--cur];
niter = iter_stack[cur];
}
now = next;
iter = niter;
}
return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
/**
* netdev_lower_get_first_private_rcu - Get the first ->private from the
* lower neighbour list, RCU
* variant
* @dev: device
*
* Gets the first netdev_adjacent->private from the dev's lower neighbour
* list. The caller must hold RCU read lock.
*/
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
struct netdev_adjacent *lower;
lower = list_first_or_null_rcu(&dev->adj_list.lower,
struct netdev_adjacent, list);
if (lower)
return lower->private;
return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
/**
* netdev_master_upper_dev_get_rcu - Get master upper device
* @dev: device
*
* Find a master upper device and return pointer to it or NULL in case
* it's not there. The caller must hold the RCU read lock.
*/
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
struct netdev_adjacent *upper;
upper = list_first_or_null_rcu(&dev->adj_list.upper,
struct netdev_adjacent, list);
if (upper && likely(upper->master))
return upper->dev;
return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
static int netdev_adjacent_sysfs_add(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list)
{
char linkname[IFNAMSIZ+7];
sprintf(linkname, dev_list == &dev->adj_list.upper ?
"upper_%s" : "lower_%s", adj_dev->name);
return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
linkname);
}
static void netdev_adjacent_sysfs_del(struct net_device *dev,
char *name,
struct list_head *dev_list)
{
char linkname[IFNAMSIZ+7];
sprintf(linkname, dev_list == &dev->adj_list.upper ?
"upper_%s" : "lower_%s", name);
sysfs_remove_link(&(dev->dev.kobj), linkname);
}
static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list)
{
return (dev_list == &dev->adj_list.upper ||
dev_list == &dev->adj_list.lower) &&
net_eq(dev_net(dev), dev_net(adj_dev));
}
static int __netdev_adjacent_dev_insert(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list,
void *private, bool master)
{
struct netdev_adjacent *adj;
int ret;
adj = __netdev_find_adj(adj_dev, dev_list);
if (adj) {
adj->ref_nr += 1;
pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
dev->name, adj_dev->name, adj->ref_nr);
return 0;
}
adj = kmalloc(sizeof(*adj), GFP_KERNEL);
if (!adj)
return -ENOMEM;
adj->dev = adj_dev;
adj->master = master;
adj->ref_nr = 1;
adj->private = private;
adj->ignore = false;
netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
if (ret)
goto free_adj;
}
/* Ensure that master link is always the first item in list. */
if (master) {
ret = sysfs_create_link(&(dev->dev.kobj),
&(adj_dev->dev.kobj), "master");
if (ret)
goto remove_symlinks;
list_add_rcu(&adj->list, dev_list);
} else {
list_add_tail_rcu(&adj->list, dev_list);
}
return 0;
remove_symlinks:
if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
netdev_put(adj_dev, &adj->dev_tracker);
kfree(adj);
return ret;
}
static void __netdev_adjacent_dev_remove(struct net_device *dev,
struct net_device *adj_dev,
u16 ref_nr,
struct list_head *dev_list)
{
struct netdev_adjacent *adj;
pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
dev->name, adj_dev->name, ref_nr);
adj = __netdev_find_adj(adj_dev, dev_list);
if (!adj) {
pr_err("Adjacency does not exist for device %s from %s\n",
dev->name, adj_dev->name);
WARN_ON(1);
return;
}
if (adj->ref_nr > ref_nr) {
pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
dev->name, adj_dev->name, ref_nr,
adj->ref_nr - ref_nr);
adj->ref_nr -= ref_nr;
return;
}
if (adj->master)
sysfs_remove_link(&(dev->dev.kobj), "master");
if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
list_del_rcu(&adj->list);
pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
adj_dev->name, dev->name, adj_dev->name);
netdev_put(adj_dev, &adj->dev_tracker);
kfree_rcu(adj, rcu);
}
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
struct net_device *upper_dev,
struct list_head *up_list,
struct list_head *down_list,
void *private, bool master)
{
int ret;
ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
private, master);
if (ret)
return ret;
ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
private, false);
if (ret) {
__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
return ret;
}
return 0;
}
static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
struct net_device *upper_dev,
u16 ref_nr,
struct list_head *up_list,
struct list_head *down_list)
{
__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}
static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
struct net_device *upper_dev,
void *private, bool master)
{
return __netdev_adjacent_dev_link_lists(dev, upper_dev,
&dev->adj_list.upper,
&upper_dev->adj_list.lower,
private, master);
}
static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
struct net_device *upper_dev)
{
__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
&dev->adj_list.upper,
&upper_dev->adj_list.lower);
}
static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master,
void *upper_priv, void *upper_info,
struct netdev_nested_priv *priv,
struct netlink_ext_ack *extack)
{
struct netdev_notifier_changeupper_info changeupper_info = {
.info = {
.dev = dev,
.extack = extack,
},
.upper_dev = upper_dev,
.master = master,
.linking = true,
.upper_info = upper_info,
};
struct net_device *master_dev;
int ret = 0;
ASSERT_RTNL();
if (dev == upper_dev)
return -EBUSY;
/* To prevent loops, check that dev is not an upper device of upper_dev. */
if (__netdev_has_upper_dev(upper_dev, dev))
return -EBUSY;
if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
return -EMLINK;
if (!master) {
if (__netdev_has_upper_dev(dev, upper_dev))
return -EEXIST;
} else {
master_dev = __netdev_master_upper_dev_get(dev);
if (master_dev)
return master_dev == upper_dev ? -EEXIST : -EBUSY;
}
ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
&changeupper_info.info);
ret = notifier_to_errno(ret);
if (ret)
return ret;
ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
master);
if (ret)
return ret;
ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
&changeupper_info.info);
ret = notifier_to_errno(ret);
if (ret)
goto rollback;
__netdev_update_upper_level(dev, NULL);
__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
__netdev_update_lower_level(upper_dev, priv);
__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
priv);
return 0;
rollback:
__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
return ret;
}
/**
* netdev_upper_dev_link - Add a link to the upper device
* @dev: device
* @upper_dev: new upper device
* @extack: netlink extended ack
*
* Adds a link to device which is upper to this one. The caller must hold
* the RTNL lock. On a failure a negative errno code is returned.
* On success the reference counts are adjusted and the function
* returns zero.
*/
int netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
{
struct netdev_nested_priv priv = {
.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
.data = NULL,
};
return __netdev_upper_dev_link(dev, upper_dev, false,
NULL, NULL, &priv, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
/**
* netdev_master_upper_dev_link - Add a master link to the upper device
* @dev: device
* @upper_dev: new upper device
* @upper_priv: upper device private
* @upper_info: upper info to be passed down via notifier
* @extack: netlink extended ack
*
* Adds a link to device which is upper to this one. In this case, only
* one master upper device can be linked, although other non-master devices
* might be linked as well. The caller must hold the RTNL lock.
* On a failure a negative errno code is returned. On success the reference
* counts are adjusted and the function returns zero.
*/
int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev,
void *upper_priv, void *upper_info,
struct netlink_ext_ack *extack)
{
struct netdev_nested_priv priv = {
.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
.data = NULL,
};
return __netdev_upper_dev_link(dev, upper_dev, true,
upper_priv, upper_info, &priv, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
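/*
 * Illustrative sketch, not part of the original file: how a bonding-like
 * master driver might link a newly enslaved device using the helper above.
 * The names (example_enslave, master_dev, slave_dev) are hypothetical; the
 * caller is assumed to hold RTNL.
 */
static inline int example_enslave(struct net_device *master_dev,
				  struct net_device *slave_dev,
				  struct netlink_ext_ack *extack)
{
	/* slave_dev gets master_dev as its one and only master upper device;
	 * netdev_upper_dev_link() would be used for a non-master relationship.
	 */
	return netdev_master_upper_dev_link(slave_dev, master_dev,
					    NULL, NULL, extack);
}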
static void __netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev,
struct netdev_nested_priv *priv)
{
struct netdev_notifier_changeupper_info changeupper_info = {
.info = {
.dev = dev,
},
.upper_dev = upper_dev,
.linking = false,
};
ASSERT_RTNL();
changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
&changeupper_info.info);
__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
&changeupper_info.info);
__netdev_update_upper_level(dev, NULL);
__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
__netdev_update_lower_level(upper_dev, priv);
__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
priv);
}
/**
* netdev_upper_dev_unlink - Removes a link to upper device
* @dev: device
* @upper_dev: upper device to unlink
*
* Removes a link to device which is upper to this one. The caller must hold
* the RTNL lock.
*/
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{
struct netdev_nested_priv priv = {
.flags = NESTED_SYNC_TODO,
.data = NULL,
};
__netdev_upper_dev_unlink(dev, upper_dev, &priv);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
struct net_device *lower_dev,
bool val)
{
struct netdev_adjacent *adj;
adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
if (adj)
adj->ignore = val;
adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
if (adj)
adj->ignore = val;
}
static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
struct net_device *lower_dev)
{
__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
}
static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
struct net_device *lower_dev)
{
__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
}
int netdev_adjacent_change_prepare(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct netdev_nested_priv priv = {
.flags = 0,
.data = NULL,
};
int err;
if (!new_dev)
return 0;
if (old_dev && new_dev != old_dev)
netdev_adjacent_dev_disable(dev, old_dev);
err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
extack);
if (err) {
if (old_dev && new_dev != old_dev)
netdev_adjacent_dev_enable(dev, old_dev);
return err;
}
return 0;
}
EXPORT_SYMBOL(netdev_adjacent_change_prepare);
void netdev_adjacent_change_commit(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev)
{
struct netdev_nested_priv priv = {
.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
.data = NULL,
};
if (!new_dev || !old_dev)
return;
if (new_dev == old_dev)
return;
netdev_adjacent_dev_enable(dev, old_dev);
__netdev_upper_dev_unlink(old_dev, dev, &priv);
}
EXPORT_SYMBOL(netdev_adjacent_change_commit);
void netdev_adjacent_change_abort(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev)
{
struct netdev_nested_priv priv = {
.flags = 0,
.data = NULL,
};
if (!new_dev)
return;
if (old_dev && new_dev != old_dev)
netdev_adjacent_dev_enable(dev, old_dev);
__netdev_upper_dev_unlink(new_dev, dev, &priv);
}
EXPORT_SYMBOL(netdev_adjacent_change_abort);
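/*
 * Illustrative sketch, not part of the original file: the intended
 * prepare/commit/abort sequence when @dev switches an adjacency from
 * @old_dev to @new_dev. Names are hypothetical; RTNL is assumed held.
 */
static inline int example_switch_adjacency(struct net_device *dev,
					   struct net_device *old_dev,
					   struct net_device *new_dev,
					   struct netlink_ext_ack *extack)
{
	int err;

	err = netdev_adjacent_change_prepare(old_dev, new_dev, dev, extack);
	if (err)	/* prepare already restored old_dev's adjacency */
		return err;

	err = 0;	/* ... device-specific switch would happen here ... */
	if (err) {
		netdev_adjacent_change_abort(old_dev, new_dev, dev);
		return err;
	}

	netdev_adjacent_change_commit(old_dev, new_dev, dev);
	return 0;
}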
/**
* netdev_bonding_info_change - Dispatch event about slave change
* @dev: device
* @bonding_info: info to dispatch
*
* Send NETDEV_BONDING_INFO to netdev notifiers with info.
* The caller must hold the RTNL lock.
*/
void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info)
{
struct netdev_notifier_bonding_info info = {
.info.dev = dev,
};
memcpy(&info.bonding_info, bonding_info,
sizeof(struct netdev_bonding_info));
call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
&info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);
static int netdev_offload_xstats_enable_l3(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct netdev_notifier_offload_xstats_info info = {
.info.dev = dev,
.info.extack = extack,
.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
};
int err;
int rc;
dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
GFP_KERNEL);
if (!dev->offload_xstats_l3)
return -ENOMEM;
rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
NETDEV_OFFLOAD_XSTATS_DISABLE,
&info.info);
err = notifier_to_errno(rc);
if (err)
goto free_stats;
return 0;
free_stats:
kfree(dev->offload_xstats_l3);
dev->offload_xstats_l3 = NULL;
return err;
}
int netdev_offload_xstats_enable(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct netlink_ext_ack *extack)
{
ASSERT_RTNL();
if (netdev_offload_xstats_enabled(dev, type))
return -EALREADY;
switch (type) {
case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
return netdev_offload_xstats_enable_l3(dev, extack);
}
WARN_ON(1);
return -EINVAL;
}
EXPORT_SYMBOL(netdev_offload_xstats_enable);
static void netdev_offload_xstats_disable_l3(struct net_device *dev)
{
struct netdev_notifier_offload_xstats_info info = {
.info.dev = dev,
.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
};
call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
&info.info);
kfree(dev->offload_xstats_l3);
dev->offload_xstats_l3 = NULL;
}
int netdev_offload_xstats_disable(struct net_device *dev,
enum netdev_offload_xstats_type type)
{
ASSERT_RTNL();
if (!netdev_offload_xstats_enabled(dev, type))
return -EALREADY;
switch (type) {
case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
netdev_offload_xstats_disable_l3(dev);
return 0;
}
WARN_ON(1);
return -EINVAL;
}
EXPORT_SYMBOL(netdev_offload_xstats_disable);
static void netdev_offload_xstats_disable_all(struct net_device *dev)
{
netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
}
static struct rtnl_hw_stats64 *
netdev_offload_xstats_get_ptr(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{
switch (type) {
case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
return dev->offload_xstats_l3;
}
WARN_ON(1);
return NULL;
}
bool netdev_offload_xstats_enabled(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{
ASSERT_RTNL();
return netdev_offload_xstats_get_ptr(dev, type);
}
EXPORT_SYMBOL(netdev_offload_xstats_enabled);
struct netdev_notifier_offload_xstats_ru {
bool used;
};
struct netdev_notifier_offload_xstats_rd {
struct rtnl_hw_stats64 stats;
bool used;
};
static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
const struct rtnl_hw_stats64 *src)
{
dest->rx_packets += src->rx_packets;
dest->tx_packets += src->tx_packets;
dest->rx_bytes += src->rx_bytes;
dest->tx_bytes += src->tx_bytes;
dest->rx_errors += src->rx_errors;
dest->tx_errors += src->tx_errors;
dest->rx_dropped += src->rx_dropped;
dest->tx_dropped += src->tx_dropped;
dest->multicast += src->multicast;
}
static int netdev_offload_xstats_get_used(struct net_device *dev,
enum netdev_offload_xstats_type type,
bool *p_used,
struct netlink_ext_ack *extack)
{
struct netdev_notifier_offload_xstats_ru report_used = {};
struct netdev_notifier_offload_xstats_info info = {
.info.dev = dev,
.info.extack = extack,
.type = type,
.report_used = &report_used,
};
int rc;
WARN_ON(!netdev_offload_xstats_enabled(dev, type));
rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
&info.info);
*p_used = report_used.used;
return notifier_to_errno(rc);
}
static int netdev_offload_xstats_get_stats(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct rtnl_hw_stats64 *p_stats,
bool *p_used,
struct netlink_ext_ack *extack)
{
struct netdev_notifier_offload_xstats_rd report_delta = {};
struct netdev_notifier_offload_xstats_info info = {
.info.dev = dev,
.info.extack = extack,
.type = type,
.report_delta = &report_delta,
};
struct rtnl_hw_stats64 *stats;
int rc;
stats = netdev_offload_xstats_get_ptr(dev, type);
if (WARN_ON(!stats))
return -EINVAL;
rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
&info.info);
/* Cache whatever we got, even if there was an error, otherwise the
* successful stats retrievals would get lost.
*/
netdev_hw_stats64_add(stats, &report_delta.stats);
if (p_stats)
*p_stats = *stats;
*p_used = report_delta.used;
return notifier_to_errno(rc);
}
int netdev_offload_xstats_get(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct rtnl_hw_stats64 *p_stats, bool *p_used,
struct netlink_ext_ack *extack)
{
ASSERT_RTNL();
if (p_stats)
return netdev_offload_xstats_get_stats(dev, type, p_stats,
p_used, extack);
else
return netdev_offload_xstats_get_used(dev, type, p_used,
extack);
}
EXPORT_SYMBOL(netdev_offload_xstats_get);
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
const struct rtnl_hw_stats64 *stats)
{
report_delta->used = true;
netdev_hw_stats64_add(&report_delta->stats, stats);
}
EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
{
report_used->used = true;
}
EXPORT_SYMBOL(netdev_offload_xstats_report_used);
void netdev_offload_xstats_push_delta(struct net_device *dev,
enum netdev_offload_xstats_type type,
const struct rtnl_hw_stats64 *p_stats)
{
struct rtnl_hw_stats64 *stats;
ASSERT_RTNL();
stats = netdev_offload_xstats_get_ptr(dev, type);
if (WARN_ON(!stats))
return;
netdev_hw_stats64_add(stats, p_stats);
}
EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
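/*
 * Illustrative sketch, not part of the original file: how an offloading
 * driver's netdevice notifier might answer NETDEV_OFFLOAD_XSTATS_REPORT_DELTA
 * using the report helper above. The function name and the empty delta are
 * hypothetical.
 */
static inline int example_report_l3_delta(struct netdev_notifier_offload_xstats_info *info)
{
	struct rtnl_hw_stats64 delta = {};

	if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return NOTIFY_DONE;
	/* ... read-and-clear the hardware counters into 'delta' ... */
	netdev_offload_xstats_report_delta(info->report_delta, &delta);
	return NOTIFY_OK;
}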
/**
* netdev_get_xmit_slave - Get the xmit slave of master device
* @dev: device
* @skb: The packet
* @all_slaves: assume all the slaves are active
*
* The reference counters are not incremented so the caller must be
* careful with locks. The caller must hold the RCU read lock.
* %NULL is returned if no slave is found.
*/
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (!ops->ndo_get_xmit_slave)
return NULL;
return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
}
EXPORT_SYMBOL(netdev_get_xmit_slave);
static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
struct sock *sk)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (!ops->ndo_sk_get_lower_dev)
return NULL;
return ops->ndo_sk_get_lower_dev(dev, sk);
}
/**
* netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
* @dev: device
* @sk: the socket
*
* %NULL is returned if no lower device is found.
*/
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
struct sock *sk)
{
struct net_device *lower;
lower = netdev_sk_get_lower_dev(dev, sk);
while (lower) {
dev = lower;
lower = netdev_sk_get_lower_dev(dev, sk);
}
return dev;
}
EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
static void netdev_adjacent_add_links(struct net_device *dev)
{
struct netdev_adjacent *iter;
struct net *net = dev_net(dev);
list_for_each_entry(iter, &dev->adj_list.upper, list) {
if (!net_eq(net, dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_add(iter->dev, dev,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_add(dev, iter->dev,
&dev->adj_list.upper);
}
list_for_each_entry(iter, &dev->adj_list.lower, list) {
if (!net_eq(net, dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_add(iter->dev, dev,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_add(dev, iter->dev,
&dev->adj_list.lower);
}
}
static void netdev_adjacent_del_links(struct net_device *dev)
{
struct netdev_adjacent *iter;
struct net *net = dev_net(dev);
list_for_each_entry(iter, &dev->adj_list.upper, list) {
if (!net_eq(net, dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, dev->name,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_del(dev, iter->dev->name,
&dev->adj_list.upper);
}
list_for_each_entry(iter, &dev->adj_list.lower, list) {
if (!net_eq(net, dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, dev->name,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_del(dev, iter->dev->name,
&dev->adj_list.lower);
}
}
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
struct netdev_adjacent *iter;
struct net *net = dev_net(dev);
list_for_each_entry(iter, &dev->adj_list.upper, list) {
if (!net_eq(net, dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_add(iter->dev, dev,
&iter->dev->adj_list.lower);
}
list_for_each_entry(iter, &dev->adj_list.lower, list) {
if (!net_eq(net, dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_add(iter->dev, dev,
&iter->dev->adj_list.upper);
}
}
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev)
{
struct netdev_adjacent *lower;
if (!lower_dev)
return NULL;
lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
if (!lower)
return NULL;
return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);
/**
* netdev_lower_state_changed - Dispatch event about lower device state change
* @lower_dev: device
* @lower_state_info: state to dispatch
*
* Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
* The caller must hold the RTNL lock.
*/
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info)
{
struct netdev_notifier_changelowerstate_info changelowerstate_info = {
.info.dev = lower_dev,
};
ASSERT_RTNL();
changelowerstate_info.lower_state_info = lower_state_info;
call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
&changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_change_rx_flags)
ops->ndo_change_rx_flags(dev, flags);
}
static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
unsigned int old_flags = dev->flags;
unsigned int promiscuity, flags;
kuid_t uid;
kgid_t gid;
ASSERT_RTNL();
promiscuity = dev->promiscuity + inc;
if (promiscuity == 0) {
/*
* Avoid overflow.
* If inc would cause an overflow, leave promiscuity untouched and
* return an error.
*/
if (unlikely(inc > 0)) {
netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
return -EOVERFLOW;
}
flags = old_flags & ~IFF_PROMISC;
} else {
flags = old_flags | IFF_PROMISC;
}
WRITE_ONCE(dev->promiscuity, promiscuity);
if (flags != old_flags) {
WRITE_ONCE(dev->flags, flags);
netdev_info(dev, "%s promiscuous mode\n",
dev->flags & IFF_PROMISC ? "entered" : "left");
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(audit_context(), GFP_ATOMIC,
AUDIT_ANOM_PROMISCUOUS,
"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
dev->name, (dev->flags & IFF_PROMISC),
(old_flags & IFF_PROMISC),
from_kuid(&init_user_ns, audit_get_loginuid(current)),
from_kuid(&init_user_ns, uid),
from_kgid(&init_user_ns, gid),
audit_get_sessionid(current));
}
dev_change_rx_flags(dev, IFF_PROMISC);
}
if (notify) {
/* The ops lock is only required to ensure consistent locking
* for `NETDEV_CHANGE` notifiers. This function is sometimes
* called without the lock, even for devices that are ops
* locked, such as in `dev_uc_sync_multiple` when using
* bonding or teaming.
*/
netdev_ops_assert_locked(dev);
__dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
}
return 0;
}
int netif_set_promiscuity(struct net_device *dev, int inc)
{
unsigned int old_flags = dev->flags;
int err;
err = __dev_set_promiscuity(dev, inc, true);
if (err < 0)
return err;
if (dev->flags != old_flags)
dev_set_rx_mode(dev);
return err;
}
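/*
 * Illustrative sketch, not part of the original file: promiscuity is a
 * reference count, so callers pair +1/-1 increments. The function name is
 * hypothetical; RTNL (and the instance lock where applicable) is assumed
 * to be held.
 */
static inline int example_toggle_promisc(struct net_device *dev, bool on)
{
	return netif_set_promiscuity(dev, on ? 1 : -1);
}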
int netif_set_allmulti(struct net_device *dev, int inc, bool notify)
{
unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
unsigned int allmulti, flags;
ASSERT_RTNL();
allmulti = dev->allmulti + inc;
if (allmulti == 0) {
/*
* Avoid overflow.
* If inc would cause an overflow, leave allmulti untouched and
* return an error.
*/
if (unlikely(inc > 0)) {
netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
return -EOVERFLOW;
}
flags = old_flags & ~IFF_ALLMULTI;
} else {
flags = old_flags | IFF_ALLMULTI;
}
WRITE_ONCE(dev->allmulti, allmulti);
if (flags != old_flags) {
WRITE_ONCE(dev->flags, flags);
netdev_info(dev, "%s allmulticast mode\n",
dev->flags & IFF_ALLMULTI ? "entered" : "left");
dev_change_rx_flags(dev, IFF_ALLMULTI);
dev_set_rx_mode(dev);
if (notify)
__dev_notify_flags(dev, old_flags,
dev->gflags ^ old_gflags, 0, NULL);
}
return 0;
}
/*
* Upload unicast and multicast address lists to device and
* configure RX filtering. When the device doesn't support unicast
* filtering it is put in promiscuous mode while unicast addresses
* are present.
*/
void __dev_set_rx_mode(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
/* dev_open will call this function so the list will stay sane. */
if (!(dev->flags&IFF_UP))
return;
if (!netif_device_present(dev))
return;
if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
/* Unicast address changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity here is safe.
*/
if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
__dev_set_promiscuity(dev, 1, false);
dev->uc_promisc = true;
} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
__dev_set_promiscuity(dev, -1, false);
dev->uc_promisc = false;
}
}
if (ops->ndo_set_rx_mode)
ops->ndo_set_rx_mode(dev);
}
void dev_set_rx_mode(struct net_device *dev)
{
netif_addr_lock_bh(dev);
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
}
/**
* netif_get_flags() - get flags reported to userspace
* @dev: device
*
* Get the combination of flag bits exported through APIs to userspace.
*/
unsigned int netif_get_flags(const struct net_device *dev)
{
unsigned int flags;
flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
IFF_ALLMULTI |
IFF_RUNNING |
IFF_LOWER_UP |
IFF_DORMANT)) |
(READ_ONCE(dev->gflags) & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev)) {
if (netif_oper_up(dev))
flags |= IFF_RUNNING;
if (netif_carrier_ok(dev))
flags |= IFF_LOWER_UP;
if (netif_dormant(dev))
flags |= IFF_DORMANT;
}
return flags;
}
EXPORT_SYMBOL(netif_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack)
{
unsigned int old_flags = dev->flags;
int ret;
ASSERT_RTNL();
/*
* Set the flags on our device.
*/
dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
IFF_AUTOMEDIA)) |
(dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
IFF_ALLMULTI));
/*
* Load in the correct multicast list now the flags have changed.
*/
if ((old_flags ^ flags) & IFF_MULTICAST)
dev_change_rx_flags(dev, IFF_MULTICAST);
dev_set_rx_mode(dev);
/*
* Have we downed the interface? We handle IFF_UP ourselves
* according to user attempts to set it, rather than blindly
* setting it.
*/
ret = 0;
if ((old_flags ^ flags) & IFF_UP) {
if (old_flags & IFF_UP)
__dev_close(dev);
else
ret = __dev_open(dev, extack);
}
if ((flags ^ dev->gflags) & IFF_PROMISC) {
int inc = (flags & IFF_PROMISC) ? 1 : -1;
old_flags = dev->flags;
dev->gflags ^= IFF_PROMISC;
if (__dev_set_promiscuity(dev, inc, false) >= 0)
if (dev->flags != old_flags)
dev_set_rx_mode(dev);
}
/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
* is important. Some (broken) drivers set IFF_PROMISC when
* IFF_ALLMULTI is requested, without asking us and without reporting it.
*/
if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
dev->gflags ^= IFF_ALLMULTI;
netif_set_allmulti(dev, inc, false);
}
return ret;
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
unsigned int gchanges, u32 portid,
const struct nlmsghdr *nlh)
{
unsigned int changes = dev->flags ^ old_flags;
if (gchanges)
rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
if (changes & IFF_UP) {
if (dev->flags & IFF_UP)
call_netdevice_notifiers(NETDEV_UP, dev);
else
call_netdevice_notifiers(NETDEV_DOWN, dev);
}
if (dev->flags & IFF_UP &&
(changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
struct netdev_notifier_change_info change_info = {
.info = {
.dev = dev,
},
.flags_changed = changes,
};
call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
}
}
int netif_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack)
{
int ret;
unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
ret = __dev_change_flags(dev, flags, extack);
if (ret < 0)
return ret;
changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
__dev_notify_flags(dev, old_flags, changes, 0, NULL);
return ret;
}
int __netif_set_mtu(struct net_device *dev, int new_mtu)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_change_mtu)
return ops->ndo_change_mtu(dev, new_mtu);
/* Pairs with all the lockless reads of dev->mtu in the stack */
WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL_NS_GPL(__netif_set_mtu, "NETDEV_INTERNAL");
int dev_validate_mtu(struct net_device *dev, int new_mtu,
struct netlink_ext_ack *extack)
{
/* MTU must be positive, and in range */
if (new_mtu < 0 || new_mtu < dev->min_mtu) {
NL_SET_ERR_MSG(extack, "mtu less than device minimum");
return -EINVAL;
}
if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
return -EINVAL;
}
return 0;
}
/**
* netif_set_mtu_ext() - Change maximum transfer unit
* @dev: device
* @new_mtu: new transfer unit
* @extack: netlink extended ack
*
* Change the maximum transfer size of the network device.
*
* Return: 0 on success, -errno on failure.
*/
int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
struct netlink_ext_ack *extack)
{
int err, orig_mtu;
netdev_ops_assert_locked(dev);
if (new_mtu == dev->mtu)
return 0;
err = dev_validate_mtu(dev, new_mtu, extack);
if (err)
return err;
if (!netif_device_present(dev))
return -ENODEV;
err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
err = notifier_to_errno(err);
if (err)
return err;
orig_mtu = dev->mtu;
err = __netif_set_mtu(dev, new_mtu);
if (!err) {
err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
orig_mtu);
err = notifier_to_errno(err);
if (err) {
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
__netif_set_mtu(dev, orig_mtu);
call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
new_mtu);
}
}
return err;
}
int netif_set_mtu(struct net_device *dev, int new_mtu)
{
struct netlink_ext_ack extack;
int err;
memset(&extack, 0, sizeof(extack));
err = netif_set_mtu_ext(dev, new_mtu, &extack);
if (err && extack._msg)
net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
return err;
}
EXPORT_SYMBOL(netif_set_mtu);
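/*
 * Illustrative sketch, not part of the original file: raising the MTU for
 * jumbo frames. The helper validates against dev->min_mtu/dev->max_mtu and
 * rolls the change back if a NETDEV_CHANGEMTU notifier objects. The function
 * name and the 9000 byte value are hypothetical; RTNL and the instance lock
 * are assumed held.
 */
static inline int example_enable_jumbo_frames(struct net_device *dev)
{
	return netif_set_mtu(dev, 9000);
}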
int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
unsigned int orig_len = dev->tx_queue_len;
int res;
if (new_len != (unsigned int)new_len)
return -ERANGE;
if (new_len != orig_len) {
WRITE_ONCE(dev->tx_queue_len, new_len);
res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
res = notifier_to_errno(res);
if (res)
goto err_rollback;
res = dev_qdisc_change_tx_queue_len(dev);
if (res)
goto err_rollback;
}
return 0;
err_rollback:
netdev_err(dev, "refused to change device tx_queue_len\n");
WRITE_ONCE(dev->tx_queue_len, orig_len);
return res;
}
void netif_set_group(struct net_device *dev, int new_group)
{
dev->group = new_group;
}
/**
* netif_pre_changeaddr_notify() - Call NETDEV_PRE_CHANGEADDR.
* @dev: device
* @addr: new address
* @extack: netlink extended ack
*
* Return: 0 on success, -errno on failure.
*/
int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack)
{
struct netdev_notifier_pre_changeaddr_info info = {
.info.dev = dev,
.info.extack = extack,
.dev_addr = addr,
};
int rc;
rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
return notifier_to_errno(rc);
}
EXPORT_SYMBOL_NS_GPL(netif_pre_changeaddr_notify, "NETDEV_INTERNAL");
int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
if (!ops->ndo_set_mac_address)
return -EOPNOTSUPP;
if (ss->ss_family != dev->type)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
err = netif_pre_changeaddr_notify(dev, ss->__data, extack);
if (err)
return err;
if (memcmp(dev->dev_addr, ss->__data, dev->addr_len)) {
err = ops->ndo_set_mac_address(dev, ss);
if (err)
return err;
}
dev->addr_assign_type = NET_ADDR_SET;
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
return 0;
}
DECLARE_RWSEM(dev_addr_sem);
/* "sa" is a true struct sockaddr with limited "sa_data" member. */
int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
size_t size = sizeof(sa->sa_data_min);
struct net_device *dev;
int ret = 0;
down_read(&dev_addr_sem);
rcu_read_lock();
dev = dev_get_by_name_rcu(net, dev_name);
if (!dev) {
ret = -ENODEV;
goto unlock;
}
if (!dev->addr_len)
memset(sa->sa_data, 0, size);
else
memcpy(sa->sa_data, dev->dev_addr,
min_t(size_t, size, dev->addr_len));
sa->sa_family = dev->type;
unlock:
rcu_read_unlock();
up_read(&dev_addr_sem);
return ret;
}
EXPORT_SYMBOL_NS_GPL(netif_get_mac_address, "NETDEV_INTERNAL");
int netif_change_carrier(struct net_device *dev, bool new_carrier)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (!ops->ndo_change_carrier)
return -EOPNOTSUPP;
if (!netif_device_present(dev))
return -ENODEV;
return ops->ndo_change_carrier(dev, new_carrier);
}
/**
* dev_get_phys_port_id - Get device physical port ID
* @dev: device
* @ppid: port ID
*
* Get device physical port ID
*/
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (!ops->ndo_get_phys_port_id)
return -EOPNOTSUPP;
return ops->ndo_get_phys_port_id(dev, ppid);
}
/**
* dev_get_phys_port_name - Get device physical port name
* @dev: device
* @name: port name
* @len: limit of bytes to copy to name
*
* Get device physical port name
*/
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
if (ops->ndo_get_phys_port_name) {
err = ops->ndo_get_phys_port_name(dev, name, len);
if (err != -EOPNOTSUPP)
return err;
}
return devlink_compat_phys_port_name_get(dev, name, len);
}
/**
* netif_get_port_parent_id() - Get the device's port parent identifier
* @dev: network device
* @ppid: pointer to a storage for the port's parent identifier
* @recurse: allow/disallow recursion to lower devices
*
* Get the device's port parent identifier.
*
* Return: 0 on success, -errno on failure.
*/
int netif_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct netdev_phys_item_id first = { };
struct net_device *lower_dev;
struct list_head *iter;
int err;
if (ops->ndo_get_port_parent_id) {
err = ops->ndo_get_port_parent_id(dev, ppid);
if (err != -EOPNOTSUPP)
return err;
}
err = devlink_compat_switch_id_get(dev, ppid);
if (!recurse || err != -EOPNOTSUPP)
return err;
netdev_for_each_lower_dev(dev, lower_dev, iter) {
err = netif_get_port_parent_id(lower_dev, ppid, true);
if (err)
break;
if (!first.id_len)
first = *ppid;
else if (memcmp(&first, ppid, sizeof(*ppid)))
return -EOPNOTSUPP;
}
return err;
}
EXPORT_SYMBOL(netif_get_port_parent_id);
/**
* netdev_port_same_parent_id - Indicate if two network devices have
* the same port parent identifier
* @a: first network device
* @b: second network device
*/
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
struct netdev_phys_item_id a_id = { };
struct netdev_phys_item_id b_id = { };
if (netif_get_port_parent_id(a, &a_id, true) ||
netif_get_port_parent_id(b, &b_id, true))
return false;
return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
int netif_change_proto_down(struct net_device *dev, bool proto_down)
{
if (!dev->change_proto_down)
return -EOPNOTSUPP;
if (!netif_device_present(dev))
return -ENODEV;
if (proto_down)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
WRITE_ONCE(dev->proto_down, proto_down);
return 0;
}
/**
* netdev_change_proto_down_reason_locked - update proto down reason bits
*
* @dev: device
* @mask: proto down mask
* @value: proto down value
*/
void netdev_change_proto_down_reason_locked(struct net_device *dev,
unsigned long mask, u32 value)
{
u32 proto_down_reason;
int b;
if (!mask) {
proto_down_reason = value;
} else {
proto_down_reason = dev->proto_down_reason;
for_each_set_bit(b, &mask, 32) {
if (value & (1 << b))
proto_down_reason |= BIT(b);
else
proto_down_reason &= ~BIT(b);
}
}
WRITE_ONCE(dev->proto_down_reason, proto_down_reason);
}
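/*
 * Illustrative sketch, not part of the original file: with a non-zero mask
 * only the masked bits are rewritten from @value. Here bit 2 is set and
 * bit 5 cleared while every other reason bit is preserved; with a mask of 0
 * the whole word is replaced. The function name is hypothetical.
 */
static inline void example_update_proto_down_reason(struct net_device *dev)
{
	netdev_change_proto_down_reason_locked(dev, BIT(2) | BIT(5), BIT(2));
}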
struct bpf_xdp_link {
struct bpf_link link;
struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
int flags;
};
static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{
if (flags & XDP_FLAGS_HW_MODE)
return XDP_MODE_HW;
if (flags & XDP_FLAGS_DRV_MODE)
return XDP_MODE_DRV;
if (flags & XDP_FLAGS_SKB_MODE)
return XDP_MODE_SKB;
return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}
static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
{
switch (mode) {
case XDP_MODE_SKB:
return generic_xdp_install;
case XDP_MODE_DRV:
case XDP_MODE_HW:
return dev->netdev_ops->ndo_bpf;
default:
return NULL;
}
}
static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
enum bpf_xdp_mode mode)
{
return dev->xdp_state[mode].link;
}
static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
enum bpf_xdp_mode mode)
{
struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
if (link)
return link->link.prog;
return dev->xdp_state[mode].prog;
}
u8 dev_xdp_prog_count(struct net_device *dev)
{
u8 count = 0;
int i;
for (i = 0; i < __MAX_XDP_MODE; i++)
if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
count++;
return count;
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
u8 dev_xdp_sb_prog_count(struct net_device *dev)
{
u8 count = 0;
int i;
for (i = 0; i < __MAX_XDP_MODE; i++)
if (dev->xdp_state[i].prog &&
!dev->xdp_state[i].prog->aux->xdp_has_frags)
count++;
return count;
}
int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
{
if (!dev->netdev_ops->ndo_bpf)
return -EOPNOTSUPP;
if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
bpf->command == XDP_SETUP_PROG &&
bpf->prog && !bpf->prog->aux->xdp_has_frags) {
NL_SET_ERR_MSG(bpf->extack,
"unable to propagate XDP to device using tcp-data-split");
return -EBUSY;
}
if (dev_get_min_mp_channel_count(dev)) {
NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");
return -EBUSY;
}
return dev->netdev_ops->ndo_bpf(dev, bpf);
}
EXPORT_SYMBOL_GPL(netif_xdp_propagate);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
struct bpf_prog *prog = dev_xdp_prog(dev, mode);
return prog ? prog->aux->id : 0;
}
static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
struct bpf_xdp_link *link)
{
dev->xdp_state[mode].link = link;
dev->xdp_state[mode].prog = NULL;
}
static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
struct bpf_prog *prog)
{
dev->xdp_state[mode].link = NULL;
dev->xdp_state[mode].prog = prog;
}
static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
bpf_op_t bpf_op, struct netlink_ext_ack *extack,
u32 flags, struct bpf_prog *prog)
{
struct netdev_bpf xdp;
int err;
netdev_ops_assert_locked(dev);
if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
prog && !prog->aux->xdp_has_frags) {
NL_SET_ERR_MSG(extack, "unable to install XDP to device using tcp-data-split");
return -EBUSY;
}
if (dev_get_min_mp_channel_count(dev)) {
NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
return -EBUSY;
}
memset(&xdp, 0, sizeof(xdp));
xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
xdp.extack = extack;
xdp.flags = flags;
xdp.prog = prog;
/* Drivers assume refcnt is already incremented (i.e., prog pointer is
* "moved" into driver), so they don't increment it on their own, but
* they do decrement refcnt when program is detached or replaced.
* Given net_device also owns link/prog, we need to bump refcnt here
* to prevent drivers from underflowing it.
*/
if (prog)
bpf_prog_inc(prog);
err = bpf_op(dev, &xdp);
if (err) {
if (prog)
bpf_prog_put(prog);
return err;
}
if (mode != XDP_MODE_HW)
bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
return 0;
}
static void dev_xdp_uninstall(struct net_device *dev)
{
struct bpf_xdp_link *link;
struct bpf_prog *prog;
enum bpf_xdp_mode mode;
bpf_op_t bpf_op;
ASSERT_RTNL();
for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
prog = dev_xdp_prog(dev, mode);
if (!prog)
continue;
bpf_op = dev_xdp_bpf_op(dev, mode);
if (!bpf_op)
continue;
WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
/* auto-detach link from net device */
link = dev_xdp_link(dev, mode);
if (link)
link->dev = NULL;
else
bpf_prog_put(prog);
dev_xdp_set_link(dev, mode, NULL);
}
}
static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
struct bpf_xdp_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog, u32 flags)
{
unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
struct bpf_prog *cur_prog;
struct net_device *upper;
struct list_head *iter;
enum bpf_xdp_mode mode;
bpf_op_t bpf_op;
int err;
ASSERT_RTNL();
/* either link or prog attachment, never both */
if (link && (new_prog || old_prog))
return -EINVAL;
/* link supports only XDP mode flags */
if (link && (flags & ~XDP_FLAGS_MODES)) {
NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
return -EINVAL;
}
/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
if (num_modes > 1) {
NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
return -EINVAL;
}
/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
if (!num_modes && dev_xdp_prog_count(dev) > 1) {
NL_SET_ERR_MSG(extack,
"More than one program loaded, unset mode is ambiguous");
return -EINVAL;
}
/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
return -EINVAL;
}
mode = dev_xdp_mode(dev, flags);
/* can't replace attached link */
if (dev_xdp_link(dev, mode)) {
NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
return -EBUSY;
}
/* don't allow if an upper device already has a program */
netdev_for_each_upper_dev_rcu(dev, upper, iter) {
if (dev_xdp_prog_count(upper) > 0) {
NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
return -EEXIST;
}
}
cur_prog = dev_xdp_prog(dev, mode);
/* can't replace attached prog with link */
if (link && cur_prog) {
NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
return -EBUSY;
}
if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
NL_SET_ERR_MSG(extack, "Active program does not match expected");
return -EEXIST;
}
/* put effective new program into new_prog */
if (link)
new_prog = link->link.prog;
if (new_prog) {
bool offload = mode == XDP_MODE_HW;
enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
? XDP_MODE_DRV : XDP_MODE_SKB;
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
NL_SET_ERR_MSG(extack, "XDP program already attached");
return -EBUSY;
}
if (!offload && dev_xdp_prog(dev, other_mode)) {
NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
return -EEXIST;
}
if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
return -EINVAL;
}
if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
NL_SET_ERR_MSG(extack, "Program bound to different device");
return -EINVAL;
}
if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
return -EINVAL;
}
if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
return -EINVAL;
}
if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
return -EINVAL;
}
}
/* don't call drivers if the effective program didn't change */
if (new_prog != cur_prog) {
bpf_op = dev_xdp_bpf_op(dev, mode);
if (!bpf_op) {
NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
return -EOPNOTSUPP;
}
err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
if (err)
return err;
}
if (link)
dev_xdp_set_link(dev, mode, link);
else
dev_xdp_set_prog(dev, mode, new_prog);
if (cur_prog)
bpf_prog_put(cur_prog);
return 0;
}
static int dev_xdp_attach_link(struct net_device *dev,
struct netlink_ext_ack *extack,
struct bpf_xdp_link *link)
{
return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
}
static int dev_xdp_detach_link(struct net_device *dev,
struct netlink_ext_ack *extack,
struct bpf_xdp_link *link)
{
enum bpf_xdp_mode mode;
bpf_op_t bpf_op;
ASSERT_RTNL();
mode = dev_xdp_mode(dev, link->flags);
if (dev_xdp_link(dev, mode) != link)
return -EINVAL;
bpf_op = dev_xdp_bpf_op(dev, mode);
WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
dev_xdp_set_link(dev, mode, NULL);
return 0;
}
static void bpf_xdp_link_release(struct bpf_link *link)
{
struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
rtnl_lock();
/* if racing with net_device's tear down, xdp_link->dev might be
* already NULL, in which case link was already auto-detached
*/
if (xdp_link->dev) {
netdev_lock_ops(xdp_link->dev);
WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
netdev_unlock_ops(xdp_link->dev);
xdp_link->dev = NULL;
}
rtnl_unlock();
}
static int bpf_xdp_link_detach(struct bpf_link *link)
{
bpf_xdp_link_release(link);
return 0;
}
static void bpf_xdp_link_dealloc(struct bpf_link *link)
{
struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
kfree(xdp_link);
}
static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
struct seq_file *seq)
{
struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
u32 ifindex = 0;
rtnl_lock();
if (xdp_link->dev)
ifindex = xdp_link->dev->ifindex;
rtnl_unlock();
seq_printf(seq, "ifindex:\t%u\n", ifindex);
}
static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
u32 ifindex = 0;
rtnl_lock();
if (xdp_link->dev)
ifindex = xdp_link->dev->ifindex;
rtnl_unlock();
info->xdp.ifindex = ifindex;
return 0;
}
static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog)
{
struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
enum bpf_xdp_mode mode;
bpf_op_t bpf_op;
int err = 0;
rtnl_lock();
/* link might have been auto-released already, so fail */
if (!xdp_link->dev) {
err = -ENOLINK;
goto out_unlock;
}
if (old_prog && link->prog != old_prog) {
err = -EPERM;
goto out_unlock;
}
old_prog = link->prog;
if (old_prog->type != new_prog->type ||
old_prog->expected_attach_type != new_prog->expected_attach_type) {
err = -EINVAL;
goto out_unlock;
}
if (old_prog == new_prog) {
/* no-op, don't disturb drivers */
bpf_prog_put(new_prog);
goto out_unlock;
}
netdev_lock_ops(xdp_link->dev);
mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
xdp_link->flags, new_prog);
netdev_unlock_ops(xdp_link->dev);
if (err)
goto out_unlock;
old_prog = xchg(&link->prog, new_prog);
bpf_prog_put(old_prog);
out_unlock:
rtnl_unlock();
return err;
}
static const struct bpf_link_ops bpf_xdp_link_lops = {
.release = bpf_xdp_link_release,
.dealloc = bpf_xdp_link_dealloc,
.detach = bpf_xdp_link_detach,
.show_fdinfo = bpf_xdp_link_show_fdinfo,
.fill_link_info = bpf_xdp_link_fill_link_info,
.update_prog = bpf_xdp_link_update,
};
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
struct net *net = current->nsproxy->net_ns;
struct bpf_link_primer link_primer;
struct netlink_ext_ack extack = {};
struct bpf_xdp_link *link;
struct net_device *dev;
int err, fd;
rtnl_lock();
dev = dev_get_by_index(net, attr->link_create.target_ifindex);
if (!dev) {
rtnl_unlock();
return -EINVAL;
}
link = kzalloc(sizeof(*link), GFP_USER);
if (!link) {
err = -ENOMEM;
goto unlock;
}
bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog,
attr->link_create.attach_type);
link->dev = dev;
link->flags = attr->link_create.flags;
err = bpf_link_prime(&link->link, &link_primer);
if (err) {
kfree(link);
goto unlock;
}
netdev_lock_ops(dev);
err = dev_xdp_attach_link(dev, &extack, link);
netdev_unlock_ops(dev);
rtnl_unlock();
if (err) {
link->dev = NULL;
bpf_link_cleanup(&link_primer);
trace_bpf_xdp_link_attach_failed(extack._msg);
goto out_put_dev;
}
fd = bpf_link_settle(&link_primer);
/* the link itself doesn't hold dev's refcnt, to avoid complicating shutdown */
dev_put(dev);
return fd;
unlock:
rtnl_unlock();
out_put_dev:
dev_put(dev);
return err;
}
/**
* dev_change_xdp_fd - set or clear a bpf program for a device rx path
* @dev: device
* @extack: netlink extended ack
* @fd: new program fd or negative value to clear
* @expected_fd: old program fd that userspace expects to replace or clear
* @flags: xdp-related flags
*
* Set or clear a bpf program for a device
*/
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags)
{
enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
struct bpf_prog *new_prog = NULL, *old_prog = NULL;
int err;
ASSERT_RTNL();
if (fd >= 0) {
new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
mode != XDP_MODE_SKB);
if (IS_ERR(new_prog))
return PTR_ERR(new_prog);
}
if (expected_fd >= 0) {
old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
mode != XDP_MODE_SKB);
if (IS_ERR(old_prog)) {
err = PTR_ERR(old_prog);
old_prog = NULL;
goto err_out;
}
}
err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
err_out:
if (err && new_prog)
bpf_prog_put(new_prog);
if (old_prog)
bpf_prog_put(old_prog);
return err;
}
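/*
 * Illustrative sketch, not part of the original file: attaching a generic
 * (skb-mode) XDP program by fd. An expected_fd of -1 skips the
 * XDP_FLAGS_REPLACE check. The function name is hypothetical; RTNL (and the
 * instance lock where required) is assumed held.
 */
static inline int example_attach_generic_xdp(struct net_device *dev,
					     struct netlink_ext_ack *extack,
					     int prog_fd)
{
	return dev_change_xdp_fd(dev, extack, prog_fd, -1, XDP_FLAGS_SKB_MODE);
}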
u32 dev_get_min_mp_channel_count(const struct net_device *dev)
{
int i;
netdev_ops_assert_locked(dev);
for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
if (dev->_rx[i].mp_params.mp_priv)
/* The channel count is the idx plus 1. */
return i + 1;
return 0;
}
/**
* dev_index_reserve() - allocate an ifindex in a namespace
* @net: the applicable net namespace
* @ifindex: requested ifindex, pass %0 to get one allocated
*
* Allocate an ifindex for a new device. Caller must either use the ifindex
* to store the device (via list_netdevice()) or call dev_index_release()
* to give the index up.
*
* Return: a suitable unique value for a new device interface number or -errno.
*/
static int dev_index_reserve(struct net *net, u32 ifindex)
{
int err;
if (ifindex > INT_MAX) {
DEBUG_NET_WARN_ON_ONCE(1);
return -EINVAL;
}
if (!ifindex)
err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
xa_limit_31b, &net->ifindex, GFP_KERNEL);
else
err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
if (err < 0)
return err;
return ifindex;
}
static void dev_index_release(struct net *net, int ifindex)
{
/* Expect only unused indexes, unlist_netdevice() removes the used */
WARN_ON(xa_erase(&net->dev_by_index, ifindex));
}
static bool from_cleanup_net(void)
{
#ifdef CONFIG_NET_NS
return current == READ_ONCE(cleanup_net_task);
#else
return false;
#endif
}
/* Delayed registration/unregistration */
LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
atomic_t dev_unreg_count = ATOMIC_INIT(0);
static void net_set_todo(struct net_device *dev)
{
list_add_tail(&dev->todo_list, &net_todo_list);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
struct net_device *upper, netdev_features_t features)
{
netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
netdev_features_t feature;
int feature_bit;
for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(upper->wanted_features & feature) && (features & feature)) {
netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
&feature, upper->name);
features &= ~feature;
}
}
return features;
}
static void netdev_sync_lower_features(struct net_device *upper,
struct net_device *lower, netdev_features_t features)
{
netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
netdev_features_t feature;
int feature_bit;
for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(features & feature) && (lower->features & feature)) {
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
&feature, lower->name);
netdev_lock_ops(lower);
lower->wanted_features &= ~feature;
__netdev_update_features(lower);
if (unlikely(lower->features & feature))
netdev_WARN(upper, "failed to disable %pNF on %s!\n",
&feature, lower->name);
else
netdev_features_change(lower);
netdev_unlock_ops(lower);
}
}
}
static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
{
netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
bool hw_csum = features & NETIF_F_HW_CSUM;
return ip_csum || hw_csum;
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
/* Fix illegal checksum combinations */
if ((features & NETIF_F_HW_CSUM) &&
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
netdev_warn(dev, "mixed HW and IP checksum settings.\n");
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
features &= ~NETIF_F_ALL_TSO;
}
if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
!(features & NETIF_F_IP_CSUM)) {
netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
features &= ~NETIF_F_TSO;
features &= ~NETIF_F_TSO_ECN;
}
if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
!(features & NETIF_F_IPV6_CSUM)) {
netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
features &= ~NETIF_F_TSO6;
}
/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
features &= ~NETIF_F_TSO_MANGLEID;
/* TSO ECN requires that TSO is present as well. */
if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
features &= ~NETIF_F_TSO_ECN;
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
features &= ~NETIF_F_GSO;
}
/* GSO partial features require GSO partial be set */
if ((features & dev->gso_partial_features) &&
!(features & NETIF_F_GSO_PARTIAL)) {
netdev_dbg(dev,
"Dropping partially supported GSO features since no GSO partial.\n");
features &= ~dev->gso_partial_features;
}
if (!(features & NETIF_F_RXCSUM)) {
/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
* successfully merged by hardware must also have the
* checksum verified by hardware. If the user does not
* want to enable RXCSUM, logically, we should disable GRO_HW.
*/
if (features & NETIF_F_GRO_HW) {
netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
features &= ~NETIF_F_GRO_HW;
}
}
/* LRO/HW-GRO features cannot be combined with RX-FCS */
if (features & NETIF_F_RXFCS) {
if (features & NETIF_F_LRO) {
netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_GRO_HW) {
netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
features &= ~NETIF_F_GRO_HW;
}
}
if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
features &= ~NETIF_F_LRO;
}
if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
features &= ~NETIF_F_HW_TLS_TX;
}
if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
features &= ~NETIF_F_HW_TLS_RX;
}
if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
features &= ~NETIF_F_GSO_UDP_L4;
}
return features;
}
int __netdev_update_features(struct net_device *dev)
{
struct net_device *upper, *lower;
netdev_features_t features;
struct list_head *iter;
int err = -1;
ASSERT_RTNL();
netdev_ops_assert_locked(dev);
features = netdev_get_wanted_features(dev);
if (dev->netdev_ops->ndo_fix_features)
features = dev->netdev_ops->ndo_fix_features(dev, features);
/* driver might be less strict about feature dependencies */
features = netdev_fix_features(dev, features);
/* some features can't be enabled if they're off on an upper device */
netdev_for_each_upper_dev_rcu(dev, upper, iter)
features = netdev_sync_upper_features(dev, upper, features);
if (dev->features == features)
goto sync_lower;
netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
&dev->features, &features);
if (dev->netdev_ops->ndo_set_features)
err = dev->netdev_ops->ndo_set_features(dev, features);
else
err = 0;
if (unlikely(err < 0)) {
netdev_err(dev,
"set_features() failed (%d); wanted %pNF, left %pNF\n",
err, &features, &dev->features);
/* return non-0 since some features might have changed and
* it's better to fire a spurious notification than miss it
*/
return -1;
}
sync_lower:
/* some features must be disabled on lower devices when disabled
* on an upper device (think: bonding master or bridge)
*/
netdev_for_each_lower_dev(dev, lower, iter)
netdev_sync_lower_features(dev, lower, features);
if (!err) {
netdev_features_t diff = features ^ dev->features;
if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
/* udp_tunnel_{get,drop}_rx_info both need
* NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
* device, or they won't do anything.
* Thus we need to update dev->features
* *before* calling udp_tunnel_get_rx_info,
* but *after* calling udp_tunnel_drop_rx_info.
*/
udp_tunnel_nic_lock(dev);
if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
dev->features = features;
udp_tunnel_get_rx_info(dev);
} else {
udp_tunnel_drop_rx_info(dev);
}
udp_tunnel_nic_unlock(dev);
}
if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
dev->features = features;
err |= vlan_get_rx_ctag_filter_info(dev);
} else {
vlan_drop_rx_ctag_filter_info(dev);
}
}
if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
dev->features = features;
err |= vlan_get_rx_stag_filter_info(dev);
} else {
vlan_drop_rx_stag_filter_info(dev);
}
}
dev->features = features;
}
return err < 0 ? 0 : 1;
}
/**
* netdev_update_features - recalculate device features
* @dev: the device to check
*
* Recalculate dev->features set and send notifications if it
* has changed. Should be called after driver or hardware dependent
* conditions might have changed that influence the features.
*/
void netdev_update_features(struct net_device *dev)
{
if (__netdev_update_features(dev))
netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
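/*
 * Illustrative sketch, not part of the original file: a driver calls the
 * helper above after changing something that influences the feature set
 * (e.g. dev->hw_features); notifications are sent only if dev->features
 * actually changed. The function name is hypothetical; RTNL and the
 * instance lock are assumed held.
 */
static inline void example_driver_features_changed(struct net_device *dev)
{
	netdev_update_features(dev);
}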
/**
* netdev_change_features - recalculate device features
* @dev: the device to check
*
* Recalculate dev->features set and send notifications even
* if they have not changed. Should be called instead of
* netdev_update_features() if also dev->vlan_features might
* have changed to allow the changes to be propagated to stacked
* VLAN devices.
*/
void netdev_change_features(struct net_device *dev)
{
__netdev_update_features(dev);
netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
/**
* netif_stacked_transfer_operstate - transfer operstate
* @rootdev: the root or lower level device to transfer state from
* @dev: the device to transfer operstate to
*
* Transfer operational state from root to device. This is normally
* called when a stacking relationship exists between the root
* device and the device (a leaf device).
*/
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev)
{
if (rootdev->operstate == IF_OPER_DORMANT)
netif_dormant_on(dev);
else
netif_dormant_off(dev);
if (rootdev->operstate == IF_OPER_TESTING)
netif_testing_on(dev);
else
netif_testing_off(dev);
if (netif_carrier_ok(rootdev))
netif_carrier_on(dev);
else
netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
struct netdev_rx_queue *rx;
size_t sz = count * sizeof(*rx);
int err = 0;
BUG_ON(count < 1);
rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!rx)
return -ENOMEM;
dev->_rx = rx;
for (i = 0; i < count; i++) {
rx[i].dev = dev;
/* XDP RX-queue setup */
err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
if (err < 0)
goto err_rxq_info;
}
return 0;
err_rxq_info:
/* Rollback successful reg's and free other resources */
while (i--)
xdp_rxq_info_unreg(&rx[i].xdp_rxq);
kvfree(dev->_rx);
dev->_rx = NULL;
return err;
}
static void netif_free_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
if (!dev->_rx)
return;
for (i = 0; i < count; i++)
xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
kvfree(dev->_rx);
}
static void netdev_init_one_queue(struct net_device *dev,
struct netdev_queue *queue, void *_unused)
{
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
queue->xmit_lock_owner = -1;
netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue->dev = dev;
#ifdef CONFIG_BQL
dql_init(&queue->dql, HZ);
#endif
}
static void netif_free_tx_queues(struct net_device *dev)
{
kvfree(dev->_tx);
}
static int netif_alloc_netdev_queues(struct net_device *dev)
{
unsigned int count = dev->num_tx_queues;
struct netdev_queue *tx;
size_t sz = count * sizeof(*tx);
if (count < 1 || count > 0xffff)
return -EINVAL;
tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!tx)
return -ENOMEM;
dev->_tx = tx;
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
spin_lock_init(&dev->tx_global_lock);
return 0;
}
void netif_tx_stop_all_queues(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_stop_queue(txq);
}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
{
void __percpu *v;
/* Drivers implementing ndo_get_peer_dev must support tstat
* accounting, so that skb_do_redirect() can bump the dev's
* RX stats upon network namespace switch.
*/
if (dev->netdev_ops->ndo_get_peer_dev &&
dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
return -EOPNOTSUPP;
switch (dev->pcpu_stat_type) {
case NETDEV_PCPU_STAT_NONE:
return 0;
case NETDEV_PCPU_STAT_LSTATS:
v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
break;
case NETDEV_PCPU_STAT_TSTATS:
v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
break;
case NETDEV_PCPU_STAT_DSTATS:
v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
break;
default:
return -EINVAL;
}
return v ? 0 : -ENOMEM;
}
static void netdev_do_free_pcpu_stats(struct net_device *dev)
{
switch (dev->pcpu_stat_type) {
case NETDEV_PCPU_STAT_NONE:
return;
case NETDEV_PCPU_STAT_LSTATS:
free_percpu(dev->lstats);
break;
case NETDEV_PCPU_STAT_TSTATS:
free_percpu(dev->tstats);
break;
case NETDEV_PCPU_STAT_DSTATS:
free_percpu(dev->dstats);
break;
}
}
static void netdev_free_phy_link_topology(struct net_device *dev)
{
struct phy_link_topology *topo = dev->link_topo;
if (IS_ENABLED(CONFIG_PHYLIB) && topo) {
xa_destroy(&topo->phys);
kfree(topo);
dev->link_topo = NULL;
}
}
/**
* register_netdevice() - register a network device
* @dev: device to register
*
* Take a prepared network device structure and make it externally accessible.
* A %NETDEV_REGISTER message is sent to the netdev notifier chain.
* Callers must hold the rtnl lock - you may want register_netdev()
* instead of this.
*/
int register_netdevice(struct net_device *dev)
{
int ret;
struct net *net = dev_net(dev);
BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
NETDEV_FEATURE_COUNT);
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
might_sleep();
/* When net_device's are persistent, this will be fatal. */
BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
BUG_ON(!net);
ret = ethtool_check_ops(dev->ethtool_ops);
if (ret)
return ret;
/* rss ctx ID 0 is reserved for the default context, start from 1 */
xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1);
mutex_init(&dev->ethtool->rss_lock);
spin_lock_init(&dev->addr_list_lock);
netdev_set_addr_lockdep_class(dev);
ret = dev_get_valid_name(net, dev, dev->name);
if (ret < 0)
goto out;
ret = -ENOMEM;
dev->name_node = netdev_name_node_head_alloc(dev);
if (!dev->name_node)
goto out;
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
if (ret) {
if (ret > 0)
ret = -EIO;
goto err_free_name;
}
}
if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
(!dev->netdev_ops->ndo_vlan_rx_add_vid ||
!dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
ret = -EINVAL;
goto err_uninit;
}
ret = netdev_do_alloc_pcpu_stats(dev);
if (ret)
goto err_uninit;
ret = dev_index_reserve(net, dev->ifindex);
if (ret < 0)
goto err_free_pcpu;
dev->ifindex = ret;
/* Transfer changeable features to wanted_features and enable
* software offloads (GSO and GRO).
*/
dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
dev->features |= NETIF_F_SOFT_FEATURES;
if (dev->udp_tunnel_nic_info) {
dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
}
dev->wanted_features = dev->features & dev->hw_features;
if (!(dev->flags & IFF_LOOPBACK))
dev->hw_features |= NETIF_F_NOCACHE_COPY;
/* If IPv4 TCP segmentation offload is supported we should also
* allow the device to enable segmenting the frame with the option
* of ignoring a static IP ID value. This doesn't enable the
* feature itself but allows the user to enable it later.
*/
if (dev->hw_features & NETIF_F_TSO)
dev->hw_features |= NETIF_F_TSO_MANGLEID;
if (dev->vlan_features & NETIF_F_TSO)
dev->vlan_features |= NETIF_F_TSO_MANGLEID;
if (dev->mpls_features & NETIF_F_TSO)
dev->mpls_features |= NETIF_F_TSO_MANGLEID;
if (dev->hw_enc_features & NETIF_F_TSO)
dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
*/
dev->vlan_features |= NETIF_F_HIGHDMA;
/* Make NETIF_F_SG inheritable to tunnel devices.
*/
dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
/* Make NETIF_F_SG inheritable to MPLS.
*/
dev->mpls_features |= NETIF_F_SG;
ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
ret = notifier_to_errno(ret);
if (ret)
goto err_ifindex_release;
ret = netdev_register_kobject(dev);
netdev_lock(dev);
WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
netdev_unlock(dev);
if (ret)
goto err_uninit_notify;
netdev_lock_ops(dev);
__netdev_update_features(dev);
netdev_unlock_ops(dev);
/*
* Default initial state at registry is that the
* device is present.
*/
set_bit(__LINK_STATE_PRESENT, &dev->state);
linkwatch_init_dev(dev);
dev_init_scheduler(dev);
netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
list_netdevice(dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
/* If the device has permanent device address, driver should
* set dev_addr and also addr_assign_type should be set to
* NET_ADDR_PERM (default value).
*/
if (dev->addr_assign_type == NET_ADDR_PERM)
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* Notify protocols, that a new device appeared. */
netdev_lock_ops(dev);
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
netdev_unlock_ops(dev);
ret = notifier_to_errno(ret);
if (ret) {
/* Expect explicit free_netdev() on failure */
dev->needs_free_netdev = false;
unregister_netdevice_queue(dev, NULL);
goto out;
}
/*
* Prevent userspace races by waiting until the network
* device is fully setup before sending notifications.
*/
if (!(dev->rtnl_link_ops && dev->rtnl_link_initializing))
rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
out:
return ret;
err_uninit_notify:
call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
err_ifindex_release:
dev_index_release(net, dev->ifindex);
err_free_pcpu:
netdev_do_free_pcpu_stats(dev);
err_uninit:
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
if (dev->priv_destructor)
dev->priv_destructor(dev);
err_free_name:
netdev_name_node_free(dev->name_node);
goto out;
}
EXPORT_SYMBOL(register_netdevice);
/* Initialize the core of a dummy net device.
* This covers the setup steps dummy netdevs need and that normal netdevs
* otherwise get by going through register_netdevice().
*/
static void init_dummy_netdev(struct net_device *dev)
{
/* make sure we BUG if trying to hit standard
* register/unregister code path
*/
dev->reg_state = NETREG_DUMMY;
/* a dummy interface is started by default */
set_bit(__LINK_STATE_PRESENT, &dev->state);
set_bit(__LINK_STATE_START, &dev->state);
/* Note : We don't allocate pcpu_refcnt for dummy devices,
* because users of this 'device' don't need to change
* its refcount.
*/
}
/**
* register_netdev - register a network device
* @dev: device to register
*
* Take a completed network device structure and add it to the kernel
* interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
* chain. 0 is returned on success. A negative errno code is returned
* on a failure to set up the device, or if the name is a duplicate.
*
* This is a wrapper around register_netdevice that takes the rtnl semaphore
* and expands the device name if you passed a format string to
* alloc_netdev.
*/
int register_netdev(struct net_device *dev)
{
struct net *net = dev_net(dev);
int err;
if (rtnl_net_lock_killable(net))
return -EINTR;
err = register_netdevice(dev);
rtnl_net_unlock(net);
return err;
}
EXPORT_SYMBOL(register_netdev);
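/*
 * Illustrative sketch (not part of this file): the usual probe-time pairing
 * of alloc_etherdev()/register_netdev(), with free_netdev() on failure.
 * The foo_* identifiers are hypothetical.
 *
 *	static int foo_probe(struct foo_device *fdev)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct foo_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		dev->netdev_ops = &foo_netdev_ops;
 *		err = register_netdev(dev);
 *		if (err) {
 *			free_netdev(dev);
 *			return err;
 *		}
 *		return 0;
 *	}
 */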
int netdev_refcnt_read(const struct net_device *dev)
{
#ifdef CONFIG_PCPU_DEV_REFCNT
int i, refcnt = 0;
for_each_possible_cpu(i)
refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
return refcnt;
#else
return refcount_read(&dev->dev_refcnt);
#endif
}
EXPORT_SYMBOL(netdev_refcnt_read);
int netdev_unregister_timeout_secs __read_mostly = 10;
#define WAIT_REFS_MIN_MSECS 1
#define WAIT_REFS_MAX_MSECS 250
/**
* netdev_wait_allrefs_any - wait until all references are gone.
* @list: list of net_devices to wait on
*
* This is called when unregistering network devices.
*
* Any protocol or device that holds a reference should register
* for netdevice notification, and cleanup and put back the
* reference if they receive an UNREGISTER event.
* We can get stuck here if buggy protocols don't correctly
* call dev_put.
*/
static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
{
unsigned long rebroadcast_time, warning_time;
struct net_device *dev;
int wait = 0;
rebroadcast_time = warning_time = jiffies;
list_for_each_entry(dev, list, todo_list)
if (netdev_refcnt_read(dev) == 1)
return dev;
while (true) {
if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
rtnl_lock();
/* Rebroadcast unregister notification */
list_for_each_entry(dev, list, todo_list)
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
__rtnl_unlock();
rcu_barrier();
rtnl_lock();
list_for_each_entry(dev, list, todo_list)
if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
&dev->state)) {
/* We must not have linkwatch events
* pending on unregister. If this
* happens, we simply run the queue
* unscheduled, resulting in a noop
* for this device.
*/
linkwatch_run_queue();
break;
}
__rtnl_unlock();
rebroadcast_time = jiffies;
}
rcu_barrier();
if (!wait) {
wait = WAIT_REFS_MIN_MSECS;
} else {
msleep(wait);
wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
}
list_for_each_entry(dev, list, todo_list)
if (netdev_refcnt_read(dev) == 1)
return dev;
if (time_after(jiffies, warning_time +
READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
list_for_each_entry(dev, list, todo_list) {
pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
dev->name, netdev_refcnt_read(dev));
ref_tracker_dir_print(&dev->refcnt_tracker, 10);
}
warning_time = jiffies;
}
}
}
/* The sequence is:
*
* rtnl_lock();
* ...
* register_netdevice(x1);
* register_netdevice(x2);
* ...
* unregister_netdevice(y1);
* unregister_netdevice(y2);
* ...
* rtnl_unlock();
* free_netdev(y1);
* free_netdev(y2);
*
* We are invoked by rtnl_unlock().
* This allows us to deal with problems:
* 1) We can delete sysfs objects which invoke hotplug
* without deadlocking with linkwatch via keventd.
* 2) Since we run with the RTNL semaphore not held, we can sleep
* safely in order to wait for the netdev refcnt to drop to zero.
*
* We must not return until all unregister events added during
* the interval the lock was held have been completed.
*/
void netdev_run_todo(void)
{
struct net_device *dev, *tmp;
struct list_head list;
int cnt;
#ifdef CONFIG_LOCKDEP
struct list_head unlink_list;
list_replace_init(&net_unlink_list, &unlink_list);
while (!list_empty(&unlink_list)) {
dev = list_first_entry(&unlink_list, struct net_device,
unlink_list);
list_del_init(&dev->unlink_list);
dev->nested_level = dev->lower_level - 1;
}
#endif
/* Snapshot list, allow later requests */
list_replace_init(&net_todo_list, &list);
__rtnl_unlock();
/* Wait for rcu callbacks to finish before next phase */
if (!list_empty(&list))
rcu_barrier();
list_for_each_entry_safe(dev, tmp, &list, todo_list) {
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
netdev_WARN(dev, "run_todo but not unregistering\n");
list_del(&dev->todo_list);
continue;
}
netdev_lock(dev);
WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
netdev_unlock(dev);
linkwatch_sync_dev(dev);
}
cnt = 0;
while (!list_empty(&list)) {
dev = netdev_wait_allrefs_any(&list);
list_del(&dev->todo_list);
/* paranoia */
BUG_ON(netdev_refcnt_read(dev) != 1);
BUG_ON(!list_empty(&dev->ptype_all));
BUG_ON(!list_empty(&dev->ptype_specific));
WARN_ON(rcu_access_pointer(dev->ip_ptr));
WARN_ON(rcu_access_pointer(dev->ip6_ptr));
netdev_do_free_pcpu_stats(dev);
if (dev->priv_destructor)
dev->priv_destructor(dev);
if (dev->needs_free_netdev)
free_netdev(dev);
cnt++;
/* Free network device */
kobject_put(&dev->dev.kobj);
}
if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
wake_up(&netdev_unregistering_wq);
}
/* Collate per-cpu network dstats statistics
*
* Read per-cpu network statistics from dev->dstats and populate the related
* fields in @s.
*/
static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
const struct pcpu_dstats __percpu *dstats)
{
int cpu;
for_each_possible_cpu(cpu) {
u64 rx_packets, rx_bytes, rx_drops;
u64 tx_packets, tx_bytes, tx_drops;
const struct pcpu_dstats *stats;
unsigned int start;
stats = per_cpu_ptr(dstats, cpu);
do {
start = u64_stats_fetch_begin(&stats->syncp);
rx_packets = u64_stats_read(&stats->rx_packets);
rx_bytes = u64_stats_read(&stats->rx_bytes);
rx_drops = u64_stats_read(&stats->rx_drops);
tx_packets = u64_stats_read(&stats->tx_packets);
tx_bytes = u64_stats_read(&stats->tx_bytes);
tx_drops = u64_stats_read(&stats->tx_drops);
} while (u64_stats_fetch_retry(&stats->syncp, start));
s->rx_packets += rx_packets;
s->rx_bytes += rx_bytes;
s->rx_dropped += rx_drops;
s->tx_packets += tx_packets;
s->tx_bytes += tx_bytes;
s->tx_dropped += tx_drops;
}
}
/* ndo_get_stats64 implementation for dstats-based accounting.
*
* Populate @s from dev->stats and dev->dstats. This is used internally by the
* core for NETDEV_PCPU_STAT_DSTATS-type stats collection.
*/
static void dev_get_dstats64(const struct net_device *dev,
struct rtnl_link_stats64 *s)
{
netdev_stats_to_stats64(s, &dev->stats);
dev_fetch_dstats(s, dev->dstats);
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
* all the same fields in the same order as net_device_stats, with only
* the type differing, but rtnl_link_stats64 may have additional fields
* at the end for newer counters.
*/
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats)
{
size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
const atomic_long_t *src = (atomic_long_t *)netdev_stats;
u64 *dst = (u64 *)stats64;
BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
for (i = 0; i < n; i++)
dst[i] = (unsigned long)atomic_long_read(&src[i]);
/* zero out counters that only exist in rtnl_link_stats64 */
memset((char *)stats64 + n * sizeof(u64), 0,
sizeof(*stats64) - n * sizeof(u64));
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
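/*
 * Illustrative sketch (not part of this file): a driver that only maintains
 * the legacy dev->stats counters can expose 64-bit statistics by converting
 * them in a (hypothetical) ndo_get_stats64 callback:
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *s)
 *	{
 *		netdev_stats_to_stats64(s, &dev->stats);
 *	}
 */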
static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
struct net_device *dev)
{
struct net_device_core_stats __percpu *p;
p = alloc_percpu_gfp(struct net_device_core_stats,
GFP_ATOMIC | __GFP_NOWARN);
if (p && cmpxchg(&dev->core_stats, NULL, p))
free_percpu(p);
/* This READ_ONCE() pairs with the cmpxchg() above */
return READ_ONCE(dev->core_stats);
}
noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
{
/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
unsigned long __percpu *field;
if (unlikely(!p)) {
p = netdev_core_stats_alloc(dev);
if (!p)
return;
}
field = (unsigned long __percpu *)((void __percpu *)p + offset);
this_cpu_inc(*field);
}
EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
/**
* dev_get_stats - get network device statistics
* @dev: device to get statistics from
* @storage: place to store stats
*
* Get network statistics from device. Return @storage.
* The device driver may provide its own method by setting
* dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
* otherwise the internal statistics structure is used.
*/
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage)
{
const struct net_device_ops *ops = dev->netdev_ops;
const struct net_device_core_stats __percpu *p;
/*
* IPv{4,6} and udp tunnels share common stat helpers and use
* different stat type (NETDEV_PCPU_STAT_TSTATS vs
* NETDEV_PCPU_STAT_DSTATS). Ensure the accounting is consistent.
*/
BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_bytes) !=
offsetof(struct pcpu_dstats, rx_bytes));
BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_packets) !=
offsetof(struct pcpu_dstats, rx_packets));
BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_bytes) !=
offsetof(struct pcpu_dstats, tx_bytes));
BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_packets) !=
offsetof(struct pcpu_dstats, tx_packets));
if (ops->ndo_get_stats64) {
memset(storage, 0, sizeof(*storage));
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
dev_get_tstats64(dev, storage);
} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) {
dev_get_dstats64(dev, storage);
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
p = READ_ONCE(dev->core_stats);
if (p) {
const struct net_device_core_stats *core_stats;
int i;
for_each_possible_cpu(i) {
core_stats = per_cpu_ptr(p, i);
storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
}
}
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
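/*
 * Illustrative sketch (not part of this file): a caller that wants the
 * combined driver and core counters fills a stack-allocated
 * rtnl_link_stats64. The helper name below is hypothetical.
 *
 *	static u64 foo_total_rx_packets(struct net_device *dev)
 *	{
 *		struct rtnl_link_stats64 storage;
 *
 *		dev_get_stats(dev, &storage);
 *		return storage.rx_packets;
 *	}
 */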
/**
* dev_fetch_sw_netstats - get per-cpu network device statistics
* @s: place to store stats
* @netstats: per-cpu network stats to read from
*
* Read per-cpu network statistics and populate the related fields in @s.
*/
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats)
{
int cpu;
for_each_possible_cpu(cpu) {
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
const struct pcpu_sw_netstats *stats;
unsigned int start;
stats = per_cpu_ptr(netstats, cpu);
do {
start = u64_stats_fetch_begin(&stats->syncp);
rx_packets = u64_stats_read(&stats->rx_packets);
rx_bytes = u64_stats_read(&stats->rx_bytes);
tx_packets = u64_stats_read(&stats->tx_packets);
tx_bytes = u64_stats_read(&stats->tx_bytes);
} while (u64_stats_fetch_retry(&stats->syncp, start));
s->rx_packets += rx_packets;
s->rx_bytes += rx_bytes;
s->tx_packets += tx_packets;
s->tx_bytes += tx_bytes;
}
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
/**
* dev_get_tstats64 - ndo_get_stats64 implementation
* @dev: device to get statistics from
* @s: place to store stats
*
* Populate @s from dev->stats and dev->tstats. Can be used as
* ndo_get_stats64() callback.
*/
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{
netdev_stats_to_stats64(s, &dev->stats);
dev_fetch_sw_netstats(s, dev->tstats);
}
EXPORT_SYMBOL_GPL(dev_get_tstats64);
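/*
 * Illustrative sketch (not part of this file): a tunnel-style driver using
 * per-cpu tstats accounting can point its ndo_get_stats64 at
 * dev_get_tstats64 and request TSTATS allocation at setup time. The
 * foo_* names are hypothetical.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_start_xmit  = foo_xmit,
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		dev->netdev_ops = &foo_netdev_ops;
 *		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *	}
 */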
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
struct netdev_queue *queue = dev_ingress_queue(dev);
#ifdef CONFIG_NET_CLS_ACT
if (queue)
return queue;
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue)
return NULL;
netdev_init_one_queue(dev, queue, NULL);
RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
rcu_assign_pointer(dev->ingress_queue, queue);
#endif
return queue;
}
static const struct ethtool_ops default_ethtool_ops;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops)
{
if (dev->ethtool_ops == &default_ethtool_ops)
dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
/**
* netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
* @dev: netdev to enable the IRQ coalescing on
*
* Sets a conservative default for SW IRQ coalescing. Users can use
* sysfs attributes to override the default values.
*/
void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
{
WARN_ON(dev->reg_state == NETREG_REGISTERED);
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
netdev_set_gro_flush_timeout(dev, 20000);
netdev_set_defer_hard_irqs(dev, 1);
}
}
EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
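/*
 * Illustrative sketch (not part of this file): drivers opt into the SW IRQ
 * coalescing defaults before registration, e.g. from a hypothetical probe
 * path:
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	netdev_sw_irq_coalesce_default_on(dev);
 *	err = register_netdev(dev);
 */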
/**
* alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
* @name: device name format string
* @name_assign_type: origin of device name
* @setup: callback to initialize device
* @txqs: the number of TX subqueues to allocate
* @rxqs: the number of RX subqueues to allocate
*
* Allocates a struct net_device with private data area for driver use
* and performs basic initialization. Also allocates subqueue structs
* for each queue on the device.
*/
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
size_t napi_config_sz;
unsigned int maxqs;
BUG_ON(strlen(name) >= sizeof(dev->name));
if (txqs < 1) {
pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
return NULL;
}
if (rxqs < 1) {
pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
return NULL;
}
maxqs = max(txqs, rxqs);
dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!dev)
return NULL;
dev->priv_len = sizeof_priv;
ref_tracker_dir_init(&dev->refcnt_tracker, 128, "netdev");
#ifdef CONFIG_PCPU_DEV_REFCNT
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
goto free_dev;
__dev_hold(dev);
#else
refcount_set(&dev->dev_refcnt, 1);
#endif
if (dev_addr_init(dev))
goto free_pcpu;
dev_mc_init(dev);
dev_uc_init(dev);
dev_net_set(dev, &init_net);
dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
dev->xdp_zc_max_segs = 1;
dev->gso_max_segs = GSO_MAX_SEGS;
dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
dev->tso_max_segs = TSO_MAX_SEGS;
dev->upper_level = 1;
dev->lower_level = 1;
#ifdef CONFIG_LOCKDEP
dev->nested_level = 0;
INIT_LIST_HEAD(&dev->unlink_list);
#endif
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->close_list);
INIT_LIST_HEAD(&dev->link_watch_list);
INIT_LIST_HEAD(&dev->adj_list.upper);
INIT_LIST_HEAD(&dev->adj_list.lower);
INIT_LIST_HEAD(&dev->ptype_all);
INIT_LIST_HEAD(&dev->ptype_specific);
INIT_LIST_HEAD(&dev->net_notifier_list);
#ifdef CONFIG_NET_SCHED
hash_init(dev->qdisc_hash);
#endif
mutex_init(&dev->lock);
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
if (!dev->tx_queue_len) {
dev->priv_flags |= IFF_NO_QUEUE;
dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
}
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
goto free_all;
dev->num_rx_queues = rxqs;
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_all;
dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
if (!dev->ethtool)
goto free_all;
dev->cfg = kzalloc(sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT);
if (!dev->cfg)
goto free_all;
dev->cfg_pending = dev->cfg;
dev->num_napi_configs = maxqs;
napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config));
dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
if (!dev->napi_config)
goto free_all;
strscpy(dev->name, name);
dev->name_assign_type = name_assign_type;
dev->group = INIT_NETDEV_GROUP;
if (!dev->ethtool_ops)
dev->ethtool_ops = &default_ethtool_ops;
nf_hook_netdev_init(dev);
return dev;
free_all:
free_netdev(dev);
return NULL;
free_pcpu:
#ifdef CONFIG_PCPU_DEV_REFCNT
free_percpu(dev->pcpu_refcnt);
free_dev:
#endif
kvfree(dev);
return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
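/*
 * Illustrative sketch (not part of this file): allocating a multi-queue
 * Ethernet-style device with 8 TX and 8 RX queues; ether_setup() is the
 * stock setup callback, the foo_priv name is hypothetical.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_ENUM, ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */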
static void netdev_napi_exit(struct net_device *dev)
{
if (!list_empty(&dev->napi_list)) {
struct napi_struct *p, *n;
netdev_lock(dev);
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
__netif_napi_del_locked(p);
netdev_unlock(dev);
synchronize_net();
}
kvfree(dev->napi_config);
}
/**
* free_netdev - free network device
* @dev: device
*
* This function does the last stage of destroying an allocated device
* interface. The reference to the device object is released. If this
* is the last reference then it will be freed. Must be called in process
* context.
*/
void free_netdev(struct net_device *dev)
{
might_sleep();
/* When called immediately after register_netdevice() failed the unwind
* handling may still be dismantling the device. Handle that case by
* deferring the free.
*/
if (dev->reg_state == NETREG_UNREGISTERING) {
ASSERT_RTNL();
dev->needs_free_netdev = true;
return;
}
WARN_ON(dev->cfg != dev->cfg_pending);
kfree(dev->cfg);
kfree(dev->ethtool);
netif_free_tx_queues(dev);
netif_free_rx_queues(dev);
kfree(rcu_dereference_protected(dev->ingress_queue, 1));
/* Flush device addresses */
dev_addr_flush(dev);
netdev_napi_exit(dev);
netif_del_cpu_rmap(dev);
ref_tracker_dir_exit(&dev->refcnt_tracker);
#ifdef CONFIG_PCPU_DEV_REFCNT
free_percpu(dev->pcpu_refcnt);
dev->pcpu_refcnt = NULL;
#endif
free_percpu(dev->core_stats);
dev->core_stats = NULL;
free_percpu(dev->xdp_bulkq);
dev->xdp_bulkq = NULL;
netdev_free_phy_link_topology(dev);
mutex_destroy(&dev->lock);
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED ||
dev->reg_state == NETREG_DUMMY) {
kvfree(dev);
return;
}
BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
/* will free via device release */
put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
* alloc_netdev_dummy - Allocate and initialize a dummy net device.
* @sizeof_priv: size of private data to allocate space for
*
* Return: the allocated net_device on success, NULL otherwise
*/
struct net_device *alloc_netdev_dummy(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
init_dummy_netdev);
}
EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
/**
* synchronize_net - Synchronize with packet receive processing
*
* Wait for packets currently being received to be done.
* Does not block later packets from starting.
*/
void synchronize_net(void)
{
might_sleep();
if (from_cleanup_net() || rtnl_is_locked())
synchronize_rcu_expedited();
else
synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
static void netdev_rss_contexts_free(struct net_device *dev)
{
struct ethtool_rxfh_context *ctx;
unsigned long context;
mutex_lock(&dev->ethtool->rss_lock);
xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
xa_erase(&dev->ethtool->rss_ctx, context);
dev->ethtool_ops->remove_rxfh_context(dev, ctx, context, NULL);
kfree(ctx);
}
xa_destroy(&dev->ethtool->rss_ctx);
mutex_unlock(&dev->ethtool->rss_lock);
}
/**
* unregister_netdevice_queue - remove device from the kernel
* @dev: device
* @head: list
*
* This function shuts down a device interface and removes it
* from the kernel tables.
* If @head is not NULL, the device is queued to be unregistered later.
*
* Callers must hold the rtnl semaphore. You may want
* unregister_netdev() instead of this.
*/
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
ASSERT_RTNL();
if (head) {
list_move_tail(&dev->unreg_list, head);
} else {
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
unregister_netdevice_many(&single);
}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
static void dev_memory_provider_uninstall(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct netdev_rx_queue *rxq = &dev->_rx[i];
struct pp_memory_provider_params *p = &rxq->mp_params;
if (p->mp_ops && p->mp_ops->uninstall)
p->mp_ops->uninstall(rxq->mp_params.mp_priv, rxq);
}
}
/* devices must be UP and netdev_lock()'d */
static void netif_close_many_and_unlock(struct list_head *close_head)
{
struct net_device *dev, *tmp;
netif_close_many(close_head, false);
/* ... now unlock them */
list_for_each_entry_safe(dev, tmp, close_head, close_list) {
netdev_unlock(dev);
list_del_init(&dev->close_list);
}
}
static void netif_close_many_and_unlock_cond(struct list_head *close_head)
{
#ifdef CONFIG_LOCKDEP
/* We can only track up to MAX_LOCK_DEPTH locks per task.
*
* Reserve half the available slots for additional locks possibly
* taken by notifiers and (soft)irqs.
*/
unsigned int limit = MAX_LOCK_DEPTH / 2;
if (lockdep_depth(current) > limit)
netif_close_many_and_unlock(close_head);
#endif
}
void unregister_netdevice_many_notify(struct list_head *head,
u32 portid, const struct nlmsghdr *nlh)
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
int cnt = 0;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
if (list_empty(head))
return;
list_for_each_entry_safe(dev, tmp, head, unreg_list) {
/* Some devices call without registering
* for initialization unwind. Remove those
* devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never was registered\n",
dev->name, dev);
WARN_ON(1);
list_del(&dev->unreg_list);
continue;
}
dev->dismantle = true;
BUG_ON(dev->reg_state != NETREG_REGISTERED);
}
/* If device is running, close it first. Start with ops locked... */
list_for_each_entry(dev, head, unreg_list) {
if (!(dev->flags & IFF_UP))
continue;
if (netdev_need_ops_lock(dev)) {
list_add_tail(&dev->close_list, &close_head);
netdev_lock(dev);
}
netif_close_many_and_unlock_cond(&close_head);
}
netif_close_many_and_unlock(&close_head);
/* ... now go over the rest. */
list_for_each_entry(dev, head, unreg_list) {
if (!netdev_need_ops_lock(dev))
list_add_tail(&dev->close_list, &close_head);
}
netif_close_many(&close_head, true);
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
unlist_netdevice(dev);
netdev_lock(dev);
WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
netdev_unlock(dev);
}
flush_all_backlogs();
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
struct sk_buff *skb = NULL;
/* Shutdown queueing discipline. */
netdev_lock_ops(dev);
dev_shutdown(dev);
dev_tcx_uninstall(dev);
dev_xdp_uninstall(dev);
dev_memory_provider_uninstall(dev);
netdev_unlock_ops(dev);
bpf_dev_bound_netdev_unregister(dev);
netdev_offload_xstats_disable_all(dev);
/* Notify protocols, that we are about to destroy
* this device. They should clean all the things.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
if (!(dev->rtnl_link_ops && dev->rtnl_link_initializing))
skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
GFP_KERNEL, NULL, 0,
portid, nlh);
/*
* Flush the unicast and multicast chains
*/
dev_uc_flush(dev);
dev_mc_flush(dev);
netdev_name_node_alt_flush(dev);
netdev_name_node_free(dev->name_node);
netdev_rss_contexts_free(dev);
call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
mutex_destroy(&dev->ethtool->rss_lock);
net_shaper_flush_netdev(dev);
if (skb)
rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
/* Notifier chain MUST detach us all upper devices. */
WARN_ON(netdev_has_any_upper_dev(dev));
WARN_ON(netdev_has_any_lower_dev(dev));
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
/* Remove XPS queueing entries */
netif_reset_xps_queues_gt(dev, 0);
#endif
}
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
netdev_put(dev, &dev->dev_registered_tracker);
net_set_todo(dev);
cnt++;
}
atomic_add(cnt, &dev_unreg_count);
list_del(head);
}
/**
* unregister_netdevice_many - unregister many devices
* @head: list of devices
*
* Note: As most callers use a stack allocated list_head,
* we force a list_del() to make sure stack won't be corrupted later.
*/
void unregister_netdevice_many(struct list_head *head)
{
unregister_netdevice_many_notify(head, 0, NULL);
}
EXPORT_SYMBOL(unregister_netdevice_many);
/**
* unregister_netdev - remove device from the kernel
* @dev: device
*
* This function shuts down a device interface and removes it
* from the kernel tables.
*
* This is just a wrapper for unregister_netdevice that takes
* the rtnl semaphore. In general you want to use this and not
* unregister_netdevice.
*/
void unregister_netdev(struct net_device *dev)
{
rtnl_net_dev_lock(dev);
unregister_netdevice(dev);
rtnl_net_dev_unlock(dev);
}
EXPORT_SYMBOL(unregister_netdev);
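/*
 * Illustrative sketch (not part of this file): the usual driver remove path
 * mirrors the probe path shown earlier - unregister first, then release the
 * net_device. The foo_* names are hypothetical.
 *
 *	static void foo_remove(struct foo_device *fdev)
 *	{
 *		struct net_device *dev = fdev->netdev;
 *
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 *	}
 */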
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex,
struct netlink_ext_ack *extack)
{
struct netdev_name_node *name_node;
struct net *net_old = dev_net(dev);
char new_name[IFNAMSIZ] = {};
int err, new_nsid;
ASSERT_RTNL();
/* Don't allow namespace local devices to be moved. */
err = -EINVAL;
if (dev->netns_immutable) {
NL_SET_ERR_MSG(extack, "The interface netns is immutable");
goto out;
}
/* Ensure the device has been registered */
if (dev->reg_state != NETREG_REGISTERED) {
NL_SET_ERR_MSG(extack, "The interface isn't registered");
goto out;
}
/* Get out if there is nothing to do */
err = 0;
if (net_eq(net_old, net))
goto out;
/* Pick the destination device name, and ensure
* we can use it in the destination network namespace.
*/
err = -EEXIST;
if (netdev_name_in_use(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat) {
NL_SET_ERR_MSG(extack,
"An interface with the same name exists in the target netns");
goto out;
}
err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
if (err < 0) {
NL_SET_ERR_MSG_FMT(extack,
"Unable to use '%s' for the new interface name in the target netns",
pat);
goto out;
}
}
/* Check that none of the altnames conflicts. */
err = -EEXIST;
netdev_for_each_altname(dev, name_node) {
if (netdev_name_in_use(net, name_node->name)) {
NL_SET_ERR_MSG_FMT(extack,
"An interface with the altname %s exists in the target netns",
name_node->name);
goto out;
}
}
/* Check that new_ifindex isn't used yet. */
if (new_ifindex) {
err = dev_index_reserve(net, new_ifindex);
if (err < 0) {
NL_SET_ERR_MSG_FMT(extack,
"The ifindex %d is not available in the target netns",
new_ifindex);
goto out;
}
} else {
/* If there is an ifindex conflict assign a new one */
err = dev_index_reserve(net, dev->ifindex);
if (err == -EBUSY)
err = dev_index_reserve(net, 0);
if (err < 0) {
NL_SET_ERR_MSG(extack,
"Unable to allocate a new ifindex in the target netns");
goto out;
}
new_ifindex = err;
}
/*
* And now a mini version of register_netdevice() and unregister_netdevice().
*/
netdev_lock_ops(dev);
/* If device is running close it first. */
netif_close(dev);
/* And unlink it from device chain */
unlist_netdevice(dev);
if (!netdev_need_ops_lock(dev))
netdev_lock(dev);
dev->moving_ns = true;
netdev_unlock(dev);
synchronize_net();
/* Shutdown queueing discipline. */
netdev_lock_ops(dev);
dev_shutdown(dev);
netdev_unlock_ops(dev);
/* Notify protocols, that we are about to destroy
* this device. They should clean all the things.
*
* Note that dev->reg_state stays at NETREG_REGISTERED.
* This is wanted because this way 8021q and macvlan know
* the device is just moving and can keep their slaves up.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
rcu_barrier();
new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
new_ifindex);
/*
* Flush the unicast and multicast chains
*/
dev_uc_flush(dev);
dev_mc_flush(dev);
/* Send a netdev-removed uevent to the old namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
netdev_adjacent_del_links(dev);
/* Move per-net netdevice notifiers that are following the netdevice */
move_netdevice_notifiers_dev_net(dev, net);
/* Actually switch the network namespace */
netdev_lock(dev);
dev_net_set(dev, net);
netdev_unlock(dev);
dev->ifindex = new_ifindex;
if (new_name[0]) {
/* Rename the netdev to prepared name */
write_seqlock_bh(&netdev_rename_lock);
strscpy(dev->name, new_name, IFNAMSIZ);
write_sequnlock_bh(&netdev_rename_lock);
}
/* Fixup kobjects */
dev_set_uevent_suppress(&dev->dev, 1);
err = device_rename(&dev->dev, dev->name);
dev_set_uevent_suppress(&dev->dev, 0);
WARN_ON(err);
/* Send a netdev-add uevent to the new namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
netdev_adjacent_add_links(dev);
/* Adapt owner in case owning user namespace of target network
* namespace is different from the original one.
*/
err = netdev_change_owner(dev, net_old, net);
WARN_ON(err);
netdev_lock(dev);
dev->moving_ns = false;
if (!netdev_need_ops_lock(dev))
netdev_unlock(dev);
/* Add the device back in the hashes */
list_netdevice(dev);
/* Notify protocols, that a new device appeared. */
call_netdevice_notifiers(NETDEV_REGISTER, dev);
netdev_unlock_ops(dev);
/*
* Prevent userspace races by waiting until the network
* device is fully setup before sending notifications.
*/
rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
synchronize_net();
err = 0;
out:
return err;
}
static int dev_cpu_dead(unsigned int oldcpu)
{
struct sk_buff **list_skb;
struct sk_buff *skb;
unsigned int cpu;
struct softnet_data *sd, *oldsd, *remsd = NULL;
local_irq_disable();
cpu = smp_processor_id();
sd = &per_cpu(softnet_data, cpu);
oldsd = &per_cpu(softnet_data, oldcpu);
/* Find end of our completion_queue. */
list_skb = &sd->completion_queue;
while (*list_skb)
list_skb = &(*list_skb)->next;
/* Append completion queue from offline CPU. */
*list_skb = oldsd->completion_queue;
oldsd->completion_queue = NULL;
/* Append output queue from offline CPU. */
if (oldsd->output_queue) {
*sd->output_queue_tailp = oldsd->output_queue;
sd->output_queue_tailp = oldsd->output_queue_tailp;
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
/* Append NAPI poll list from offline CPU, with one exception :
* process_backlog() must be called by cpu owning percpu backlog.
* We properly handle process_queue & input_pkt_queue later.
*/
while (!list_empty(&oldsd->poll_list)) {
struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
struct napi_struct,
poll_list);
list_del_init(&napi->poll_list);
if (napi->poll == process_backlog)
napi->state &= NAPIF_STATE_THREADED;
else
____napi_schedule(sd, napi);
}
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
if (!use_backlog_threads()) {
#ifdef CONFIG_RPS
remsd = oldsd->rps_ipi_list;
oldsd->rps_ipi_list = NULL;
#endif
/* send out pending IPI's on offline CPU */
net_rps_send_ipi(remsd);
}
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
netif_rx(skb);
rps_input_queue_head_incr(oldsd);
}
while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx(skb);
rps_input_queue_head_incr(oldsd);
}
return 0;
}
/**
* netdev_increment_features - increment feature set by one
* @all: current feature set
* @one: new feature set
* @mask: mask feature set
*
* Computes a new feature set after adding a device with feature set
* @one to the master device with current feature set @all. Will not
* enable anything that is off in @mask. Returns the new feature set.
*/
netdev_features_t netdev_increment_features(netdev_features_t all,
netdev_features_t one, netdev_features_t mask)
{
if (mask & NETIF_F_HW_CSUM)
mask |= NETIF_F_CSUM_MASK;
mask |= NETIF_F_VLAN_CHALLENGED;
all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
all &= one | ~NETIF_F_ALL_FOR_ALL;
/* If one device supports hw checksumming, set for all. */
if (all & NETIF_F_HW_CSUM)
all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
return all;
}
EXPORT_SYMBOL(netdev_increment_features);
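/*
 * Illustrative sketch (not part of this file): an aggregating master device
 * (bonding-like) would fold each slave's feature set into its own, starting
 * from the all-for-all mask. The foo_* names are hypothetical.
 *
 *	static netdev_features_t foo_compute_features(struct foo_master *master)
 *	{
 *		netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *		struct foo_slave *slave;
 *
 *		list_for_each_entry(slave, &master->slaves, list)
 *			features = netdev_increment_features(features,
 *							     slave->dev->features,
 *							     master->feature_mask);
 *		return features;
 *	}
 */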
static struct hlist_head * __net_init netdev_create_hash(void)
{
int i;
struct hlist_head *hash;
hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
if (hash != NULL)
for (i = 0; i < NETDEV_HASHENTRIES; i++)
INIT_HLIST_HEAD(&hash[i]);
return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
BUILD_BUG_ON(GRO_HASH_BUCKETS >
BITS_PER_BYTE * sizeof_field(struct gro_node, bitmask));
INIT_LIST_HEAD(&net->dev_base_head);
net->dev_name_head = netdev_create_hash();
if (net->dev_name_head == NULL)
goto err_name;
net->dev_index_head = netdev_create_hash();
if (net->dev_index_head == NULL)
goto err_idx;
xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);
RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
return 0;
err_idx:
kfree(net->dev_name_head);
err_name:
return -ENOMEM;
}
/**
* netdev_drivername - network driver for the device
* @dev: network device
*
* Determine network driver for device.
*/
const char *netdev_drivername(const struct net_device *dev)
{
const struct device_driver *driver;
const struct device *parent;
const char *empty = "";
parent = dev->dev.parent;
if (!parent)
return empty;
driver = parent->driver;
if (driver && driver->name)
return driver->name;
return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
struct va_format *vaf)
{
if (dev && dev->dev.parent) {
dev_printk_emit(level[1] - '0',
dev->dev.parent,
"%s %s %s%s: %pV",
dev_driver_string(dev->dev.parent),
dev_name(dev->dev.parent),
netdev_name(dev), netdev_reg_state(dev),
vaf);
} else if (dev) {
printk("%s%s%s: %pV",
level, netdev_name(dev), netdev_reg_state(dev), vaf);
} else {
printk("%s(NULL net_device): %pV", level, vaf);
}
}
void netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
{
struct va_format vaf;
va_list args;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
__netdev_printk(level, dev, &vaf);
va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level) \
void func(const struct net_device *dev, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
__netdev_printk(level, dev, &vaf); \
\
va_end(args); \
} \
EXPORT_SYMBOL(func);
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
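/*
 * Illustrative sketch (not part of this file): these wrappers are used like
 * dev_err()/dev_info() but prefix the message with the netdev name and
 * registration state; the variables below are hypothetical:
 *
 *	netdev_err(dev, "failed to restart TX queue %d\n", queue);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */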
static void __net_exit netdev_exit(struct net *net)
{
kfree(net->dev_name_head);
kfree(net->dev_index_head);
xa_destroy(&net->dev_by_index);
if (net != &init_net)
WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}
static struct pernet_operations __net_initdata netdev_net_ops = {
.init = netdev_init,
.exit = netdev_exit,
};
static void __net_exit default_device_exit_net(struct net *net)
{
struct netdev_name_node *name_node, *tmp;
struct net_device *dev, *aux;
/*
* Push all migratable network devices back to the
* initial network namespace
*/
ASSERT_RTNL();
for_each_netdev_safe(net, dev, aux) {
int err;
char fb_name[IFNAMSIZ];
/* Ignore unmoveable devices (i.e. loopback) */
if (dev->netns_immutable)
continue;
/* Leave virtual devices for the generic cleanup */
if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
continue;
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
netdev_for_each_altname_safe(dev, name_node, tmp)
if (netdev_name_in_use(&init_net, name_node->name))
__netdev_name_node_alt_destroy(name_node);
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
pr_emerg("%s: failed to move %s to init_net: %d\n",
__func__, dev->name, err);
BUG();
}
}
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
/* At exit all network devices must be removed from a network
* namespace. Do this in the reverse order of registration.
* Do this across as many network namespaces as possible to
* improve batching efficiency.
*/
struct net_device *dev;
struct net *net;
LIST_HEAD(dev_kill_list);
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
default_device_exit_net(net);
cond_resched();
}
list_for_each_entry(net, net_list, exit_list) {
for_each_netdev_reverse(net, dev) {
if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
else
unregister_netdevice_queue(dev, &dev_kill_list);
}
}
unregister_netdevice_many(&dev_kill_list);
rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
.exit_batch = default_device_exit_batch,
};
static void __init net_dev_struct_check(void)
{
/* TX read-mostly hotpath */
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
#ifdef CONFIG_XPS
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
#endif
#ifdef CONFIG_NET_XGRESS
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
#endif
CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);
/* TXRX read-mostly hotpath */
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);
/* RX read-mostly hotpath */
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
#ifdef CONFIG_NETPOLL
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
#endif
#ifdef CONFIG_NET_XGRESS
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
#endif
CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 92);
}
/*
* Initialize the DEV module. At boot time this walks the device list and
* unhooks any devices that fail to initialise (normally hardware not
* present) and leaves us with a valid list of present and active devices.
*
*/
/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
#define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
static int net_page_pool_create(int cpuid)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
struct page_pool_params page_pool_params = {
.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
.flags = PP_FLAG_SYSTEM_POOL,
.nid = cpu_to_mem(cpuid),
};
struct page_pool *pp_ptr;
int err;
pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
if (IS_ERR(pp_ptr))
return -ENOMEM;
err = xdp_reg_page_pool(pp_ptr);
if (err) {
page_pool_destroy(pp_ptr);
return err;
}
per_cpu(system_page_pool.pool, cpuid) = pp_ptr;
#endif
return 0;
}
static int backlog_napi_should_run(unsigned int cpu)
{
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
struct napi_struct *napi = &sd->backlog;
return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
}
static void run_backlog_napi(unsigned int cpu)
{
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
napi_threaded_poll_loop(&sd->backlog);
}
static void backlog_napi_setup(unsigned int cpu)
{
struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
struct napi_struct *napi = &sd->backlog;
napi->thread = this_cpu_read(backlog_napi);
set_bit(NAPI_STATE_THREADED, &napi->state);
}
static struct smp_hotplug_thread backlog_threads = {
.store = &backlog_napi,
.thread_should_run = backlog_napi_should_run,
.thread_fn = run_backlog_napi,
.thread_comm = "backlog_napi/%u",
.setup = backlog_napi_setup,
};
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
*/
static int __init net_dev_init(void)
{
int i, rc = -ENOMEM;
BUG_ON(!dev_boot_phase);
net_dev_struct_check();
if (dev_proc_init())
goto out;
if (netdev_kobject_init())
goto out;
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
if (register_pernet_subsys(&netdev_net_ops))
goto out;
/*
* Initialise the packet receive queues.
*/
flush_backlogs_fallback = flush_backlogs_alloc();
if (!flush_backlogs_fallback)
goto out;
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
skb_queue_head_init(&sd->xfrm_backlog);
#endif
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
sd->cpu = i;
#endif
INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
gro_init(&sd->backlog.gro);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
INIT_LIST_HEAD(&sd->backlog.poll_list);
if (net_page_pool_create(i))
goto out;
}
net_hotdata.skb_defer_nodes =
__alloc_percpu(sizeof(struct skb_defer_node) * nr_node_ids,
__alignof__(struct skb_defer_node));
if (!net_hotdata.skb_defer_nodes)
goto out;
if (use_backlog_threads())
smpboot_register_percpu_thread(&backlog_threads);
dev_boot_phase = 0;
/* The loopback device is special: if any other network device
* is present in a network namespace, the loopback device must
* be present as well. Since we now dynamically allocate and free
* the loopback device, ensure this invariant is maintained by
* keeping the loopback device the first device on the list of
* network devices, so that it is the first device that appears
* and the last network device that disappears.
*/
if (register_pernet_device(&loopback_net_ops))
goto out;
if (register_pernet_device(&default_device_ops))
goto out;
open_softirq(NET_TX_SOFTIRQ, net_tx_action);
open_softirq(NET_RX_SOFTIRQ, net_rx_action);
rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
NULL, dev_cpu_dead);
WARN_ON(rc < 0);
rc = 0;
/* avoid static key IPIs to isolated CPUs */
if (housekeeping_enabled(HK_TYPE_MISC))
net_enable_timestamp();
out:
if (rc < 0) {
for_each_possible_cpu(i) {
struct page_pool *pp_ptr;
pp_ptr = per_cpu(system_page_pool.pool, i);
if (!pp_ptr)
continue;
xdp_unreg_page_pool(pp_ptr);
page_pool_destroy(pp_ptr);
per_cpu(system_page_pool.pool, i) = NULL;
}
}
return rc;
}
subsys_initcall(net_dev_init);
// SPDX-License-Identifier: GPL-2.0-only
/*
* jump label support
*
* Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011 Peter Zijlstra
*
*/
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);
void jump_label_lock(void)
{
mutex_lock(&jump_label_mutex);
}
void jump_label_unlock(void)
{
mutex_unlock(&jump_label_mutex);
}
static int jump_label_cmp(const void *a, const void *b)
{
const struct jump_entry *jea = a;
const struct jump_entry *jeb = b;
/*
* Entries are sorted by key.
*/
if (jump_entry_key(jea) < jump_entry_key(jeb))
return -1;
if (jump_entry_key(jea) > jump_entry_key(jeb))
return 1;
/*
* In the batching mode, entries should also be sorted by the code
* inside the already sorted list of entries, enabling a bsearch in
* the vector.
*/
if (jump_entry_code(jea) < jump_entry_code(jeb))
return -1;
if (jump_entry_code(jea) > jump_entry_code(jeb))
return 1;
return 0;
}
static void jump_label_swap(void *a, void *b, int size)
{
long delta = (unsigned long)a - (unsigned long)b;
struct jump_entry *jea = a;
struct jump_entry *jeb = b;
struct jump_entry tmp = *jea;
jea->code = jeb->code - delta;
jea->target = jeb->target - delta;
jea->key = jeb->key - delta;
jeb->code = tmp.code + delta;
jeb->target = tmp.target + delta;
jeb->key = tmp.key + delta;
}
static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
unsigned long size;
void *swapfn = NULL;
if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
swapfn = jump_label_swap;
size = (((unsigned long)stop - (unsigned long)start)
/ sizeof(struct jump_entry));
sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}
static void jump_label_update(struct static_key *key);
/*
* There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
* The use of 'atomic_read()' requires atomic.h and it's problematic for some
* kernel headers such as kernel.h and others. Since static_key_count() is not
* used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's OK
* to have it be a function here. Similarly, for 'static_key_enable()' and
* 'static_key_disable()', which require bug.h. This should allow jump_label.h
* to be included from most/all places for CONFIG_JUMP_LABEL.
*/
int static_key_count(struct static_key *key)
{
/*
* -1 means the first static_key_slow_inc() is in progress.
* static_key_enabled() must return true, so return 1 here.
*/
int n = atomic_read(&key->enabled);
return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
/*
* static_key_fast_inc_not_disabled - adds a user for a static key
* @key: static key that must be already enabled
*
* The caller must make sure that the static key can't get disabled while
* in this function. It doesn't patch jump labels, only adds a user to
* an already enabled static key.
*
* Returns true if the increment was done. Unlike refcount_t the ref counter
* is not saturated, but will fail to increment on overflow.
*/
bool static_key_fast_inc_not_disabled(struct static_key *key)
{
int v;
STATIC_KEY_CHECK_USE(key);
/*
* Negative key->enabled has a special meaning: it sends
* static_key_slow_inc/dec() down the slow path, and it is non-zero
* so it counts as "enabled" in jump_label_update().
*
* The INT_MAX overflow condition is either used by the networking
* code to reset or detected in the slow path of
* static_key_slow_inc_cpuslocked().
*/
v = atomic_read(&key->enabled);
do {
if (v <= 0 || v == INT_MAX)
return false;
} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
return true;
}
EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
bool static_key_slow_inc_cpuslocked(struct static_key *key)
{
lockdep_assert_cpus_held();
/*
* Careful if we get concurrent static_key_slow_inc/dec() calls;
* later calls must wait for the first one to _finish_ the
* jump_label_update() process. At the same time, however,
* the jump_label_update() call below wants to see
* static_key_enabled(&key) for jumps to be updated properly.
*/
if (static_key_fast_inc_not_disabled(key))
return true;
guard(mutex)(&jump_label_mutex);
/* Try to mark it as 'enabling in progress'. */
if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
jump_label_update(key);
/*
* Ensure that when static_key_fast_inc_not_disabled() or
* static_key_dec_not_one() observe the positive value,
* they must also observe all the text changes.
*/
atomic_set_release(&key->enabled, 1);
} else {
/*
* While holding the mutex this should never observe
* anything other than a value >= 1 and must succeed.
*/
if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
return false;
}
return true;
}
bool static_key_slow_inc(struct static_key *key)
{
bool ret;
cpus_read_lock();
ret = static_key_slow_inc_cpuslocked(key);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
void static_key_enable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
lockdep_assert_cpus_held();
if (atomic_read(&key->enabled) > 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
return;
}
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
jump_label_update(key);
/*
* See static_key_slow_inc().
*/
atomic_set_release(&key->enabled, 1);
}
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
void static_key_enable(struct static_key *key)
{
cpus_read_lock();
static_key_enable_cpuslocked(key);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);
void static_key_disable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
lockdep_assert_cpus_held();
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
return;
}
jump_label_lock();
if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
jump_label_update(key);
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
void static_key_disable(struct static_key *key)
{
cpus_read_lock();
static_key_disable_cpuslocked(key);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
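/*
 * Illustrative usage sketch (not part of this file): a typical caller pairs a
 * statically-declared key with a static_branch_*() test in the hot path and
 * flips it with the enable/disable helpers above. The key and function names
 * below are hypothetical.
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_feature_set(bool on)
 *	{
 *		if (on)
 *			static_branch_enable(&my_feature_key);
 *		else
 *			static_branch_disable(&my_feature_key);
 *	}
 *
 *	// hot path: compiles to a NOP until the key is enabled
 *	if (static_branch_unlikely(&my_feature_key))
 *		do_rare_extra_work();
 */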
static bool static_key_dec_not_one(struct static_key *key)
{
int v;
/*
* Go into the slow path if key::enabled is less than or equal to
* one. One is valid to shut down the key, anything less than one
* is an imbalance, which is handled at the call site.
*
* That includes the special case of '-1' which is set in
* static_key_slow_inc_cpuslocked(), but that's harmless as it is
* fully serialized in the slow path below. By the time this task
* acquires the jump label lock the value is back to one and the
* retry under the lock must succeed.
*/
v = atomic_read(&key->enabled);
do {
/*
* Warn about the '-1' case though, since that means a
* decrement is concurrent with a first (0->1) increment. IOW
* people are trying to disable something that wasn't yet fully
* enabled. This suggests an ordering problem on the user side.
*/
WARN_ON_ONCE(v < 0);
/*
* Warn about underflow, and lie about success in an attempt to
* not make things worse.
*/
if (WARN_ON_ONCE(v == 0))
return true;
if (v <= 1)
return false;
} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
return true;
}
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
lockdep_assert_cpus_held();
int val;
if (static_key_dec_not_one(key))
return;
guard(mutex)(&jump_label_mutex);
val = atomic_read(&key->enabled);
/*
* It should be impossible to observe -1 with jump_label_mutex held,
* see static_key_slow_inc_cpuslocked().
*/
if (WARN_ON_ONCE(val == -1))
return;
/*
* Cannot already be 0, something went sideways.
*/
if (WARN_ON_ONCE(val == 0))
return;
if (atomic_dec_and_test(&key->enabled))
jump_label_update(key);
}
static void __static_key_slow_dec(struct static_key *key)
{
cpus_read_lock();
__static_key_slow_dec_cpuslocked(key);
cpus_read_unlock();
}
void jump_label_update_timeout(struct work_struct *work)
{
struct static_key_deferred *key =
container_of(work, struct static_key_deferred, work.work);
__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);
void static_key_slow_dec(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
void static_key_slow_dec_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
__static_key_slow_dec_cpuslocked(key);
}
void __static_key_slow_dec_deferred(struct static_key *key,
struct delayed_work *work,
unsigned long timeout)
{
STATIC_KEY_CHECK_USE(key);
if (static_key_dec_not_one(key))
return;
schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
STATIC_KEY_CHECK_USE(key);
flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
STATIC_KEY_CHECK_USE(key);
key->timeout = rl;
INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
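/*
 * Illustrative sketch (hedged, names hypothetical): rate-limited keys batch
 * the expensive disable path through a delayed work item. A user typically
 * wraps a static_key in a struct static_key_deferred, registers the timeout
 * once with jump_label_rate_limit(), and then uses the deferred decrement
 * helper from <linux/jump_label_ratelimit.h>:
 *
 *	static struct static_key_deferred my_def_key;
 *
 *	void my_init(void)
 *	{
 *		// allow at most one deferred jump_label_update() per second
 *		jump_label_rate_limit(&my_def_key, HZ);
 *	}
 *
 *	void my_get(void)
 *	{
 *		static_key_slow_inc(&my_def_key.key);
 *	}
 *
 *	void my_put(void)
 *	{
 *		static_key_slow_dec_deferred(&my_def_key);
 *	}
 */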
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (jump_entry_code(entry) <= (unsigned long)end &&
jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
return 1;
return 0;
}
static int __jump_label_text_reserved(struct jump_entry *iter_start,
struct jump_entry *iter_stop, void *start, void *end, bool init)
{
struct jump_entry *iter;
iter = iter_start;
while (iter < iter_stop) {
if (init || !jump_entry_is_init(iter)) {
if (addr_conflict(iter, start, end))
return 1;
}
iter++;
}
return 0;
}
#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
/* nothing to do on most architectures */
}
#endif
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}
static inline bool static_key_type(struct static_key *key)
{
return key->type & JUMP_TYPE_TRUE;
}
static inline bool static_key_linked(struct static_key *key)
{
return key->type & JUMP_TYPE_LINKED;
}
static inline void static_key_clear_linked(struct static_key *key)
{
key->type &= ~JUMP_TYPE_LINKED;
}
static inline void static_key_set_linked(struct static_key *key)
{
key->type |= JUMP_TYPE_LINKED;
}
/***
* A 'struct static_key' uses a union such that it either points directly
* to a table of 'struct jump_entry' or to a linked list of modules which in
* turn point to 'struct jump_entry' tables.
*
* The two lower bits of the pointer are used to keep track of which pointer
* type is in use and to store the initial branch direction; we use an access
* function which preserves these bits.
*/
static void static_key_set_entries(struct static_key *key,
struct jump_entry *entries)
{
unsigned long type;
WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
type = key->type & JUMP_TYPE_MASK;
key->entries = entries;
key->type |= type;
}
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
struct static_key *key = jump_entry_key(entry);
bool enabled = static_key_enabled(key);
bool branch = jump_entry_is_branch(entry);
/* See the comment in linux/jump_label.h */
return enabled ^ branch;
}
static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
/*
* Cannot update code that was in an init text area.
*/
if (!init && jump_entry_is_init(entry))
return false;
if (!kernel_text_address(jump_entry_code(entry))) {
/*
* This skips patching built-in __exit, which
* is part of init_section_contains() but is
* not part of kernel_text_address().
*
* Skipping built-in __exit is fine since it
* will never be executed.
*/
WARN_ONCE(!jump_entry_is_init(entry),
"can't patch jump_label at %pS",
(void *)jump_entry_code(entry));
return false;
}
return true;
}
#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
struct jump_entry *entry,
struct jump_entry *stop,
bool init)
{
for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
if (jump_label_can_update(entry, init))
arch_jump_label_transform(entry, jump_label_type(entry));
}
}
#else
static void __jump_label_update(struct static_key *key,
struct jump_entry *entry,
struct jump_entry *stop,
bool init)
{
for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
if (!jump_label_can_update(entry, init))
continue;
if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
/*
* Queue is full: Apply the current queue and try again.
*/
arch_jump_label_transform_apply();
BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
}
}
arch_jump_label_transform_apply();
}
#endif
void __init jump_label_init(void)
{
struct jump_entry *iter_start = __start___jump_table;
struct jump_entry *iter_stop = __stop___jump_table;
struct static_key *key = NULL;
struct jump_entry *iter;
/*
* Since we are initializing the static_key.enabled field with
* the 'raw' int values (to avoid pulling in atomic.h) in
* jump_label.h, let's make sure that is safe. There are only two
* cases to check since we initialize to 0 or 1.
*/
BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
if (static_key_initialized)
return;
cpus_read_lock();
jump_label_lock();
jump_label_sort_entries(iter_start, iter_stop);
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
bool in_init;
/* rewrite NOPs */
if (jump_label_type(iter) == JUMP_LABEL_NOP)
arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
in_init = init_section_contains((void *)jump_entry_code(iter), 1);
jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)
continue;
key = iterk;
static_key_set_entries(key, iter);
}
static_key_initialized = true;
jump_label_unlock();
cpus_read_unlock();
}
static inline bool static_key_sealed(struct static_key *key)
{
return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK);
}
static inline void static_key_seal(struct static_key *key)
{
unsigned long type = key->type & JUMP_TYPE_TRUE;
key->type = JUMP_TYPE_LINKED | type;
}
void jump_label_init_ro(void)
{
struct jump_entry *iter_start = __start___jump_table;
struct jump_entry *iter_stop = __stop___jump_table;
struct jump_entry *iter;
if (WARN_ON_ONCE(!static_key_initialized))
return;
cpus_read_lock();
jump_label_lock();
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk = jump_entry_key(iter);
if (!is_kernel_ro_after_init((unsigned long)iterk))
continue;
if (static_key_sealed(iterk))
continue;
static_key_seal(iterk);
}
jump_label_unlock();
cpus_read_unlock();
}
#ifdef CONFIG_MODULES
enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
struct static_key *key = jump_entry_key(entry);
bool type = static_key_type(key);
bool branch = jump_entry_is_branch(entry);
/* See the comment in linux/jump_label.h */
return type ^ branch;
}
struct static_key_mod {
struct static_key_mod *next;
struct jump_entry *entries;
struct module *mod;
};
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
WARN_ON_ONCE(!static_key_linked(key));
return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}
/***
* key->type and key->next are the same via union.
* This sets key->next and preserves the type bits.
*
* See additional comments above static_key_set_entries().
*/
static void static_key_set_mod(struct static_key *key,
struct static_key_mod *mod)
{
unsigned long type;
WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
type = key->type & JUMP_TYPE_MASK;
key->next = mod;
key->type |= type;
}
static int __jump_label_mod_text_reserved(void *start, void *end)
{
struct module *mod;
int ret;
scoped_guard(rcu) {
mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
if (!try_module_get(mod))
mod = NULL;
}
if (!mod)
return 0;
ret = __jump_label_text_reserved(mod->jump_entries,
mod->jump_entries + mod->num_jump_entries,
start, end, mod->state == MODULE_STATE_COMING);
module_put(mod);
return ret;
}
static void __jump_label_mod_update(struct static_key *key)
{
struct static_key_mod *mod;
for (mod = static_key_mod(key); mod; mod = mod->next) {
struct jump_entry *stop;
struct module *m;
/*
* NULL if the static_key is defined in a module
* that does not use it
*/
if (!mod->entries)
continue;
m = mod->mod;
if (!m)
stop = __stop___jump_table;
else
stop = m->jump_entries + m->num_jump_entries;
__jump_label_update(key, mod->entries, stop,
m && m->state == MODULE_STATE_COMING);
}
}
static int jump_label_add_module(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
struct static_key *key = NULL;
struct static_key_mod *jlm, *jlm2;
/* if the module doesn't have jump label entries, just return */
if (iter_start == iter_stop)
return 0;
jump_label_sort_entries(iter_start, iter_stop);
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
bool in_init;
in_init = within_module_init(jump_entry_code(iter), mod);
jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)
continue;
key = iterk;
if (within_module((unsigned long)key, mod)) {
static_key_set_entries(key, iter);
continue;
}
/*
* If the key was sealed at init, then there's no need to keep a
* reference to its module entries - just patch them now and be
* done with it.
*/
if (static_key_sealed(key))
goto do_poke;
jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
if (!jlm)
return -ENOMEM;
if (!static_key_linked(key)) {
jlm2 = kzalloc(sizeof(struct static_key_mod),
GFP_KERNEL);
if (!jlm2) {
kfree(jlm);
return -ENOMEM;
}
scoped_guard(rcu)
jlm2->mod = __module_address((unsigned long)key);
jlm2->entries = static_key_entries(key);
jlm2->next = NULL;
static_key_set_mod(key, jlm2);
static_key_set_linked(key);
}
jlm->mod = mod;
jlm->entries = iter;
jlm->next = static_key_mod(key);
static_key_set_mod(key, jlm);
static_key_set_linked(key);
/* Only update if we've changed from our initial state */
do_poke:
if (jump_label_type(iter) != jump_label_init_type(iter))
__jump_label_update(key, iter, iter_stop, true);
}
return 0;
}
static void jump_label_del_module(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
struct static_key *key = NULL;
struct static_key_mod *jlm, **prev;
for (iter = iter_start; iter < iter_stop; iter++) {
if (jump_entry_key(iter) == key)
continue;
key = jump_entry_key(iter);
if (within_module((unsigned long)key, mod))
continue;
/* No @jlm allocated because key was sealed at init. */
if (static_key_sealed(key))
continue;
/* No memory during module load */
if (WARN_ON(!static_key_linked(key)))
continue;
prev = &key->next;
jlm = static_key_mod(key);
while (jlm && jlm->mod != mod) {
prev = &jlm->next;
jlm = jlm->next;
}
/* No memory during module load */
if (WARN_ON(!jlm))
continue;
if (prev == &key->next)
static_key_set_mod(key, jlm->next);
else
*prev = jlm->next;
kfree(jlm);
jlm = static_key_mod(key);
/* if only one entry is left, fold it back into the static_key */
if (jlm->next == NULL) {
static_key_set_entries(key, jlm->entries);
static_key_clear_linked(key);
kfree(jlm);
}
}
}
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct module *mod = data;
int ret = 0;
cpus_read_lock();
jump_label_lock();
switch (val) {
case MODULE_STATE_COMING:
ret = jump_label_add_module(mod);
if (ret) {
WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
jump_label_del_module(mod);
}
break;
case MODULE_STATE_GOING:
jump_label_del_module(mod);
break;
}
jump_label_unlock();
cpus_read_unlock();
return notifier_from_errno(ret);
}
static struct notifier_block jump_label_module_nb = {
.notifier_call = jump_label_module_notify,
.priority = 1, /* higher than tracepoints */
};
static __init int jump_label_init_module(void)
{
return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);
#endif /* CONFIG_MODULES */
/***
* jump_label_text_reserved - check if addr range is reserved
* @start: start text addr
* @end: end text addr
*
* checks if the text addr located between @start and @end
* overlaps with any of the jump label patch addresses. Code
* that wants to modify kernel text should first verify that
* it does not overlap with any of the jump label addresses.
* Caller must hold jump_label_mutex.
*
* returns 1 if there is an overlap, 0 otherwise
*/
int jump_label_text_reserved(void *start, void *end)
{
bool init = system_state < SYSTEM_RUNNING;
int ret = __jump_label_text_reserved(__start___jump_table,
__stop___jump_table, start, end, init);
if (ret)
return ret;
#ifdef CONFIG_MODULES
ret = __jump_label_mod_text_reserved(start, end);
#endif
return ret;
}
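/*
 * Illustrative sketch: a text-patching client (e.g. a probe-like facility)
 * would consult the check above before rewriting an instruction range,
 * roughly:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + size))
 *		ret = -EBUSY;	// range backed by a jump label, do not patch
 *	jump_label_unlock();
 *
 * The surrounding locking and the -EBUSY policy are the caller's choice and
 * are shown here only as an assumption.
 */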
static void jump_label_update(struct static_key *key)
{
struct jump_entry *stop = __stop___jump_table;
bool init = system_state < SYSTEM_RUNNING;
struct jump_entry *entry;
#ifdef CONFIG_MODULES
struct module *mod;
if (static_key_linked(key)) {
__jump_label_mod_update(key);
return;
}
scoped_guard(rcu) {
mod = __module_address((unsigned long)key);
if (mod) {
stop = mod->jump_entries + mod->num_jump_entries;
init = mod->state == MODULE_STATE_COMING;
}
}
#endif
entry = static_key_entries(key);
/* if there are no users, entry can be NULL */
if (entry)
__jump_label_update(key, entry, stop, init);
}
#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);
static __init int jump_label_test(void)
{
int i;
for (i = 0; i < 2; i++) {
WARN_ON(static_key_enabled(&sk_true.key) != true);
WARN_ON(static_key_enabled(&sk_false.key) != false);
WARN_ON(!static_branch_likely(&sk_true));
WARN_ON(!static_branch_unlikely(&sk_true));
WARN_ON(static_branch_likely(&sk_false));
WARN_ON(static_branch_unlikely(&sk_false));
static_branch_disable(&sk_true);
static_branch_enable(&sk_false);
WARN_ON(static_key_enabled(&sk_true.key) == true);
WARN_ON(static_key_enabled(&sk_false.key) == false);
WARN_ON(static_branch_likely(&sk_true));
WARN_ON(static_branch_unlikely(&sk_true));
WARN_ON(!static_branch_likely(&sk_false));
WARN_ON(!static_branch_unlikely(&sk_false));
static_branch_enable(&sk_true);
static_branch_disable(&sk_false);
}
return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
SYM_PIC_ALIAS(physical_mask);
#endif
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
paravirt_release_pte(page_to_pfn(pte));
tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
/*
* NOTE! For PAE, any changes to the top page-directory-pointer-table
* entries need a full cr3 reload to flush.
*/
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
static inline void pgd_list_add(pgd_t *pgd)
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
list_add(&ptdesc->pt_list, &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
list_del(&ptdesc->pt_list);
}
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
virt_to_ptdesc(pgd)->pt_mm = mm;
}
struct mm_struct *pgd_page_get_mm(struct page *page)
{
return page_ptdesc(page)->pt_mm;
}
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
/* PAE preallocates all its PMDs. No cloning needed. */
if (!IS_ENABLED(CONFIG_X86_PAE))
clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS);
/* List used to sync kernel mapping updates */
pgd_set_mm(pgd, mm);
pgd_list_add(pgd);
}
static void pgd_dtor(pgd_t *pgd)
{
spin_lock(&pgd_lock);
pgd_list_del(pgd);
spin_unlock(&pgd_lock);
}
/*
* List of all pgd's needed for non-PAE so it can invalidate entries
* in both cached and uncached pgd's; not needed for PAE since the
* kernel pmd is shared. If PAE were not to share the pmd a similar
* tactic would be needed. This is essentially codepath-based locking
* against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed.
* -- nyc
*/
#ifdef CONFIG_X86_PAE
/*
* In PAE mode, we need to do a cr3 reload (=tlb flush) when
* updating the top-level pagetable entries to guarantee the
* processor notices the update. Since this is expensive, and
* all 4 top-level entries are used almost immediately in a
* new process's life, we just pre-populate them here.
*/
#define PREALLOCATED_PMDS PTRS_PER_PGD
/*
* "USER_PMDS" are the PMDs for the user copy of the page tables when
* PTI is enabled. They do not exist when PTI is disabled. Note that
* this is distinct from the user _portion_ of the kernel page tables
* which always exists.
*
* We allocate separate PMDs for the kernel part of the user page-table
* when PTI is enabled. We need them to map the per-process LDT into the
* user-space page-table.
*/
#define PREALLOCATED_USER_PMDS (boot_cpu_has(X86_FEATURE_PTI) ? \
KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
/* Note: almost everything apart from _PAGE_PRESENT is
reserved at the pmd (PDPT) level. */
set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
/*
* According to Intel App note "TLBs, Paging-Structure Caches,
* and Their Invalidation", April 2007, document 317080-001,
* section 8.1: in PAE mode we explicitly have to flush the
* TLB via cr3 if the top-level pgd is changed...
*/
flush_tlb_mm(mm);
}
#else /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
#define PREALLOCATED_USER_PMDS 0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
struct ptdesc *ptdesc;
for (i = 0; i < count; i++)
if (pmds[i]) {
ptdesc = virt_to_ptdesc(pmds[i]);
pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
mm_dec_nr_pmds(mm);
}
}
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
bool failed = false;
gfp_t gfp = GFP_PGTABLE_USER;
if (mm == &init_mm)
gfp &= ~__GFP_ACCOUNT;
gfp &= ~__GFP_HIGHMEM;
for (i = 0; i < count; i++) {
pmd_t *pmd = NULL;
struct ptdesc *ptdesc = pagetable_alloc(gfp, 0);
if (!ptdesc)
failed = true;
if (ptdesc && !pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
ptdesc = NULL;
failed = true;
}
if (ptdesc) {
mm_inc_nr_pmds(mm);
pmd = ptdesc_address(ptdesc);
}
pmds[i] = pmd;
}
if (failed) {
free_pmds(mm, pmds, count);
return -ENOMEM;
}
return 0;
}
/*
* Mop up any pmd pages which may still be attached to the pgd.
* Normally they will be freed by munmap/exit_mmap, but any pmd we
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
pgd_t pgd = *pgdp;
if (pgd_val(pgd) != 0) {
pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
pgd_clear(pgdp);
paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
pmd_free(mm, pmd);
mm_dec_nr_pmds(mm);
}
}
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
int i;
for (i = 0; i < PREALLOCATED_PMDS; i++)
mop_up_one_pmd(mm, &pgdp[i]);
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
pgdp = kernel_to_user_pgdp(pgdp);
for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
p4d_t *p4d;
pud_t *pud;
int i;
p4d = p4d_offset(pgd, 0);
pud = pud_offset(p4d, 0);
for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
pmd_t *pmd = pmds[i];
if (i >= KERNEL_PGD_BOUNDARY)
memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
sizeof(pmd_t) * PTRS_PER_PMD);
pud_populate(mm, pud, pmd);
}
}
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
pgd_t *k_pgd, pmd_t *pmds[])
{
pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
p4d_t *u_p4d;
pud_t *u_pud;
int i;
u_p4d = p4d_offset(u_pgd, 0);
u_pud = pud_offset(u_p4d, 0);
s_pgd += KERNEL_PGD_BOUNDARY;
u_pud += KERNEL_PGD_BOUNDARY;
for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
pmd_t *pmd = pmds[i];
memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
sizeof(pmd_t) * PTRS_PER_PMD);
pud_populate(mm, u_pud, pmd);
}
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif
static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
{
/*
* PTI and Xen need a whole page for the PAE PGD
* even though the hardware only needs 32 bytes.
*
* For simplicity, allocate a page for all users.
*/
return __pgd_alloc(mm, pgd_allocation_order());
}
static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
__pgd_free(mm, pgd);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
pmd_t *pmds[PREALLOCATED_PMDS];
pgd = _pgd_alloc(mm);
if (pgd == NULL)
goto out;
mm->pgd = pgd;
if (sizeof(pmds) != 0 &&
preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
goto out_free_pgd;
if (sizeof(u_pmds) != 0 &&
preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
goto out_free_pmds;
if (paravirt_pgd_alloc(mm) != 0)
goto out_free_user_pmds;
/*
* Make sure that pre-populating the pmds is atomic with
* respect to anything walking the pgd_list, so that they
* never see a partially populated pgd.
*/
spin_lock(&pgd_lock);
pgd_ctor(mm, pgd);
if (sizeof(pmds) != 0)
pgd_prepopulate_pmd(mm, pgd, pmds);
if (sizeof(u_pmds) != 0)
pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
spin_unlock(&pgd_lock);
return pgd;
out_free_user_pmds:
if (sizeof(u_pmds) != 0)
free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
if (sizeof(pmds) != 0)
free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
_pgd_free(mm, pgd);
out:
return NULL;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
pgd_mop_up_pmds(mm, pgd);
pgd_dtor(pgd);
paravirt_pgd_free(mm, pgd);
_pgd_free(mm, pgd);
}
/*
* Used to set accessed or dirty bits in the page table entries
* on other architectures. On x86, the accessed and dirty bits
* are tracked by hardware. However, do_wp_page calls this function
* to also make the pte writeable at the same time the dirty bit is
* set. In that case we do actually need to write the PTE.
*/
int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
int changed = !pte_same(*ptep, entry);
if (changed && dirty)
set_pte(ptep, entry);
return changed;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
int changed = !pmd_same(*pmdp, entry);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
if (changed && dirty) {
set_pmd(pmdp, entry);
/*
* We had a write-protection fault here and changed the pmd
* to be more permissive. No need to flush the TLB for that,
* #PF is architecturally guaranteed to do that and in the
* worst-case we'll generate a spurious fault.
*/
}
return changed;
}
int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
pud_t *pudp, pud_t entry, int dirty)
{
int changed = !pud_same(*pudp, entry);
VM_BUG_ON(address & ~HPAGE_PUD_MASK);
if (changed && dirty) {
set_pud(pudp, entry);
/*
* We had a write-protection fault here and changed the pud
* to be more permissive. No need to flush the TLB for that,
* #PF is architecturally guaranteed to do that and in the
* worst-case we'll generate a spurious fault.
*/
}
return changed;
}
#endif
int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
int ret = 0;
if (pte_young(*ptep))
ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
(unsigned long *) &ptep->pte);
return ret;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
int ret = 0;
if (pmd_young(*pmdp))
ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
(unsigned long *)pmdp);
return ret;
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pudp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp)
{
int ret = 0;
if (pud_young(*pudp))
ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
(unsigned long *)pudp);
return ret;
}
#endif
int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/*
* On x86 CPUs, clearing the accessed bit without a TLB flush
* doesn't cause data corruption. [ It could cause incorrect
* page aging and the (mistaken) reclaim of hot pages, but the
* chance of that should be relatively low. ]
*
* So as a performance optimization don't flush the TLB when
* clearing the accessed bit, it will eventually be flushed by
* a context switch or a VM operation anyway. [ In the rare
* event of it not getting flushed for a long time the delay
* shouldn't really matter because there's no real memory
* pressure for swapout to react to. ]
*/
return ptep_test_and_clear_young(vma, address, ptep);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
int young;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
young = pmdp_test_and_clear_young(vma, address, pmdp);
if (young)
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return young;
}
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
VM_WARN_ON_ONCE(!pmd_present(*pmdp));
/*
* No flush is necessary. Once an invalid PTE is established, the PTE's
* access and dirty bits cannot be updated.
*/
return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
}
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
pud_t *pudp)
{
VM_WARN_ON_ONCE(!pud_present(*pudp));
pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));
flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
return old;
}
#endif
/**
* reserve_top_address - Reserve a hole in the top of the kernel address space
* @reserve: Size of hole to reserve
*
* Can be used to relocate the fixmap area and poke a hole in the top
* of the kernel address space to make room for a hypervisor.
*/
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
BUG_ON(fixmaps_set > 0);
__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
-reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}
int fixmaps_set;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
unsigned long address = __fix_to_virt(idx);
#ifdef CONFIG_X86_64
/*
* Ensure that the static initial page tables are covering the
* fixmap completely.
*/
BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
(FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
}
set_pte_vaddr(address, pte);
fixmaps_set++;
}
void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
{
/* Sanitize 'flags' against any unsupported bits: */
pgprot_val(flags) &= __default_kernel_pte_mask;
__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#if CONFIG_PGTABLE_LEVELS > 4
/**
* p4d_set_huge - Set up kernel P4D mapping
* @p4d: Pointer to the P4D entry
* @addr: Virtual address associated with the P4D entry
* @prot: Protection bits to use
*
* No 512GB pages yet -- always return 0
*/
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
/**
* p4d_clear_huge - Clear kernel P4D mapping when it is set
* @p4d: Pointer to the P4D entry to clear
*
* No 512GB pages yet -- do nothing
*/
void p4d_clear_huge(p4d_t *p4d)
{
}
#endif
/**
* pud_set_huge - Set up kernel PUD mapping
* @pud: Pointer to the PUD entry
* @addr: Virtual address associated with the PUD entry
* @prot: Protection bits to use
*
* MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
* function sets up a huge page only if the complete range has the same MTRR
* caching mode.
*
* Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
* page mapping attempt fails.
*
* Returns 1 on success and 0 on failure.
*/
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
u8 uniform;
mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
if (!uniform)
return 0;
/* Bail out if we are on a populated non-leaf entry: */
if (pud_present(*pud) && !pud_leaf(*pud))
return 0;
set_pte((pte_t *)pud, pfn_pte(
(u64)addr >> PAGE_SHIFT,
__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
return 1;
}
/**
* pmd_set_huge - Set up kernel PMD mapping
* @pmd: Pointer to the PMD entry
* @addr: Virtual address associated with the PMD entry
* @prot: Protection bits to use
*
* See text over pud_set_huge() above.
*
* Returns 1 on success and 0 on failure.
*/
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
u8 uniform;
mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
if (!uniform) {
pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
__func__, addr, addr + PMD_SIZE);
return 0;
}
/* Bail out if we are on a populated non-leaf entry: */
if (pmd_present(*pmd) && !pmd_leaf(*pmd))
return 0;
set_pte((pte_t *)pmd, pfn_pte(
(u64)addr >> PAGE_SHIFT,
__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
return 1;
}
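/*
 * Illustrative sketch (hedged): a generic huge-vmap style caller would try the
 * largest granule first and fall back when the helpers above refuse, e.g.:
 *
 *	if (can_use_1g && pud_set_huge(pud, phys, prot))
 *		return 0;		// mapped with a 1GiB leaf
 *	if (can_use_2m && pmd_set_huge(pmd, phys, prot))
 *		return 0;		// mapped with a 2MiB leaf
 *	// otherwise fall back to 4KiB ptes
 *
 * The can_use_* predicates and the return convention are assumptions for the
 * example; the real fallback logic lives in the generic mapping code.
 */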
/**
* pud_clear_huge - Clear kernel PUD mapping when it is set
* @pud: Pointer to the PUD entry to clear.
*
* Returns 1 on success and 0 on failure (no PUD map is found).
*/
int pud_clear_huge(pud_t *pud)
{
if (pud_leaf(*pud)) {
pud_clear(pud);
return 1;
}
return 0;
}
/**
* pmd_clear_huge - Clear kernel PMD mapping when it is set
* @pmd: Pointer to the PMD entry to clear.
*
* Returns 1 on success and 0 on failure (no PMD map is found).
*/
int pmd_clear_huge(pmd_t *pmd)
{
if (pmd_leaf(*pmd)) {
pmd_clear(pmd);
return 1;
}
return 0;
}
#ifdef CONFIG_X86_64
/**
* pud_free_pmd_page - Clear PUD entry and free PMD page
* @pud: Pointer to a PUD
* @addr: Virtual address associated with PUD
*
* Context: The PUD range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*
* NOTE: Callers must allow a single page allocation.
*/
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
pmd_t *pmd, *pmd_sv;
pte_t *pte;
int i;
pmd = pud_pgtable(*pud);
pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
if (!pmd_sv)
return 0;
for (i = 0; i < PTRS_PER_PMD; i++) {
pmd_sv[i] = pmd[i];
if (!pmd_none(pmd[i]))
pmd_clear(&pmd[i]);
}
pud_clear(pud);
/* INVLPG to clear all paging-structure caches */
flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
for (i = 0; i < PTRS_PER_PMD; i++) {
if (!pmd_none(pmd_sv[i])) {
pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
pte_free_kernel(&init_mm, pte);
}
}
free_page((unsigned long)pmd_sv);
pmd_free(&init_mm, pmd);
return 1;
}
/**
* pmd_free_pte_page - Clear PMD entry and free PTE page.
* @pmd: Pointer to the PMD
* @addr: Virtual address associated with PMD
*
* Context: The PMD range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
/* INVLPG to clear all paging-structure caches */
flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
pte_free_kernel(&init_mm, pte);
return 1;
}
#else /* !CONFIG_X86_64 */
/*
* Disable free page handling on x86-PAE. This assures that ioremap()
* does not update sync'd PMD entries. See vmalloc_sync_one().
*/
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return pmd_none(*pmd);
}
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHADOW_STACK)
return pte_mkwrite_shstk(pte);
pte = pte_mkwrite_novma(pte);
return pte_clear_saveddirty(pte);
}
pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHADOW_STACK)
return pmd_mkwrite_shstk(pmd);
pmd = pmd_mkwrite_novma(pmd);
return pmd_clear_saveddirty(pmd);
}
void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte)
{
/*
* Hardware before shadow stack can (rarely) set Dirty=1
* on a Write=0 PTE. So the below condition
* only indicates a software bug when shadow stack is
* supported by the HW. This checking is covered in
* pte_shstk().
*/
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
pte_shstk(pte));
}
void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
{
/* See note in arch_check_zapped_pte() */
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
pmd_shstk(pmd));
}
void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
{
/* See note in arch_check_zapped_pte() */
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud));
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
*
* Provides a framework for enqueueing and running callbacks from hardirq
* context. The enqueueing is NMI-safe.
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>
#include <trace/events/ipi.h>
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);
static void wake_irq_workd(void)
{
struct task_struct *tsk = __this_cpu_read(irq_workd);
if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
wake_up_process(tsk);
}
#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
wake_irq_workd();
}
static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
IRQ_WORK_INIT_HARD(irq_work_wake);
#endif
static int irq_workd_should_run(unsigned int cpu)
{
return !llist_empty(this_cpu_ptr(&lazy_list));
}
/*
* Claim the entry so that no one else will poke at it.
*/
static bool irq_work_claim(struct irq_work *work)
{
int oflags;
oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
/*
* If the work is already pending, no need to raise the IPI.
* The pairing smp_mb() in irq_work_single() makes sure
* everything we did before is visible.
*/
if (oflags & IRQ_WORK_PENDING)
return false;
return true;
}
void __weak arch_irq_work_raise(void)
{
/*
* Lame architectures will get the timer tick callback
*/
}
static __always_inline void irq_work_raise(struct irq_work *work)
{
if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
arch_irq_work_raise();
}
/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
struct llist_head *list;
bool rt_lazy_work = false;
bool lazy_work = false;
int work_flags;
work_flags = atomic_read(&work->node.a_flags);
if (work_flags & IRQ_WORK_LAZY)
lazy_work = true;
else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
!(work_flags & IRQ_WORK_HARD_IRQ))
rt_lazy_work = true;
if (lazy_work || rt_lazy_work)
list = this_cpu_ptr(&lazy_list);
else
list = this_cpu_ptr(&raised_list);
if (!llist_add(&work->node.llist, list))
return;
/* If the work is "lazy", handle it from next tick if any */
if (!lazy_work || tick_nohz_tick_stopped())
irq_work_raise(work);
}
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
__irq_work_queue_local(work);
preempt_enable();
return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
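/*
 * Illustrative usage sketch (hypothetical names): a subsystem that must defer
 * work out of NMI or other hard-atomic context initializes an irq_work once
 * and queues it whenever needed; the callback later runs from IRQ context.
 *
 *	static void my_callback(struct irq_work *work)
 *	{
 *		// runs in hardirq context (or the irq_work thread on PREEMPT_RT)
 *	}
 *	static struct irq_work my_work = IRQ_WORK_INIT(my_callback);
 *
 *	// from NMI/atomic context:
 *	irq_work_queue(&my_work);
 *
 *	// before freeing/unloading, wait until the callback has finished:
 *	irq_work_sync(&my_work);
 */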
/*
* Enqueue the irq_work @work on @cpu unless it's already pending
* somewhere.
*
* Can be re-enqueued while the callback is still in progress.
*/
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
return irq_work_queue(work);
#else /* CONFIG_SMP: */
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(cpu));
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;
kasan_record_aux_stack(work);
preempt_disable();
if (cpu != smp_processor_id()) {
/* Arch remote IPI send/receive backend aren't NMI safe */
WARN_ON_ONCE(in_nmi());
/*
* On PREEMPT_RT the items which are not marked as
* IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
* item is used on the remote CPU to wake the thread.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
!(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {
if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
goto out;
work = &per_cpu(irq_work_wakeup, cpu);
if (!irq_work_claim(work))
goto out;
}
__smp_call_single_queue(cpu, &work->node.llist);
} else {
__irq_work_queue_local(work);
}
out:
preempt_enable();
return true;
#endif /* CONFIG_SMP */
}
bool irq_work_needs_cpu(void)
{
struct llist_head *raised, *lazy;
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
if (llist_empty(raised) || arch_irq_work_has_interrupt())
if (llist_empty(lazy))
return false;
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
return true;
}
void irq_work_single(void *arg)
{
struct irq_work *work = arg;
int flags;
/*
* Clear the PENDING bit, after this point the @work can be re-used.
* The PENDING bit acts as a lock, and we own it, so we can clear it
* without atomic ops.
*/
flags = atomic_read(&work->node.a_flags);
flags &= ~IRQ_WORK_PENDING;
atomic_set(&work->node.a_flags, flags);
/*
* See irq_work_claim().
*/
smp_mb();
lockdep_irq_work_enter(flags);
work->func(work);
lockdep_irq_work_exit(flags);
/*
* Clear the BUSY bit, if set, and return to the free state if no-one
* else claimed it meanwhile.
*/
(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
!arch_irq_work_has_interrupt())
rcuwait_wake_up(&work->irqwait);
}
static void irq_work_run_list(struct llist_head *list)
{
struct irq_work *work, *tmp;
struct llist_node *llnode;
/*
* On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
* in a per-CPU thread in preemptible context. Only the items which are
* marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
*/
BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));
if (llist_empty(list))
return;
llnode = llist_del_all(list);
llist_for_each_entry_safe(work, tmp, llnode, node.llist)
irq_work_single(work);
}
/*
* hotplug calls this through:
* hotplug_cfd() -> flush_smp_call_function_queue()
*/
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
irq_work_run_list(this_cpu_ptr(&lazy_list));
else
wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);
void irq_work_tick(void)
{
struct llist_head *raised = this_cpu_ptr(&raised_list);
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
irq_work_run_list(this_cpu_ptr(&lazy_list));
else
wake_irq_workd();
}
/*
* Synchronize against the irq_work @entry, ensures the entry is not
* currently in use.
*/
void irq_work_sync(struct irq_work *work)
{
lockdep_assert_irqs_enabled();
might_sleep();
if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
!arch_irq_work_has_interrupt()) {
rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
TASK_UNINTERRUPTIBLE);
return;
}
while (irq_work_is_busy(work))
cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
static void run_irq_workd(unsigned int cpu)
{
irq_work_run_list(this_cpu_ptr(&lazy_list));
}
static void irq_workd_setup(unsigned int cpu)
{
sched_set_fifo_low(current);
}
static struct smp_hotplug_thread irqwork_threads = {
.store = &irq_workd,
.setup = irq_workd_setup,
.thread_should_run = irq_workd_should_run,
.thread_fn = run_irq_workd,
.thread_comm = "irq_work/%u",
};
static __init int irq_work_init_threads(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
return 0;
}
early_initcall(irq_work_init_threads);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H
#include <linux/mm_types_task.h>
#include <linux/auxvec.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>
#include <linux/types.h>
#include <linux/bitmap.h>
#include <asm/mmu.h>
#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
struct address_space;
struct futex_private_hash;
struct mem_cgroup;
typedef struct {
unsigned long f;
} memdesc_flags_t;
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
* who is mapping it.
*
* If you allocate the page using alloc_pages(), you can use some of the
* space in struct page for your own purposes. The five words in the main
* union are available, except for bit 0 of the first word which must be
* kept clear. Many users use this word to store a pointer to an object
* which is guaranteed to be aligned. If you use the same storage as
* page->mapping, you must restore it to NULL before freeing the page.
*
* The mapcount field must not be used for your own purposes.
*
* If you want to use the refcount field, it must be used in such a way
* that other CPUs temporarily incrementing and then decrementing the
* refcount does not cause problems. On receiving the page from
* alloc_pages(), the refcount will be positive.
*
* If you allocate pages of order > 0, you can use some of the fields
* in each subpage, but you may need to restore some of their values
* afterwards.
*
* SLUB uses cmpxchg_double() to atomically update its freelist and counters.
* That requires that freelist & counters in struct slab be adjacent and
* double-word aligned. Because struct slab currently just reinterprets the
* bits of struct page, we align all struct pages to double-word boundaries,
* and ensure that 'freelist' is aligned within struct slab.
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment __aligned(sizeof(unsigned long))
#endif
struct page {
memdesc_flags_t flags; /* Atomic flags, some possibly
* updated asynchronously */
/*
* Five words (20/40 bytes) are available in this union.
* WARNING: bit 0 of the first word is used for PageTail(). That
* means the other users of this union MUST NOT use the bit to
* avoid collision and false-positive PageTail().
*/
union {
struct { /* Page cache and anonymous pages */
/**
* @lru: Pageout list, eg. active_list protected by
* lruvec->lru_lock. Sometimes used as a generic list
* by the page owner.
*/
union {
struct list_head lru;
/* Or, free page */
struct list_head buddy_list;
struct list_head pcp_list;
struct llist_node pcp_llist;
};
struct address_space *mapping;
union {
pgoff_t __folio_index; /* Our offset within mapping. */
unsigned long share; /* share count for fsdax */
};
/**
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
* Used for swp_entry_t if swapcache flag set.
* Indicates order in the buddy system if PageBuddy
* or on pcp_llist.
*/
unsigned long private;
};
struct { /* page_pool used by netstack */
/**
* @pp_magic: magic value to avoid recycling non
* page_pool allocated pages.
*/
unsigned long pp_magic;
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
atomic_long_t pp_ref_count;
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
};
struct { /* ZONE_DEVICE pages */
/*
* The first word is used for compound_head or folio
* pgmap
*/
void *_unused_pgmap_compound_head;
void *zone_device_data;
/*
* ZONE_DEVICE private pages are counted as being
* mapped so the next 3 words hold the mapping, index,
* and private fields from the source anonymous or
* page cache page while the page is migrated to device
* private memory.
* ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
* use the mapping, index, and private fields when
* pmem backed DAX files are mapped.
*/
};
/** @rcu_head: You can use this to free a page by RCU. */
struct rcu_head rcu_head;
};
union { /* This union is 4 bytes in size. */
/*
* For head pages of typed folios, the value stored here
* allows for determining what this page is used for. The
* tail pages of typed folios will not store a type
* (page_type == _mapcount == -1).
*
* See page-flags.h for a list of page types which are currently
* stored here.
*
* Owners of typed folios may reuse the lower 16 bit of the
* head page page_type field after setting the page type,
* but must reset these 16 bit to -1 before clearing the
* page type.
*/
unsigned int page_type;
/*
* For pages that are part of non-typed folios for which mappings
* are tracked via the RMAP, encodes the number of times this page
* is directly referenced by a page table.
*
* Note that the mapcount is always initialized to -1, so that
* transitions both from it and to it can be tracked, using
* atomic_inc_and_test() and atomic_add_negative(-1).
*/
atomic_t _mapcount;
};
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
atomic_t _refcount;
#ifdef CONFIG_MEMCG
unsigned long memcg_data;
#elif defined(CONFIG_SLAB_OBJ_EXT)
unsigned long _unused_slab_obj_exts;
#endif
/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
* highmem some memory is mapped into kernel virtual memory
* dynamically, so we need a place to store that address.
* Note that this field could be 16 bits on x86 ... ;)
*
* Architectures with slow multiplication can define
* WANT_PAGE_VIRTUAL in asm/page.h
*/
#if defined(WANT_PAGE_VIRTUAL)
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
#ifdef CONFIG_KMSAN
/*
* KMSAN metadata for this page:
* - shadow page: every bit indicates whether the corresponding
* bit of the original page is initialized (0) or not (1);
* - origin page: every 4 bytes contain an id of the stack trace
* where the uninitialized value was created.
*/
struct page *kmsan_shadow;
struct page *kmsan_origin;
#endif
} _struct_page_alignment;
/*
* struct encoded_page - a nonexistent type marking this pointer
*
* An 'encoded_page' pointer is a pointer to a regular 'struct page', but
* with the low bits of the pointer indicating extra context-dependent
* information. Only used in mmu_gather handling, and this acts as a type
* system check on that use.
*
* We only really have two guaranteed bits in general, although you could
* play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
* for more.
*
* Use the supplied helper functions to encode/decode the pointer and bits.
*/
struct encoded_page;
#define ENCODED_PAGE_BITS 3ul
/* Perform rmap removal after we have flushed the TLB. */
#define ENCODED_PAGE_BIT_DELAY_RMAP 1ul
/*
* The next item in an encoded_page array is the "nr_pages" argument, specifying
* the number of consecutive pages starting from this page, that all belong to
* the same folio. For example, "nr_pages" corresponds to the number of folio
* references that must be dropped. If this bit is not set, "nr_pages" is
* implicitly 1.
*/
#define ENCODED_PAGE_BIT_NR_PAGES_NEXT 2ul
static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
{
BUILD_BUG_ON(flags > ENCODED_PAGE_BITS);
return (struct encoded_page *)(flags | (unsigned long)page);
}
static inline unsigned long encoded_page_flags(struct encoded_page *page)
{
return ENCODED_PAGE_BITS & (unsigned long)page;
}
static inline struct page *encoded_page_ptr(struct encoded_page *page)
{
return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page);
}
static __always_inline struct encoded_page *encode_nr_pages(unsigned long nr)
{
VM_WARN_ON_ONCE((nr << 2) >> 2 != nr);
return (struct encoded_page *)(nr << 2);
}
static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page)
{
return ((unsigned long)page) >> 2;
}
/*
* A swap entry has to fit into a "unsigned long", as the entry is hidden
* in the "index" field of the swapper address space.
*/
typedef struct {
unsigned long val;
} swp_entry_t;
#if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT)
/* We have some extra room after the refcount in tail pages. */
#define NR_PAGES_IN_LARGE_FOLIO
#endif
/*
* On 32bit, we can cut the required metadata in half, because:
* (a) PID_MAX_LIMIT implicitly limits the number of MMs we could ever have,
* so we can limit MM IDs to 15 bit (32767).
* (b) We don't expect folios where even a single complete PTE mapping by
* one MM would exceed 15 bits (order-15).
*/
#ifdef CONFIG_64BIT
typedef int mm_id_mapcount_t;
#define MM_ID_MAPCOUNT_MAX INT_MAX
typedef unsigned int mm_id_t;
#else /* !CONFIG_64BIT */
typedef short mm_id_mapcount_t;
#define MM_ID_MAPCOUNT_MAX SHRT_MAX
typedef unsigned short mm_id_t;
#endif /* CONFIG_64BIT */
/* We implicitly use the dummy ID for init-mm etc. where we never rmap pages. */
#define MM_ID_DUMMY 0
#define MM_ID_MIN (MM_ID_DUMMY + 1)
/*
* We leave the highest bit of each MM id unused, so we can store a flag
* in the highest bit of each folio->_mm_id[].
*/
#define MM_ID_BITS ((sizeof(mm_id_t) * BITS_PER_BYTE) - 1)
#define MM_ID_MASK ((1U << MM_ID_BITS) - 1)
#define MM_ID_MAX MM_ID_MASK
/*
* In order to use bit_spin_lock(), which requires an unsigned long, we
* operate on folio->_mm_ids when working on flags.
*/
#define FOLIO_MM_IDS_LOCK_BITNUM MM_ID_BITS
#define FOLIO_MM_IDS_LOCK_BIT BIT(FOLIO_MM_IDS_LOCK_BITNUM)
#define FOLIO_MM_IDS_SHARED_BITNUM (2 * MM_ID_BITS + 1)
#define FOLIO_MM_IDS_SHARED_BIT BIT(FOLIO_MM_IDS_SHARED_BITNUM)
/**
* struct folio - Represents a contiguous set of bytes.
* @flags: Identical to the page flags.
* @lru: Least Recently Used list; tracks how recently this folio was used.
* @mlock_count: Number of times this folio has been pinned by mlock().
* @mapping: The file this page belongs to, or refers to the anon_vma for
* anonymous memory.
* @index: Offset within the file, in units of pages. For anonymous memory,
* this is the index from the beginning of the mmap.
* @share: number of DAX mappings that reference this folio. See
* dax_associate_entry.
* @private: Filesystem per-folio data (see folio_attach_private()).
* @swap: Used for swp_entry_t if folio_test_swapcache().
* @_mapcount: Do not access this member directly. Use folio_mapcount() to
* find out how many times this folio is mapped by userspace.
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
* @pgmap: Metadata for ZONE_DEVICE mappings
* @virtual: Virtual address in the kernel direct map.
* @_last_cpupid: IDs of last CPU and last process that accessed the folio.
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
* @_large_mapcount: Do not use directly, call folio_mapcount().
* @_nr_pages_mapped: Do not use outside of rmap and debug code.
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
* @_nr_pages: Do not use directly, call folio_nr_pages().
* @_mm_id: Do not use outside of rmap code.
* @_mm_ids: Do not use outside of rmap code.
* @_mm_id_mapcount: Do not use outside of rmap code.
* @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
* @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
* @_deferred_list: Folios to be split under memory pressure.
* @_unused_slab_obj_exts: Placeholder to match obj_exts in struct slab.
*
* A folio is a physically, virtually and logically contiguous set
* of bytes. It is a power-of-two in size, and it is aligned to that
* same power-of-two. It is at least as large as %PAGE_SIZE. If it is
* in the page cache, it is at a file offset which is a multiple of that
* power-of-two. It may be mapped into userspace at an address which is
* at an arbitrary page offset, but its kernel virtual address is aligned
* to its size.
*/
struct folio {
/* private: don't document the anon union */
union {
struct {
/* public: */
memdesc_flags_t flags;
union {
struct list_head lru;
/* private: avoid cluttering the output */
/* For the Unevictable "LRU list" slot */
struct {
/* Avoid compound_head */
void *__filler;
/* public: */
unsigned int mlock_count;
/* private: */
};
/* public: */
struct dev_pagemap *pgmap;
};
struct address_space *mapping;
union {
pgoff_t index;
unsigned long share;
};
union {
void *private;
swp_entry_t swap;
};
atomic_t _mapcount;
atomic_t _refcount;
#ifdef CONFIG_MEMCG
unsigned long memcg_data;
#elif defined(CONFIG_SLAB_OBJ_EXT)
unsigned long _unused_slab_obj_exts;
#endif
#if defined(WANT_PAGE_VIRTUAL)
void *virtual;
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
/* private: the union with struct page is transitional */
};
struct page page;
};
union {
struct {
unsigned long _flags_1;
unsigned long _head_1;
union {
struct {
/* public: */
atomic_t _large_mapcount;
atomic_t _nr_pages_mapped;
#ifdef CONFIG_64BIT
atomic_t _entire_mapcount;
atomic_t _pincount;
#endif /* CONFIG_64BIT */
mm_id_mapcount_t _mm_id_mapcount[2];
union {
mm_id_t _mm_id[2];
unsigned long _mm_ids;
};
/* private: the union with struct page is transitional */
};
unsigned long _usable_1[4];
};
atomic_t _mapcount_1;
atomic_t _refcount_1;
/* public: */
#ifdef NR_PAGES_IN_LARGE_FOLIO
unsigned int _nr_pages;
#endif /* NR_PAGES_IN_LARGE_FOLIO */
/* private: the union with struct page is transitional */
};
struct page __page_1;
};
union {
struct {
unsigned long _flags_2;
unsigned long _head_2;
/* public: */
struct list_head _deferred_list;
#ifndef CONFIG_64BIT
atomic_t _entire_mapcount;
atomic_t _pincount;
#endif /* !CONFIG_64BIT */
/* private: the union with struct page is transitional */
};
struct page __page_2;
};
union {
struct {
unsigned long _flags_3;
unsigned long _head_3;
/* public: */
void *_hugetlb_subpool;
void *_hugetlb_cgroup;
void *_hugetlb_cgroup_rsvd;
void *_hugetlb_hwpoison;
/* private: the union with struct page is transitional */
};
struct page __page_3;
};
};
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(mapping, mapping);
FOLIO_MATCH(compound_head, lru);
FOLIO_MATCH(__folio_index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#if defined(WANT_PAGE_VIRTUAL)
FOLIO_MATCH(virtual, virtual);
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
FOLIO_MATCH(_last_cpupid, _last_cpupid);
#endif
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
FOLIO_MATCH(compound_head, _head_1);
FOLIO_MATCH(_mapcount, _mapcount_1);
FOLIO_MATCH(_refcount, _refcount_1);
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + 3 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_3);
FOLIO_MATCH(compound_head, _head_3);
#undef FOLIO_MATCH
/**
* struct ptdesc - Memory descriptor for page tables.
* @pt_flags: enum pt_flags plus zone/node/section.
* @pt_rcu_head: For freeing page table pages.
* @pt_list: List of used page tables. Used for s390 gmap shadow pages
* (which are not linked into the user page tables) and x86
* pgds.
* @_pt_pad_1: Padding that aliases with page's compound head.
* @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
* @__page_mapping: Aliases with page->mapping. Unused for page tables.
* @pt_index: Used for s390 gmap.
* @pt_mm: Used for x86 pgds.
* @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
* @pt_share_count: Used for HugeTLB PMD page table share count.
* @_pt_pad_2: Padding to ensure proper alignment.
* @ptl: Lock for the page table.
* @__page_type: Same as page->page_type. Unused for page tables.
* @__page_refcount: Same as page refcount.
* @pt_memcg_data: Memcg data. Tracked for page tables here.
*
* This struct overlays struct page for now. Do not modify without a good
* understanding of the issues.
*/
struct ptdesc {
memdesc_flags_t pt_flags;
union {
struct rcu_head pt_rcu_head;
struct list_head pt_list;
struct {
unsigned long _pt_pad_1;
pgtable_t pmd_huge_pte;
};
};
unsigned long __page_mapping;
union {
pgoff_t pt_index;
struct mm_struct *pt_mm;
atomic_t pt_frag_refcount;
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
atomic_t pt_share_count;
#endif
};
union {
unsigned long _pt_pad_2;
#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
#else
spinlock_t ptl;
#endif
};
unsigned int __page_type;
atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
unsigned long pt_memcg_data;
#endif
};
#define TABLE_MATCH(pg, pt) \
static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
TABLE_MATCH(flags, pt_flags);
TABLE_MATCH(compound_head, pt_list);
TABLE_MATCH(compound_head, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
TABLE_MATCH(__folio_index, pt_index);
TABLE_MATCH(rcu_head, pt_rcu_head);
TABLE_MATCH(page_type, __page_type);
TABLE_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
TABLE_MATCH(memcg_data, pt_memcg_data);
#endif
#undef TABLE_MATCH
static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
#define ptdesc_page(pt) (_Generic((pt), \
const struct ptdesc *: (const struct page *)(pt), \
struct ptdesc *: (struct page *)(pt)))
#define ptdesc_folio(pt) (_Generic((pt), \
const struct ptdesc *: (const struct folio *)(pt), \
struct ptdesc *: (struct folio *)(pt)))
#define page_ptdesc(p) (_Generic((p), \
const struct page *: (const struct ptdesc *)(p), \
struct page *: (struct ptdesc *)(p)))
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
{
atomic_set(&ptdesc->pt_share_count, 0);
}
static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
{
atomic_inc(&ptdesc->pt_share_count);
}
static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
{
atomic_dec(&ptdesc->pt_share_count);
}
static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
{
return atomic_read(&ptdesc->pt_share_count);
}
static inline bool ptdesc_pmd_is_shared(struct ptdesc *ptdesc)
{
return !!ptdesc_pmd_pts_count(ptdesc);
}
#else
static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
{
}
#endif
/*
* Used for sizing the vmemmap region on some architectures
*/
#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
/*
* page_private can be used on tail pages. However, PagePrivate is only
* checked by the VM on the head page. So page_private on the tail pages
* should be used for data that's ancillary to the head page (eg attaching
* buffer heads to tail pages after attaching buffer heads to the head page)
*/
#define page_private(page) ((page)->private)
static inline void set_page_private(struct page *page, unsigned long private)
{
page->private = private;
}
static inline void *folio_get_private(const struct folio *folio)
{
return folio->private;
}
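/*
 * Illustrative sketch (hypothetical helper): the accessor pairing described
 * above - per-page data is stored with set_page_private() and read back with
 * page_private(); folios use folio_get_private() for the same slot.
 */
static inline unsigned long example_page_private_roundtrip(struct page *page,
		unsigned long cookie)
{
	set_page_private(page, cookie);
	return page_private(page);	/* == cookie */
}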
typedef unsigned long vm_flags_t;
/*
* freeptr_t represents a SLUB freelist pointer, which might be encoded
* and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
*/
typedef struct { unsigned long v; } freeptr_t;
/*
* A region containing a mapping of a non-memory backed file under NOMMU
* conditions. These are held in a global tree and are pinned by the VMAs that
* map parts of them.
*/
struct vm_region {
struct rb_node vm_rb; /* link in global region tree */
vm_flags_t vm_flags; /* VMA vm_flags */
unsigned long vm_start; /* start address of region */
unsigned long vm_end; /* region initialised to here */
unsigned long vm_top; /* region allocated to here */
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
int vm_usage; /* region usage count (access under nommu_region_sem) */
bool vm_icache_flushed : 1; /* true if the icache has been flushed for
* this region */
};
#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
struct anon_vma_name {
struct kref kref;
/* The name needs to be at the end because it is dynamically sized. */
char name[];
};
#ifdef CONFIG_ANON_VMA_NAME
/*
* mmap_lock should be read-locked when calling anon_vma_name(). Caller should
* either keep holding the lock while using the returned pointer or it should
* raise anon_vma_name refcount before releasing the lock.
*/
struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
struct anon_vma_name *anon_vma_name_alloc(const char *name);
void anon_vma_name_free(struct kref *kref);
#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
return NULL;
}
static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
return NULL;
}
#endif
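/*
 * Illustrative sketch (hypothetical helper, mmap_lock held for read by the
 * caller): honouring the rule documented above by taking a reference on the
 * name so it remains valid after the caller drops the lock. Releasing is
 * assumed to be kref_put(&anon_name->kref, anon_vma_name_free).
 */
static inline struct anon_vma_name *example_pin_vma_name(struct vm_area_struct *vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(vma);

	if (anon_name)
		kref_get(&anon_name->kref);	/* pin it past the unlock */
	return anon_name;
}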
#define VMA_LOCK_OFFSET 0x40000000
#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1)
struct vma_numab_state {
/*
* Initialised as time in 'jiffies' after which VMA
* should be scanned. Delays first scan of new VMA by at
* least sysctl_numa_balancing_scan_delay:
*/
unsigned long next_scan;
/*
* Time in jiffies when pids_active[] is reset to
* detect phase change behaviour:
*/
unsigned long pids_active_reset;
/*
* Approximate tracking of PIDs that trapped a NUMA hinting
* fault. May produce false positives due to hash collisions.
*
* [0] Previous PID tracking
* [1] Current PID tracking
*
* Window moves after next_pid_reset has expired approximately
* every VMA_PID_RESET_PERIOD jiffies:
*/
unsigned long pids_active[2];
/* MM scan sequence ID when scan first started after VMA creation */
int start_scan_seq;
/*
* MM scan sequence ID when the VMA was last completely scanned.
* A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
*/
int prev_scan_seq;
};
#ifdef __HAVE_PFNMAP_TRACKING
struct pfnmap_track_ctx {
struct kref kref;
unsigned long pfn;
unsigned long size; /* in bytes */
};
#endif
/*
* Describes a VMA that is about to be mmap()'ed. Drivers may choose to
* manipulate mutable fields which will cause those fields to be updated in the
* resultant VMA.
*
* Helper functions are not required for manipulating any field.
*/
struct vm_area_desc {
/* Immutable state. */
const struct mm_struct *const mm;
struct file *const file; /* May vary from vm_file in stacked callers. */
unsigned long start;
unsigned long end;
/* Mutable fields. Populated with initial state. */
pgoff_t pgoff;
struct file *vm_file;
vm_flags_t vm_flags;
pgprot_t page_prot;
/* Write-only fields. */
const struct vm_operations_struct *vm_ops;
void *private_data;
};
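/*
 * Illustrative sketch (hypothetical driver callback): a driver handed a
 * struct vm_area_desc may inspect the immutable fields, tweak the mutable
 * ones and fill in the write-only ones before the VMA is created. The hook
 * name, the size check and the NULL ops are examples, not kernel API.
 */
static inline int example_driver_mmap_prepare(struct vm_area_desc *desc)
{
	/* Immutable context: reject mappings larger than one page, say. */
	if (desc->end - desc->start > PAGE_SIZE)
		return -EINVAL;

	/* Mutable field: always map from the start of the backing object. */
	desc->pgoff = 0;

	/* Write-only fields: a real driver would install its vm_ops here. */
	desc->vm_ops = NULL;
	desc->private_data = NULL;
	return 0;
}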
/*
* This struct describes a virtual memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
*
* Only explicitly marked struct members may be accessed by RCU readers before
* getting a stable reference.
*
* WARNING: when adding new members, please update vm_area_init_from() to copy
* them during vm_area_struct content duplication.
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
union {
struct {
/* VMA covers [vm_start; vm_end) addresses within mm */
unsigned long vm_start;
unsigned long vm_end;
};
freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
};
/*
* The address space we belong to.
* Unstable RCU readers are allowed to read this.
*/
struct mm_struct *vm_mm;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
* Flags, see mm.h.
* To modify use vm_flags_{init|reset|set|clear|mod} functions.
*/
union {
const vm_flags_t vm_flags;
vm_flags_t __private __vm_flags;
};
#ifdef CONFIG_PER_VMA_LOCK
/*
* Can only be written (using WRITE_ONCE()) while holding both:
* - mmap_lock (in write mode)
* - vm_refcnt bit at VMA_LOCK_OFFSET is set
* Can be read reliably while holding one of:
* - mmap_lock (in read or write mode)
* - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
* Can be read unreliably (using READ_ONCE()) for pessimistic bailout
* while holding nothing (except RCU to keep the VMA struct allocated).
*
* This sequence counter is explicitly allowed to overflow; sequence
* counter reuse can only lead to occasional unnecessary use of the
* slowpath.
*/
unsigned int vm_lock_seq;
#endif
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
* list, after a COW of one of the file pages. A MAP_SHARED vma
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
* or brk vma (with NULL file) can only be in an anon_vma list.
*/
struct list_head anon_vma_chain; /* Serialized by mmap_lock &
* page_table_lock */
struct anon_vma *anon_vma; /* Serialized by page_table_lock */
/* Function pointers to deal with this struct. */
const struct vm_operations_struct *vm_ops;
/* Information about our backing store: */
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
units */
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
struct vm_region *vm_region; /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
/* Unstable RCU readers are allowed to read this. */
refcount_t vm_refcnt ____cacheline_aligned_in_smp;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map vmlock_dep_map;
#endif
#endif
/*
* For areas with an address space and backing store,
* linkage into the address_space->i_mmap interval tree.
*
*/
struct {
struct rb_node rb;
unsigned long rb_subtree_last;
} shared;
#ifdef CONFIG_ANON_VMA_NAME
/*
* For private and shared anonymous mappings, a pointer to a null
* terminated string containing the name given to the vma, or NULL if
* unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
*/
struct anon_vma_name *anon_name;
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
#ifdef __HAVE_PFNMAP_TRACKING
struct pfnmap_track_ctx *pfnmap_track_ctx;
#endif
} __randomize_layout;
#ifdef CONFIG_NUMA
#define vma_policy(vma) ((vma)->vm_policy)
#else
#define vma_policy(vma) NULL
#endif
#ifdef CONFIG_SCHED_MM_CID
struct mm_cid {
u64 time;
int cid;
int recent_cid;
};
#endif
/*
* Opaque type representing current mm_struct flag state. Must be accessed via
* mm_flags_xxx() helper functions.
*/
#define NUM_MM_FLAG_BITS (64)
typedef struct {
DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
} __private mm_flags_t;
struct kioctx_table;
struct iommu_mm_data;
struct mm_struct {
struct {
/*
* Fields which are often written to are placed in a separate
* cache line.
*/
struct {
/**
* @mm_count: The number of references to &struct
* mm_struct (@mm_users count as 1).
*
* Use mmgrab()/mmdrop() to modify. When this drops to
* 0, the &struct mm_struct is freed.
*/
atomic_t mm_count;
} ____cacheline_aligned_in_smp;
struct maple_tree mm_mt;
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
/* Base addresses for compatible mmap() */
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
#endif
unsigned long task_size; /* size of task vm space */
pgd_t * pgd;
#ifdef CONFIG_MEMBARRIER
/**
* @membarrier_state: Flags controlling membarrier behavior.
*
* This field is close to @pgd to hopefully fit in the same
* cache-line, which needs to be touched by switch_mm().
*/
atomic_t membarrier_state;
#endif
/**
* @mm_users: The number of users including userspace.
*
* Use mmget()/mmget_not_zero()/mmput() to modify. When this
* drops to 0 (i.e. when the task exits and there are no other
* temporary reference holders), we also release a reference on
* @mm_count (which may then free the &struct mm_struct if
* @mm_count also drops to 0).
*/
atomic_t mm_users;
#ifdef CONFIG_SCHED_MM_CID
/**
* @pcpu_cid: Per-cpu current cid.
*
* Keep track of the currently allocated mm_cid for each cpu.
* The per-cpu mm_cid values are serialized by their respective
* runqueue locks.
*/
struct mm_cid __percpu *pcpu_cid;
/*
* @mm_cid_next_scan: Next mm_cid scan (in jiffies).
*
* When the next mm_cid scan is due (in jiffies).
*/
unsigned long mm_cid_next_scan;
/**
* @nr_cpus_allowed: Number of CPUs allowed for mm.
*
* Number of CPUs allowed in the union of all mm's
* threads allowed CPUs.
*/
unsigned int nr_cpus_allowed;
/**
* @max_nr_cid: Maximum number of allowed concurrency
* IDs allocated.
*
* Track the highest number of allowed concurrency IDs
* allocated for the mm.
*/
atomic_t max_nr_cid;
/**
* @cpus_allowed_lock: Lock protecting mm cpus_allowed.
*
* Provide mutual exclusion for mm cpus_allowed and
* mm nr_cpus_allowed updates.
*/
raw_spinlock_t cpus_allowed_lock;
#endif
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* size of all page tables */
#endif
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
/*
* Typically the current mmap_lock's offset is 56 bytes from
* the last cacheline boundary, which is very optimal, as
* its two hot fields 'count' and 'owner' sit in 2 different
* cachelines, and when mmap_lock is highly contended, both
* of the 2 fields will be accessed frequently, current layout
* will help to reduce cache bouncing.
*
* So please be careful with adding new fields before
* mmap_lock, which can easily push the 2 fields into one
* cacheline.
*/
struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
* are globally strung together off
* init_mm.mmlist, and are protected
* by mmlist_lock
*/
#ifdef CONFIG_PER_VMA_LOCK
struct rcuwait vma_writer_wait;
/*
* This field has lock-like semantics, meaning it is sometimes
* accessed with ACQUIRE/RELEASE semantics.
* Roughly speaking, incrementing the sequence number is
* equivalent to releasing locks on VMAs; reading the sequence
* number can be part of taking a read lock on a VMA.
* Incremented every time mmap_lock is write-locked/unlocked.
* Initialized to 0, therefore odd values indicate mmap_lock
* is write-locked and even values that it's released.
*
* Can be modified under write mmap_lock using RELEASE
* semantics.
* Can be read with no other protection when holding write
* mmap_lock.
* Can be read with ACQUIRE semantics if not holding write
* mmap_lock.
*/
seqcount_t mm_lock_seq;
#endif
#ifdef CONFIG_FUTEX_PRIVATE_HASH
struct mutex futex_hash_lock;
struct futex_private_hash __rcu *futex_phash;
struct futex_private_hash *futex_phash_new;
/* futex-ref */
unsigned long futex_batches;
struct rcu_head futex_rcu;
atomic_long_t futex_atomic;
unsigned int __percpu *futex_ref;
#endif
unsigned long hiwater_rss; /* High-watermark of RSS usage */
unsigned long hiwater_vm; /* High-water virtual memory usage */
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
atomic64_t pinned_vm; /* Refcount permanently increased */
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
vm_flags_t def_flags;
/**
* @write_protect_seq: Locked when any thread is write
* protecting pages mapped by this mm to enforce a later COW,
* for instance during page table copying for fork().
*/
seqcount_t write_protect_seq;
spinlock_t arg_lock; /* protect the below fields */
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
/* the ABI-related flags from the ELF header. Used for core dump */
unsigned long saved_e_flags;
#endif
struct percpu_counter rss_stat[NR_MM_COUNTERS];
struct linux_binfmt *binfmt;
/* Architecture-specific MM context */
mm_context_t context;
mm_flags_t flags; /* Must use mm_flags_* helpers to access */
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
/*
* "owner" points to a task that is regarded as the canonical
* user/owner of this mm. All of the following must be true in
* order for it to be changed:
*
* current == mm->owner
* current->mm != mm
* new_owner->mm == mm
* new_owner->alloc_lock is held
*/
struct task_struct __rcu *owner;
#endif
struct user_namespace *user_ns;
/* store ref to file /proc/<pid>/exe symlink points to */
struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
* numa_next_scan is the next time that PTEs will be remapped
* PROT_NONE to trigger NUMA hinting faults; such faults gather
* statistics and migrate pages to new nodes if necessary.
*/
unsigned long numa_next_scan;
/* Restart point for scanning and remapping PTEs. */
unsigned long numa_scan_offset;
/* numa_scan_seq prevents two threads remapping PTEs. */
int numa_scan_seq;
#endif
/*
* An operation with batched TLB flushing is going on. Anything
* that can move process memory needs to flush the TLB when
* moving a PROT_NONE mapped page.
*/
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
atomic_t tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_PREEMPT_RT
struct rcu_head delayed_drop;
#endif
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
#ifdef CONFIG_IOMMU_MM_DATA
struct iommu_mm_data *iommu_mm;
#endif
#ifdef CONFIG_KSM
/*
* Represent how many pages of this process are involved in KSM
* merging (not including ksm_zero_pages).
*/
unsigned long ksm_merging_pages;
/*
* Represent how many pages are checked for ksm merging
* including merged and not merged.
*/
unsigned long ksm_rmap_items;
/*
* Represent how many empty pages are merged with kernel zero
* pages when enabling KSM use_zero_pages.
*/
atomic_long_t ksm_zero_pages;
#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
/* this mm_struct is on lru_gen_mm_list */
struct list_head list;
/*
* Set when switching to this mm_struct, as a hint of
* whether it has been used since the last time per-node
* page table walkers cleared the corresponding bits.
*/
unsigned long bitmap;
#ifdef CONFIG_MEMCG
/* points to the memcg of "owner" above */
struct mem_cgroup *memcg;
#endif
} lru_gen;
#endif /* CONFIG_LRU_GEN_WALKS_MMU */
#ifdef CONFIG_MM_ID
mm_id_t mm_id;
#endif /* CONFIG_MM_ID */
} __randomize_layout;
/*
* The mm_cpumask needs to be at the end of mm_struct, because it
* is dynamically sized based on nr_cpu_ids.
*/
unsigned long cpu_bitmap[];
};
/* Set the first system word of mm flags, non-atomically. */
static inline void __mm_flags_set_word(struct mm_struct *mm, unsigned long value)
{
unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
bitmap_copy(bitmap, &value, BITS_PER_LONG);
}
/* Obtain a read-only view of the bitmap. */
static inline const unsigned long *__mm_flags_get_bitmap(const struct mm_struct *mm)
{
return (const unsigned long *)ACCESS_PRIVATE(&mm->flags, __mm_flags);
}
/* Read the first system word of mm flags, non-atomically. */
static inline unsigned long __mm_flags_get_word(const struct mm_struct *mm)
{
const unsigned long *bitmap = __mm_flags_get_bitmap(mm);
return bitmap_read(bitmap, 0, BITS_PER_LONG);
}
/*
* Update the first system word of mm flags ONLY, applying the specified mask to
* it, then setting all flags specified by bits.
*/
static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
unsigned long mask, unsigned long bits)
{
unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
set_mask_bits(bitmap, mask, bits);
}
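/*
 * Illustrative sketch (hypothetical values): the mask/bits helper above
 * implements "word = (word & ~mask) | bits" on the first flag word, so a
 * group of bits can be cleared and replaced in one update.
 */
static inline void example_mm_flags_replace_low_bits(struct mm_struct *mm)
{
	/* Clear bits 0-1 and set bit 0; the constants are examples only. */
	__mm_flags_set_mask_bits_word(mm, 0x3UL, 0x1UL);
}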
#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
unsigned long cpu_bitmap = (unsigned long)mm;
cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
return (struct cpumask *)&mm->cpu_bitmap;
}
#ifdef CONFIG_LRU_GEN
struct lru_gen_mm_list {
/* mm_struct list for page table walkers */
struct list_head fifo;
/* protects the list above */
spinlock_t lock;
};
#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_LRU_GEN_WALKS_MMU
void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
void lru_gen_migrate_mm(struct mm_struct *mm);
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
INIT_LIST_HEAD(&mm->lru_gen.list);
mm->lru_gen.bitmap = 0;
#ifdef CONFIG_MEMCG
mm->lru_gen.memcg = NULL;
#endif
}
static inline void lru_gen_use_mm(struct mm_struct *mm)
{
/*
* When the bitmap is set, page reclaim knows this mm_struct has been
* used since the last time it cleared the bitmap. So it might be worth
* walking the page tables of this mm_struct to clear the accessed bit.
*/
WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
#else /* !CONFIG_LRU_GEN_WALKS_MMU */
static inline void lru_gen_add_mm(struct mm_struct *mm)
{
}
static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
}
static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_LRU_GEN_WALKS_MMU */
struct vma_iterator {
struct ma_state mas;
};
#define VMA_ITERATOR(name, __mm, __addr) \
struct vma_iterator name = { \
.mas = { \
.tree = &(__mm)->mm_mt, \
.index = __addr, \
.node = NULL, \
.status = ma_start, \
}, \
}
static inline void vma_iter_init(struct vma_iterator *vmi,
struct mm_struct *mm, unsigned long addr)
{
mas_init(&vmi->mas, &mm->mm_mt, addr);
}
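/*
 * Illustrative sketch (hypothetical helper): declaring a VMA iterator with
 * VMA_ITERATOR() and walking the tree. for_each_vma() is assumed from
 * <linux/mm.h> (it is not declared here), and the caller is assumed to hold
 * mmap_lock.
 */
static inline unsigned long example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long nr = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		nr++;
	return nr;
}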
#ifdef CONFIG_SCHED_MM_CID
enum mm_cid_state {
MM_CID_UNSET = -1U, /* Unset state has lazy_put flag set. */
MM_CID_LAZY_PUT = (1U << 31),
};
static inline bool mm_cid_is_unset(int cid)
{
return cid == MM_CID_UNSET;
}
static inline bool mm_cid_is_lazy_put(int cid)
{
return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
}
static inline bool mm_cid_is_valid(int cid)
{
return !(cid & MM_CID_LAZY_PUT);
}
static inline int mm_cid_set_lazy_put(int cid)
{
return cid | MM_CID_LAZY_PUT;
}
static inline int mm_cid_clear_lazy_put(int cid)
{
return cid & ~MM_CID_LAZY_PUT;
}
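/*
 * Illustrative sketch (hypothetical helper) of the cid state transitions the
 * helpers above encode: a valid cid can be marked MM_CID_LAZY_PUT for lazy
 * reclaim and later either revived by clearing that bit or returned to
 * MM_CID_UNSET by the reclaimer.
 */
static inline int example_mm_cid_transitions(int cid)
{
	if (mm_cid_is_unset(cid))
		return cid;			/* nothing allocated */

	cid = mm_cid_set_lazy_put(cid);		/* mark for lazy reclaim */
	if (mm_cid_is_lazy_put(cid))
		cid = mm_cid_clear_lazy_put(cid); /* ...or revive it instead */

	return cid;				/* a valid cid again */
}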
/*
* mm_cpus_allowed: Union of all mm's threads allowed CPUs.
*/
static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
{
unsigned long bitmap = (unsigned long)mm;
bitmap += offsetof(struct mm_struct, cpu_bitmap);
/* Skip cpu_bitmap */
bitmap += cpumask_size();
return (struct cpumask *)bitmap;
}
/* Accessor for struct mm_struct's cidmask. */
static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
{
unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
/* Skip mm_cpus_allowed */
cid_bitmap += cpumask_size();
return (struct cpumask *)cid_bitmap;
}
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
{
int i;
for_each_possible_cpu(i) {
struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);

pcpu_cid->cid = MM_CID_UNSET;
pcpu_cid->recent_cid = MM_CID_UNSET;
pcpu_cid->time = 0;
}
mm->nr_cpus_allowed = p->nr_cpus_allowed;
atomic_set(&mm->max_nr_cid, 0);
raw_spin_lock_init(&mm->cpus_allowed_lock);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
cpumask_clear(mm_cidmask(mm));
}
static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
{
mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
if (!mm->pcpu_cid)
return -ENOMEM;
mm_init_cid(mm, p);
return 0;
}
#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
static inline void mm_destroy_cid(struct mm_struct *mm)
{
free_percpu(mm->pcpu_cid);
mm->pcpu_cid = NULL;
}
static inline unsigned int mm_cid_size(void)
{
return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */
}
static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask)
{
struct cpumask *mm_allowed = mm_cpus_allowed(mm);
if (!mm)
return;
/* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
raw_spin_lock(&mm->cpus_allowed_lock);
cpumask_or(mm_allowed, mm_allowed, cpumask);
WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
raw_spin_unlock(&mm->cpus_allowed_lock);
}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
static inline void mm_destroy_cid(struct mm_struct *mm) { }
static inline unsigned int mm_cid_size(void)
{
return 0;
}
static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { }
#endif /* CONFIG_SCHED_MM_CID */
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);
struct vm_fault;
/**
* typedef vm_fault_t - Return type for page fault handlers.
*
* Page fault handlers return a bitmask of %VM_FAULT values.
*/
typedef __bitwise unsigned int vm_fault_t;
/**
* enum vm_fault_reason - Page fault handlers return a bitmask of
* these values to tell the core VM what happened when handling the
* fault. Used to decide whether a process gets delivered SIGBUS or
* just gets major/minor fault counters bumped up.
*
* @VM_FAULT_OOM: Out Of Memory
* @VM_FAULT_SIGBUS: Bad access
* @VM_FAULT_MAJOR: Page read from storage
* @VM_FAULT_HWPOISON: Hit poisoned small page
* @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded
* in upper bits
* @VM_FAULT_SIGSEGV: segmentation fault
* @VM_FAULT_NOPAGE: ->fault installed the pte, did not return a page
* @VM_FAULT_LOCKED: ->fault locked the returned page
* @VM_FAULT_RETRY: ->fault blocked, must retry
* @VM_FAULT_FALLBACK: huge page fault failed, fall back to small
* @VM_FAULT_DONE_COW: ->fault has fully handled COW
* @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
* fsync() to complete (for synchronous page faults
* in DAX)
* @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released
* @VM_FAULT_HINDEX_MASK: mask HINDEX value
*
*/
enum vm_fault_reason {
VM_FAULT_OOM = (__force vm_fault_t)0x000001,
VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002,
VM_FAULT_MAJOR = (__force vm_fault_t)0x000004,
VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010,
VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040,
VM_FAULT_NOPAGE = (__force vm_fault_t)0x000100,
VM_FAULT_LOCKED = (__force vm_fault_t)0x000200,
VM_FAULT_RETRY = (__force vm_fault_t)0x000400,
VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
};
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
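/*
 * Illustrative sketch (hypothetical helper): encoding an hstate index for a
 * hwpoisoned large page in the upper fault bits and recovering it again.
 */
static inline unsigned int example_hwpoison_hindex_roundtrip(unsigned int hindex)
{
	vm_fault_t ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hindex);

	return VM_FAULT_GET_HINDEX(ret);	/* == hindex for hindex <= 0xf */
}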
#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \
VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \
VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
#define VM_FAULT_RESULT_TRACE \
{ VM_FAULT_OOM, "OOM" }, \
{ VM_FAULT_SIGBUS, "SIGBUS" }, \
{ VM_FAULT_MAJOR, "MAJOR" }, \
{ VM_FAULT_HWPOISON, "HWPOISON" }, \
{ VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
{ VM_FAULT_SIGSEGV, "SIGSEGV" }, \
{ VM_FAULT_NOPAGE, "NOPAGE" }, \
{ VM_FAULT_LOCKED, "LOCKED" }, \
{ VM_FAULT_RETRY, "RETRY" }, \
{ VM_FAULT_FALLBACK, "FALLBACK" }, \
{ VM_FAULT_DONE_COW, "DONE_COW" }, \
{ VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \
{ VM_FAULT_COMPLETED, "COMPLETED" }
struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
/*
* If .fault is not provided, this points to a
* NULL-terminated array of pages that back the special mapping.
*
* This must not be NULL unless .fault is provided.
*/
struct page **pages;
/*
* If non-NULL, then this is called to resolve page faults
* on the special mapping. If used, .pages is not checked.
*/
vm_fault_t (*fault)(const struct vm_special_mapping *sm,
struct vm_area_struct *vma,
struct vm_fault *vmf);
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
void (*close)(const struct vm_special_mapping *sm,
struct vm_area_struct *vma);
};
enum tlb_flush_reason {
TLB_FLUSH_ON_TASK_SWITCH,
TLB_REMOTE_SHOOTDOWN,
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
TLB_REMOTE_SEND_IPI,
TLB_REMOTE_WRONG_CPU,
NR_TLB_FLUSH_REASONS,
};
/**
* enum fault_flag - Fault flag definitions.
* @FAULT_FLAG_WRITE: Fault was a write fault.
* @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
* @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
* @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
* @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
* @FAULT_FLAG_TRIED: The fault has been tried once.
* @FAULT_FLAG_USER: The fault originated in userspace.
* @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
* @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
* @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
* @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
* COW mapping, making sure that an exclusive anon page is
* mapped after the fault.
* @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
* We should only access orig_pte if this flag set.
* @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
*
* About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
* whether we would allow page faults to retry by specifying these two
* fault flags correctly. Currently there can be three legal combinations:
*
* (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
* this is the first try
*
* (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
* we've already tried at least once
*
* (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
*
* The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
* be used. Note that page faults can be allowed to retry multiple times,
* in which case we'll have an initial fault with flags (a) then later on
* continuous faults with flags (b). We should always try to detect pending
* signals before a retry to make sure the continuous page faults can still be
* interrupted if necessary.
*
* The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
* FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
* applied to mappings that are not COW mappings.
*/
enum fault_flag {
FAULT_FLAG_WRITE = 1 << 0,
FAULT_FLAG_MKWRITE = 1 << 1,
FAULT_FLAG_ALLOW_RETRY = 1 << 2,
FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
FAULT_FLAG_KILLABLE = 1 << 4,
FAULT_FLAG_TRIED = 1 << 5,
FAULT_FLAG_USER = 1 << 6,
FAULT_FLAG_REMOTE = 1 << 7,
FAULT_FLAG_INSTRUCTION = 1 << 8,
FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
FAULT_FLAG_UNSHARE = 1 << 10,
FAULT_FLAG_ORIG_PTE_VALID = 1 << 11,
FAULT_FLAG_VMA_LOCK = 1 << 12,
};
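/*
 * Illustrative sketch (hypothetical helper): the two illegal flag
 * combinations called out in the comment above - TRIED without ALLOW_RETRY,
 * and WRITE together with UNSHARE.
 */
static inline bool example_fault_flags_are_sane(enum fault_flag flags)
{
	if ((flags & FAULT_FLAG_TRIED) && !(flags & FAULT_FLAG_ALLOW_RETRY))
		return false;
	if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_UNSHARE))
		return false;
	return true;
}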
typedef unsigned int __bitwise zap_flags_t;
/* Flags for clear_young_dirty_ptes(). */
typedef int __bitwise cydp_t;
/* Clear the access bit */
#define CYDP_CLEAR_YOUNG ((__force cydp_t)BIT(0))
/* Clear the dirty bit */
#define CYDP_CLEAR_DIRTY ((__force cydp_t)BIT(1))
/*
* FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
* other. Here is what they mean, and how to use them:
*
*
* FIXME: For pages which are part of a filesystem, mappings are subject to the
* lifetime enforced by the filesystem and we need guarantees that longterm
* users like RDMA and V4L2 only establish mappings which coordinate usage with
* the filesystem. Ideas for this coordination include revoking the longterm
* pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
* added after the problem with filesystems was found FS DAX VMAs are
* specifically failed. Filesystem pages are still subject to bugs and use of
* FOLL_LONGTERM should be avoided on those pages.
*
* In the CMA case: long term pins in a CMA region would unnecessarily fragment
* that region. And so, CMA attempts to migrate the page before pinning, when
* FOLL_LONGTERM is specified.
*
* FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
* but an additional pin counting system) will be invoked. This is intended for
* anything that gets a page reference and then touches page data (for example,
* Direct IO). This lets the filesystem know that some non-file-system entity is
* potentially changing the pages' data. In contrast to FOLL_GET (whose pages
* are released via put_page()), FOLL_PIN pages must be released, ultimately, by
* a call to unpin_user_page().
*
* FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
* and separate refcounting mechanisms, however, and that means that each has
* its own acquire and release mechanisms:
*
* FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
*
* FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
*
* FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
* (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
* calls applied to them, and that's perfectly OK. This is a constraint on the
* callers, not on the pages.)
*
* FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
* directly by the caller. That's in order to help avoid mismatches when
* releasing pages: get_user_pages*() pages must be released via put_page(),
* while pin_user_pages*() pages must be released via unpin_user_page().
*
* Please see Documentation/core-api/pin_user_pages.rst for more information.
*/
enum {
/* check pte is writable */
FOLL_WRITE = 1 << 0,
/* do get_page on page */
FOLL_GET = 1 << 1,
/* give error on hole if it would be zero */
FOLL_DUMP = 1 << 2,
/* get_user_pages read/write w/o permission */
FOLL_FORCE = 1 << 3,
/*
* if a disk transfer is needed, start the IO and return without waiting
* upon it
*/
FOLL_NOWAIT = 1 << 4,
/* do not fault in pages */
FOLL_NOFAULT = 1 << 5,
/* check page is hwpoisoned */
FOLL_HWPOISON = 1 << 6,
/* don't do file mappings */
FOLL_ANON = 1 << 7,
/*
* FOLL_LONGTERM indicates that the page will be held for an indefinite
* time period _often_ under userspace control. This is in contrast to
* iov_iter_get_pages(), whose usages are transient.
*/
FOLL_LONGTERM = 1 << 8,
/* split huge pmd before returning */
FOLL_SPLIT_PMD = 1 << 9,
/* allow returning PCI P2PDMA pages */
FOLL_PCI_P2PDMA = 1 << 10,
/* allow interrupts from generic signals */
FOLL_INTERRUPTIBLE = 1 << 11,
/*
* Always honor (trigger) NUMA hinting faults.
*
* FOLL_WRITE implicitly honors NUMA hinting faults because a
* PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
* apply). get_user_pages_fast_only() always implicitly honors NUMA
* hinting faults.
*/
FOLL_HONOR_NUMA_FAULT = 1 << 12,
/* See also internal only FOLL flags in mm/internal.h */
};
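/*
 * Illustrative sketch (hypothetical helper) of the acquire/release pairing
 * described above: pages taken with pin_user_pages() (which applies FOLL_PIN
 * internally) must be released with unpin_user_pages(), never put_page().
 * Both functions are assumed from <linux/mm.h>; they are not declared here.
 */
static inline long example_pin_touch_unpin(unsigned long start, unsigned long nr,
		struct page **pages)
{
	long pinned = pin_user_pages(start, nr, FOLL_WRITE | FOLL_LONGTERM, pages);

	if (pinned > 0) {
		/* ... access the page data, e.g. for direct IO or DMA ... */
		unpin_user_pages(pages, pinned);
	}
	return pinned;
}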
/* mm flags */
/*
* The first two bits represent core dump modes for set-user-ID,
* the modes are SUID_DUMP_* defined in linux/sched/coredump.h
*/
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK (BIT(MMF_DUMPABLE_BITS) - 1)
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE 2
#define MMF_DUMP_ANON_SHARED 3
#define MMF_DUMP_MAPPED_PRIVATE 4
#define MMF_DUMP_MAPPED_SHARED 5
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8
#define MMF_DUMP_DAX_PRIVATE 9
#define MMF_DUMP_DAX_SHARED 10
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS 9
#define MMF_DUMP_FILTER_MASK \
((BIT(MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
(BIT(MMF_DUMP_ANON_PRIVATE) | BIT(MMF_DUMP_ANON_SHARED) | \
BIT(MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF BIT(MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF 0
#endif
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */
#define MMF_HUGE_ZERO_FOLIO 18 /* mm has ever used the global huge zero folio */
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_DISABLE_THP_EXCEPT_ADVISED 23 /* no THP except when advised (e.g., VM_HUGEPAGE) */
#define MMF_DISABLE_THP_COMPLETELY 24 /* no THP for all VMAs */
#define MMF_DISABLE_THP_MASK (BIT(MMF_DISABLE_THP_COMPLETELY) | \
BIT(MMF_DISABLE_THP_EXCEPT_ADVISED))
#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
/*
* MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
* replaced in the future by mm.pinned_vm when it becomes stable, or grow into
* a counter on its own. We're aggressive on this bit for now: even if the
* pinned pages were unpinned later on, we'll still keep this bit set for the
* lifecycle of this mm, just for simplicity.
*/
#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
#define MMF_HAS_MDWE 28
#define MMF_HAS_MDWE_MASK BIT(MMF_HAS_MDWE)
#define MMF_HAS_MDWE_NO_INHERIT 29
#define MMF_VM_MERGE_ANY 30
#define MMF_VM_MERGE_ANY_MASK BIT(MMF_VM_MERGE_ANY)
#define MMF_TOPDOWN 31 /* mm searches top down by default */
#define MMF_TOPDOWN_MASK BIT(MMF_TOPDOWN)
#define MMF_INIT_LEGACY_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK)
/* Legacy flags must fit within 32 bits. */
static_assert((u64)MMF_INIT_LEGACY_MASK <= (u64)UINT_MAX);
/*
* Initialise legacy flags according to masks, propagating selected flags on
* fork. Further flag manipulation can be performed by the caller.
*/
static inline unsigned long mmf_init_legacy_flags(unsigned long flags)
{
if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
flags &= ~((1UL << MMF_HAS_MDWE) |
(1UL << MMF_HAS_MDWE_NO_INHERIT));
return flags & MMF_INIT_LEGACY_MASK;
}
#endif /* _LINUX_MM_TYPES_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;
/**
* enum mmu_notifier_event - reason for the mmu notifier callback
* @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
* that moves the range
*
* @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
* madvise() or replacing a page by another one, ...).
*
* @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
* range, i.e. using the vma access permission (vm_page_prot) to update the
* whole range is enough; there is no need to inspect changes to the CPU page
* table (mprotect() syscall)
*
* @MMU_NOTIFY_PROTECTION_PAGE: update is due to change in read/write flag for
* pages in the range so to mirror those changes the user must inspect the CPU
* page table (from the end callback).
*
* @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
* access flags). User should soft dirty the page in the end callback to make
* sure that anyone relying on soft dirtiness catch pages that might be written
* through non CPU mappings.
*
* @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
* that the mm refcount is zero and the range is no longer accessible.
*
* @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
* a device driver to possibly ignore the invalidation if the
* owner field matches the driver's device private pgmap owner.
*
* @MMU_NOTIFY_EXCLUSIVE: conversion of a page table entry to device-exclusive.
* The owner is initialized to the value provided by the caller of
* make_device_exclusive(), such that this caller can filter out these
* events.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
MMU_NOTIFY_CLEAR,
MMU_NOTIFY_PROTECTION_VMA,
MMU_NOTIFY_PROTECTION_PAGE,
MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
MMU_NOTIFY_MIGRATE,
MMU_NOTIFY_EXCLUSIVE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
* being destroyed by exit_mmap, always before all pages are
* freed. This can run concurrently with other mmu notifier
* methods (the ones invoked outside the mm context) and it
* should tear down all secondary mmu mappings and freeze the
* secondary mmu. If this method isn't implemented you have to
* be sure that nothing could possibly write to the pages
* through the secondary mmu by the time the last thread with
* tsk->mm == mm exits.
*
* As side note: the pages freed after ->release returns could
* be immediately reallocated by the gart at an alias physical
* address with a different cache model, so if ->release isn't
* implemented because all _software_ driven memory accesses
* through the secondary mmu are terminated by the time the
* last thread of this mm quits, you also have to be sure that
* speculative _hardware_ operations can't allocate dirty
* cachelines in the cpu that could not be snooped and made
* coherent with the other read and write operations happening
* through the gart alias address, so leading to memory
* corruption.
*/
void (*release)(struct mmu_notifier *subscription,
struct mm_struct *mm);
/*
* clear_flush_young is called after the VM is
* test-and-clearing the young/accessed bitflag in the
* pte. This way the VM will provide proper aging to the
* accesses to the page through the secondary MMUs and not
* only to the ones through the Linux pte.
* Start-end is necessary in case the secondary MMU is mapping the page
* at a smaller granularity than the primary MMU.
*/
int (*clear_flush_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
/*
* clear_young is a lightweight version of clear_flush_young. Like the
* latter, it is supposed to test-and-clear the young/accessed bitflag
* in the secondary pte, but it may omit flushing the secondary tlb.
*/
int (*clear_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
/*
* test_young is called to check the young/accessed bitflag in
* the secondary pte. This is used to know if the page is
* frequently used without actually clearing the flag or tearing
* down the secondary mapping on the page.
*/
int (*test_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address);
/*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
* can't guarantee that no additional references are taken to
* the pages in the range, it has to implement the
* invalidate_range() notifier to remove any references taken
* after invalidate_range_start().
*
* Invalidation of multiple concurrent ranges may be
* optionally permitted by the driver. Either way the
* establishment of sptes is forbidden in the range passed to
* invalidate_range_begin/end for the whole duration of the
* invalidate_range_begin/end critical section.
*
* invalidate_range_start() is called when all pages in the
* range are still mapped and have at least a refcount of one.
*
* invalidate_range_end() is called when all pages in the
* range have been unmapped and the pages have been freed by
* the VM.
*
* The VM will remove the page table entries and potentially
* the page between invalidate_range_start() and
* invalidate_range_end(). If the page must not be freed
* because of pending I/O or other circumstances then the
* invalidate_range_start() callback (or the initial mapping
* by the driver) must make sure that the refcount is kept
* elevated.
*
* If the driver increases the refcount when the pages are
* initially mapped into an address space then either
* invalidate_range_start() or invalidate_range_end() may
* decrease the refcount. If the refcount is decreased on
* invalidate_range_start() then the VM can free pages as page
* table entries are removed. If the refcount is only
* dropped on invalidate_range_end() then the driver itself
* will drop the last refcount but it must take care to flush
* any secondary tlb before doing the final free on the
* page. Pages will no longer be referenced by the linux
* address space but may still be referenced by sptes until
* the last refcount is dropped.
*
* If blockable argument is set to false then the callback cannot
* sleep and has to return with -EAGAIN if sleeping would be required.
* 0 should be returned otherwise. Please note that notifiers that can
* fail invalidate_range_start are not allowed to implement
* invalidate_range_end, as there is no mechanism for informing the
* notifier that its start failed.
*/
int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
void (*invalidate_range_end)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
/*
* arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
* which shares page-tables with the CPU. The
* invalidate_range_start()/end() callbacks should not be implemented as
* arch_invalidate_secondary_tlbs() already catches the points in time when
* an external TLB needs to be flushed.
*
* This requires arch_invalidate_secondary_tlbs() to be called while
* holding the ptl spin-lock and therefore this callback is not allowed
* to sleep.
*
* This is called by architecture code whenever invalidating a TLB
* entry. It is assumed that any secondary TLB has the same rules for
* when invalidations are required. If this is not the case architecture
* code will need to call this explicitly when required for secondary
* TLB invalidation.
*/
void (*arch_invalidate_secondary_tlbs)(
struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
* lifetime of the mmu_notifier memory. alloc_notifier() returns a new
* notifier for use with the mm.
*
* free_notifier() is only called after the mmu_notifier has been
* fully put, calls to any ops callback are prevented and no ops
* callbacks are currently running. It is called from a SRCU callback
* and cannot sleep.
*/
struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
void (*free_notifier)(struct mmu_notifier *subscription);
};
/*
* The notifier chains are protected by mmap_lock and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
* the mmap_lock locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
* 1. mmap_lock is held.
* 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
struct mmu_notifier {
struct hlist_node hlist;
const struct mmu_notifier_ops *ops;
struct mm_struct *mm;
struct rcu_head rcu;
unsigned int users;
};
/**
* struct mmu_interval_notifier_ops
* @invalidate: Upon return the caller must stop using any SPTEs within this
* range. This function can sleep. Return false only if sleeping
* was required but mmu_notifier_range_blockable(range) is false.
*/
struct mmu_interval_notifier_ops {
bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
};
struct mmu_interval_notifier {
struct interval_tree_node interval_tree;
const struct mmu_interval_notifier_ops *ops;
struct mm_struct *mm;
struct hlist_node deferred_item;
unsigned long invalidate_seq;
};
#ifdef CONFIG_MMU_NOTIFIER
#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif
struct mmu_notifier_range {
struct mm_struct *mm;
unsigned long start;
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
void *owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return unlikely(mm->notifier_subscriptions);
}
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
struct mmu_notifier *ret;
mmap_write_lock(mm);
ret = mmu_notifier_get_locked(ops, mm);
mmap_write_unlock(mm);
return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
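/*
 * Illustrative sketch (hypothetical driver helper): using the get/put
 * lifetime interface declared above. The ops table is assumed to provide
 * alloc_notifier()/free_notifier(); mmu_notifier_get() returns a subscription
 * for the given mm (taking mmap_lock internally) and mmu_notifier_put()
 * eventually frees it via free_notifier().
 */
static inline int example_attach_notifier(const struct mmu_notifier_ops *ops,
		struct mm_struct *mm)
{
	struct mmu_notifier *sub = mmu_notifier_get(ops, mm);

	if (IS_ERR(sub))
		return PTR_ERR(sub);

	/* ... the secondary-MMU callbacks in "ops" are now live ... */

	mmu_notifier_put(sub);
	return 0;
}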
extern int mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
struct mm_struct *mm);
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
struct mm_struct *mm, unsigned long start,
unsigned long length,
const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
unsigned long start, unsigned long length,
const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
/**
* mmu_interval_set_seq - Save the invalidation sequence
* @interval_sub: The subscription passed to invalidate
* @cur_seq: The cur_seq passed to the invalidate() callback
*
* This must be called unconditionally from the invalidate callback of a
* struct mmu_interval_notifier_ops under the same lock that is used to call
* mmu_interval_read_retry(). It updates the sequence number for later use by
* mmu_interval_read_retry(). The provided cur_seq will always be odd.
*
* If the caller does not call mmu_interval_read_begin() or
* mmu_interval_read_retry() then this call is not required.
*/
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
unsigned long cur_seq)
{
WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
/**
* mmu_interval_read_retry - End a read side critical section against a VA range
* @interval_sub: The subscription
* @seq: The return of the paired mmu_interval_read_begin()
*
* This MUST be called under a user provided lock that is also held
* unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
*
* Each call should be paired with a single mmu_interval_read_begin() and
* should be used to conclude the read side.
*
* Returns true if an invalidation collided with this critical section, and
* the caller should retry.
*/
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
unsigned long seq)
{
return interval_sub->invalidate_seq != seq;
}
/**
* mmu_interval_check_retry - Test if a collision has occurred
* @interval_sub: The subscription
* @seq: The return of the matching mmu_interval_read_begin()
*
* This can be used in the critical section between mmu_interval_read_begin()
* and mmu_interval_read_retry(). A return of true indicates an invalidation
* has collided with this critical region and a future
* mmu_interval_read_retry() will return true.
*
* False is not reliable and only suggests a collision may not have
* occurred. It can be called many times and does not have to hold the user
* provided lock.
*
* This call can be used as part of loops and other expensive operations to
* expedite a retry.
*/
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
unsigned long seq)
{
/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
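/*
 * Illustrative read-side sketch, not part of this header, reusing the
 * hypothetical struct my_mirror from the sketch above: the sequence from
 * mmu_interval_read_begin() is validated by mmu_interval_read_retry()
 * under the same mirror_lock that the invalidate() callback takes before
 * calling mmu_interval_set_seq().
 */
static int my_mirror_map_range(struct my_mirror *mirror,
			       unsigned long start, unsigned long end)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&mirror->notifier);
	/*
	 * Walk/fault the CPU page tables for [start, end) and stage the
	 * translations (hypothetical work, elided here).
	 */
	mutex_lock(&mirror->mirror_lock);
	if (mmu_interval_read_retry(&mirror->notifier, seq)) {
		mutex_unlock(&mirror->mirror_lock);
		goto again;	/* an invalidation collided, redo the walk */
	}
	/* commit the staged translations to the device page table here */
	mutex_unlock(&mirror->mirror_lock);
	return 0;
}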
extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long start,
unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}
static inline void mmu_notifier_release(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
__mmu_notifier_release(mm);
}
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
if (mm_has_notifiers(mm))
return __mmu_notifier_clear_flush_young(mm, start, end);
return 0;
}
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
if (mm_has_notifiers(mm))
return __mmu_notifier_clear_young(mm, start, end);
return 0;
}
static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
if (mm_has_notifiers(mm))
return __mmu_notifier_test_young(mm, address);
return 0;
}
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
might_sleep();
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
if (mm_has_notifiers(range->mm)) {
range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
__mmu_notifier_invalidate_range_start(range);
}
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
/*
* This version of mmu_notifier_invalidate_range_start() avoids blocking, but it
* can return an error if a notifier can't proceed without blocking, in which
* case you're not allowed to modify PTEs in the specified range.
*
* This is mainly intended for OOM handling.
*/
static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
int ret = 0;
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
if (mm_has_notifiers(range->mm)) {
range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
ret = __mmu_notifier_invalidate_range_start(range);
}
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
return ret;
}
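/*
 * Illustrative sketch, not part of this header: an OOM-reaper style caller
 * of the non-blocking variant must honour the error return and leave the
 * PTEs untouched. MMU_NOTIFY_UNMAP is only an example event choice, and
 * mmu_notifier_range_init() is defined a few lines further down.
 */
static int my_try_reap_range(struct mm_struct *mm,
			     unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;
	int ret;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, start, end);
	ret = mmu_notifier_invalidate_range_start_nonblock(&range);
	if (ret)
		return ret;	/* a subscriber cannot invalidate without sleeping */

	/* zap the page table entries for [start, end) here */

	mmu_notifier_invalidate_range_end(&range);
	return 0;
}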
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
if (mmu_notifier_range_blockable(range))
might_sleep();
if (mm_has_notifiers(range->mm))
__mmu_notifier_invalidate_range_end(range);
}
static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
__mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
mm->notifier_subscriptions = NULL;
}
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
__mmu_notifier_subscriptions_destroy(mm);
}
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
enum mmu_notifier_event event,
unsigned flags,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
range->event = event;
range->mm = mm;
range->start = start;
range->end = end;
range->flags = flags;
}
static inline void mmu_notifier_range_init_owner(
struct mmu_notifier_range *range,
enum mmu_notifier_event event, unsigned int flags,
struct mm_struct *mm, unsigned long start,
unsigned long end, void *owner)
{
mmu_notifier_range_init(range, event, flags, mm, start, end);
range->owner = owner;
}
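/*
 * Illustrative sketch, not part of this header: the usual blockable pattern
 * around a PTE-modifying operation. MMU_NOTIFY_CLEAR is only an example
 * event; real callers pick the event matching the change they make.
 */
static void my_clear_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);

	/* clear/modify the primary MMU's PTEs for [start, end) here */

	mmu_notifier_invalidate_range_end(&range);
}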
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
__young = ptep_clear_flush_young(___vma, ___address, __ptep); \
__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
___address, \
___address + \
PAGE_SIZE); \
__young; \
})
#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \
({ \
int __young; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
__young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
___address, \
___address + \
PMD_SIZE); \
__young; \
})
#define ptep_clear_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
___address + PAGE_SIZE); \
__young; \
})
#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
({ \
int __young; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
___address + PMD_SIZE); \
__young; \
})
#else /* CONFIG_MMU_NOTIFIER */
struct mmu_notifier_range {
unsigned long start;
unsigned long end;
};
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
unsigned long start,
unsigned long end)
{
range->start = start;
range->end = end;
}
#define mmu_notifier_range_init(range,event,flags,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
end, owner) \
_mmu_notifier_range_init(range, start, end)
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
return true;
}
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return 0;
}
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
return 0;
}
static inline int mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
return 0;
}
static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
return 0;
}
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
return 0;
}
static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}
static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
#define mmu_notifier_range_update_to_read_only(r) false
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
static inline void mmu_notifier_synchronize(void)
{
}
#endif /* CONFIG_MMU_NOTIFIER */
#endif /* _LINUX_MMU_NOTIFIER_H */
/*
* cgroup_freezer.c - control group freezer subsystem
*
* Copyright IBM Corporation, 2007
*
* Author : Cedric Le Goater <clg@fr.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
/*
* A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is
* set if "FROZEN" is written to freezer.state cgroupfs file, and cleared
* for "THAWED". FREEZING_PARENT is set if the parent freezer is FREEZING
* for whatever reason. IOW, a cgroup has FREEZING_PARENT set if one of
* its ancestors has FREEZING_SELF set.
*/
enum freezer_state_flags {
CGROUP_FREEZER_ONLINE = (1 << 0), /* freezer is fully online */
CGROUP_FREEZING_SELF = (1 << 1), /* this freezer is freezing */
CGROUP_FREEZING_PARENT = (1 << 2), /* the parent freezer is freezing */
CGROUP_FROZEN = (1 << 3), /* this and its descendants frozen */
/* mask for all FREEZING flags */
CGROUP_FREEZING = CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT,
};
struct freezer {
struct cgroup_subsys_state css;
unsigned int state;
};
static DEFINE_MUTEX(freezer_mutex);
static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct freezer, css) : NULL;
}
static inline struct freezer *task_freezer(struct task_struct *task)
{
return css_freezer(task_css(task, freezer_cgrp_id));
}
static struct freezer *parent_freezer(struct freezer *freezer)
{
return css_freezer(freezer->css.parent);
}
bool cgroup_freezing(struct task_struct *task)
{
bool ret;
rcu_read_lock();
ret = task_freezer(task)->state & CGROUP_FREEZING;
rcu_read_unlock();
return ret;
}
static const char *freezer_state_strs(unsigned int state)
{
if (state & CGROUP_FROZEN)
return "FROZEN";
if (state & CGROUP_FREEZING)
return "FREEZING";
return "THAWED";
}
static struct cgroup_subsys_state *
freezer_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct freezer *freezer;
freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
if (!freezer)
return ERR_PTR(-ENOMEM);
return &freezer->css;
}
/**
* freezer_css_online - commit creation of a freezer css
* @css: css being created
*
* We're committing to creation of @css. Mark it online and inherit
* parent's freezing state while holding cpus read lock and freezer_mutex.
*/
static int freezer_css_online(struct cgroup_subsys_state *css)
{
struct freezer *freezer = css_freezer(css);
struct freezer *parent = parent_freezer(freezer);
cpus_read_lock();
mutex_lock(&freezer_mutex);
freezer->state |= CGROUP_FREEZER_ONLINE;
if (parent && (parent->state & CGROUP_FREEZING)) {
freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;
static_branch_inc_cpuslocked(&freezer_active);
}
mutex_unlock(&freezer_mutex);
cpus_read_unlock();
return 0;
}
/**
* freezer_css_offline - initiate destruction of a freezer css
* @css: css being destroyed
*
* @css is going away. Mark it dead and decrement freezer_active if
* it was holding one.
*/
static void freezer_css_offline(struct cgroup_subsys_state *css)
{
struct freezer *freezer = css_freezer(css);
cpus_read_lock();
mutex_lock(&freezer_mutex);
if (freezer->state & CGROUP_FREEZING)
static_branch_dec_cpuslocked(&freezer_active);
freezer->state = 0;
mutex_unlock(&freezer_mutex);
cpus_read_unlock();
}
static void freezer_css_free(struct cgroup_subsys_state *css)
{
kfree(css_freezer(css));
}
/*
* Tasks can be migrated into a different freezer anytime regardless of its
* current state. freezer_attach() is responsible for making new tasks
* conform to the current state.
*
* Freezer state changes and task migration are synchronized via
* freezer_mutex. freezer_attach() makes the new tasks conform to the
* current state and all following state changes can see the new tasks.
*/
static void freezer_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *new_css;
mutex_lock(&freezer_mutex);
/*
* Make the new tasks conform to the current state of @new_css.
* For simplicity, when migrating any task to a FROZEN cgroup, we
* revert it to FREEZING and let update_if_frozen() determine the
* correct state later.
*
* Tasks in @tset are on @new_css but may not conform to its
* current state before executing the following - !frozen tasks may
* be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
*/
cgroup_taskset_for_each(task, new_css, tset) {
struct freezer *freezer = css_freezer(new_css);
if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
/* clear FROZEN and propagate upwards */
while (freezer && (freezer->state & CGROUP_FROZEN)) {
freezer->state &= ~CGROUP_FROZEN;
freezer = parent_freezer(freezer);
}
freeze_task(task);
}
}
mutex_unlock(&freezer_mutex);
}
/**
* freezer_fork - cgroup post fork callback
* @task: a task which has just been forked
*
* @task has just been created and should conform to the current state of
* the cgroup_freezer it belongs to. This function may race against
* freezer_attach(). Losing to freezer_attach() means that we don't have
* to do anything as freezer_attach() will put @task into the appropriate
* state.
*/
static void freezer_fork(struct task_struct *task)
{
struct freezer *freezer;
/*
* The root cgroup is non-freezable, so we can skip locking the
* freezer. This is safe regardless of race with task migration.
* If we didn't race or won, skipping is obviously the right thing
* to do. If we lost and root is the new cgroup, noop is still the
* right thing to do.
*/
if (task_css_is_root(task, freezer_cgrp_id))
return;
mutex_lock(&freezer_mutex);
rcu_read_lock();
freezer = task_freezer(task);
if (freezer->state & CGROUP_FREEZING)
freeze_task(task);
rcu_read_unlock();
mutex_unlock(&freezer_mutex);
}
/**
* update_if_frozen - update whether a cgroup finished freezing
* @css: css of interest
*
* Once FREEZING is initiated, transition to FROZEN is lazily updated by
* calling this function. If the current state is FREEZING but not FROZEN,
* this function checks whether all tasks of this cgroup and the descendant
* cgroups finished freezing and, if so, sets FROZEN.
*
* The caller is responsible for grabbing RCU read lock and calling
* update_if_frozen() on all descendants prior to invoking this function.
*
* Task states and freezer state might disagree while tasks are being
* migrated into or out of @css, so we can't verify task states against
* @freezer state here. See freezer_attach() for details.
*/
static void update_if_frozen(struct cgroup_subsys_state *css)
{
struct freezer *freezer = css_freezer(css);
struct cgroup_subsys_state *pos;
struct css_task_iter it;
struct task_struct *task;
lockdep_assert_held(&freezer_mutex);
if (!(freezer->state & CGROUP_FREEZING) ||
(freezer->state & CGROUP_FROZEN))
return;
/* are all (live) children frozen? */
rcu_read_lock();
css_for_each_child(pos, css) {
struct freezer *child = css_freezer(pos);
if ((child->state & CGROUP_FREEZER_ONLINE) &&
!(child->state & CGROUP_FROZEN)) {
rcu_read_unlock();
return;
}
}
rcu_read_unlock();
/* are all tasks frozen? */
css_task_iter_start(css, 0, &it);
while ((task = css_task_iter_next(&it))) {
if (freezing(task) && !frozen(task))
goto out_iter_end;
}
freezer->state |= CGROUP_FROZEN;
out_iter_end:
css_task_iter_end(&it);
}
static int freezer_read(struct seq_file *m, void *v)
{
struct cgroup_subsys_state *css = seq_css(m), *pos;
mutex_lock(&freezer_mutex);
rcu_read_lock();
/* update states bottom-up */
css_for_each_descendant_post(pos, css) {
if (!css_tryget_online(pos))
continue;
rcu_read_unlock();
update_if_frozen(pos);
rcu_read_lock();
css_put(pos);
}
rcu_read_unlock();
mutex_unlock(&freezer_mutex);
seq_puts(m, freezer_state_strs(css_freezer(css)->state));
seq_putc(m, '\n');
return 0;
}
static void freeze_cgroup(struct freezer *freezer)
{
struct css_task_iter it;
struct task_struct *task;
css_task_iter_start(&freezer->css, 0, &it);
while ((task = css_task_iter_next(&it)))
freeze_task(task);
css_task_iter_end(&it);
}
static void unfreeze_cgroup(struct freezer *freezer)
{
struct css_task_iter it;
struct task_struct *task;
css_task_iter_start(&freezer->css, 0, &it);
while ((task = css_task_iter_next(&it)))
__thaw_task(task);
css_task_iter_end(&it);
}
/**
* freezer_apply_state - apply state change to a single cgroup_freezer
* @freezer: freezer to apply state change to
* @freeze: whether to freeze or unfreeze
* @state: CGROUP_FREEZING_* flag to set or clear
*
* Set or clear @state on @freezer according to @freeze, and perform
* freezing or thawing as necessary.
*/
static void freezer_apply_state(struct freezer *freezer, bool freeze,
unsigned int state)
{
/* also synchronizes against task migration, see freezer_attach() */
lockdep_assert_held(&freezer_mutex);
if (!(freezer->state & CGROUP_FREEZER_ONLINE))
return;
if (freeze) {
if (!(freezer->state & CGROUP_FREEZING))
static_branch_inc_cpuslocked(&freezer_active);
freezer->state |= state;
freeze_cgroup(freezer);
} else {
bool was_freezing = freezer->state & CGROUP_FREEZING;
freezer->state &= ~state;
if (!(freezer->state & CGROUP_FREEZING)) {
freezer->state &= ~CGROUP_FROZEN;
if (was_freezing)
static_branch_dec_cpuslocked(&freezer_active);
unfreeze_cgroup(freezer);
}
}
}
/**
* freezer_change_state - change the freezing state of a cgroup_freezer
* @freezer: freezer of interest
* @freeze: whether to freeze or thaw
*
* Freeze or thaw @freezer according to @freeze. The operations are
* recursive - all descendants of @freezer will be affected.
*/
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
struct cgroup_subsys_state *pos;
cpus_read_lock();
/*
* Update all its descendants in pre-order traversal. Each
* descendant will try to inherit its parent's FREEZING state as
* CGROUP_FREEZING_PARENT.
*/
mutex_lock(&freezer_mutex);
rcu_read_lock();
css_for_each_descendant_pre(pos, &freezer->css) {
struct freezer *pos_f = css_freezer(pos);
struct freezer *parent = parent_freezer(pos_f);
if (!css_tryget_online(pos))
continue;
rcu_read_unlock();
if (pos_f == freezer)
freezer_apply_state(pos_f, freeze,
CGROUP_FREEZING_SELF);
else
freezer_apply_state(pos_f,
parent->state & CGROUP_FREEZING,
CGROUP_FREEZING_PARENT);
rcu_read_lock();
css_put(pos);
}
rcu_read_unlock();
mutex_unlock(&freezer_mutex);
cpus_read_unlock();
}
static ssize_t freezer_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
bool freeze;
buf = strstrip(buf);
if (strcmp(buf, freezer_state_strs(0)) == 0)
freeze = false;
else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0) {
pr_info_once("Freezing with imperfect legacy cgroup freezer. "
"See cgroup.freeze of cgroup v2\n");
freeze = true;
} else
return -EINVAL;
freezer_change_state(css_freezer(of_css(of)), freeze);
return nbytes;
}
static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_SELF);
}
static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
}
static struct cftype files[] = {
{
.name = "state",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = freezer_read,
.write = freezer_write,
},
{
.name = "self_freezing",
.flags = CFTYPE_NOT_ON_ROOT,
.read_u64 = freezer_self_freezing_read,
},
{
.name = "parent_freezing",
.flags = CFTYPE_NOT_ON_ROOT,
.read_u64 = freezer_parent_freezing_read,
},
{ } /* terminate */
};
struct cgroup_subsys freezer_cgrp_subsys = {
.css_alloc = freezer_css_alloc,
.css_online = freezer_css_online,
.css_offline = freezer_css_offline,
.css_free = freezer_css_free,
.attach = freezer_attach,
.fork = freezer_fork,
.legacy_cftypes = files,
};
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Variant of atomic_t specialized for reference counts.
*
* The interface matches the atomic_t interface (to aid in porting) but only
* provides the few functions one should use for reference counting.
*
* Saturation semantics
* ====================
*
* refcount_t differs from atomic_t in that the counter saturates at
* REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
* counter and causing 'spurious' use-after-free issues. In order to avoid the
* cost associated with introducing cmpxchg() loops into all of the saturating
* operations, we temporarily allow the counter to take on an unchecked value
* and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
* or overflow has occurred. Although this is racy when multiple threads
* access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
* equidistant from 0 and INT_MAX we minimise the scope for error:
*
*                                 INT_MAX    REFCOUNT_SATURATED     UINT_MAX
*   0                         (0x7fff_ffff)     (0xc000_0000)    (0xffff_ffff)
*   +--------------------------------+----------------+----------------+
*                                     <---------- bad value! ---------->
*
* (in a signed view of the world, the "bad value" range corresponds to
* a negative counter value).
*
* As an example, consider a refcount_inc() operation that causes the counter
* to overflow:
*
* int old = atomic_fetch_add_relaxed(1, &r->refs);
* // old is INT_MAX, refcount now INT_MIN (0x8000_0000)
* if (old < 0)
* atomic_set(r, REFCOUNT_SATURATED);
*
* If another thread also performs a refcount_inc() operation between the two
* atomic operations, then the count will continue to edge closer to 0. If it
* reaches a value of 1 before /any/ of the threads reset it to the saturated
* value, then a concurrent refcount_dec_and_test() may erroneously free the
* underlying object.
* Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
* 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
* With the current PID limit, if no batched refcounting operations are used and
* the attacker can't repeatedly trigger kernel oopses in the middle of refcount
* operations, this makes it impossible for a saturated refcount to leave the
* saturation range, even if it is possible for multiple uses of the same
* refcount to nest in the context of a single task:
*
* (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
* 0x40000000 / 0x400000 = 0x100 = 256
*
* If hundreds of references are added/removed with a single refcounting
* operation, it may potentially be possible to leave the saturation range; but
* given the precise timing details involved with the round-robin scheduling of
* each thread manipulating the refcount and the need to hit the race multiple
* times in succession, there doesn't appear to be a practical avenue of attack
* even if using refcount_add() operations with larger increments.
*
* Memory ordering
* ===============
*
* Memory ordering rules are slightly relaxed wrt regular atomic_t functions
* and provide only what is strictly required for refcounts.
*
* The increments are fully relaxed; these will not provide ordering. The
* rationale is that whatever is used to obtain the object we're increasing the
* reference count on will provide the ordering. For locked data structures,
* it's the lock acquire; for RCU/lockless data structures it's the dependent
* load.
*
* Do note that inc_not_zero() provides a control dependency which will order
* future stores against the inc; this ensures we'll never modify the object
* if we did not in fact acquire a reference.
*
* The decrements will provide release order, such that all the prior loads and
* stores will be issued before; it also provides a control dependency, which
* will order us against the subsequent free().
*
* The control dependency is against the load of the cmpxchg (ll/sc) that
* succeeded. This means the stores aren't fully ordered, but this is fine
* because the 1->0 transition indicates no concurrency.
*
* Note that the allocator is responsible for ordering things between free()
* and alloc().
*
* The decrements dec_and_test() and sub_and_test() also provide acquire
* ordering on success.
*
* refcount_{add|inc}_not_zero_acquire() and refcount_set_release() provide
* acquire and release ordering for cases when the memory occupied by the
* object might be reused to store another object. This is important for the
* cases where secondary validation is required to detect such reuse, e.g.
* SLAB_TYPESAFE_BY_RCU. The secondary validation checks have to happen after
* the refcount is taken, hence acquire order is necessary. Similarly, when the
* object is initialized, all stores to its attributes should be visible before
* the refcount is set; otherwise a stale attribute value might be used by
* another task which succeeds in taking a refcount to the new object.
*/
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>
struct mutex;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX INT_MAX
#define REFCOUNT_SATURATED (INT_MIN / 2)
enum refcount_saturation_type {
REFCOUNT_ADD_NOT_ZERO_OVF,
REFCOUNT_ADD_OVF,
REFCOUNT_ADD_UAF,
REFCOUNT_SUB_UAF,
REFCOUNT_DEC_LEAK,
};
void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);
/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
static inline void refcount_set(refcount_t *r, int n)
{
atomic_set(&r->refs, n);
}
/**
* refcount_set_release - set a refcount's value with release ordering
* @r: the refcount
* @n: value to which the refcount will be set
*
* This function should be used when memory occupied by the object might be
* reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
*
* Provides release memory ordering which will order previous memory operations
* against this store. This ensures all updates to this object are visible
* once the refcount is set and stale values from the object previously
* occupying this memory are overwritten with new ones.
*
* This function should be called only after new object is fully initialized.
* After this call the object should be considered visible to other tasks even
* if it was not yet added into an object collection normally used to discover
* it. This is because other tasks might have discovered the object previously
* occupying the same memory and after memory reuse they can succeed in taking
* refcount to the new object and start using it.
*/
static inline void refcount_set_release(refcount_t *r, int n)
{
atomic_set_release(&r->refs, n);
}
/**
* refcount_read - get a refcount's value
* @r: the refcount
*
* Return: the refcount's value
*/
static inline unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
}
static inline __must_check __signed_wrap
bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
int old = refcount_read(r);
do {
if (!old)
break;
} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
if (oldp)
*oldp = old;
if (unlikely(old < 0 || old + i < 0))
refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
return old;
}
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
* Will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*
* Return: false if the passed refcount is 0, true otherwise
*/
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
return __refcount_add_not_zero(i, r, NULL);
}
static inline __must_check __signed_wrap
bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp,
int limit)
{
int old = refcount_read(r);
do {
if (!old)
break;
if (i > limit - old) {
if (oldp)
*oldp = old;
return false;
}
} while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i));
if (oldp)
*oldp = old;
if (unlikely(old < 0 || old + i < 0))
refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
return old;
}
static inline __must_check bool
__refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit)
{
return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit);
}
static inline __must_check __signed_wrap
bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp)
{
return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX);
}
/**
* refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0
*
* @i: the value to add to the refcount
* @r: the refcount
*
* Will saturate at REFCOUNT_SATURATED and WARN.
*
* This function should be used when memory occupied by the object might be
* reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
*
* Provides acquire memory ordering on success, it is assumed the caller has
* guaranteed the object memory to be stable (RCU, etc.). It does provide a
* control dependency and thereby orders future stores. See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc_not_zero_acquire() should instead be used to increment a
* reference count.
*
* Return: false if the passed refcount is 0, true otherwise
*/
static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t *r)
{
return __refcount_add_not_zero_acquire(i, r, NULL);
}
static inline __signed_wrap
void __refcount_add(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_add_relaxed(i, &r->refs);
if (oldp)
*oldp = old;
if (unlikely(!old))
refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
else if (unlikely(old < 0 || old + i < 0))
refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
/**
* refcount_add - add a value to a refcount
* @i: the value to add to the refcount
* @r: the refcount
*
* Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*/
static inline void refcount_add(int i, refcount_t *r)
{
__refcount_add(i, r, NULL);
}
static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
return __refcount_add_not_zero(1, r, oldp);
}
/**
* refcount_inc_not_zero - increment a refcount unless it is 0
* @r: the refcount to increment
*
* Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
* and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Return: true if the increment was successful, false otherwise
*/
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
return __refcount_inc_not_zero(r, NULL);
}
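/*
 * Illustrative sketch, not part of this header: the typical RCU lookup
 * pattern refcount_inc_not_zero() is meant for. The object, list and helper
 * names (struct my_obj, my_obj_list, my_obj_find) are hypothetical and the
 * RCU list helpers are assumed to come from <linux/rculist.h>. The RCU read
 * lock only keeps the memory stable; the control dependency of the increment
 * does the rest (see the memory ordering comment at the top of this file).
 */
struct my_obj {
	refcount_t ref;
	int id;
	struct list_head node;
	struct rcu_head rcu;
};

static struct my_obj *my_obj_find(struct list_head *my_obj_list, int id)
{
	struct my_obj *obj;

	rcu_read_lock();
	list_for_each_entry_rcu(obj, my_obj_list, node) {
		if (obj->id == id && refcount_inc_not_zero(&obj->ref)) {
			rcu_read_unlock();
			return obj;	/* reference taken, safe to use */
		}
	}
	rcu_read_unlock();
	return NULL;
}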
static inline __must_check bool __refcount_inc_not_zero_acquire(refcount_t *r, int *oldp)
{
return __refcount_add_not_zero_acquire(1, r, oldp);
}
/**
* refcount_inc_not_zero_acquire - increment a refcount with acquire ordering unless it is 0
* @r: the refcount to increment
*
* Similar to refcount_inc_not_zero(), but provides acquire memory ordering on
* success.
*
* This function should be used when memory occupied by the object might be
* reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
*
* Provides acquire memory ordering on success, it is assumed the caller has
* guaranteed the object memory to be stable (RCU, etc.). It does provide a
* control dependency and thereby orders future stores. See the comment on top.
*
* Return: true if the increment was successful, false otherwise
*/
static inline __must_check bool refcount_inc_not_zero_acquire(refcount_t *r)
{
return __refcount_inc_not_zero_acquire(r, NULL);
}
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
__refcount_add(1, r, oldp);
}
/**
* refcount_inc - increment a refcount
* @r: the refcount to increment
*
* Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller already has a
* reference on the object.
*
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
static inline void refcount_inc(refcount_t *r)
{
__refcount_inc(r, NULL);
}
static inline __must_check __signed_wrap
bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(i, &r->refs);
if (oldp)
*oldp = old;
if (old > 0 && old == i) {
smp_acquire__after_ctrl_dep();
return true;
}
if (unlikely(old <= 0 || old - i < 0))
refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
return false;
}
/**
* refcount_sub_and_test - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
* Similar to atomic_dec_and_test(), but it will WARN, return false and
* ultimately leak on underflow and will fail to decrement when saturated
* at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_dec(), or one of its variants, should instead be used to
* decrement a reference count.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
return __refcount_sub_and_test(i, r, NULL);
}
static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
return __refcount_sub_and_test(1, r, oldp);
}
/**
* refcount_dec_and_test - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
* decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
return __refcount_dec_and_test(r, NULL);
}
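/*
 * Illustrative sketch, not part of this header: the matching "put" side for
 * the hypothetical my_obj lookup above. Only the thread that observes the
 * 1->0 transition frees the object; kfree_rcu() pairs with the RCU-protected
 * lookup so concurrent readers never see freed memory.
 */
static void my_obj_put(struct my_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))
		kfree_rcu(obj, rcu);	/* last reference dropped */
}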
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(1, &r->refs);
if (oldp)
*oldp = old;
if (unlikely(old <= 1))
refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
/**
* refcount_dec - decrement a refcount
* @r: the refcount
*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
* when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
static inline void refcount_dec(refcount_t *r)
{
__refcount_dec(r, NULL);
}
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
unsigned long *flags) __cond_acquires(lock);
#endif /* _LINUX_REFCOUNT_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_FIND_H_
#define __LINUX_FIND_H_
#ifndef __LINUX_BITMAP_H
#error only <linux/bitmap.h> can be included directly
#endif
#include <linux/bitops.h>
unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
unsigned long start);
unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start);
unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start);
unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start);
unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
unsigned long start);
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n);
unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n);
unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
const unsigned long *addr3, unsigned long size,
unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
unsigned long _find_first_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size);
unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2,
const unsigned long *addr3, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
#ifdef __BIG_ENDIAN
unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
unsigned long _find_next_zero_bit_le(const unsigned long *addr, unsigned
long size, unsigned long offset);
unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
long size, unsigned long offset);
#endif
unsigned long find_random_bit(const unsigned long *addr, unsigned long size);
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit(addr, size, offset);
}
#endif
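/*
 * Illustrative sketch, not part of this header: scanning a bitmap with
 * find_next_bit(). The bitmap and its size are caller-supplied; the loop
 * terminates because find_next_bit() returns @size when no further bit is
 * set. Starting the search at offset 0 is equivalent to find_first_bit().
 */
static void my_print_set_bits(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_next_bit(map, nbits, 0);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		pr_info("bit %lu is set\n", bit);
}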
#ifndef find_next_and_bit
/**
* find_next_and_bit - find the next set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr1 & *addr2 & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_and_bit(addr1, addr2, size, offset);
}
#endif
#ifndef find_next_andnot_bit
/**
* find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
* in *addr2
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_next_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_andnot_bit(addr1, addr2, size, offset);
}
#endif
#ifndef find_next_or_bit
/**
* find_next_or_bit - find the next set bit in either memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_next_or_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_or_bit(addr1, addr2, size, offset);
}
#endif
#ifndef find_next_zero_bit
/**
* find_next_zero_bit - find the next cleared bit in a memory region
* @addr: The address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
static __always_inline
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val;
if (unlikely(offset >= size))
return size;
val = *addr | ~GENMASK(size - 1, offset);
return val == ~0UL ? size : ffz(val);
}
return _find_next_zero_bit(addr, size, offset);
}
#endif
#ifndef find_first_bit
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_bit(addr, size);
}
#endif
/**
* find_nth_bit - find N'th set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
* @n: The index of the set bit whose position is needed, counting from 0
*
* The following are semantically equivalent:
* idx = find_nth_bit(addr, size, 0);
* idx = find_first_bit(addr, size);
*
* Returns the bit number of the N'th set bit.
* If there is no such bit, returns >= @size.
*/
static __always_inline
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
{
if (n >= size)
return size;
if (small_const_nbits(size)) {
unsigned long val = *addr & GENMASK(size - 1, 0);
return val ? fns(val, n) : size;
}
return __find_nth_bit(addr, size, n);
}
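/*
 * Illustrative sketch, not part of this header: with the 8-bit pattern
 * 0b1010_0100 (bits 2, 5 and 7 set), find_nth_bit(..., 0) returns 2,
 * n == 1 returns 5 and n == 2 returns 7; asking for a fourth set bit
 * yields a value >= the bitmap size, per the kerneldoc above.
 */
static void my_find_nth_demo(void)
{
	DECLARE_BITMAP(map, 8) = { 0xa4 };	/* bits 2, 5 and 7 set */

	WARN_ON(find_nth_bit(map, 8, 0) != 2);
	WARN_ON(find_nth_bit(map, 8, 1) != 5);
	WARN_ON(find_nth_bit(map, 8, 2) != 7);
	WARN_ON(find_nth_bit(map, 8, 3) < 8);	/* no such bit */
}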
/**
* find_nth_and_bit - find N'th set bit in 2 memory regions
* @addr1: The 1st address to start the search at
* @addr2: The 2nd address to start the search at
* @size: The maximum number of bits to search
* @n: The index of the set bit whose position is needed, counting from 0
*
* Returns the bit number of the N'th set bit.
* If there is no such bit, returns @size.
*/
static __always_inline
unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
if (n >= size)
return size;
if (small_const_nbits(size)) {
unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
return val ? fns(val, n) : size;
}
return __find_nth_and_bit(addr1, addr2, size, n);
}
/**
* find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
* excluding those set in 3rd region
* @addr1: The 1st address to start the search at
* @addr2: The 2nd address to start the search at
* @addr3: The 3rd address to start the search at
* @size: The maximum number of bits to search
* @n: The index of the set bit whose position is needed, counting from 0
*
* Returns the bit number of the N'th set bit.
* If there is no such bit, returns @size.
*/
static __always_inline
unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2,
const unsigned long *addr3,
unsigned long size, unsigned long n)
{
if (n >= size)
return size;
if (small_const_nbits(size)) {
unsigned long val = *addr1 & *addr2 & (~*addr3) & GENMASK(size - 1, 0);
return val ? fns(val, n) : size;
}
return __find_nth_and_andnot_bit(addr1, addr2, addr3, size, n);
}
#ifndef find_first_and_bit
/**
* find_first_and_bit - find the first set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
*
* Returns the bit number for the first set bit
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_and_bit(addr1, addr2, size);
}
#endif
/**
* find_first_andnot_bit - find the first bit set in 1st memory region and unset in 2nd
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
*
* Returns the bit number for the first set bit
* If no bits are set, returns >= @size.
*/
static __always_inline
unsigned long find_first_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_andnot_bit(addr1, addr2, size);
}
/**
* find_first_and_and_bit - find the first set bit in 3 memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @addr3: The third address to base the search on
* @size: The bitmap size in bits
*
* Returns the bit number for the first set bit
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_first_and_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
const unsigned long *addr3,
unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr1 & *addr2 & *addr3 & GENMASK(size - 1, 0);
return val ? __ffs(val) : size;
}
return _find_first_and_and_bit(addr1, addr2, addr3, size);
}
#ifndef find_first_zero_bit
/**
* find_first_zero_bit - find the first cleared bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
static __always_inline
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr | ~GENMASK(size - 1, 0);
return val == ~0UL ? size : ffz(val);
}
return _find_first_zero_bit(addr, size);
}
#endif
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
* @size: The number of bits to search
*
* Returns the bit number of the last set bit, or size.
*/
static __always_inline
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = *addr & GENMASK(size - 1, 0);
return val ? __fls(val) : size;
}
return _find_last_bit(addr, size);
}
#endif
/**
* find_next_and_bit_wrap - find the next set bit in both memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size, unsigned long offset)
{
unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
if (bit < size || offset == 0)
return bit;
bit = find_first_and_bit(addr1, addr2, offset);
return bit < offset ? bit : size;
}
/**
* find_next_bit_wrap - find the next set bit in a memory region
* @addr: The address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
static __always_inline
unsigned long find_next_bit_wrap(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
unsigned long bit = find_next_bit(addr, size, offset);
if (bit < size || offset == 0)
return bit;
bit = find_first_bit(addr, offset);
return bit < offset ? bit : size;
}
/*
* Helper for for_each_set_bit_wrap(). Make sure you're doing the right thing
* before using it alone.
*/
static __always_inline
unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
unsigned long start, unsigned long n)
{
unsigned long bit;
/* If not wrapped around */
if (n > start) {
/* and have a bit, just return it. */
bit = find_next_bit(bitmap, size, n);
if (bit < size)
return bit;
/* Otherwise, wrap around and ... */
n = 0;
}
/* Search the other part. */
bit = find_next_bit(bitmap, start, n);
return bit < start ? bit : size;
}
/**
* find_next_clump8 - find next 8-bit clump with set bits in a memory region
* @clump: location to store copy of found clump
* @addr: address to base the search on
* @size: bitmap size in number of bits
* @offset: bit offset at which to start searching
*
* Returns the bit offset for the next set clump; the found clump value is
* copied to the location pointed to by @clump. If no bits are set, returns @size.
*/
extern unsigned long find_next_clump8(unsigned long *clump,
const unsigned long *addr,
unsigned long size, unsigned long offset);
#define find_first_clump8(clump, bits, size) \
find_next_clump8((clump), (bits), (size), 0)
#if defined(__LITTLE_ENDIAN)
static __always_inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
static __always_inline
unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
static __always_inline
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
return find_first_zero_bit(addr, size);
}
#elif defined(__BIG_ENDIAN)
#ifndef find_next_zero_bit_le
static __always_inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val = *(const unsigned long *)addr;
if (unlikely(offset >= size))
return size;
val = swab(val) | ~GENMASK(size - 1, offset);
return val == ~0UL ? size : ffz(val);
}
return _find_next_zero_bit_le(addr, size, offset);
}
#endif
#ifndef find_first_zero_bit_le
static __always_inline
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
if (small_const_nbits(size)) {
unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
return val == ~0UL ? size : ffz(val);
}
return _find_first_zero_bit_le(addr, size);
}
#endif
#ifndef find_next_bit_le
static __always_inline
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
if (small_const_nbits(size)) {
unsigned long val = *(const unsigned long *)addr;
if (unlikely(offset >= size))
return size;
val = swab(val) & GENMASK(size - 1, offset);
return val ? __ffs(val) : size;
}
return _find_next_bit_le(addr, size, offset);
}
#endif
#else
#error "Please fix <asm/byteorder.h>"
#endif
#define for_each_set_bit(bit, addr, size) \
for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
#define for_each_and_bit(bit, addr1, addr2, size) \
for ((bit) = 0; \
(bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
(bit)++)
#define for_each_andnot_bit(bit, addr1, addr2, size) \
for ((bit) = 0; \
(bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
(bit)++)
#define for_each_or_bit(bit, addr1, addr2, size) \
for ((bit) = 0; \
(bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
(bit)++)
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
#define for_each_clear_bit(bit, addr, size) \
for ((bit) = 0; \
(bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
(bit)++)
/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
/**
* for_each_set_bitrange - iterate over all set bit ranges [b; e)
* @b: bit offset of start of current bitrange (first set bit)
* @e: bit offset of end of current bitrange (first unset bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_set_bitrange(b, e, addr, size) \
for ((b) = 0; \
(b) = find_next_bit((addr), (size), b), \
(e) = find_next_zero_bit((addr), (size), (b) + 1), \
(b) < (size); \
(b) = (e) + 1)
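/*
 * Illustrative sketch, not part of this header: printing contiguous runs of
 * set bits as [b; e) ranges. With a hypothetical 8-bit map whose bits 0-2
 * and 5-6 are set, this prints the ranges [0; 3) and [5; 7).
 */
static void my_print_set_ranges(const unsigned long *map, unsigned long nbits)
{
	unsigned long b, e;

	for_each_set_bitrange(b, e, map, nbits)
		pr_info("set range: [%lu; %lu)\n", b, e);
}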
/**
* for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
* @b: bit offset of start of current bitrange (first set bit); must be initialized
* @e: bit offset of end of current bitrange (first unset bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_set_bitrange_from(b, e, addr, size) \
for (; \
(b) = find_next_bit((addr), (size), (b)), \
(e) = find_next_zero_bit((addr), (size), (b) + 1), \
(b) < (size); \
(b) = (e) + 1)
/**
* for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
* @b: bit offset of start of current bitrange (first unset bit)
* @e: bit offset of end of current bitrange (first set bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_clear_bitrange(b, e, addr, size) \
for ((b) = 0; \
(b) = find_next_zero_bit((addr), (size), (b)), \
(e) = find_next_bit((addr), (size), (b) + 1), \
(b) < (size); \
(b) = (e) + 1)
/**
* for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
* @b: bit offset of start of current bitrange (first unset bit); must be initialized
* @e: bit offset of end of current bitrange (first set bit)
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_clear_bitrange_from(b, e, addr, size) \
for (; \
(b) = find_next_zero_bit((addr), (size), (b)), \
(e) = find_next_bit((addr), (size), (b) + 1), \
(b) < (size); \
(b) = (e) + 1)
/**
* for_each_set_bit_wrap - iterate over all set bits starting from @start, and
* wrapping around the end of bitmap.
* @bit: offset for current iteration
* @addr: bitmap address to base the search on
* @size: bitmap size in number of bits
* @start: Starting bit for bitmap traversing, wrapping around the bitmap end
*/
#define for_each_set_bit_wrap(bit, addr, size, start) \
for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
(bit) < (size); \
(bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
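/*
 * Illustrative sketch (hypothetical names): for_each_set_bit_wrap() suits a
 * round-robin scan that starts after the most recently used slot and wraps
 * around the end of the bitmap.  'last_used' and handle_slot() are made up
 * for illustration.
 *
 * unsigned int bit;
 *
 * for_each_set_bit_wrap(bit, map, nbits, last_used + 1)
 * handle_slot(bit);
 */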
/**
* for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
* @start: bit offset to start search and to store the current iteration offset
* @clump: location to store copy of current 8-bit clump
* @bits: bitmap address to base the search on
* @size: bitmap size in number of bits
*/
#define for_each_set_clump8(start, clump, bits, size) \
for ((start) = find_first_clump8(&(clump), (bits), (size)); \
(start) < (size); \
(start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
#endif /*__LINUX_FIND_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
struct mnt_namespace;
struct uts_namespace;
struct ipc_namespace;
struct pid_namespace;
struct cgroup_namespace;
struct fs_struct;
/*
* A structure to contain pointers to all per-process
* namespaces - fs (mount), uts, network, sysvipc, etc.
*
* The pid namespace is an exception -- it's accessed using
* task_active_pid_ns. The pid namespace here is the
* namespace that children will use.
*
* 'count' is the number of tasks holding a reference.
* The count for each namespace, then, will be the number
* of nsproxies pointing to it, not the number of tasks.
*
* The nsproxy is shared by tasks which share all namespaces.
* As soon as a single namespace is cloned or unshared, the
* nsproxy is copied.
*/
struct nsproxy {
refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
struct time_namespace *time_ns;
struct time_namespace *time_ns_for_children;
struct cgroup_namespace *cgroup_ns;
};
extern struct nsproxy init_nsproxy;
/*
* A structure to encompass all bits needed to install
* a partial or complete new set of namespaces.
*
* If a new user namespace is requested cred will
* point to a modifiable set of credentials. If a pointer
* to a modifiable set is needed nsset_cred() must be
* used and tested.
*/
struct nsset {
unsigned flags;
struct nsproxy *nsproxy;
struct fs_struct *fs;
const struct cred *cred;
};
static inline struct cred *nsset_cred(struct nsset *set)
{
if (set->flags & CLONE_NEWUSER)
return (struct cred *)set->cred;
return NULL;
}
/*
* the namespaces access rules are:
*
* 1. only current task is allowed to change tsk->nsproxy pointer or
* any pointer on the nsproxy itself. Current must hold the task_lock
* when changing tsk->nsproxy.
*
* 2. when accessing (i.e. reading) current task's namespaces - no
* precautions should be taken - just dereference the pointers
*
* 3. the access to other task namespaces is performed like this
* task_lock(task);
* nsproxy = task->nsproxy;
* if (nsproxy != NULL) {
* / *
* * work with the namespaces here
* * e.g. get the reference on one of them
* * /
* } / *
* * NULL task->nsproxy means that this task is
* * almost dead (zombie)
* * /
* task_unlock(task);
*
*/
int copy_namespaces(u64 flags, struct task_struct *tsk);
void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
int exec_task_namespaces(void);
void free_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct cred *, struct fs_struct *);
int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
{
if (refcount_dec_and_test(&ns->count))
free_nsproxy(ns);
}
static inline void get_nsproxy(struct nsproxy *ns)
{
refcount_inc(&ns->count);
}
DEFINE_FREE(put_nsproxy, struct nsproxy *, if (_T) put_nsproxy(_T))
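/*
 * Illustrative sketch (an assumption about typical use, not code from this
 * header): the DEFINE_FREE() helper above enables scope-based cleanup via the
 * __free() attribute from <linux/cleanup.h>, so a reference taken in a
 * function is dropped automatically on every return path.
 *
 * struct nsproxy *nsp __free(put_nsproxy) = NULL;
 *
 * task_lock(task);
 * nsp = task->nsproxy;
 * if (nsp)
 * get_nsproxy(nsp);
 * task_unlock(task);
 * if (!nsp)
 * return -ESRCH;
 * // use nsp; put_nsproxy() runs automatically when nsp goes out of scope
 */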
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Percpu refcounts:
* (C) 2012 Google, Inc.
* Author: Kent Overstreet <koverstreet@google.com>
*
* This implements a refcount with similar semantics to atomic_t - atomic_inc(),
* atomic_dec_and_test() - but percpu.
*
* There's one important difference between percpu refs and normal atomic_t
* refcounts; you have to keep track of your initial refcount, and then when you
* start shutting down you call percpu_ref_kill() _before_ dropping the initial
* refcount.
*
* The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
* than an atomic_t - this is because of the way shutdown works, see
* percpu_ref_kill()/PERCPU_COUNT_BIAS.
*
* Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
* refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
* puts the ref back in single atomic_t mode, collecting the per cpu refs and
* issuing the appropriate barriers, and then marks the ref as shutting down so
* that percpu_ref_put() will check for the ref hitting 0. After it returns,
* it's safe to drop the initial ref.
*
* USAGE:
*
* See fs/aio.c for some example usage; it's used there for struct kioctx, which
* is created when userspace calls io_setup(), and destroyed when userspace
* calls io_destroy() or the process exits.
*
* In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
* removes the kioctx from the process's table of kioctxs and kills the percpu_ref.
* After that, there can't be any new users of the kioctx (from lookup_ioctx())
* and it's then safe to drop the initial ref with percpu_ref_put().
*
* Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
* to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't
* imply RCU grace periods of any kind and if a user wants to combine percpu_ref
* with RCU protection, it must be done explicitly.
*
* Code that does a two stage shutdown like this often needs some kind of
* explicit synchronization to ensure the initial refcount can only be dropped
* once - percpu_ref_kill() does this for you, it returns true once and false if
* someone else already called it. The aio code uses it this way, but it's not
* necessary if the code has some other mechanism to synchronize teardown.
*/
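/*
 * Illustrative lifecycle sketch (hypothetical embedding structure and release
 * callback; not taken from any particular user):
 *
 * struct foo {
 * struct percpu_ref ref;
 * };
 *
 * static void foo_release(struct percpu_ref *ref)
 * {
 * struct foo *foo = container_of(ref, struct foo, ref);
 * percpu_ref_exit(&foo->ref);
 * kfree(foo);
 * }
 *
 * // creation: holds the initial ref
 * if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
 * return -ENOMEM;
 *
 * // fast-path users
 * percpu_ref_get(&foo->ref);
 * percpu_ref_put(&foo->ref);
 *
 * // teardown: switch to atomic mode, then drop the initial ref
 * percpu_ref_kill(&foo->ref);
 * percpu_ref_put(&foo->ref);
 */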
#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/gfp.h>
struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);
/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
__PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
__PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
__PERCPU_REF_FLAG_BITS = 2,
};
/* @flags for percpu_ref_init() */
enum {
/*
* Start w/ ref == 1 in atomic mode. Can be switched to percpu
* operation using percpu_ref_switch_to_percpu(). If initialized
* with this flag, the ref will stay in atomic mode until
* percpu_ref_switch_to_percpu() is invoked on it.
* Implies ALLOW_REINIT.
*/
PERCPU_REF_INIT_ATOMIC = 1 << 0,
/*
* Start dead w/ ref == 0 in atomic mode. Must be revived with
* percpu_ref_reinit() before used. Implies INIT_ATOMIC and
* ALLOW_REINIT.
*/
PERCPU_REF_INIT_DEAD = 1 << 1,
/*
* Allow switching from atomic mode to percpu mode.
*/
PERCPU_REF_ALLOW_REINIT = 1 << 2,
};
struct percpu_ref_data {
atomic_long_t count;
percpu_ref_func_t *release;
percpu_ref_func_t *confirm_switch;
bool force_atomic:1;
bool allow_reinit:1;
struct rcu_head rcu;
struct percpu_ref *ref;
};
struct percpu_ref {
/*
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
*/
unsigned long percpu_count_ptr;
/*
* 'percpu_ref' is often embedded into user structure, and only
* 'percpu_count_ptr' is required in fast path, move other fields
* into 'percpu_ref_data', so we can reduce memory footprint in
* fast path.
*/
struct percpu_ref_data *data;
};
int __must_check percpu_ref_init(struct percpu_ref *ref,
percpu_ref_func_t *release, unsigned int flags,
gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
bool percpu_ref_is_zero(struct percpu_ref *ref);
/**
* percpu_ref_kill - drop the initial ref
* @ref: percpu_ref to kill
*
* Must be used to drop the initial ref on a percpu refcount; must be called
* precisely once before shutdown.
*
* Switches @ref into atomic mode before gathering up the percpu counters
* and dropping the initial ref.
*
* There are no implied RCU grace periods between kill and release.
*/
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
percpu_ref_kill_and_confirm(ref, NULL);
}
/*
* Internal helper. Don't use outside percpu-refcount proper. The
* function doesn't return the pointer and let the caller test it for NULL
* because doing so forces the compiler to generate two conditional
* branches as it can't assume that @ref->percpu_count is not NULL.
*/
static inline bool __ref_is_percpu(struct percpu_ref *ref,
unsigned long __percpu **percpu_countp)
{
unsigned long percpu_ptr;
/*
* The value of @ref->percpu_count_ptr is tested for
* !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
* used as a pointer. If the compiler generates a separate fetch
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
*
* The dependency ordering from the READ_ONCE() pairs
* with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
/*
* Theoretically, the following could test just ATOMIC; however,
* then we'd have to mask off DEAD separately as DEAD may be
* visible without ATOMIC if we race with percpu_ref_kill(). DEAD
* implies ATOMIC anyway. Test them together.
*/
if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
return false;
*percpu_countp = (unsigned long __percpu *)percpu_ptr;
return true;
}
/**
* percpu_ref_get_many - increment a percpu refcount
* @ref: percpu_ref to get
* @nr: number of references to get
*
* Analogous to atomic_long_add().
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_add(*percpu_count, nr);
else
atomic_long_add(nr, &ref->data->count);
rcu_read_unlock();
}
/**
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
* Analogous to atomic_long_inc().
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline void percpu_ref_get(struct percpu_ref *ref)
{
percpu_ref_get_many(ref, 1);
}
/**
* percpu_ref_tryget_many - try to increment a percpu refcount
* @ref: percpu_ref to try-get
* @nr: number of references to get
*
* Increment a percpu refcount by @nr unless its count already reached zero.
* Returns %true on success; %false on failure.
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
unsigned long nr)
{
unsigned long __percpu *percpu_count;
bool ret;
rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_add(*percpu_count, nr);
ret = true;
} else {
ret = atomic_long_add_unless(&ref->data->count, nr, 0);
}
rcu_read_unlock();
return ret;
}
/**
* percpu_ref_tryget - try to increment a percpu refcount
* @ref: percpu_ref to try-get
*
* Increment a percpu refcount unless its count already reached zero.
* Returns %true on success; %false on failure.
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
return percpu_ref_tryget_many(ref, 1);
}
/**
* percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
* caller is responsible for taking RCU.
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count;
bool ret = false;
WARN_ON_ONCE(!rcu_read_lock_held());
if (likely(__ref_is_percpu(ref, &percpu_count))) {
this_cpu_inc(*percpu_count);
ret = true;
} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
ret = atomic_long_inc_not_zero(&ref->data->count);
}
return ret;
}
/**
* percpu_ref_tryget_live - try to increment a live percpu refcount
* @ref: percpu_ref to try-get
*
* Increment a percpu refcount unless it has already been killed. Returns
* %true on success; %false on failure.
*
* Completion of percpu_ref_kill() in itself doesn't guarantee that this
* function will fail. For such guarantee, percpu_ref_kill_and_confirm()
* should be used. After the confirm_kill callback is invoked, it's
* guaranteed that no new reference will be given out by
* percpu_ref_tryget_live().
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
bool ret = false;
rcu_read_lock();
ret = percpu_ref_tryget_live_rcu(ref);
rcu_read_unlock();
return ret;
}
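/*
 * Illustrative sketch (hypothetical names): a lookup path typically uses
 * percpu_ref_tryget_live() as a gate so that no new users appear once the
 * ref has been killed.  lookup_object() and 'ref' are made up here.
 *
 * obj = lookup_object(key);
 * if (obj && !percpu_ref_tryget_live(&obj->ref))
 * obj = NULL;	// already being torn down, refuse new users
 * if (obj) {
 * // use obj
 * percpu_ref_put(&obj->ref);
 * }
 */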
/**
* percpu_ref_put_many - decrement a percpu refcount
* @ref: percpu_ref to put
* @nr: number of references to put
*
* Decrement the refcount, and if 0, call the release function (which was passed
* to percpu_ref_init())
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_sub(*percpu_count, nr);
else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
ref->data->release(ref);
rcu_read_unlock();
}
/**
* percpu_ref_put - decrement a percpu refcount
* @ref: percpu_ref to put
*
* Decrement the refcount, and if 0, call the release function (which was passed
* to percpu_ref_init())
*
* This function is safe to call as long as @ref is between init and exit.
*/
static inline void percpu_ref_put(struct percpu_ref *ref)
{
percpu_ref_put_many(ref, 1);
}
/**
* percpu_ref_is_dying - test whether a percpu refcount is dying or dead
* @ref: percpu_ref to test
*
* Returns %true if @ref is dying or dead.
*
* This function is safe to call as long as @ref is between init and exit
* and the caller is responsible for synchronizing against state changes.
*/
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
*/
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/rculist.h>
#include <trace/events/rpm.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
{
return *(pm_callback_t *)(start + offset);
}
static pm_callback_t __rpm_get_driver_callback(struct device *dev,
size_t cb_offset)
{
if (dev->driver && dev->driver->pm)
return get_callback_ptr(dev->driver->pm, cb_offset);
return NULL;
}
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
const struct dev_pm_ops *ops;
pm_callback_t cb = NULL;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
else if (dev->type && dev->type->pm)
ops = dev->type->pm;
else if (dev->class && dev->class->pm)
ops = dev->class->pm;
else if (dev->bus && dev->bus->pm)
ops = dev->bus->pm;
else
ops = NULL;
if (ops)
cb = get_callback_ptr(ops, cb_offset);
if (!cb)
cb = __rpm_get_driver_callback(dev, cb_offset);
return cb;
}
#define RPM_GET_CALLBACK(dev, callback) \
__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
/**
* update_pm_runtime_accounting - Update the time accounting of power states
* @dev: Device to update the accounting for
*
* In order to be able to have time accounting of the various power states
* (as used by programs such as PowerTOP to show the effectiveness of runtime
* PM), we need to track the time spent in each state.
* update_pm_runtime_accounting must be called each time before the
* runtime_status field is updated, to account the time in the old state
* correctly.
*/
static void update_pm_runtime_accounting(struct device *dev)
{
u64 now, last, delta;
if (dev->power.disable_depth > 0)
return;
last = dev->power.accounting_timestamp;
now = ktime_get_mono_fast_ns();
dev->power.accounting_timestamp = now;
/*
* Because ktime_get_mono_fast_ns() is not monotonic during
* timekeeping updates, ensure that 'now' is after the last saved
* timestamp.
*/
if (now < last)
return;
delta = now - last;
if (dev->power.runtime_status == RPM_SUSPENDED)
dev->power.suspended_time += delta;
else
dev->power.active_time += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}
static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
u64 time;
unsigned long flags;
spin_lock_irqsave(&dev->power.lock, flags);
update_pm_runtime_accounting(dev);
time = suspended ? dev->power.suspended_time : dev->power.active_time;
spin_unlock_irqrestore(&dev->power.lock, flags);
return time;
}
u64 pm_runtime_active_time(struct device *dev)
{
return rpm_get_accounted_time(dev, false);
}
u64 pm_runtime_suspended_time(struct device *dev)
{
return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
* @dev: Device to handle.
*/
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
hrtimer_try_to_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
/**
* pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
* @dev: Device to handle.
*/
static void pm_runtime_cancel_pending(struct device *dev)
{
pm_runtime_deactivate_timer(dev);
/*
* In case there's a request pending, make sure its work function will
* return without doing anything.
*/
dev->power.request = RPM_REQ_NONE;
}
/*
* pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
* @dev: Device to handle.
*
* Compute the autosuspend-delay expiration time based on the device's
* power.last_busy time. If the delay has already expired or is disabled
* (negative) or the power.use_autosuspend flag isn't set, return 0.
* Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
*
* This function may be called either with or without dev->power.lock held.
* Either way it can be racy, since power.last_busy may be updated at any time.
*/
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
u64 expires;
if (!dev->power.use_autosuspend)
return 0;
autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
return 0;
expires = READ_ONCE(dev->power.last_busy);
expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
if (expires > ktime_get_mono_fast_ns())
return expires; /* Expires in the future */
return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
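/*
 * Illustrative sketch (not from this file; the 2000 ms delay is an arbitrary
 * example value): a driver that opts into autosuspend typically sets the
 * delay once and then marks the device busy before dropping its reference.
 * pm_runtime_autosuspend_expiration() above then computes when such a
 * request is actually allowed to suspend the device.
 *
 * pm_runtime_set_autosuspend_delay(dev, 2000);
 * pm_runtime_use_autosuspend(dev);
 * ...
 * pm_runtime_mark_last_busy(dev);
 * pm_runtime_put_autosuspend(dev);
 */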
static int dev_memalloc_noio(struct device *dev, void *data)
{
return dev->power.memalloc_noio;
}
/*
* pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
* @dev: Device to handle.
* @enable: True for setting the flag and False for clearing the flag.
*
* Set the flag for all devices in the path from the device to the
* root device in the device tree if @enable is true, otherwise clear
* the flag for devices in the path whose siblings don't set the flag.
*
* The function should only be called by a block device or network
* device driver to solve the deadlock problem during runtime
* resume/suspend:
*
* If memory allocation with GFP_KERNEL is called inside the runtime
* resume/suspend callback of any one of its ancestors (or the
* block device itself), a deadlock may be triggered inside the
* memory allocation, since it might not complete until the block
* device becomes active and the involved page I/O finishes. This
* situation was first pointed out by Alan Stern. Network devices
* are involved in iSCSI-type situations in the same way.
*
* dev_hotplug_mutex is held in this function to handle hotplug races,
* because pm_runtime_set_memalloc_noio() may be called during
* asynchronous probe().
*
* The function should be called between device_add() and device_del()
* on the affected device (block or network device).
*/
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
static DEFINE_MUTEX(dev_hotplug_mutex);
mutex_lock(&dev_hotplug_mutex);
for (;;) {
bool enabled;
/* hold power lock since bitfield is not SMP-safe. */
spin_lock_irq(&dev->power.lock);
enabled = dev->power.memalloc_noio;
dev->power.memalloc_noio = enable;
spin_unlock_irq(&dev->power.lock);
/*
* No need to enable the ancestors any more if the device
* was already enabled.
*/
if (enabled && enable)
break;
dev = dev->parent;
/*
* Clear the flag of the parent device only if none of its
* children has the flag set, because an ancestor's flag
* may have been set by any one of its descendants.
*/
if (!dev || (!enable &&
device_for_each_child(dev, NULL, dev_memalloc_noio)))
break;
}
mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
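/*
 * Illustrative sketch (hypothetical driver code; 'bdev' is made up): a block
 * or network device driver sets the flag after device_add() and clears it
 * before device_del(), as described in the comment above.
 *
 * err = device_add(&bdev->dev);
 * if (err)
 * return err;
 * pm_runtime_set_memalloc_noio(&bdev->dev, true);
 * ...
 * pm_runtime_set_memalloc_noio(&bdev->dev, false);
 * device_del(&bdev->dev);
 */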
/**
* rpm_check_suspend_allowed - Test whether a device may be suspended.
* @dev: Device to test.
*/
static int rpm_check_suspend_allowed(struct device *dev)
{
int retval = 0;
if (dev->power.runtime_error)
retval = -EINVAL;
else if (dev->power.disable_depth > 0)
retval = -EACCES;
else if (atomic_read(&dev->power.usage_count))
retval = -EAGAIN;
else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */
else if ((dev->power.deferred_resume &&
dev->power.runtime_status == RPM_SUSPENDING) ||
(dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
else if (__dev_pm_qos_resume_latency(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
return retval;
}
static int rpm_get_suppliers(struct device *dev)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held()) {
int retval;
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
/* Ignore suppliers with disabled runtime PM. */
if (retval < 0 && retval != -EACCES) {
pm_runtime_put_noidle(link->supplier);
return retval;
}
refcount_inc(&link->rpm_active);
}
return 0;
}
/**
* pm_runtime_release_supplier - Drop references to device link's supplier.
* @link: Target device link.
*
* Drop all runtime PM references associated with @link to its supplier device.
*/
void pm_runtime_release_supplier(struct device_link *link)
{
struct device *supplier = link->supplier;
/*
* The additional power.usage_count check is a safety net in case
* the rpm_active refcount becomes saturated, in which case
* refcount_dec_not_one() would return true forever, but it is not
* strictly necessary.
*/
while (refcount_dec_not_one(&link->rpm_active) &&
atomic_read(&supplier->power.usage_count) > 0)
pm_runtime_put_noidle(supplier);
}
static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held()) {
pm_runtime_release_supplier(link);
if (try_to_suspend)
pm_request_idle(link->supplier);
}
}
static void rpm_put_suppliers(struct device *dev)
{
__rpm_put_suppliers(dev, true);
}
static void rpm_suspend_suppliers(struct device *dev)
{
struct device_link *link;
int idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
pm_request_idle(link->supplier);
device_links_read_unlock(idx);
}
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
* @dev: Device to run the callback for.
*/
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval = 0, idx;
bool use_links = dev->power.links_count > 0;
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
} else {
spin_unlock_irq(&dev->power.lock);
/*
* Resume suppliers if necessary.
*
* The device's runtime PM status cannot change until this
* routine returns, so it is safe to read the status outside of
* the lock.
*/
if (use_links && dev->power.runtime_status == RPM_RESUMING) {
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
if (retval) {
rpm_put_suppliers(dev);
goto fail;
}
device_links_read_unlock(idx);
}
}
if (cb)
retval = cb(dev);
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
} else {
/*
* If the device is suspending and the callback has returned
* success, drop the usage counters of the suppliers that have
* been reference counted on its resume.
*
* Do that if resume fails too.
*/
if (use_links &&
((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
(dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
__rpm_put_suppliers(dev, false);
fail:
device_links_read_unlock(idx);
}
spin_lock_irq(&dev->power.lock);
}
return retval;
}
/**
* rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
* @dev: Device to run the callback for.
*/
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
int retval;
if (dev->power.memalloc_noio) {
unsigned int noio_flag;
/*
* Deadlock might be caused if memory allocation with
* GFP_KERNEL happens inside runtime_suspend and
* runtime_resume callbacks of one block device's
* ancestor or the block device itself. Network
* device might be thought as part of iSCSI block
* device, so network device and its ancestor should
* be marked as memalloc_noio too.
*/
noio_flag = memalloc_noio_save();
retval = __rpm_callback(cb, dev);
memalloc_noio_restore(noio_flag);
} else {
retval = __rpm_callback(cb, dev);
}
/*
* Since -EACCES means that runtime PM is disabled for the given device,
* it should not be returned by runtime PM callbacks. If it is returned
* nevertheless, assume it to be a transient error and convert it to
* -EAGAIN.
*/
if (retval == -EACCES)
retval = -EAGAIN;
if (retval != -EAGAIN && retval != -EBUSY)
dev->power.runtime_error = retval;
return retval;
}
/**
* rpm_idle - Notify device bus type if the device can be suspended.
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
*
* Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
* run the ->runtime_idle() callback directly. If the ->runtime_idle callback
* doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static int rpm_idle(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
int retval;
trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
else if ((rpmflags & RPM_GET_PUT) && retval == 1)
; /* put() is allowed in RPM_SUSPENDED */
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
/*
* Any pending request other than an idle notification takes
* precedence over us, except that the timer may be running.
*/
else if (dev->power.request_pending &&
dev->power.request > RPM_REQ_IDLE)
retval = -EAGAIN;
/* Act as though RPM_NOWAIT is always set. */
else if (dev->power.idle_notification)
retval = -EINPROGRESS;
if (retval)
goto out;
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
callback = RPM_GET_CALLBACK(dev, runtime_idle);
/* If no callback assume success. */
if (!callback || dev->power.no_callbacks)
goto out;
/* Carry out an asynchronous or a synchronous idle notification. */
if (rpmflags & RPM_ASYNC) {
dev->power.request = RPM_REQ_IDLE;
if (!dev->power.request_pending) {
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
}
dev->power.idle_notification = true;
if (dev->power.irq_safe)
spin_unlock(&dev->power.lock);
else
spin_unlock_irq(&dev->power.lock);
retval = callback(dev);
if (dev->power.irq_safe)
spin_lock(&dev->power.lock);
else
spin_lock_irq(&dev->power.lock);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
* Check if the device's runtime PM status allows it to be suspended.
* Cancel a pending idle notification, autosuspend or suspend. If
* another suspend has been started earlier, either return immediately
* or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
* flags. If the RPM_ASYNC flag is set then queue a suspend request;
* otherwise run the ->runtime_suspend() callback directly. If the callback
* succeeded and a deferred resume was requested while it was running, carry
* out that resume; otherwise send an idle notification for the device's
* parent (if the suspend succeeded and both ignore_children of parent->power
* and irq_safe of dev->power are not set).
* If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
* flag is set and the next autosuspend-delay expiration time is in the
* future, schedule another autosuspend attempt.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static int rpm_suspend(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int (*callback)(struct device *);
struct device *parent = NULL;
int retval;
trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
goto out; /* Conditions are wrong. */
/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
retval = -EAGAIN;
if (retval)
goto out;
/* If the autosuspend_delay time hasn't expired yet, reschedule. */
if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
u64 expires = pm_runtime_autosuspend_expiration(dev);
if (expires != 0) {
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
/*
* Optimization: If the timer is already running and is
* set to expire at or before the autosuspend delay,
* avoid the overhead of resetting it. Just let it
* expire; pm_suspend_timer_fn() will take care of the
* rest.
*/
if (!(dev->power.timer_expires &&
dev->power.timer_expires <= expires)) {
/*
* We add a slack of 25% to gather wakeups
* without sacrificing the granularity.
*/
u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
(NSEC_PER_MSEC >> 2);
dev->power.timer_expires = expires;
hrtimer_start_range_ns(&dev->power.suspend_timer,
ns_to_ktime(expires),
slack,
HRTIMER_MODE_ABS);
}
dev->power.timer_autosuspends = 1;
goto out;
}
}
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
if (dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
retval = -EINPROGRESS;
goto out;
}
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
cpu_relax();
spin_lock(&dev->power.lock);
goto repeat;
}
/* Wait for the other suspend running in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_SUSPENDING)
break;
spin_unlock_irq(&dev->power.lock);
schedule();
spin_lock_irq(&dev->power.lock);
}
finish_wait(&dev->power.wait_queue, &wait);
goto repeat;
}
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
/* Carry out an asynchronous or a synchronous suspend. */
if (rpmflags & RPM_ASYNC) {
dev->power.request = (rpmflags & RPM_AUTO) ?
RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
if (!dev->power.request_pending) {
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
goto out;
}
__update_runtime_status(dev, RPM_SUSPENDING);
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
dev_pm_enable_wake_irq_complete(dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
if (dev->parent) {
parent = dev->parent;
atomic_add_unless(&parent->power.child_count, -1, 0);
}
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
dev->power.deferred_resume = false;
rpm_resume(dev, 0);
retval = -EAGAIN;
goto out;
}
if (dev->power.irq_safe)
goto out;
/* Maybe the parent is now able to suspend. */
if (parent && !parent->power.ignore_children) {
spin_unlock(&dev->power.lock);
spin_lock(&parent->power.lock);
rpm_idle(parent, RPM_ASYNC);
spin_unlock(&parent->power.lock);
spin_lock(&dev->power.lock);
}
/* Maybe the suppliers are now able to suspend. */
if (dev->power.links_count > 0) {
spin_unlock_irq(&dev->power.lock);
rpm_suspend_suppliers(dev);
spin_lock_irq(&dev->power.lock);
}
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
fail:
dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
/*
* On transient errors, if the callback routine failed an autosuspend,
* and if the last_busy time has been updated so that there is a new
* autosuspend expiration time, automatically reschedule another
* autosuspend.
*/
if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
pm_runtime_autosuspend_expiration(dev) != 0)
goto repeat;
pm_runtime_cancel_pending(dev);
goto out;
}
/**
* rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
* Check if the device's runtime PM status allows it to be resumed. Cancel
* any scheduled or pending requests. If another resume has been started
* earlier, either return immediately or wait for it to finish, depending on the
* RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
* parallel with this function, either tell the other process to resume after
* suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
* flag is set then queue a resume request; otherwise run the
* ->runtime_resume() callback directly. Queue an idle notification for the
* device if the resume succeeded.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static int rpm_resume(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int (*callback)(struct device *);
struct device *parent = NULL;
int retval = 0;
trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error) {
retval = -EINVAL;
} else if (dev->power.disable_depth > 0) {
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
else if (rpmflags & RPM_TRANSPARENT)
goto out;
else
retval = -EACCES;
}
if (retval)
goto out;
/*
* Other scheduled or pending requests need to be canceled. Small
* optimization: If an autosuspend timer is running, leave it running
* rather than cancelling it now only to restart it again in the near
* future.
*/
dev->power.request = RPM_REQ_NONE;
if (!dev->power.timer_autosuspends)
pm_runtime_deactivate_timer(dev);
if (dev->power.runtime_status == RPM_ACTIVE) {
retval = 1;
goto out;
}
if (dev->power.runtime_status == RPM_RESUMING ||
dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
if (dev->power.runtime_status == RPM_SUSPENDING) {
dev->power.deferred_resume = true;
if (rpmflags & RPM_NOWAIT)
retval = -EINPROGRESS;
} else {
retval = -EINPROGRESS;
}
goto out;
}
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
cpu_relax();
spin_lock(&dev->power.lock);
goto repeat;
}
/* Wait for the operation carried out in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_RESUMING &&
dev->power.runtime_status != RPM_SUSPENDING)
break;
spin_unlock_irq(&dev->power.lock);
schedule();
spin_lock_irq(&dev->power.lock);
}
finish_wait(&dev->power.wait_queue, &wait);
goto repeat;
}
/*
* See if we can skip waking up the parent. This is safe only if
* power.no_callbacks is set, because otherwise we don't know whether
* the resume will actually succeed.
*/
if (dev->power.no_callbacks && !parent && dev->parent) {
spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
if (dev->parent->power.disable_depth > 0 ||
dev->parent->power.ignore_children ||
dev->parent->power.runtime_status == RPM_ACTIVE) {
atomic_inc(&dev->parent->power.child_count);
spin_unlock(&dev->parent->power.lock);
retval = 1;
goto no_callback; /* Assume success. */
}
spin_unlock(&dev->parent->power.lock);
}
/* Carry out an asynchronous or a synchronous resume. */
if (rpmflags & RPM_ASYNC) {
dev->power.request = RPM_REQ_RESUME;
if (!dev->power.request_pending) {
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
retval = 0;
goto out;
}
if (!parent && dev->parent) {
/*
* Increment the parent's usage counter and resume it if
* necessary. Not needed if dev is irq-safe; then the
* parent is permanently resumed.
*/
parent = dev->parent;
if (dev->power.irq_safe)
goto skip_parent;
spin_unlock(&dev->power.lock);
pm_runtime_get_noresume(parent);
spin_lock(&parent->power.lock);
/*
* Resume the parent if it has runtime PM enabled and not been
* set to ignore its children.
*/
if (!parent->power.disable_depth &&
!parent->power.ignore_children) {
rpm_resume(parent, 0);
if (parent->power.runtime_status != RPM_ACTIVE)
retval = -EBUSY;
}
spin_unlock(&parent->power.lock);
spin_lock(&dev->power.lock);
if (retval)
goto out;
goto repeat;
}
skip_parent:
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
__update_runtime_status(dev, RPM_RESUMING);
callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
dev_pm_enable_wake_irq_check(dev, false);
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
pm_runtime_mark_last_busy(dev);
if (parent)
atomic_inc(&parent->power.child_count);
}
wake_up_all(&dev->power.wait_queue);
if (retval >= 0)
rpm_idle(dev, RPM_ASYNC);
out:
if (parent && !dev->power.irq_safe) {
spin_unlock_irq(&dev->power.lock);
pm_runtime_put(parent);
spin_lock_irq(&dev->power.lock);
}
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
/**
* pm_runtime_work - Universal runtime PM work function.
* @work: Work structure used for scheduling the execution of this function.
*
* Use @work to get the device object the work is to be done for, determine what
* is to be done and execute the appropriate runtime PM function.
*/
static void pm_runtime_work(struct work_struct *work)
{
struct device *dev = container_of(work, struct device, power.work);
enum rpm_request req;
spin_lock_irq(&dev->power.lock);
if (!dev->power.request_pending)
goto out;
req = dev->power.request;
dev->power.request = RPM_REQ_NONE;
dev->power.request_pending = false;
switch (req) {
case RPM_REQ_NONE:
break;
case RPM_REQ_IDLE:
rpm_idle(dev, RPM_NOWAIT);
break;
case RPM_REQ_SUSPEND:
rpm_suspend(dev, RPM_NOWAIT);
break;
case RPM_REQ_AUTOSUSPEND:
rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
break;
case RPM_REQ_RESUME:
rpm_resume(dev, RPM_NOWAIT);
break;
}
out:
spin_unlock_irq(&dev->power.lock);
}
/**
* pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
* @timer: hrtimer used by pm_schedule_suspend().
*
* Check if the time is right and queue a suspend request.
*/
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
struct device *dev = container_of(timer, struct device, power.suspend_timer);
unsigned long flags;
u64 expires;
spin_lock_irqsave(&dev->power.lock, flags);
expires = dev->power.timer_expires;
/*
* If 'expires' is after the current time, we've been called
* too early.
*/
if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
}
spin_unlock_irqrestore(&dev->power.lock, flags);
return HRTIMER_NORESTART;
}
/**
* pm_schedule_suspend - Set up a timer to submit a suspend request in future.
* @dev: Device to suspend.
* @delay: Time to wait before submitting a suspend request, in milliseconds.
*/
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
u64 expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
if (!delay) {
retval = rpm_suspend(dev, RPM_ASYNC);
goto out;
}
retval = rpm_check_suspend_allowed(dev);
if (retval)
goto out;
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
dev->power.timer_expires = expires;
dev->power.timer_autosuspends = 0;
hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
static int rpm_drop_usage_count(struct device *dev)
{
int ret;
ret = atomic_sub_return(1, &dev->power.usage_count);
if (ret >= 0)
return ret;
/*
* Because rpm_resume() does not check the usage counter, it will resume
* the device even if the usage counter is 0 or negative, so it is
* sufficient to increment the usage counter here to reverse the change
* made above.
*/
atomic_inc(&dev->power.usage_count);
dev_warn(dev, "Runtime PM usage count underflow!\n");
return -EINVAL;
}
/**
* __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
* return immediately if it is larger than zero (if it becomes negative, log a
* warning, increment it, and return an error). Then carry out an idle
* notification, either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
* or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
if (rpmflags & RPM_GET_PUT) {
retval = rpm_drop_usage_count(dev);
if (retval < 0) {
return retval;
} else if (retval > 0) {
trace_rpm_usage(dev, rpmflags);
return 0;
}
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_idle(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
* __pm_runtime_suspend - Entry point for runtime put/suspend operations.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
* return immediately if it is larger than zero (if it becomes negative, log a
* warning, increment it, and return an error). Then carry out a suspend,
* either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
* or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
if (rpmflags & RPM_GET_PUT) {
retval = rpm_drop_usage_count(dev);
if (retval < 0) {
return retval;
} else if (retval > 0) {
trace_rpm_usage(dev, rpmflags);
return 0;
}
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
* __pm_runtime_resume - Entry point for runtime resume operations.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, increment the device's usage count. Then
* carry out a resume, either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
* or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
dev->power.runtime_status != RPM_ACTIVE);
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_resume(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
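/*
 * Illustrative sketch (hypothetical driver code): the exported entry points
 * above are normally reached through the pm_runtime_get_sync()/pm_runtime_put()
 * wrappers, e.g. around a hardware access:
 *
 * ret = pm_runtime_get_sync(dev);
 * if (ret < 0) {
 * pm_runtime_put_noidle(dev);
 * return ret;
 * }
 * ... do the hardware access ...
 * pm_runtime_mark_last_busy(dev);
 * pm_runtime_put(dev);
 */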
/**
* pm_runtime_get_conditional - Conditionally bump up device usage counter.
* @dev: Device to handle.
* @ign_usage_count: Whether or not to look at the current usage counter value.
*
* Return -EINVAL if runtime PM is disabled for @dev.
*
* Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
* is set, or (2) @dev is not ignoring children and its active child count is
* nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
* the usage counter of @dev and return 1.
*
* Otherwise, return 0 without changing the usage counter.
*
* If @ign_usage_count is %true, this function can be used to prevent suspending
* the device when its runtime PM status is %RPM_ACTIVE.
*
* If @ign_usage_count is %false, this function can be used to prevent
* suspending the device when both its runtime PM status is %RPM_ACTIVE and its
* runtime PM usage counter is not zero.
*
* The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
if (dev->power.disable_depth > 0) {
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
} else if (ign_usage_count || (!dev->power.ignore_children &&
atomic_read(&dev->power.child_count) > 0)) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
retval = atomic_inc_not_zero(&dev->power.usage_count);
}
trace_rpm_usage(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
/**
* pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
* in active state
* @dev: Target device.
*
* Increment the runtime PM usage counter of @dev if its runtime PM status is
* %RPM_ACTIVE, in which case it returns 1. If the device is in a different
* state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
* device, in which case also the usage_count will remain unmodified.
*/
int pm_runtime_get_if_active(struct device *dev)
{
return pm_runtime_get_conditional(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
* pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
* @dev: Target device.
*
* Increment the runtime PM usage counter of @dev if its runtime PM status is
* %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
* ignoring children and its active child count is nonzero. 1 is returned in
* this case.
*
* If @dev is in a different state or it is not in use (that is, its usage
* counter is 0, or it is ignoring children, or its active child count is 0),
* 0 is returned.
*
* -EINVAL is returned if runtime PM is disabled for the device, in which case
* also the usage counter of @dev is not updated.
*/
int pm_runtime_get_if_in_use(struct device *dev)
{
return pm_runtime_get_conditional(dev, false);
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
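/*
 * Illustrative sketch (hypothetical interrupt handler): a context that only
 * wants to touch the hardware if the device is already active and in use,
 * without ever resuming it:
 *
 * if (pm_runtime_get_if_in_use(dev) <= 0)
 * return IRQ_NONE;	// suspended, not in use, or runtime PM disabled
 * ... access registers ...
 * pm_runtime_put(dev);
 */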
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
*
* If runtime PM of the device is disabled or its power.runtime_error field is
* different from zero, the status may be changed either to RPM_ACTIVE, or to
* RPM_SUSPENDED, as long as that reflects the actual state of the device.
* However, if the device has a parent and the parent is not active, and the
* parent's power.ignore_children flag is unset, the device's status cannot be
* set to RPM_ACTIVE, so -EBUSY is returned in that case.
*
* If successful, __pm_runtime_set_status() clears the power.runtime_error field
* and the device parent's counter of unsuspended children is modified to
* reflect the new status. If the new status is RPM_SUSPENDED, an idle
* notification request for the parent is submitted.
*
* If @dev has any suppliers (as reflected by device links to them), and @status
* is RPM_ACTIVE, they will be activated upfront and if the activation of one
* of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
* of the @status value) and the suppliers will be deactivated on exit. The
* error returned by the failing supplier activation will be returned in that
* case.
*/
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
struct device *parent = dev->parent;
bool notify_parent = false;
unsigned long flags;
int error = 0;
if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
return -EINVAL;
spin_lock_irqsave(&dev->power.lock, flags);
/*
* Prevent PM-runtime from being enabled for the device or return an
* error if it is enabled already and working.
*/
if (dev->power.runtime_error || dev->power.disable_depth)
dev->power.disable_depth++;
else
error = -EAGAIN;
spin_unlock_irqrestore(&dev->power.lock, flags);
if (error)
return error;
/*
* If the new status is RPM_ACTIVE, the suppliers can be activated
* upfront regardless of the current status, because next time
* rpm_put_suppliers() runs, the rpm_active refcounts of the links
* involved will be dropped down to one anyway.
*/
if (status == RPM_ACTIVE) {
int idx = device_links_read_lock();
error = rpm_get_suppliers(dev);
if (error)
status = RPM_SUSPENDED;
device_links_read_unlock(idx);
}
spin_lock_irqsave(&dev->power.lock, flags);
if (dev->power.runtime_status == status || !parent)
goto out_set;
if (status == RPM_SUSPENDED) {
atomic_add_unless(&parent->power.child_count, -1, 0);
notify_parent = !parent->power.ignore_children;
} else {
spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
/*
* It is invalid to put an active child under a parent that is
* not active, has runtime PM enabled and the
* 'power.ignore_children' flag unset.
*/
if (!parent->power.disable_depth &&
!parent->power.ignore_children &&
parent->power.runtime_status != RPM_ACTIVE) {
dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
dev_name(dev),
dev_name(parent));
error = -EBUSY;
} else if (dev->power.runtime_status == RPM_SUSPENDED) {
atomic_inc(&parent->power.child_count);
}
spin_unlock(&parent->power.lock);
if (error) {
status = RPM_SUSPENDED;
goto out;
}
}
out_set:
__update_runtime_status(dev, status);
if (!error)
dev->power.runtime_error = 0;
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
if (notify_parent)
pm_request_idle(parent);
if (status == RPM_SUSPENDED) {
int idx = device_links_read_lock();
rpm_put_suppliers(dev);
device_links_read_unlock(idx);
}
pm_runtime_enable(dev);
return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
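/*
 * Illustrative sketch (hypothetical probe code): __pm_runtime_set_status() is
 * normally reached through the pm_runtime_set_active()/pm_runtime_set_suspended()
 * wrappers, e.g. when the hardware is known to be powered up at probe time:
 *
 * err = pm_runtime_set_active(dev);
 * if (err)
 * return err;
 * pm_runtime_enable(dev);
 */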
/**
* __pm_runtime_barrier - Cancel pending requests and wait for completions.
* @dev: Device to handle.
*
* Flush all pending requests for the device from pm_wq and wait for all
* runtime PM operations involving the device in progress to complete.
*
* Should be called under dev->power.lock with interrupts disabled.
*/
static void __pm_runtime_barrier(struct device *dev)
{
pm_runtime_deactivate_timer(dev);
if (dev->power.request_pending) {
dev->power.request = RPM_REQ_NONE;
spin_unlock_irq(&dev->power.lock);
cancel_work_sync(&dev->power.work);
spin_lock_irq(&dev->power.lock);
dev->power.request_pending = false;
}
if (dev->power.runtime_status == RPM_SUSPENDING ||
dev->power.runtime_status == RPM_RESUMING ||
dev->power.idle_notification) {
DEFINE_WAIT(wait);
/* Suspend, wake-up or idle notification in progress. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_SUSPENDING
&& dev->power.runtime_status != RPM_RESUMING
&& !dev->power.idle_notification)
break;
spin_unlock_irq(&dev->power.lock);
schedule();
spin_lock_irq(&dev->power.lock);
}
finish_wait(&dev->power.wait_queue, &wait);
}
}
/**
* pm_runtime_barrier - Flush pending requests and wait for completions.
* @dev: Device to handle.
*
* Prevent the device from being suspended by incrementing its usage counter and
* if there's a pending resume request for the device, wake the device up.
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
*
* Return value:
* 1, if there was a resume request pending and the device had to be woken up,
* 0, otherwise
*/
int pm_runtime_barrier(struct device *dev)
{
int retval = 0;
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME) {
rpm_resume(dev, 0);
retval = 1;
}
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
bool pm_runtime_block_if_disabled(struct device *dev)
{
bool ret;
spin_lock_irq(&dev->power.lock);
ret = !pm_runtime_enabled(dev);
if (ret && dev->power.last_status == RPM_INVALID)
dev->power.last_status = RPM_BLOCKED;
spin_unlock_irq(&dev->power.lock);
return ret;
}
void pm_runtime_unblock(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
if (dev->power.last_status == RPM_BLOCKED)
dev->power.last_status = RPM_INVALID;
spin_unlock_irq(&dev->power.lock);
}
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
if (dev->power.disable_depth > 0) {
dev->power.disable_depth++;
goto out;
}
/*
* Wake up the device if there's a resume request pending, because that
* means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O.
*/
if (check_resume && dev->power.request_pending &&
dev->power.request == RPM_REQ_RESUME) {
/*
* Prevent suspends and idle notifications from being carried
* out after we have woken up the device.
*/
pm_runtime_get_noresume(dev);
rpm_resume(dev, 0);
pm_runtime_put_noidle(dev);
}
/* Update time accounting before disabling PM-runtime. */
update_pm_runtime_accounting(dev);
if (!dev->power.disable_depth++) {
__pm_runtime_barrier(dev);
dev->power.last_status = dev->power.runtime_status;
}
out:
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
* pm_runtime_enable - Enable runtime PM of a device.
* @dev: Device to handle.
*/
void pm_runtime_enable(struct device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->power.lock, flags);
if (!dev->power.disable_depth) {
dev_warn(dev, "Unbalanced %s!\n", __func__);
goto out;
}
if (--dev->power.disable_depth > 0)
goto out;
if (dev->power.last_status == RPM_BLOCKED) {
dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
dump_stack();
}
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
if (dev->power.runtime_status == RPM_SUSPENDED &&
!dev->power.ignore_children &&
atomic_read(&dev->power.child_count) > 0)
dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_set_suspended_action(void *data)
{
pm_runtime_set_suspended(data);
}
/**
* devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
*
* @dev: Device to handle.
*/
int devm_pm_runtime_set_active_enabled(struct device *dev)
{
int err;
err = pm_runtime_set_active(dev);
if (err)
return err;
err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
if (err)
return err;
return devm_pm_runtime_enable(dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
/**
* devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
*
* NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
* you at driver exit time if needed.
*
* @dev: Device to handle.
*/
int devm_pm_runtime_enable(struct device *dev)
{
pm_runtime_enable(dev);
return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
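/*
 * Illustrative sketch, not part of this file: how a hypothetical driver probe
 * might combine autosuspend configuration with the devres-managed enable
 * above. foo_probe() and the 500 ms delay are made-up example values.
 */
static int foo_probe(struct device *dev)
{
	int ret;

	/* Configure autosuspend before runtime PM is enabled. */
	pm_runtime_set_autosuspend_delay(dev, 500);
	pm_runtime_use_autosuspend(dev);

	/*
	 * pm_runtime_disable() and pm_runtime_dont_use_autosuspend() run
	 * automatically on driver unbind thanks to the devres action above.
	 */
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	return 0;
}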
static void pm_runtime_put_noidle_action(void *data)
{
pm_runtime_put_noidle(data);
}
/**
* devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
*
* @dev: Device to handle.
*/
int devm_pm_runtime_get_noresume(struct device *dev)
{
pm_runtime_get_noresume(dev);
return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
* Increase the device's usage count and clear its power.runtime_auto flag,
* so that it cannot be suspended at run time until pm_runtime_allow() is called
* for it.
*/
void pm_runtime_forbid(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
if (!dev->power.runtime_auto)
goto out;
dev->power.runtime_auto = false;
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
out:
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
* pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
* Decrease the device's usage count and set its power.runtime_auto flag.
*/
void pm_runtime_allow(struct device *dev)
{
int ret;
spin_lock_irq(&dev->power.lock);
if (dev->power.runtime_auto)
goto out;
dev->power.runtime_auto = true;
ret = rpm_drop_usage_count(dev);
if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
else if (ret > 0)
trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
/**
* pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
* @dev: Device to handle.
*
* Set the power.no_callbacks flag, which tells the PM core that this
* device is power-managed through its parent and has no runtime PM
* callbacks of its own. The runtime sysfs attributes will be removed.
*/
void pm_runtime_no_callbacks(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.no_callbacks = 1;
spin_unlock_irq(&dev->power.lock);
if (device_is_registered(dev))
rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
/**
* pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
* @dev: Device to handle
*
* Set the power.irq_safe flag, which tells the PM core that the
* ->runtime_suspend() and ->runtime_resume() callbacks for this device should
* always be invoked with the spinlock held and interrupts disabled. It also
* causes the parent's usage counter to be permanently incremented, preventing
* the parent from runtime suspending -- otherwise an irq-safe child might have
* to wait for a non-irq-safe parent.
*/
void pm_runtime_irq_safe(struct device *dev)
{
if (dev->parent)
pm_runtime_get_sync(dev->parent);
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 1;
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
/**
* update_autosuspend - Handle a change to a device's autosuspend settings.
* @dev: Device to handle.
* @old_delay: The former autosuspend_delay value.
* @old_use: The former use_autosuspend value.
*
* Prevent runtime suspend if the new delay is negative and use_autosuspend is
* set; otherwise allow it. Send an idle notification if suspends are allowed.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
int delay = dev->power.autosuspend_delay;
/* Should runtime suspend be prevented now? */
if (dev->power.use_autosuspend && delay < 0) {
/* If it used to be allowed then prevent it. */
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
} else {
trace_rpm_usage(dev, 0);
}
}
/* Runtime suspend should be allowed now. */
else {
/* If it used to be prevented then allow it. */
if (old_use && old_delay < 0)
atomic_dec(&dev->power.usage_count);
/* Maybe we can autosuspend now. */
rpm_idle(dev, RPM_AUTO);
}
}
/**
* pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
* @dev: Device to handle.
* @delay: Value of the new delay in milliseconds.
*
* Set the device's power.autosuspend_delay value. If it changes to negative
* and the power.use_autosuspend flag is set, prevent runtime suspends. If it
* changes the other way, allow runtime suspends.
*/
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
int old_delay, old_use;
spin_lock_irq(&dev->power.lock);
old_delay = dev->power.autosuspend_delay;
old_use = dev->power.use_autosuspend;
dev->power.autosuspend_delay = delay;
update_autosuspend(dev, old_delay, old_use);
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
/**
* __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
* @dev: Device to handle.
* @use: New value for use_autosuspend.
*
* Set the device's power.use_autosuspend flag, and allow or prevent runtime
* suspends as needed.
*/
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
int old_delay, old_use;
spin_lock_irq(&dev->power.lock);
old_delay = dev->power.autosuspend_delay;
old_use = dev->power.use_autosuspend;
dev->power.use_autosuspend = use;
update_autosuspend(dev, old_delay, old_use);
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
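/*
 * Illustrative sketch, not part of this file: the usual autosuspend pattern
 * in a driver I/O path once use_autosuspend and a delay have been configured
 * with the helpers above. foo_xfer() is a made-up driver operation.
 */
static int foo_xfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access the hardware here ... */

	/* Restart the autosuspend delay and drop the usage count. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}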
/**
* pm_runtime_init - Initialize runtime PM fields in given device object.
* @dev: Device object to initialize.
*/
void pm_runtime_init(struct device *dev)
{
dev->power.runtime_status = RPM_SUSPENDED;
dev->power.last_status = RPM_INVALID;
dev->power.idle_notification = false;
dev->power.disable_depth = 1;
atomic_set(&dev->power.usage_count, 0);
dev->power.runtime_error = 0;
atomic_set(&dev->power.child_count, 0);
pm_suspend_ignore_children(dev, false);
dev->power.runtime_auto = true;
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
dev->power.needs_force_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
/**
* pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
* @dev: Device object to re-initialize.
*/
void pm_runtime_reinit(struct device *dev)
{
if (!pm_runtime_enabled(dev)) {
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe) {
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 0;
spin_unlock_irq(&dev->power.lock);
if (dev->parent)
pm_runtime_put(dev->parent);
}
}
/*
* Clear power.needs_force_resume in case it has been set by
* pm_runtime_force_suspend() invoked from a driver remove callback.
*/
dev->power.needs_force_resume = false;
}
/**
* pm_runtime_remove - Prepare for removing a device from device hierarchy.
* @dev: Device object being removed from device hierarchy.
*/
void pm_runtime_remove(struct device *dev)
{
__pm_runtime_disable(dev, false);
pm_runtime_reinit(dev);
}
/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_get_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
dev_for_each_link_to_supplier(link, dev)
if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
}
device_links_read_unlock(idx);
}
/**
* pm_runtime_put_suppliers - Drop references to supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
pm_runtime_put(link->supplier);
}
device_links_read_unlock(idx);
}
void pm_runtime_new_link(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.links_count++;
spin_unlock_irq(&dev->power.lock);
}
static void pm_runtime_drop_link_count(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
dev->power.links_count--;
spin_unlock_irq(&dev->power.lock);
}
/**
* pm_runtime_drop_link - Prepare for device link removal.
* @link: Device link going away.
*
* Drop the link count of the consumer end of @link and decrement the supplier
* device's runtime PM usage counter as many times as needed to drop all of the
* runtime PM references to it taken on behalf of the consumer.
*/
void pm_runtime_drop_link(struct device_link *link)
{
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
pm_runtime_release_supplier(link);
pm_request_idle(link->supplier);
}
static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
/*
* Setting power.strict_midlayer means that the middle layer
* code does not want its runtime PM callbacks to be invoked via
* pm_runtime_force_suspend() and pm_runtime_force_resume(), so
* return a direct pointer to the driver callback in that case.
*/
if (dev_pm_strict_midlayer_is_set(dev))
return __rpm_get_driver_callback(dev, cb_offset);
return __rpm_get_callback(dev, cb_offset);
}
#define GET_CALLBACK(dev, callback) \
get_callback(dev, offsetof(struct dev_pm_ops, callback))
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
* Disable runtime PM so we safely can check the device's runtime PM status and
* if it is active, invoke its ->runtime_suspend callback to suspend it and
* change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
* usage and children counters don't indicate that the device was in use before
* the system-wide transition under way, decrement its parent's children counter
* (if there is a parent). Keep runtime PM disabled to preserve the state
* unless we encounter errors.
*
* Typically this function may be invoked from a system suspend callback to make
* sure the device is put into a low-power state. It should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
*/
int pm_runtime_force_suspend(struct device *dev)
{
int (*callback)(struct device *);
int ret;
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
return 0;
callback = GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
if (ret)
goto err;
dev_pm_enable_wake_irq_complete(dev);
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
* its parent and the usage counters of its suppliers. Otherwise, set
* power.needs_force_resume to let pm_runtime_force_resume() know that
* the device needs to be taken care of and to prevent this function
* from handling the device again in case the device is passed to it
* once more subsequently.
*/
if (pm_runtime_need_not_resume(dev))
pm_runtime_set_suspended(dev);
else
dev->power.needs_force_resume = true;
return 0;
err:
dev_pm_disable_wake_irq_check(dev, true);
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
#ifdef CONFIG_PM_SLEEP
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
* This function expects that either pm_runtime_force_suspend() has put the
* device into a low-power state prior to calling it, or the device had been
* runtime-suspended before the preceding system-wide suspend transition and it
* was left in suspend during that transition.
*
* The actions carried out by pm_runtime_force_suspend(), or by a runtime
* suspend in general, are reversed and the device is brought back into full
* power if it is expected to be used on system resume, which is the case when
* its needs_force_resume flag is set or when its smart_suspend flag is set and
* its runtime PM status is "active".
*
* In other cases, the resume is deferred to be managed via runtime PM.
*
* Typically, this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
pm_runtime_status_suspended(dev)))
goto out;
callback = GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
dev_pm_enable_wake_irq_check(dev, false);
goto out;
}
pm_runtime_mark_last_busy(dev);
out:
/*
* The smart_suspend flag can be cleared here because it is not going
* to be necessary until the next system-wide suspend transition that
* will update it again.
*/
dev->power.smart_suspend = false;
/*
* Also clear needs_force_resume to make this function skip devices that
* have been seen by it once.
*/
dev->power.needs_force_resume = false;
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
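/*
 * Illustrative sketch, not part of this file: the typical way a driver wires
 * the two helpers above into its dev_pm_ops so that its runtime PM callbacks
 * also serve the system-wide suspend/resume path. foo_runtime_suspend() and
 * foo_runtime_resume() are made-up driver callbacks.
 */
static int foo_runtime_suspend(struct device *dev)
{
	/* Quiesce the hardware and gate clocks here. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Ungate clocks and reinitialize the hardware here. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};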
bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||
dev->power.ignore_children);
}
#endif /* CONFIG_PM_SLEEP */
// SPDX-License-Identifier: GPL-2.0
#include <linux/anon_inodes.h>
#include <linux/exportfs.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cgroup.h>
#include <linux/magic.h>
#include <linux/mount.h>
#include <linux/pid.h>
#include <linux/pidfs.h>
#include <linux/pid_namespace.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/proc_ns.h>
#include <linux/pseudo_fs.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <uapi/linux/pidfd.h>
#include <linux/ipc_namespace.h>
#include <linux/time_namespace.h>
#include <linux/utsname.h>
#include <net/net_namespace.h>
#include <linux/coredump.h>
#include <linux/xattr.h>
#include "internal.h"
#include "mount.h"
#define PIDFS_PID_DEAD ERR_PTR(-ESRCH)
static struct kmem_cache *pidfs_attr_cachep __ro_after_init;
static struct kmem_cache *pidfs_xattr_cachep __ro_after_init;
static struct path pidfs_root_path = {};
void pidfs_get_root(struct path *path)
{
*path = pidfs_root_path;
path_get(path);
}
/*
* Stashes information that userspace needs to access even after the
* process has been reaped.
*/
struct pidfs_exit_info {
__u64 cgroupid;
__s32 exit_code;
__u32 coredump_mask;
};
struct pidfs_attr {
struct simple_xattrs *xattrs;
struct pidfs_exit_info __pei;
struct pidfs_exit_info *exit_info;
};
static struct rb_root pidfs_ino_tree = RB_ROOT;
#if BITS_PER_LONG == 32
static inline unsigned long pidfs_ino(u64 ino)
{
return lower_32_bits(ino);
}
/* On 32 bit the generation number is the upper 32 bits. */
static inline u32 pidfs_gen(u64 ino)
{
return upper_32_bits(ino);
}
#else
/* On 64 bit simply return ino. */
static inline unsigned long pidfs_ino(u64 ino)
{
return ino;
}
/* On 64 bit the generation number is 0. */
static inline u32 pidfs_gen(u64 ino)
{
return 0;
}
#endif
static int pidfs_ino_cmp(struct rb_node *a, const struct rb_node *b)
{
struct pid *pid_a = rb_entry(a, struct pid, pidfs_node);
struct pid *pid_b = rb_entry(b, struct pid, pidfs_node);
u64 pid_ino_a = pid_a->ino;
u64 pid_ino_b = pid_b->ino;
if (pid_ino_a < pid_ino_b)
return -1;
if (pid_ino_a > pid_ino_b)
return 1;
return 0;
}
void pidfs_add_pid(struct pid *pid)
{
static u64 pidfs_ino_nr = 2;
/*
* On 64 bit nothing special happens. The 64bit number assigned
* to struct pid is the inode number.
*
* On 32 bit the 64 bit number assigned to struct pid is split
* into two 32 bit numbers. The lower 32 bits are used as the
* inode number and the upper 32 bits are used as the inode
* generation number.
*
* On 32 bit pidfs_ino() will return the lower 32 bit. When
* pidfs_ino() returns zero a wrap around happened. When a
* wraparound happens the 64 bit number will be incremented by 2
* so inode numbering starts at 2 again.
*
* On 64 bit comparing two pidfds is as simple as comparing
* inode numbers.
*
* When a wraparound happens on 32 bit multiple pidfds with the
* same inode number are likely to exist (This isn't a problem
* since before pidfs pidfds used the anonymous inode meaning
* all pidfds had the same inode number.). Userspace can
* reconstruct the 64 bit identifier by retrieving both the
* inode number and the inode generation number to compare or
* use file handles.
*/
if (pidfs_ino(pidfs_ino_nr) == 0)
pidfs_ino_nr += 2;
pid->ino = pidfs_ino_nr;
pid->stashed = NULL;
pid->attr = NULL;
pidfs_ino_nr++;
write_seqcount_begin(&pidmap_lock_seq);
rb_find_add_rcu(&pid->pidfs_node, &pidfs_ino_tree, pidfs_ino_cmp);
write_seqcount_end(&pidmap_lock_seq);
}
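/*
 * Illustrative userspace sketch, not part of this file: recombining the
 * inode number (lower 32 bits on 32-bit kernels) with the inode generation
 * (upper 32 bits) of a pidfd into the full 64-bit identifier described in
 * the comment above. Error handling is omitted; on 64-bit kernels the
 * generation is 0 and st_ino already carries the whole value.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/fs.h>	/* FS_IOC_GETVERSION */

static uint64_t pidfd_unique_id(int pidfd)
{
	struct stat st;
	uint32_t gen = 0;

	fstat(pidfd, &st);			/* inode number: lower half */
	ioctl(pidfd, FS_IOC_GETVERSION, &gen);	/* generation: upper half */

	return ((uint64_t)gen << 32) | st.st_ino;
}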
void pidfs_remove_pid(struct pid *pid)
{
write_seqcount_begin(&pidmap_lock_seq);
rb_erase(&pid->pidfs_node, &pidfs_ino_tree);
write_seqcount_end(&pidmap_lock_seq);
}
void pidfs_free_pid(struct pid *pid)
{
struct pidfs_attr *attr __free(kfree) = no_free_ptr(pid->attr);
struct simple_xattrs *xattrs __free(kfree) = NULL;
/*
* Any dentry must've been wiped from the pid by now.
* Otherwise there's a reference count bug.
*/
VFS_WARN_ON_ONCE(pid->stashed);
/*
* This happens if an error occurred during e.g. task creation that
* causes us to never go through the exit path.
*/
if (unlikely(!attr))
return;
/* This never had a pidfd created. */
if (IS_ERR(attr))
return;
xattrs = no_free_ptr(attr->xattrs);
if (xattrs)
simple_xattrs_free(xattrs, NULL);
}
#ifdef CONFIG_PROC_FS
/**
* pidfd_show_fdinfo - print information about a pidfd
* @m: proc fdinfo file
* @f: file referencing a pidfd
*
* Pid:
* This function will print the pid that a given pidfd refers to in the
* pid namespace of the procfs instance.
* If the pid namespace of the process is not a descendant of the pid
* namespace of the procfs instance 0 will be shown as its pid. This is
* similar to calling getppid() on a process whose parent is outside of
* its pid namespace.
*
* NSpid:
* If pid namespaces are supported then this function will also print
* the pid a given pidfd refers to for all descendant pid namespaces
* starting from the current pid namespace of the instance, i.e. the
* Pid field and the first entry in the NSpid field will be identical.
* If the pid namespace of the process is not a descendant of the pid
* namespace of the procfs instance 0 will be shown as its first NSpid
* entry and no others will be shown.
* Note that this differs from the Pid and NSpid fields in
* /proc/<pid>/status where Pid and NSpid are always shown relative to
* the pid namespace of the procfs instance. The difference becomes
* obvious when sending around a pidfd between pid namespaces from a
* different branch of the tree, i.e. where no ancestral relation is
* present between the pid namespaces:
* - create two new pid namespaces ns1 and ns2 in the initial pid
* namespace (also take care to create new mount namespaces in the
* new pid namespace and mount procfs)
* - create a process with a pidfd in ns1
* - send pidfd from ns1 to ns2
* - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
* have exactly one entry, which is 0
*/
static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
{
struct pid *pid = pidfd_pid(f);
struct pid_namespace *ns;
pid_t nr = -1;
if (likely(pid_has_task(pid, PIDTYPE_PID))) {
ns = proc_pid_ns(file_inode(m->file)->i_sb);
nr = pid_nr_ns(pid, ns);
}
seq_put_decimal_ll(m, "Pid:\t", nr);
#ifdef CONFIG_PID_NS
seq_put_decimal_ll(m, "\nNSpid:\t", nr);
if (nr > 0) {
int i;
/* If nr is non-zero it means that 'pid' is valid and that
* ns, i.e. the pid namespace associated with the procfs
* instance, is in the pid namespace hierarchy of pid.
* Start at one below the already printed level.
*/
for (i = ns->level + 1; i <= pid->level; i++)
seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
}
#endif
seq_putc(m, '\n');
}
#endif
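/*
 * Illustrative userspace sketch, not part of this file: reading back the
 * Pid:/NSpid: lines produced by pidfd_show_fdinfo() above. Error handling is
 * omitted; SYS_pidfd_open needs kernel and libc headers that know about it.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static void dump_pidfd_fdinfo(pid_t pid)
{
	char path[64], line[256];
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
	f = fopen(path, "r");
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* includes the Pid: and NSpid: fields */
	fclose(f);
	close(pidfd);
}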
/*
* Poll support for process exit notification.
*/
static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
{
struct pid *pid = pidfd_pid(file);
struct task_struct *task;
__poll_t poll_flags = 0;
poll_wait(file, &pid->wait_pidfd, pts);
/*
* Don't wake waiters if the thread-group leader exited
* prematurely. They either get notified when the last subthread
* exits or not at all if one of the remaining subthreads execs
* and assumes the struct pid of the old thread-group leader.
*/
guard(rcu)();
task = pid_task(pid, PIDTYPE_PID);
if (!task)
poll_flags = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
else if (task->exit_state && !delay_group_leader(task))
poll_flags = EPOLLIN | EPOLLRDNORM;
return poll_flags;
}
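/*
 * Illustrative userspace sketch, not part of this file: blocking until the
 * exit notification implemented by pidfd_poll() above fires. Error handling
 * is omitted.
 */
#include <poll.h>

static void wait_for_task_exit(int pidfd)
{
	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

	/* POLLIN is reported once the task is dead. */
	poll(&pfd, 1, -1);
}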
static inline bool pid_in_current_pidns(const struct pid *pid)
{
const struct pid_namespace *ns = task_active_pid_ns(current);
if (ns->level <= pid->level)
return pid->numbers[ns->level].ns == ns;
return false;
}
static __u32 pidfs_coredump_mask(unsigned long mm_flags)
{
switch (__get_dumpable(mm_flags)) {
case SUID_DUMP_USER:
return PIDFD_COREDUMP_USER;
case SUID_DUMP_ROOT:
return PIDFD_COREDUMP_ROOT;
case SUID_DUMP_DISABLE:
return PIDFD_COREDUMP_SKIP;
default:
WARN_ON_ONCE(true);
}
return 0;
}
static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pidfd_info __user *uinfo = (struct pidfd_info __user *)arg;
struct task_struct *task __free(put_task) = NULL;
struct pid *pid = pidfd_pid(file);
size_t usize = _IOC_SIZE(cmd);
struct pidfd_info kinfo = {};
struct pidfs_exit_info *exit_info;
struct user_namespace *user_ns;
struct pidfs_attr *attr;
const struct cred *c;
__u64 mask;
if (!uinfo)
return -EINVAL;
if (usize < PIDFD_INFO_SIZE_VER0)
return -EINVAL; /* First version, no smaller struct possible */
if (copy_from_user(&mask, &uinfo->mask, sizeof(mask)))
return -EFAULT;
/*
* Restrict information retrieval to tasks within the caller's pid
* namespace hierarchy.
*/
if (!pid_in_current_pidns(pid))
return -ESRCH;
attr = READ_ONCE(pid->attr);
if (mask & PIDFD_INFO_EXIT) {
exit_info = READ_ONCE(attr->exit_info);
if (exit_info) {
kinfo.mask |= PIDFD_INFO_EXIT;
#ifdef CONFIG_CGROUPS
kinfo.cgroupid = exit_info->cgroupid;
kinfo.mask |= PIDFD_INFO_CGROUPID;
#endif
kinfo.exit_code = exit_info->exit_code;
}
}
if (mask & PIDFD_INFO_COREDUMP) {
kinfo.mask |= PIDFD_INFO_COREDUMP;
kinfo.coredump_mask = READ_ONCE(attr->__pei.coredump_mask);
}
task = get_pid_task(pid, PIDTYPE_PID);
if (!task) {
/*
* If the task has already been reaped, only exit
* information is available
*/
if (!(mask & PIDFD_INFO_EXIT))
return -ESRCH;
goto copy_out;
}
c = get_task_cred(task);
if (!c)
return -ESRCH;
if ((kinfo.mask & PIDFD_INFO_COREDUMP) && !(kinfo.coredump_mask)) {
task_lock(task);
if (task->mm) {
unsigned long flags = __mm_flags_get_dumpable(task->mm);
kinfo.coredump_mask = pidfs_coredump_mask(flags);
}
task_unlock(task);
}
/* Unconditionally return identifiers and credentials, the rest only on request */
user_ns = current_user_ns();
kinfo.ruid = from_kuid_munged(user_ns, c->uid);
kinfo.rgid = from_kgid_munged(user_ns, c->gid);
kinfo.euid = from_kuid_munged(user_ns, c->euid);
kinfo.egid = from_kgid_munged(user_ns, c->egid);
kinfo.suid = from_kuid_munged(user_ns, c->suid);
kinfo.sgid = from_kgid_munged(user_ns, c->sgid);
kinfo.fsuid = from_kuid_munged(user_ns, c->fsuid);
kinfo.fsgid = from_kgid_munged(user_ns, c->fsgid);
kinfo.mask |= PIDFD_INFO_CREDS;
put_cred(c);
#ifdef CONFIG_CGROUPS
if (!kinfo.cgroupid) {
struct cgroup *cgrp;
rcu_read_lock();
cgrp = task_dfl_cgroup(task);
kinfo.cgroupid = cgroup_id(cgrp);
kinfo.mask |= PIDFD_INFO_CGROUPID;
rcu_read_unlock();
}
#endif
/*
* Copy pid/tgid last, to reduce the chances the information might be
* stale. Note that it is not possible to ensure it will be valid as the
* task might return as soon as the copy_to_user finishes, but that's ok
* and userspace expects that might happen and can act accordingly, so
* this is just best-effort. What we can do, however, is check that all
* the fields are set correctly, or return ESRCH to avoid providing
* incomplete information.
*/
kinfo.ppid = task_ppid_nr_ns(task, NULL);
kinfo.tgid = task_tgid_vnr(task);
kinfo.pid = task_pid_vnr(task);
kinfo.mask |= PIDFD_INFO_PID;
if (kinfo.pid == 0 || kinfo.tgid == 0)
return -ESRCH;
copy_out:
/*
* If userspace and the kernel have the same struct size it can just
* be copied. If userspace provides an older struct, only the bits that
* userspace knows about will be copied. If userspace provides a new
* struct, only the bits that the kernel knows about will be copied.
*/
return copy_struct_to_user(uinfo, usize, &kinfo, sizeof(kinfo), NULL);
}
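/*
 * Illustrative userspace sketch, not part of this file: requesting exit and
 * cgroup information through the extensible PIDFD_GET_INFO ioctl handled
 * above. Assumes uapi headers that provide struct pidfd_info; error handling
 * is omitted.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/pidfd.h>

static void print_exit_info(int pidfd)
{
	struct pidfd_info info;

	memset(&info, 0, sizeof(info));
	info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_CGROUPID;

	if (ioctl(pidfd, PIDFD_GET_INFO, &info) == 0 &&
	    (info.mask & PIDFD_INFO_EXIT))
		printf("exit_code=%d cgroupid=%llu\n", info.exit_code,
		       (unsigned long long)info.cgroupid);
}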
static bool pidfs_ioctl_valid(unsigned int cmd)
{
switch (cmd) {
case FS_IOC_GETVERSION:
case PIDFD_GET_CGROUP_NAMESPACE:
case PIDFD_GET_IPC_NAMESPACE:
case PIDFD_GET_MNT_NAMESPACE:
case PIDFD_GET_NET_NAMESPACE:
case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
case PIDFD_GET_TIME_NAMESPACE:
case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
case PIDFD_GET_UTS_NAMESPACE:
case PIDFD_GET_USER_NAMESPACE:
case PIDFD_GET_PID_NAMESPACE:
return true;
}
/* Extensible ioctls require some more careful checks. */
switch (_IOC_NR(cmd)) {
case _IOC_NR(PIDFD_GET_INFO):
/*
* Try to prevent performing a pidfd ioctl when someone
* erroneously mistook the file descriptor for a pidfd.
* This is not perfect but will catch most cases.
*/
return extensible_ioctl_valid(cmd, PIDFD_GET_INFO, PIDFD_INFO_SIZE_VER0);
}
return false;
}
static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct task_struct *task __free(put_task) = NULL;
struct nsproxy *nsp __free(put_nsproxy) = NULL;
struct ns_common *ns_common = NULL;
struct pid_namespace *pid_ns;
if (!pidfs_ioctl_valid(cmd))
return -ENOIOCTLCMD;
if (cmd == FS_IOC_GETVERSION) {
if (!arg)
return -EINVAL;
__u32 __user *argp = (__u32 __user *)arg;
return put_user(file_inode(file)->i_generation, argp);
}
/* Extensible IOCTL that does not open namespace FDs, take a shortcut */
if (_IOC_NR(cmd) == _IOC_NR(PIDFD_GET_INFO))
return pidfd_info(file, cmd, arg);
task = get_pid_task(pidfd_pid(file), PIDTYPE_PID);
if (!task)
return -ESRCH;
if (arg)
return -EINVAL;
scoped_guard(task_lock, task) {
nsp = task->nsproxy;
if (nsp)
get_nsproxy(nsp);
}
if (!nsp)
return -ESRCH; /* just pretend it didn't exist */
/*
* We're trying to open a file descriptor to the namespace so perform a
* filesystem cred ptrace check. Also, we mirror nsfs behavior.
*/
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
return -EACCES;
switch (cmd) {
/* Namespaces that hang off nsproxy. */
case PIDFD_GET_CGROUP_NAMESPACE:
if (IS_ENABLED(CONFIG_CGROUPS)) {
get_cgroup_ns(nsp->cgroup_ns);
ns_common = to_ns_common(nsp->cgroup_ns);
}
break;
case PIDFD_GET_IPC_NAMESPACE:
if (IS_ENABLED(CONFIG_IPC_NS)) {
get_ipc_ns(nsp->ipc_ns);
ns_common = to_ns_common(nsp->ipc_ns);
}
break;
case PIDFD_GET_MNT_NAMESPACE:
get_mnt_ns(nsp->mnt_ns);
ns_common = to_ns_common(nsp->mnt_ns);
break;
case PIDFD_GET_NET_NAMESPACE:
if (IS_ENABLED(CONFIG_NET_NS)) {
ns_common = to_ns_common(nsp->net_ns);
get_net_ns(ns_common);
}
break;
case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
if (IS_ENABLED(CONFIG_PID_NS)) {
get_pid_ns(nsp->pid_ns_for_children);
ns_common = to_ns_common(nsp->pid_ns_for_children);
}
break;
case PIDFD_GET_TIME_NAMESPACE:
if (IS_ENABLED(CONFIG_TIME_NS)) {
get_time_ns(nsp->time_ns);
ns_common = to_ns_common(nsp->time_ns);
}
break;
case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
if (IS_ENABLED(CONFIG_TIME_NS)) {
get_time_ns(nsp->time_ns_for_children);
ns_common = to_ns_common(nsp->time_ns_for_children);
}
break;
case PIDFD_GET_UTS_NAMESPACE:
if (IS_ENABLED(CONFIG_UTS_NS)) {
get_uts_ns(nsp->uts_ns);
ns_common = to_ns_common(nsp->uts_ns);
}
break;
/* Namespaces that don't hang off nsproxy. */
case PIDFD_GET_USER_NAMESPACE:
if (IS_ENABLED(CONFIG_USER_NS)) {
rcu_read_lock();
ns_common = to_ns_common(get_user_ns(task_cred_xxx(task, user_ns)));
rcu_read_unlock();
}
break;
case PIDFD_GET_PID_NAMESPACE:
if (IS_ENABLED(CONFIG_PID_NS)) {
rcu_read_lock();
pid_ns = task_active_pid_ns(task);
if (pid_ns)
ns_common = to_ns_common(get_pid_ns(pid_ns));
rcu_read_unlock();
}
break;
default:
return -ENOIOCTLCMD;
}
if (!ns_common)
return -EOPNOTSUPP;
/* open_namespace() unconditionally consumes the reference */
return open_namespace(ns_common);
}
static const struct file_operations pidfs_file_operations = {
.poll = pidfd_poll,
#ifdef CONFIG_PROC_FS
.show_fdinfo = pidfd_show_fdinfo,
#endif
.unlocked_ioctl = pidfd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
struct pid *pidfd_pid(const struct file *file)
{
if (file->f_op != &pidfs_file_operations)
return ERR_PTR(-EBADF);
return file_inode(file)->i_private;
}
/*
* We're called from release_task(). We know there's at least one
* reference to struct pid being held that won't be released until the
* task has been reaped which cannot happen until we're out of
* release_task().
*
* If this struct pid has at least once been referred to by a pidfd then
* pid->attr will be allocated. If not we mark the struct pid as dead so
* anyone who is trying to register it with pidfs will fail to do so.
* Otherwise we would hand out pidfds for reaped tasks without having
* exit information available.
*
* Worst case is that we've filled in the info and the pid gets freed
* right away in free_pid() when no one holds a pidfd anymore. Since
* pidfs_exit() currently is placed after exit_task_work() we know that
* it cannot be us aka the exiting task holding a pidfd to itself.
*/
void pidfs_exit(struct task_struct *tsk)
{
struct pid *pid = task_pid(tsk);
struct pidfs_attr *attr;
struct pidfs_exit_info *exit_info;
#ifdef CONFIG_CGROUPS
struct cgroup *cgrp;
#endif
might_sleep();
guard(spinlock_irq)(&pid->wait_pidfd.lock);
attr = pid->attr;
if (!attr) {
/*
* No one ever held a pidfd for this struct pid.
* Mark it as dead so no one can add a pidfs
* entry anymore. We're about to be reaped and
* so no exit information would be available.
*/
pid->attr = PIDFS_PID_DEAD;
return;
}
/*
* If @pid->attr is set someone might still legitimately hold a
* pidfd to @pid or someone might concurrently still be getting
* a reference to an already stashed dentry from @pid->stashed.
* So defer cleaning @pid->attr until the last reference to @pid
* is put.
*/
exit_info = &attr->__pei;
#ifdef CONFIG_CGROUPS
rcu_read_lock();
cgrp = task_dfl_cgroup(tsk);
exit_info->cgroupid = cgroup_id(cgrp);
rcu_read_unlock();
#endif
exit_info->exit_code = tsk->exit_code;
/* Ensure that PIDFD_GET_INFO sees either all or nothing. */
smp_store_release(&attr->exit_info, &attr->__pei);
}
#ifdef CONFIG_COREDUMP
void pidfs_coredump(const struct coredump_params *cprm)
{
struct pid *pid = cprm->pid;
struct pidfs_exit_info *exit_info;
struct pidfs_attr *attr;
__u32 coredump_mask = 0;
attr = READ_ONCE(pid->attr);
VFS_WARN_ON_ONCE(!attr);
VFS_WARN_ON_ONCE(attr == PIDFS_PID_DEAD);
exit_info = &attr->__pei;
/* Note how we were coredumped. */
coredump_mask = pidfs_coredump_mask(cprm->mm_flags);
/* Note that we actually did coredump. */
coredump_mask |= PIDFD_COREDUMPED;
/* If coredumping is set to skip we should never end up here. */
VFS_WARN_ON_ONCE(coredump_mask & PIDFD_COREDUMP_SKIP);
smp_store_release(&exit_info->coredump_mask, coredump_mask);
}
#endif
static struct vfsmount *pidfs_mnt __ro_after_init;
/*
* The vfs falls back to simple_setattr() if i_op->setattr() isn't
* implemented. Let's reject it completely until we have a clean
* permission concept for pidfds.
*/
static int pidfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
return anon_inode_setattr(idmap, dentry, attr);
}
static int pidfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
return anon_inode_getattr(idmap, path, stat, request_mask, query_flags);
}
static ssize_t pidfs_listxattr(struct dentry *dentry, char *buf, size_t size)
{
struct inode *inode = d_inode(dentry);
struct pid *pid = inode->i_private;
struct pidfs_attr *attr = pid->attr;
struct simple_xattrs *xattrs;
xattrs = READ_ONCE(attr->xattrs);
if (!xattrs)
return 0;
return simple_xattr_list(inode, xattrs, buf, size);
}
static const struct inode_operations pidfs_inode_operations = {
.getattr = pidfs_getattr,
.setattr = pidfs_setattr,
.listxattr = pidfs_listxattr,
};
static void pidfs_evict_inode(struct inode *inode)
{
struct pid *pid = inode->i_private;
clear_inode(inode);
put_pid(pid);
}
static const struct super_operations pidfs_sops = {
.drop_inode = inode_just_drop,
.evict_inode = pidfs_evict_inode,
.statfs = simple_statfs,
};
/*
* 'lsof' has knowledge of our historical anon_inode use, and expects
* the pidfs dentry name to start with 'anon_inode'.
*/
static char *pidfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(buffer, buflen, "anon_inode:[pidfd]");
}
const struct dentry_operations pidfs_dentry_operations = {
.d_dname = pidfs_dname,
.d_prune = stashed_dentry_prune,
};
static int pidfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
struct inode *parent)
{
const struct pid *pid = inode->i_private;
if (*max_len < 2) {
*max_len = 2;
return FILEID_INVALID;
}
*max_len = 2;
*(u64 *)fh = pid->ino;
return FILEID_KERNFS;
}
static int pidfs_ino_find(const void *key, const struct rb_node *node)
{
const u64 pid_ino = *(u64 *)key;
const struct pid *pid = rb_entry(node, struct pid, pidfs_node);
if (pid_ino < pid->ino)
return -1;
if (pid_ino > pid->ino)
return 1;
return 0;
}
/* Find a struct pid based on the inode number. */
static struct pid *pidfs_ino_get_pid(u64 ino)
{
struct pid *pid;
struct rb_node *node;
unsigned int seq;
guard(rcu)();
do {
seq = read_seqcount_begin(&pidmap_lock_seq);
node = rb_find_rcu(&ino, &pidfs_ino_tree, pidfs_ino_find);
if (node)
break;
} while (read_seqcount_retry(&pidmap_lock_seq, seq));
if (!node)
return NULL;
pid = rb_entry(node, struct pid, pidfs_node);
/* Within our pid namespace hierarchy? */
if (pid_vnr(pid) == 0)
return NULL;
return get_pid(pid);
}
static struct dentry *pidfs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len,
int fh_type)
{
int ret;
u64 pid_ino;
struct path path;
struct pid *pid;
if (fh_len < 2)
return NULL;
switch (fh_type) {
case FILEID_KERNFS:
pid_ino = *(u64 *)fid;
break;
default:
return NULL;
}
pid = pidfs_ino_get_pid(pid_ino);
if (!pid)
return NULL;
ret = path_from_stashed(&pid->stashed, pidfs_mnt, pid, &path);
if (ret < 0)
return ERR_PTR(ret);
VFS_WARN_ON_ONCE(!pid->attr);
mntput(path.mnt);
return path.dentry;
}
/*
* Make sure that we reject any nonsensical flags that users pass via
* open_by_handle_at(). Note that PIDFD_THREAD is defined as O_EXCL, and
* PIDFD_NONBLOCK as O_NONBLOCK.
*/
#define VALID_FILE_HANDLE_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_NONBLOCK | O_CLOEXEC | O_EXCL)
static int pidfs_export_permission(struct handle_to_path_ctx *ctx,
unsigned int oflags)
{
if (oflags & ~(VALID_FILE_HANDLE_OPEN_FLAGS | O_LARGEFILE))
return -EINVAL;
/*
* pidfs_ino_get_pid() will verify that the struct pid is part
* of the caller's pid namespace hierarchy. No further
* permission checks are needed.
*/
return 0;
}
static struct file *pidfs_export_open(const struct path *path, unsigned int oflags)
{
/*
* Clear O_LARGEFILE as open_by_handle_at() forces it, and raise
* O_RDWR as pidfds are always opened read-write.
*/
oflags &= ~O_LARGEFILE;
return dentry_open(path, oflags | O_RDWR, current_cred());
}
static const struct export_operations pidfs_export_operations = {
.encode_fh = pidfs_encode_fh,
.fh_to_dentry = pidfs_fh_to_dentry,
.open = pidfs_export_open,
.permission = pidfs_export_permission,
};
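/*
 * Illustrative userspace sketch, not part of this file: encoding a pidfd into
 * a file handle and decoding it again via the export operations above. That a
 * pidfd is accepted as the mount reference for open_by_handle_at() is an
 * assumption of this sketch; the call also normally requires
 * CAP_DAC_READ_SEARCH. Error handling is omitted.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>

static int reopen_pidfd_from_handle(int pidfd)
{
	struct file_handle *fh;
	int mount_id;

	fh = calloc(1, sizeof(*fh) + MAX_HANDLE_SZ);
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* Encode the pidfd itself: empty path plus AT_EMPTY_PATH. */
	name_to_handle_at(pidfd, "", fh, &mount_id, AT_EMPTY_PATH);

	return open_by_handle_at(pidfd, fh, O_RDONLY);
}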
static int pidfs_init_inode(struct inode *inode, void *data)
{
const struct pid *pid = data;
inode->i_private = data;
inode->i_flags |= S_PRIVATE | S_ANON_INODE;
/* Allow setting xattrs. */
inode->i_flags &= ~S_IMMUTABLE;
inode->i_mode |= S_IRWXU;
inode->i_op = &pidfs_inode_operations;
inode->i_fop = &pidfs_file_operations;
inode->i_ino = pidfs_ino(pid->ino);
inode->i_generation = pidfs_gen(pid->ino);
return 0;
}
static void pidfs_put_data(void *data)
{
struct pid *pid = data;
put_pid(pid);
}
/**
* pidfs_register_pid - register a struct pid in pidfs
* @pid: pid to pin
*
* Register a struct pid in pidfs.
*
* Return: On success zero, on error a negative error code is returned.
*/
int pidfs_register_pid(struct pid *pid)
{
struct pidfs_attr *new_attr __free(kfree) = NULL;
struct pidfs_attr *attr;
might_sleep();
if (!pid)
return 0;
attr = READ_ONCE(pid->attr);
if (unlikely(attr == PIDFS_PID_DEAD))
return PTR_ERR(PIDFS_PID_DEAD);
if (attr)
return 0;
new_attr = kmem_cache_zalloc(pidfs_attr_cachep, GFP_KERNEL);
if (!new_attr)
return -ENOMEM;
/* Synchronize with pidfs_exit(). */
guard(spinlock_irq)(&pid->wait_pidfd.lock);
attr = pid->attr;
if (unlikely(attr == PIDFS_PID_DEAD))
return PTR_ERR(PIDFS_PID_DEAD);
if (unlikely(attr))
return 0;
pid->attr = no_free_ptr(new_attr);
return 0;
}
static struct dentry *pidfs_stash_dentry(struct dentry **stashed,
struct dentry *dentry)
{
int ret;
struct pid *pid = d_inode(dentry)->i_private;
VFS_WARN_ON_ONCE(stashed != &pid->stashed);
ret = pidfs_register_pid(pid);
if (ret)
return ERR_PTR(ret);
return stash_dentry(stashed, dentry);
}
static const struct stashed_operations pidfs_stashed_ops = {
.stash_dentry = pidfs_stash_dentry,
.init_inode = pidfs_init_inode,
.put_data = pidfs_put_data,
};
static int pidfs_xattr_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *suffix, void *value, size_t size)
{
struct pid *pid = inode->i_private;
struct pidfs_attr *attr = pid->attr;
const char *name;
struct simple_xattrs *xattrs;
xattrs = READ_ONCE(attr->xattrs);
if (!xattrs)
return 0;
name = xattr_full_name(handler, suffix);
return simple_xattr_get(xattrs, name, value, size);
}
static int pidfs_xattr_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap, struct dentry *unused,
struct inode *inode, const char *suffix,
const void *value, size_t size, int flags)
{
struct pid *pid = inode->i_private;
struct pidfs_attr *attr = pid->attr;
const char *name;
struct simple_xattrs *xattrs;
struct simple_xattr *old_xattr;
/* Ensure we're the only one to set @attr->xattrs. */
WARN_ON_ONCE(!inode_is_locked(inode));
xattrs = READ_ONCE(attr->xattrs);
if (!xattrs) {
xattrs = kmem_cache_zalloc(pidfs_xattr_cachep, GFP_KERNEL);
if (!xattrs)
return -ENOMEM;
simple_xattrs_init(xattrs);
smp_store_release(&pid->attr->xattrs, xattrs);
}
name = xattr_full_name(handler, suffix);
old_xattr = simple_xattr_set(xattrs, name, value, size, flags);
if (IS_ERR(old_xattr))
return PTR_ERR(old_xattr);
simple_xattr_free(old_xattr);
return 0;
}
static const struct xattr_handler pidfs_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = pidfs_xattr_get,
.set = pidfs_xattr_set,
};
static const struct xattr_handler *const pidfs_xattr_handlers[] = {
&pidfs_trusted_xattr_handler,
NULL
};
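/*
 * Illustrative userspace sketch, not part of this file: attaching a trusted.*
 * xattr to a pidfd through the handler above. trusted.* attributes require
 * CAP_SYS_ADMIN; the attribute name is made up and error handling is omitted.
 */
#include <sys/xattr.h>

static void tag_pidfd(int pidfd)
{
	fsetxattr(pidfd, "trusted.example.tag", "1", 1, 0);
}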
static int pidfs_init_fs_context(struct fs_context *fc)
{
struct pseudo_fs_context *ctx;
ctx = init_pseudo(fc, PID_FS_MAGIC);
if (!ctx)
return -ENOMEM;
fc->s_iflags |= SB_I_NOEXEC;
fc->s_iflags |= SB_I_NODEV;
ctx->ops = &pidfs_sops;
ctx->eops = &pidfs_export_operations;
ctx->dops = &pidfs_dentry_operations;
ctx->xattr = pidfs_xattr_handlers;
fc->s_fs_info = (void *)&pidfs_stashed_ops;
return 0;
}
static struct file_system_type pidfs_type = {
.name = "pidfs",
.init_fs_context = pidfs_init_fs_context,
.kill_sb = kill_anon_super,
};
struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags)
{
struct file *pidfd_file;
struct path path __free(path_put) = {};
int ret;
/*
* Ensure that PIDFD_STALE can be passed as a flag without
* overloading other uapi pidfd flags.
*/
BUILD_BUG_ON(PIDFD_STALE == PIDFD_THREAD);
BUILD_BUG_ON(PIDFD_STALE == PIDFD_NONBLOCK);
ret = path_from_stashed(&pid->stashed, pidfs_mnt, get_pid(pid), &path);
if (ret < 0)
return ERR_PTR(ret);
VFS_WARN_ON_ONCE(!pid->attr);
flags &= ~PIDFD_STALE;
flags |= O_RDWR;
pidfd_file = dentry_open(&path, flags, current_cred());
/* Raise PIDFD_THREAD explicitly as do_dentry_open() strips it. */
if (!IS_ERR(pidfd_file))
pidfd_file->f_flags |= (flags & PIDFD_THREAD);
return pidfd_file;
}
void __init pidfs_init(void)
{
pidfs_attr_cachep = kmem_cache_create("pidfs_attr_cache", sizeof(struct pidfs_attr), 0,
(SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
SLAB_ACCOUNT | SLAB_PANIC), NULL);
pidfs_xattr_cachep = kmem_cache_create("pidfs_xattr_cache",
sizeof(struct simple_xattrs), 0,
(SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
SLAB_ACCOUNT | SLAB_PANIC), NULL);
pidfs_mnt = kern_mount(&pidfs_type);
if (IS_ERR(pidfs_mnt))
panic("Failed to mount pidfs pseudo filesystem");
pidfs_root_path.mnt = pidfs_mnt;
pidfs_root_path.dentry = pidfs_mnt->mnt_root;
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H
#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>
/* Internal core VMA manipulation functions. */
#include "vma.h"
struct folio_batch;
/*
* Maintains state across a page table move. The operation assumes both source
* and destination VMAs already exist and are specified by the user.
*
* Partial moves are permitted, but the old and new ranges must both reside
* within a VMA.
*
* mmap lock must be held in write and VMA write locks must be held on any VMA
* that is visible.
*
* Use the PAGETABLE_MOVE() macro to initialise this struct.
*
* The old_addr and new_addr fields are updated as the page table move is
* executed.
*
* NOTE: The page table move is affected by reading from [old_addr, old_end),
* and old_addr may be updated for better page table alignment, so len_in
* represents the length of the range being copied as specified by the user.
*/
struct pagetable_move_control {
struct vm_area_struct *old; /* Source VMA. */
struct vm_area_struct *new; /* Destination VMA. */
unsigned long old_addr; /* Address from which the move begins. */
unsigned long old_end; /* Exclusive address at which old range ends. */
unsigned long new_addr; /* Address to move page tables to. */
unsigned long len_in; /* Bytes to remap specified by user. */
bool need_rmap_locks; /* Do rmap locks need to be taken? */
bool for_stack; /* Is this an early temp stack being moved? */
};
#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \
struct pagetable_move_control name = { \
.old = old_, \
.new = new_, \
.old_addr = old_addr_, \
.old_end = (old_addr_) + (len_), \
.new_addr = new_addr_, \
.len_in = len_, \
}
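/*
 * Illustrative sketch, not part of this file: initialising the control
 * structure above the way mremap-style callers are expected to, assuming
 * move_page_tables() takes the control structure as in the refactored mremap
 * code. The variable names are made up.
 */
static inline unsigned long example_move_tables(struct vm_area_struct *old_vma,
		struct vm_area_struct *new_vma, unsigned long old_addr,
		unsigned long new_addr, unsigned long len)
{
	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);

	/* pmc.old_addr/new_addr advance as the move proceeds (see above). */
	return move_page_tables(&pmc);
}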
/*
* The set of flags that only affect watermark checking and reclaim
* behaviour. This is used by the MM to obey the caller constraints
* about IO, FS and watermark checking while ignoring placement
* hints such as HIGHMEM usage.
*/
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
__GFP_NOLOCKDEP)
/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
/*
* Different from WARN_ON_ONCE(), no warning will be issued
* when we specify __GFP_NOWARN.
*/
#define WARN_ON_ONCE_GFP(cond, gfp) ({ \
static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
__warned = true; \
WARN_ON(1); \
} \
unlikely(__ret_warn_once); \
})
void page_writeback_init(void);
/*
* If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
* its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
* above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
* leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
*/
#define ENTIRELY_MAPPED 0x800000
#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
/*
* Flags passed to __show_mem() and show_free_areas() to suppress output in
* various contexts.
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
/*
* How many individual pages have an elevated _mapcount. Excludes
* the folio's entire_mapcount.
*
* Don't use this function outside of debugging code.
*/
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
return -1;
return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}
/*
* Retrieve the first entry of a folio based on a provided entry within the
* folio. We cannot rely on folio->swap as there is no guarantee that it has
* been initialized. Used for calling arch_swap_restore()
*/
static inline swp_entry_t folio_swap(swp_entry_t entry,
const struct folio *folio)
{
swp_entry_t swap = {
.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
};
return swap;
}
static inline void *folio_raw_mapping(const struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;
return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
}
/*
* This is a file-backed mapping, and is about to be memory mapped - invoke its
* mmap hook and safely handle error conditions. On error, VMA hooks will be
* mutated.
*
* @file: File which backs the mapping.
* @vma: VMA which we are mapping.
*
* Returns: 0 if success, error otherwise.
*/
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
int err = vfs_mmap(file, vma);
if (likely(!err))
return 0;
/*
* OK, we tried to call the file hook for mmap(), but an error
* arose. The mapping is in an inconsistent state and we must not invoke
* any further hooks on it.
*/
vma->vm_ops = &vma_dummy_vm_ops;
return err;
}
/*
* If the VMA has a close hook then close it, and since closing it might leave
* it in an inconsistent state which makes the use of any hooks suspect, clear
* them down by installing dummy empty hooks.
*/
static inline void vma_close(struct vm_area_struct *vma)
{
if (vma->vm_ops && vma->vm_ops->close) {
vma->vm_ops->close(vma);
/*
* The mapping is in an inconsistent state, and no further hooks
* may be invoked upon it.
*/
vma->vm_ops = &vma_dummy_vm_ops;
}
}
#ifdef CONFIG_MMU
/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;
/* Compare PTEs respecting the dirty bit. */
#define FPB_RESPECT_DIRTY ((__force fpb_t)BIT(0))
/* Compare PTEs respecting the soft-dirty bit. */
#define FPB_RESPECT_SOFT_DIRTY ((__force fpb_t)BIT(1))
/* Compare PTEs respecting the writable bit. */
#define FPB_RESPECT_WRITE ((__force fpb_t)BIT(2))
/*
* Merge PTE write bits: if any PTE in the batch is writable, modify the
* PTE at @ptentp to be writable.
*/
#define FPB_MERGE_WRITE ((__force fpb_t)BIT(3))
/*
* Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
* modify the PTE at @ptentp to be young or dirty, respectively.
*/
#define FPB_MERGE_YOUNG_DIRTY ((__force fpb_t)BIT(4))
static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
if (!(flags & FPB_RESPECT_DIRTY))
pte = pte_mkclean(pte);
if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
pte = pte_clear_soft_dirty(pte);
if (likely(!(flags & FPB_RESPECT_WRITE)))
pte = pte_wrprotect(pte);
return pte_mkold(pte);
}
/**
* folio_pte_batch_flags - detect a PTE batch for a large folio
* @folio: The large folio to detect a PTE batch for.
* @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
* @ptep: Page table pointer for the first entry.
* @ptentp: Pointer to a COPY of the first page table entry whose flags this
* function updates based on @flags if appropriate.
* @max_nr: The maximum number of table entries to consider.
* @flags: Flags to modify the PTE batch semantics.
*
* Detect a PTE batch: consecutive (present) PTEs that map consecutive
* pages of the same large folio in a single VMA and a single page table.
*
* All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
* the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
* and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
*
* @ptep must map any page of the folio. max_nr must be at least one and
* must be limited by the caller so scanning cannot exceed a single VMA and
* a single page table.
*
* Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
* be updated: it's crucial that a pointer to a COPY of the first
* page table entry, obtained through ptep_get(), is provided as @ptentp.
*
* This function will be inlined to optimize based on the input parameters;
* consider using folio_pte_batch() instead if applicable.
*
* Return: the number of table entries in the batch.
*/
static inline unsigned int folio_pte_batch_flags(struct folio *folio,
struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
unsigned int max_nr, fpb_t flags)
{
bool any_writable = false, any_young = false, any_dirty = false;
pte_t expected_pte, pte = *ptentp;
unsigned int nr, cur_nr;
VM_WARN_ON_FOLIO(!pte_present(pte), folio);
VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
/*
* Ensure this is a pointer to a copy not a pointer into a page table.
* If this is a stack value, it won't be a valid virtual address, but
* that's fine because it also cannot be pointing into the page table.
*/
VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));
/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
max_nr = min_t(unsigned long, max_nr,
folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
nr = pte_batch_hint(ptep, pte);
expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
ptep = ptep + nr;
while (nr < max_nr) {
pte = ptep_get(ptep);
if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
break;
if (flags & FPB_MERGE_WRITE)
any_writable |= pte_write(pte);
if (flags & FPB_MERGE_YOUNG_DIRTY) {
any_young |= pte_young(pte);
any_dirty |= pte_dirty(pte);
}
cur_nr = pte_batch_hint(ptep, pte);
expected_pte = pte_advance_pfn(expected_pte, cur_nr);
ptep += cur_nr;
nr += cur_nr;
}
if (any_writable)
*ptentp = pte_mkwrite(*ptentp, vma);
if (any_young)
*ptentp = pte_mkyoung(*ptentp);
if (any_dirty)
*ptentp = pte_mkdirty(*ptentp);
return min(nr, max_nr);
}
unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
unsigned int max_nr);
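/*
 * Illustrative sketch, not part of this file: a minimal caller pattern for
 * the batching helpers above when walking PTEs that may map a large folio.
 * Locking and PTE mapping are assumed to be handled by the caller.
 */
static inline unsigned int example_nr_batched(struct folio *folio, pte_t *ptep,
					      pte_t pte, unsigned int max_nr)
{
	/* Only large folios can be mapped by more than one PTE. */
	if (!folio_test_large(folio) || max_nr == 1)
		return 1;

	return folio_pte_batch(folio, ptep, pte, max_nr);
}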
/**
* pte_move_swp_offset - Move the swap entry offset field of a swap pte
* forward or backward by delta
* @pte: The initial pte state; is_swap_pte(pte) must be true and
* non_swap_entry() must be false.
* @delta: The direction and the offset we are moving; forward if delta
* is positive; backward if delta is negative
*
* Moves the swap offset, while maintaining all other fields, including
* swap type, and any swp pte bits. The resulting pte is returned.
*/
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
swp_entry_t entry = pte_to_swp_entry(pte);
pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
(swp_offset(entry) + delta)));
if (pte_swp_soft_dirty(pte))
new = pte_swp_mksoft_dirty(new);
if (pte_swp_exclusive(pte))
new = pte_swp_mkexclusive(new);
if (pte_swp_uffd_wp(pte))
new = pte_swp_mkuffd_wp(new);
return new;
}
/**
* pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
* @pte: The initial pte state; is_swap_pte(pte) must be true and
* non_swap_entry() must be false.
*
* Increments the swap offset, while maintaining all other fields, including
* swap type, and any swp pte bits. The resulting pte is returned.
*/
static inline pte_t pte_next_swp_offset(pte_t pte)
{
return pte_move_swp_offset(pte, 1);
}
/**
* swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
* @start_ptep: Page table pointer for the first entry.
* @max_nr: The maximum number of table entries to consider.
* @pte: Page table entry for the first entry.
*
* Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
* containing swap entries all with consecutive offsets and targeting the same
* swap type, all with matching swp pte bits.
*
* max_nr must be at least one and must be limited by the caller so scanning
* cannot exceed a single page table.
*
* Return: the number of table entries in the batch.
*/
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
pte_t expected_pte = pte_next_swp_offset(pte);
const pte_t *end_ptep = start_ptep + max_nr;
swp_entry_t entry = pte_to_swp_entry(pte);
pte_t *ptep = start_ptep + 1;
unsigned short cgroup_id;
VM_WARN_ON(max_nr < 1);
VM_WARN_ON(!is_swap_pte(pte));
VM_WARN_ON(non_swap_entry(entry));
cgroup_id = lookup_swap_cgroup_id(entry);
while (ptep < end_ptep) {
pte = ptep_get(ptep);
if (!pte_same(pte, expected_pte))
break;
if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
break;
expected_pte = pte_next_swp_offset(expected_pte);
ptep++;
}
return ptep - start_ptep;
}
#endif /* CONFIG_MMU */
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
pg_data_t *pgdat = folio_pgdat(folio);
int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
if (nr_throttled)
__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}
static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
wait_queue_head_t *wqh;
wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
if (waitqueue_active(wqh))
wake_up(wqh);
}
vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
vm_fault_t ret = __vmf_anon_prepare(vmf);
if (unlikely(ret & VM_FAULT_RETRY))
vma_end_read(vmf->vma);
return ret;
}
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);
void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long floor,
unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details);
void zap_page_range_single_batched(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long addr,
unsigned long size, struct zap_details *details);
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
gfp_t gfp);
void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
struct file *file, pgoff_t index, unsigned long nr_to_read)
{
DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
force_page_cache_ra(&ractl, nr_to_read);
}
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_failed);
/**
* folio_evictable - Test whether a folio is evictable.
* @folio: The folio to test.
*
* Test whether @folio is evictable -- i.e., should be placed on
* active/inactive lists vs unevictable list.
*
* Reasons folio might not be evictable:
* 1. folio's mapping marked unevictable
* 2. One of the pages in the folio is part of an mlocked VMA
*/
static inline bool folio_evictable(struct folio *folio)
{
bool ret;
/* Prevent address_space of inode and swap cache from being freed */
rcu_read_lock();
ret = !mapping_unevictable(folio_mapping(folio)) &&
!folio_test_mlocked(folio);
rcu_read_unlock();
return ret;
}
/*
* Turn a non-refcounted page (->_refcount == 0) into refcounted with
* a count of one.
*/
static inline void set_page_refcounted(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(page_ref_count(page), page);
set_page_count(page, 1);
}
/*
* Return true if a folio needs ->release_folio() calling upon it.
*/
static inline bool folio_needs_release(struct folio *folio)
{
struct address_space *mapping = folio_mapping(folio);
return folio_has_private(folio) ||
(mapping && mapping_release_always(mapping));
}
extern unsigned long highest_memmap_pfn;
/*
* Maximum number of reclaim retries without progress before the OOM
* killer is considered the only way forward.
*/
#define MAX_RECLAIM_RETRIES 16
/*
* in mm/vmscan.c:
*/
bool folio_isolate_lru(struct folio *folio);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
#ifdef CONFIG_NUMA
int user_proactive_reclaim(char *buf,
struct mem_cgroup *memcg, pg_data_t *pgdat);
#else
static inline int user_proactive_reclaim(char *buf,
struct mem_cgroup *memcg, pg_data_t *pgdat)
{
return 0;
}
#endif
/*
* in mm/rmap.c:
*/
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
/*
* in mm/page_alloc.c
*/
#define K(x) ((x) << (PAGE_SHIFT-10))
extern char * const zone_names[MAX_NR_ZONES];
/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
extern int min_free_kbytes;
extern int defrag_mode;
void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);
/*
* Structure for holding the mostly immutable allocation parameters passed
* between functions involved in allocations, including the alloc_pages*
* family of functions.
*
* nodemask, migratetype and highest_zoneidx are initialized only once in
* __alloc_pages() and then never change.
*
* zonelist, preferred_zone and highest_zoneidx are set first in
* __alloc_pages() for the fast path, and might be later changed
* in __alloc_pages_slowpath(). All other functions pass the whole structure
* by a const pointer.
*/
struct alloc_context {
struct zonelist *zonelist;
nodemask_t *nodemask;
struct zoneref *preferred_zoneref;
int migratetype;
/*
* highest_zoneidx represents the highest usable zone index of
* the allocation request. Due to the nature of the zone,
* memory on a zone lower than highest_zoneidx will be
* protected by lowmem_reserve[highest_zoneidx].
*
* highest_zoneidx is also used by reclaim/compaction to limit
* the target zone, since a zone higher than this index cannot
* be used for this allocation request.
*/
enum zone_type highest_zoneidx;
bool spread_dirty_pages;
};
/*
* This function returns the order of a free page in the buddy system. In
* general, page_zone(page)->lock must be held by the caller to prevent the
* page from being allocated in parallel and returning garbage as the order.
* If a caller does not hold page_zone(page)->lock, it must guarantee that the
* page cannot be allocated or merged in parallel. Alternatively, it must
* handle invalid values gracefully, and use buddy_order_unsafe() below.
*/
static inline unsigned int buddy_order(struct page *page)
{
/* PageBuddy() must be checked by the caller */
return page_private(page);
}
/*
* Like buddy_order(), but for callers who cannot afford to hold the zone lock.
* PageBuddy() should be checked first by the caller to minimize race window,
* and invalid values must be handled gracefully.
*
* READ_ONCE is used so that if the caller assigns the result into a local
* variable and e.g. tests it for valid range before using, the compiler cannot
* decide to remove the variable and inline the page_private(page) multiple
* times, potentially observing different values in the tests and the actual
* use of the result.
*/
#define buddy_order_unsafe(page) READ_ONCE(page_private(page))
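/*
* An illustrative sketch (not a definitive caller) of the pattern described
* above: read the order once into a local, sanity-check it, and only then
* use it. The surrounding scan loop and variables are hypothetical.
*
*	if (PageBuddy(page)) {
*		unsigned int order = buddy_order_unsafe(page);
*
*		if (order < NR_PAGE_ORDERS)
*			pfn += (1UL << order) - 1;
*	}
*/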
/*
* This function checks whether a page is free && is the buddy that
* we can coalesce with. A page and its buddy can be coalesced if
* (a) the buddy is not in a hole (check before calling!) &&
* (b) the buddy is in the buddy system &&
* (c) a page and its buddy have the same order &&
* (d) a page and its buddy are in the same zone.
*
* For recording whether a page is in the buddy system, we set PageBuddy.
* Setting, clearing, and testing PageBuddy is serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
static inline bool page_is_buddy(struct page *page, struct page *buddy,
unsigned int order)
{
if (!page_is_guard(buddy) && !PageBuddy(buddy))
return false;
if (buddy_order(buddy) != order)
return false;
/*
* zone check is done late to avoid uselessly calculating
* zone/node ids for pages that could never merge.
*/
if (page_zone_id(page) != page_zone_id(buddy))
return false;
VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
return true;
}
/*
* Locate the struct page for both the matching buddy in our
* pair (buddy1) and the combined O(n+1) page they form (page).
*
* 1) Any buddy B1 will have an order O twin B2 which satisfies
* the following equation:
* B2 = B1 ^ (1 << O)
* For example, if the starting buddy (buddy2) is #8 its order
* 1 buddy is #10:
* B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
*
* 2) Any buddy B will have an order O+1 parent P which
* satisfies the following equation:
* P = B & ~(1 << O)
*
* Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
*/
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
return page_pfn ^ (1 << order);
}
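/*
* A worked example of the relations above, for illustration only: for pfn 8,
* __find_buddy_pfn(8, 0) == 9, __find_buddy_pfn(8, 1) == 10 and
* __find_buddy_pfn(8, 3) == 0, while the order-1 parent of buddies 8 and 10
* is 8 & ~(1 << 1) == 8.
*/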
/*
* Find the buddy of @page and validate it.
* @page: The input page
* @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
* function is used in the performance-critical __free_one_page().
* @order: The order of the page
* @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
* page_to_pfn().
*
* The found buddy can be a non-PageBuddy page, be outside @page's zone, or
* have a different order than @page. Validation is necessary before using it.
*
* Return: the found buddy page or NULL if not found.
*/
static inline struct page *find_buddy_page_pfn(struct page *page,
unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
struct page *buddy;
buddy = page + (__buddy_pfn - pfn);
if (buddy_pfn)
*buddy_pfn = __buddy_pfn;
if (page_is_buddy(page, buddy, order))
return buddy;
return NULL;
}
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
unsigned long end_pfn, struct zone *zone);
static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
unsigned long end_pfn, struct zone *zone)
{
if (zone->contiguous)
return pfn_to_page(start_pfn);
return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}
void set_zone_contiguous(struct zone *zone);
bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
unsigned long nr_pages);
static inline void clear_zone_contiguous(struct zone *zone)
{
zone->contiguous = false;
}
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
enum meminit_context context);
/*
* This will have no effect, other than possibly generating a warning, if the
* caller passes in a non-large folio.
*/
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
return;
VM_WARN_ON_ONCE(order > MAX_FOLIO_ORDER);
folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef NR_PAGES_IN_LARGE_FOLIO
folio->_nr_pages = 1U << order;
#endif
}
bool __folio_unqueue_deferred_split(struct folio *folio);
static inline bool folio_unqueue_deferred_split(struct folio *folio)
{
if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
return false;
/*
* At this point, there is no one trying to add the folio to
* deferred_list. If folio is not in deferred_list, it's safe
* to check without acquiring the split_queue_lock.
*/
if (data_race(list_empty(&folio->_deferred_list)))
return false;
return __folio_unqueue_deferred_split(folio);
}
static inline struct folio *page_rmappable_folio(struct page *page)
{
struct folio *folio = (struct folio *)page;
if (folio && folio_test_large(folio))
folio_set_large_rmappable(folio);
return folio;
}
static inline void prep_compound_head(struct page *page, unsigned int order)
{
struct folio *folio = (struct folio *)page;
folio_set_order(folio, order);
atomic_set(&folio->_large_mapcount, -1);
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
atomic_set(&folio->_nr_pages_mapped, 0);
if (IS_ENABLED(CONFIG_MM_ID)) {
folio->_mm_ids = 0;
folio->_mm_id_mapcount[0] = -1;
folio->_mm_id_mapcount[1] = -1;
}
if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
atomic_set(&folio->_pincount, 0);
atomic_set(&folio->_entire_mapcount, -1);
}
if (order > 1)
INIT_LIST_HEAD(&folio->_deferred_list);
}
static inline void prep_compound_tail(struct page *head, int tail_idx)
{
struct page *p = head + tail_idx;
p->mapping = TAIL_MAPPING;
set_compound_head(p, head);
set_page_private(p, 0);
}
void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);
extern int user_min_free_kbytes;
struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
nodemask_t *);
#define __alloc_frozen_pages(...) \
alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
void free_frozen_pages(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);
#ifdef CONFIG_NUMA
struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
#else
static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
{
return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
}
#endif
#define alloc_frozen_pages(...) \
alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_frozen_pages_nolock(...) \
alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);
extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr,
int nid, bool exact_nid);
void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
unsigned long, enum meminit_context, struct vmem_altmap *, int,
bool);
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/*
* in mm/compaction.c
*/
/*
* compact_control is used to track pages being migrated and the free pages
* they are being migrated to during memory compaction. The free_pfn starts
* at the end of a zone and migrate_pfn begins at the start. Movable pages
* are moved to the end of a zone during a compaction run and the run
* completes when free_pfn <= migrate_pfn
*/
struct compact_control {
struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */
struct list_head migratepages; /* List of pages being migrated */
unsigned int nr_freepages; /* Number of isolated free pages */
unsigned int nr_migratepages; /* Number of pages to migrate */
unsigned long free_pfn; /* isolate_freepages search base */
/*
* Acts as an in/out parameter to page isolation for migration.
* isolate_migratepages uses it as a search base.
* isolate_migratepages_block will update the value to the next pfn
* after the last isolated one.
*/
unsigned long migrate_pfn;
unsigned long fast_start_pfn; /* a pfn to start linear scan from */
struct zone *zone;
unsigned long total_migrate_scanned;
unsigned long total_free_scanned;
unsigned short fast_search_fail;/* failures to use free list searches */
short search_order; /* order to start a fast search at */
const gfp_t gfp_mask; /* gfp mask of a direct compactor */
int order; /* order a direct compactor needs */
int migratetype; /* migratetype of direct compactor */
const unsigned int alloc_flags; /* alloc flags of a direct compactor */
const int highest_zoneidx; /* zone index of a direct compactor */
enum migrate_mode mode; /* Async or sync migration mode */
bool ignore_skip_hint; /* Scan blocks even if marked skip */
bool no_set_skip_hint; /* Don't mark blocks for skipping */
bool ignore_block_suitable; /* Scan blocks considered unsuitable */
bool direct_compaction; /* False from kcompactd or /proc/... */
bool proactive_compaction; /* kcompactd proactive compaction */
bool whole_zone; /* Whole zone should/has been scanned */
bool contended; /* Signal lock contention */
bool finish_pageblock; /* Scan the remainder of a pageblock. Used
* when there are potentially transient
* isolation or migration failures to
* ensure forward progress.
*/
bool alloc_contig; /* alloc_contig_range allocation */
};
/*
* Used in direct compaction when a page should be taken from the freelists
* immediately when one is created during the free path.
*/
struct capture_control {
struct compact_control *cc;
struct page *page;
};
unsigned long
isolate_freepages_range(struct compact_control *cc,
unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
struct cma;
#ifdef CONFIG_CMA
void *cma_reserve_early(struct cma *cma, unsigned long size);
void init_cma_pageblock(struct page *page);
#else
static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
{
return NULL;
}
static inline void init_cma_pageblock(struct page *page)
{
}
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
int migratetype, bool claimable);
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
return list_empty(&area->free_list[migratetype]);
}
/* mm/util.c */
struct anon_vma *folio_anon_vma(const struct folio *folio);
#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool write, int *locked);
bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
unsigned long bytes);
/*
* NOTE: This function can't tell whether the folio is "fully mapped" in the
* range.
* "fully mapped" means all the pages of the folio are associated with the
* page table of the range, while this function only checks whether the folio
* range is within the range [start, end). The caller needs to do a page table
* check if it cares about the page table association.
*
* Typical usage (like mlock or madvise) is:
* The caller knows at least one page of the folio is associated with the page
* table of the VMA and the range [start, end) intersects with the VMA range.
* The caller wants to know whether the folio is fully associated with the
* range. It calls this function to check whether the folio is in the range
* first, then checks the page table to know whether the folio is fully mapped
* to the range.
*/
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
pgoff_t pgoff, addr;
unsigned long vma_pglen = vma_pages(vma);
VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
if (start > end)
return false;
if (start < vma->vm_start)
start = vma->vm_start;
if (end > vma->vm_end)
end = vma->vm_end;
pgoff = folio_pgoff(folio);
/* if folio start address is not in vma range */
if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
return false;
addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
return !(addr < start || end - addr < folio_size(folio));
}
static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}
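/*
* A hedged sketch of the mlock/madvise-style usage described above; 'start',
* 'end' and the follow-up page-table walk are hypothetical:
*
*	if (!folio_within_range(folio, vma, start, end))
*		return;			// folio extends outside [start, end)
*
*	// Only now is it worth walking the page table to confirm that every
*	// page of the folio is actually mapped by this VMA.
*/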
/*
* mlock_vma_folio() and munlock_vma_folio():
* should be called with vma's mmap_lock held for read or write,
* under page table lock for the pte/pmd being added or removed.
*
* mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
* the end of folio_remove_rmap_*(); but new anon folios are managed by
* folio_add_lru_vma() calling mlock_new_folio().
*/
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
struct vm_area_struct *vma)
{
/*
* The VM_SPECIAL check here serves two purposes.
* 1) VM_IO check prevents migration from double-counting during mlock.
* 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
* is never left set on a VM_SPECIAL vma, there is an interval while
* file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
* still be set while VM_SPECIAL bits are added: so ignore it then.
*/
if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
mlock_folio(folio);
}
void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
struct vm_area_struct *vma)
{
/*
* Always munlock when this function is called. Ideally, we would only
* munlock if some page of the folio was unmapped from the VMA, leaving
* the folio no longer fully mapped to it.
*
* But it's not easy to confirm that is the situation. So we always
* munlock the folio, and page reclaim will correct it if that was wrong.
*/
if (unlikely(vma->vm_flags & VM_LOCKED))
munlock_folio(folio);
}
void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
/**
* vma_address - Find the virtual address a page range is mapped at
* @vma: The vma which maps this object.
* @pgoff: The page offset within its object.
* @nr_pages: The number of pages to consider.
*
* If any page in this range is mapped by this VMA, return the first address
* where any of these pages appear. Otherwise, return -EFAULT.
*/
static inline unsigned long vma_address(const struct vm_area_struct *vma,
pgoff_t pgoff, unsigned long nr_pages)
{
unsigned long address;
if (pgoff >= vma->vm_pgoff) {
address = vma->vm_start +
((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
/* Check for address beyond vma (or wrapped through 0?) */
if (address < vma->vm_start || address >= vma->vm_end)
address = -EFAULT;
} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
/* Test above avoids possibility of wrap to 0 on 32-bit */
address = vma->vm_start;
} else {
address = -EFAULT;
}
return address;
}
/*
* Then at what user virtual address will none of the range be found in vma?
* Assumes that vma_address() already returned a good starting address.
*/
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
struct vm_area_struct *vma = pvmw->vma;
pgoff_t pgoff;
unsigned long address;
/* Common case, plus ->pgoff is invalid for KSM */
if (pvmw->nr_pages == 1)
return pvmw->address + PAGE_SIZE;
pgoff = pvmw->pgoff + pvmw->nr_pages;
address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
/* Check for address beyond vma (or wrapped through 0?) */
if (address < vma->vm_start || address > vma->vm_end)
address = vma->vm_end;
return address;
}
static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
struct file *fpin)
{
int flags = vmf->flags;
if (fpin)
return fpin;
/*
* FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
* anything, so we only pin the file and drop the mmap_lock if only
* FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
*/
if (fault_flag_allow_retry_first(flags) &&
!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
fpin = get_file(vmf->vma->vm_file);
release_fault_lock(vmf);
}
return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */
/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);
bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
void init_deferred_page(unsigned long pfn, int nid);
enum mminit_level {
MMINIT_WARNING,
MMINIT_VERIFY,
MMINIT_TRACE
};
#ifdef CONFIG_DEBUG_MEMORY_INIT
extern int mminit_loglevel;
#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
if (level < mminit_loglevel) { \
if (level <= MMINIT_WARNING) \
pr_warn("mminit::" prefix " " fmt, ##arg); \
else \
printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
} \
} while (0)
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else
static inline void mminit_dprintk(enum mminit_level level,
const char *prefix, const char *fmt, ...)
{
}
static inline void mminit_verify_pageflags_layout(void)
{
}
static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
#define NODE_RECLAIM_NOSCAN -2
#define NODE_RECLAIM_FULL -1
#define NODE_RECLAIM_SOME 0
#define NODE_RECLAIM_SUCCESS 1
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
unsigned int order)
{
return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
return NUMA_NO_NODE;
}
#endif
static inline bool node_reclaim_enabled(void)
{
/* Is any node_reclaim_mode bit set? */
return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}
/*
* mm/memory-failure.c
*/
#ifdef CONFIG_MEMORY_FAILURE
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
void shake_folio(struct folio *folio);
typedef int hwpoison_filter_func_t(struct page *p);
void hwpoison_filter_register(hwpoison_filter_func_t *filter);
void hwpoison_filter_unregister(void);
#define MAGIC_HWPOISON 0x48575053U /* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
struct vm_area_struct *vma, struct list_head *to_kill,
unsigned long ksm_addr);
unsigned long page_mapped_in_vma(const struct page *page,
struct vm_area_struct *vma);
#else
static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
return -EBUSY;
}
#endif
extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN WMARK_MIN
#define ALLOC_WMARK_LOW WMARK_LOW
#define ALLOC_WMARK_HIGH WMARK_HIGH
#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
/*
* Only MMU archs have async oom victim reclaim - aka oom_reaper so we
* cannot assume a reduced access to memory reserves is sufficient for
* !MMU
*/
#ifdef CONFIG_MMU
#define ALLOC_OOM 0x08
#else
#define ALLOC_OOM ALLOC_NO_WATERMARKS
#endif
#define ALLOC_NON_BLOCK 0x10 /* Caller cannot block. Allow access
* to 25% of the min watermark or
* 62.5% if __GFP_HIGH is set.
*/
#define ALLOC_MIN_RESERVE 0x20 /* __GFP_HIGH set. Allow access to 50%
* of the min watermark.
*/
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT 0x0
#endif
#define ALLOC_HIGHATOMIC 0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_TRYLOCK 0x400 /* Only use spin_trylock in allocation path */
#define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
enum ttu_flags;
struct tlbflush_unmap_batch;
/*
* only for MM internal work items which do not depend on
* any allocations or locks which might depend on allocations
*/
extern struct workqueue_struct *mm_percpu_wq;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];
void setup_zone_pageset(struct zone *zone);
struct migration_target_control {
int nid; /* preferred node id */
nodemask_t *nmask;
gfp_t gfp_mask;
enum migrate_reason reason;
};
/*
* mm/filemap.c
*/
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
struct folio *folio, loff_t fpos, size_t size);
/*
* mm/vmalloc.c
*/
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift);
unsigned int get_vm_area_page_order(struct vm_struct *vm);
#else
static inline void vmalloc_init(void)
{
}
static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
return -EINVAL;
}
#endif
int __must_check __vmap_pages_range_noflush(unsigned long addr,
unsigned long end, pgprot_t prot,
struct page **pages, unsigned int page_shift);
void vunmap_range_noflush(unsigned long start, unsigned long end);
void __vunmap_range_noflush(unsigned long start, unsigned long end);
int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
unsigned long addr, int *flags, bool writable,
int *last_cpupid);
void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_folio(struct folio *folio);
struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long shift,
unsigned long vm_flags, unsigned long start,
unsigned long end, int node, gfp_t gfp_mask,
const void *caller);
/*
* mm/gup.c
*/
int __must_check try_grab_folio(struct folio *folio, int refs,
unsigned int flags);
/*
* mm/huge_memory.c
*/
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, bool write);
/*
* Parses a string with mem suffixes into its order. Useful to parse kernel
* parameters.
*/
static inline int get_order_from_str(const char *size_str,
unsigned long valid_orders)
{
unsigned long size;
char *endptr;
int order;
size = memparse(size_str, &endptr);
if (!is_power_of_2(size))
return -EINVAL;
order = get_order(size);
if (BIT(order) & ~valid_orders)
return -EINVAL;
return order;
}
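/*
* An illustrative sketch of parsing a hypothetical boot parameter such as
* "foo_size=2M"; THP_ORDERS_ALL is just an example of a valid-order mask:
*
*	int order = get_order_from_str(str, THP_ORDERS_ALL);
*
*	if (order < 0)
*		return -EINVAL;
*	// remember 'order' for later allocations
*/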
enum {
/* mark page accessed */
FOLL_TOUCH = 1 << 16,
/* a retry, previous pass started an IO */
FOLL_TRIED = 1 << 17,
/* we are working on non-current tsk/mm */
FOLL_REMOTE = 1 << 18,
/* pages must be released via unpin_user_page */
FOLL_PIN = 1 << 19,
/* gup_fast: prevent fall-back to slow gup */
FOLL_FAST_ONLY = 1 << 20,
/* allow unlocking the mmap lock */
FOLL_UNLOCKABLE = 1 << 21,
/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
FOLL_MADV_POPULATE = 1 << 22,
};
#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
FOLL_MADV_POPULATE)
/*
* Indicates for which pages that are write-protected in the page table,
* whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
* GUP pin will remain consistent with the pages mapped into the page tables
* of the MM.
*
* Temporary unmapping of PageAnonExclusive() pages or clearing of
* PageAnonExclusive() has to protect against concurrent GUP:
* * Ordinary GUP: Using the PT lock
* * GUP-fast and fork(): mm->write_protect_seq
* * GUP-fast and KSM or temporary unmapping (swap, migration): see
* folio_try_share_anon_rmap_*()
*
* Must be called with the (sub)page that's actually referenced via the
* page table entry, which might not necessarily be the head page for a
* PTE-mapped THP.
*
* If the vma is NULL, we're coming from the GUP-fast path and might have
* to fall back to the slow path just to look up the vma.
*/
static inline bool gup_must_unshare(struct vm_area_struct *vma,
unsigned int flags, struct page *page)
{
/*
* FOLL_WRITE is implicitly handled correctly as the page table entry
* has to be writable -- and if it references (part of) an anonymous
* folio, that part is required to be marked exclusive.
*/
if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
return false;
/*
* Note: PageAnon(page) is stable until the page is actually getting
* freed.
*/
if (!PageAnon(page)) {
/*
* We only care about R/O long-term pinning: R/O short-term
* pinning does not have the semantics to observe successive
* changes through the process page tables.
*/
if (!(flags & FOLL_LONGTERM))
return false;
/* We really need the vma ... */
if (!vma)
return true;
/*
* ... because we only care about writable private ("COW")
* mappings where we have to break COW early.
*/
return is_cow_mapping(vma->vm_flags);
}
/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
smp_rmb();
/*
* Note that KSM pages cannot be exclusive, and consequently,
* cannot get pinned.
*/
return !PageAnonExclusive(page);
}
extern bool mirrored_kernelcore;
bool memblock_has_mirror(void);
void memblock_free_all(void);
static __always_inline void vma_set_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
pgoff_t pgoff)
{
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
}
static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
/*
* NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
* enablements, because if soft-dirty is not compiled in,
* VM_SOFTDIRTY is defined as 0x0, and then !(vm_flags & VM_SOFTDIRTY)
* would always be true.
*/
if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
return false;
/*
* Soft-dirty is kind of special: its tracking is enabled when the
* VM_SOFTDIRTY flag is *not* set on the vma.
*/
return !(vma->vm_flags & VM_SOFTDIRTY);
}
static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}
static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}
void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid);
void __meminit __init_page_from_nid(unsigned long pfn, int nid);
/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
int priority);
#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
struct shrinker *shrinker, const char *fmt, va_list ap)
{
shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
return shrinker->name ? 0 : -ENOMEM;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
kfree_const(shrinker->name);
shrinker->name = NULL;
}
extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
const char *fmt, va_list ap)
{
return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id)
{
*debugfs_id = -1;
return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do { \
if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
xas_set_update(xas, workingset_update_node); \
xas_set_lru(xas, &shadow_nodes); \
} \
} while (0)
/* mremap.c */
unsigned long move_page_tables(struct pagetable_move_control *pmc);
#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
#else /* CONFIG_UNACCEPTED_MEMORY */
static inline void accept_page(struct page *page)
{
}
#endif /* CONFIG_UNACCEPTED_MEMORY */
/* pagewalk.c */
int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
pgd_t *pgd, void *private);
/* pt_reclaim.c */
bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
pmd_t pmdval);
void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
struct mmu_gather *tlb);
#ifdef CONFIG_PT_RECLAIM
bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
struct zap_details *details);
#else
static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
struct zap_details *details)
{
return false;
}
#endif /* CONFIG_PT_RECLAIM */
void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
#endif /* __MM_INTERNAL_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic Timer-queue
*
* Manages a simple queue of timers, ordered by expiration time.
* Uses rbtrees for quick list adds and expiration.
*
* NOTE: All of the following functions need to be serialized
* to avoid races. No locking is done by this library code.
*/
#include <linux/bug.h>
#include <linux/timerqueue.h>
#include <linux/rbtree.h>
#include <linux/export.h>
#define __node_2_tq(_n) \
rb_entry((_n), struct timerqueue_node, node)
static inline bool __timerqueue_less(struct rb_node *a, const struct rb_node *b)
{
return __node_2_tq(a)->expires < __node_2_tq(b)->expires;
}
/**
* timerqueue_add - Adds timer to timerqueue.
*
* @head: head of timerqueue
* @node: timer node to be added
*
* Adds the timer node to the timerqueue, sorted by the node's expires
* value. Returns true if the newly added timer is the first expiring timer in
* the queue.
*/
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
/* Make sure we don't add nodes that are already added */
WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less);
}
EXPORT_SYMBOL_GPL(timerqueue_add);
/**
* timerqueue_del - Removes a timer from the timerqueue.
*
* @head: head of timerqueue
* @node: timer node to be removed
*
* Removes the timer node from the timerqueue. Returns true if the queue is
* not empty after the remove.
*/
bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
{
WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
rb_erase_cached(&node->node, &head->rb_root);
RB_CLEAR_NODE(&node->node);
return !RB_EMPTY_ROOT(&head->rb_root.rb_root);
}
EXPORT_SYMBOL_GPL(timerqueue_del);
/**
* timerqueue_iterate_next - Returns the timer after the provided timer
*
* @node: Pointer to a timer.
*
* Provides the timer that is after the given node. This is used, when
* necessary, to iterate through the list of timers in a timer list
* without modifying the list.
*/
struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node)
{
struct rb_node *next;
if (!node)
return NULL;
next = rb_next(&node->node);
if (!next)
return NULL;
return container_of(next, struct timerqueue_node, node);
}
EXPORT_SYMBOL_GPL(timerqueue_iterate_next);
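/*
* A minimal usage sketch, assuming a hypothetical caller that provides its
* own serialization as required by the NOTE at the top of this file:
*
*	struct timerqueue_head head;
*	struct timerqueue_node node;
*
*	timerqueue_init_head(&head);
*	timerqueue_init(&node);
*	node.expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);
*	timerqueue_add(&head, &node);
*
*	// The earliest-expiring timer is cached, so lookup is O(1):
*	struct timerqueue_node *next = timerqueue_getnext(&head);
*
*	timerqueue_del(&head, next);
*/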
/* SPDX-License-Identifier: GPL-2.0 */
/*
* kernel/workqueue_internal.h
*
* Workqueue internal header file. Only to be included by workqueue and
* core kernel subsystems.
*/
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
struct worker_pool;
/*
* The poor guys doing the actual heavy lifting. All on-duty workers are
* either serving the manager role, on idle list or on busy hash. For
* details on the locking annotation (L, I, X...), refer to workqueue.c.
*
* Only to be used in workqueue and async.
*/
struct worker {
/* on idle list while idle, on busy hash table while busy */
union {
struct list_head entry; /* L: while idle */
struct hlist_node hentry; /* L: while busy */
};
struct work_struct *current_work; /* K: work being processed and its */
work_func_t current_func; /* K: function */
struct pool_workqueue *current_pwq; /* K: pwq */
u64 current_at; /* K: runtime at start or last wakeup */
unsigned int current_color; /* K: color */
int sleeping; /* S: is worker sleeping? */
/* used by the scheduler to determine a worker's last known identity */
work_func_t last_func; /* K: last work's fn */
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
struct worker_pool *pool; /* A: the associated pool */
/* L: for rescuers */
struct list_head node; /* A: anchored at pool->workers */
/* A: runs through worker->node */
unsigned long last_active; /* K: last active timestamp */
unsigned int flags; /* L: flags */
int id; /* I: worker id */
/*
* Opaque string set with work_set_desc(). Printed out with task
* dump for debugging - WARN, BUG, panic or sysrq.
*/
char desc[WORKER_DESC_LEN];
/* used only by rescuers to point to the target workqueue */
struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
};
/**
* current_wq_worker - return struct worker if %current is a workqueue worker
*/
static inline struct worker *current_wq_worker(void)
{
if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
/*
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched/ and workqueue.c.
*/
void wq_worker_running(struct task_struct *task);
void wq_worker_sleeping(struct task_struct *task);
void wq_worker_tick(struct task_struct *task);
work_func_t wq_worker_last_func(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
#ifndef __LINUX_OVERFLOW_H
#define __LINUX_OVERFLOW_H
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/const.h>
/*
* We need to compute the minimum and maximum values representable in a given
* type. These macros may also be useful elsewhere. It would seem more obvious
* to do something like:
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
*
* Unfortunately, the middle expressions, strictly speaking, have
* undefined behaviour, and at least some versions of gcc warn about
* the type_max expression (but not if -fsanitize=undefined is in
* effect; in that case, the warning is deferred to runtime...).
*
* The slightly excessive casting in type_min is to make sure the
* macros also produce sensible values for the exotic type _Bool. [The
* overflow checkers only almost work for _Bool, but that's
* a-feature-not-a-bug, since people shouldn't be doing arithmetic on
* _Bools. Besides, the gcc builtins don't allow _Bool* as third
* argument.]
*
* Idea stolen from
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define __type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_max(t) __type_max(typeof(t))
#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
#define type_min(t) __type_min(typeof(t))
/*
* Avoids triggering -Wtype-limits compilation warning,
* while using unsigned data types to check a < 0.
*/
#define is_non_negative(a) ((a) > 0 || (a) == 0)
#define is_negative(a) (!(is_non_negative(a)))
/*
* Allows for effectively applying __must_check to a macro so we can have
* both the type-agnostic benefits of the macros while also being able to
* enforce that the return value is, in fact, checked.
*/
static inline bool __must_check __must_check_overflow(bool overflow)
{
return unlikely(overflow);
}
/**
* check_add_overflow() - Calculate addition with overflow checking
* @a: first addend
* @b: second addend
* @d: pointer to store sum
*
* Returns true on wrap-around, false otherwise.
*
* *@d holds the results of the attempted addition, regardless of whether
* wrap-around occurred.
*/
#define check_add_overflow(a, b, d) \
__must_check_overflow(__builtin_add_overflow(a, b, d))
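/*
* A small hypothetical usage sketch: adding two untrusted u32 lengths with
* the wrap-around check applied before the sum is used.
*
*	u32 total;
*
*	if (check_add_overflow(hdr_len, payload_len, &total))
*		return -EOVERFLOW;
*	// 'total' is now known to fit in a u32.
*/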
/**
* wrapping_add() - Intentionally perform a wrapping addition
* @type: type for result of calculation
* @a: first addend
* @b: second addend
*
* Return the potentially wrapped-around addition without
* tripping any wrap-around sanitizers that may be enabled.
*/
#define wrapping_add(type, a, b) \
({ \
type __val; \
__builtin_add_overflow(a, b, &__val); \
__val; \
})
/**
* wrapping_assign_add() - Intentionally perform a wrapping increment assignment
* @var: variable to be incremented
* @offset: amount to add
*
* Increments @var by @offset with wrap-around. Returns the resulting
* value of @var. Will not trip any wrap-around sanitizers.
*
* Returns the new value of @var.
*/
#define wrapping_assign_add(var, offset) \
({ \
typeof(var) *__ptr = &(var); \
*__ptr = wrapping_add(typeof(var), *__ptr, offset); \
})
/**
* check_sub_overflow() - Calculate subtraction with overflow checking
* @a: minuend; value to subtract from
* @b: subtrahend; value to subtract from @a
* @d: pointer to store difference
*
* Returns true on wrap-around, false otherwise.
*
* *@d holds the results of the attempted subtraction, regardless of whether
* wrap-around occurred.
*/
#define check_sub_overflow(a, b, d) \
__must_check_overflow(__builtin_sub_overflow(a, b, d))
/**
* wrapping_sub() - Intentionally perform a wrapping subtraction
* @type: type for result of calculation
* @a: minuend; value to subtract from
* @b: subtrahend; value to subtract from @a
*
* Return the potentially wrapped-around subtraction without
* tripping any wrap-around sanitizers that may be enabled.
*/
#define wrapping_sub(type, a, b) \
({ \
type __val; \
__builtin_sub_overflow(a, b, &__val); \
__val; \
})
/**
* wrapping_assign_sub() - Intentionally perform a wrapping decrement assign
* @var: variable to be decremented
* @offset: amount to subtract
*
* Decrements @var by @offset with wrap-around. Returns the resulting
* value of @var. Will not trip any wrap-around sanitizers.
*
* Returns the new value of @var.
*/
#define wrapping_assign_sub(var, offset) \
({ \
typeof(var) *__ptr = &(var); \
*__ptr = wrapping_sub(typeof(var), *__ptr, offset); \
})
/**
* check_mul_overflow() - Calculate multiplication with overflow checking
* @a: first factor
* @b: second factor
* @d: pointer to store product
*
* Returns true on wrap-around, false otherwise.
*
* *@d holds the results of the attempted multiplication, regardless of whether
* wrap-around occurred.
*/
#define check_mul_overflow(a, b, d) \
__must_check_overflow(__builtin_mul_overflow(a, b, d))
/**
* wrapping_mul() - Intentionally perform a wrapping multiplication
* @type: type for result of calculation
* @a: first factor
* @b: second factor
*
* Return the potentially wrapped-around multiplication without
* tripping any wrap-around sanitizers that may be enabled.
*/
#define wrapping_mul(type, a, b) \
({ \
type __val; \
__builtin_mul_overflow(a, b, &__val); \
__val; \
})
/**
* check_shl_overflow() - Calculate a left-shifted value and check overflow
* @a: Value to be shifted
* @s: How many bits left to shift
* @d: Pointer to where to store the result
*
* Computes *@d = (@a << @s)
*
* Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
* make sense. Example conditions:
*
* - '@a << @s' causes bits to be lost when stored in *@d.
* - '@s' is garbage (e.g. negative) or so large that the result of
* '@a << @s' is guaranteed to be 0.
* - '@a' is negative.
* - '@a << @s' sets the sign bit, if any, in '*@d'.
*
* '*@d' will hold the results of the attempted shift, but is not
* considered "safe for use" if true is returned.
*/
#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
typeof(a) _a = a; \
typeof(s) _s = s; \
typeof(d) _d = d; \
unsigned long long _a_full = _a; \
unsigned int _to_shift = \
is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
*_d = (_a_full << _to_shift); \
(_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
(*_d >> _to_shift) != _a); \
}))
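/*
* A hedged example: converting a hypothetical block count to bytes, where the
* shift amount comes from untrusted input.
*
*	u64 bytes;
*
*	if (check_shl_overflow((u64)nr_blocks, blkbits, &bytes))
*		return -EINVAL;
*/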
#define __overflows_type_constexpr(x, T) ( \
is_unsigned_type(typeof(x)) ? \
(x) > type_max(T) : \
is_unsigned_type(typeof(T)) ? \
(x) < 0 || (x) > type_max(T) : \
(x) < type_min(T) || (x) > type_max(T))
#define __overflows_type(x, T) ({ \
typeof(T) v = 0; \
check_add_overflow((x), v, &v); \
})
/**
* overflows_type - helper for checking the overflows between value, variables,
* or data type
*
* @n: source constant value or variable to be checked
* @T: destination variable or data type proposed to store @n
*
* Compares the @n expression for whether or not it can safely fit in
* the storage of the type in @T. @n and @T can have different types.
* If @n is a constant expression, this will also resolve to a constant
* expression.
*
* Returns: true if overflow can occur, false otherwise.
*/
#define overflows_type(n, T) \
__builtin_choose_expr(__is_constexpr(n), \
__overflows_type_constexpr(n, T), \
__overflows_type(n, T))
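/*
* Illustrative checks only; 'size' and 'ret' are hypothetical variables:
*
*	u64 size;
*	int ret;
*
*	if (overflows_type(size, ret))			// run-time check
*		return -E2BIG;
*	BUILD_BUG_ON(overflows_type(1UL << 20, ret));	// compile-time constant
*/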
/**
* range_overflows() - Check if a range is out of bounds
* @start: Start of the range.
* @size: Size of the range.
* @max: Exclusive upper boundary.
*
* A strict check to determine if the range [@start, @start + @size) is
* invalid with respect to the allowable range [0, @max). Any range
* starting at or beyond @max is considered an overflow, even if @size is 0.
*
* Returns: true if the range is out of bounds.
*/
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
typeof(max) max__ = (max); \
(void)(&start__ == &size__); \
(void)(&start__ == &max__); \
start__ >= max__ || size__ > max__ - start__; \
})
/**
* range_overflows_t() - Check if a range is out of bounds
* @type: Data type to use.
* @start: Start of the range.
* @size: Size of the range.
* @max: Exclusive upper boundary.
*
* Same as range_overflows() but forcing the parameters to @type.
*
* Returns: true if the range is out of bounds.
*/
#define range_overflows_t(type, start, size, max) \
range_overflows((type)(start), (type)(size), (type)(max))
/**
* range_end_overflows() - Check if a range's endpoint is out of bounds
* @start: Start of the range.
* @size: Size of the range.
* @max: Exclusive upper boundary.
*
* Checks only if the endpoint of a range (@start + @size) exceeds @max.
* Unlike range_overflows(), a zero-sized range at the boundary (@start == @max)
* is not considered an overflow. Useful for iterator-style checks.
*
* Returns: true if the endpoint exceeds the boundary.
*/
#define range_end_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
typeof(max) max__ = (max); \
(void)(&start__ == &size__); \
(void)(&start__ == &max__); \
start__ > max__ || size__ > max__ - start__; \
})
/**
* range_end_overflows_t() - Check if a range's endpoint is out of bounds
* @type: Data type to use.
* @start: Start of the range.
* @size: Size of the range.
* @max: Exclusive upper boundary.
*
* Same as range_end_overflows() but forcing the parameters to @type.
*
* Returns: true if the endpoint exceeds the boundary.
*/
#define range_end_overflows_t(type, start, size, max) \
range_end_overflows((type)(start), (type)(size), (type)(max))
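/*
* A worked contrast between the two families, for illustration, with
* max == 100: a zero-sized range starting exactly at the boundary differs.
*
*	range_overflows(100, 0, 100)	 == true	// start at/beyond max
*	range_end_overflows(100, 0, 100) == false	// endpoint 100 == max is fine
*	range_overflows(90, 20, 100)	 == true	// 90 + 20 > 100
*	range_end_overflows(90, 10, 100) == false	// endpoint exactly 100
*/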
/**
* castable_to_type - like __same_type(), but also allows for casted literals
*
* @n: variable or constant value
* @T: variable or data type
*
* Unlike the __same_type() macro, this allows a constant value as the
* first argument. If this value would not overflow into an assignment
* of the second argument's type, it returns true. Otherwise, this falls
* back to __same_type().
*/
#define castable_to_type(n, T) \
__builtin_choose_expr(__is_constexpr(n), \
!__overflows_type_constexpr(n, T), \
__same_type(n, T))
/**
* size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
* @factor1: first factor
* @factor2: second factor
*
* Returns: calculate @factor1 * @factor2, both promoted to size_t,
* with any overflow causing the return value to be SIZE_MAX. The
* lvalue must be size_t to avoid implicit type conversion.
*/
static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
{
size_t bytes;
if (check_mul_overflow(factor1, factor2, &bytes))
return SIZE_MAX;
return bytes;
}
/**
* size_add() - Calculate size_t addition with saturation at SIZE_MAX
* @addend1: first addend
* @addend2: second addend
*
* Returns: calculate @addend1 + @addend2, both promoted to size_t,
* with any overflow causing the return value to be SIZE_MAX. The
* lvalue must be size_t to avoid implicit type conversion.
*/
static inline size_t __must_check size_add(size_t addend1, size_t addend2)
{
size_t bytes;
if (check_add_overflow(addend1, addend2, &bytes))
return SIZE_MAX;
return bytes;
}
/**
* size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
* @minuend: value to subtract from
* @subtrahend: value to subtract from @minuend
*
* Returns: calculate @minuend - @subtrahend, both promoted to size_t,
* with any overflow causing the return value to be SIZE_MAX. For
* composition with the size_add() and size_mul() helpers, neither
* argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
* The lvalue must be size_t to avoid implicit type conversion.
*/
static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
{
size_t bytes;
if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
check_sub_overflow(minuend, subtrahend, &bytes))
return SIZE_MAX;
return bytes;
}
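/*
* A hypothetical composition sketch: saturation makes nesting safe, since a
* SIZE_MAX intermediate keeps the final result at SIZE_MAX and the allocator
* then fails cleanly. 'nr_entries', 'entry_size' and 'hdr_size' are made up.
*
*	size_t bytes = size_add(size_mul(nr_entries, entry_size), hdr_size);
*
*	p = kvmalloc(bytes, GFP_KERNEL);
*/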
/**
* array_size() - Calculate size of 2-dimensional array.
* @a: dimension one
* @b: dimension two
*
* Calculates size of 2-dimensional array: @a * @b.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
#define array_size(a, b) size_mul(a, b)
/**
* array3_size() - Calculate size of 3-dimensional array.
* @a: dimension one
* @b: dimension two
* @c: dimension three
*
* Calculates size of 3-dimensional array: @a * @b * @c.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
/**
* flex_array_size() - Calculate size of a flexible array member
* within an enclosing structure.
* @p: Pointer to the structure.
* @member: Name of the flexible array member.
* @count: Number of elements in the array.
*
* Calculates size of a flexible array of @count number of @member
* elements, at the end of structure @p.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define flex_array_size(p, member, count) \
__builtin_choose_expr(__is_constexpr(count), \
(count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
/**
* struct_size() - Calculate size of structure with trailing flexible array.
* @p: Pointer to the structure.
* @member: Name of the array member.
* @count: Number of elements in the array.
*
* Calculates size of memory needed for structure of @p followed by an
* array of @count number of @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define struct_size(p, member, count) \
__builtin_choose_expr(__is_constexpr(count), \
sizeof(*(p)) + flex_array_size(p, member, count), \
size_add(sizeof(*(p)), flex_array_size(p, member, count)))
/**
* struct_size_t() - Calculate size of structure with trailing flexible array
* @type: structure type name.
* @member: Name of the array member.
* @count: Number of elements in the array.
*
* Calculates size of memory needed for structure @type followed by an
* array of @count number of @member elements. Prefer using struct_size()
* when possible instead, to keep calculations associated with a specific
* instance variable of type @type.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define struct_size_t(type, member, count) \
struct_size((type *)NULL, member, count)
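/*
* The classic allocation pattern these helpers support, sketched with a
* hypothetical structure; 'n' is the desired element count:
*
*	struct foo {
*		int count;
*		u32 items[] __counted_by(count);
*	};
*
*	struct foo *p = kmalloc(struct_size(p, items, n), GFP_KERNEL);
*
*	if (p)
*		p->count = n;
*/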
/**
* __DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
* Enables caller macro to pass arbitrary trailing expressions
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
* @trailer: Trailing expressions for attributes and/or initializers.
*/
#define __DEFINE_FLEX(type, name, member, count, trailer...) \
_Static_assert(__builtin_constant_p(count), \
"onstack flex array members require compile-time const count"); \
union { \
u8 bytes[struct_size_t(type, member, count)]; \
type obj; \
} name##_u trailer; \
type *name = (type *)&name##_u
/**
* _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
* Enables caller macro to pass (different) initializer.
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
* @initializer: Initializer expression (e.g., pass `= { }` at minimum).
*/
#define _DEFINE_FLEX(type, name, member, count, initializer...) \
__DEFINE_FLEX(type, name, member, count, = { .obj initializer })
/**
* DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing
* flexible array member, when it does not have a __counted_by annotation.
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
*
* Define a zeroed, on-stack, instance of @type structure with a trailing
* flexible array member.
* Use __struct_size(@name) to get compile-time size of it afterwards.
* Use __member_size(@name->member) to get compile-time size of @name members.
* Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
* elements in array @member.
*/
#define DEFINE_RAW_FLEX(type, name, member, count) \
__DEFINE_FLEX(type, name, member, count, = { })
/**
* DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
* flexible array member.
*
* @TYPE: structure type name, including "struct" keyword.
* @NAME: Name for a variable to define.
* @MEMBER: Name of the array member.
* @COUNTER: Name of the __counted_by member.
* @COUNT: Number of elements in the array; must be compile-time const.
*
* Define a zeroed, on-stack, instance of @TYPE structure with a trailing
* flexible array member.
* Use __struct_size(@NAME) to get compile-time size of it afterwards.
* Use __member_size(@NAME->@MEMBER) to get compile-time size of @NAME members.
* Use STACK_FLEX_ARRAY_SIZE(@NAME, @MEMBER) to get compile-time number of
* elements in array @MEMBER.
*/
#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \
_DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, })
/**
* STACK_FLEX_ARRAY_SIZE() - helper macro for DEFINE_FLEX() family.
* Returns the number of elements in @array.
*
* @name: Name for a variable defined in DEFINE_RAW_FLEX()/DEFINE_FLEX().
* @array: Name of the array member.
*/
#define STACK_FLEX_ARRAY_SIZE(name, array) \
(__member_size((name)->array) / sizeof(*(name)->array) + \
__must_be_array((name)->array))
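/*
* Editorial illustration (not part of the original header): a minimal sketch of
* DEFINE_RAW_FLEX() declaring a zeroed on-stack instance whose trailing array
* length is a compile-time constant. The structure and hypo_*() names are
* hypothetical.
*
*	struct hypo_msg {
*		u8 len;
*		u8 payload[];
*	};
*
*	static void hypo_send(void)
*	{
*		DEFINE_RAW_FLEX(struct hypo_msg, msg, payload, 8);
*
*		msg->len = STACK_FLEX_ARRAY_SIZE(msg, payload);
*		hypo_tx(msg, __struct_size(msg));
*	}
*
* With a __counted_by() annotation on the flexible array, DEFINE_FLEX() can be
* used instead so that the counter member is initialized as well.
*/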
#endif /* __LINUX_OVERFLOW_H */
// SPDX-License-Identifier: GPL-2.0
/*
* property.c - Unified device property interface.
*
* Copyright (C) 2014, Intel Corporation
* Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kconfig.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
struct fwnode_handle *__dev_fwnode(struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode;
}
EXPORT_SYMBOL_GPL(__dev_fwnode);
const struct fwnode_handle *__dev_fwnode_const(const struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode;
}
EXPORT_SYMBOL_GPL(__dev_fwnode_const);
/**
* device_property_present - check if a property of a device is present
* @dev: Device whose property is being checked
* @propname: Name of the property
*
* Check if property @propname is present in the device firmware description.
*
* Return: true if property @propname is present. Otherwise, returns false.
*/
bool device_property_present(const struct device *dev, const char *propname)
{
return fwnode_property_present(dev_fwnode(dev), propname);
}
EXPORT_SYMBOL_GPL(device_property_present);
/**
* fwnode_property_present - check if a property of a firmware node is present
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*
* Return: true if property @propname is present. Otherwise, returns false.
*/
bool fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
bool ret;
if (IS_ERR_OR_NULL(fwnode))
return false;
ret = fwnode_call_bool_op(fwnode, property_present, propname);
if (ret)
return ret;
return fwnode_call_bool_op(fwnode->secondary, property_present, propname);
}
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
* device_property_read_bool - Return the value for a boolean property of a device
* @dev: Device whose property is being checked
* @propname: Name of the property
*
* Return if property @propname is true or false in the device firmware description.
*
* Return: true if property @propname is true. Otherwise, returns false.
*/
bool device_property_read_bool(const struct device *dev, const char *propname)
{
return fwnode_property_read_bool(dev_fwnode(dev), propname);
}
EXPORT_SYMBOL_GPL(device_property_read_bool);
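/*
* Editorial illustration (not part of the original file): a minimal probe-time
* sketch of the boolean helpers above. The "vendor,..." property names and the
* hypo_setup() helper are hypothetical.
*
*	static int hypo_probe(struct device *dev)
*	{
*		bool wakeup;
*
*		if (!device_property_present(dev, "vendor,calibration"))
*			dev_warn(dev, "no calibration data described\n");
*
*		wakeup = device_property_read_bool(dev, "vendor,wakeup-source");
*		return hypo_setup(dev, wakeup);
*	}
*/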
/**
* fwnode_property_read_bool - Return the value for a boolean property of a firmware node
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*
* Return if property @propname is true or false in the firmware description.
*
* Return: true if property @propname is true. Otherwise, returns false.
*/
bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
const char *propname)
{
bool ret;
if (IS_ERR_OR_NULL(fwnode))
return false;
ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
if (ret)
return ret;
return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u8 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u8() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval)
{
return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u8_array);
/**
* device_property_read_u16_array - return a u16 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u16 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u16() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u16_array(const struct device *dev, const char *propname,
u16 *val, size_t nval)
{
return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u16_array);
/**
* device_property_read_u32_array - return a u32 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u32 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u32() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u32_array(const struct device *dev, const char *propname,
u32 *val, size_t nval)
{
return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u32_array);
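/*
* Editorial illustration (not part of the original file): a sketch of the
* recommended count-then-read pattern for integer array properties. The
* "vendor,thresholds" property name is hypothetical.
*
*	int n = device_property_count_u32(dev, "vendor,thresholds");
*	u32 *vals;
*	int ret;
*
*	if (n <= 0)
*		return n ? n : -ENODATA;
*	vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
*	if (!vals)
*		return -ENOMEM;
*	ret = device_property_read_u32_array(dev, "vendor,thresholds", vals, n);
*/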
/**
* device_property_read_u64_array - return a u64 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u64 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u64() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u64_array(const struct device *dev, const char *propname,
u64 *val, size_t nval)
{
return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u64_array);
/**
* device_property_read_string_array - return a string array property of device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of string properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_string_array_count() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values read on success if @val is non-NULL,
* number of values available on success if @val is NULL,
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string_array(const struct device *dev, const char *propname,
const char **val, size_t nval)
{
return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_string_array);
/**
* device_property_read_string - return a string property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The value is stored here
*
* Function reads property @propname from the device firmware description and
* stores the value into @val if found. The value is checked to be a string.
*
* Return: %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property type is not a string,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string(const struct device *dev, const char *propname,
const char **val)
{
return fwnode_property_read_string(dev_fwnode(dev), propname, val);
}
EXPORT_SYMBOL_GPL(device_property_read_string);
/**
* device_property_match_string - find a string in an array and return index
* @dev: Device to get the property of
* @propname: Name of the property holding the array
* @string: String to look for
*
* Find a given string in a string array and if it is found return the
* index back.
*
* Return: index, starting from %0, if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_match_string(const struct device *dev, const char *propname,
const char *string)
{
return fwnode_property_match_string(dev_fwnode(dev), propname, string);
}
EXPORT_SYMBOL_GPL(device_property_match_string);
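/*
* Editorial illustration (not part of the original file): a sketch of reading a
* single string property and locating that value inside a string-array
* property. The property names are hypothetical.
*
*	const char *mode;
*	int idx, ret;
*
*	ret = device_property_read_string(dev, "vendor,default-mode", &mode);
*	if (ret)
*		return ret;
*	idx = device_property_match_string(dev, "vendor,supported-modes", mode);
*	if (idx < 0)
*		return idx;
*/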
static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
int ret;
if (IS_ERR_OR_NULL(fwnode))
return -EINVAL;
ret = fwnode_call_int_op(fwnode, property_read_int_array, propname,
elem_size, val, nval);
if (ret != -EINVAL)
return ret;
return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname,
elem_size, val, nval);
}
/**
* fwnode_property_read_u8_array - return a u8 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u8 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u8() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u8),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
/**
* fwnode_property_read_u16_array - return a u16 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u16 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u16() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u16),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
/**
* fwnode_property_read_u32_array - return a u32 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u32 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u32() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u32),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
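/*
* Editorial illustration (not part of the original file): the same
* count-then-read pattern at the fwnode level, e.g. for a child node that is
* not itself a struct device. The "vendor,levels" property name is
* hypothetical.
*
*	int n = fwnode_property_count_u32(child, "vendor,levels");
*
*	if (n > 0) {
*		u32 *levels = kcalloc(n, sizeof(*levels), GFP_KERNEL);
*
*		if (!levels)
*			return -ENOMEM;
*		fwnode_property_read_u32_array(child, "vendor,levels", levels, n);
*	}
*/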
/**
* fwnode_property_read_u64_array - return a u64 array property of a firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u64 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u64() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u64),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
/**
* fwnode_property_read_string_array - return string array property of a node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read a string array property @propname from the given firmware node and store
* them to @val if found.
*
* It's recommended to call fwnode_property_string_array_count() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values read on success if @val is non-NULL,
* number of values available on success if @val is NULL,
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
int ret;
if (IS_ERR_OR_NULL(fwnode))
return -EINVAL;
ret = fwnode_call_int_op(fwnode, property_read_string_array, propname,
val, nval);
if (ret != -EINVAL)
return ret;
return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname,
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
/**
* fwnode_property_read_string - return a string property of a firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The value is stored here
*
* Read property @propname from the given firmware node and store the value into
* @val if found. The value is checked to be a string.
*
* Return: %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not a string,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val)
{
int ret = fwnode_property_read_string_array(fwnode, propname, val, 1);
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string);
/**
* fwnode_property_match_string - find a string in an array and return index
* @fwnode: Firmware node to get the property of
* @propname: Name of the property holding the array
* @string: String to look for
*
* Find a given string in a string array and if it is found return the
* index back.
*
* Return: index, starting from %0, if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string)
{
const char **values;
int nval, ret;
nval = fwnode_property_string_array_count(fwnode, propname);
if (nval < 0)
return nval;
if (nval == 0)
return -ENODATA;
values = kcalloc(nval, sizeof(*values), GFP_KERNEL);
if (!values)
return -ENOMEM;
ret = fwnode_property_read_string_array(fwnode, propname, values, nval);
if (ret < 0)
goto out_free;
ret = match_string(values, nval, string);
if (ret < 0)
ret = -ENODATA;
out_free:
kfree(values);
return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
/**
* fwnode_property_match_property_string - find a property string value in an array and return index
* @fwnode: Firmware node to get the property of
* @propname: Name of the property holding the string value
* @array: String array to search in
* @n: Size of the @array
*
* Find a property string value in a given @array and if it is found return
* the index back.
*
* Return: index, starting from %0, if the string value was found in the @array (success),
* %-ENOENT when the string value was not found in the @array,
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not a string,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
const char *propname, const char * const *array, size_t n)
{
const char *string;
int ret;
ret = fwnode_property_read_string(fwnode, propname, &string);
if (ret)
return ret;
ret = match_string(array, n, string);
if (ret < 0)
ret = -ENOENT;
return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
/**
* fwnode_property_get_reference_args() - Find a reference with arguments
* @fwnode: Firmware node where to look for the reference
* @prop: The name of the property
* @nargs_prop: The name of the property telling the number of
* arguments in the referred node. NULL if @nargs is known,
* otherwise @nargs is ignored.
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
* May be NULL.
*
* Obtain a reference based on a named property in an fwnode, with
* integer arguments.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* @args->fwnode pointer.
*
* Return: %0 on success
* %-ENOENT when the index is out of bounds, the index has an empty
* reference or the property was not found
* %-EINVAL on parse error
*/
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
int ret;
if (IS_ERR_OR_NULL(fwnode))
return -ENOENT;
ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
nargs, index, args);
if (ret == 0)
return ret;
if (IS_ERR_OR_NULL(fwnode->secondary))
return ret;
return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop,
nargs, index, args);
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
/**
* fwnode_find_reference - Find named reference to a fwnode_handle
* @fwnode: Firmware node where to look for the reference
* @name: The name of the reference
* @index: Index of the reference
*
* @index can be used when the named reference holds a table of references.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: a pointer to the reference fwnode, when found. Otherwise,
* returns an error pointer.
*/
struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *name,
unsigned int index)
{
struct fwnode_reference_args args;
int ret;
ret = fwnode_property_get_reference_args(fwnode, name, NULL, 0, index,
&args);
return ret ? ERR_PTR(ret) : args.fwnode;
}
EXPORT_SYMBOL_GPL(fwnode_find_reference);
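/*
* Editorial illustration (not part of the original file): a sketch of resolving
* a named reference and releasing it afterwards. The "vendor,companion"
* reference name is hypothetical.
*
*	struct fwnode_handle *companion;
*
*	companion = fwnode_find_reference(fwnode, "vendor,companion", 0);
*	if (IS_ERR(companion))
*		return PTR_ERR(companion);
*	...
*	fwnode_handle_put(companion);
*/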
/**
* fwnode_get_name - Return the name of a node
* @fwnode: The firmware node
*
* Return: a pointer to the node name, or %NULL.
*/
const char *fwnode_get_name(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_name);
}
EXPORT_SYMBOL_GPL(fwnode_get_name);
/**
* fwnode_get_name_prefix - Return the prefix of node for printing purposes
* @fwnode: The firmware node
*
* Return: the prefix of a node, intended to be printed right before the node.
* The prefix works also as a separator between the nodes.
*/
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_name_prefix);
}
/**
* fwnode_name_eq - Return true if node name is equal
* @fwnode: The firmware node
* @name: The name to which to compare the node name
*
* Compare the name provided as an argument to the name of the node, stopping
* the comparison at either NUL or '@' character, whichever comes first. This
* function is generally used for comparing node names while ignoring the
* possible unit address of the node.
*
* Return: true if the node name matches with the name provided in the @name
* argument, false otherwise.
*/
bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name)
{
const char *node_name;
ptrdiff_t len;
node_name = fwnode_get_name(fwnode);
if (!node_name)
return false;
len = strchrnul(node_name, '@') - node_name;
return str_has_prefix(node_name, name) == len;
}
EXPORT_SYMBOL_GPL(fwnode_name_eq);
/**
* fwnode_get_parent - Return parent firmware node
* @fwnode: Firmware node whose parent is retrieved
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_parent);
}
EXPORT_SYMBOL_GPL(fwnode_get_parent);
/**
* fwnode_get_next_parent - Iterate to the node's parent
* @fwnode: Firmware node whose parent is retrieved
*
* This is like fwnode_get_parent() except that it drops the refcount
* on the passed node, making it suitable for iterating through a
* node's parents.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @fwnode
* unconditionally.
*
* Return: parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent = fwnode_get_parent(fwnode);
fwnode_handle_put(fwnode);
return parent;
}
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
* fwnode_count_parents - Return the number of parents a node has
* @fwnode: The node the parents of which are to be counted
*
* Return: the number of parents a node has.
*/
unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
unsigned int count = 0;
fwnode_for_each_parent_node(fwnode, parent)
count++;
return count;
}
EXPORT_SYMBOL_GPL(fwnode_count_parents);
/**
* fwnode_get_nth_parent - Return an nth parent of a node
* @fwnode: The node the parent of which is requested
* @depth: Distance of the parent from the node
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: the nth parent of a node. If there is no parent at the requested
* @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
* fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
*/
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
unsigned int depth)
{
struct fwnode_handle *parent;
if (depth == 0)
return fwnode_handle_get(fwnode);
fwnode_for_each_parent_node(fwnode, parent) {
if (--depth == 0)
return parent;
}
return NULL;
}
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
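/*
* Editorial illustration (not part of the original file): a sketch combining
* the two helpers above to reach the topmost ancestor of a node.
*
*	unsigned int depth = fwnode_count_parents(fwnode);
*	struct fwnode_handle *root = fwnode_get_nth_parent(fwnode, depth);
*
*	...
*	fwnode_handle_put(root);
*/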
/**
* fwnode_get_next_child_node - Return the next child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @child
* unconditionally.
*/
struct fwnode_handle *
fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
}
EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
/**
* fwnode_get_next_available_child_node - Return the next available child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @child
* unconditionally.
*/
struct fwnode_handle *
fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
struct fwnode_handle *next_child = child;
if (IS_ERR_OR_NULL(fwnode))
return NULL;
do {
next_child = fwnode_get_next_child_node(fwnode, next_child);
if (!next_child)
return NULL;
} while (!fwnode_device_is_available(next_child));
return next_child;
}
EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
* @child: Handle to one of the device's child nodes or a %NULL handle.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @child
* unconditionally.
*/
struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child)
{
const struct fwnode_handle *fwnode = dev_fwnode(dev);
struct fwnode_handle *next;
if (IS_ERR_OR_NULL(fwnode))
return NULL;
/* Try to find a child in primary fwnode */
next = fwnode_get_next_child_node(fwnode, child);
if (next)
return next;
/* When no more children in primary, continue with secondary */
return fwnode_get_next_child_node(fwnode->secondary, child);
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
/**
* fwnode_get_named_child_node - Return first matching named child node handle
* @fwnode: Firmware node to find the named child node for.
* @childname: String to match child node name against.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
return fwnode_call_ptr_op(fwnode, get_named_child_node, childname);
}
EXPORT_SYMBOL_GPL(fwnode_get_named_child_node);
/**
* device_get_named_child_node - Return first matching named child node handle
* @dev: Device to find the named child node for.
* @childname: String to match child node name against.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *device_get_named_child_node(const struct device *dev,
const char *childname)
{
return fwnode_get_named_child_node(dev_fwnode(dev), childname);
}
EXPORT_SYMBOL_GPL(device_get_named_child_node);
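/*
* Editorial illustration (not part of the original file): a sketch of iterating
* over a device's child nodes and looking one up by name. The hypo_*() helper
* and the "sensor" node name are hypothetical; device_for_each_child_node() is
* the iterator declared in <linux/property.h>.
*
*	struct fwnode_handle *child;
*
*	device_for_each_child_node(dev, child)
*		hypo_register_channel(dev, child);
*
*	child = device_get_named_child_node(dev, "sensor");
*	if (child) {
*		...
*		fwnode_handle_put(child);
*	}
*/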
/**
* fwnode_handle_get - Obtain a reference to a device node
* @fwnode: Pointer to the device node to obtain the reference to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: the fwnode handle.
*/
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
{
if (!fwnode_has_op(fwnode, get))
return fwnode;
return fwnode_call_ptr_op(fwnode, get);
}
EXPORT_SYMBOL_GPL(fwnode_handle_get);
/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*
* Return: true if device is available for use. Otherwise, returns false.
*
* For fwnode node types that don't implement the .device_is_available()
* operation, this function returns true.
*/
bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (IS_ERR_OR_NULL(fwnode))
return false;
if (!fwnode_has_op(fwnode, device_is_available))
return true;
return fwnode_call_bool_op(fwnode, device_is_available);
}
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
* fwnode_get_child_node_count - return the number of child nodes for a given firmware node
* @fwnode: Pointer to the parent firmware node
*
* Return: the number of child nodes for a given firmware node.
*/
unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
unsigned int count = 0;
fwnode_for_each_child_node(fwnode, child)
count++;
return count;
}
EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
/**
* fwnode_get_named_child_node_count - number of child nodes with given name
* @fwnode: Node which child nodes are counted.
* @name: String to match child node name against.
*
* Scan the child nodes and count all the nodes with a specific name. Any unit
* address following the 'at sign' in the scanned node names is ignored.
* E.g.::
* fwnode_get_named_child_node_count(fwnode, "channel");
* would match all the nodes::
* channel { }, channel@0 {}, channel@0xabba {}...
*
* Return: the number of child nodes with a matching name for a given device.
*/
unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode,
const char *name)
{
struct fwnode_handle *child;
unsigned int count = 0;
fwnode_for_each_named_child_node(fwnode, child, name)
count++;
return count;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_child_node_count);
bool device_dma_supported(const struct device *dev)
{
return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported);
}
EXPORT_SYMBOL_GPL(device_dma_supported);
enum dev_dma_attr device_get_dma_attr(const struct device *dev)
{
if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr))
return DEV_DMA_NOT_SUPPORTED;
return fwnode_call_int_op(dev_fwnode(dev), device_get_dma_attr);
}
EXPORT_SYMBOL_GPL(device_get_dma_attr);
/**
* fwnode_get_phy_mode - Get phy mode for given firmware node
* @fwnode: Pointer to the given node
*
* The function gets the phy interface string from the 'phy-mode' or
* 'phy-connection-type' property and returns its index in the phy_modes table,
* or a negative errno on error.
*/
int fwnode_get_phy_mode(const struct fwnode_handle *fwnode)
{
const char *pm;
int err, i;
err = fwnode_property_read_string(fwnode, "phy-mode", &pm);
if (err < 0)
err = fwnode_property_read_string(fwnode,
"phy-connection-type", &pm);
if (err < 0)
return err;
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
if (!strcasecmp(pm, phy_modes(i)))
return i;
return -ENODEV;
}
EXPORT_SYMBOL_GPL(fwnode_get_phy_mode);
/**
* device_get_phy_mode - Get phy mode for given device
* @dev: Pointer to the given device
*
* The function gets the phy interface string from the 'phy-mode' or
* 'phy-connection-type' property and returns its index in the phy_modes table,
* or a negative errno on error.
*/
int device_get_phy_mode(struct device *dev)
{
return fwnode_get_phy_mode(dev_fwnode(dev));
}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
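/*
* Editorial illustration (not part of the original file): a MAC-driver-style
* sketch of consuming the result, assuming a controller that only supports
* RGMII.
*
*	int mode = device_get_phy_mode(dev);
*
*	if (mode < 0)
*		return mode;
*	if (mode != PHY_INTERFACE_MODE_RGMII)
*		return -EINVAL;
*/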
/**
* fwnode_iomap - Maps the memory mapped IO for a given fwnode
* @fwnode: Pointer to the firmware node
* @index: Index of the IO range
*
* Return: a pointer to the mapped memory.
*/
void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
{
return fwnode_call_ptr_op(fwnode, iomap, index);
}
EXPORT_SYMBOL(fwnode_iomap);
/**
* fwnode_irq_get - Get IRQ directly from a fwnode
* @fwnode: Pointer to the firmware node
* @index: Zero-based index of the IRQ
*
* Return: Linux IRQ number on success. Negative errno on failure.
*/
int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
{
int ret;
ret = fwnode_call_int_op(fwnode, irq_get, index);
/* We treat mapping errors as an invalid case */
if (ret == 0)
return -EINVAL;
return ret;
}
EXPORT_SYMBOL(fwnode_irq_get);
/**
* fwnode_irq_get_byname - Get IRQ from a fwnode using its name
* @fwnode: Pointer to the firmware node
* @name: IRQ name
*
* Description:
* Find a match to the string @name in the 'interrupt-names' string array
* in _DSD for ACPI, or of_node for Device Tree. Then get the Linux IRQ
* number of the IRQ resource corresponding to the index of the matched
* string.
*
* Return: Linux IRQ number on success, or negative errno otherwise.
*/
int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name)
{
int index;
if (!name)
return -EINVAL;
index = fwnode_property_match_string(fwnode, "interrupt-names", name);
if (index < 0)
return index;
return fwnode_irq_get(fwnode, index);
}
EXPORT_SYMBOL(fwnode_irq_get_byname);
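/*
* Editorial illustration (not part of the original file): a sketch of fetching
* a named interrupt and handing it to the IRQ core. The "wakeup" name, the
* handler and the private data pointer are hypothetical.
*
*	int irq = fwnode_irq_get_byname(fwnode, "wakeup");
*
*	if (irq < 0)
*		return irq;
*	ret = request_irq(irq, hypo_wakeup_handler, 0, "hypo-wakeup", priv);
*/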
/**
* fwnode_graph_get_next_endpoint - Get next endpoint firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @prev
* unconditionally.
*
* Return: an endpoint firmware node pointer or %NULL if no more endpoints
* are available.
*/
struct fwnode_handle *
fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
struct fwnode_handle *ep, *port_parent = NULL;
const struct fwnode_handle *parent;
/*
* If this function is in a loop and the previous iteration returned
* an endpoint from fwnode->secondary, then we need to use the secondary
* as parent rather than @fwnode.
*/
if (prev) {
port_parent = fwnode_graph_get_port_parent(prev);
parent = port_parent;
} else {
parent = fwnode;
}
if (IS_ERR_OR_NULL(parent))
return NULL;
ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
if (ep)
goto out_put_port_parent;
ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
out_put_port_parent:
fwnode_handle_put(port_parent);
return ep;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
/**
* fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint
* @endpoint: Endpoint firmware node of the port
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: the firmware node of the device the @endpoint belongs to.
*/
struct fwnode_handle *
fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
{
struct fwnode_handle *port, *parent;
port = fwnode_get_parent(endpoint);
parent = fwnode_call_ptr_op(port, graph_get_port_parent);
fwnode_handle_put(port);
return parent;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
/**
* fwnode_graph_get_remote_port_parent - Return fwnode of a remote device
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote device the @fwnode points to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint, *parent;
endpoint = fwnode_graph_get_remote_endpoint(fwnode);
parent = fwnode_graph_get_port_parent(endpoint);
fwnode_handle_put(endpoint);
return parent;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
/**
* fwnode_graph_get_remote_port - Return fwnode of a remote port
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote port the @fwnode points to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
{
return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
/**
* fwnode_graph_get_remote_endpoint - Return fwnode of a remote endpoint
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote endpoint the @fwnode points to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
{
struct fwnode_handle *dev_node;
bool available;
dev_node = fwnode_graph_get_remote_port_parent(ep);
available = fwnode_device_is_available(dev_node);
fwnode_handle_put(dev_node);
return available;
}
/**
* fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
* @fwnode: parent fwnode_handle containing the graph
* @port: identifier of the port node
* @endpoint: identifier of the endpoint node under the port node
* @flags: fwnode lookup flags
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: the fwnode handle of the local endpoint corresponding the port and
* endpoint IDs or %NULL if not found.
*
* If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
* has not been found, look for the closest endpoint ID greater than the
* specified one and return the endpoint that corresponds to it, if present.
*
* Does not return endpoints that belong to disabled devices or endpoints that
* are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
*/
struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
u32 port, u32 endpoint, unsigned long flags)
{
struct fwnode_handle *ep, *best_ep = NULL;
unsigned int best_ep_id = 0;
bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
fwnode_graph_for_each_endpoint(fwnode, ep) {
struct fwnode_endpoint fwnode_ep = { 0 };
int ret;
if (enabled_only && !fwnode_graph_remote_available(ep))
continue;
ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
if (ret < 0)
continue;
if (fwnode_ep.port != port)
continue;
if (fwnode_ep.id == endpoint)
return ep;
if (!endpoint_next)
continue;
/*
* If the endpoint that has just been found is not the first
* matching one and the ID of the one found previously is closer
* to the requested endpoint ID, skip it.
*/
if (fwnode_ep.id < endpoint ||
(best_ep && best_ep_id < fwnode_ep.id))
continue;
fwnode_handle_put(best_ep);
best_ep = fwnode_handle_get(ep);
best_ep_id = fwnode_ep.id;
}
return best_ep;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
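/*
* Editorial illustration (not part of the original file): a sketch of locating
* the endpoint at port 0, endpoint 0 and following it to the remote device
* node, as a camera or display driver might do.
*
*	struct fwnode_handle *ep, *remote;
*
*	ep = fwnode_graph_get_endpoint_by_id(fwnode, 0, 0, 0);
*	if (!ep)
*		return -ENODEV;
*	remote = fwnode_graph_get_remote_port_parent(ep);
*	fwnode_handle_put(ep);
*	if (!remote)
*		return -ENODEV;
*	...
*	fwnode_handle_put(remote);
*/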
/**
* fwnode_graph_get_endpoint_count - Count endpoints on a device node
* @fwnode: The node related to a device
* @flags: fwnode lookup flags
*
* Count endpoints in a device node.
*
* If the FWNODE_GRAPH_DEVICE_DISABLED flag is specified, unconnected endpoints
* and endpoints connected to disabled devices are also counted.
*/
unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode,
unsigned long flags)
{
struct fwnode_handle *ep;
unsigned int count = 0;
fwnode_graph_for_each_endpoint(fwnode, ep) {
if (flags & FWNODE_GRAPH_DEVICE_DISABLED ||
fwnode_graph_remote_available(ep))
count++;
}
return count;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_count);
/**
* fwnode_graph_parse_endpoint - parse common endpoint node properties
* @fwnode: pointer to endpoint fwnode_handle
* @endpoint: pointer to the fwnode endpoint data structure
*
* Parse @fwnode representing a graph endpoint node and store the
* information in @endpoint. The caller must hold a reference to
* @fwnode.
*/
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
memset(endpoint, 0, sizeof(*endpoint));
return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
const void *device_get_match_data(const struct device *dev)
{
return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
EXPORT_SYMBOL_GPL(device_get_match_data);
static unsigned int fwnode_graph_devcon_matches(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match,
void **matches,
unsigned int matches_len)
{
struct fwnode_handle *node;
struct fwnode_handle *ep;
unsigned int count = 0;
void *ret;
fwnode_graph_for_each_endpoint(fwnode, ep) {
if (matches && count >= matches_len) {
fwnode_handle_put(ep);
break;
}
node = fwnode_graph_get_remote_port_parent(ep);
if (!fwnode_device_is_available(node)) {
fwnode_handle_put(node);
continue;
}
ret = match(node, con_id, data);
fwnode_handle_put(node);
if (ret) {
if (matches)
matches[count] = ret;
count++;
}
}
return count;
}
static unsigned int fwnode_devcon_matches(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match,
void **matches,
unsigned int matches_len)
{
struct fwnode_handle *node;
unsigned int count = 0;
unsigned int i;
void *ret;
for (i = 0; ; i++) {
if (matches && count >= matches_len)
break;
node = fwnode_find_reference(fwnode, con_id, i);
if (IS_ERR(node))
break;
ret = match(node, NULL, data);
fwnode_handle_put(node);
if (ret) {
if (matches)
matches[count] = ret;
count++;
}
}
return count;
}
/**
* fwnode_connection_find_match - Find connection from a device node
* @fwnode: Device node with the connection
* @con_id: Identifier for the connection
* @data: Data for the match function
* @match: Function to check and convert the connection description
*
* Find a connection with unique identifier @con_id between @fwnode and another
* device node. @match will be used to convert the connection description to
* data the caller is expecting to be returned.
*/
void *fwnode_connection_find_match(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match)
{
unsigned int count;
void *ret;
if (!fwnode || !match)
return NULL;
count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
if (count)
return ret;
count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
return count ? ret : NULL;
}
EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
/**
* fwnode_connection_find_matches - Find connections from a device node
* @fwnode: Device node with the connection
* @con_id: Identifier for the connection
* @data: Data for the match function
* @match: Function to check and convert the connection description
* @matches: (Optional) array of pointers to fill with matches
* @matches_len: Length of @matches
*
* Find up to @matches_len connections with unique identifier @con_id between
* @fwnode and other device nodes. @match will be used to convert the
* connection description to data the caller is expecting to be returned
* through the @matches array.
*
* If @matches is %NULL @matches_len is ignored and the total number of resolved
* matches is returned.
*
* Return: Number of matches resolved, or negative errno.
*/
int fwnode_connection_find_matches(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match,
void **matches, unsigned int matches_len)
{
unsigned int count_graph;
unsigned int count_ref;
if (!fwnode || !match)
return -EINVAL;
count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
matches, matches_len);
if (matches) {
matches += count_graph;
matches_len -= count_graph;
}
count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
matches, matches_len);
return count_graph + count_ref;
}
EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/panic.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* This function is used through-out the kernel (including mm and fs)
* to indicate a major problem.
*/
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/sysfs.h>
#include <linux/context_tracking.h>
#include <linux/seq_buf.h>
#include <linux/sys_info.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
#ifdef CONFIG_SMP
/*
* Should we dump all CPUs backtraces in an oops event?
* Defaults to 0, can be changed via sysctl.
*/
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */
int panic_on_oops = IS_ENABLED(CONFIG_PANIC_ON_OOPS);
static unsigned long tainted_mask =
IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;
static unsigned int warn_limit __read_mostly;
static bool panic_console_replay;
bool panic_triggering_all_cpu_backtrace;
static bool panic_this_cpu_backtrace_printed;
int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
unsigned long panic_print;
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
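/*
* Editorial illustration (not part of the original file): a minimal sketch of a
* driver registering on the panic notifier chain so it can quiesce hardware
* before the dump runs. The callback, hypo_hw_quiesce() and the notifier_block
* names are hypothetical.
*
*	static int hypo_panic_notify(struct notifier_block *nb,
*				     unsigned long action, void *data)
*	{
*		hypo_hw_quiesce();
*		return NOTIFY_DONE;
*	}
*
*	static struct notifier_block hypo_panic_nb = {
*		.notifier_call = hypo_panic_notify,
*	};
*
*	atomic_notifier_chain_register(&panic_notifier_list, &hypo_panic_nb);
*/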
static void panic_print_deprecated(void)
{
pr_info_once("Kernel: The 'panic_print' parameter is now deprecated. Please use 'panic_sys_info' and 'panic_console_replay' instead.\n");
}
#ifdef CONFIG_SYSCTL
/*
* Taint values can only be increased
* This means we can safely use a temporary.
*/
static int proc_taint(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
unsigned long tmptaint = get_taint();
int err;
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
t = *table;
t.data = &tmptaint;
err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
if (err < 0)
return err;
if (write) {
int i;
/*
* If we are relying on panic_on_taint not producing
* false positives due to userspace input, bail out
* before setting the requested taint flags.
*/
if (panic_on_taint_nousertaint && (tmptaint & panic_on_taint))
return -EINVAL;
/*
* Poor man's atomic or. Not worth adding a primitive
* to everyone's atomic.h for this
*/
for (i = 0; i < TAINT_FLAGS_COUNT; i++)
if ((1UL << i) & tmptaint)
add_taint(i, LOCKDEP_STILL_OK);
}
return err;
}
static int sysctl_panic_print_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
panic_print_deprecated();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static const struct ctl_table kern_panic_table[] = {
#ifdef CONFIG_SMP
{
.procname = "oops_all_cpu_backtrace",
.data = &sysctl_oops_all_cpu_backtrace,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif
{
.procname = "tainted",
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_taint,
},
{
.procname = "panic",
.data = &panic_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "panic_on_oops",
.data = &panic_on_oops,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "panic_print",
.data = &panic_print,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = sysctl_panic_print_handler,
},
{
.procname = "panic_on_warn",
.data = &panic_on_warn,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "warn_limit",
.data = &warn_limit,
.maxlen = sizeof(warn_limit),
.mode = 0644,
.proc_handler = proc_douintvec,
},
#if (defined(CONFIG_X86_32) || defined(CONFIG_PARISC)) && \
defined(CONFIG_DEBUG_STACKOVERFLOW)
{
.procname = "panic_on_stackoverflow",
.data = &sysctl_panic_on_stackoverflow,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
{
.procname = "panic_sys_info",
.data = &panic_print,
.maxlen = sizeof(panic_print),
.mode = 0644,
.proc_handler = sysctl_sys_info_handler,
},
};
static __init int kernel_panic_sysctls_init(void)
{
register_sysctl_init("kernel", kern_panic_table);
return 0;
}
late_initcall(kernel_panic_sysctls_init);
#endif
/* The format is "panic_sys_info=tasks,mem,locks,ftrace,..." */
static int __init setup_panic_sys_info(char *buf)
{
/* There is no risk of race in kernel boot phase */
panic_print = sys_info_parse_param(buf);
return 1;
}
__setup("panic_sys_info=", setup_panic_sys_info);
static atomic_t warn_count = ATOMIC_INIT(0);
#ifdef CONFIG_SYSFS
static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
char *page)
{
return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
}
static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);
static __init int kernel_panic_sysfs_init(void)
{
sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
return 0;
}
late_initcall(kernel_panic_sysfs_init);
#endif
static long no_blink(int state)
{
return 0;
}
/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
/*
* Stop ourselves in panic -- architecture code may override this
*/
void __weak __noreturn panic_smp_self_stop(void)
{
while (1)
cpu_relax();
}
/*
* Stop ourselves in NMI context if another CPU has already panicked. Arch code
* may override this to prepare for crash dumping, e.g. save regs info.
*/
void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{
panic_smp_self_stop();
}
/*
* Stop other CPUs in panic. Architecture dependent code may override this
* with more suitable version. For example, if the architecture supports
* crash dump, it should save registers of each stopped CPU and disable
* per-CPU features such as virtualization extensions.
*/
void __weak crash_smp_send_stop(void)
{
static int cpus_stopped;
/*
* This function can be called twice in panic path, but obviously
* we execute this only once.
*/
if (cpus_stopped)
return;
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a panic
* situation.
*/
smp_send_stop();
cpus_stopped = 1;
}
atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
bool panic_try_start(void)
{
int old_cpu, this_cpu;
/*
* Only one CPU is allowed to execute the crash_kexec() code as with
* panic(). Otherwise parallel calls of panic() and crash_kexec()
* may stop each other. To exclude them, we use panic_cpu here too.
*/
old_cpu = PANIC_CPU_INVALID;
this_cpu = raw_smp_processor_id();
return atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu);
}
EXPORT_SYMBOL(panic_try_start);
void panic_reset(void)
{
atomic_set(&panic_cpu, PANIC_CPU_INVALID);
}
EXPORT_SYMBOL(panic_reset);
bool panic_in_progress(void)
{
return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}
EXPORT_SYMBOL(panic_in_progress);
/* Return true if a panic is in progress on the current CPU. */
bool panic_on_this_cpu(void)
{
/*
* We can use raw_smp_processor_id() here because it is impossible for
* the task to be migrated to the panic_cpu, or away from it. If
* panic_cpu has already been set, and we're not currently executing on
* that CPU, then we never will be.
*/
return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
}
EXPORT_SYMBOL(panic_on_this_cpu);
/*
* Return true if a panic is in progress on a remote CPU.
*
* On true, the local CPU should immediately release any printing resources
* that may be needed by the panic CPU.
*/
bool panic_on_other_cpu(void)
{
	return panic_in_progress() && !panic_on_this_cpu();
}
EXPORT_SYMBOL(panic_on_other_cpu);
/*
* A variant of panic() called from NMI context. We return if we've already
* panicked on this CPU. If another CPU already panicked, loop in
* nmi_panic_self_stop() which can provide architecture dependent code such
* as saving register state for crash dump.
*/
void nmi_panic(struct pt_regs *regs, const char *msg)
{
if (panic_try_start())
panic("%s", msg);
else if (panic_on_other_cpu())
nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
void check_panic_on_warn(const char *origin)
{
unsigned int limit;
if (panic_on_warn)
panic("%s: panic_on_warn set ...\n", origin);
limit = READ_ONCE(warn_limit);
if (atomic_inc_return(&warn_count) >= limit && limit)
panic("%s: system warned too often (kernel.warn_limit is %d)",
origin, limit);
}
static void panic_trigger_all_cpu_backtrace(void)
{
/* Temporarily allow non-panic CPUs to write their backtraces. */
panic_triggering_all_cpu_backtrace = true;
if (panic_this_cpu_backtrace_printed)
trigger_allbutcpu_cpu_backtrace(raw_smp_processor_id());
else
trigger_all_cpu_backtrace();
panic_triggering_all_cpu_backtrace = false;
}
/*
* Helper that triggers the NMI backtrace (if set in panic_print)
* and then performs the secondary CPUs shutdown - we cannot have
* the NMI backtrace after the CPUs are off!
*/
static void panic_other_cpus_shutdown(bool crash_kexec)
{
if (panic_print & SYS_INFO_ALL_CPU_BT)
panic_trigger_all_cpu_backtrace();
/*
* Note that smp_send_stop() is the usual SMP shutdown function,
* which unfortunately may not be hardened to work in a panic
* situation. If we want to do crash dump after notifier calls
* and kmsg_dump, we will need architecture dependent extra
* bits in addition to stopping other CPUs, hence we rely on
* crash_smp_send_stop() for that.
*/
if (!crash_kexec)
smp_send_stop();
else
crash_smp_send_stop();
}
/**
* vpanic - halt the system
* @fmt: The text string to print
* @args: Arguments for the format string
*
* Display a message, then perform cleanups. This function never returns.
*/
void vpanic(const char *fmt, va_list args)
{
static char buf[1024];
long i, i_next = 0, len;
int state = 0;
bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
if (panic_on_warn) {
/*
* This thread may hit another WARN() in the panic path.
* Resetting this prevents additional WARN() from panicking the
* system on this thread. Other threads are blocked by the
* panic_mutex in panic().
*/
panic_on_warn = 0;
}
/*
* Disable local interrupts. This will prevent panic_smp_self_stop
* from deadlocking the first cpu that invokes the panic, since
* there is nothing to prevent an interrupt handler (that runs
* after setting panic_cpu) from invoking panic() again.
*/
local_irq_disable();
preempt_disable_notrace();
/*
* It's possible to come here directly from a panic-assertion and
* not have preempt disabled. Some functions called from here want
* preempt to be disabled. No point enabling it later though...
*
* Only one CPU is allowed to execute the panic code from here. For
* multiple parallel invocations of panic, all other CPUs either
* stop themselves or will wait until they are stopped by the 1st CPU
* with smp_send_stop().
*
* panic_try_start() succeeding means this is the 1st CPU which comes
* here, so go ahead.
* If a panic was already started on this CPU, we came from nmi_panic()
* which set panic_cpu to this CPU. In that case, this is also the
* 1st CPU.
*/
if (panic_try_start()) {
/* go ahead */
} else if (panic_on_other_cpu())
panic_smp_self_stop();
console_verbose();
bust_spinlocks(1);
len = vscnprintf(buf, sizeof(buf), fmt, args);
if (len && buf[len - 1] == '\n')
buf[len - 1] = '\0';
pr_emerg("Kernel panic - not syncing: %s\n", buf);
/*
* Avoid nested stack-dumping if a panic occurs during oops processing
*/
if (test_taint(TAINT_DIE) || oops_in_progress > 1) {
panic_this_cpu_backtrace_printed = true;
} else if (IS_ENABLED(CONFIG_DEBUG_BUGVERBOSE)) {
dump_stack();
panic_this_cpu_backtrace_printed = true;
}
/*
* If kgdb is enabled, give it a chance to run before we stop all
* the other CPUs or else we won't be able to debug processes left
* running on them.
*/
kgdb_panic(buf);
/*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
* If we want to run this after calling panic_notifiers, pass
* the "crash_kexec_post_notifiers" option to the kernel.
*
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
if (!_crash_kexec_post_notifiers)
__crash_kexec(NULL);
panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
printk_legacy_allow_panic_sync();
/*
* Run any panic handlers, including those that might need to
* add information to the kmsg dump output.
*/
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
sys_info(panic_print);
kmsg_dump_desc(KMSG_DUMP_PANIC, buf);
/*
* If you doubt kdump always works fine in any situation,
* "crash_kexec_post_notifiers" offers you a chance to run
* panic_notifiers and dumping kmsg before kdump.
* Note: since some panic_notifiers can make crashed kernel
* more unstable, it can increase risks of the kdump failure too.
*
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
if (_crash_kexec_post_notifiers)
__crash_kexec(NULL);
console_unblank();
/*
* We may have ended up stopping the CPU holding the lock (in
* smp_send_stop()) while still having some valuable data in the console
* buffer. Try to acquire the lock then release it regardless of the
* result. The release will also print the buffers out. Locks debug
* should be disabled to avoid reporting bad unlock balance when
* panic() is not being called from an OOPS.
*/
debug_locks_off();
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
if ((panic_print & SYS_INFO_PANIC_CONSOLE_REPLAY) ||
panic_console_replay)
console_flush_on_panic(CONSOLE_REPLAY_ALL);
if (!panic_blink)
panic_blink = no_blink;
if (panic_timeout > 0) {
/*
* Delay timeout seconds before rebooting the machine.
* We can't use the "normal" timers since we just panicked.
*/
pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
touch_nmi_watchdog();
if (i >= i_next) {
i += panic_blink(state ^= 1);
i_next = i + 3600 / PANIC_BLINK_SPD;
}
mdelay(PANIC_TIMER_STEP);
}
}
if (panic_timeout != 0) {
/*
* This will not be a clean reboot, with everything
* shutting down. But if there is a chance of
* rebooting the system it will be rebooted.
*/
if (panic_reboot_mode != REBOOT_UNDEFINED)
reboot_mode = panic_reboot_mode;
emergency_restart();
}
#ifdef __sparc__
{
extern int stop_a_enabled;
/* Make sure the user can actually press Stop-A (L1-A) */
stop_a_enabled = 1;
pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
"twice on console to return to the boot prom\n");
}
#endif
#if defined(CONFIG_S390)
disabled_wait();
#endif
pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
/* Do not scroll important messages printed above */
suppress_printk = 1;
/*
* The final messages may not have been printed if in a context that
* defers printing (such as NMI) and irq_work is not available.
* Explicitly flush the kernel log buffer one last time.
*/
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
nbcon_atomic_flush_unsafe();
local_irq_enable();
for (i = 0; ; i += PANIC_TIMER_STEP) {
touch_softlockup_watchdog();
if (i >= i_next) {
i += panic_blink(state ^= 1);
i_next = i + 3600 / PANIC_BLINK_SPD;
}
mdelay(PANIC_TIMER_STEP);
}
}
EXPORT_SYMBOL(vpanic);
/* Identical to vpanic(), except it takes variadic arguments instead of va_list */
void panic(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vpanic(fmt, args);
va_end(args);
}
EXPORT_SYMBOL(panic);
#define TAINT_FLAG(taint, _c_true, _c_false, _module) \
[ TAINT_##taint ] = { \
.c_true = _c_true, .c_false = _c_false, \
.module = _module, \
.desc = #taint, \
}
/*
* TAINT_FORCED_RMMOD could be a per-module flag but the module
* is being removed anyway.
*/
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
TAINT_FLAG(PROPRIETARY_MODULE, 'P', 'G', true),
TAINT_FLAG(FORCED_MODULE, 'F', ' ', true),
TAINT_FLAG(CPU_OUT_OF_SPEC, 'S', ' ', false),
TAINT_FLAG(FORCED_RMMOD, 'R', ' ', false),
TAINT_FLAG(MACHINE_CHECK, 'M', ' ', false),
TAINT_FLAG(BAD_PAGE, 'B', ' ', false),
TAINT_FLAG(USER, 'U', ' ', false),
TAINT_FLAG(DIE, 'D', ' ', false),
TAINT_FLAG(OVERRIDDEN_ACPI_TABLE, 'A', ' ', false),
TAINT_FLAG(WARN, 'W', ' ', false),
TAINT_FLAG(CRAP, 'C', ' ', true),
TAINT_FLAG(FIRMWARE_WORKAROUND, 'I', ' ', false),
TAINT_FLAG(OOT_MODULE, 'O', ' ', true),
TAINT_FLAG(UNSIGNED_MODULE, 'E', ' ', true),
TAINT_FLAG(SOFTLOCKUP, 'L', ' ', false),
TAINT_FLAG(LIVEPATCH, 'K', ' ', true),
TAINT_FLAG(AUX, 'X', ' ', true),
TAINT_FLAG(RANDSTRUCT, 'T', ' ', true),
TAINT_FLAG(TEST, 'N', ' ', true),
TAINT_FLAG(FWCTL, 'J', ' ', true),
};
#undef TAINT_FLAG
static void print_tainted_seq(struct seq_buf *s, bool verbose)
{
const char *sep = "";
int i;
if (!tainted_mask) {
seq_buf_puts(s, "Not tainted");
return;
}
seq_buf_printf(s, "Tainted: ");
for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
const struct taint_flag *t = &taint_flags[i];
bool is_set = test_bit(i, &tainted_mask);
char c = is_set ? t->c_true : t->c_false;
if (verbose) {
if (is_set) {
seq_buf_printf(s, "%s[%c]=%s", sep, c, t->desc);
sep = ", ";
}
} else {
seq_buf_putc(s, c);
}
}
}
static const char *_print_tainted(bool verbose)
{
/* FIXME: what should the size be? */
static char buf[sizeof(taint_flags)];
struct seq_buf s;
BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);
seq_buf_init(&s, buf, sizeof(buf));
print_tainted_seq(&s, verbose);
return seq_buf_str(&s);
}
/**
* print_tainted - return a string to represent the kernel taint state.
*
* For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
*
* The string is overwritten by the next call to print_tainted(),
* but is always NULL terminated.
*/
const char *print_tainted(void)
{
return _print_tainted(false);
}
/**
* print_tainted_verbose - A more verbose version of print_tainted()
*/
const char *print_tainted_verbose(void)
{
return _print_tainted(true);
}
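/*
 * Example output (illustrative; exact spacing is one column per entry of
 * taint_flags[], and only approximated here). With TAINT_WARN and
 * TAINT_OOT_MODULE set:
 *
 *	print_tainted()		-> "Tainted: G        W  O       "
 *	print_tainted_verbose()	-> "Tainted: [W]=WARN, [O]=OOT_MODULE"
 *
 * The terse form emits c_true for set bits and c_false otherwise; the
 * verbose form lists only the set bits with their names.
 */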
int test_taint(unsigned flag)
{
return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);
unsigned long get_taint(void)
{
return tainted_mask;
}
/**
* add_taint: add a taint flag if not already set.
* @flag: one of the TAINT_* constants.
* @lockdep_ok: whether lock debugging is still OK.
*
* If something bad has gone wrong, you'll want @lockdep_ok = false, but for
* some noteworthy-but-not-corrupting cases, it can be set to true.
*/
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
pr_warn("Disabling lock debugging due to kernel taint\n");
set_bit(flag, &tainted_mask);
if (tainted_mask & panic_on_taint) {
panic_on_taint = 0;
panic("panic_on_taint set ...");
}
}
EXPORT_SYMBOL(add_taint);
static void spin_msec(int msecs)
{
int i;
for (i = 0; i < msecs; i++) {
touch_nmi_watchdog();
mdelay(1);
}
}
/*
* It just happens that oops_enter() and oops_exit() are identically
* implemented...
*/
static void do_oops_enter_exit(void)
{
unsigned long flags;
static int spin_counter;
if (!pause_on_oops)
return;
spin_lock_irqsave(&pause_on_oops_lock, flags);
if (pause_on_oops_flag == 0) {
/* This CPU may now print the oops message */
pause_on_oops_flag = 1;
} else {
/* We need to stall this CPU */
if (!spin_counter) {
/* This CPU gets to do the counting */
spin_counter = pause_on_oops;
do {
spin_unlock(&pause_on_oops_lock);
spin_msec(MSEC_PER_SEC);
spin_lock(&pause_on_oops_lock);
} while (--spin_counter);
pause_on_oops_flag = 0;
} else {
/* This CPU waits for a different one */
while (spin_counter) {
spin_unlock(&pause_on_oops_lock);
spin_msec(1);
spin_lock(&pause_on_oops_lock);
}
}
}
spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}
/*
* Return true if the calling CPU is allowed to print oops-related info.
* This is a bit racy..
*/
bool oops_may_print(void)
{
return pause_on_oops_flag == 0;
}
/*
* Called when the architecture enters its oops handler, before it prints
* anything. If this is the first CPU to oops, and it's oopsing the first
* time then let it proceed.
*
* This is all enabled by the pause_on_oops kernel boot option. We do all
* this to ensure that oopses don't scroll off the screen. It has the
* side-effect of preventing later-oopsing CPUs from mucking up the display,
* too.
*
* It turns out that the CPU which is allowed to print ends up pausing for
* the right duration, whereas all the other CPUs pause for twice as long:
* once in oops_enter(), once in oops_exit().
*/
void oops_enter(void)
{
nbcon_cpu_emergency_enter();
tracing_off();
/* can't trust the integrity of the kernel anymore: */
debug_locks_off();
do_oops_enter_exit();
if (sysctl_oops_all_cpu_backtrace)
trigger_all_cpu_backtrace();
}
static void print_oops_end_marker(void)
{
pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}
/*
* Called when the architecture exits its oops handler, after printing
* everything.
*/
void oops_exit(void)
{
do_oops_enter_exit();
print_oops_end_marker();
nbcon_cpu_emergency_exit();
kmsg_dump(KMSG_DUMP_OOPS);
}
struct warn_args {
const char *fmt;
va_list args;
};
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args)
{
nbcon_cpu_emergency_enter();
disable_trace_on_warning();
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
caller);
else
pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
raw_smp_processor_id(), current->pid, caller);
#pragma GCC diagnostic push
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif
if (args)
vprintk(args->fmt, args->args);
#pragma GCC diagnostic pop
print_modules();
if (regs)
show_regs(regs);
check_panic_on_warn("kernel");
if (!regs)
dump_stack();
print_irqtrace_events(current);
print_oops_end_marker();
trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
nbcon_cpu_emergency_exit();
}
#ifdef CONFIG_BUG
#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
const char *fmt, ...)
{
bool rcu = warn_rcu_enter();
struct warn_args args;
pr_warn(CUT_HERE);
if (!fmt) {
__warn(file, line, __builtin_return_address(0), taint,
NULL, NULL);
warn_rcu_exit(rcu);
return;
}
args.fmt = fmt;
va_start(args.args, fmt);
__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
va_end(args.args);
warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
bool rcu = warn_rcu_enter();
va_list args;
pr_warn(CUT_HERE);
va_start(args, fmt);
vprintk(fmt, args);
va_end(args);
warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(__warn_printk);
#endif
/* Support resetting WARN*_ONCE state */
static int clear_warn_once_set(void *data, u64 val)
{
generic_bug_clear_once();
memset(__start_once, 0, __end_once - __start_once);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
"%lld\n");
static __init int register_warn_debugfs(void)
{
/* Don't care about failure */
debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
&clear_warn_once_fops);
return 0;
}
device_initcall(register_warn_debugfs);
#endif
#ifdef CONFIG_STACKPROTECTOR
/*
* Called when gcc's -fstack-protector feature is used, and
* gcc detects corruption of the on-stack canary value
*/
__visible noinstr void __stack_chk_fail(void)
{
unsigned long flags;
instrumentation_begin();
flags = user_access_save();
panic("stack-protector: Kernel stack is corrupted in: %pB",
__builtin_return_address(0));
user_access_restore(flags);
instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);
#endif
core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
core_param(panic_console_replay, panic_console_replay, bool, 0644);
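/*
 * Example (informational): these core_params map directly to kernel command
 * line options. Booting with "panic=30" delays 30 seconds in the blink loop
 * above before rebooting; "panic=-1" skips the delay (panic_timeout > 0 is
 * false) but still reboots because panic_timeout != 0; with panic_timeout
 * left at 0 the machine loops forever at the end of vpanic().
 */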
static int panic_print_set(const char *val, const struct kernel_param *kp)
{
panic_print_deprecated();
return param_set_ulong(val, kp);
}
static int panic_print_get(char *val, const struct kernel_param *kp)
{
panic_print_deprecated();
return param_get_ulong(val, kp);
}
static const struct kernel_param_ops panic_print_ops = {
.set = panic_print_set,
.get = panic_print_get,
};
__core_param_cb(panic_print, &panic_print_ops, &panic_print, 0644);
static int __init oops_setup(char *s)
{
if (!s)
return -EINVAL;
if (!strcmp(s, "panic"))
panic_on_oops = 1;
return 0;
}
early_param("oops", oops_setup);
static int __init panic_on_taint_setup(char *s)
{
char *taint_str;
if (!s)
return -EINVAL;
taint_str = strsep(&s, ",");
if (kstrtoul(taint_str, 16, &panic_on_taint))
return -EINVAL;
/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
panic_on_taint &= TAINT_FLAGS_MAX;
if (!panic_on_taint)
return -EINVAL;
if (s && !strcmp(s, "nousertaint"))
panic_on_taint_nousertaint = true;
pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%s\n",
panic_on_taint, str_enabled_disabled(panic_on_taint_nousertaint));
return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2002-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/unaligned.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
closely. They're more complex. --RR */
static const char *const tcp_conntrack_names[] = {
"NONE",
"SYN_SENT",
"SYN_RECV",
"ESTABLISHED",
"FIN_WAIT",
"CLOSE_WAIT",
"LAST_ACK",
"TIME_WAIT",
"CLOSE",
"SYN_SENT2",
};
enum nf_ct_tcp_action {
NFCT_TCP_IGNORE,
NFCT_TCP_INVALID,
NFCT_TCP_ACCEPT,
};
#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
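/*
 * These macros work by plain textual macro expansion to the right of a
 * literal, e.g. "2 MINS" expands to "2 * 60 * HZ" and "5 DAYS" to
 * "5 * 24 * 60 * 60 * HZ", so the table below reads as wall-clock durations
 * expressed in jiffies.
 */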
static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
[TCP_CONNTRACK_SYN_SENT] = 2 MINS,
[TCP_CONNTRACK_SYN_RECV] = 60 SECS,
[TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
[TCP_CONNTRACK_FIN_WAIT] = 2 MINS,
[TCP_CONNTRACK_CLOSE_WAIT] = 60 SECS,
[TCP_CONNTRACK_LAST_ACK] = 30 SECS,
[TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
[TCP_CONNTRACK_CLOSE] = 10 SECS,
[TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
/* RFC1122 says the R2 limit should be at least 100 seconds.
Linux uses 15 packets as limit, which corresponds
to ~13-30min depending on RTO. */
[TCP_CONNTRACK_RETRANS] = 5 MINS,
[TCP_CONNTRACK_UNACK] = 5 MINS,
};
#define sNO TCP_CONNTRACK_NONE
#define sSS TCP_CONNTRACK_SYN_SENT
#define sSR TCP_CONNTRACK_SYN_RECV
#define sES TCP_CONNTRACK_ESTABLISHED
#define sFW TCP_CONNTRACK_FIN_WAIT
#define sCW TCP_CONNTRACK_CLOSE_WAIT
#define sLA TCP_CONNTRACK_LAST_ACK
#define sTW TCP_CONNTRACK_TIME_WAIT
#define sCL TCP_CONNTRACK_CLOSE
#define sS2 TCP_CONNTRACK_SYN_SENT2
#define sIV TCP_CONNTRACK_MAX
#define sIG TCP_CONNTRACK_IGNORE
/* What TCP flags are set from RST/SYN/FIN/ACK. */
enum tcp_bit_set {
TCP_SYN_SET,
TCP_SYNACK_SET,
TCP_FIN_SET,
TCP_ACK_SET,
TCP_RST_SET,
TCP_NONE_SET,
};
/*
* The TCP state transition table needs a few words...
*
* We are the man in the middle. All the packets go through us
* but might get lost in transit to the destination.
* It is assumed that the destinations can't receive segments
* we haven't seen.
*
* The checked segment is in window, but our windows are *not*
* equivalent with the ones of the sender/receiver. We always
* try to guess the state of the current sender.
*
* The meaning of the states are:
*
* NONE: initial state
* SYN_SENT: SYN-only packet seen
* SYN_SENT2: SYN-only packet seen from reply dir, simultaneous open
* SYN_RECV: SYN-ACK packet seen
* ESTABLISHED: ACK packet seen
* FIN_WAIT: FIN packet seen
* CLOSE_WAIT: ACK seen (after FIN)
* LAST_ACK: FIN seen (after FIN)
* TIME_WAIT: last ACK seen
* CLOSE: closed connection (RST)
*
* Packets marked as IGNORED (sIG):
* if they may be either invalid or valid
* and the receiver may send back a connection
* closing RST or a SYN/ACK.
*
* Packets marked as INVALID (sIV):
* if we regard them as truly invalid packets
*/
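/*
 * The table is indexed as tcp_conntracks[dir][segment type][current state].
 * A minimal lookup sketch, mirroring what nf_conntrack_tcp_packet() does
 * further below (th is a parsed struct tcphdr *):
 *
 *	new_state = tcp_conntracks[CTINFO2DIR(ctinfo)]
 *			[get_conntrack_index(th)]
 *			[ct->proto.tcp.state];
 *
 * e.g. a SYN in the ORIGINAL direction while in sNO yields sSS (SYN_SENT).
 */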
static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
/*
* sNO -> sSS Initialize a new connection
* sSS -> sSS Retransmitted SYN
* sS2 -> sS2 Late retransmitted SYN
* sSR -> sIG
* sES -> sIG Error: SYNs in window outside the SYN_SENT state
* are errors. Receiver will reply with RST
* and close the connection.
* Or we are not in sync and hold a dead connection.
* sFW -> sIG
* sCW -> sIG
* sLA -> sIG
* sTW -> sSS Reopened connection (RFC 1122).
* sCL -> sSS
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
/*
* sNO -> sIV Too late and no reason to do anything
* sSS -> sIV Client can't send SYN and then SYN/ACK
* sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
* sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
* sES -> sIV Invalid SYN/ACK packets sent by the client
* sFW -> sIV
* sCW -> sIV
* sLA -> sIV
* sTW -> sIV
* sCL -> sIV
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
* sNO -> sIV Too late and no reason to do anything...
* sSS -> sIV Client might not send FIN in this state:
* we enforce waiting for a SYN/ACK reply first.
* sS2 -> sIV
* sSR -> sFW Close started.
* sES -> sFW
* sFW -> sLA FIN seen in both directions, waiting for
* the last ACK.
* Might be a retransmitted FIN as well...
* sCW -> sLA
* sLA -> sLA Retransmitted FIN. Remain in the same state.
* sTW -> sTW
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
/*
* sNO -> sES Assumed.
* sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet.
* sS2 -> sIV
* sSR -> sES Established state is reached.
* sES -> sES :-)
* sFW -> sCW Normal close request answered by ACK.
* sCW -> sCW
* sLA -> sTW Last ACK detected (RFC5961 challenged)
* sTW -> sTW Retransmitted last ACK. Remain in the same state.
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
},
{
/* REPLY */
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
/*
* sNO -> sIV Never reached.
* sSS -> sS2 Simultaneous open
* sS2 -> sS2 Retransmitted simultaneous SYN
* sSR -> sIV Invalid SYN packets sent by the server
* sES -> sIV
* sFW -> sIV
* sCW -> sIV
* sLA -> sIV
* sTW -> sSS Reopened connection, but server may have switched role
* sCL -> sIV
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
* sSS -> sSR Standard open.
* sS2 -> sSR Simultaneous open
* sSR -> sIG Retransmitted SYN/ACK, ignore it.
* sES -> sIG Late retransmitted SYN/ACK?
* sFW -> sIG Might be SYN/ACK answering ignored SYN
* sCW -> sIG
* sLA -> sIG
* sTW -> sIG
* sCL -> sIG
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
* sSS -> sIV Server might not send FIN in this state.
* sS2 -> sIV
* sSR -> sFW Close started.
* sES -> sFW
* sFW -> sLA FIN seen in both directions.
* sCW -> sLA
* sLA -> sLA Retransmitted FIN.
* sTW -> sTW
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
/*
* sSS -> sIG Might be a half-open connection.
* sS2 -> sIG
* sSR -> sSR Might answer late resent SYN.
* sES -> sES :-)
* sFW -> sCW Normal close request answered by ACK.
* sCW -> sCW
* sLA -> sTW Last ACK detected (RFC5961 challenged)
* sTW -> sTW Retransmitted last ACK.
* sCL -> sCL
*/
/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
}
};
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
return;
seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
}
#endif
static unsigned int get_conntrack_index(const struct tcphdr *tcph)
{
if (tcph->rst) return TCP_RST_SET;
else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
else if (tcph->fin) return TCP_FIN_SET;
else if (tcph->ack) return TCP_ACK_SET;
else return TCP_NONE_SET;
}
/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
in IP Filter' by Guido van Rooij.
http://www.sane.nl/events/sane2000/papers.html
http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
The boundaries and the conditions are changed according to RFC793:
the packet must intersect the window (i.e. segments may be
after the right or before the left edge) and thus receivers may ACK
segments after the right edge of the window.
td_maxend = max(sack + max(win,1)) seen in reply packets
td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
td_maxwin += seq + len - sender.td_maxend
if seq + len > sender.td_maxend
td_end = max(seq + len) seen in sent packets
I. Upper bound for valid data: seq <= sender.td_maxend
II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
III. Upper bound for valid (s)ack: sack <= receiver.td_end
IV. Lower bound for valid (s)ack: sack >= receiver.td_end - MAXACKWINDOW
where sack is the highest right edge of sack block found in the packet
or ack in the case of packet without SACK option.
The upper bound limit for a valid (s)ack is not ignored -
we don't have to deal with fragments.
*/
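/*
 * Worked example of boundaries I-IV (illustrative numbers only): suppose the
 * sender has td_end = 1000, td_maxend = 2000 and the receiver has
 * td_end = 800, td_maxwin = 500. A segment with seq = 1500, end = 1600 and
 * ack = 700 passes all four checks:
 *	I.   seq (1500) <= sender.td_maxend (2000)
 *	II.  end (1600) >= sender.td_end (1000) - receiver.td_maxwin (500)
 *	III. ack (700)  <= receiver.td_end (800)
 *	IV.  ack (700)  >= receiver.td_end (800) - MAXACKWINDOW
 * whereas e.g. seq = 2500 would fail check I and be flagged as out of window.
 */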
static inline __u32 segment_seq_plus_len(__u32 seq,
size_t len,
unsigned int dataoff,
const struct tcphdr *tcph)
{
/* XXX Should I use payload length field in IP/IPv6 header ?
* - YK */
return (seq + len - dataoff - tcph->doff*4
+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
}
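/*
 * Informational example: for a bare SYN (no payload, no FIN) the data part of
 * skb->len equals dataoff + tcph->doff*4, so the function returns seq + 1,
 * i.e. the SYN itself consumes one unit of sequence space; a pure ACK with no
 * payload returns plain seq.
 */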
/* Fixme: what about big packets? */
#define MAXACKWINCONST 66000
#define MAXACKWINDOW(sender) \
((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
: MAXACKWINCONST)
/*
* Simplified tcp_parse_options routine from tcp_input.c
*/
static void tcp_options(const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
struct ip_ct_tcp_state *state)
{
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
const unsigned char *ptr;
int length = (tcph->doff*4) - sizeof(struct tcphdr);
if (!length)
return;
ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
length, buff);
if (!ptr)
return;
state->td_scale = 0;
state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
while (length > 0) {
int opcode=*ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
length--;
continue;
default:
if (length < 2)
return;
opsize=*ptr++;
if (opsize < 2) /* "silly options" */
return;
if (opsize > length)
return; /* don't parse partial options */
if (opcode == TCPOPT_SACK_PERM
&& opsize == TCPOLEN_SACK_PERM)
state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
else if (opcode == TCPOPT_WINDOW
&& opsize == TCPOLEN_WINDOW) {
state->td_scale = *(u_int8_t *)ptr;
if (state->td_scale > TCP_MAX_WSCALE)
state->td_scale = TCP_MAX_WSCALE;
state->flags |=
IP_CT_TCP_FLAG_WINDOW_SCALE;
}
ptr += opsize - 2;
length -= opsize;
}
}
}
static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
const struct tcphdr *tcph, __u32 *sack)
{
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
const unsigned char *ptr;
int length = (tcph->doff*4) - sizeof(struct tcphdr);
__u32 tmp;
if (!length)
return;
ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
length, buff);
if (!ptr)
return;
/* Fast path for timestamp-only option */
if (length == TCPOLEN_TSTAMP_ALIGNED
&& *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
| (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8)
| TCPOLEN_TIMESTAMP))
return;
while (length > 0) {
int opcode = *ptr++;
int opsize, i;
switch (opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
length--;
continue;
default:
if (length < 2)
return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
if (opsize > length)
return; /* don't parse partial options */
if (opcode == TCPOPT_SACK
&& opsize >= (TCPOLEN_SACK_BASE
+ TCPOLEN_SACK_PERBLOCK)
&& !((opsize - TCPOLEN_SACK_BASE)
% TCPOLEN_SACK_PERBLOCK)) {
for (i = 0;
i < (opsize - TCPOLEN_SACK_BASE);
i += TCPOLEN_SACK_PERBLOCK) {
tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
if (after(tmp, *sack))
*sack = tmp;
}
return;
}
ptr += opsize - 2;
length -= opsize;
}
}
}
static void tcp_init_sender(struct ip_ct_tcp_state *sender,
struct ip_ct_tcp_state *receiver,
const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
u32 end, u32 win,
enum ip_conntrack_dir dir)
{
/* SYN-ACK in reply to a SYN
* or SYN from reply direction in simultaneous open.
*/
sender->td_end =
sender->td_maxend = end;
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
/* RFC 1323:
* Both sides must send the Window Scale option
* to enable window scaling in either direction.
*/
if (dir == IP_CT_DIR_REPLY &&
!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
sender->td_scale = 0;
receiver->td_scale = 0;
}
}
__printf(6, 7)
static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const struct nf_hook_state *state,
const struct ip_ct_tcp_state *sender,
enum nf_ct_tcp_action ret,
const char *fmt, ...)
{
const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
struct va_format vaf;
va_list args;
bool be_liberal;
be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
if (be_liberal)
return NFCT_TCP_ACCEPT;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
va_end(args);
return ret;
}
static enum nf_ct_tcp_action
tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
unsigned int index, const struct sk_buff *skb,
unsigned int dataoff, const struct tcphdr *tcph,
const struct nf_hook_state *hook_state)
{
struct ip_ct_tcp *state = &ct->proto.tcp;
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
__u32 seq, ack, sack, end, win, swin;
bool in_recv_win, seq_ok;
s32 receiver_offset;
u16 win_raw;
/*
* Get the required data from the packet.
*/
seq = ntohl(tcph->seq);
ack = sack = ntohl(tcph->ack_seq);
win_raw = ntohs(tcph->window);
win = win_raw;
end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
tcp_sack(skb, dataoff, tcph, &sack);
/* Take into account NAT sequence number mangling */
receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
ack -= receiver_offset;
sack -= receiver_offset;
if (sender->td_maxwin == 0) {
/*
* Initialize sender data.
*/
if (tcph->syn) {
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
end, win, dir);
if (!tcph->ack)
/* Simultaneous open */
return NFCT_TCP_ACCEPT;
} else {
/*
* We are in the middle of a connection,
* its history is lost for us.
* Let's try to use the data from the packet.
*/
sender->td_end = end;
swin = win << sender->td_scale;
sender->td_maxwin = (swin == 0 ? 1 : swin);
sender->td_maxend = end + sender->td_maxwin;
if (receiver->td_maxwin == 0) {
/* We haven't seen traffic in the other
* direction yet but we have to tweak window
* tracking to pass III and IV until that
* happens.
*/
receiver->td_end = receiver->td_maxend = sack;
} else if (sack == receiver->td_end + 1) {
/* Likely a reply to a keepalive.
* Needed for III.
*/
receiver->td_end++;
}
}
} else if (tcph->syn &&
after(end, sender->td_end) &&
(state->state == TCP_CONNTRACK_SYN_SENT ||
state->state == TCP_CONNTRACK_SYN_RECV)) {
/*
* RFC 793: "if a TCP is reinitialized ... then it need
* not wait at all; it must only be sure to use sequence
* numbers larger than those recently used."
*
* Re-init state for this direction, just like for the first
* syn(-ack) reply, it might differ in seq, ack or tcp options.
*/
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
end, win, dir);
if (dir == IP_CT_DIR_REPLY && !tcph->ack)
return NFCT_TCP_ACCEPT;
}
if (!(tcph->ack)) {
/*
* If there is no ACK, just pretend it was set and OK.
*/
ack = sack = receiver->td_end;
} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
(TCP_FLAG_ACK|TCP_FLAG_RST))
&& (ack == 0)) {
/*
* Broken TCP stacks, that set ACK in RST packets as well
* with zero ack value.
*/
ack = sack = receiver->td_end;
}
if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
/*
* RST sent answering SYN.
*/
seq = end = sender->td_end;
seq_ok = before(seq, sender->td_maxend + 1);
if (!seq_ok) {
u32 overshot = end - sender->td_maxend + 1;
bool ack_ok;
ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
in_recv_win = receiver->td_maxwin &&
after(end, sender->td_end - receiver->td_maxwin - 1);
if (in_recv_win &&
ack_ok &&
overshot <= receiver->td_maxwin &&
before(sack, receiver->td_end + 1)) {
/* Work around TCPs that send more bytes than allowed by
* the receive window.
*
* If the (marked as invalid) packet is allowed to pass by
* the ruleset and the peer acks this data, then it's possible
* all future packets will trigger 'ACK is over upper bound' check.
*
* Thus if only the sequence check fails then do update td_end so
* possible ACK for this data can update internal state.
*/
sender->td_end = end;
sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"%u bytes more than expected", overshot);
}
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
"SEQ is over upper bound %u (over the window of the receiver)",
sender->td_maxend + 1);
}
if (!before(sack, receiver->td_end + 1))
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
"ACK is over upper bound %u (ACKed data not seen yet)",
receiver->td_end + 1);
/* Is the ending sequence in the receive window (if available)? */
in_recv_win = !receiver->td_maxwin ||
after(end, sender->td_end - receiver->td_maxwin - 1);
if (!in_recv_win)
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"SEQ is under lower bound %u (already ACKed data retransmitted)",
sender->td_end - receiver->td_maxwin - 1);
if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"ignored ACK under lower bound %u (possible overly delayed)",
receiver->td_end - MAXACKWINDOW(sender) - 1);
/* Take into account window scaling (RFC 1323). */
if (!tcph->syn)
win <<= sender->td_scale;
/* Update sender data. */
swin = win + (sack - ack);
if (sender->td_maxwin < swin)
sender->td_maxwin = swin;
if (after(end, sender->td_end)) {
sender->td_end = end;
sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
}
if (tcph->ack) {
if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
sender->td_maxack = ack;
sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
} else if (after(ack, sender->td_maxack)) {
sender->td_maxack = ack;
}
}
/* Update receiver data. */
if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
receiver->td_maxwin += end - sender->td_maxend;
if (after(sack + win, receiver->td_maxend - 1)) {
receiver->td_maxend = sack + win;
if (win == 0)
receiver->td_maxend++;
}
if (ack == receiver->td_end)
receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
/* Check retransmissions. */
if (index == TCP_ACK_SET) {
if (state->last_dir == dir &&
state->last_seq == seq &&
state->last_ack == ack &&
state->last_end == end &&
state->last_win == win_raw) {
state->retrans++;
} else {
state->last_dir = dir;
state->last_seq = seq;
state->last_ack = ack;
state->last_end = end;
state->last_win = win_raw;
state->retrans = 0;
}
}
return NFCT_TCP_ACCEPT;
}
static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
enum ip_conntrack_dir dir,
int index,
const struct sk_buff *skb,
const struct nf_hook_state *hook_state)
{
const unsigned int *timeouts;
const struct nf_tcp_net *tn;
unsigned int timeout;
u32 expires;
if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
return;
/* We don't want to have connections hanging around in ESTABLISHED
* state for a long time 'just because' conntrack deemed a FIN/RST
* out-of-window.
*
* Shrink the timeout just like when there is unacked data.
* This speeds up eviction of 'dead' connections where the
* connection and conntracks internal state are out of sync.
*/
switch (index) {
case TCP_RST_SET:
case TCP_FIN_SET:
break;
default:
return;
}
if (ct->proto.tcp.last_dir != dir &&
(ct->proto.tcp.last_index == TCP_FIN_SET ||
ct->proto.tcp.last_index == TCP_RST_SET)) {
expires = nf_ct_expires(ct);
if (expires < 120 * HZ)
return;
tn = nf_tcp_pernet(nf_ct_net(ct));
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = tn->timeouts;
timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
if (expires > timeout) {
nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"packet (index %d, dir %d) response for index %d lower timeout to %u",
index, dir, ct->proto.tcp.last_index, timeout);
WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
}
} else {
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
}
}
/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
TCPHDR_URG) + 1] =
{
[TCPHDR_SYN] = 1,
[TCPHDR_SYN|TCPHDR_URG] = 1,
[TCPHDR_SYN|TCPHDR_ACK] = 1,
[TCPHDR_RST] = 1,
[TCPHDR_RST|TCPHDR_ACK] = 1,
[TCPHDR_FIN|TCPHDR_ACK] = 1,
[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
[TCPHDR_ACK] = 1,
[TCPHDR_ACK|TCPHDR_URG] = 1,
};
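/*
 * Example (informational): tcp_error() below masks out PSH/ECE/CWR and then
 * consults this table, so a SYN|ACK|PSH segment is reduced to SYN|ACK and
 * accepted, whereas a SYN|FIN or SYN|RST combination indexes a zero entry and
 * is logged as an "invalid tcp flag combination".
 */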
static void tcp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
}
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
static bool tcp_error(const struct tcphdr *th,
struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
unsigned int tcplen = skb->len - dataoff;
u8 tcpflags;
/* Not whole TCP header or malformed packet */
if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
tcp_error_log(skb, state, "truncated packet");
return true;
}
/* Checksum invalid? Ignore.
* We skip checking packets on the outgoing path
* because the checksum is assumed to be correct.
*/
/* FIXME: Source route IP option packets --RR */
if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING &&
nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
tcp_error_log(skb, state, "bad checksum");
return true;
}
/* Check TCP flags. */
tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
if (!tcp_valid_flags[tcpflags]) {
tcp_error_log(skb, state, "invalid tcp flag combination");
return true;
}
return false;
}
static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *th,
const struct nf_hook_state *state)
{
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
const struct nf_tcp_net *tn = nf_tcp_pernet(net);
/* Don't need lock here: this conntrack not in circulation yet */
new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
/* Invalid: delete conntrack */
if (new_state >= TCP_CONNTRACK_MAX) {
tcp_error_log(skb, state, "invalid new");
return false;
}
if (new_state == TCP_CONNTRACK_SYN_SENT) {
memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/* SYN packet */
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
dataoff, th);
ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
if (ct->proto.tcp.seen[0].td_maxwin == 0)
ct->proto.tcp.seen[0].td_maxwin = 1;
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
} else if (tn->tcp_loose == 0) {
/* Don't try to pick up connections. */
return false;
} else {
memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/*
* We are in the middle of a connection,
* its history is lost for us.
* Let's try to use the data from the packet.
*/
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
dataoff, th);
ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
if (ct->proto.tcp.seen[0].td_maxwin == 0)
ct->proto.tcp.seen[0].td_maxwin = 1;
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end +
ct->proto.tcp.seen[0].td_maxwin;
/* We assume SACK and liberal window checking to handle
* window scaling */
ct->proto.tcp.seen[0].flags =
ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
IP_CT_TCP_FLAG_BE_LIBERAL;
}
/* tcp_packet will set them */
ct->proto.tcp.last_index = TCP_NONE_SET;
return true;
}
static bool tcp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.tcp.state) {
case TCP_CONNTRACK_FIN_WAIT:
case TCP_CONNTRACK_LAST_ACK:
case TCP_CONNTRACK_TIME_WAIT:
case TCP_CONNTRACK_CLOSE:
case TCP_CONNTRACK_CLOSE_WAIT:
return true;
default:
break;
}
return false;
}
void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
{
enum tcp_conntrack old_state;
const unsigned int *timeouts;
u32 timeout;
if (!nf_ct_is_confirmed(ct))
return;
spin_lock_bh(&ct->lock);
old_state = ct->proto.tcp.state;
ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
if (old_state == TCP_CONNTRACK_CLOSE ||
test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
spin_unlock_bh(&ct->lock);
return;
}
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts) {
const struct nf_tcp_net *tn;
tn = nf_tcp_pernet(nf_ct_net(ct));
timeouts = tn->timeouts;
}
timeout = timeouts[TCP_CONNTRACK_CLOSE];
WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
spin_unlock_bh(&ct->lock);
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
}
static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
{
state->td_end = 0;
state->td_maxend = 0;
state->td_maxwin = 0;
state->td_maxack = 0;
state->td_scale = 0;
state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
}
/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
enum nf_ct_tcp_action res;
enum ip_conntrack_dir dir;
const struct tcphdr *th;
struct tcphdr _tcph;
unsigned long timeout;
th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return -NF_ACCEPT;
if (tcp_error(th, skb, dataoff, state))
return -NF_ACCEPT;
if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th, state))
return -NF_ACCEPT;
spin_lock_bh(&ct->lock);
old_state = ct->proto.tcp.state;
dir = CTINFO2DIR(ctinfo);
index = get_conntrack_index(th);
new_state = tcp_conntracks[dir][index][old_state];
switch (new_state) {
case TCP_CONNTRACK_SYN_SENT:
if (old_state < TCP_CONNTRACK_TIME_WAIT)
break;
/* RFC 1122: "When a connection is closed actively,
* it MUST linger in TIME-WAIT state for a time 2xMSL
* (Maximum Segment Lifetime). However, it MAY accept
* a new SYN from the remote TCP to reopen the connection
* directly from TIME-WAIT state, if..."
* We ignore the conditions because we are in the
* TIME-WAIT state anyway.
*
* Handle aborted connections: we and the server
* think there is an existing connection but the client
* aborts it and starts a new one.
*/
if (((ct->proto.tcp.seen[dir].flags
| ct->proto.tcp.seen[!dir].flags)
& IP_CT_TCP_FLAG_CLOSE_INIT)
|| (ct->proto.tcp.last_dir == dir
&& ct->proto.tcp.last_index == TCP_RST_SET)) {
/* Attempt to reopen a closed/aborted connection.
* Delete this connection and look up again. */
spin_unlock_bh(&ct->lock);
/* Only repeat if we can actually remove the timer.
* Destruction may already be in progress in process
* context and we must give it a chance to terminate.
*/
if (nf_ct_kill(ct))
return -NF_REPEAT;
return NF_DROP;
}
fallthrough;
case TCP_CONNTRACK_IGNORE:
/* Ignored packets:
*
* Our connection entry may be out of sync, so ignore
* packets which may signal the real connection between
* the client and the server.
*
* a) SYN in ORIGINAL
* b) SYN/ACK in REPLY
* c) ACK in reply direction after initial SYN in original.
*
* If the ignored packet is invalid, the receiver will send
* a RST we'll catch below.
*/
if (index == TCP_SYNACK_SET
&& ct->proto.tcp.last_index == TCP_SYN_SET
&& ct->proto.tcp.last_dir != dir
&& ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
/* b) This SYN/ACK acknowledges a SYN that we earlier
* ignored as invalid. This means that the client and
* the server are both in sync, while the firewall is
* not. We get in sync from the previously annotated
* values.
*/
old_state = TCP_CONNTRACK_SYN_SENT;
new_state = TCP_CONNTRACK_SYN_RECV;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
ct->proto.tcp.last_end;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
ct->proto.tcp.last_end;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
ct->proto.tcp.last_win == 0 ?
1 : ct->proto.tcp.last_win;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
ct->proto.tcp.last_wscale;
ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
ct->proto.tcp.last_flags;
nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
break;
}
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
ct->proto.tcp.last_seq = ntohl(th->seq);
ct->proto.tcp.last_end =
segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
ct->proto.tcp.last_win = ntohs(th->window);
/* a) This is a SYN in ORIGINAL. The client and the server
* may be in sync but we are not. In that case, we annotate
* the TCP options and let the packet go through. If it is a
* valid SYN packet, the server will reply with a SYN/ACK, and
* then we'll get in sync. Otherwise, the server potentially
* responds with a challenge ACK if implementing RFC5961.
*/
if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
struct ip_ct_tcp_state seen = {};
ct->proto.tcp.last_flags =
ct->proto.tcp.last_wscale = 0;
tcp_options(skb, dataoff, th, &seen);
if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
ct->proto.tcp.last_flags |=
IP_CT_TCP_FLAG_WINDOW_SCALE;
ct->proto.tcp.last_wscale = seen.td_scale;
}
if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
ct->proto.tcp.last_flags |=
IP_CT_TCP_FLAG_SACK_PERM;
}
/* Mark the potential for an RFC5961 challenge ACK; this poses a
* special problem for the LAST_ACK state, as the ACK is then
* interpreted as ACKing the last FIN.
*/
if (old_state == TCP_CONNTRACK_LAST_ACK)
ct->proto.tcp.last_flags |=
IP_CT_EXP_CHALLENGE_ACK;
}
/* possible challenge ack reply to syn */
if (old_state == TCP_CONNTRACK_SYN_SENT &&
index == TCP_ACK_SET &&
dir == IP_CT_DIR_REPLY)
ct->proto.tcp.last_ack = ntohl(th->ack_seq);
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state,
"packet (index %d) in dir %d ignored, state %s",
index, dir,
tcp_conntrack_names[old_state]);
return NF_ACCEPT;
case TCP_CONNTRACK_MAX:
/* Special case for SYN proxy: when the SYN to the server or
* the SYN/ACK from the server is lost, the client may transmit
* a keep-alive packet while in SYN_SENT state. This needs to
* be associated with the original conntrack entry in order to
* generate a new SYN with the correct sequence number.
*/
if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
spin_unlock_bh(&ct->lock);
return NF_ACCEPT;
}
/* Invalid packet */
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state,
"packet (index %d) in dir %d invalid, state %s",
index, dir,
tcp_conntrack_names[old_state]);
return -NF_ACCEPT;
case TCP_CONNTRACK_TIME_WAIT:
/* RFC5961 compliance causes a stack to send a "challenge ACK",
* e.g. in response to spurious SYNs. Conntrack MUST NOT believe
* this ACK is acking the last FIN.
*/
if (old_state == TCP_CONNTRACK_LAST_ACK &&
index == TCP_ACK_SET &&
ct->proto.tcp.last_dir != dir &&
ct->proto.tcp.last_index == TCP_SYN_SET &&
(ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
/* Detected RFC5961 challenge ACK */
ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
return NF_ACCEPT; /* Don't change state */
}
break;
case TCP_CONNTRACK_SYN_SENT2:
/* tcp_conntracks table is not smart enough to handle
* simultaneous open.
*/
ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
break;
case TCP_CONNTRACK_SYN_RECV:
if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
new_state = TCP_CONNTRACK_ESTABLISHED;
break;
case TCP_CONNTRACK_CLOSE:
if (index != TCP_RST_SET)
break;
/* If we are closing, tuple might have been re-used already.
* last_index, last_ack, and all other ct fields used for
* sequence/window validation are outdated in that case.
*
* As the conntrack can already be expired by GC under pressure,
* just skip validation checks.
*/
if (tcp_can_early_drop(ct))
goto in_window;
/* td_maxack might be outdated if we let a SYN through earlier */
if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
ct->proto.tcp.last_index != TCP_SYN_SET) {
u32 seq = ntohl(th->seq);
/* If we are not in established state and SEQ=0 this is most
* likely an answer to a SYN we let go through above (last_index
* can be updated due to out-of-order ACKs).
*/
if (seq == 0 && !nf_conntrack_tcp_established(ct))
break;
if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
!tn->tcp_ignore_invalid_rst) {
/* Invalid RST */
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
return -NF_ACCEPT;
}
if (!nf_conntrack_tcp_established(ct) ||
seq == ct->proto.tcp.seen[!dir].td_maxack)
break;
/* Check if rst is part of train, such as
* foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
* foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
*/
if (ct->proto.tcp.last_index == TCP_ACK_SET &&
ct->proto.tcp.last_dir == dir &&
seq == ct->proto.tcp.last_end)
break;
/* ... RST sequence number doesn't match exactly, keep
* established state to allow a possible challenge ACK.
*/
new_state = old_state;
}
if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
&& ct->proto.tcp.last_index == TCP_SYN_SET)
|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
&& ct->proto.tcp.last_index == TCP_ACK_SET))
&& ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
/* RST sent to invalid SYN or ACK we had let through
* at a) and c) above:
*
* a) SYN was in window then
* c) we hold a half-open connection.
*
* Delete our connection entry.
* We skip window checking, because packet might ACK
* segments we ignored. */
goto in_window;
}
/* Reset in response to a challenge-ack we let through earlier */
if (old_state == TCP_CONNTRACK_SYN_SENT &&
ct->proto.tcp.last_index == TCP_ACK_SET &&
ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
ntohl(th->seq) == ct->proto.tcp.last_ack)
goto in_window;
break;
default:
/* Keep compilers happy. */
break;
}
res = tcp_in_window(ct, dir, index,
skb, dataoff, th, state);
switch (res) {
case NFCT_TCP_IGNORE:
spin_unlock_bh(&ct->lock);
return NF_ACCEPT;
case NFCT_TCP_INVALID:
nf_tcp_handle_invalid(ct, dir, index, skb, state);
spin_unlock_bh(&ct->lock);
return -NF_ACCEPT;
case NFCT_TCP_ACCEPT:
break;
}
in_window:
/* From now on we have got in-window packets */
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
ct->proto.tcp.state = new_state;
if (old_state != new_state
&& new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = tn->timeouts;
if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if (unlikely(index == TCP_RST_SET))
timeout = timeouts[TCP_CONNTRACK_CLOSE];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
else if (ct->proto.tcp.last_win == 0 &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else
timeout = timeouts[new_state];
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
/* If the only reply is a RST, we can consider ourselves not to
have an established connection: this is a fairly common
problem case, so we can delete the conntrack
immediately. --RR */
if (th->rst) {
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
/* do not renew timeout on SYN retransmit.
*
* Else port reuse by client or NAT middlebox can keep
* entry alive indefinitely (including nat info).
*/
return NF_ACCEPT;
}
/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
* pickup with loose=1. Avoid large ESTABLISHED timeout.
*/
if (new_state == TCP_CONNTRACK_ESTABLISHED &&
timeout > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
&& (old_state == TCP_CONNTRACK_SYN_RECV
|| old_state == TCP_CONNTRACK_ESTABLISHED)
&& new_state == TCP_CONNTRACK_ESTABLISHED) {
/* Set ASSURED if we see valid ack in ESTABLISHED
after SYN_RECV or a valid answer for a picked up
connection. */
set_bit(IPS_ASSURED_BIT, &ct->status);
nf_conntrack_event_cache(IPCT_ASSURED, ct);
}
nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
return NF_ACCEPT;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
struct nf_ct_tcp_flags tmp = {};
spin_lock_bh(&ct->lock);
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
goto nla_put_failure;
if (destroy)
goto skip_state;
if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
ct->proto.tcp.seen[0].td_scale) ||
nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
ct->proto.tcp.seen[1].td_scale))
goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[0].flags;
if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
sizeof(struct nf_ct_tcp_flags), &tmp))
goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[1].flags;
if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
sizeof(struct nf_ct_tcp_flags), &tmp))
goto nla_put_failure;
skip_state:
spin_unlock_bh(&ct->lock);
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
spin_unlock_bh(&ct->lock);
return -1;
}
static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
[CTA_PROTOINFO_TCP_STATE] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_WSCALE_REPLY] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL] = { .len = sizeof(struct nf_ct_tcp_flags) },
[CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) },
};
#define TCP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
int err;
/* an update might not contain anything about the private
* protocol info; in that case skip the parsing */
if (!pattr)
return 0;
err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
tcp_nla_policy, NULL);
if (err < 0)
return err;
if (tb[CTA_PROTOINFO_TCP_STATE] &&
nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
return -EINVAL;
spin_lock_bh(&ct->lock);
if (tb[CTA_PROTOINFO_TCP_STATE])
ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
struct nf_ct_tcp_flags *attr =
nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
ct->proto.tcp.seen[0].flags &= ~attr->mask;
ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
}
if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
struct nf_ct_tcp_flags *attr =
nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
ct->proto.tcp.seen[1].flags &= ~attr->mask;
ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
}
if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
ct->proto.tcp.seen[0].td_scale =
nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
ct->proto.tcp.seen[1].td_scale =
nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
}
spin_unlock_bh(&ct->lock);
return 0;
}
static unsigned int tcp_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
unsigned int *timeouts = data;
int i;
if (!timeouts)
timeouts = tn->timeouts;
/* set default TCP timeouts. */
for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
timeouts[i] = tn->timeouts[i];
if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
timeouts[TCP_CONNTRACK_SYN_SENT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
timeouts[TCP_CONNTRACK_SYN_RECV] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
timeouts[TCP_CONNTRACK_ESTABLISHED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
timeouts[TCP_CONNTRACK_FIN_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
timeouts[TCP_CONNTRACK_LAST_ACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
timeouts[TCP_CONNTRACK_TIME_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
timeouts[TCP_CONNTRACK_CLOSE] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
timeouts[TCP_CONNTRACK_SYN_SENT2] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
timeouts[TCP_CONNTRACK_RETRANS] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_UNACK]) {
timeouts[TCP_CONNTRACK_UNACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
}
timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
return 0;
}
static int
tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
[CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_tcp_init_net(struct net *net)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
int i;
for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
tn->timeouts[i] = tcp_timeouts[i];
/* timeouts[0] is unused, make it the same as SYN_SENT so
 * ->timeouts[0] contains the 'new' timeout, like udp or icmp.
*/
tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
/* If it is set to zero, we disable picking up already established
* connections.
*/
tn->tcp_loose = 1;
/* "Be conservative in what you do,
* be liberal in what you accept from others."
* If it's non-zero, we mark only out of window RST segments as INVALID.
*/
tn->tcp_be_liberal = 0;
/* If it's non-zero, we turn off RST sequence number check */
tn->tcp_ignore_invalid_rst = 0;
/* Max number of retransmitted packets without receiving an (acceptable)
* ACK from the destination. If this number is reached, a shorter timer
* will be started.
*/
tn->tcp_max_retrans = 3;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
tn->offload_timeout = 30 * HZ;
#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
{
.l4proto = IPPROTO_TCP,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nlattr_size = TCP_NLATTR_SIZE,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_TCP_MAX,
.obj_size = sizeof(unsigned int) *
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Sleepable Read-Copy Update mechanism for mutual exclusion
*
* Copyright (C) IBM Corporation, 2006
* Copyright (C) Fujitsu, 2012
*
* Author: Paul McKenney <paulmck@linux.ibm.com>
* Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU/ *.txt
*
*/
#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>
struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key);
#define init_srcu_struct(ssp) \
({ \
static struct lock_class_key __srcu_key; \
\
__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *ssp);
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */
#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
// 0x4 // SRCU-lite is no longer with us.
#define SRCU_READ_FLAVOR_FAST 0x8 // srcu_read_lock_fast().
#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
SRCU_READ_FLAVOR_FAST) // All of the above.
#define SRCU_READ_FLAVOR_SLOWGP SRCU_READ_FLAVOR_FAST
// Flavors requiring synchronize_rcu()
// instead of smp_mb().
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
#else
#error "Unknown SRCU implementation specified to kernel configuration"
#endif
void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
void synchronize_srcu(struct srcu_struct *ssp);
#define SRCU_GET_STATE_COMPLETED 0x1
/**
* get_completed_synchronize_srcu - Return a pre-completed polled state cookie
*
* Returns a value that poll_state_synchronize_srcu() will always treat
* as a cookie whose grace period has already completed.
*/
static inline unsigned long get_completed_synchronize_srcu(void)
{
return SRCU_GET_STATE_COMPLETED;
}
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
// Maximum number of unsigned long values corresponding to
// not-yet-completed SRCU grace periods.
#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2
/**
* same_state_synchronize_srcu - Are two old-state values identical?
* @oldstate1: First old-state value.
* @oldstate2: Second old-state value.
*
* The two old-state values must have been obtained from either
* get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
* get_completed_synchronize_srcu(). Returns @true if the two values are
* identical and @false otherwise. This allows structures whose lifetimes
* are tracked by old-state values to push these values to a list header,
* allowing those structures to be slightly smaller.
*/
static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
{
return oldstate1 == oldstate2;
}
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
#else
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
__srcu_read_unlock(ssp, idx);
}
#endif /* CONFIG_NEED_SRCU_NMI_SAFE */
void srcu_init(void);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
* srcu_read_lock_held - might we be in SRCU read-side critical section?
* @ssp: The srcu_struct structure to check
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
* this assumes we are in an SRCU read-side critical section unless it can
* prove otherwise.
*
* Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
*
* Note that SRCU is based on its own state machine and does not rely on
* normal RCU; it can therefore be called from a CPU that, from an RCU
* point of view, is in the idle loop or even offline.
*/
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
return lock_is_held(&ssp->dep_map);
}
/*
* Annotations provide deadlock detection for SRCU.
*
* Similar to other lockdep annotations, except there is an additional
* srcu_lock_sync(), which is basically an empty *write*-side critical section,
* see lock_sync() for more information.
*/
/* Annotates a srcu_read_lock() */
static inline void srcu_lock_acquire(struct lockdep_map *map)
{
lock_map_acquire_read(map);
}
/* Annotates a srcu_read_lock() */
static inline void srcu_lock_release(struct lockdep_map *map)
{
lock_map_release(map);
}
/* Annotates a synchronize_srcu() */
static inline void srcu_lock_sync(struct lockdep_map *map)
{
lock_map_sync(map);
}
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
return 1;
}
#define srcu_lock_acquire(m) do { } while (0)
#define srcu_lock_release(m) do { } while (0)
#define srcu_lock_sync(m) do { } while (0)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
* @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
* @c: condition to check for update-side use
*
* If PROVE_RCU is enabled, invoking this outside of an RCU read-side
* critical section will result in an RCU-lockdep splat, unless @c evaluates
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
#define srcu_dereference_check(p, ssp, c) \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || srcu_read_lock_held(ssp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
* @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
*
* Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
* is enabled, invoking this outside of an RCU read-side critical
* section will result in an RCU-lockdep splat.
*/
#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
/**
* srcu_dereference_notrace - no tracing and no lockdep calls from here
* @p: the pointer to fetch and protect for later dereferencing
* @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
*/
#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section. Note that SRCU read-side
* critical sections may be nested. However, it is illegal to
* call anything that waits on an SRCU grace period for the same
* srcu_struct, whether directly or indirectly. Please note that
* one way to indirectly wait on an SRCU grace period is to acquire
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited().
*
* The return value from srcu_read_lock() is guaranteed to be
* non-negative. This value must be passed unaltered to the matching
* srcu_read_unlock(). Note that srcu_read_lock() and the matching
* srcu_read_unlock() must occur in the same context, for example, it is
* illegal to invoke srcu_read_unlock() in an irq handler if the matching
* srcu_read_lock() was invoked in process context. Or, for that matter, to
* invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
* from another.
*/
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
srcu_lock_acquire(&ssp->dep_map);
return retval;
}
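/*
 * Illustrative sketch of a reader-side critical section using the above
 * (not part of the original header; 'my_srcu', 'shared_ptr' and 'struct
 * foo' are hypothetical names):
 *
 *      struct foo *p;
 *      int idx;
 *
 *      idx = srcu_read_lock(&my_srcu);
 *      p = srcu_dereference(shared_ptr, &my_srcu);
 *      if (p)
 *              do_something_with(p);
 *      srcu_read_unlock(&my_srcu, idx);
 *
 * The index returned by srcu_read_lock() must be passed unchanged to the
 * matching srcu_read_unlock(), and 'p' must not be used once the critical
 * section has ended.
 */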
/**
* srcu_read_lock_fast - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section, but for a light-weight
* smp_mb()-free reader. See srcu_read_lock() for more information.
*
* If srcu_read_lock_fast() is ever used on an srcu_struct structure,
* then none of the other flavors may be used, whether before, during,
* or after. Note that grace-period auto-expediting is disabled for _fast
* srcu_struct structures because auto-expedited grace periods invoke
* synchronize_rcu_expedited(), IPIs and all.
*
* Note that srcu_read_lock_fast() can be invoked only from those contexts
* where RCU is watching, that is, from contexts where it would be legal
* to invoke rcu_read_lock(). Otherwise, lockdep will complain.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
{
struct srcu_ctr __percpu *retval;
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast().");
srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST);
retval = __srcu_read_lock_fast(ssp);
rcu_try_lock_acquire(&ssp->dep_map);
return retval;
}
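/*
 * Illustrative sketch for the _fast variant (not part of the original
 * header; 'my_srcu' and 'shared_ptr' are hypothetical names). The value
 * returned by srcu_read_lock_fast() is a per-CPU counter pointer rather
 * than an integer index:
 *
 *      struct srcu_ctr __percpu *scp;
 *
 *      scp = srcu_read_lock_fast(&my_srcu);
 *      p = srcu_dereference(shared_ptr, &my_srcu);
 *      if (p)
 *              do_something_with(p);
 *      srcu_read_unlock_fast(&my_srcu, scp);
 */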
/*
* Used by tracing, cannot be traced and cannot call lockdep.
* See srcu_read_lock_fast() for more information.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
__acquires(ssp)
{
struct srcu_ctr __percpu *retval;
srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST);
retval = __srcu_read_lock_fast(ssp);
return retval;
}
/**
* srcu_down_read_fast - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
* Enter a semaphore-like SRCU read-side critical section, but for
* a light-weight smp_mb()-free reader. See srcu_read_lock_fast() and
* srcu_down_read() for more information.
*
* The same srcu_struct may be used concurrently by srcu_down_read_fast()
* and srcu_read_lock_fast().
*/
static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST);
return __srcu_read_lock_fast(ssp);
}
/**
* srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section, but in an NMI-safe manner.
* See srcu_read_lock() for more information.
*
* If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
* then none of the other flavors may be used, whether before, during,
* or after.
*/
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
retval = __srcu_read_lock_nmisafe(ssp);
rcu_try_lock_acquire(&ssp->dep_map);
return retval;
}
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
return retval;
}
/**
* srcu_down_read - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
* Enter a semaphore-like SRCU read-side critical section. Note that
* SRCU read-side critical sections may be nested. However, it is
* illegal to call anything that waits on an SRCU grace period for the
* same srcu_struct, whether directly or indirectly. Please note that
* one way to indirectly wait on an SRCU grace period is to acquire
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited(). But if you want lockdep to help you
* keep this stuff straight, you should instead use srcu_read_lock().
*
* The semaphore-like nature of srcu_down_read() means that the matching
* srcu_up_read() can be invoked from some other context, for example,
* from some other task or from an irq handler. However, neither
* srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
*
* Calls to srcu_down_read() may be nested, similar to the manner in
* which calls to down_read() may be nested. The same srcu_struct may be
* used concurrently by srcu_down_read() and srcu_read_lock().
*/
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
WARN_ON_ONCE(in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
return __srcu_read_lock(ssp);
}
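/*
 * Illustrative sketch of the semaphore-like usage (not part of the
 * original header; 'my_srcu', 'req' and 'wq' are hypothetical names).
 * The acquisition and release may happen in different contexts, e.g. the
 * index can be stashed in a request structure and released from a work
 * handler:
 *
 *      req->srcu_idx = srcu_down_read(&my_srcu);
 *      queue_work(wq, &req->work);
 *
 *      // ... later, in the work handler, possibly another task:
 *      srcu_up_read(&my_srcu, req->srcu_idx);
 */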
/**
* srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
*
* Exit an SRCU read-side critical section.
*/
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
srcu_lock_release(&ssp->dep_map);
__srcu_read_unlock(ssp, idx);
}
/**
* srcu_read_unlock_fast - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @scp: return value from corresponding srcu_read_lock_fast().
*
* Exit a light-weight SRCU read-side critical section.
*/
static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
__releases(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
srcu_lock_release(&ssp->dep_map);
__srcu_read_unlock_fast(ssp, scp);
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast().");
}
/*
* Used by tracing, cannot be traced and cannot call lockdep.
* See srcu_read_unlock_fast() for more information.
*/
static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
struct srcu_ctr __percpu *scp) __releases(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
__srcu_read_unlock_fast(ssp, scp);
}
/**
* srcu_up_read_fast - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @scp: return value from corresponding srcu_read_lock_fast().
*
* Exit an SRCU read-side critical section, but not necessarily from
* the same context as the matching srcu_down_read_fast().
*/
static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
__releases(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
__srcu_read_unlock_fast(ssp, scp);
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_up_read_fast().");
}
/**
* srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock_nmisafe().
*
* Exit an SRCU read-side critical section, but in an NMI-safe manner.
*/
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
rcu_lock_release(&ssp->dep_map);
__srcu_read_unlock_nmisafe(ssp, idx);
}
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
}
/**
* srcu_up_read - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
*
* Exit an SRCU read-side critical section, but not necessarily from
* the same context as the matching srcu_down_read().
*/
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
WARN_ON_ONCE(in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
}
/**
* smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
*
* Converts the preceding srcu_read_unlock into a two-way memory barrier.
*
* Call this after srcu_read_unlock, to guarantee that all memory operations
* that occur after smp_mb__after_srcu_read_unlock will appear to happen after
* the preceding srcu_read_unlock.
*/
static inline void smp_mb__after_srcu_read_unlock(void)
{
/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
/**
* smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock
*
* Converts the preceding srcu_read_lock into a two-way memory barrier.
*
* Call this after srcu_read_lock, to guarantee that all memory operations
* that occur after smp_mb__after_srcu_read_lock will appear to happen after
* the preceding srcu_read_lock.
*/
static inline void smp_mb__after_srcu_read_lock(void)
{
/* __srcu_read_lock has smp_mb() internally so nothing to do here. */
}
DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
_T->idx = srcu_read_lock(_T->lock),
srcu_read_unlock(_T->lock, _T->idx),
int idx)
DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
_T->scp = srcu_read_lock_fast(_T->lock),
srcu_read_unlock_fast(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
_T->scp = srcu_read_lock_fast_notrace(_T->lock),
srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
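/*
 * Illustrative sketch of the scope-based guards defined above (not part
 * of the original header; 'my_srcu' and 'shared_ptr' are hypothetical
 * names). The guard drops the SRCU reader reference automatically when
 * the enclosing scope is left:
 *
 *      {
 *              guard(srcu)(&my_srcu);
 *              p = srcu_dereference(shared_ptr, &my_srcu);
 *              if (p)
 *                      do_something_with(p);
 *      }       // srcu_read_unlock() happens here.
 */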
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* VLAN An implementation of 802.1Q VLAN tagging.
*
* Authors: Ben Greear <greearb@candelatech.com>
*/
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#include <uapi/linux/if_vlan.h>
#define VLAN_HLEN 4 /* The additional bytes required by VLAN
* (in addition to the Ethernet header)
*/
#define VLAN_ETH_HLEN 18 /* Total octets in header. */
#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */
/*
* According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
*/
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
/**
* struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
* @h_dest: destination ethernet address
* @h_source: source ethernet address
* @h_vlan_proto: ethernet protocol
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
struct_group(addrs,
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
);
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
#include <linux/skbuff.h>
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
/* Prefer this version in TX path, instead of
* skb_reset_mac_header() + vlan_eth_hdr()
*/
static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
{
return (struct vlan_ethhdr *)skb->data;
}
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
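/*
 * Illustrative sketch of the accessors above (not part of the original
 * header; 'skb' is assumed to be a received, hardware-accelerated
 * VLAN-tagged buffer):
 *
 *      if (skb_vlan_tag_present(skb)) {
 *              u16 vid  = skb_vlan_tag_get_id(skb);    // 12-bit VLAN ID
 *              u8  prio = skb_vlan_tag_get_prio(skb);  // 3-bit PCP
 *              ...
 *      }
 *
 * For example, a TCI of 0xa005 yields VID 5 and priority 5, since
 * (0xa005 & 0xe000) >> 13 == 5 and 0xa005 & 0x0fff == 5.
 */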
static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
ASSERT_RTNL();
return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}
static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
{
ASSERT_RTNL();
call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
}
static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
{
ASSERT_RTNL();
return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
}
static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
{
ASSERT_RTNL();
call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
}
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
* @rx_packets: number of received packets
* @rx_bytes: number of received bytes
* @rx_multicast: number of received multicast packets
* @tx_packets: number of transmitted packets
* @tx_bytes: number of transmitted bytes
* @syncp: synchronization point for 64bit counters
* @rx_errors: number of rx errors
* @tx_dropped: number of tx drops
*/
struct vlan_pcpu_stats {
u64_stats_t rx_packets;
u64_stats_t rx_bytes;
u64_stats_t rx_multicast;
u64_stats_t tx_packets;
u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errors;
u32 tx_dropped;
};
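/*
 * Illustrative sketch of updating these per-CPU counters (not part of the
 * original header). A hypothetical RX handler on a VLAN device might do:
 *
 *      struct vlan_pcpu_stats *stats;
 *
 *      stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
 *      u64_stats_update_begin(&stats->syncp);
 *      u64_stats_inc(&stats->rx_packets);
 *      u64_stats_add(&stats->rx_bytes, skb->len);
 *      u64_stats_update_end(&stats->syncp);
 *
 * Readers aggregate the per-CPU values under
 * u64_stats_fetch_begin()/u64_stats_fetch_retry().
 */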
#if IS_ENABLED(CONFIG_VLAN_8021Q)
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
extern int vlan_for_each(struct net_device *dev,
int (*action)(struct net_device *dev, int vid,
void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
/**
* struct vlan_priority_tci_mapping - vlan egress priority mappings
* @priority: skb priority
* @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
* @next: pointer to next struct
*/
struct vlan_priority_tci_mapping {
u32 priority;
u16 vlan_qos;
struct vlan_priority_tci_mapping *next;
};
struct proc_dir_entry;
struct netpoll;
/**
* struct vlan_dev_priv - VLAN private device data
* @nr_ingress_mappings: number of ingress priority mappings
* @ingress_priority_map: ingress priority mappings
* @nr_egress_mappings: number of egress priority mappings
* @egress_priority_map: hash of egress priority mappings
* @vlan_proto: VLAN encapsulation protocol
* @vlan_id: VLAN identifier
* @flags: device flags
* @real_dev: underlying netdevice
* @dev_tracker: refcount tracker for @real_dev reference
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
* @netpoll: netpoll instance "propagated" down to @real_dev
*/
struct vlan_dev_priv {
unsigned int nr_ingress_mappings;
u32 ingress_priority_map[8];
unsigned int nr_egress_mappings;
struct vlan_priority_tci_mapping *egress_priority_map[16];
__be16 vlan_proto;
u16 vlan_id;
u16 flags;
struct net_device *real_dev;
netdevice_tracker dev_tracker;
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
};
static inline bool is_vlan_dev(const struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN;
}
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
return netdev_priv(dev);
}
static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
{
struct vlan_priority_tci_mapping *mp;
smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
while (mp) {
if (mp->priority == skprio) {
return mp->vlan_qos; /* This should already be shifted
* to mask correctly with the
* VLAN's TCI */
}
mp = mp->next;
}
return 0;
}
extern bool vlan_do_receive(struct sk_buff **skb);
extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
extern int vlan_vids_add_by_dev(struct net_device *dev,
const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
const struct net_device *by_dev);
extern bool vlan_uses_dev(const struct net_device *dev);
#else
static inline bool is_vlan_dev(const struct net_device *dev)
{
return false;
}
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
{
return NULL;
}
static inline int
vlan_for_each(struct net_device *dev,
int (*action)(struct net_device *dev, int vid, void *arg),
void *arg)
{
return 0;
}
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
WARN_ON_ONCE(1);
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
WARN_ON_ONCE(1);
return 0;
}
static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
WARN_ON_ONCE(1);
return 0;
}
static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
u32 skprio)
{
return 0;
}
static inline bool vlan_do_receive(struct sk_buff **skb)
{
return false;
}
static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
return 0;
}
static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}
static inline int vlan_vids_add_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
return 0;
}
static inline void vlan_vids_del_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
}
static inline bool vlan_uses_dev(const struct net_device *dev)
{
return false;
}
#endif
/**
* eth_type_vlan - check for valid vlan ether type.
* @ethertype: ether type to check
*
* Returns: true if the ether type is a vlan ether type.
*/
static inline bool eth_type_vlan(__be16 ethertype)
{
switch (ethertype) {
case htons(ETH_P_8021Q):
case htons(ETH_P_8021AD):
return true;
default:
return false;
}
}
static inline bool vlan_hw_offload_capable(netdev_features_t features,
__be16 proto)
{
if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
return true;
if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
return true;
return false;
}
/**
* __vlan_insert_inner_tag - inner VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Does not change skb->protocol so this function can be used during receive.
*
* Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci,
unsigned int mac_len)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0)
return -ENOMEM;
skb_push(skb, VLAN_HLEN);
/* Move the mac header sans proto to the beginning of the new header. */
if (likely(mac_len > ETH_TLEN))
memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
if (skb_mac_header_was_set(skb))
skb->mac_header -= VLAN_HLEN;
veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
/* first, the ethernet type */
if (likely(mac_len >= ETH_TLEN)) {
/* h_vlan_encapsulated_proto should already be populated, and
* skb->data has space for h_vlan_proto
*/
veth->h_vlan_proto = vlan_proto;
} else {
/* h_vlan_encapsulated_proto should not be populated, and
* skb->data has no space for h_vlan_proto
*/
veth->h_vlan_encapsulated_proto = skb->protocol;
}
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
return 0;
}
/**
* __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Does not change skb->protocol so this function can be used during receive.
*
* Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}
/**
* vlan_insert_inner_tag - inner VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
*
* Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto,
u16 vlan_tci,
unsigned int mac_len)
{
int err;
err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
if (err) {
dev_kfree_skb_any(skb);
return NULL;
}
return skb;
}
/**
* vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
*
* Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}
/**
* vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
__be16 vlan_proto,
u16 vlan_tci)
{
skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (skb)
skb->protocol = vlan_proto;
return skb;
}
/**
* __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
* @skb: skbuff to clear
*
* Clears the VLAN information from @skb
*/
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
skb->vlan_all = 0;
}
/**
* __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
* @dst: skbuff to copy to
* @src: skbuff to copy from
*
* Copies VLAN information from @src to @dst (for branchless code)
*/
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
dst->vlan_all = src->vlan_all;
}
/*
* __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
*
* Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (likely(skb))
__vlan_hwaccel_clear_tag(skb);
return skb;
}
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
skb->vlan_tci = vlan_tci;
}
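/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * driver RX path whose hardware has already stripped the tag hands it to
 * the stack via the hwaccel fields instead of re-inserting it into the
 * payload. rx_desc_has_vlan() and rx_desc_vlan_tci() are hypothetical
 * driver helpers:
 *
 *      if (rx_desc_has_vlan(desc))
 *              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 *                                     rx_desc_vlan_tci(desc));
 *      napi_gro_receive(napi, skb);
 */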
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns: error if the skb is not of VLAN type
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
if (!eth_type_vlan(veth->h_vlan_proto))
return -ENODATA;
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
}
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns: error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
if (skb_vlan_tag_present(skb)) {
*vlan_tci = skb_vlan_tag_get(skb);
return 0;
} else {
*vlan_tci = 0;
return -ENODATA;
}
}
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns: error if the skb is not VLAN tagged
*/
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
return __vlan_hwaccel_get_tag(skb, vlan_tci);
} else {
return __vlan_get_tag(skb, vlan_tci);
}
}
/**
* __vlan_get_protocol_offset() - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
* @mac_offset: MAC offset
* @depth: buffer to store length of eth and vlan tags in bytes
*
* Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
__be16 type,
int mac_offset,
int *depth)
{
unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
/* if type is 802.1Q/AD then the header should already be
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
* ETH_HLEN otherwise
*/
if (eth_type_vlan(type)) {
if (vlan_depth) {
	if (WARN_ON(vlan_depth < VLAN_HLEN))
return 0;
vlan_depth -= VLAN_HLEN;
} else {
vlan_depth = ETH_HLEN;
}
do {
struct vlan_hdr vhdr, *vh;
vh = skb_header_pointer(skb, mac_offset + vlan_depth,
sizeof(vhdr), &vhdr);
if (unlikely(!vh || !--parse_depth))
	return 0;
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
} while (eth_type_vlan(type));
}
if (depth)
	*depth = vlan_depth;
return type;
}
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
return __vlan_get_protocol_offset(skb, type, 0, depth);
}
/**
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
*
* Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
__be16 type, int *depth)
{
int maclen;
type = __vlan_get_protocol(skb, type, &maclen);
if (type) {
	if (!pskb_may_pull(skb, maclen))
type = 0;
else if (depth)
	*depth = maclen;
}
return type;
}
/* A getter for the SKB protocol field which will handle VLAN tags consistently
* whether VLAN acceleration is enabled or not.
*/
static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
{
if (!skip_vlan)
/* VLAN acceleration strips the VLAN header from the skb and
* moves it to skb->vlan_proto
*/
return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
return vlan_get_protocol(skb);
}
static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
__be16 proto;
unsigned short *rawp;
/*
* Was a VLAN packet, grab the encapsulated protocol, which the layer
* three protocols care about.
*/
proto = vhdr->h_vlan_encapsulated_proto;
if (eth_proto_is_802_3(proto)) {
skb->protocol = proto;
return;
}
rawp = (unsigned short *)(vhdr + 1);
if (*rawp == 0xFFFF)
/*
* This is a magic hack to spot IPX packets. Older Novell
* breaks the protocol design and runs IPX over 802.3 without
* an 802.2 LLC layer. We look for FFFF which isn't a used
* 802.2 SSAP/DSAP. This won't work for fault tolerant netware
* but does for the rest.
*/
skb->protocol = htons(ETH_P_802_3);
else
/*
* Real 802.2 LLC
*/
skb->protocol = htons(ETH_P_802_2);
}
/**
* vlan_remove_tag - remove outer VLAN tag from payload
* @skb: skbuff to remove tag from
* @vlan_tci: buffer to store value
*
* Expects the skb to contain a VLAN tag in the payload, and to have skb->data
* pointing at the MAC header.
*
* Returns: a new pointer to skb->data, or NULL on failure to pull.
*/
static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
*vlan_tci = ntohs(vhdr->h_vlan_TCI);
memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
vlan_set_encap_proto(skb, vhdr);
return __skb_pull(skb, VLAN_HLEN);
}
/**
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
* Returns: true if the skb is tagged, regardless of whether it is hardware
* accelerated or not.
*/
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
if (!skb_vlan_tag_present(skb) &&
    likely(!eth_type_vlan(skb->protocol)))
	return false;
return true;
}
/**
* skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
* @skb: skbuff to query
*
* Returns: true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
if (!skb_vlan_tag_present(skb)) {
struct vlan_ethhdr *veh;
if (likely(!eth_type_vlan(protocol)))
return false;
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
return false;
veh = skb_vlan_eth_hdr(skb);
protocol = veh->h_vlan_encapsulated_proto;
}
if (!eth_type_vlan(protocol))
return false;
return true;
}
/**
* vlan_features_check - drop unsafe features for skb with multiple tags.
* @skb: skbuff to query
* @features: features to be checked
*
* Returns: features without unsafe ones if the skb has multiple tags.
*/
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
/* In the case of multi-tagged packets, use a direct mask
* instead of using netdev_intersect_features(), to make
* sure that only devices supporting NETIF_F_HW_CSUM will
* have checksum offloading support.
*/
features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
}
return features;
}
/**
* compare_vlan_header - Compare two vlan headers
* @h1: Pointer to vlan header
* @h2: Pointer to vlan header
*
* Compare two vlan headers.
*
* Please note that the alignment of h1 and h2 is only guaranteed to be 16 bits.
*
* Return: 0 if equal, arbitrary non-zero value if not equal.
*/
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return *(u32 *)h1 ^ *(u32 *)h2;
#else
return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
((__force u32)h1->h_vlan_encapsulated_proto ^
(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
#endif /* !(_LINUX_IF_VLAN_H_) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NETFILTER_NETDEV_H_
#define _NETFILTER_NETDEV_H_
#include <linux/netfilter.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NETFILTER_INGRESS
static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
{
#ifdef CONFIG_JUMP_LABEL
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
return rcu_access_pointer(skb->dev->nf_hooks_ingress);
}
/* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
int ret;
/* Must recheck the ingress hook head, in the event it became NULL
* after the check in nf_hook_ingress_active evaluated to true.
*/
if (unlikely(!e))
return 0;
nf_hook_state_init(&state, NF_NETDEV_INGRESS,
NFPROTO_NETDEV, skb->dev, NULL, NULL,
dev_net(skb->dev), NULL);
ret = nf_hook_slow(skb, &state, e, 0);
if (ret == 0)
return -1;
return ret;
}
#else /* CONFIG_NETFILTER_INGRESS */
static inline int nf_hook_ingress_active(struct sk_buff *skb)
{
return 0;
}
static inline int nf_hook_ingress(struct sk_buff *skb)
{
return 0;
}
#endif /* CONFIG_NETFILTER_INGRESS */
#ifdef CONFIG_NETFILTER_EGRESS
static inline bool nf_hook_egress_active(void)
{
#ifdef CONFIG_JUMP_LABEL
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
return false;
#endif
return true;
}
/**
* nf_hook_egress - classify packets before transmission
* @skb: packet to be classified
* @rc: result code which shall be returned by __dev_queue_xmit() on failure
* @dev: netdev whose egress hooks shall be applied to @skb
*
* Caller must hold rcu_read_lock.
*
* On ingress, packets are classified first by tc, then by netfilter.
* On egress, the order is reversed for symmetry. Conceptually, tc and
* netfilter can be thought of as layers, with netfilter layered above tc:
* When tc redirects a packet to another interface, netfilter is not applied
* because the packet is on the tc layer.
*
* The nf_skip_egress flag controls whether netfilter is applied on egress.
* It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
* packet passes through tc and netfilter. Because __dev_queue_xmit() may be
* called recursively by tunnel drivers such as vxlan, the flag is reverted to
* false after sch_handle_egress(). This ensures that netfilter is applied
* both on the overlay and underlying network.
*
* Returns: @skb on success or %NULL if the packet was consumed or filtered.
*/
static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
struct net_device *dev)
{
struct nf_hook_entries *e;
struct nf_hook_state state;
int ret;
#ifdef CONFIG_NETFILTER_SKIP_EGRESS
if (skb->nf_skip_egress)
return skb;
#endif
e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held());
if (!e)
return skb;
nf_hook_state_init(&state, NF_NETDEV_EGRESS,
NFPROTO_NETDEV, NULL, dev, NULL,
dev_net(dev), NULL);
/* nf assumes rcu_read_lock, not just read_lock_bh */
rcu_read_lock();
ret = nf_hook_slow(skb, &state, e, 0);
rcu_read_unlock();
if (ret == 1) {
return skb;
} else if (ret < 0) {
*rc = NET_XMIT_DROP;
return NULL;
} else { /* ret == 0 */
*rc = NET_XMIT_SUCCESS;
return NULL;
}
}
#else /* CONFIG_NETFILTER_EGRESS */
static inline bool nf_hook_egress_active(void)
{
return false;
}
static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
struct net_device *dev)
{
return skb;
}
#endif /* CONFIG_NETFILTER_EGRESS */
static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
{
#ifdef CONFIG_NETFILTER_SKIP_EGRESS
skb->nf_skip_egress = skip;
#endif
}
static inline void nf_hook_netdev_init(struct net_device *dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
#endif
}
#endif /* _NETFILTER_NETDEV_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* mm/mmap.c
*
* Written by obz.
*
* Address space accounting code <alan@lxorguk.ukuu.org.uk>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>
#include <linux/memfd.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>
#include "internal.h"
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
vm_flags_t vm_flags = vma->vm_flags;
pgprot_t vm_page_prot;
vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
if (vma_wants_writenotify(vma, vm_page_prot)) {
vm_flags &= ~VM_SHARED;
vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
}
/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
/*
* check_brk_limits() - Use platform specific check of range & verify mlock
* limits.
* @addr: The address to check
* @len: The size of increase.
*
* Return: 0 on success.
*/
static int check_brk_limits(unsigned long addr, unsigned long len)
{
unsigned long mapped_addr;
mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
if (IS_ERR_VALUE(mapped_addr))
return mapped_addr;
return mlock_future_ok(current->mm, current->mm->def_flags, len)
? 0 : -EAGAIN;
}
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long newbrk, oldbrk, origbrk;
struct mm_struct *mm = current->mm;
struct vm_area_struct *brkvma, *next = NULL;
unsigned long min_brk;
bool populate = false;
LIST_HEAD(uf);
struct vma_iterator vmi;
if (mmap_write_lock_killable(mm))
return -EINTR;
origbrk = mm->brk;
min_brk = mm->start_brk;
#ifdef CONFIG_COMPAT_BRK
/*
* CONFIG_COMPAT_BRK can still be overridden by setting
* randomize_va_space to 2, which will still cause mm->start_brk
* to be arbitrarily shifted
*/
if (!current->brk_randomized)
min_brk = mm->end_data;
#endif
if (brk < min_brk)
goto out;
/*
* Check against rlimit here. If this check is done later after the test
* of oldbrk with newbrk then it can escape the test and let the data
* segment grow beyond its set limit in the case where the limit is
* not page aligned -Ram Gupta
*/
if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
mm->end_data, mm->start_data))
goto out;
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
if (oldbrk == newbrk) {
mm->brk = brk;
goto success;
}
/* Always allow shrinking brk. */
if (brk <= mm->brk) {
/* Search one past newbrk */
vma_iter_init(&vmi, mm, newbrk);
brkvma = vma_find(&vmi, oldbrk);
if (!brkvma || brkvma->vm_start >= oldbrk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
* do_vmi_align_munmap() will drop the lock on success, so
* update it before calling do_vma_munmap().
*/
mm->brk = brk;
if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
/* unlock = */ true))
goto out;
goto success_unlocked;
}
if (check_brk_limits(oldbrk, newbrk - oldbrk))
goto out;
/*
* Only check if the next VMA is within the stack_guard_gap of the
* expansion area
*/
vma_iter_init(&vmi, mm, oldbrk);
next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
brkvma = vma_prev_limit(&vmi, mm->start_brk);
/* Ok, looks good - let it rip. */
if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
goto out;
mm->brk = brk;
if (mm->def_flags & VM_LOCKED)
populate = true;
success:
mmap_write_unlock(mm);
success_unlocked:
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
out:
mm->brk = origbrk;
mmap_write_unlock(mm);
return origbrk;
}
/*
* If a hint addr is less than mmap_min_addr change hint to be as
* low as possible but still greater than mmap_min_addr
*/
static inline unsigned long round_hint_to_min(unsigned long hint)
{
hint &= PAGE_MASK;
if (((void *)hint != NULL) &&
(hint < mmap_min_addr))
return PAGE_ALIGN(mmap_min_addr);
return hint;
}
bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
unsigned long bytes)
{
unsigned long locked_pages, limit_pages;
if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
return true;
locked_pages = bytes >> PAGE_SHIFT;
locked_pages += mm->locked_vm;
limit_pages = rlimit(RLIMIT_MEMLOCK);
limit_pages >>= PAGE_SHIFT;
return locked_pages <= limit_pages;
}
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
if (S_ISREG(inode->i_mode))
return MAX_LFS_FILESIZE;
if (S_ISBLK(inode->i_mode))
return MAX_LFS_FILESIZE;
if (S_ISSOCK(inode->i_mode))
return MAX_LFS_FILESIZE;
/* Special "we do even unsigned file positions" case */
if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
return 0;
/* Yes, random drivers might want more. But I'm tired of buggy drivers */
return ULONG_MAX;
}
static inline bool file_mmap_ok(struct file *file, struct inode *inode,
unsigned long pgoff, unsigned long len)
{
u64 maxsize = file_mmap_size_max(file, inode);
if (maxsize && len > maxsize)
return false;
maxsize -= len;
if (pgoff > maxsize >> PAGE_SHIFT)
return false;
return true;
}
/**
* do_mmap() - Perform a userland memory mapping into the current process
* address space of length @len with protection bits @prot, mmap flags @flags
* (from which VMA flags will be inferred), and any additional VMA flags to
* apply @vm_flags. If this is a file-backed mapping then the file is specified
* in @file and page offset into the file via @pgoff.
*
* This function does not perform security checks on the file and assumes, if
* @uf is non-NULL, the caller has provided a list head to track unmap events
* for userfaultfd @uf.
*
* It also simply indicates whether memory population is required by setting
* @populate, which must be non-NULL, expecting the caller to actually perform
* this task itself if appropriate.
*
* This function will invoke architecture-specific (and if provided and
* relevant, file system-specific) logic to determine the most appropriate
* unmapped area in which to place the mapping if not MAP_FIXED.
*
* Callers which require userland mmap() behaviour should invoke vm_mmap(),
* which is also exported for module use.
*
* Callers that require this behaviour, minus the security checks, userfaultfd
* and populate behaviour, and that handle the mmap write lock themselves,
* should call this function directly.
*
* Note that the returned address may reside within a merged VMA if an
* appropriate merge were to take place, so it doesn't necessarily specify the
* start of a VMA, rather only the start of a valid mapped range of length
* @len bytes, rounded down to the nearest page size.
*
* The caller must write-lock current->mm->mmap_lock.
*
* @file: An optional struct file pointer describing the file which is to be
* mapped, if a file-backed mapping.
* @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the
* address at which to perform this mapping. See mmap (2) for details. Must be
* page-aligned.
* @len: The length of the mapping. Will be page-aligned and must be at least 1
* page in size.
* @prot: Protection bits describing access required to the mapping. See mmap
* (2) for details.
* @flags: Flags specifying how the mapping should be performed, see mmap (2)
* for details.
* @vm_flags: VMA flags which should be set by default, or 0 otherwise.
* @pgoff: Page offset into the @file if file-backed, should be 0 otherwise.
* @populate: A pointer to a value which will be set to 0 if no population of
* the range is required, or the number of bytes to populate if it is. Must be
* non-NULL. See mmap (2) for details as to under what circumstances population
* of the range occurs.
* @uf: An optional pointer to a list head to track userfaultfd unmap events
* should unmapping events arise. If provided, it is up to the caller to manage
* this.
*
* Returns: Either an error, or the address at which the requested mapping has
* been performed.
*/
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, vm_flags_t vm_flags,
unsigned long pgoff, unsigned long *populate,
struct list_head *uf)
{
struct mm_struct *mm = current->mm;
int pkey = 0;
*populate = 0;
mmap_assert_write_locked(mm);
if (!len)
return -EINVAL;
/*
* Does the application expect PROT_READ to imply PROT_EXEC?
*
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we don't add PROT_EXEC.)
*/
if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
if (!(file && path_noexec(&file->f_path)))
prot |= PROT_EXEC;
/* force arch specific MAP_FIXED handling in get_unmapped_area */
if (flags & MAP_FIXED_NOREPLACE)
flags |= MAP_FIXED;
if (!(flags & MAP_FIXED))
addr = round_hint_to_min(addr);
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
if (!len)
return -ENOMEM;
/* offset overflow? */
if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
return -EOVERFLOW;
/* Too many mappings? */
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
/*
* addr is returned from get_unmapped_area.
* There are two cases:
* 1> MAP_FIXED == false
* unallocated memory, no need to check sealing.
* 2> MAP_FIXED == true
* sealing is checked inside mmap_region when
* do_vmi_munmap is called.
*/
if (prot == PROT_EXEC) {
pkey = execute_only_pkey(mm);
if (pkey < 0)
pkey = 0;
}
/* Do simple checking here so the lower-level routines won't have
* to. We assume access permissions have been handled by the open
* of the memory object, so we don't do any here.
*/
vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
/* Obtain the address to map to. We verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
if (IS_ERR_VALUE(addr))
return addr;
if (flags & MAP_FIXED_NOREPLACE) {
if (find_vma_intersection(mm, addr, addr + len))
return -EEXIST;
}
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
if (!mlock_future_ok(mm, vm_flags, len))
return -EAGAIN;
if (file) {
struct inode *inode = file_inode(file);
unsigned long flags_mask;
int err;
if (!file_mmap_ok(file, inode, pgoff, len))
return -EOVERFLOW;
flags_mask = LEGACY_MAP_MASK;
if (file->f_op->fop_flags & FOP_MMAP_SYNC)
flags_mask |= MAP_SYNC;
switch (flags & MAP_TYPE) {
case MAP_SHARED:
/*
* Force use of MAP_SHARED_VALIDATE with non-legacy
* flags. E.g. MAP_SYNC is dangerous to use with
* MAP_SHARED as you don't know which consistency model
* you will get. We silently ignore unsupported flags
* with MAP_SHARED to preserve backward compatibility.
*/
flags &= LEGACY_MAP_MASK;
fallthrough;
case MAP_SHARED_VALIDATE:
if (flags & ~flags_mask)
return -EOPNOTSUPP;
if (prot & PROT_WRITE) {
if (!(file->f_mode & FMODE_WRITE))
return -EACCES;
if (IS_SWAPFILE(file->f_mapping->host))
return -ETXTBSY;
}
/*
* Make sure we don't allow writing to an append-only
* file..
*/
if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
return -EACCES;
vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
fallthrough;
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
return -EACCES;
if (path_noexec(&file->f_path)) {
if (vm_flags & VM_EXEC)
return -EPERM;
vm_flags &= ~VM_MAYEXEC;
}
if (!can_mmap_file(file))
return -ENODEV;
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
return -EINVAL;
break;
default:
return -EINVAL;
}
/*
* Check to see if we are violating any seals and update VMA
* flags if necessary to avoid future seal violations.
*/
err = memfd_check_seals_mmap(file, &vm_flags);
if (err)
return (unsigned long)err;
} else {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
return -EINVAL;
/*
* Ignore pgoff.
*/
pgoff = 0;
vm_flags |= VM_SHARED | VM_MAYSHARE;
break;
case MAP_DROPPABLE:
if (VM_DROPPABLE == VM_NONE)
return -ENOTSUPP;
/*
* It makes no sense for a locked or stack area to be droppable.
*
* Also, since droppable pages can just go away at any time
* it makes no sense to copy them on fork or dump them.
*
* And don't attempt to combine with hugetlb for now.
*/
if (flags & (MAP_LOCKED | MAP_HUGETLB))
return -EINVAL;
if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
return -EINVAL;
vm_flags |= VM_DROPPABLE;
/*
* If the pages can be dropped, then it doesn't make
* sense to reserve them.
*/
vm_flags |= VM_NORESERVE;
/*
* Likewise, they're volatile enough that they
* shouldn't survive forks or coredumps.
*/
vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
fallthrough;
case MAP_PRIVATE:
/*
* Set pgoff according to addr for anon_vma.
*/
pgoff = addr >> PAGE_SHIFT;
break;
default:
return -EINVAL;
}
}
/*
* Set 'VM_NORESERVE' if we should not account for the
* memory use of this mapping.
*/
if (flags & MAP_NORESERVE) {
/* We honor MAP_NORESERVE if allowed to overcommit */
if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
vm_flags |= VM_NORESERVE;
/* hugetlb applies strict overcommit unless MAP_NORESERVE */
if (file && is_file_hugepages(file))
vm_flags |= VM_NORESERVE;
}
addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
(flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
*populate = len;
return addr;
}
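/*
 * Usage sketch (illustrative only, not part of the original file): an
 * in-kernel caller that already holds the mmap write lock might map an
 * anonymous readable/writable region roughly as follows; callers wanting
 * full userland mmap() semantics should use vm_mmap() instead:
 *
 *	unsigned long populate = 0;
 *	unsigned long addr;
 *	LIST_HEAD(uf);
 *
 *	addr = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, &populate, &uf);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 */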
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
struct file *file = NULL;
unsigned long retval;
if (!(flags & MAP_ANONYMOUS)) {
audit_mmap_fd(fd, flags);
file = fget(fd);
if (!file)
return -EBADF;
if (is_file_hugepages(file)) {
len = ALIGN(len, huge_page_size(hstate_file(file)));
} else if (unlikely(flags & MAP_HUGETLB)) {
retval = -EINVAL;
goto out_fput;
}
} else if (flags & MAP_HUGETLB) {
struct hstate *hs;
hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (!hs)
return -EINVAL;
len = ALIGN(len, huge_page_size(hs));
/*
* VM_NORESERVE is used because the reservations will be
* taken when vm_ops->mmap() is called
*/
file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (IS_ERR(file))
return PTR_ERR(file);
}
retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
if (file)
fput(file);
return retval;
}
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, pgoff)
{
return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
struct mmap_arg_struct a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
if (offset_in_page(a.offset))
return -EINVAL;
return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
/*
* Determine if the allocation needs to ensure that there is no
* existing mapping within its guard gaps, for use as start_gap.
*/
static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{
if (vm_flags & VM_SHADOW_STACK)
return PAGE_SIZE;
return 0;
}
/*
* Search for an unmapped address range.
*
* We are looking for a range that:
* - does not intersect with any VMA;
* - is contained within the [low_limit, high_limit) interval;
* - is at least the desired size;
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
*/
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
{
unsigned long addr;
if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
addr = unmapped_area_topdown(info);
else
addr = unmapped_area(info);
trace_vm_unmapped_area(addr, info);
return addr;
}
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
* Ugly calling convention alert:
* Return value with the low bits set means error value,
* ie
* if (ret & ~PAGE_MASK)
* error = ret;
*
* This function "knows" that -ENOMEM has the bits set.
*/
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info = {};
const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (mmap_end - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
}
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_end;
info.start_gap = stack_guard_placement(vm_flags);
if (filp && is_file_hugepages(filp))
info.align_mask = huge_page_mask_align(filp);
return vm_unmapped_area(&info);
}
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
vm_flags);
}
#endif
/*
* This mmap-allocator allocates new areas top-down from below the
* stack's low limit (the base):
*/
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info = {};
const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
/* requested length too big for entire address space */
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (mmap_end - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
info.start_gap = stack_guard_placement(vm_flags);
if (filp && is_file_hugepages(filp))
info.align_mask = huge_page_mask_align(filp);
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (offset_in_page(addr)) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = mmap_end;
addr = vm_unmapped_area(&info);
}
return addr;
}
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
vm_flags);
}
#endif
unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags)
{
if (mm_flags_test(MMF_TOPDOWN, mm))
return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
flags, vm_flags);
return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
}
unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long)
= NULL;
unsigned long error = arch_mmap_check(addr, len, flags);
if (error)
return error;
/* Careful about overflows.. */
if (len > TASK_SIZE)
return -ENOMEM;
if (file) {
if (file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
} else if (flags & MAP_SHARED) {
/*
* mmap_region() will call shmem_zero_setup() to create a file,
* so use shmem's get_unmapped_area in case it can be huge.
*/
get_area = shmem_get_unmapped_area;
}
/* Always treat pgoff as zero for anonymous memory. */
if (!file)
pgoff = 0;
if (get_area) {
addr = get_area(file, addr, len, pgoff, flags);
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
&& !addr /* no hint */
&& IS_ALIGNED(len, PMD_SIZE)) {
/* Ensures that larger anonymous mappings are THP aligned. */
addr = thp_get_unmapped_area_vmflags(file, addr, len,
pgoff, flags, vm_flags);
} else {
addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
pgoff, flags, vm_flags);
}
if (IS_ERR_VALUE(addr))
return addr;
if (addr > TASK_SIZE - len)
return -ENOMEM;
if (offset_in_page(addr))
return -EINVAL;
error = security_mmap_addr(addr);
return error ? error : addr;
}
unsigned long
mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
return mm_get_unmapped_area_vmflags(mm, file, addr, len,
pgoff, flags, 0);
}
EXPORT_SYMBOL(mm_get_unmapped_area);
/**
* find_vma_intersection() - Look up the first VMA which intersects the interval
* @mm: The process address space.
* @start_addr: The inclusive start user address.
* @end_addr: The exclusive end user address.
*
* Returns: The first VMA within the provided range, %NULL otherwise. Assumes
* start_addr < end_addr.
*/
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
unsigned long end_addr)
{
unsigned long index = start_addr;
mmap_assert_locked(mm);
return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);
/**
* find_vma() - Find the VMA for a given address, or the next VMA.
* @mm: The mm_struct to check
* @addr: The address
*
* Returns: The VMA associated with addr, or the next VMA.
* May return %NULL in the case of no VMA at addr or above.
*/
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
unsigned long index = addr;
mmap_assert_locked(mm);
return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
EXPORT_SYMBOL(find_vma);
/**
* find_vma_prev() - Find the VMA for a given address, or the next vma and
* set %pprev to the previous VMA, if any.
* @mm: The mm_struct to check
* @addr: The address
* @pprev: The pointer to set to the previous VMA
*
* Note that RCU lock is missing here since the external mmap_lock() is used
* instead.
*
* Returns: The VMA associated with @addr, or the next vma.
* May return %NULL in the case of no vma at addr or above.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, addr);
vma = vma_iter_load(&vmi);
*pprev = vma_prev(&vmi);
if (!vma)
vma = vma_next(&vmi);
return vma;
}
/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
static int __init cmdline_parse_stack_guard_gap(char *p)
{
unsigned long val;
char *endptr;
val = simple_strtoul(p, &endptr, 10);
if (!*endptr)
stack_guard_gap = val << PAGE_SHIFT;
return 1;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
#ifdef CONFIG_STACK_GROWSUP
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
return expand_upwards(vma, address);
}
struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma, *prev;
addr &= PAGE_MASK;
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
if (!prev)
return NULL;
if (expand_stack_locked(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED)
populate_vma_page_range(prev, addr, prev->vm_end, NULL);
return prev;
}
#else
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
return expand_downwards(vma, address);
}
struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
unsigned long start;
addr &= PAGE_MASK;
vma = find_vma(mm, addr);
if (!vma)
return NULL;
if (vma->vm_start <= addr)
return vma;
start = vma->vm_start;
if (expand_stack_locked(vma, addr))
return NULL;
if (vma->vm_flags & VM_LOCKED)
populate_vma_page_range(vma, addr, start, NULL);
return vma;
}
#endif
#if defined(CONFIG_STACK_GROWSUP)
#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
#define vma_expand_down(vma, addr) (-EFAULT)
#else
#define vma_expand_up(vma,addr) (-EFAULT)
#define vma_expand_down(vma, addr) expand_downwards(vma, addr)
#endif
/*
* expand_stack(): legacy interface for page faulting. Don't use unless
* you have to.
*
* This is called with the mm locked for reading, drops the lock, takes
* the lock for writing, tries to look up a vma again, expands it if
* necessary, and downgrades the lock to reading again.
*
* If no vma is found or it can't be expanded, it returns NULL and has
* dropped the lock.
*/
struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma, *prev;
mmap_read_unlock(mm);
if (mmap_write_lock_killable(mm))
return NULL;
vma = find_vma_prev(mm, addr, &prev);
if (vma && vma->vm_start <= addr)
goto success;
if (prev && !vma_expand_up(prev, addr)) {
vma = prev;
goto success;
}
if (vma && !vma_expand_down(vma, addr))
goto success;
mmap_write_unlock(mm);
return NULL;
success:
mmap_write_downgrade(mm);
return vma;
}
/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
* @mm: The mm_struct
* @start: The start address to munmap
* @len: The length to be munmapped.
* @uf: The userfaultfd list_head
*
* Return: 0 on success, error otherwise.
*/
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{
VMA_ITERATOR(vmi, mm, start);
return do_vmi_munmap(&vmi, mm, start, len, uf, false);
}
int vm_munmap(unsigned long start, size_t len)
{
return __vm_munmap(start, len, false);
}
EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
addr = untagged_addr(addr);
return __vm_munmap(addr, len, true);
}
/*
* Emulation of deprecated remap_file_pages() syscall.
*/
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long populate = 0;
unsigned long ret = -EINVAL;
struct file *file;
vm_flags_t vm_flags;
pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
current->comm, current->pid);
if (prot)
return ret;
start = start & PAGE_MASK;
size = size & PAGE_MASK;
if (start + size <= start)
return ret;
/* Does pgoff wrap? */
if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return ret;
if (mmap_read_lock_killable(mm))
return -EINTR;
/*
* Look up VMA under read lock first so we can perform the security check
* without holding locks (which can be problematic). We reacquire a
* write lock later and check nothing changed underneath us.
*/
vma = vma_lookup(mm, start);
if (!vma || !(vma->vm_flags & VM_SHARED)) {
mmap_read_unlock(mm);
return -EINVAL;
}
prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
flags &= MAP_NONBLOCK;
flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
if (vma->vm_flags & VM_LOCKED)
flags |= MAP_LOCKED;
/* Save vm_flags used to calculate prot and flags, and recheck later. */
vm_flags = vma->vm_flags;
file = get_file(vma->vm_file);
mmap_read_unlock(mm);
/* Call outside mmap_lock to be consistent with other callers. */
ret = security_mmap_file(file, prot, flags);
if (ret) {
fput(file);
return ret;
}
ret = -EINVAL;
/* OK security check passed, take write lock + let it rip. */
if (mmap_write_lock_killable(mm)) {
fput(file);
return -EINTR;
}
vma = vma_lookup(mm, start);
if (!vma)
goto out;
/* Make sure things didn't change under us. */
if (vma->vm_flags != vm_flags)
goto out;
if (vma->vm_file != file)
goto out;
if (start + size > vma->vm_end) {
VMA_ITERATOR(vmi, mm, vma->vm_end);
struct vm_area_struct *next, *prev = vma;
for_each_vma_range(vmi, next, start + size) {
/* hole between vmas ? */
if (next->vm_start != prev->vm_end)
goto out;
if (next->vm_file != vma->vm_file)
goto out;
if (next->vm_flags != vma->vm_flags)
goto out;
if (start + size <= next->vm_end)
break;
prev = next;
}
if (!next)
goto out;
}
ret = do_mmap(vma->vm_file, start, size,
prot, flags, 0, pgoff, &populate, NULL);
out:
mmap_write_unlock(mm);
fput(file);
if (populate)
mm_populate(ret, populate);
if (!IS_ERR_VALUE(ret))
ret = 0;
return ret;
}
int vm_brk_flags(unsigned long addr, unsigned long request, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
unsigned long len;
int ret;
bool populate;
LIST_HEAD(uf);
VMA_ITERATOR(vmi, mm, addr);
len = PAGE_ALIGN(request);
if (len < request)
return -ENOMEM;
if (!len)
return 0;
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((vm_flags & (~VM_EXEC)) != 0)
return -EINVAL;
if (mmap_write_lock_killable(mm))
return -EINTR;
ret = check_brk_limits(addr, len);
if (ret)
goto limits_failed;
ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
if (ret)
goto munmap_failed;
vma = vma_prev(&vmi);
ret = do_brk_flags(&vmi, vma, addr, len, vm_flags);
populate = ((mm->def_flags & VM_LOCKED) != 0);
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate && !ret)
mm_populate(addr, len);
return ret;
munmap_failed:
limits_failed:
mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL(vm_brk_flags);
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
struct mmu_gather tlb;
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
VMA_ITERATOR(vmi, mm, 0);
int count = 0;
/* mm's last user has gone, and it's about to be pulled down */
mmu_notifier_release(mm);
mmap_read_lock(mm);
arch_exit_mmap(mm);
vma = vma_next(&vmi);
if (!vma || unlikely(xa_is_zero(vma))) {
/* Can happen if dup_mmap() received an OOM */
mmap_read_unlock(mm);
mmap_write_lock(mm);
goto destroy;
}
flush_cache_mm(mm);
tlb_gather_mmu_fullmm(&tlb, mm);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
mmap_read_unlock(mm);
/*
* Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
* because the memory has been already freed.
*/
mm_flags_set(MMF_OOM_SKIP, mm);
mmap_write_lock(mm);
mt_clear_in_rcu(&mm->mm_mt);
vma_iter_set(&vmi, vma->vm_end);
free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
USER_PGTABLES_CEILING, true);
tlb_finish_mmu(&tlb);
/*
* Walk the list again, actually closing and freeing it, with preemption
* enabled, without holding any MM locks besides the unreachable
* mmap_write_lock.
*/
vma_iter_set(&vmi, vma->vm_end);
do {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
vma_mark_detached(vma);
remove_vma(vma);
count++;
cond_resched();
vma = vma_next(&vmi);
} while (vma && likely(!xa_is_zero(vma)));
BUG_ON(count != mm->map_count);
trace_exit_mmap(mm);
destroy:
__mt_destroy(&mm->mm_mt);
mmap_write_unlock(mm);
vm_unacct_memory(nr_accounted);
}
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
*/
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
return false;
if (is_data_mapping(flags) &&
mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
/* Workaround for Valgrind */
if (rlimit(RLIMIT_DATA) == 0 &&
mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
return true;
pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
current->comm, current->pid,
(mm->data_vm + npages) << PAGE_SHIFT,
rlimit(RLIMIT_DATA),
ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
if (!ignore_rlimit_data)
return false;
}
return true;
}
void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
if (is_exec_mapping(flags))
mm->exec_vm += npages;
else if (is_stack_mapping(flags))
mm->stack_vm += npages;
else if (is_data_mapping(flags))
mm->data_vm += npages;
}
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
/*
* Close hook, called for unmap() and on the old vma for mremap().
*
* Having a close hook prevents vma merging regardless of flags.
*/
static void special_mapping_close(struct vm_area_struct *vma)
{
const struct vm_special_mapping *sm = vma->vm_private_data;
if (sm->close)
sm->close(sm, vma);
}
static const char *special_mapping_name(struct vm_area_struct *vma)
{
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}
static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
struct vm_special_mapping *sm = new_vma->vm_private_data;
if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
return -EFAULT;
if (sm->mremap)
return sm->mremap(sm, new_vma);
return 0;
}
static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
/*
* Forbid splitting special mappings - kernel has expectations over
* the number of pages in mapping. Together with VM_DONTEXPAND
* the size of vma should stay the same over the special mapping's
* lifetime.
*/
return -EINVAL;
}
static const struct vm_operations_struct special_mapping_vmops = {
.close = special_mapping_close,
.fault = special_mapping_fault,
.mremap = special_mapping_mremap,
.name = special_mapping_name,
/* vDSO code relies on VVAR not being accessible remotely */
.access = NULL,
.may_split = special_mapping_split,
};
static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgoff_t pgoff;
struct page **pages;
struct vm_special_mapping *sm = vma->vm_private_data;
if (sm->fault)
return sm->fault(sm, vmf->vma, vmf);
pages = sm->pages;
for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
pgoff--;
if (*pages) {
struct page *page = *pages;
get_page(page);
vmf->page = page;
return 0;
}
return VM_FAULT_SIGBUS;
}
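/*
 * Illustrative example (not from the original source): with sm->pages =
 * { p0, p1, NULL } and vmf->pgoff == 1, the loop above advances once
 * (pgoff 1 -> 0, pages now points at p1), stops because pgoff reached 0,
 * and p1 is returned with an extra reference taken. With vmf->pgoff == 2
 * the walk hits the NULL terminator and the fault ends in VM_FAULT_SIGBUS.
 */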
static struct vm_area_struct *__install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
vm_flags_t vm_flags, void *priv,
const struct vm_operations_struct *ops)
{
int ret;
struct vm_area_struct *vma;
vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
vma_set_range(vma, addr, addr + len, 0);
vm_flags_init(vma, (vm_flags | mm->def_flags |
VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = ops;
vma->vm_private_data = priv;
ret = insert_vm_struct(mm, vma);
if (ret)
goto out;
vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
perf_event_mmap(vma);
return vma;
out:
vm_area_free(vma);
return ERR_PTR(ret);
}
bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm)
{
return vma->vm_private_data == sm &&
vma->vm_ops == &special_mapping_vmops;
}
/*
* Called with mm->mmap_lock held for writing.
* Insert a new vma covering the given region, with the given flags.
* Its pages are supplied by the given array of struct page *.
* The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
* The region past the last page supplied will always produce SIGBUS.
* The array pointer and the pages it points to are assumed to stay alive
* for as long as this mapping might exist.
*/
struct vm_area_struct *_install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
vm_flags_t vm_flags, const struct vm_special_mapping *spec)
{
return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
&special_mapping_vmops);
}
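/*
 * Usage sketch (illustrative only; the names below are made up, not from
 * this file): an architecture installing a one-page vDSO-like special
 * mapping while holding the mmap write lock could do roughly:
 *
 *	static struct page *example_pages[] = { some_page, NULL };
 *	static const struct vm_special_mapping example_mapping = {
 *		.name = "[example]",
 *		.pages = example_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD,
 *				       &example_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */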
#ifdef CONFIG_SYSCTL
#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
int sysctl_legacy_va_layout;
#endif
static const struct ctl_table mmap_table[] = {
{
.procname = "max_map_count",
.data = &sysctl_max_map_count,
.maxlen = sizeof(sysctl_max_map_count),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
{
.procname = "legacy_va_layout",
.data = &sysctl_legacy_va_layout,
.maxlen = sizeof(sysctl_legacy_va_layout),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
{
.procname = "mmap_rnd_bits",
.data = &mmap_rnd_bits,
.maxlen = sizeof(mmap_rnd_bits),
.mode = 0600,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)&mmap_rnd_bits_min,
.extra2 = (void *)&mmap_rnd_bits_max,
},
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
{
.procname = "mmap_rnd_compat_bits",
.data = &mmap_rnd_compat_bits,
.maxlen = sizeof(mmap_rnd_compat_bits),
.mode = 0600,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)&mmap_rnd_compat_bits_min,
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
#endif
};
#endif /* CONFIG_SYSCTL */
/*
* initialise the percpu counter for VM, initialise VMA state.
*/
void __init mmap_init(void)
{
int ret;
ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
VM_BUG_ON(ret);
#ifdef CONFIG_SYSCTL
register_sysctl_init("vm", mmap_table);
#endif
vma_state_init();
}
/*
* Initialise sysctl_user_reserve_kbytes.
*
* This is intended to prevent a user from starting a single memory hogging
* process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
* mode.
*
* The default value is min(3% of free memory, 128MB)
* 128MB is enough to recover with sshd/login, bash, and top/kill.
*/
static int init_user_reserve(void)
{
unsigned long free_kbytes;
free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
return 0;
}
subsys_initcall(init_user_reserve);
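/*
 * Worked example (illustrative): with 8 GiB free, free_kbytes / 32 is
 * 256 MiB, larger than the cap of SZ_128K kbytes (128 MiB), so the reserve
 * is clamped to 128 MiB; with 1 GiB free it is simply 1 GiB / 32 = 32 MiB.
 */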
/*
* Initialise sysctl_admin_reserve_kbytes.
*
* The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
* to log in and kill a memory hogging process.
*
* Systems with more than 256MB will reserve 8MB, enough to recover
* with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
* only reserve 3% of free pages by default.
*/
static int init_admin_reserve(void)
{
unsigned long free_kbytes;
free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
return 0;
}
subsys_initcall(init_admin_reserve);
/*
* Reinitialise user and admin reserves if memory is added or removed.
*
* The default user reserve max is 128MB, and the default max for the
* admin reserve is 8MB. These are usually, but not always, enough to
* enable recovery from a memory hogging process using login/sshd, a shell,
* and tools like top. It may make sense to increase or even disable the
* reserve depending on the existence of swap or variations in the recovery
* tools. So, the admin may have changed them.
*
* If memory is added and the reserves have been eliminated or increased above
* the default max, then we'll trust the admin.
*
* If memory is removed and there isn't enough free memory, then we
* need to reset the reserves.
*
* Otherwise keep the reserve set by the admin.
*/
static int reserve_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long tmp, free_kbytes;
switch (action) {
case MEM_ONLINE:
/* Default max is 128MB. Leave alone if modified by operator. */
tmp = sysctl_user_reserve_kbytes;
if (tmp > 0 && tmp < SZ_128K)
init_user_reserve();
/* Default max is 8MB. Leave alone if modified by operator. */
tmp = sysctl_admin_reserve_kbytes;
if (tmp > 0 && tmp < SZ_8K)
init_admin_reserve();
break;
case MEM_OFFLINE:
free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
if (sysctl_user_reserve_kbytes > free_kbytes) {
init_user_reserve();
pr_info("vm.user_reserve_kbytes reset to %lu\n",
sysctl_user_reserve_kbytes);
}
if (sysctl_admin_reserve_kbytes > free_kbytes) {
init_admin_reserve();
pr_info("vm.admin_reserve_kbytes reset to %lu\n",
sysctl_admin_reserve_kbytes);
}
break;
default:
break;
}
return NOTIFY_OK;
}
static int __meminit init_reserve_notifier(void)
{
if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
pr_err("Failed registering memory add/remove notifier for admin reserve\n");
return 0;
}
subsys_initcall(init_reserve_notifier);
/*
* Obtain a read lock on mm->mmap_lock, if the specified address is below the
* start of the VMA, the intent is to perform a write, and it is a
* downward-growing stack, then attempt to expand the stack to contain it.
*
* This function is intended only for obtaining an argument page from an ELF
* image, and is almost certainly NOT what you want to use for any other
* purpose.
*
* IMPORTANT - VMA fields are accessed without an mmap lock being held, so the
* VMA referenced must not be linked in any user-visible tree, i.e. it must be a
* new VMA being mapped.
*
* The function assumes that addr is either contained within the VMA or below
* it, and makes no attempt to validate this value beyond that.
*
* Returns true if the read lock was obtained and a stack was perhaps expanded,
* false if the stack expansion failed.
*
* On stack expansion the function temporarily acquires an mmap write lock
* before downgrading it.
*/
bool mmap_read_lock_maybe_expand(struct mm_struct *mm,
struct vm_area_struct *new_vma,
unsigned long addr, bool write)
{
if (!write || addr >= new_vma->vm_start) {
mmap_read_lock(mm);
return true;
}
if (!(new_vma->vm_flags & VM_GROWSDOWN))
return false;
mmap_write_lock(mm);
if (expand_downwards(new_vma, addr)) {
mmap_write_unlock(mm);
return false;
}
mmap_write_downgrade(mm);
return true;
}
__latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp;
int retval;
unsigned long charge = 0;
LIST_HEAD(uf);
VMA_ITERATOR(vmi, mm, 0);
if (mmap_write_lock_killable(oldmm))
return -EINTR;
flush_cache_dup_mm(oldmm);
uprobe_dup_mmap(oldmm, mm);
/*
* Not linked in yet - no deadlock potential:
*/
mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
/* No ordering required: file already has been exposed. */
dup_mm_exe_file(mm, oldmm);
mm->total_vm = oldmm->total_vm;
mm->data_vm = oldmm->data_vm;
mm->exec_vm = oldmm->exec_vm;
mm->stack_vm = oldmm->stack_vm;
/* Use __mt_dup() to efficiently build an identical maple tree. */
retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
if (unlikely(retval))
goto out;
mt_clear_in_rcu(vmi.mas.tree);
for_each_vma(vmi, mpnt) {
struct file *file;
vma_start_write(mpnt);
if (mpnt->vm_flags & VM_DONTCOPY) {
retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
mpnt->vm_end, GFP_KERNEL);
if (retval)
goto loop_out;
vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
continue;
}
charge = 0;
/*
* Don't duplicate many vmas if we've been oom-killed (for
* example)
*/
if (fatal_signal_pending(current)) {
retval = -EINTR;
goto loop_out;
}
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned long len = vma_pages(mpnt);
if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
goto fail_nomem;
charge = len;
}
tmp = vm_area_dup(mpnt);
if (!tmp)
goto fail_nomem;
retval = vma_dup_policy(mpnt, tmp);
if (retval)
goto fail_nomem_policy;
tmp->vm_mm = mm;
retval = dup_userfaultfd(tmp, &uf);
if (retval)
goto fail_nomem_anon_vma_fork;
if (tmp->vm_flags & VM_WIPEONFORK) {
/*
* VM_WIPEONFORK gets a clean slate in the child.
* Don't prepare anon_vma until fault since we don't
* copy page for current vma.
*/
tmp->anon_vma = NULL;
} else if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
vm_flags_clear(tmp, VM_LOCKED_MASK);
/*
* Copy/update hugetlb private vma information.
*/
if (is_vm_hugetlb_page(tmp))
hugetlb_dup_vma_private(tmp);
/*
* Link the vma into the MT. After using __mt_dup(), memory
* allocation is not necessary here, so it cannot fail.
*/
vma_iter_bulk_store(&vmi, tmp);
mm->map_count++;
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
file = tmp->vm_file;
if (file) {
struct address_space *mapping = file->f_mapping;
get_file(file);
i_mmap_lock_write(mapping);
if (vma_is_shared_maywrite(tmp))
mapping_allow_writable(mapping);
flush_dcache_mmap_lock(mapping);
/* insert tmp into the share list, just after mpnt */
vma_interval_tree_insert_after(tmp, mpnt,
&mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
i_mmap_unlock_write(mapping);
}
if (!(tmp->vm_flags & VM_WIPEONFORK))
retval = copy_page_range(tmp, mpnt);
if (retval) {
mpnt = vma_next(&vmi);
goto loop_out;
}
}
/* a new mm has just been created */
retval = arch_dup_mmap(oldmm, mm);
loop_out:
vma_iter_free(&vmi);
if (!retval) {
mt_set_in_rcu(vmi.mas.tree);
ksm_fork(mm, oldmm);
khugepaged_fork(mm, oldmm);
} else {
/*
* The entire maple tree has already been duplicated. If the
* mmap duplication fails, mark the failure point with
* XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
* stop releasing VMAs that have not been duplicated after this
* point.
*/
if (mpnt) {
mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
mas_store(&vmi.mas, XA_ZERO_ENTRY);
/* Avoid OOM iterating a broken tree */
mm_flags_set(MMF_OOM_SKIP, mm);
}
/*
* The mm_struct is going to exit, but the locks will be dropped
* first. Marking the mm_struct as unstable is advisable as it is
* not fully initialised.
*/
mm_flags_set(MMF_UNSTABLE, mm);
}
out:
mmap_write_unlock(mm);
flush_tlb_mm(oldmm);
mmap_write_unlock(oldmm);
if (!retval)
dup_userfaultfd_complete(&uf);
else
dup_userfaultfd_fail(&uf);
return retval;
fail_nomem_anon_vma_fork:
mpol_put(vma_policy(tmp));
fail_nomem_policy:
vm_area_free(tmp);
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
goto loop_out;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic pidhash and scalable, time-bounded PID allocator
*
* (C) 2002-2003 Nadia Yvette Chambers, IBM
* (C) 2004 Nadia Yvette Chambers, Oracle
* (C) 2002-2004 Ingo Molnar, Red Hat
*
* pid-structures are backing objects for tasks sharing a given ID to chain
* against. There is very little to them aside from hashing them and
* parking tasks using given ID's on a list.
*
* The hash is always changed with the tasklist_lock write-acquired,
* and the hash is only accessed with the tasklist_lock at least
* read-acquired, so there's no additional SMP locking needed here.
*
* We have a list of bitmap pages, which bitmaps represent the PID space.
* Allocating and freeing PIDs is completely lockless. The worst-case
* allocation scenario when all but one out of 1 million PIDs possible are
* allocated already: the scanning of 32 list entries and at most PAGE_SIZE
* bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
*
* Pid namespaces:
* (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
* (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
* Many thanks to Oleg Nesterov for comments and help
*
*/
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
#include <linux/seqlock.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
struct pid init_struct_pid = {
.count = REFCOUNT_INIT(1),
.tasks = {
{ .first = NULL },
{ .first = NULL },
{ .first = NULL },
},
.level = 0,
.numbers = { {
.nr = 0,
.ns = &init_pid_ns,
}, }
};
static int pid_max_min = RESERVED_PIDS + 1;
static int pid_max_max = PID_MAX_LIMIT;
/*
* PID-map pages start out as NULL, they get allocated upon
* first use and are never deallocated. This way a low pid_max
* value does not cause lots of bitmaps to be allocated, but
* the scheme scales to up to 4 million PIDs, runtime.
*/
struct pid_namespace init_pid_ns = {
.ns.__ns_ref = REFCOUNT_INIT(2),
.idr = IDR_INIT(init_pid_ns.idr),
.pid_allocated = PIDNS_ADDING,
.level = 0,
.child_reaper = &init_task,
.user_ns = &init_user_ns,
.ns.inum = ns_init_inum(&init_pid_ns),
#ifdef CONFIG_PID_NS
.ns.ops = &pidns_operations,
#endif
.pid_max = PID_MAX_DEFAULT,
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
.ns.ns_type = ns_common_type(&init_pid_ns),
};
EXPORT_SYMBOL_GPL(init_pid_ns);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
seqcount_spinlock_t pidmap_lock_seq = SEQCNT_SPINLOCK_ZERO(pidmap_lock_seq, &pidmap_lock);
void put_pid(struct pid *pid)
{
struct pid_namespace *ns;
if (!pid)
return;
ns = pid->numbers[pid->level].ns;
if (refcount_dec_and_test(&pid->count)) {
pidfs_free_pid(pid);
kmem_cache_free(ns->pid_cachep, pid);
put_pid_ns(ns);
}
}
EXPORT_SYMBOL_GPL(put_pid);
static void delayed_put_pid(struct rcu_head *rhp)
{
struct pid *pid = container_of(rhp, struct pid, rcu);
put_pid(pid);
}
void free_pid(struct pid *pid)
{
int i;
lockdep_assert_not_held(&tasklist_lock);
spin_lock(&pidmap_lock);
for (i = 0; i <= pid->level; i++) {
struct upid *upid = pid->numbers + i;
struct pid_namespace *ns = upid->ns;
switch (--ns->pid_allocated) {
case 2:
case 1:
/* When all that is left in the pid namespace
* is the reaper, wake up the reaper. The reaper
* may be sleeping in zap_pid_ns_processes().
*/
wake_up_process(ns->child_reaper);
break;
case PIDNS_ADDING:
/* Handle a fork failure of the first process */
WARN_ON(ns->child_reaper);
ns->pid_allocated = 0;
break;
}
idr_remove(&ns->idr, upid->nr);
}
pidfs_remove_pid(pid);
spin_unlock(&pidmap_lock);
call_rcu(&pid->rcu, delayed_put_pid);
}
void free_pids(struct pid **pids)
{
int tmp;
/*
* This can batch pidmap_lock.
*/
for (tmp = PIDTYPE_MAX; --tmp >= 0; )
if (pids[tmp])
free_pid(pids[tmp]);
}
struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
size_t set_tid_size)
{
struct pid *pid;
enum pid_type type;
int i, nr;
struct pid_namespace *tmp;
struct upid *upid;
int retval = -ENOMEM;
/*
* set_tid_size contains the size of the set_tid array. Starting at
* the most nested currently active PID namespace it tells alloc_pid()
* which PID to set for a process in that most nested PID namespace
* up to set_tid_size PID namespaces. It does not have to set the PID
* for a process in all nested PID namespaces but set_tid_size must
* never be greater than the current ns->level + 1.
*/
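/*
 * Illustrative example (not from the original source): for a child created
 * in a namespace with ns->level == 2, set_tid_size == 2 means set_tid[0]
 * is the PID requested in that level-2 namespace and set_tid[1] the PID in
 * its level-1 parent; the level-0 PID is still allocated normally by the
 * loop below.
 */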
if (set_tid_size > ns->level + 1)
return ERR_PTR(-EINVAL);
pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
if (!pid)
return ERR_PTR(retval);
tmp = ns;
pid->level = ns->level;
for (i = ns->level; i >= 0; i--) {
int tid = 0;
int pid_max = READ_ONCE(tmp->pid_max);
if (set_tid_size) {
tid = set_tid[ns->level - i];
retval = -EINVAL;
if (tid < 1 || tid >= pid_max)
goto out_free;
/*
* Also fail if a PID != 1 is requested and
* no PID 1 exists.
*/
if (tid != 1 && !tmp->child_reaper)
goto out_free;
retval = -EPERM;
if (!checkpoint_restore_ns_capable(tmp->user_ns))
goto out_free;
set_tid_size--;
}
idr_preload(GFP_KERNEL);
spin_lock(&pidmap_lock);
if (tid) {
nr = idr_alloc(&tmp->idr, NULL, tid,
tid + 1, GFP_ATOMIC);
/*
* If ENOSPC is returned it means that the PID is
* already in use. Return EEXIST in that case.
*/
if (nr == -ENOSPC)
nr = -EEXIST;
} else {
int pid_min = 1;
/*
* init really needs pid 1, but after reaching the
* maximum wrap back to RESERVED_PIDS
*/
if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
pid_min = RESERVED_PIDS;
/*
* Store a null pointer so find_pid_ns does not find
* a partially initialized PID (see below).
*/
nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
pid_max, GFP_ATOMIC);
}
spin_unlock(&pidmap_lock);
idr_preload_end();
if (nr < 0) {
retval = (nr == -ENOSPC) ? -EAGAIN : nr;
goto out_free;
}
pid->numbers[i].nr = nr;
pid->numbers[i].ns = tmp;
tmp = tmp->parent;
}
/*
* ENOMEM is not the most obvious choice especially for the case
* where the child subreaper has already exited and the pid
* namespace denies the creation of any new processes. But ENOMEM
* is what we have exposed to userspace for a long time and it is
* documented behavior for pid namespaces. So we can't easily
* change it even if there were an error code better suited.
*/
retval = -ENOMEM;
get_pid_ns(ns);
refcount_set(&pid->count, 1);
spin_lock_init(&pid->lock);
for (type = 0; type < PIDTYPE_MAX; ++type)
INIT_HLIST_HEAD(&pid->tasks[type]);
init_waitqueue_head(&pid->wait_pidfd);
INIT_HLIST_HEAD(&pid->inodes);
upid = pid->numbers + ns->level;
idr_preload(GFP_KERNEL);
spin_lock(&pidmap_lock);
if (!(ns->pid_allocated & PIDNS_ADDING))
goto out_unlock;
pidfs_add_pid(pid);
for ( ; upid >= pid->numbers; --upid) {
/* Make the PID visible to find_pid_ns. */
idr_replace(&upid->ns->idr, pid, upid->nr);
upid->ns->pid_allocated++;
}
spin_unlock(&pidmap_lock);
idr_preload_end();
return pid;
out_unlock:
spin_unlock(&pidmap_lock);
idr_preload_end();
put_pid_ns(ns);
out_free:
spin_lock(&pidmap_lock);
while (++i <= ns->level) {
upid = pid->numbers + i;
idr_remove(&upid->ns->idr, upid->nr);
}
/* On failure to allocate the first pid, reset the state */
if (ns->pid_allocated == PIDNS_ADDING)
idr_set_cursor(&ns->idr, 0);
spin_unlock(&pidmap_lock);
kmem_cache_free(ns->pid_cachep, pid);
return ERR_PTR(retval);
}
void disable_pid_allocation(struct pid_namespace *ns)
{
spin_lock(&pidmap_lock);
ns->pid_allocated &= ~PIDNS_ADDING;
spin_unlock(&pidmap_lock);
}
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);
struct pid *find_vpid(int nr)
{
return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
return (type == PIDTYPE_PID) ? &task->thread_pid : &task->signal->pids[type];
}
/*
* attach_pid() must be called with the tasklist_lock write-held.
*/
void attach_pid(struct task_struct *task, enum pid_type type)
{
struct pid *pid;
lockdep_assert_held_write(&tasklist_lock);
pid = *task_pid_ptr(task, type);
hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}
static void __change_pid(struct pid **pids, struct task_struct *task,
enum pid_type type, struct pid *new)
{
struct pid **pid_ptr, *pid;
int tmp;
lockdep_assert_held_write(&tasklist_lock);
pid_ptr = task_pid_ptr(task, type);
pid = *pid_ptr;
hlist_del_rcu(&task->pid_links[type]);
*pid_ptr = new;
for (tmp = PIDTYPE_MAX; --tmp >= 0; )
if (pid_has_task(pid, tmp))
return;
WARN_ON(pids[type]);
pids[type] = pid;
}
void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type)
{
__change_pid(pids, task, type, NULL);
}
void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type,
struct pid *pid)
{
__change_pid(pids, task, type, pid);
attach_pid(task, type);
}
void exchange_tids(struct task_struct *left, struct task_struct *right)
{
struct pid *pid1 = left->thread_pid;
struct pid *pid2 = right->thread_pid;
struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];
lockdep_assert_held_write(&tasklist_lock);
/* Swap the single entry tid lists */
hlists_swap_heads_rcu(head1, head2);
/* Swap the per task_struct pid */
rcu_assign_pointer(left->thread_pid, pid2);
rcu_assign_pointer(right->thread_pid, pid1);
/* Swap the cached value */
WRITE_ONCE(left->pid, pid_nr(pid2));
WRITE_ONCE(right->pid, pid_nr(pid1));
}
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type type)
{
WARN_ON_ONCE(type == PIDTYPE_PID);
lockdep_assert_held_write(&tasklist_lock);
hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
struct task_struct *result = NULL;
if (pid) {
struct hlist_node *first;
first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
lockdep_tasklist_lock_is_held());
if (first)
result = hlist_entry(first, struct task_struct, pid_links[(type)]);
}
return result;
}
EXPORT_SYMBOL(pid_task);
/*
* Must be called under rcu_read_lock().
*/
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"find_task_by_pid_ns() needs rcu_read_lock() protection");
return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
struct task_struct *find_task_by_vpid(pid_t vnr)
{
return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
struct task_struct *find_get_task_by_vpid(pid_t nr)
{
struct task_struct *task;
rcu_read_lock();
task = find_task_by_vpid(nr);
if (task)
get_task_struct(task);
rcu_read_unlock();
return task;
}
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
struct pid *pid;
rcu_read_lock();
pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
rcu_read_unlock();
return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);
struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
struct task_struct *result;
rcu_read_lock();
result = pid_task(pid, type);
if (result)
get_task_struct(result);
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);
struct pid *find_get_pid(pid_t nr)
{
struct pid *pid;
rcu_read_lock();
pid = get_pid(find_vpid(nr));
rcu_read_unlock();
return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
struct upid *upid;
pid_t nr = 0;
if (pid && ns && ns->level <= pid->level) {
upid = &pid->numbers[ns->level];
if (upid->ns == ns)
nr = upid->nr;
}
return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
pid_t pid_vnr(struct pid *pid)
{
return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns)
{
pid_t nr = 0;
rcu_read_lock();
if (!ns)
ns = task_active_pid_ns(current);
if (ns)
nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
rcu_read_unlock();
return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);
struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);
/*
* Used by proc to find the first pid that is greater than or equal to nr.
*
* If there is a pid at nr this function is exactly the same as find_pid_ns.
*/
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
return idr_get_next(&ns->idr, &nr);
}
EXPORT_SYMBOL_GPL(find_ge_pid);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
CLASS(fd, f)(fd);
struct pid *pid;
if (fd_empty(f))
return ERR_PTR(-EBADF);
pid = pidfd_pid(fd_file(f));
if (!IS_ERR(pid)) {
get_pid(pid);
*flags = fd_file(f)->f_flags;
}
return pid;
}
/**
* pidfd_get_task() - Get the task associated with a pidfd
*
* @pidfd: pidfd for which to get the task
* @flags: flags associated with this pidfd
*
* Return the task associated with @pidfd. The function takes a reference on
* the returned task. The caller is responsible for releasing that reference.
*
* Return: On success, the task_struct associated with the pidfd.
* On error, a negative errno number will be returned.
*/
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
unsigned int f_flags = 0;
struct pid *pid;
struct task_struct *task;
enum pid_type type;
switch (pidfd) {
case PIDFD_SELF_THREAD:
type = PIDTYPE_PID;
pid = get_task_pid(current, type);
break;
case PIDFD_SELF_THREAD_GROUP:
type = PIDTYPE_TGID;
pid = get_task_pid(current, type);
break;
default:
pid = pidfd_get_pid(pidfd, &f_flags);
if (IS_ERR(pid))
return ERR_CAST(pid);
type = PIDTYPE_TGID;
break;
}
task = get_pid_task(pid, type);
put_pid(pid);
if (!task)
return ERR_PTR(-ESRCH);
*flags = f_flags;
return task;
}
/**
* pidfd_create() - Create a new pid file descriptor.
*
* @pid: struct pid that the pidfd will reference
* @flags: flags to pass
*
* This creates a new pid file descriptor with the O_CLOEXEC flag set.
*
* Note that this function can only be called after the fd table has
* been unshared to avoid leaking the pidfd to the new process.
*
* This symbol should not be explicitly exported to loadable modules.
*
* Return: On success, a cloexec pidfd is returned.
* On error, a negative errno number will be returned.
*/
static int pidfd_create(struct pid *pid, unsigned int flags)
{
int pidfd;
struct file *pidfd_file;
pidfd = pidfd_prepare(pid, flags, &pidfd_file);
if (pidfd < 0)
return pidfd;
fd_install(pidfd, pidfd_file);
return pidfd;
}
/**
* sys_pidfd_open() - Open new pid file descriptor.
*
* @pid: pid for which to retrieve a pidfd
* @flags: flags to pass
*
* This creates a new pid file descriptor with the O_CLOEXEC flag set for
* the task identified by @pid. Without PIDFD_THREAD flag the target task
* must be a thread-group leader.
*
* Return: On success, a cloexec pidfd is returned.
* On error, a negative errno number will be returned.
*/
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
int fd;
struct pid *p;
if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD))
return -EINVAL;
if (pid <= 0)
return -EINVAL;
p = find_get_pid(pid);
if (!p)
return -ESRCH;
fd = pidfd_create(p, flags);
put_pid(p);
return fd;
}
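/*
 * Usage sketch (illustrative, userspace side; not part of this file):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd < 0)
 *		err(1, "pidfd_open");
 *
 * The returned descriptor can then be polled for process exit or passed to
 * pidfd_send_signal(2) and waitid(P_PIDFD, ...).
 */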
#ifdef CONFIG_SYSCTL
static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root)
{
return &task_active_pid_ns(current)->set;
}
static int set_is_seen(struct ctl_table_set *set)
{
return &task_active_pid_ns(current)->set == set;
}
static int pid_table_root_permissions(struct ctl_table_header *head,
const struct ctl_table *table)
{
struct pid_namespace *pidns =
container_of(head->set, struct pid_namespace, set);
int mode = table->mode;
if (ns_capable_noaudit(pidns->user_ns, CAP_SYS_ADMIN) ||
uid_eq(current_euid(), make_kuid(pidns->user_ns, 0)))
mode = (mode & S_IRWXU) >> 6;
else if (in_egroup_p(make_kgid(pidns->user_ns, 0)))
mode = (mode & S_IRWXG) >> 3;
else
mode = mode & S_IROTH;
return (mode << 6) | (mode << 3) | mode;
}
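/*
 * Worked example (illustrative): for a 0644 table entry, a caller with
 * CAP_SYS_ADMIN in the pid namespace's user namespace takes the owner
 * bits: mode = (0644 & S_IRWXU) >> 6 = 06, and the function returns 0666,
 * so that caller is granted read/write access to the entry.
 */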
static void pid_table_root_set_ownership(struct ctl_table_header *head,
kuid_t *uid, kgid_t *gid)
{
struct pid_namespace *pidns =
container_of(head->set, struct pid_namespace, set);
kuid_t ns_root_uid;
kgid_t ns_root_gid;
ns_root_uid = make_kuid(pidns->user_ns, 0);
if (uid_valid(ns_root_uid))
*uid = ns_root_uid;
ns_root_gid = make_kgid(pidns->user_ns, 0);
if (gid_valid(ns_root_gid))
*gid = ns_root_gid;
}
static struct ctl_table_root pid_table_root = {
.lookup = pid_table_root_lookup,
.permissions = pid_table_root_permissions,
.set_ownership = pid_table_root_set_ownership,
};
static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct pid *new_pid;
pid_t tmp_pid;
int r;
struct ctl_table tmp_table = *table;
tmp_pid = pid_vnr(cad_pid);
tmp_table.data = &tmp_pid;
r = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
if (r || !write)
return r;
new_pid = find_get_pid(tmp_pid);
if (!new_pid)
return -ESRCH;
put_pid(xchg(&cad_pid, new_pid));
return 0;
}
static const struct ctl_table pid_table[] = {
{
.procname = "pid_max",
.data = &init_pid_ns.pid_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &pid_max_min,
.extra2 = &pid_max_max,
},
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "cad_pid",
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_do_cad_pid,
},
#endif
};
#endif
int register_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
struct ctl_table *tbl;
setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen);
tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL);
if (!tbl)
return -ENOMEM;
tbl->data = &pidns->pid_max;
pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max,
PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl,
ARRAY_SIZE(pid_table));
if (!pidns->sysctls) {
kfree(tbl);
retire_sysctl_set(&pidns->set);
return -ENOMEM;
}
#endif
return 0;
}
void unregister_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
const struct ctl_table *tbl;
tbl = pidns->sysctls->ctl_table_arg;
unregister_sysctl_table(pidns->sysctls);
retire_sysctl_set(&pidns->set);
kfree(tbl);
#endif
}
void __init pid_idr_init(void)
{
/* Verify no one has done anything silly: */
BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
/* bump default and minimum pid_max based on number of cpus */
init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max,
PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
pid_max_min = max_t(int, pid_max_min,
PIDS_PER_CPU_MIN * num_possible_cpus());
pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min);
idr_init(&init_pid_ns.idr);
init_pid_ns.pid_cachep = kmem_cache_create("pid",
struct_size_t(struct pid, numbers, 1),
__alignof__(struct pid),
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
NULL);
}
static __init int pid_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
/* "kernel" directory will have already been initialized. */
BUG_ON(register_pidns_sysctls(&init_pid_ns));
#endif
return 0;
}
subsys_initcall(pid_namespace_sysctl_init);
static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
struct file *file;
int ret;
ret = down_read_killable(&task->signal->exec_update_lock);
if (ret)
return ERR_PTR(ret);
if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
file = fget_task(task, fd);
else
file = ERR_PTR(-EPERM);
up_read(&task->signal->exec_update_lock);
if (!file) {
/*
* It is possible that the target thread is exiting; it can be
* either:
* 1. before exit_signals(), which gives a real fd
* 2. before exit_files() takes the task_lock(), which also gives a real fd
* 3. after exit_files() releases task_lock(), ->files is NULL;
* this has PF_EXITING, since it was set in exit_signals(),
* __pidfd_fget() returns EBADF.
* In case 3 we get EBADF, but that really means ESRCH, since
* the task is currently exiting and has freed its files
* struct, so we fix it up.
*/
if (task->flags & PF_EXITING)
file = ERR_PTR(-ESRCH);
else
file = ERR_PTR(-EBADF);
}
return file;
}
static int pidfd_getfd(struct pid *pid, int fd)
{
struct task_struct *task;
struct file *file;
int ret;
task = get_pid_task(pid, PIDTYPE_PID);
if (!task)
return -ESRCH;
file = __pidfd_fget(task, fd);
put_task_struct(task);
if (IS_ERR(file))
return PTR_ERR(file);
ret = receive_fd(file, NULL, O_CLOEXEC);
fput(file);
return ret;
}
/**
* sys_pidfd_getfd() - Get a file descriptor from another process
*
* @pidfd: the pidfd file descriptor of the process
* @fd: the file descriptor number to get
* @flags: flags on how to get the fd (reserved)
*
* This syscall gets a copy of a file descriptor from another process
* based on the pidfd, and file descriptor number. It requires that
* the calling process has the ability to ptrace the process represented
* by the pidfd. The process which is having its file descriptor copied
* is otherwise unaffected.
*
* Return: On success, a cloexec file descriptor is returned.
* On error, a negative errno number will be returned.
*/
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
unsigned int, flags)
{
struct pid *pid;
/* flags is currently unused - make sure it's unset */
if (flags)
return -EINVAL;
CLASS(fd, f)(pidfd);
if (fd_empty(f))
return -EBADF;
pid = pidfd_pid(fd_file(f));
if (IS_ERR(pid))
return PTR_ERR(pid);
return pidfd_getfd(pid, fd);
}
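/*
 * Illustrative sketch (not part of this file): userspace usually pairs
 * pidfd_getfd() with pidfd_open() to lift a descriptor out of another
 * process, subject to the PTRACE_MODE_ATTACH_REALCREDS check above. The
 * helper name below is hypothetical.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int grab_remote_fd(pid_t pid, int remote_fd)
 *	{
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *		int fd;
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// flags must currently be 0; the copy comes back O_CLOEXEC
 *		fd = syscall(SYS_pidfd_getfd, pidfd, remote_fd, 0);
 *		close(pidfd);
 *		return fd;
 *	}
 */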
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* I/O iterator iteration building functions.
*
* Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#ifndef _LINUX_IOV_ITER_H
#define _LINUX_IOV_ITER_H
#include <linux/uio.h>
#include <linux/bvec.h>
#include <linux/folio_queue.h>
typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
void *priv, void *priv2);
typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
void *priv, void *priv2);
/*
* Handle ITER_UBUF.
*/
static __always_inline
size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
iov_ustep_f step)
{
void __user *base = iter->ubuf;
size_t progress = 0, remain;
remain = step(base + iter->iov_offset, 0, len, priv, priv2);
progress = len - remain;
iter->iov_offset += progress;
iter->count -= progress;
return progress;
}
/*
* Handle ITER_IOVEC.
*/
static __always_inline
size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
iov_ustep_f step)
{
const struct iovec *p = iter->__iov;
size_t progress = 0, skip = iter->iov_offset;
do {
size_t remain, consumed;
size_t part = min(len, p->iov_len - skip);
if (likely(part)) {
remain = step(p->iov_base + skip, progress, part, priv, priv2);
consumed = part - remain;
progress += consumed;
skip += consumed;
len -= consumed;
if (skip < p->iov_len)
break;
}
p++;
skip = 0;
} while (len);
iter->nr_segs -= p - iter->__iov;
iter->__iov = p;
iter->iov_offset = skip;
iter->count -= progress;
return progress;
}
/*
* Handle ITER_KVEC.
*/
static __always_inline
size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
iov_step_f step)
{
const struct kvec *p = iter->kvec;
size_t progress = 0, skip = iter->iov_offset;
do {
size_t remain, consumed;
size_t part = min(len, p->iov_len - skip);
if (likely(part)) {
remain = step(p->iov_base + skip, progress, part, priv, priv2);
consumed = part - remain;
progress += consumed;
skip += consumed;
len -= consumed;
if (skip < p->iov_len)
break;
}
p++;
skip = 0;
} while (len);
iter->nr_segs -= p - iter->kvec;
iter->kvec = p;
iter->iov_offset = skip;
iter->count -= progress;
return progress;
}
/*
* Handle ITER_BVEC.
*/
static __always_inline
size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
iov_step_f step)
{
const struct bio_vec *p = iter->bvec;
size_t progress = 0, skip = iter->iov_offset;
do {
size_t remain, consumed;
size_t offset = p->bv_offset + skip, part;
void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
part = min3(len,
(size_t)(p->bv_len - skip),
(size_t)(PAGE_SIZE - offset % PAGE_SIZE));
remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
kunmap_local(kaddr);
consumed = part - remain;
len -= consumed;
progress += consumed;
skip += consumed;
if (skip >= p->bv_len) {
skip = 0;
p++;
}
if (remain)
break;
} while (len);
iter->nr_segs -= p - iter->bvec;
iter->bvec = p;
iter->iov_offset = skip;
iter->count -= progress;
return progress;
}
/*
* Handle ITER_FOLIOQ.
*/
static __always_inline
size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
iov_step_f step)
{
const struct folio_queue *folioq = iter->folioq;
unsigned int slot = iter->folioq_slot;
size_t progress = 0, skip = iter->iov_offset;
if (slot == folioq_nr_slots(folioq)) {
/* The iterator may have been extended. */
folioq = folioq->next;
slot = 0;
}
do {
struct folio *folio = folioq_folio(folioq, slot);
size_t part, remain = 0, consumed;
size_t fsize;
void *base;
if (!folio)
break;
fsize = folioq_folio_size(folioq, slot);
if (skip < fsize) {
base = kmap_local_folio(folio, skip);
part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
remain = step(base, progress, part, priv, priv2);
kunmap_local(base);
consumed = part - remain;
len -= consumed;
progress += consumed;
skip += consumed;
}
if (skip >= fsize) {
skip = 0;
slot++;
if (slot == folioq_nr_slots(folioq) && folioq->next) {
folioq = folioq->next;
slot = 0;
}
}
if (remain)
break;
} while (len);
iter->folioq_slot = slot;
iter->folioq = folioq;
iter->iov_offset = skip;
iter->count -= progress;
return progress;
}
/*
* Handle ITER_XARRAY.
*/
static __always_inline
size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
iov_step_f step)
{
struct folio *folio;
size_t progress = 0;
loff_t start = iter->xarray_start + iter->iov_offset;
pgoff_t index = start / PAGE_SIZE;
XA_STATE(xas, iter->xarray, index);
rcu_read_lock();
xas_for_each(&xas, folio, ULONG_MAX) {
size_t remain, consumed, offset, part, flen;
if (xas_retry(&xas, folio))
continue;
if (WARN_ON(xa_is_value(folio)))
break;
if (WARN_ON(folio_test_hugetlb(folio)))
break;
offset = offset_in_folio(folio, start + progress);
flen = min(folio_size(folio) - offset, len);
while (flen) {
void *base = kmap_local_folio(folio, offset);
part = min_t(size_t, flen,
PAGE_SIZE - offset_in_page(offset));
remain = step(base, progress, part, priv, priv2);
kunmap_local(base);
consumed = part - remain;
progress += consumed;
len -= consumed;
if (remain || len == 0)
goto out;
flen -= consumed;
offset += consumed;
}
}
out:
rcu_read_unlock();
iter->iov_offset += progress;
iter->count -= progress;
return progress;
}
/*
* Handle ITER_DISCARD.
*/
static __always_inline
size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
iov_step_f step)
{
size_t progress = len;
iter->count -= progress;
return progress;
}
/**
* iterate_and_advance2 - Iterate over an iterator
* @iter: The iterator to iterate over.
* @len: The amount to iterate over.
* @priv: Data for the step functions.
* @priv2: More data for the step functions.
* @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
* @step: Function for other iterators; given kernel addresses.
*
* Iterate over the next part of an iterator, up to the specified length. The
* buffer is presented in segments, which for kernel iteration are broken up by
* physical pages and mapped, with the mapped address being presented.
*
* Two step functions, @step and @ustep, must be provided, one for handling
* mapped kernel addresses and the other is given user addresses which have the
* potential to fault since no pinning is performed.
*
* The step functions are passed the address and length of the segment, @priv,
* @priv2 and the amount of data so far iterated over (which can, for example,
* be added to @priv to point to the right part of a second buffer). The step
* functions should return the amount of the segment they didn't process (ie. 0
* indicates complete processing).
*
* This function returns the amount of data processed (ie. 0 means nothing was
* processed and the value of @len means processed to completion).
*/
static __always_inline
size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
void *priv2, iov_ustep_f ustep, iov_step_f step)
{
if (unlikely(iter->count < len))
len = iter->count;
if (unlikely(!len))
return 0;
if (likely(iter_is_ubuf(iter)))
return iterate_ubuf(iter, len, priv, priv2, ustep);
if (likely(iter_is_iovec(iter)))
return iterate_iovec(iter, len, priv, priv2, ustep);
if (iov_iter_is_bvec(iter))
return iterate_bvec(iter, len, priv, priv2, step);
if (iov_iter_is_kvec(iter))
return iterate_kvec(iter, len, priv, priv2, step);
if (iov_iter_is_folioq(iter))
return iterate_folioq(iter, len, priv, priv2, step);
if (iov_iter_is_xarray(iter))
return iterate_xarray(iter, len, priv, priv2, step);
return iterate_discard(iter, len, priv, priv2, step);
}
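/*
 * Illustrative sketch (not part of this header): a minimal pair of step
 * functions that copy iterated data into a flat buffer passed via @priv,
 * using the "amount so far iterated" argument as the destination offset.
 * The names are hypothetical; the real copy helpers in lib/iov_iter.c
 * follow the same pattern.
 *
 *	static size_t example_kstep(void *base, size_t progress, size_t len,
 *				    void *priv, void *priv2)
 *	{
 *		memcpy((char *)priv + progress, base, len);
 *		return 0;	// nothing left over: segment fully processed
 *	}
 *
 *	static size_t example_ustep(void __user *base, size_t progress,
 *				    size_t len, void *priv, void *priv2)
 *	{
 *		// may fault; report the uncopied remainder back to the core
 *		return copy_from_user((char *)priv + progress, base, len);
 *	}
 *
 *	// copied = iterate_and_advance(iter, len, buf, example_ustep,
 *	//			        example_kstep);
 */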
/**
* iterate_and_advance - Iterate over an iterator
* @iter: The iterator to iterate over.
* @len: The amount to iterate over.
* @priv: Data for the step functions.
* @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
* @step: Function for other iterators; given kernel addresses.
*
* As iterate_and_advance2(), but priv2 is always NULL.
*/
static __always_inline
size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
iov_ustep_f ustep, iov_step_f step)
{
return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
}
/**
* iterate_and_advance_kernel - Iterate over a kernel-internal iterator
* @iter: The iterator to iterate over.
* @len: The amount to iterate over.
* @priv: Data for the step functions.
* @priv2: More data for the step functions.
* @step: Function for other iterators; given kernel addresses.
*
* Iterate over the next part of an iterator, up to the specified length. The
* buffer is presented in segments, which for kernel iteration are broken up by
* physical pages and mapped, with the mapped address being presented.
*
* [!] Note: This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type
* iterators; it will not handle UBUF or IOVEC-type iterators.
*
* A single step function, @step, must be provided; it is only ever given
* mapped kernel addresses, since UBUF and IOVEC-type (user address) iterators
* are not handled here.
*
* The step functions are passed the address and length of the segment, @priv,
* @priv2 and the amount of data so far iterated over (which can, for example,
* be added to @priv to point to the right part of a second buffer). The step
* functions should return the amount of the segment they didn't process (ie. 0
* indicates complete processing).
*
* This function returns the amount of data processed (ie. 0 means nothing was
* processed and the value of @len means processed to completion).
*/
static __always_inline
size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
void *priv2, iov_step_f step)
{
if (unlikely(iter->count < len))
len = iter->count;
if (unlikely(!len))
return 0;
if (iov_iter_is_bvec(iter))
return iterate_bvec(iter, len, priv, priv2, step);
if (iov_iter_is_kvec(iter))
return iterate_kvec(iter, len, priv, priv2, step);
if (iov_iter_is_folioq(iter))
return iterate_folioq(iter, len, priv, priv2, step);
if (iov_iter_is_xarray(iter))
return iterate_xarray(iter, len, priv, priv2, step);
return iterate_discard(iter, len, priv, priv2, step);
}
#endif /* _LINUX_IOV_ITER_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_NULLS_H
#define _LINUX_RCULIST_NULLS_H
#ifdef __KERNEL__
/*
* RCU-protected list version
*/
#include <linux/list_nulls.h>
#include <linux/rcupdate.h>
/**
* hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
* @n: the element to delete from the hash list.
*
* Note: hlist_nulls_unhashed() on the node returns true after this. It is
* useful for RCU based read lockfree traversal if the writer side
* must know if the list entry is still hashed or already unhashed.
*
* In particular, it means that we can not poison the forward pointers
* that may still be used for walking the hash list and we can only
* zero the pprev pointer so list_unhashed() will return true after
* this.
*
* The caller must take whatever precautions are necessary (such as
* holding appropriate locks) to avoid racing with another
* list-mutation primitive, such as hlist_nulls_add_head_rcu() or
* hlist_nulls_del_rcu(), running on this same list. However, it is
* perfectly legal to run concurrently with the _rcu list-traversal
* primitives, such as hlist_nulls_for_each_entry_rcu().
*/
static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
WRITE_ONCE(n->pprev, NULL);
}
}
/**
* hlist_nulls_first_rcu - returns the first element of the hash list.
* @head: the head of the list.
*/
#define hlist_nulls_first_rcu(head) \
(*((struct hlist_nulls_node __rcu __force **)&(head)->first))
/**
* hlist_nulls_next_rcu - returns the element of the list after @node.
* @node: element of the list.
*/
#define hlist_nulls_next_rcu(node) \
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
/**
* hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
* Note: hlist_nulls_unhashed() on entry does not return true after this;
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the hash list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
* or hlist_nulls_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_nulls_for_each_entry().
*/
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
* hlist_nulls_add_head_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist_nulls,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
* or hlist_nulls_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *first = h->first;
WRITE_ONCE(n->next, first);
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
WRITE_ONCE(first->pprev, &n->next);
}
/**
* hlist_nulls_add_tail_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist_nulls,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
* or hlist_nulls_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *i, *last = NULL;
/* Note: write side code, so rcu accessors are not needed. */
for (i = h->first; !is_a_nulls(i); i = i->next)
last = i;
if (last) {
WRITE_ONCE(n->next, last->next);
n->pprev = &last->next;
rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
}
}
/* after that hlist_nulls_del will work */
static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
{
n->pprev = &n->next;
n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
}
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*
* The barrier() is needed to make sure the compiler doesn't cache the first element [1],
* as this loop can be restarted [2]
* [1] Documentation/memory-barriers.txt around line 1533
* [2] Documentation/RCU/rculist_nulls.rst around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
for (({barrier();}), \
pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
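/*
 * Illustrative sketch (not part of this header): a typical nulls-list user
 * embeds an hlist_nulls_node in its object, adds entries under a bucket
 * lock, and walks the chain under rcu_read_lock(), restarting the lookup if
 * the terminating nulls value does not match the expected bucket. The
 * struct and bucket names below are hypothetical.
 *
 *	struct obj {
 *		int key;
 *		struct hlist_nulls_node node;
 *	};
 *
 *	// writer side, under the bucket lock
 *	struct obj *item = ...;
 *	hlist_nulls_add_head_rcu(&item->node, &bucket->head);
 *
 *	// reader side
 *	struct obj *o;
 *	struct hlist_nulls_node *pos;
 *
 *	rcu_read_lock();
 *	hlist_nulls_for_each_entry_rcu(o, pos, &bucket->head, node)
 *		if (o->key == key)
 *			break;
 *	// if (get_nulls_value(pos) != expected_bucket_id), the object may
 *	// have moved; restart the lookup (see rculist_nulls.rst)
 *	rcu_read_unlock();
 */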
/**
* hlist_nulls_for_each_entry_safe -
* iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*/
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
for (({barrier();}), \
pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
#endif
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h
*
* Copyright (c) 2011, Tom Herbert <therbert@google.com>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <trace/events/napi.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
static void dql_check_stall(struct dql *dql, unsigned short stall_thrs)
{
unsigned long now;
if (!stall_thrs)
return;
now = jiffies;
/* Check for a potential stall */
if (time_after_eq(now, dql->last_reap + stall_thrs)) {
unsigned long hist_head, t, start, end;
/* We are trying to detect a period of at least @stall_thrs
* jiffies without any Tx completions, but during the first half
* of which some Tx was posted.
*/
dqs_again:
hist_head = READ_ONCE(dql->history_head);
/* pairs with smp_wmb() in dql_queued() */
smp_rmb();
/* Get the previous entry in the ring buffer, which is the
* oldest sample.
*/
start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG;
/* Advance start to continue from the last reap time */
if (time_before(start, dql->last_reap + 1))
start = dql->last_reap + 1;
/* Newest sample we should have already seen a completion for */
end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1);
* Shrink the search space to [start, (now - stall_thrs/2)] if
* `end` is beyond the stall zone
*/
if (time_before(now, end + stall_thrs / 2))
end = now - stall_thrs / 2;
/* Search for the queued time in [t, end] */
for (t = start; time_before_eq(t, end); t++)
if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG),
dql->history))
break;
/* Variable t contains the time of the queue */
if (!time_before_eq(t, end))
goto no_stall;
/* The ring buffer was modified in the meantime, retry */
if (hist_head != READ_ONCE(dql->history_head))
goto dqs_again;
dql->stall_cnt++;
dql->stall_max = max_t(unsigned short, dql->stall_max, now - t);
trace_dql_stall_detected(dql->stall_thrs, now - t,
dql->last_reap, dql->history_head,
now, dql->history);
}
no_stall:
dql->last_reap = now;
}
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
unsigned int inprogress, prev_inprogress, limit;
unsigned int ovlimit, completed, num_queued;
unsigned short stall_thrs;
bool all_prev_completed;
num_queued = READ_ONCE(dql->num_queued);
/* Read stall_thrs in advance since it belongs to the same (first)
* cache line as ->num_queued. This way, dql_check_stall() does not
* need to touch the first cache line again later, reducing the window
* of possible false sharing.
*/
stall_thrs = READ_ONCE(dql->stall_thrs);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
completed = dql->num_completed + count;
limit = dql->limit;
ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
inprogress = num_queued - completed;
prev_inprogress = dql->prev_num_queued - dql->num_completed;
all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
if ((ovlimit && !inprogress) ||
(dql->prev_ovlimit && all_prev_completed)) {
/*
* Queue considered starved if:
* - The queue was over-limit in the last interval,
* and there is no more data in the queue.
* OR
* - The queue was over-limit in the previous interval and
* when enqueuing it was possible that all queued data
* had been consumed. This covers the case when the queue
* may have become starved between completion processing
* running and the next time enqueue was scheduled.
*
* When queue is starved increase the limit by the amount
* of bytes both sent and completed in the last interval,
* plus any previous over-limit.
*/
limit += POSDIFF(completed, dql->prev_num_queued) +
dql->prev_ovlimit;
dql->slack_start_time = jiffies;
dql->lowest_slack = UINT_MAX;
} else if (inprogress && prev_inprogress && !all_prev_completed) {
/*
* Queue was not starved, check if the limit can be decreased.
* A decrease is only considered if the queue has been busy in
* the whole interval (the check above).
*
* If there is slack, the amount of excess data queued above
* the amount needed to prevent starvation, the queue limit
* can be decreased. To avoid hysteresis we consider the
* minimum amount of slack found over several iterations of the
* completion routine.
*/
unsigned int slack, slack_last_objs;
/*
* Slack is the maximum of
* - The queue limit plus previous over-limit minus twice
* the number of objects completed. Note that two times
* number of completed bytes is a basis for an upper bound
* of the limit.
* - Portion of objects in the last queuing operation that
* was not part of non-zero previous over-limit. That is
* "round down" by non-overlimit portion of the last
* queueing operation.
*/
slack = POSDIFF(limit + dql->prev_ovlimit,
2 * (completed - dql->num_completed));
slack_last_objs = dql->prev_ovlimit ?
POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
slack = max(slack, slack_last_objs);
if (slack < dql->lowest_slack)
dql->lowest_slack = slack;
if (time_after(jiffies,
dql->slack_start_time + dql->slack_hold_time)) {
limit = POSDIFF(limit, dql->lowest_slack);
dql->slack_start_time = jiffies;
dql->lowest_slack = UINT_MAX;
}
}
/* Enforce bounds on limit */
limit = clamp(limit, dql->min_limit, dql->max_limit);
if (limit != dql->limit) {
dql->limit = limit;
ovlimit = 0;
}
dql->adj_limit = limit + completed;
dql->prev_ovlimit = ovlimit;
dql->prev_last_obj_cnt = READ_ONCE(dql->last_obj_cnt);
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
dql_check_stall(dql, stall_thrs);
}
EXPORT_SYMBOL(dql_completed);
void dql_reset(struct dql *dql)
{
/* Reset all dynamic values */
dql->limit = dql->min_limit;
dql->num_queued = 0;
dql->num_completed = 0;
dql->last_obj_cnt = 0;
dql->prev_num_queued = 0;
dql->prev_last_obj_cnt = 0;
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
dql->last_reap = jiffies;
dql->history_head = jiffies / BITS_PER_LONG;
memset(dql->history, 0, sizeof(dql->history));
}
EXPORT_SYMBOL(dql_reset);
void dql_init(struct dql *dql, unsigned int hold_time)
{
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
dql->stall_thrs = 0;
dql_reset(dql);
}
EXPORT_SYMBOL(dql_init);
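/*
 * Illustrative sketch (not part of this file): a network driver pairs
 * dql_queued() on the transmit path with dql_completed() in its completion
 * handler and consults dql_avail() to throttle the queue. The txq layout and
 * the stop/wake helpers are hypothetical; real drivers normally go through
 * the netdev_tx_sent_queue()/netdev_tx_completed_queue() wrappers.
 *
 *	// transmit path
 *	dql_queued(&txq->dql, skb->len);
 *	if (dql_avail(&txq->dql) < 0)
 *		stop_queue(txq);
 *
 *	// completion path, after reaping @bytes worth of descriptors
 *	dql_completed(&txq->dql, bytes);
 *	if (dql_avail(&txq->dql) >= 0)
 *		wake_queue(txq);
 */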
/*
* Non-physical true random number generator based on timing jitter --
* Linux Kernel Crypto API specific code
*
* Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL2 are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <crypto/hash.h>
#include <crypto/sha3.h>
#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <crypto/internal/rng.h>
#include "jitterentropy.h"
#define JENT_CONDITIONING_HASH "sha3-256-generic"
/***************************************************************************
* Helper function
***************************************************************************/
void *jent_kvzalloc(unsigned int len)
{
return kvzalloc(len, GFP_KERNEL);
}
void jent_kvzfree(void *ptr, unsigned int len)
{
kvfree_sensitive(ptr, len);
}
void *jent_zalloc(unsigned int len)
{
return kzalloc(len, GFP_KERNEL);
}
void jent_zfree(void *ptr)
{
kfree_sensitive(ptr);
}
/*
* Obtain a high-resolution time stamp value. The time stamp is used to measure
* the execution time of a given code path and its variations. Hence, the time
* stamp must have a sufficiently high resolution.
*
* Note, if the function returns zero because a given architecture does not
* implement a high-resolution time stamp, the RNG code's runtime test
* will detect it and will not produce output.
*/
void jent_get_nstime(__u64 *out)
{
__u64 tmp = 0;
tmp = random_get_entropy();
/*
* If random_get_entropy does not return a value, i.e. it is not
* implemented for a given architecture, use a clock source,
* hoping that there are timers we can work with.
*/
if (tmp == 0)
tmp = ktime_get_ns();
*out = tmp;
jent_raw_hires_entropy_store(tmp);
}
int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
unsigned int addtl_len, __u64 hash_loop_cnt,
unsigned int stuck)
{
struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm);
u8 intermediary[SHA3_256_DIGEST_SIZE];
__u64 j = 0;
int ret;
desc->tfm = hash_state_desc->tfm;
if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) {
pr_warn_ratelimited("Unexpected digest size\n");
return -EINVAL;
}
kmsan_unpoison_memory(intermediary, sizeof(intermediary));
/*
* This loop fills a buffer which is injected into the entropy pool.
* The main reason for this loop is to execute something over which we
* can perform a timing measurement. The injection of the resulting
* data into the pool is performed to ensure the result is used and
* the compiler cannot optimize the loop away in case the result is not
* used at all. Yet that data is considered "additional information"
* considering the terminology from SP800-90A without any entropy.
*
* Note, it does not matter which or how much data you inject; we are
* interested in one Keccak-1600 compression operation performed with
* the crypto_shash_final.
*/
for (j = 0; j < hash_loop_cnt; j++) {
ret = crypto_shash_init(desc) ?:
crypto_shash_update(desc, intermediary,
sizeof(intermediary)) ?:
crypto_shash_finup(desc, addtl, addtl_len, intermediary);
if (ret)
goto err;
}
/*
* Inject the data from the previous loop into the pool. This data is
* not considered to contain any entropy, but it stirs the pool a bit.
*/
ret = crypto_shash_update(hash_state_desc, intermediary, sizeof(intermediary));
if (ret)
goto err;
/*
* Insert the time stamp into the hash context representing the pool.
*
* If the time stamp is stuck, do not finally insert the value into the
* entropy pool. Although this operation should not do any harm even
* when the time stamp has no entropy, SP800-90B requires that any
* conditioning operation have an identical amount of input data
* according to section 3.1.5.
*/
if (stuck) {
time = 0;
}
ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64));
err:
shash_desc_zero(desc);
memzero_explicit(intermediary, sizeof(intermediary));
return ret;
}
int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len)
{
struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
u8 jent_block[SHA3_256_DIGEST_SIZE];
/* Obtain data from entropy pool and re-initialize it */
int ret = crypto_shash_final(hash_state_desc, jent_block) ?:
crypto_shash_init(hash_state_desc) ?:
crypto_shash_update(hash_state_desc, jent_block,
sizeof(jent_block));
if (!ret && dst_len)
memcpy(dst, jent_block, dst_len);
memzero_explicit(jent_block, sizeof(jent_block));
return ret;
}
/***************************************************************************
* Kernel crypto API interface
***************************************************************************/
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
struct crypto_shash *tfm;
struct shash_desc *sdesc;
};
static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
spin_lock(&rng->jent_lock);
if (rng->sdesc) {
shash_desc_zero(rng->sdesc);
kfree(rng->sdesc);
}
rng->sdesc = NULL;
if (rng->tfm)
crypto_free_shash(rng->tfm);
rng->tfm = NULL;
if (rng->entropy_collector)
jent_entropy_collector_free(rng->entropy_collector);
rng->entropy_collector = NULL;
spin_unlock(&rng->jent_lock);
}
static int jent_kcapi_init(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
struct crypto_shash *hash;
struct shash_desc *sdesc;
int size, ret = 0;
spin_lock_init(&rng->jent_lock);
/*
* Use SHA3-256 as conditioner. We allocate only the generic
* implementation as we are not interested in high-performance. The
* execution time of the SHA3 operation is measured and adds to the
* Jitter RNG's unpredictable behavior. If we have a slower hash
* implementation, the execution timing variations are larger. When
* using a fast implementation, we would need to call it more often
* as its variations are lower.
*/
hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
if (IS_ERR(hash)) {
pr_err("Cannot allocate conditioning digest\n");
return PTR_ERR(hash);
}
rng->tfm = hash;
size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
ret = -ENOMEM;
goto err;
}
sdesc->tfm = hash;
crypto_shash_init(sdesc);
rng->sdesc = sdesc;
rng->entropy_collector =
jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0,
sdesc);
if (!rng->entropy_collector) {
ret = -ENOMEM;
goto err;
}
spin_lock_init(&rng->jent_lock);
return 0;
err:
jent_kcapi_cleanup(tfm);
return ret;
}
static int jent_kcapi_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *rdata, unsigned int dlen)
{
struct jitterentropy *rng = crypto_rng_ctx(tfm);
int ret = 0;
spin_lock(&rng->jent_lock);
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
if (ret == -3) {
/* Handle permanent health test error */
/*
* If the kernel was booted with fips=1, it implies that
* the entire kernel acts as a FIPS 140 module. In this case
* an SP800-90B permanent health test error is treated as
* a FIPS module error.
*/
if (fips_enabled)
panic("Jitter RNG permanent health test failure\n");
pr_err("Jitter RNG permanent health test failure\n");
ret = -EFAULT;
} else if (ret == -2) {
/* Handle intermittent health test error */
pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
ret = -EAGAIN;
} else if (ret == -1) {
/* Handle other errors */
ret = -EINVAL;
}
spin_unlock(&rng->jent_lock);
return ret;
}
static int jent_kcapi_reset(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
return 0;
}
static struct rng_alg jent_alg = {
.generate = jent_kcapi_random,
.seed = jent_kcapi_reset,
.seedsize = 0,
.base = {
.cra_name = "jitterentropy_rng",
.cra_driver_name = "jitterentropy_rng",
.cra_priority = 100,
.cra_ctxsize = sizeof(struct jitterentropy),
.cra_module = THIS_MODULE,
.cra_init = jent_kcapi_init,
.cra_exit = jent_kcapi_cleanup,
}
};
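/*
 * Illustrative sketch (not part of this file): a kernel consumer obtains
 * random bytes from this RNG through the generic crypto_rng API. Error
 * handling is abbreviated and the buffer is hypothetical.
 *
 *	struct crypto_rng *rng = crypto_alloc_rng("jitterentropy_rng", 0, 0);
 *	u8 buf[32];
 *
 *	if (!IS_ERR(rng)) {
 *		// may return -EAGAIN on an intermittent health test failure
 *		crypto_rng_get_bytes(rng, buf, sizeof(buf));
 *		crypto_free_rng(rng);
 *	}
 */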
static int __init jent_mod_init(void)
{
SHASH_DESC_ON_STACK(desc, tfm);
struct crypto_shash *tfm;
int ret = 0;
jent_testing_init();
tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
if (IS_ERR(tfm)) {
jent_testing_exit();
return PTR_ERR(tfm);
}
desc->tfm = tfm;
crypto_shash_init(desc);
ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL);
shash_desc_zero(desc);
crypto_free_shash(tfm);
if (ret) {
/* Handle permanent health test error */
if (fips_enabled)
panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
jent_testing_exit();
pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
return -EFAULT;
}
return crypto_register_rng(&jent_alg);
}
static void __exit jent_mod_exit(void)
{
jent_testing_exit();
crypto_unregister_rng(&jent_alg);
}
module_init(jent_mod_init);
module_exit(jent_mod_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Non-physical True Random Number Generator based on CPU Jitter");
MODULE_ALIAS_CRYPTO("jitterentropy_rng");
// SPDX-License-Identifier: GPL-2.0+
/*
* User-space Probes (UProbes)
*
* Copyright (C) IBM Corporation, 2008-2012
* Authors:
* Srikar Dronamraju
* Jim Keniston
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h> /* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/rmap.h> /* anon_vma_prepare */
#include <linux/mmu_notifier.h>
#include <linux/swap.h> /* folio_free_swap */
#include <linux/ptrace.h> /* user_enable_single_step */
#include <linux/kdebug.h> /* notifier mechanism */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>
#include <linux/oom.h> /* check_stable_address_space */
#include <linux/pagewalk.h>
#include <linux/uprobes.h>
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
static struct rb_root uprobes_tree = RB_ROOT;
/*
* allows us to skip the uprobe_mmap if there are no uprobe events active
* at this time. Probably a fine grained per inode count is better?
*/
#define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree)
static DEFINE_RWLOCK(uprobes_treelock); /* serialize rbtree access */
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);
#define UPROBES_HASH_SZ 13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
/* Covers return_instance's uprobe lifetime. */
DEFINE_STATIC_SRCU(uretprobes_srcu);
/* Have a copy of original instruction */
#define UPROBE_COPY_INSN 0
struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
refcount_t ref;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_head pending_list;
struct list_head consumers;
struct inode *inode; /* Also hold a ref to inode */
union {
struct rcu_head rcu;
struct work_struct work;
};
loff_t offset;
loff_t ref_ctr_offset;
unsigned long flags; /* "unsigned long" so bitops work */
/*
* The generic code assumes that it has two members of unknown type
* owned by the arch-specific code:
*
* insn - copy_insn() saves the original instruction here for
* arch_uprobe_analyze_insn().
*
* ixol - potentially modified instruction to execute out of
* line, copied to xol_area by xol_get_insn_slot().
*/
struct arch_uprobe arch;
};
struct delayed_uprobe {
struct list_head list;
struct uprobe *uprobe;
struct mm_struct *mm;
};
static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);
/*
* Execute out of line area: anonymous executable mapping installed
* by the probed task to execute the copy of the original instruction
* mangled by set_swbp().
*
* On a breakpoint hit, the thread contests for a slot. It frees the
* slot after singlestep. Currently a fixed number of slots are
* allocated.
*/
struct xol_area {
wait_queue_head_t wq; /* if all slots are busy */
unsigned long *bitmap; /* 0 = free slot */
struct page *page;
/*
* We keep the vma's vm_start rather than a pointer to the vma
* itself. The probed process or a naughty kernel module could make
* the vma go away, and we must handle that reasonably gracefully.
*/
unsigned long vaddr; /* Page(s) of instruction slots */
};
static void uprobe_warn(struct task_struct *t, const char *msg)
{
pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
}
/*
* valid_vma: Verify if the specified vma is an executable vma
* Relax restrictions while unregistering: vm_flags might have
* changed after breakpoint was inserted.
* - is_register: indicates if we are in register context.
* - Return 1 if the specified virtual address is in an
* executable vma.
*/
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
if (is_register)
flags |= VM_WRITE;
return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}
static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
/**
* is_swbp_insn - check if instruction is breakpoint instruction.
* @insn: instruction to be checked.
* Default implementation of is_swbp_insn
* Returns true if @insn is a breakpoint instruction.
*/
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
return *insn == UPROBE_SWBP_INSN;
}
/**
* is_trap_insn - check if instruction is breakpoint instruction.
* @insn: instruction to be checked.
* Default implementation of is_trap_insn
* Returns true if @insn is a breakpoint instruction.
*
* This function is needed for the case where an architecture has multiple
* trap instructions (like powerpc).
*/
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
return is_swbp_insn(insn);
}
void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
void *kaddr = kmap_atomic(page);
memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
kunmap_atomic(kaddr);
}
static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
void *kaddr = kmap_atomic(page);
memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
kunmap_atomic(kaddr);
}
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *insn,
int nbytes, void *data)
{
uprobe_opcode_t old_opcode;
bool is_swbp;
/*
* Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
* We do not check if it is any other 'trap variant' which could
* be conditional trap instruction such as the one powerpc supports.
*
* The logic is that we do not care if the underlying instruction
* is a trap variant; uprobes always wins over any other (gdb)
* breakpoint.
*/
uprobe_copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
is_swbp = is_swbp_insn(&old_opcode);
if (is_swbp_insn(insn)) {
if (is_swbp) /* register: already installed? */
return 0;
} else {
if (!is_swbp) /* unregister: was it changed by us? */
return 0;
}
return 1;
}
static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
struct delayed_uprobe *du;
list_for_each_entry(du, &delayed_uprobe_list, list)
if (du->uprobe == uprobe && du->mm == mm)
return du;
return NULL;
}
static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
struct delayed_uprobe *du;
if (delayed_uprobe_check(uprobe, mm))
return 0;
du = kzalloc(sizeof(*du), GFP_KERNEL);
if (!du)
return -ENOMEM;
du->uprobe = uprobe;
du->mm = mm;
list_add(&du->list, &delayed_uprobe_list);
return 0;
}
static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
if (WARN_ON(!du))
return;
list_del(&du->list);
kfree(du);
}
static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
struct list_head *pos, *q;
struct delayed_uprobe *du;
if (!uprobe && !mm)
return;
list_for_each_safe(pos, q, &delayed_uprobe_list) {
du = list_entry(pos, struct delayed_uprobe, list);
if (uprobe && du->uprobe != uprobe)
continue;
if (mm && du->mm != mm)
continue;
delayed_uprobe_delete(du);
}
}
static bool valid_ref_ctr_vma(struct uprobe *uprobe,
struct vm_area_struct *vma)
{
unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
return uprobe->ref_ctr_offset &&
vma->vm_file &&
file_inode(vma->vm_file) == uprobe->inode &&
(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
vma->vm_start <= vaddr &&
vma->vm_end > vaddr;
}
static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *tmp;
for_each_vma(vmi, tmp)
if (valid_ref_ctr_vma(uprobe, tmp))
return tmp;
return NULL;
}
static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
void *kaddr;
struct page *page;
int ret;
short *ptr;
if (!vaddr || !d)
return -EINVAL;
ret = get_user_pages_remote(mm, vaddr, 1,
FOLL_WRITE, &page, NULL);
if (unlikely(ret <= 0)) {
/*
* We are asking for 1 page. If get_user_pages_remote() fails,
* it may return 0; in that case we have to return an error.
*/
return ret == 0 ? -EBUSY : ret;
}
kaddr = kmap_atomic(page);
ptr = kaddr + (vaddr & ~PAGE_MASK);
if (unlikely(*ptr + d < 0)) {
pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
"curr val: %d, delta: %d\n", vaddr, *ptr, d);
ret = -EINVAL;
goto out;
}
*ptr += d;
ret = 0;
out:
kunmap_atomic(kaddr);
put_page(page);
return ret;
}
static void update_ref_ctr_warn(struct uprobe *uprobe,
struct mm_struct *mm, short d)
{
pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%p\n",
d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
(unsigned long long) uprobe->offset,
(unsigned long long) uprobe->ref_ctr_offset, mm);
}
static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
short d)
{
struct vm_area_struct *rc_vma;
unsigned long rc_vaddr;
int ret = 0;
rc_vma = find_ref_ctr_vma(uprobe, mm);
if (rc_vma) {
rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
ret = __update_ref_ctr(mm, rc_vaddr, d);
if (ret)
update_ref_ctr_warn(uprobe, mm, d);
if (d > 0)
return ret;
}
mutex_lock(&delayed_uprobe_lock);
if (d > 0)
ret = delayed_uprobe_add(uprobe, mm);
else
delayed_uprobe_remove(uprobe, mm);
mutex_unlock(&delayed_uprobe_lock);
return ret;
}
static bool orig_page_is_identical(struct vm_area_struct *vma,
unsigned long vaddr, struct page *page, bool *pmd_mappable)
{
const pgoff_t index = vaddr_to_offset(vma, vaddr) >> PAGE_SHIFT;
struct folio *orig_folio = filemap_get_folio(vma->vm_file->f_mapping,
index);
struct page *orig_page;
bool identical;
if (IS_ERR(orig_folio))
return false;
orig_page = folio_file_page(orig_folio, index);
*pmd_mappable = folio_test_pmd_mappable(orig_folio);
identical = folio_test_uptodate(orig_folio) &&
pages_identical(page, orig_page);
folio_put(orig_folio);
return identical;
}
static int __uprobe_write(struct vm_area_struct *vma,
struct folio_walk *fw, struct folio *folio,
unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes,
bool is_register)
{
const unsigned long vaddr = insn_vaddr & PAGE_MASK;
bool pmd_mappable;
/* For now, we'll only handle PTE-mapped folios. */
if (fw->level != FW_LEVEL_PTE)
return -EFAULT;
/*
* See can_follow_write_pte(): we'd actually prefer a writable PTE here,
* but the VMA might not be writable.
*/
if (!pte_write(fw->pte)) {
if (!PageAnonExclusive(fw->page))
return -EFAULT;
if (unlikely(userfaultfd_pte_wp(vma, fw->pte)))
return -EFAULT;
/* SOFTDIRTY is handled via pte_mkdirty() below. */
}
/*
* We'll temporarily unmap the page and flush the TLB, such that we can
* modify the page atomically.
*/
flush_cache_page(vma, vaddr, pte_pfn(fw->pte));
fw->pte = ptep_clear_flush(vma, vaddr, fw->ptep);
copy_to_page(fw->page, insn_vaddr, insn, nbytes);
/*
* When unregistering, we may only zap a PTE if uffd is disabled and
* there are no unexpected folio references ...
*/
if (is_register || userfaultfd_missing(vma) ||
(folio_ref_count(folio) != folio_expected_ref_count(folio) + 1))
goto remap;
/*
* ... and the mapped page is identical to the original page that
* would get faulted in on next access.
*/
if (!orig_page_is_identical(vma, vaddr, fw->page, &pmd_mappable))
goto remap;
dec_mm_counter(vma->vm_mm, MM_ANONPAGES);
folio_remove_rmap_pte(folio, fw->page, vma);
if (!folio_mapped(folio) && folio_test_swapcache(folio) &&
folio_trylock(folio)) {
folio_free_swap(folio);
folio_unlock(folio);
}
folio_put(folio);
return pmd_mappable;
remap:
/*
* Make sure that our copy_to_page() changes become visible before the
* set_pte_at() write.
*/
smp_wmb();
/* We modified the page. Make sure to mark the PTE dirty. */
set_pte_at(vma->vm_mm, vaddr, fw->ptep, pte_mkdirty(fw->pte));
return 0;
}
/*
* NOTE:
* Expect the breakpoint instruction to be the smallest size instruction for
* the architecture. If an arch has variable-length instructions and the
* breakpoint instruction is not the smallest-length instruction
* supported by that architecture, then we need to modify is_trap_at_addr and
* uprobe_write_opcode accordingly. This would never be a problem for archs
* that have fixed length instructions.
*
* uprobe_write_opcode - write the opcode at a given virtual address.
* @auprobe: arch specific probepoint information.
* @vma: the probed virtual memory area.
* @opcode_vaddr: the virtual address to store the opcode.
* @opcode: opcode to be written at @opcode_vaddr.
*
* Called with mm->mmap_lock held for write.
* Return 0 (success) or a negative errno.
*/
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
const unsigned long opcode_vaddr, uprobe_opcode_t opcode,
bool is_register)
{
return uprobe_write(auprobe, vma, opcode_vaddr, &opcode, UPROBE_SWBP_INSN_SIZE,
verify_opcode, is_register, true /* do_update_ref_ctr */, NULL);
}
int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
const unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes,
uprobe_write_verify_t verify, bool is_register, bool do_update_ref_ctr,
void *data)
{
const unsigned long vaddr = insn_vaddr & PAGE_MASK;
struct mm_struct *mm = vma->vm_mm;
struct uprobe *uprobe;
int ret, ref_ctr_updated = 0;
unsigned int gup_flags = FOLL_FORCE;
struct mmu_notifier_range range;
struct folio_walk fw;
struct folio *folio;
struct page *page;
uprobe = container_of(auprobe, struct uprobe, arch);
if (WARN_ON_ONCE(!is_cow_mapping(vma->vm_flags)))
return -EINVAL;
/*
* When registering, we have to break COW to get an exclusive anonymous
* page that we can safely modify. Use FOLL_WRITE to trigger a write
* fault if required. When unregistering, we might be lucky and the
* anon page is already gone. So defer write faults until really
* required. Use FOLL_SPLIT_PMD, because __uprobe_write()
* cannot deal with PMDs yet.
*/
if (is_register)
gup_flags |= FOLL_WRITE | FOLL_SPLIT_PMD;
retry:
ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, &page, NULL);
if (ret <= 0)
goto out;
folio = page_folio(page);
ret = verify(page, insn_vaddr, insn, nbytes, data);
if (ret <= 0) {
folio_put(folio);
goto out;
}
/* We are going to replace instruction, update ref_ctr. */
if (do_update_ref_ctr && !ref_ctr_updated && uprobe->ref_ctr_offset) {
ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
if (ret) {
folio_put(folio);
goto out;
}
ref_ctr_updated = 1;
}
ret = 0;
if (unlikely(!folio_test_anon(folio) || folio_is_zone_device(folio))) {
VM_WARN_ON_ONCE(is_register);
folio_put(folio);
goto out;
}
if (!is_register) {
/*
* In the common case, we'll be able to zap the page when
* unregistering. So trigger MMU notifiers now, as we won't
* be able to do it under PTL.
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
vaddr, vaddr + PAGE_SIZE);
mmu_notifier_invalidate_range_start(&range);
}
ret = -EAGAIN;
/* Walk the page tables again, to perform the actual update. */
if (folio_walk_start(&fw, vma, vaddr, 0)) {
if (fw.page == page)
ret = __uprobe_write(vma, &fw, folio, insn_vaddr, insn, nbytes, is_register);
folio_walk_end(&fw, vma);
}
if (!is_register)
mmu_notifier_invalidate_range_end(&range);
folio_put(folio);
switch (ret) {
case -EFAULT:
gup_flags |= FOLL_WRITE | FOLL_SPLIT_PMD;
fallthrough;
case -EAGAIN:
goto retry;
default:
break;
}
out:
/* Revert back reference counter if instruction update failed. */
if (do_update_ref_ctr && ret < 0 && ref_ctr_updated)
update_ref_ctr(uprobe, mm, is_register ? -1 : 1);
/* try collapse pmd for compound page */
if (ret > 0)
collapse_pte_mapped_thp(mm, vaddr, false);
return ret < 0 ? ret : 0;
}
/**
* set_swbp - store breakpoint at a given address.
* @auprobe: arch specific probepoint information.
* @vma: the probed virtual memory area.
* @vaddr: the virtual address to insert the opcode.
*
* For mm @mm, store the breakpoint instruction at @vaddr.
* Return 0 (success) or a negative errno.
*/
int __weak set_swbp(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
unsigned long vaddr)
{
return uprobe_write_opcode(auprobe, vma, vaddr, UPROBE_SWBP_INSN, true);
}
/**
* set_orig_insn - Restore the original instruction.
* @vma: the probed virtual memory area.
* @auprobe: arch specific probepoint information.
* @vaddr: the virtual address to insert the opcode.
*
* For mm @mm, restore the original opcode (opcode) at @vaddr.
* Return 0 (success) or a negative errno.
*/
int __weak set_orig_insn(struct arch_uprobe *auprobe,
struct vm_area_struct *vma, unsigned long vaddr)
{
return uprobe_write_opcode(auprobe, vma, vaddr,
*(uprobe_opcode_t *)&auprobe->insn, false);
}
/* uprobe should have guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
refcount_inc(&uprobe->ref);
return uprobe;
}
/*
* uprobe should have guaranteed lifetime, which can be either of:
* - caller already has refcount taken (and wants an extra one);
* - uprobe is RCU protected and won't be freed until after grace period;
* - we are holding uprobes_treelock (for read or write, doesn't matter).
*/
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
if (refcount_inc_not_zero(&uprobe->ref))
return uprobe;
return NULL;
}
static inline bool uprobe_is_active(struct uprobe *uprobe)
{
return !RB_EMPTY_NODE(&uprobe->rb_node);
}
static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu)
{
struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
kfree(uprobe);
}
static void uprobe_free_srcu(struct rcu_head *rcu)
{
struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace);
}
static void uprobe_free_deferred(struct work_struct *work)
{
struct uprobe *uprobe = container_of(work, struct uprobe, work);
write_lock(&uprobes_treelock);
if (uprobe_is_active(uprobe)) {
write_seqcount_begin(&uprobes_seqcount);
rb_erase(&uprobe->rb_node, &uprobes_tree);
write_seqcount_end(&uprobes_seqcount);
}
write_unlock(&uprobes_treelock);
/*
* If application munmap(exec_vma) before uprobe_unregister()
* gets called, we don't get a chance to remove uprobe from
* delayed_uprobe_list from remove_breakpoint(). Do it here.
*/
mutex_lock(&delayed_uprobe_lock);
delayed_uprobe_remove(uprobe, NULL);
mutex_unlock(&delayed_uprobe_lock);
/* start srcu -> rcu_tasks_trace -> kfree chain */
call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu);
}
static void put_uprobe(struct uprobe *uprobe)
{
if (!refcount_dec_and_test(&uprobe->ref))
return;
INIT_WORK(&uprobe->work, uprobe_free_deferred);
schedule_work(&uprobe->work);
}
/* Initialize hprobe as SRCU-protected "leased" uprobe */
static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx)
{
WARN_ON(!uprobe);
hprobe->state = HPROBE_LEASED;
hprobe->uprobe = uprobe;
hprobe->srcu_idx = srcu_idx;
}
/* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */
static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe)
{
hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE;
hprobe->uprobe = uprobe;
hprobe->srcu_idx = -1;
}
/*
* hprobe_consume() fetches hprobe's underlying uprobe and detects whether
* uprobe is SRCU protected or is refcounted. hprobe_consume() can be
* used only once for a given hprobe.
*
* Caller has to call hprobe_finalize() and pass previous hprobe_state, so
* that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever
* is appropriate.
*/
static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate)
{
*hstate = xchg(&hprobe->state, HPROBE_CONSUMED);
switch (*hstate) {
case HPROBE_LEASED:
case HPROBE_STABLE:
return hprobe->uprobe;
case HPROBE_GONE: /* uprobe is NULL, no SRCU */
case HPROBE_CONSUMED: /* uprobe was finalized already, do nothing */
return NULL;
default:
WARN(1, "hprobe invalid state %d", *hstate);
return NULL;
}
}
/*
* Reset hprobe state and, if hprobe was LEASED, release SRCU lock.
* hprobe_finalize() can only be used from current context after
* hprobe_consume() call (which determines uprobe and hstate value).
*/
static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate)
{
switch (hstate) {
case HPROBE_LEASED:
__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
break;
case HPROBE_STABLE:
put_uprobe(hprobe->uprobe);
break;
case HPROBE_GONE:
case HPROBE_CONSUMED:
break;
default:
WARN(1, "hprobe invalid state %d", hstate);
break;
}
}
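/*
 * Illustrative pairing of hprobe_consume() and hprobe_finalize() (sketch
 * only; this mirrors how uprobe_handle_trampoline() below uses them):
 *
 *	enum hprobe_state hstate;
 *	struct uprobe *uprobe;
 *
 *	uprobe = hprobe_consume(&ri->hprobe, &hstate);
 *	if (uprobe)
 *		handle_uretprobe_chain(ri, uprobe, regs);
 *	hprobe_finalize(&ri->hprobe, hstate);
 */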
/*
* Attempt to switch (atomically) uprobe from being SRCU protected (LEASED)
* to refcounted (STABLE) state. Competes with hprobe_consume(); only one of
* them can win the race to perform SRCU unlocking. Whoever wins must perform
* SRCU unlock.
*
* Returns underlying valid uprobe or NULL, if there was no underlying uprobe
* to begin with or we failed to bump its refcount and it's going away.
*
* Returned non-NULL uprobe can be still safely used within an ongoing SRCU
* locked region. If `get` is true, it's guaranteed that non-NULL uprobe has
* an extra refcount for caller to assume and use. Otherwise, it's not
* guaranteed that returned uprobe has a positive refcount, so caller has to
* attempt try_get_uprobe(), if it needs to preserve uprobe beyond current
* SRCU lock region. See dup_utask().
*/
static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
{
enum hprobe_state hstate;
/*
* Caller should guarantee that return_instance is not going to be
* freed from under us. This can be achieved either through holding
* rcu_read_lock() or by owning return_instance in the first place.
*
* Underlying uprobe is itself protected from reuse by SRCU, so ensure
* SRCU lock is held properly.
*/
lockdep_assert(srcu_read_lock_held(&uretprobes_srcu));
hstate = READ_ONCE(hprobe->state);
switch (hstate) {
case HPROBE_STABLE:
/* uprobe has positive refcount, bump refcount, if necessary */
return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe;
case HPROBE_GONE:
/*
* SRCU was unlocked earlier and we didn't manage to take
* uprobe refcnt, so it's effectively NULL
*/
return NULL;
case HPROBE_CONSUMED:
/*
* uprobe was consumed, so it's effectively NULL as far as
* uretprobe processing logic is concerned
*/
return NULL;
case HPROBE_LEASED: {
struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe);
/*
* Try to switch hprobe state, guarding against
* hprobe_consume() or another hprobe_expire() racing with us.
* Note, if we failed to get uprobe refcount, we use special
* HPROBE_GONE state to signal that hprobe->uprobe shouldn't
* be used as it will be freed after SRCU is unlocked.
*/
if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) {
/* We won the race, we are the ones to unlock SRCU */
__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
return get ? get_uprobe(uprobe) : uprobe;
}
/*
* We lost the race, undo refcount bump (if it ever happened),
* unless the caller would like an extra refcount anyway.
*/
if (uprobe && !get)
put_uprobe(uprobe);
/*
* Even if hprobe_consume() or another hprobe_expire() wins
* the state update race and unlocks SRCU from under us, we
* still have a guarantee that the underlying uprobe won't be
* freed due to ongoing caller's SRCU lock region, so we can
* return it regardless. Also, if `get` was true, we also have
* an extra ref for the caller to own. This is used in dup_utask().
*/
return uprobe;
}
default:
WARN(1, "unknown hprobe state %d", hstate);
return NULL;
}
}
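/*
 * Illustrative caller of hprobe_expire() (sketch; old_ri/new_ri are
 * placeholder names, the real usage is dup_utask() further below): the
 * caller holds the SRCU read lock and wants a stable, refcounted uprobe
 * to stash into a new hprobe:
 *
 *	guard(srcu)(&uretprobes_srcu);
 *	uprobe = hprobe_expire(&old_ri->hprobe, true);
 *	hprobe_init_stable(&new_ri->hprobe, uprobe);
 */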
static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
const struct uprobe *r)
{
if (l_inode < r->inode)
return -1;
if (l_inode > r->inode)
return 1;
if (l_offset < r->offset)
return -1;
if (l_offset > r->offset)
return 1;
return 0;
}
#define __node_2_uprobe(node) \
rb_entry((node), struct uprobe, rb_node)
struct __uprobe_key {
struct inode *inode;
loff_t offset;
};
static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
const struct __uprobe_key *a = key;
return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}
static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
struct uprobe *u = __node_2_uprobe(a);
return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}
/*
* Assumes being inside RCU protected region.
* No refcount is taken on returned uprobe.
*/
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
struct __uprobe_key key = {
.inode = inode,
.offset = offset,
};
struct rb_node *node;
unsigned int seq;
lockdep_assert(rcu_read_lock_trace_held());
do {
seq = read_seqcount_begin(&uprobes_seqcount);
node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
/*
* Lockless RB-tree lookups can result only in false negatives.
* If the element is found, it is correct and can be returned
* under RCU protection. If we find nothing, we need to
* validate that seqcount didn't change. If it did, we have to
* try again as we might have missed the element (false
* negative). If seqcount is unchanged, search truly failed.
*/
if (node)
return __node_2_uprobe(node);
} while (read_seqcount_retry(&uprobes_seqcount, seq));
return NULL;
}
/*
* Attempt to insert a new uprobe into uprobes_tree.
*
* If uprobe already exists (for given inode+offset), we just increment
* refcount of previously existing uprobe.
*
* If not, a provided new instance of uprobe is inserted into the tree (with
* assumed initial refcount == 1).
*
* In any case, we return a uprobe instance that ends up being in uprobes_tree.
* Caller has to clean up new uprobe instance, if it ended up not being
* inserted into the tree.
*
* We assume that uprobes_treelock is held for writing.
*/
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
struct rb_node *node;
again:
node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
if (node) {
struct uprobe *u = __node_2_uprobe(node);
if (!try_get_uprobe(u)) {
rb_erase(node, &uprobes_tree);
RB_CLEAR_NODE(&u->rb_node);
goto again;
}
return u;
}
return uprobe;
}
/*
* Acquire uprobes_treelock and insert uprobe into uprobes_tree
* (or reuse existing one, see __insert_uprobe() comments above).
*/
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
struct uprobe *u;
write_lock(&uprobes_treelock);
write_seqcount_begin(&uprobes_seqcount);
u = __insert_uprobe(uprobe);
write_seqcount_end(&uprobes_seqcount);
write_unlock(&uprobes_treelock);
return u;
}
static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
(unsigned long long) cur_uprobe->ref_ctr_offset,
(unsigned long long) uprobe->ref_ctr_offset);
}
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
loff_t ref_ctr_offset)
{
struct uprobe *uprobe, *cur_uprobe;
uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
if (!uprobe)
return ERR_PTR(-ENOMEM);
uprobe->inode = inode;
uprobe->offset = offset;
uprobe->ref_ctr_offset = ref_ctr_offset;
INIT_LIST_HEAD(&uprobe->consumers);
init_rwsem(&uprobe->register_rwsem);
init_rwsem(&uprobe->consumer_rwsem);
RB_CLEAR_NODE(&uprobe->rb_node);
refcount_set(&uprobe->ref, 1);
/* add to uprobes_tree, sorted on inode:offset */
cur_uprobe = insert_uprobe(uprobe);
/* a uprobe exists for this inode:offset combination */
if (cur_uprobe != uprobe) {
if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
ref_ctr_mismatch_warn(cur_uprobe, uprobe);
put_uprobe(cur_uprobe);
kfree(uprobe);
return ERR_PTR(-EINVAL);
}
kfree(uprobe);
uprobe = cur_uprobe;
}
return uprobe;
}
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
static atomic64_t id;
down_write(&uprobe->consumer_rwsem);
list_add_rcu(&uc->cons_node, &uprobe->consumers);
uc->id = (__u64) atomic64_inc_return(&id);
up_write(&uprobe->consumer_rwsem);
}
/*
* For uprobe @uprobe, delete the consumer @uc.
* Should never be called with consumer that's not part of @uprobe->consumers.
*/
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
down_write(&uprobe->consumer_rwsem);
list_del_rcu(&uc->cons_node);
up_write(&uprobe->consumer_rwsem);
}
static int __copy_insn(struct address_space *mapping, struct file *filp,
void *insn, int nbytes, loff_t offset)
{
struct page *page;
/*
* Ensure that the page that has the original instruction is populated
* and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
* see uprobe_register().
*/
if (mapping->a_ops->read_folio)
page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
else
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
uprobe_copy_from_page(page, offset, insn, nbytes);
put_page(page);
return 0;
}
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
struct address_space *mapping = uprobe->inode->i_mapping;
loff_t offs = uprobe->offset;
void *insn = &uprobe->arch.insn;
int size = sizeof(uprobe->arch.insn);
int len, err = -EIO;
/* Copy only available bytes, -EIO if nothing was read */
do {
if (offs >= i_size_read(uprobe->inode))
break;
len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
err = __copy_insn(mapping, filp, insn, len, offs);
if (err)
break;
insn += len;
offs += len;
size -= len;
} while (size);
return err;
}
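/*
 * Worked example of the loop above (illustrative): with PAGE_SIZE == 4096,
 * offs == 0xffc and a 16-byte instruction buffer, the first iteration
 * copies 4 bytes (PAGE_SIZE - (offs & ~PAGE_MASK)) from the end of the
 * first page, and the second iteration copies the remaining 12 bytes from
 * the start of the next page.
 */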
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
struct mm_struct *mm, unsigned long vaddr)
{
int ret = 0;
if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
return ret;
/* TODO: move this into _register, until then we abuse this sem. */
down_write(&uprobe->consumer_rwsem);
if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
goto out;
ret = copy_insn(uprobe, file);
if (ret)
goto out;
ret = -ENOTSUPP;
if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
goto out;
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
if (ret)
goto out;
smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
out:
up_write(&uprobe->consumer_rwsem);
return ret;
}
static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
return !uc->filter || uc->filter(uc, mm);
}
static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
struct uprobe_consumer *uc;
bool ret = false;
down_read(&uprobe->consumer_rwsem);
list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
ret = consumer_filter(uc, mm);
if (ret)
break;
}
up_read(&uprobe->consumer_rwsem);
return ret;
}
static int install_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma,
unsigned long vaddr)
{
struct mm_struct *mm = vma->vm_mm;
bool first_uprobe;
int ret;
ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
if (ret)
return ret;
/*
* set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier();
* the task can hit this breakpoint right after __replace_page().
*/
first_uprobe = !mm_flags_test(MMF_HAS_UPROBES, mm);
if (first_uprobe)
mm_flags_set(MMF_HAS_UPROBES, mm);
ret = set_swbp(&uprobe->arch, vma, vaddr);
if (!ret)
mm_flags_clear(MMF_RECALC_UPROBES, mm);
else if (first_uprobe)
mm_flags_clear(MMF_HAS_UPROBES, mm);
return ret;
}
static int remove_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma,
unsigned long vaddr)
{
struct mm_struct *mm = vma->vm_mm;
mm_flags_set(MMF_RECALC_UPROBES, mm);
return set_orig_insn(&uprobe->arch, vma, vaddr);
}
struct map_info {
struct map_info *next;
struct mm_struct *mm;
unsigned long vaddr;
};
static inline struct map_info *free_map_info(struct map_info *info)
{
struct map_info *next = info->next;
kfree(info);
return next;
}
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
unsigned long pgoff = offset >> PAGE_SHIFT;
struct vm_area_struct *vma;
struct map_info *curr = NULL;
struct map_info *prev = NULL;
struct map_info *info;
int more = 0;
again:
i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
if (!valid_vma(vma, is_register))
continue;
if (!prev && !more) {
/*
* Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
* reclaim. This is optimistic, no harm done if it fails.
*/
prev = kmalloc(sizeof(struct map_info),
GFP_NOWAIT | __GFP_NOMEMALLOC);
if (prev)
prev->next = NULL;
}
if (!prev) {
more++;
continue;
}
if (!mmget_not_zero(vma->vm_mm))
continue;
info = prev;
prev = prev->next;
info->next = curr;
curr = info;
info->mm = vma->vm_mm;
info->vaddr = offset_to_vaddr(vma, offset);
}
i_mmap_unlock_read(mapping);
if (!more)
goto out;
prev = curr;
while (curr) {
mmput(curr->mm);
curr = curr->next;
}
do {
info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
if (!info) {
curr = ERR_PTR(-ENOMEM);
goto out;
}
info->next = prev;
prev = info;
} while (--more);
goto again;
out:
while (prev)
prev = free_map_info(prev);
return curr;
}
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
bool is_register = !!new;
struct map_info *info;
int err = 0;
percpu_down_write(&dup_mmap_sem);
info = build_map_info(uprobe->inode->i_mapping,
uprobe->offset, is_register);
if (IS_ERR(info)) {
err = PTR_ERR(info);
goto out;
}
while (info) {
struct mm_struct *mm = info->mm;
struct vm_area_struct *vma;
if (err && is_register)
goto free;
/*
* We take mmap_lock for writing to avoid the race with
* find_active_uprobe_rcu() which takes mmap_lock for reading.
* Thus this install_breakpoint() can not make
* is_trap_at_addr() true right after find_uprobe_rcu()
* returns NULL in find_active_uprobe_rcu().
*/
mmap_write_lock(mm);
if (check_stable_address_space(mm))
goto unlock;
vma = find_vma(mm, info->vaddr);
if (!vma || !valid_vma(vma, is_register) ||
file_inode(vma->vm_file) != uprobe->inode)
goto unlock;
if (vma->vm_start > info->vaddr ||
vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
goto unlock;
if (is_register) {
/* consult only the "caller", new consumer. */
if (consumer_filter(new, mm))
err = install_breakpoint(uprobe, vma, info->vaddr);
} else if (mm_flags_test(MMF_HAS_UPROBES, mm)) {
if (!filter_chain(uprobe, mm))
err |= remove_breakpoint(uprobe, vma, info->vaddr);
}
unlock:
mmap_write_unlock(mm);
free:
mmput(mm);
info = free_map_info(info);
}
out:
percpu_up_write(&dup_mmap_sem);
return err;
}
/**
* uprobe_unregister_nosync - unregister an already registered probe.
* @uprobe: uprobe to remove
* @uc: identify which probe if multiple probes are colocated.
*/
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
int err;
down_write(&uprobe->register_rwsem);
consumer_del(uprobe, uc);
err = register_for_each_vma(uprobe, NULL);
up_write(&uprobe->register_rwsem);
/* TODO: can't unregister? schedule a worker thread */
if (unlikely(err)) {
uprobe_warn(current, "unregister, leaking uprobe");
return;
}
put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);
void uprobe_unregister_sync(void)
{
/*
* Now that handler_chain() and handle_uretprobe_chain() iterate over
* uprobe->consumers list under RCU protection without holding
* uprobe->register_rwsem, we need to wait for RCU grace period to
* make sure that we can't call into just unregistered
* uprobe_consumer's callbacks anymore. If we don't do that, fast and
* unlucky enough caller can free consumer's memory and cause
* handler_chain() or handle_uretprobe_chain() to do an use-after-free.
*/
synchronize_rcu_tasks_trace();
synchronize_srcu(&uretprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);
/**
* uprobe_register - register a probe
* @inode: the file in which the probe has to be placed.
* @offset: offset from the start of the file.
* @ref_ctr_offset: offset of SDT marker / reference counter
* @uc: information on how to handle the probe.
*
* Apart from the access refcount, uprobe_register() takes a creation
* refcount (through alloc_uprobe) if and only if this @uprobe is getting
* inserted into the rbtree (i.e. the first consumer for an @inode:@offset
* tuple). Creation refcount stops uprobe_unregister from freeing the
* @uprobe even before the register operation is complete. Creation
* refcount is released when the last @uc for the @uprobe
* unregisters. Caller of uprobe_register() is required to keep @inode
* (and the containing mount) referenced.
*
* Return: pointer to the new uprobe on success or an ERR_PTR on failure.
*/
struct uprobe *uprobe_register(struct inode *inode,
loff_t offset, loff_t ref_ctr_offset,
struct uprobe_consumer *uc)
{
struct uprobe *uprobe;
int ret;
/* Uprobe must have at least one set consumer */
if (!uc->handler && !uc->ret_handler)
return ERR_PTR(-EINVAL);
/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
if (!inode->i_mapping->a_ops->read_folio &&
!shmem_mapping(inode->i_mapping))
return ERR_PTR(-EIO);
/* Racy, just to catch the obvious mistakes */
if (offset > i_size_read(inode))
return ERR_PTR(-EINVAL);
/*
* This ensures that uprobe_copy_from_page(), copy_to_page() and
* __update_ref_ctr() can't cross page boundary.
*/
if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
return ERR_PTR(-EINVAL);
if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
return ERR_PTR(-EINVAL);
uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
if (IS_ERR(uprobe))
return uprobe;
down_write(&uprobe->register_rwsem);
consumer_add(uprobe, uc);
ret = register_for_each_vma(uprobe, uc);
up_write(&uprobe->register_rwsem);
if (ret) {
uprobe_unregister_nosync(uprobe, uc);
/*
* Registration might have partially succeeded, so this consumer can
* already be getting called right at this moment. We need to sync
* here. That's OK; it's an unlikely, slow path.
*/
uprobe_unregister_sync();
return ERR_PTR(ret);
}
return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);
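/*
 * Minimal usage sketch (illustrative only; my_handler, my_uc, inode and
 * offset are placeholders supplied by the caller). Note that the consumer
 * must stay alive until uprobe_unregister_sync() returns:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs,
 *			      __u64 *cookie)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	uprobe = uprobe_register(inode, offset, 0, &my_uc);
 *	if (IS_ERR(uprobe))
 *		return PTR_ERR(uprobe);
 *	...
 *	uprobe_unregister_nosync(uprobe, &my_uc);
 *	uprobe_unregister_sync();
 */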
/**
* uprobe_apply - add or remove the breakpoints according to @uc->filter
* @uprobe: uprobe which "owns" the breakpoint
* @uc: consumer which wants to add more or remove some breakpoints
* @add: add or remove the breakpoints
* Return: 0 on success or negative error code.
*/
int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
struct uprobe_consumer *con;
int ret = -ENOENT;
down_write(&uprobe->register_rwsem);
rcu_read_lock_trace();
list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
if (con == uc) {
ret = register_for_each_vma(uprobe, add ? uc : NULL);
break;
}
}
rcu_read_unlock_trace();
up_write(&uprobe->register_rwsem);
return ret;
}
static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
int err = 0;
mmap_write_lock(mm);
for_each_vma(vmi, vma) {
unsigned long vaddr;
loff_t offset;
if (!valid_vma(vma, false) ||
file_inode(vma->vm_file) != uprobe->inode)
continue;
offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
if (uprobe->offset < offset ||
uprobe->offset >= offset + vma->vm_end - vma->vm_start)
continue;
vaddr = offset_to_vaddr(vma, uprobe->offset);
err |= remove_breakpoint(uprobe, vma, vaddr);
}
mmap_write_unlock(mm);
return err;
}
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
struct rb_node *n = uprobes_tree.rb_node;
while (n) {
struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
if (inode < u->inode) {
n = n->rb_left;
} else if (inode > u->inode) {
n = n->rb_right;
} else {
if (max < u->offset)
n = n->rb_left;
else if (min > u->offset)
n = n->rb_right;
else
break;
}
}
return n;
}
/*
* For a given range in vma, build a list of probes that need to be inserted.
*/
static void build_probe_list(struct inode *inode,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct list_head *head)
{
loff_t min, max;
struct rb_node *n, *t;
struct uprobe *u;
INIT_LIST_HEAD(head);
min = vaddr_to_offset(vma, start);
max = min + (end - start) - 1;
read_lock(&uprobes_treelock);
n = find_node_in_range(inode, min, max);
if (n) {
for (t = n; t; t = rb_prev(t)) {
u = rb_entry(t, struct uprobe, rb_node);
if (u->inode != inode || u->offset < min)
break;
/* if uprobe went away, it's safe to ignore it */
if (try_get_uprobe(u))
list_add(&u->pending_list, head);
}
for (t = n; (t = rb_next(t)); ) {
u = rb_entry(t, struct uprobe, rb_node);
if (u->inode != inode || u->offset > max)
break;
/* if uprobe went away, it's safe to ignore it */
if (try_get_uprobe(u))
list_add(&u->pending_list, head);
}
}
read_unlock(&uprobes_treelock);
}
/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
struct list_head *pos, *q;
struct delayed_uprobe *du;
unsigned long vaddr;
int ret = 0, err = 0;
mutex_lock(&delayed_uprobe_lock);
list_for_each_safe(pos, q, &delayed_uprobe_list) {
du = list_entry(pos, struct delayed_uprobe, list);
if (du->mm != vma->vm_mm ||
!valid_ref_ctr_vma(du->uprobe, vma))
continue;
vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
if (ret) {
update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
if (!err)
err = ret;
}
delayed_uprobe_delete(du);
}
mutex_unlock(&delayed_uprobe_lock);
return err;
}
/*
* Called from mmap_region/vma_merge with mm->mmap_lock acquired.
*
* Currently we ignore all errors and always return 0; the callers
* can't handle the failure anyway.
*/
int uprobe_mmap(struct vm_area_struct *vma)
{
struct list_head tmp_list;
struct uprobe *uprobe, *u;
struct inode *inode;
if (no_uprobe_events())
return 0;
if (vma->vm_file &&
(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm))
delayed_ref_ctr_inc(vma);
if (!valid_vma(vma, true))
return 0;
inode = file_inode(vma->vm_file);
if (!inode)
return 0;
mutex_lock(uprobes_mmap_hash(inode));
build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
/*
* We can race with uprobe_unregister(); this uprobe may already have
* been removed. But in that case filter_chain() must return false:
* all consumers have gone away.
*/
list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
if (!fatal_signal_pending(current) &&
filter_chain(uprobe, vma->vm_mm)) {
unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
install_breakpoint(uprobe, vma, vaddr);
}
put_uprobe(uprobe);
}
mutex_unlock(uprobes_mmap_hash(inode));
return 0;
}
static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
loff_t min, max;
struct inode *inode;
struct rb_node *n;
inode = file_inode(vma->vm_file);
min = vaddr_to_offset(vma, start);
max = min + (end - start) - 1;
read_lock(&uprobes_treelock);
n = find_node_in_range(inode, min, max);
read_unlock(&uprobes_treelock);
return !!n;
}
/*
* Called in context of a munmap of a vma.
*/
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (no_uprobe_events() || !valid_vma(vma, false))
return;
if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
return;
if (!mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm) ||
mm_flags_test(MMF_RECALC_UPROBES, vma->vm_mm))
return;
if (vma_has_uprobes(vma, start, end))
mm_flags_set(MMF_RECALC_UPROBES, vma->vm_mm);
}
static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;
vmf->page = area->page;
get_page(vmf->page);
return 0;
}
static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
return -EPERM;
}
static const struct vm_special_mapping xol_mapping = {
.name = "[uprobes]",
.fault = xol_fault,
.mremap = xol_mremap,
};
/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
struct vm_area_struct *vma;
int ret;
if (mmap_write_lock_killable(mm))
return -EINTR;
if (mm->uprobes_state.xol_area) {
ret = -EALREADY;
goto fail;
}
if (!area->vaddr) {
/* Try to map as high as possible, this is only a hint. */
area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(area->vaddr)) {
ret = area->vaddr;
goto fail;
}
}
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO|
VM_SEALED_SYSMAP,
&xol_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
}
ret = 0;
/* pairs with get_xol_area() */
smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
mmap_write_unlock(mm);
return ret;
}
void * __weak arch_uretprobe_trampoline(unsigned long *psize)
{
static uprobe_opcode_t insn = UPROBE_SWBP_INSN;
*psize = UPROBE_SWBP_INSN_SIZE;
return &insn;
}
static struct xol_area *__create_xol_area(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
unsigned long insns_size;
struct xol_area *area;
void *insns;
area = kzalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
goto out;
area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
GFP_KERNEL);
if (!area->bitmap)
goto free_area;
area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
if (!area->page)
goto free_bitmap;
area->vaddr = vaddr;
init_waitqueue_head(&area->wq);
/* Reserve the 1st slot for uprobe_get_trampoline_vaddr() */
set_bit(0, area->bitmap);
insns = arch_uretprobe_trampoline(&insns_size);
arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);
if (!xol_add_vma(mm, area))
return area;
__free_page(area->page);
free_bitmap:
kfree(area->bitmap);
free_area:
kfree(area);
out:
return NULL;
}
/*
* get_xol_area - Allocate process's xol_area if necessary.
* This area will be used for storing instructions for execution out of line.
*
* Returns the allocated area or NULL.
*/
static struct xol_area *get_xol_area(void)
{
struct mm_struct *mm = current->mm;
struct xol_area *area;
if (!mm->uprobes_state.xol_area)
__create_xol_area(0);
/* Pairs with xol_add_vma() smp_store_release() */
area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
return area;
}
void __weak arch_uprobe_clear_state(struct mm_struct *mm)
{
}
void __weak arch_uprobe_init_state(struct mm_struct *mm)
{
}
/*
* uprobe_clear_state - Free the area allocated for slots.
*/
void uprobe_clear_state(struct mm_struct *mm)
{
struct xol_area *area = mm->uprobes_state.xol_area;
mutex_lock(&delayed_uprobe_lock);
delayed_uprobe_remove(NULL, mm);
mutex_unlock(&delayed_uprobe_lock);
arch_uprobe_clear_state(mm);
if (!area)
return;
put_page(area->page);
kfree(area->bitmap);
kfree(area);
}
void uprobe_start_dup_mmap(void)
{
percpu_down_read(&dup_mmap_sem);
}
void uprobe_end_dup_mmap(void)
{
percpu_up_read(&dup_mmap_sem);
}
void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
if (mm_flags_test(MMF_HAS_UPROBES, oldmm)) {
mm_flags_set(MMF_HAS_UPROBES, newmm);
/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
mm_flags_set(MMF_RECALC_UPROBES, newmm);
}
}
static unsigned long xol_get_slot_nr(struct xol_area *area)
{
unsigned long slot_nr;
slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
if (slot_nr < UINSNS_PER_PAGE) {
if (!test_and_set_bit(slot_nr, area->bitmap))
return slot_nr;
}
return UINSNS_PER_PAGE;
}
/*
* xol_get_insn_slot - allocate a slot for xol.
*/
static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
{
struct xol_area *area = get_xol_area();
unsigned long slot_nr;
if (!area)
return false;
wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);
utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
return true;
}
/*
* xol_free_insn_slot - free the slot allocated by xol_get_insn_slot()
*/
static void xol_free_insn_slot(struct uprobe_task *utask)
{
struct xol_area *area = current->mm->uprobes_state.xol_area;
unsigned long offset = utask->xol_vaddr - area->vaddr;
unsigned int slot_nr;
utask->xol_vaddr = 0;
/* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */
if (WARN_ON_ONCE(offset >= PAGE_SIZE))
return;
slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
clear_bit(slot_nr, area->bitmap);
smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
if (waitqueue_active(&area->wq))
wake_up(&area->wq);
}
void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
/* Initialize the slot */
copy_to_page(page, vaddr, src, len);
/*
* We probably need flush_icache_user_page() but it needs vma.
* This should work on most of architectures by default. If
* architecture needs to do something different it can define
* its own version of the function.
*/
flush_dcache_page(page);
}
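/*
 * Illustrative sketch of an architecture override (not the generic code
 * above): an arch with incoherent I/D caches could provide its own copy
 * routine that also synchronizes the instruction cache. The helper
 * arch_sync_icache() below is a hypothetical name used only for this
 * example:
 *
 *	void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 *				   void *src, unsigned long len)
 *	{
 *		void *kaddr = kmap_atomic(page);
 *		void *dst = kaddr + (vaddr & ~PAGE_MASK);
 *
 *		memcpy(dst, src, len);
 *		arch_sync_icache((unsigned long)dst, (unsigned long)dst + len);
 *		kunmap_atomic(kaddr);
 *	}
 */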
/**
* uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
* @regs: Reflects the saved state of the task after it has hit a breakpoint
* instruction.
* Return the address of the breakpoint instruction.
*/
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
if (unlikely(utask && utask->active_uprobe))
return utask->vaddr;
return instruction_pointer(regs);
}
static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri)
{
ri->cons_cnt = 0;
ri->next = utask->ri_pool;
utask->ri_pool = ri;
}
static struct return_instance *ri_pool_pop(struct uprobe_task *utask)
{
struct return_instance *ri = utask->ri_pool;
if (likely(ri))
utask->ri_pool = ri->next;
return ri;
}
static void ri_free(struct return_instance *ri)
{
kfree(ri->extra_consumers);
kfree_rcu(ri, rcu);
}
static void free_ret_instance(struct uprobe_task *utask,
struct return_instance *ri, bool cleanup_hprobe)
{
unsigned seq;
if (cleanup_hprobe) {
enum hprobe_state hstate;
(void)hprobe_consume(&ri->hprobe, &hstate);
hprobe_finalize(&ri->hprobe, hstate);
}
/*
* At this point return_instance is unlinked from utask's
* return_instances list and this has become visible to ri_timer().
* If seqcount now indicates that ri_timer's return instance
* processing loop isn't active, we can return ri into the pool of
* to-be-reused return instances for future uretprobes. If ri_timer()
* happens to be running right now, though, we fall back to safety and
* just perform RCU-delayed freeing of ri.
* Admittedly, this is a rather simple use of seqcount, but it nicely
* abstracts away all the necessary memory barriers, so we use
* a well-supported kernel primitive here.
*/
if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
/* immediate reuse of ri without RCU GP is OK */
ri_pool_push(utask, ri);
} else {
/* we might be racing with ri_timer(), so play it safe */
ri_free(ri);
}
}
/*
* Called with no locks held.
* Called in context of an exiting or an exec-ing thread.
*/
void uprobe_free_utask(struct task_struct *t)
{
struct uprobe_task *utask = t->utask;
struct return_instance *ri, *ri_next;
if (!utask)
return;
t->utask = NULL;
WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);
timer_delete_sync(&utask->ri_timer);
ri = utask->return_instances;
while (ri) {
ri_next = ri->next;
free_ret_instance(utask, ri, true /* cleanup_hprobe */);
ri = ri_next;
}
/* free_ret_instance() above might add to ri_pool, so this loop should come last */
ri = utask->ri_pool;
while (ri) {
ri_next = ri->next;
ri_free(ri);
ri = ri_next;
}
kfree(utask);
}
#define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */
#define for_each_ret_instance_rcu(pos, head) \
for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next))
static void ri_timer(struct timer_list *timer)
{
struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer);
struct return_instance *ri;
/* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */
guard(srcu)(&uretprobes_srcu);
/* RCU protects return_instance from freeing. */
guard(rcu)();
/*
* See free_ret_instance() for notes on seqcount use.
* We also employ raw API variants to avoid lockdep false-positive
* warning complaining about enabled preemption. The timer can only be
* invoked once for a uprobe_task. Therefore there can only be one
* writer. The reader does not require an even sequence count to make
* progress, so it is OK to remain preemptible on PREEMPT_RT.
*/
raw_write_seqcount_begin(&utask->ri_seqcount);
for_each_ret_instance_rcu(ri, utask->return_instances)
hprobe_expire(&ri->hprobe, false);
raw_write_seqcount_end(&utask->ri_seqcount);
}
static struct uprobe_task *alloc_utask(void)
{
struct uprobe_task *utask;
utask = kzalloc(sizeof(*utask), GFP_KERNEL);
if (!utask)
return NULL;
timer_setup(&utask->ri_timer, ri_timer, 0);
seqcount_init(&utask->ri_seqcount);
return utask;
}
/*
* Allocate a uprobe_task object for the task if necessary.
* Called when the thread hits a breakpoint.
*
* Returns:
* - pointer to new uprobe_task on success
* - NULL otherwise
*/
static struct uprobe_task *get_utask(void)
{
if (!current->utask)
current->utask = alloc_utask();
return current->utask;
}
static struct return_instance *alloc_return_instance(struct uprobe_task *utask)
{
struct return_instance *ri;
ri = ri_pool_pop(utask);
if (ri)
return ri;
ri = kzalloc(sizeof(*ri), GFP_KERNEL);
if (!ri)
return ZERO_SIZE_PTR;
return ri;
}
static struct return_instance *dup_return_instance(struct return_instance *old)
{
struct return_instance *ri;
ri = kmemdup(old, sizeof(*ri), GFP_KERNEL);
if (!ri)
return NULL;
if (unlikely(old->cons_cnt > 1)) {
ri->extra_consumers = kmemdup(old->extra_consumers,
sizeof(ri->extra_consumers[0]) * (old->cons_cnt - 1),
GFP_KERNEL);
if (!ri->extra_consumers) {
kfree(ri);
return NULL;
}
}
return ri;
}
static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
struct uprobe_task *n_utask;
struct return_instance **p, *o, *n;
struct uprobe *uprobe;
n_utask = alloc_utask();
if (!n_utask)
return -ENOMEM;
t->utask = n_utask;
/* protect uprobes from freeing; we'll need to try_get_uprobe() them */
guard(srcu)(&uretprobes_srcu);
p = &n_utask->return_instances;
for (o = o_utask->return_instances; o; o = o->next) {
n = dup_return_instance(o);
if (!n)
return -ENOMEM;
/* if uprobe is non-NULL, we'll have an extra refcount for uprobe */
uprobe = hprobe_expire(&o->hprobe, true);
/*
* New utask will have stable properly refcounted uprobe or
* NULL. Even if we failed to get refcounted uprobe, we still
* need to preserve full set of return_instances for proper
* uretprobe handling and nesting in forked task.
*/
hprobe_init_stable(&n->hprobe, uprobe);
n->next = NULL;
rcu_assign_pointer(*p, n);
p = &n->next;
n_utask->depth++;
}
return 0;
}
static void dup_xol_work(struct callback_head *work)
{
if (current->flags & PF_EXITING)
return;
if (!__create_xol_area(current->utask->dup_xol_addr) &&
!fatal_signal_pending(current))
uprobe_warn(current, "dup xol area");
}
/*
* Called in context of a new clone/fork from copy_process.
*/
void uprobe_copy_process(struct task_struct *t, u64 flags)
{
struct uprobe_task *utask = current->utask;
struct mm_struct *mm = current->mm;
struct xol_area *area;
t->utask = NULL;
if (!utask || !utask->return_instances)
return;
if (mm == t->mm && !(flags & CLONE_VFORK))
return;
if (dup_utask(t, utask))
return uprobe_warn(t, "dup ret instances");
/* The task can fork() after dup_xol_work() fails */
area = mm->uprobes_state.xol_area;
if (!area)
return uprobe_warn(t, "dup xol area");
if (mm == t->mm)
return;
t->utask->dup_xol_addr = area->vaddr;
init_task_work(&t->utask->dup_xol_work, dup_xol_work);
task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}
/*
* The current area->vaddr notion assumes the trampoline address is always
* equal to area->vaddr.
*
* Returns -1 in case the xol_area is not allocated.
*/
unsigned long uprobe_get_trampoline_vaddr(void)
{
unsigned long trampoline_vaddr = UPROBE_NO_TRAMPOLINE_VADDR;
struct xol_area *area;
/* Pairs with xol_add_vma() smp_store_release() */
area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
if (area)
trampoline_vaddr = area->vaddr;
return trampoline_vaddr;
}
static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
struct pt_regs *regs)
{
struct return_instance *ri = utask->return_instances, *ri_next;
enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
ri_next = ri->next;
rcu_assign_pointer(utask->return_instances, ri_next);
utask->depth--;
free_ret_instance(utask, ri, true /* cleanup_hprobe */);
ri = ri_next;
}
}
static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
struct return_instance *ri)
{
struct uprobe_task *utask = current->utask;
unsigned long orig_ret_vaddr, trampoline_vaddr;
bool chained;
int srcu_idx;
if (!get_xol_area())
goto free;
if (utask->depth >= MAX_URETPROBE_DEPTH) {
printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
" nestedness limit pid/tgid=%d/%d\n",
current->pid, current->tgid);
goto free;
}
trampoline_vaddr = uprobe_get_trampoline_vaddr();
orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
if (orig_ret_vaddr == -1)
goto free;
/* drop the entries invalidated by longjmp() */
chained = (orig_ret_vaddr == trampoline_vaddr);
cleanup_return_instances(utask, chained, regs);
/*
* We don't want to keep the trampoline address on the stack; rather, keep
* the original return address of the first caller through all the
* subsequent instances. This also makes breakpoint unwrapping easier.
*/
if (chained) {
if (!utask->return_instances) {
/*
* This situation is not possible. Likely we have an
* attack from user-space.
*/
uprobe_warn(current, "handle tail call");
goto free;
}
orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
}
/* __srcu_read_lock() because SRCU lock survives switch to user space */
srcu_idx = __srcu_read_lock(&uretprobes_srcu);
ri->func = instruction_pointer(regs);
ri->stack = user_stack_pointer(regs);
ri->orig_ret_vaddr = orig_ret_vaddr;
ri->chained = chained;
utask->depth++;
hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx);
ri->next = utask->return_instances;
rcu_assign_pointer(utask->return_instances, ri);
mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD);
return;
free:
ri_free(ri);
}
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
struct uprobe_task *utask = current->utask;
int err;
if (!try_get_uprobe(uprobe))
return -EINVAL;
if (!xol_get_insn_slot(uprobe, utask)) {
err = -ENOMEM;
goto err_out;
}
utask->vaddr = bp_vaddr;
err = arch_uprobe_pre_xol(&uprobe->arch, regs);
if (unlikely(err)) {
xol_free_insn_slot(utask);
goto err_out;
}
utask->active_uprobe = uprobe;
utask->state = UTASK_SSTEP;
return 0;
err_out:
put_uprobe(uprobe);
return err;
}
/*
* If we are singlestepping, then ensure this thread is not connected to
* non-fatal signals until completion of singlestep. When xol insn itself
* triggers the signal, restart the original insn even if the task is
* already SIGKILL'ed (since coredump should report the correct ip). This
* is even more important if the task has a handler for SIGSEGV/etc.: the
* _same_ instruction should be repeated again after return from the signal
* handler, and SSTEP can never finish in this case.
*/
bool uprobe_deny_signal(void)
{
struct task_struct *t = current;
struct uprobe_task *utask = t->utask;
if (likely(!utask || !utask->active_uprobe))
return false;
WARN_ON_ONCE(utask->state != UTASK_SSTEP);
if (task_sigpending(t)) {
utask->signal_denied = true;
clear_tsk_thread_flag(t, TIF_SIGPENDING);
if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
utask->state = UTASK_SSTEP_TRAPPED;
set_tsk_thread_flag(t, TIF_UPROBE);
}
}
return true;
}
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
for_each_vma(vmi, vma) {
if (!valid_vma(vma, false))
continue;
/*
* This is not strictly accurate, we can race with
* uprobe_unregister() and see the already removed
* uprobe if delete_uprobe() was not yet called.
* Or this uprobe can be filtered out.
*/
if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
return;
}
mm_flags_clear(MMF_HAS_UPROBES, mm);
}
static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
struct page *page;
uprobe_opcode_t opcode;
int result;
if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
return -EINVAL;
pagefault_disable();
result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
pagefault_enable();
if (likely(result == 0))
goto out;
result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
if (result < 0)
return result;
uprobe_copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
put_page(page);
out:
/* This needs to return true for any variant of the trap insn */
return is_trap_insn(&opcode);
}
static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr)
{
struct mm_struct *mm = current->mm;
struct uprobe *uprobe = NULL;
struct vm_area_struct *vma;
struct file *vm_file;
loff_t offset;
unsigned int seq;
guard(rcu)();
if (!mmap_lock_speculate_try_begin(mm, &seq))
return NULL;
vma = vma_lookup(mm, bp_vaddr);
if (!vma)
return NULL;
/*
* vm_file memory can be reused for another instance of struct file,
* but can't be freed from under us, so it's safe to read fields from
* it, even if the values are some garbage values; ultimately
* find_uprobe_rcu() + mmap_lock_speculation_end() check will ensure
* that whatever we speculatively found is correct
*/
vm_file = READ_ONCE(vma->vm_file);
if (!vm_file)
return NULL;
offset = (loff_t)(vma->vm_pgoff << PAGE_SHIFT) + (bp_vaddr - vma->vm_start);
uprobe = find_uprobe_rcu(vm_file->f_inode, offset);
if (!uprobe)
return NULL;
/* now double check that nothing about MM changed */
if (mmap_lock_speculate_retry(mm, seq))
return NULL;
return uprobe;
}
/* assumes being inside RCU protected region */
static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
{
struct mm_struct *mm = current->mm;
struct uprobe *uprobe = NULL;
struct vm_area_struct *vma;
uprobe = find_active_uprobe_speculative(bp_vaddr);
if (uprobe)
return uprobe;
mmap_read_lock(mm);
vma = vma_lookup(mm, bp_vaddr);
if (vma) {
if (vma->vm_file) {
struct inode *inode = file_inode(vma->vm_file);
loff_t offset = vaddr_to_offset(vma, bp_vaddr);
uprobe = find_uprobe_rcu(inode, offset);
}
if (!uprobe)
*is_swbp = is_trap_at_addr(mm, bp_vaddr);
} else {
*is_swbp = -EFAULT;
}
if (!uprobe && mm_flags_test_and_clear(MMF_RECALC_UPROBES, mm))
mmf_recalc_uprobes(mm);
mmap_read_unlock(mm);
return uprobe;
}
static struct return_instance *push_consumer(struct return_instance *ri, __u64 id, __u64 cookie)
{
struct return_consumer *ric;
if (unlikely(ri == ZERO_SIZE_PTR))
return ri;
if (unlikely(ri->cons_cnt > 0)) {
ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL);
if (!ric) {
ri_free(ri);
return ZERO_SIZE_PTR;
}
ri->extra_consumers = ric;
}
ric = likely(ri->cons_cnt == 0) ? &ri->consumer : &ri->extra_consumers[ri->cons_cnt - 1];
ric->id = id;
ric->cookie = cookie;
ri->cons_cnt++;
return ri;
}
static struct return_consumer *
return_consumer_find(struct return_instance *ri, int *iter, int id)
{
struct return_consumer *ric;
int idx;
for (idx = *iter; idx < ri->cons_cnt; idx++)
{
ric = likely(idx == 0) ? &ri->consumer : &ri->extra_consumers[idx - 1];
if (ric->id == id) {
*iter = idx + 1;
return ric;
}
}
return NULL;
}
static bool ignore_ret_handler(int rc)
{
return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE;
}
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
struct uprobe_consumer *uc;
bool has_consumers = false, remove = true;
struct return_instance *ri = NULL;
struct uprobe_task *utask = current->utask;
utask->auprobe = &uprobe->arch;
list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
bool session = uc->handler && uc->ret_handler;
__u64 cookie = 0;
int rc = 0;
if (uc->handler) {
rc = uc->handler(uc, regs, &cookie);
WARN(rc < 0 || rc > 2,
"bad rc=0x%x from %ps()\n", rc, uc->handler);
}
remove &= rc == UPROBE_HANDLER_REMOVE;
has_consumers = true;
if (!uc->ret_handler || ignore_ret_handler(rc))
continue;
if (!ri)
ri = alloc_return_instance(utask);
if (session)
ri = push_consumer(ri, uc->id, cookie);
}
utask->auprobe = NULL;
if (!ZERO_OR_NULL_PTR(ri))
prepare_uretprobe(uprobe, regs, ri);
if (remove && has_consumers) {
down_read(&uprobe->register_rwsem);
/* re-check that removal is still required, this time under lock */
if (!filter_chain(uprobe, current->mm)) {
WARN_ON(!uprobe_is_active(uprobe));
unapply_uprobe(uprobe, current->mm);
}
up_read(&uprobe->register_rwsem);
}
}
static void
handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs)
{
struct return_consumer *ric;
struct uprobe_consumer *uc;
int ric_idx = 0;
/* all consumers unsubscribed meanwhile */
if (unlikely(!uprobe))
return;
rcu_read_lock_trace();
list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
bool session = uc->handler && uc->ret_handler;
if (uc->ret_handler) {
ric = return_consumer_find(ri, &ric_idx, uc->id);
if (!session || ric)
uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
}
}
rcu_read_unlock_trace();
}
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
bool chained;
do {
chained = ri->chained;
ri = ri->next; /* can't be NULL if chained */
} while (chained);
return ri;
}
void uprobe_handle_trampoline(struct pt_regs *regs)
{
struct uprobe_task *utask;
struct return_instance *ri, *ri_next, *next_chain;
struct uprobe *uprobe;
enum hprobe_state hstate;
bool valid;
utask = current->utask;
if (!utask)
goto sigill;
ri = utask->return_instances;
if (!ri)
goto sigill;
do {
/*
* We should throw out the frames invalidated by longjmp().
* If this chain is valid, then the next one should be alive
* or NULL; the latter case means that nobody but ri->func
* could hit this trampoline on return. TODO: sigaltstack().
*/
next_chain = find_next_ret_chain(ri);
valid = !next_chain || arch_uretprobe_is_alive(next_chain, RP_CHECK_RET, regs);
instruction_pointer_set(regs, ri->orig_ret_vaddr);
do {
/* pop current instance from the stack of pending return instances,
* as it's not pending anymore: we just fixed up original
* instruction pointer in regs and are about to call handlers;
* this allows fixup_uretprobe_trampoline_entries() to properly fix up
* captured stack traces from uretprobe handlers, in which pending
* trampoline addresses on the stack are replaced with correct
* original return addresses
*/
ri_next = ri->next;
rcu_assign_pointer(utask->return_instances, ri_next);
utask->depth--;
uprobe = hprobe_consume(&ri->hprobe, &hstate);
if (valid)
handle_uretprobe_chain(ri, uprobe, regs);
hprobe_finalize(&ri->hprobe, hstate);
/* We already took care of hprobe, no need to waste more time on that. */
free_ret_instance(utask, ri, false /* !cleanup_hprobe */);
ri = ri_next;
} while (ri != next_chain);
} while (!valid);
return;
sigill:
uprobe_warn(current, "handle uretprobe, sending SIGILL.");
force_sig(SIGILL);
}
bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
return false;
}
bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
return true;
}
void __weak arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr)
{
}
/*
* Run handler and ask thread to singlestep.
* Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
*/
static void handle_swbp(struct pt_regs *regs)
{
struct uprobe *uprobe;
unsigned long bp_vaddr;
int is_swbp;
bp_vaddr = uprobe_get_swbp_addr(regs);
if (bp_vaddr == uprobe_get_trampoline_vaddr())
return uprobe_handle_trampoline(regs);
rcu_read_lock_trace();
uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
if (!uprobe) {
if (is_swbp > 0) {
/* No matching uprobe; signal SIGTRAP. */
force_sig(SIGTRAP);
} else {
/*
* Either we raced with uprobe_unregister() or we can't
* access this memory. The latter is only possible if
* another thread plays with our ->mm. In both cases
* we can simply restart. If this vma was unmapped we
* can pretend this insn was not executed yet and get
* the (correct) SIGSEGV after restart.
*/
instruction_pointer_set(regs, bp_vaddr);
}
goto out;
}
/* change it in advance for ->handler() and restart */
instruction_pointer_set(regs, bp_vaddr);
/*
* TODO: move copy_insn/etc into _register and remove this hack.
* After we hit the bp, _unregister + _register can install the
* new and not-yet-analyzed uprobe at the same address, restart.
*/
if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
goto out;
/*
* Pairs with the smp_wmb() in prepare_uprobe().
*
* Guarantees that if we see the UPROBE_COPY_INSN bit set, then
* we must also see the stores to &uprobe->arch performed by the
* prepare_uprobe() call.
*/
smp_rmb();
/* Tracing handlers use ->utask to communicate with fetch methods */
if (!get_utask())
goto out;
if (arch_uprobe_ignore(&uprobe->arch, regs))
goto out;
handler_chain(uprobe, regs);
/* Try to optimize after first hit. */
arch_uprobe_optimize(&uprobe->arch, bp_vaddr);
/*
* If user decided to take execution elsewhere, it makes little sense
* to execute the original instruction, so let's skip it.
*/
if (instruction_pointer(regs) != bp_vaddr)
goto out;
if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
goto out;
if (pre_ssout(uprobe, regs, bp_vaddr))
goto out;
out:
/* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
rcu_read_unlock_trace();
}
void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr)
{
struct uprobe *uprobe;
int is_swbp;
guard(rcu_tasks_trace)();
uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
if (!uprobe)
return;
if (!get_utask())
return;
if (arch_uprobe_ignore(&uprobe->arch, regs))
return;
handler_chain(uprobe, regs);
}
/*
* Perform required fix-ups and disable singlestep.
* Allow pending signals to take effect.
*/
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
struct uprobe *uprobe;
int err = 0;
uprobe = utask->active_uprobe;
if (utask->state == UTASK_SSTEP_ACK)
err = arch_uprobe_post_xol(&uprobe->arch, regs);
else if (utask->state == UTASK_SSTEP_TRAPPED)
arch_uprobe_abort_xol(&uprobe->arch, regs);
else
WARN_ON_ONCE(1);
put_uprobe(uprobe);
utask->active_uprobe = NULL;
utask->state = UTASK_RUNNING;
xol_free_insn_slot(utask);
if (utask->signal_denied) {
set_thread_flag(TIF_SIGPENDING);
utask->signal_denied = false;
}
if (unlikely(err)) {
uprobe_warn(current, "execute the probed insn, sending SIGILL.");
force_sig(SIGILL);
}
}
/*
* On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
* allows the thread to return from interrupt. After that handle_swbp()
* sets utask->active_uprobe.
*
* On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
* and allows the thread to return from interrupt.
*
* While returning to userspace, thread notices the TIF_UPROBE flag and calls
* uprobe_notify_resume().
*/
void uprobe_notify_resume(struct pt_regs *regs)
{
struct uprobe_task *utask;
clear_thread_flag(TIF_UPROBE);
utask = current->utask;
if (utask && utask->active_uprobe)
handle_singlestep(utask, regs);
else
handle_swbp(regs);
}
/*
* uprobe_pre_sstep_notifier gets called from interrupt context as part of
* notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
*/
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
if (!current->mm)
return 0;
if (!mm_flags_test(MMF_HAS_UPROBES, current->mm) &&
(!current->utask || !current->utask->return_instances))
return 0;
set_thread_flag(TIF_UPROBE);
return 1;
}
/*
* uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
* mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
*/
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
if (!current->mm || !utask || !utask->active_uprobe)
/* task is currently not uprobed */
return 0;
utask->state = UTASK_SSTEP_ACK;
set_thread_flag(TIF_UPROBE);
return 1;
}
static struct notifier_block uprobe_exception_nb = {
.notifier_call = arch_uprobe_exception_notify,
.priority = INT_MAX-1, /* notified after kprobes, kgdb */
};
void __init uprobes_init(void)
{
int i;
for (i = 0; i < UPROBES_HASH_SZ; i++)
mutex_init(&uprobes_mmap_mutex[i]);
BUG_ON(register_die_notifier(&uprobe_exception_nb));
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Security-Enhanced Linux (SELinux) security module
*
* This file contains the SELinux XFRM hook function implementations.
*
* Authors: Serge Hallyn <sergeh@us.ibm.com>
* Trent Jaeger <jaegert@us.ibm.com>
*
* Updated: Venkat Yekkirala <vyekkirala@TrustedCS.com>
*
* Granular IPSec Associations for use in MLS environments.
*
* Copyright (C) 2005 International Business Machines Corporation
* Copyright (C) 2006 Trusted Computer Solutions, Inc.
*/
/*
* USAGE:
* NOTES:
* 1. Make sure to enable the following options in your kernel config:
* CONFIG_SECURITY=y
* CONFIG_SECURITY_NETWORK=y
* CONFIG_SECURITY_NETWORK_XFRM=y
* CONFIG_SECURITY_SELINUX=m/y
* ISSUES:
* 1. Caching packets, so they are not dropped during negotiation
* 2. Emulating a reasonable SO_PEERSEC across machines
* 3. Testing addition of sk_policy's with security context via setsockopt
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/xfrm.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <linux/atomic.h>
#include "avc.h"
#include "objsec.h"
#include "xfrm.h"
/* Labeled XFRM instance counter */
atomic_t selinux_xfrm_refcount __read_mostly = ATOMIC_INIT(0);
/*
* Returns true if the context is an LSM/SELinux context.
*/
static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
{
return (ctx &&
(ctx->ctx_doi == XFRM_SC_DOI_LSM) &&
(ctx->ctx_alg == XFRM_SC_ALG_SELINUX));
}
/*
* Returns true if the xfrm contains a security blob for SELinux.
*/
static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
{
return selinux_authorizable_ctx(x->security);
}
/*
* Allocates an xfrm_sec_ctx and populates it using the security context
* supplied in the xfrm_user_sec_ctx.
*/
static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *uctx,
gfp_t gfp)
{
int rc;
struct xfrm_sec_ctx *ctx = NULL;
u32 str_len;
if (ctxp == NULL || uctx == NULL || uctx->ctx_doi != XFRM_SC_DOI_LSM ||
uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
return -EINVAL;
str_len = uctx->ctx_len;
if (str_len >= PAGE_SIZE)
return -ENOMEM;
ctx = kmalloc(struct_size(ctx, ctx_str, str_len + 1), gfp);
if (!ctx)
return -ENOMEM;
ctx->ctx_doi = XFRM_SC_DOI_LSM;
ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
ctx->ctx_len = str_len + 1;
memcpy(ctx->ctx_str, &uctx[1], str_len);
ctx->ctx_str[str_len] = '\0';
rc = security_context_to_sid(ctx->ctx_str, str_len,
&ctx->ctx_sid, gfp);
if (rc)
goto err;
rc = avc_has_perm(current_sid(), ctx->ctx_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
if (rc)
goto err;
*ctxp = ctx;
atomic_inc(&selinux_xfrm_refcount);
return 0;
err:
kfree(ctx);
return rc;
}
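/*
 * Illustrative layout of the input this function expects (sketch, not part
 * of this file): the variable-length context string follows the
 * xfrm_user_sec_ctx header in memory, which is why the copy above reads
 * from &uctx[1]. A userspace buffer for a context string ctx_str would be
 * built roughly as:
 *
 *	size_t len = strlen(ctx_str);
 *	struct xfrm_user_sec_ctx *uctx = malloc(sizeof(*uctx) + len);
 *
 *	uctx->len = sizeof(*uctx) + len;
 *	uctx->exttype = XFRMA_SEC_CTX;
 *	uctx->ctx_alg = XFRM_SC_ALG_SELINUX;
 *	uctx->ctx_doi = XFRM_SC_DOI_LSM;
 *	uctx->ctx_len = len;
 *	memcpy(uctx + 1, ctx_str, len);
 */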
/*
* Free the xfrm_sec_ctx structure.
*/
static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
{
if (!ctx)
return;
atomic_dec(&selinux_xfrm_refcount);
kfree(ctx);
}
/*
* Authorize the deletion of a labeled SA or policy rule.
*/
static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
{
if (!ctx)
return 0;
return avc_has_perm(current_sid(), ctx->ctx_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
NULL);
}
/*
* LSM hook implementation that authorizes that a flow can use a xfrm policy
* rule.
*/
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
int rc;
/* All flows should be treated as polmatch'ing an otherwise applicable
* "non-labeled" policy. This would prevent inadvertent "leaks". */
if (!ctx)
return 0;
/* Context sid is either set to label or ANY_ASSOC */
if (!selinux_authorizable_ctx(ctx))
return -EINVAL;
rc = avc_has_perm(fl_secid, ctx->ctx_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
return (rc == -EACCES ? -ESRCH : rc);
}
/*
* LSM hook implementation that authorizes that a state matches
* the given policy, flow combo.
*/
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic)
{
u32 state_sid;
u32 flic_sid;
if (!xp->security)
if (x->security)
/* unlabeled policy and labeled SA can't match */
return 0;
else
/* unlabeled policy and unlabeled SA match all flows */
return 1;
else
if (!x->security)
/* unlabeled SA and labeled policy can't match */
return 0;
else
if (!selinux_authorizable_xfrm(x))
/* Not a SELinux-labeled SA */
return 0;
state_sid = x->security->ctx_sid;
flic_sid = flic->flowic_secid;
if (flic_sid != state_sid)
return 0;
/* We don't need a separate SA Vs. policy polmatch check since the SA
* is now of the same label as the flow and a flow Vs. policy polmatch
* check had already happened in selinux_xfrm_policy_lookup() above. */
return (avc_has_perm(flic_sid, state_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
NULL) ? 0 : 1);
}
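/*
* Return the SID of the labeled SA attached to the skb's output route, or
* SECSID_NULL if there is none or it is not an SELinux-labeled SA.
*/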
static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x;
if (dst == NULL)
return SECSID_NULL;
x = dst->xfrm;
if (x == NULL || !selinux_authorizable_xfrm(x))
return SECSID_NULL;
return x->security->ctx_sid;
}
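/*
* Return via @sid the SID of the labeled SA(s) found in the skb's security
* path, or SECSID_NULL if there is none. With @ckall set, every labeled SA
* must carry the same SID; a mismatch is reported as -EINVAL.
*/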
static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
u32 *sid, int ckall)
{
u32 sid_session = SECSID_NULL;
struct sec_path *sp = skb_sec_path(skb);
if (sp) {
int i;
for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
if (selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
if (sid_session == SECSID_NULL) {
sid_session = ctx->ctx_sid;
if (!ckall)
goto out;
} else if (sid_session != ctx->ctx_sid) {
*sid = SECSID_NULL;
return -EINVAL;
}
}
}
}
out:
*sid = sid_session;
return 0;
}
/*
* LSM hook implementation that checks and/or returns the xfrm sid for the
* incoming packet.
*/
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
{
if (skb == NULL) {
*sid = SECSID_NULL;
return 0;
}
return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
}
int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
{
int rc;
rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
if (rc == 0 && *sid == SECSID_NULL)
*sid = selinux_xfrm_skb_sid_egress(skb);
return rc;
}
/*
* LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *uctx,
gfp_t gfp)
{
return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
}
/*
* LSM hook implementation that copies security data structure from old to new
* for policy cloning.
*/
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
struct xfrm_sec_ctx *new_ctx;
if (!old_ctx)
return 0;
new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
GFP_ATOMIC);
if (!new_ctx)
return -ENOMEM;
atomic_inc(&selinux_xfrm_refcount);
*new_ctxp = new_ctx;
return 0;
}
/*
* LSM hook implementation that frees xfrm_sec_ctx security information.
*/
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
selinux_xfrm_free(ctx);
}
/*
* LSM hook implementation that authorizes deletion of labeled policies.
*/
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
return selinux_xfrm_delete(ctx);
}
/*
* LSM hook implementation that allocates an xfrm security context, populates it using
* the supplied security context, and assigns it to the xfrm_state.
*/
int selinux_xfrm_state_alloc(struct xfrm_state *x,
struct xfrm_user_sec_ctx *uctx)
{
return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
}
/*
* LSM hook implementation that allocates an xfrm security context and populates it based
* on a secid.
*/
int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
int rc;
struct xfrm_sec_ctx *ctx;
char *ctx_str = NULL;
u32 str_len;
if (!polsec)
return 0;
if (secid == 0)
return -EINVAL;
rc = security_sid_to_context(secid, &ctx_str,
&str_len);
if (rc)
return rc;
ctx = kmalloc(struct_size(ctx, ctx_str, str_len), GFP_ATOMIC);
if (!ctx) {
rc = -ENOMEM;
goto out;
}
ctx->ctx_doi = XFRM_SC_DOI_LSM;
ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
ctx->ctx_sid = secid;
ctx->ctx_len = str_len;
memcpy(ctx->ctx_str, ctx_str, str_len);
x->security = ctx;
atomic_inc(&selinux_xfrm_refcount);
out:
kfree(ctx_str);
return rc;
}
/*
* LSM hook implementation that frees xfrm_state security information.
*/
void selinux_xfrm_state_free(struct xfrm_state *x)
{
selinux_xfrm_free(x->security);
}
/*
* LSM hook implementation that authorizes deletion of labeled SAs.
*/
int selinux_xfrm_state_delete(struct xfrm_state *x)
{
return selinux_xfrm_delete(x->security);
}
/*
* LSM hook that controls access to unlabelled packets. If
* an xfrm_state is authorizable (defined by macro) then it was
* already authorized by the IPsec process. If not, then
* we need to check for unlabelled access since this may not have
* gone through the IPsec process.
*/
int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
struct common_audit_data *ad)
{
int i;
struct sec_path *sp = skb_sec_path(skb);
u32 peer_sid = SECINITSID_UNLABELED;
if (sp) {
for (i = 0; i < sp->len; i++) {
struct xfrm_state *x = sp->xvec[i];
if (x && selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
peer_sid = ctx->ctx_sid;
break;
}
}
}
/* This check even when there's no association involved is intended,
* according to Trent Jaeger, to make sure a process can't engage in
* non-IPsec communication unless explicitly allowed by policy. */
return avc_has_perm(sk_sid, peer_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
}
/*
* POSTROUTE_LAST hook's XFRM processing:
* If we have no security association, then we need to determine
* whether the socket is allowed to send to an unlabelled destination.
* If we do have an authorizable security association, then it has already been
* checked in the selinux_xfrm_state_pol_flow_match hook above.
*/
int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
struct common_audit_data *ad, u8 proto)
{
struct dst_entry *dst;
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
case IPPROTO_COMP:
/* We should have already seen this packet once before it
* underwent xfrm(s). No need to subject it to the unlabeled
* check. */
return 0;
default:
break;
}
dst = skb_dst(skb);
if (dst) {
struct dst_entry *iter;
for (iter = dst; iter != NULL; iter = xfrm_dst_child(iter)) {
struct xfrm_state *x = iter->xfrm;
if (x && selinux_authorizable_xfrm(x))
return 0;
}
}
/* This check even when there's no association involved is intended,
* according to Trent Jaeger, to make sure a process can't engage in
* non-IPsec communication unless explicitly allowed by policy. */
return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* kref.h - library routines for handling generic reference counted objects
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Corp.
*
* based on kobject.h which was:
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (C) 2002-2003 Open Source Development Labs
*/
#ifndef _KREF_H_
#define _KREF_H_
#include <linux/spinlock.h>
#include <linux/refcount.h>
struct kref {
refcount_t refcount;
};
#define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
/**
* kref_init - initialize object.
* @kref: object in question.
*/
static inline void kref_init(struct kref *kref)
{
refcount_set(&kref->refcount, 1);
}
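/**
* kref_read - read the current reference count
* @kref: object.
*
* Returns a snapshot of the current refcount; the value may already be
* stale by the time the caller looks at it, so it is mainly useful for
* debugging and sanity checks.
*/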
static inline unsigned int kref_read(const struct kref *kref)
{
return refcount_read(&kref->refcount);
}
/**
* kref_get - increment refcount for object.
* @kref: object.
*/
static inline void kref_get(struct kref *kref)
{
refcount_inc(&kref->refcount);
}
/**
* kref_put - Decrement refcount for object
* @kref: Object
* @release: Pointer to the function that will clean up the object when the
* last reference to the object is released.
*
* Decrement the refcount, and if 0, call @release. The caller may not
* pass NULL or kfree() as the release function.
*
* Return: 1 if this call removed the object, otherwise return 0. Beware,
* if this function returns 0, another caller may have removed the object
* by the time this function returns. The return value is only useful for
* telling whether this particular call was the one that released the object.
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
if (refcount_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
}
return 0;
}
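/*
* Minimal usage sketch (illustrative only; "struct foo" and foo_release()
* are hypothetical, not part of this header):
*
*	struct foo {
*		struct kref refcount;
*	};
*
*	static void foo_release(struct kref *kref)
*	{
*		struct foo *f = container_of(kref, struct foo, refcount);
*
*		kfree(f);
*	}
*
*	f = kzalloc(sizeof(*f), GFP_KERNEL);
*	kref_init(&f->refcount);
*	kref_get(&f->refcount);
*	kref_put(&f->refcount, foo_release);
*	kref_put(&f->refcount, foo_release);
*
* kref_init() starts the count at 1, kref_get() takes an extra reference,
* and the second kref_put() drops the last reference and frees the object
* through foo_release().
*/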
/**
* kref_put_mutex - Decrement refcount for object
* @kref: Object
* @release: Pointer to the function that will clean up the object when the
* last reference to the object is released.
* @mutex: Mutex which protects the release function.
*
* This variant of kref_put() calls the @release function with the @mutex
* held. The @release function will release the mutex.
*/
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *mutex)
{
if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
return 1;
}
return 0;
}
/**
* kref_put_lock - Decrement refcount for object
* @kref: Object
* @release: Pointer to the function that will clean up the object when the
* last reference to the object is released.
* @lock: Spinlock which protects the release function.
*
* This variant of kref_put() calls the @release function with the @lock
* held. The @release function will release the lock.
*/
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
return 1;
}
return 0;
}
/**
* kref_get_unless_zero - Increment refcount for object unless it is zero.
* @kref: object.
*
* This function is intended to simplify locking around refcounting for
* objects that can be looked up from a lookup structure, and which are
* removed from that lookup structure in the object destructor.
* Operations on such objects require at least a read lock around
* lookup + kref_get, and a write lock around kref_put + remove from lookup
* structure. Furthermore, RCU implementations become extremely tricky.
* With a lookup followed by a kref_get_unless_zero *with return value check*
* locking in the kref_put path can be deferred to the actual removal from
* the lookup structure and RCU lookups become trivial.
*
* Return: non-zero if the increment succeeded. Otherwise return 0.
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
return refcount_inc_not_zero(&kref->refcount);
}
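/*
* Illustrative sketch of the lookup pattern described above (hypothetical
* code, not part of this header; foo_tree_find() stands in for whatever
* RCU-safe lookup structure the object lives in):
*
*	struct foo *foo_lookup(unsigned long key)
*	{
*		struct foo *f;
*
*		rcu_read_lock();
*		f = foo_tree_find(key);
*		if (f && !kref_get_unless_zero(&f->refcount))
*			f = NULL;
*		rcu_read_unlock();
*		return f;
*	}
*
* If kref_get_unless_zero() fails, the last reference was already dropped
* and the object is about to be removed from the lookup structure, so the
* lookup must behave as if the object was not found.
*/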
#endif /* _KREF_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H
#include <linux/uaccess.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>
#include <asm/user.h>
/* Bit 63 of XCR0 is reserved for future expansion */
#define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
#define FXSAVE_SIZE 512
#define XSAVE_HDR_SIZE 64
#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
#define XSAVE_ALIGNMENT 64
/* All currently supported user features */
#define XFEATURE_MASK_USER_SUPPORTED (XFEATURE_MASK_FP | \
XFEATURE_MASK_SSE | \
XFEATURE_MASK_YMM | \
XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
XFEATURE_MASK_Hi16_ZMM | \
XFEATURE_MASK_PKRU | \
XFEATURE_MASK_BNDREGS | \
XFEATURE_MASK_BNDCSR | \
XFEATURE_MASK_XTILE | \
XFEATURE_MASK_APX)
/*
* Features which are restored when returning to user space.
* PKRU is not restored on return to user space because PKRU
* is switched eagerly in switch_to() and flush_thread()
*/
#define XFEATURE_MASK_USER_RESTORE \
(XFEATURE_MASK_USER_SUPPORTED & ~XFEATURE_MASK_PKRU)
/* Features which are dynamically enabled for a process on request */
#define XFEATURE_MASK_USER_DYNAMIC XFEATURE_MASK_XTILE_DATA
/* Supervisor features which are enabled only in guest FPUs */
#define XFEATURE_MASK_GUEST_SUPERVISOR XFEATURE_MASK_CET_KERNEL
/* All currently supported supervisor features */
#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID | \
XFEATURE_MASK_CET_USER | \
XFEATURE_MASK_GUEST_SUPERVISOR)
/*
* A supervisor state component may not always contain valuable information,
* and its size may be huge. Saving/restoring such supervisor state components
* at each context switch can cause high CPU and space overhead, which should
* be avoided. Such supervisor state components should only be saved/restored
* on demand. The on-demand supervisor features are set in this mask.
*
* Unlike the existing supported supervisor features, an independent supervisor
* feature does not allocate a buffer in task->fpu, and the corresponding
* supervisor state component cannot be saved/restored at each context switch.
*
* To support an independent supervisor feature, a developer should follow the
* dos and don'ts as below:
* - Do dynamically allocate a buffer for the supervisor state component.
* - Do manually invoke the XSAVES/XRSTORS instruction to save/restore the
* state component to/from the buffer.
* - Don't set the bit corresponding to the independent supervisor feature in
* IA32_XSS at run time, since it has been set at boot time.
*/
#define XFEATURE_MASK_INDEPENDENT (XFEATURE_MASK_LBR)
/*
* Unsupported supervisor features. When a supervisor feature in this mask is
* supported in the future, move it to the supported supervisor feature mask.
*/
#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT)
/* All supervisor states including supported and unsupported states. */
#define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
XFEATURE_MASK_INDEPENDENT | \
XFEATURE_MASK_SUPERVISOR_UNSUPPORTED)
/*
* The feature mask required to restore FPU state:
* - All user states which are not eagerly switched in switch_to()/exec()
* - The supervisor states
*/
#define XFEATURE_MASK_FPSTATE (XFEATURE_MASK_USER_RESTORE | \
XFEATURE_MASK_SUPERVISOR_SUPPORTED)
/*
* Features in this mask have space allocated in the signal frame, but may not
* have that space initialized when the feature is in its init state.
*/
#define XFEATURE_MASK_SIGFRAME_INITOPT (XFEATURE_MASK_XTILE | \
XFEATURE_MASK_USER_DYNAMIC)
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern void __init update_regset_xstate_info(unsigned int size,
u64 xstate_mask);
int xfeature_size(int xfeature_nr);
void xsaves(struct xregs_state *xsave, u64 mask);
void xrstors(struct xregs_state *xsave, u64 mask);
int xfd_enable_feature(u64 xfd_err);
#ifdef CONFIG_X86_64
DECLARE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
static __always_inline __pure bool fpu_state_size_dynamic(void)
{
return static_branch_unlikely(&__fpu_state_size_dynamic);
}
#else
static __always_inline __pure bool fpu_state_size_dynamic(void)
{
return false;
}
#endif
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DCACHE_H
#define __LINUX_DCACHE_H
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/math.h>
#include <linux/rculist.h>
#include <linux/rculist_bl.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/lockref.h>
#include <linux/stringhash.h>
#include <linux/wait.h>
struct path;
struct file;
struct vfsmount;
/*
* linux/include/linux/dcache.h
*
* Dirent cache data structures
*
* (C) Copyright 1997 Thomas Schoebel-Theuer,
* with heavy changes by Linus Torvalds
*/
#define IS_ROOT(x) ((x) == (x)->d_parent)
/* The hash is always the low bits of hash_len */
#ifdef __LITTLE_ENDIAN
#define HASH_LEN_DECLARE u32 hash; u32 len
#define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
#else
#define HASH_LEN_DECLARE u32 len; u32 hash
#define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
#endif
/*
* "quick string" -- eases parameter passing, but more importantly
* saves "metadata" about the string (ie length and the hash).
*
* hash comes first so it snuggles against d_parent in the
* dentry.
*/
struct qstr {
union {
struct {
HASH_LEN_DECLARE;
};
u64 hash_len;
};
const unsigned char *name;
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
#define QSTR_LEN(n,l) (struct qstr)QSTR_INIT(n,l)
#define QSTR(n) QSTR_LEN(n, strlen(n))
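/*
* For example (illustrative only), QSTR("foo") yields a qstr with
* .name == "foo" and .len == 3; the hash is left at zero and is normally
* computed separately, e.g. via full_name_hash().
*/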
extern const struct qstr empty_name;
extern const struct qstr slash_name;
extern const struct qstr dotdot_name;
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
* give reasonable cacheline footprint with larger lines without the
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
# define DNAME_INLINE_WORDS 5 /* 192 bytes */
#else
# ifdef CONFIG_SMP
# define DNAME_INLINE_WORDS 9 /* 128 bytes */
# else
# define DNAME_INLINE_WORDS 11 /* 128 bytes */
# endif
#endif
#define DNAME_INLINE_LEN (DNAME_INLINE_WORDS*sizeof(unsigned long))
union shortname_store {
unsigned char string[DNAME_INLINE_LEN];
unsigned long words[DNAME_INLINE_WORDS];
};
#define d_lock d_lockref.lock
#define d_iname d_shortname.string
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
union {
struct qstr __d_name; /* for use ONLY in fs/dcache.c */
const struct qstr d_name;
};
struct inode *d_inode; /* Where the name belongs to - NULL is
* negative */
union shortname_store d_shortname;
/* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */
/* Ref lookup also touches following */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
void *d_fsdata; /* fs-specific data */
/* --- cacheline 2 boundary (128 bytes) --- */
struct lockref d_lockref; /* per-dentry lock and refcount
* keep separate from RCU lookup area if
* possible!
*/
union {
struct list_head d_lru; /* LRU list */
wait_queue_head_t *d_wait; /* in-lookup ones only */
};
struct hlist_node d_sib; /* child of parent list */
struct hlist_head d_children; /* our children */
/*
* d_alias and d_rcu can share memory
*/
union {
struct hlist_node d_alias; /* inode alias list */
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
};
/*
* dentry->d_lock spinlock nesting subclasses:
*
* 0: normal
* 1: nested
*/
enum dentry_d_lock_class
{
DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
DENTRY_D_LOCK_NESTED
};
enum d_real_type {
D_REAL_DATA,
D_REAL_METADATA,
};
struct dentry_operations {
int (*d_revalidate)(struct inode *, const struct qstr *,
struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
unsigned int, const char *, const struct qstr *);
int (*d_delete)(const struct dentry *);
int (*d_init)(struct dentry *);
void (*d_release)(struct dentry *);
void (*d_prune)(struct dentry *);
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
bool (*d_unalias_trylock)(const struct dentry *);
void (*d_unalias_unlock)(const struct dentry *);
} ____cacheline_aligned;
/*
* Locking rules for dentry_operations callbacks are to be found in
* Documentation/filesystems/locking.rst. Keep it updated!
*
* Further descriptions are found in Documentation/filesystems/vfs.rst.
* Keep it updated too!
*/
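/*
* Minimal illustrative sketch (hypothetical filesystem code, not part of
* this header): a filesystem that never wants to keep unused dentries
* cached could supply only ->d_delete and leave every other hook NULL:
*
*	static int examplefs_d_delete(const struct dentry *dentry)
*	{
*		return 1;
*	}
*
*	static const struct dentry_operations examplefs_dentry_ops = {
*		.d_delete = examplefs_d_delete,
*	};
*
* Returning 1 from ->d_delete asks the VFS to drop the dentry as soon as
* the last reference is put. The operations table is typically installed
* per superblock, e.g. via set_default_d_op() declared later in this file.
*/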
/* d_flags entries */
enum dentry_flags {
DCACHE_OP_HASH = BIT(0),
DCACHE_OP_COMPARE = BIT(1),
DCACHE_OP_REVALIDATE = BIT(2),
DCACHE_OP_DELETE = BIT(3),
DCACHE_OP_PRUNE = BIT(4),
/*
* This dentry is possibly not currently connected to the dcache tree,
* in which case its parent will either be itself, or will have this
* flag as well. nfsd will not use a dentry with this bit set, but will
* first endeavour to clear the bit either by discovering that it is
* connected, or by performing lookup operations. Any filesystem which
* supports nfsd_operations MUST have a lookup function which, if it
* finds a directory inode with a DCACHE_DISCONNECTED dentry, will
* d_move that dentry into place and return that dentry rather than the
* passed one, typically using d_splice_alias.
*/
DCACHE_DISCONNECTED = BIT(5),
DCACHE_REFERENCED = BIT(6), /* Recently used, don't discard. */
DCACHE_DONTCACHE = BIT(7), /* Purge from memory on final dput() */
DCACHE_CANT_MOUNT = BIT(8),
DCACHE_GENOCIDE = BIT(9),
DCACHE_SHRINK_LIST = BIT(10),
DCACHE_OP_WEAK_REVALIDATE = BIT(11),
/*
* this dentry has been "silly renamed" and has to be deleted on the
* last dput()
*/
DCACHE_NFSFS_RENAMED = BIT(12),
DCACHE_FSNOTIFY_PARENT_WATCHED = BIT(13), /* Parent inode is watched by some fsnotify listener */
DCACHE_DENTRY_KILLED = BIT(14),
DCACHE_MOUNTED = BIT(15), /* is a mountpoint */
DCACHE_NEED_AUTOMOUNT = BIT(16), /* handle automount on this dir */
DCACHE_MANAGE_TRANSIT = BIT(17), /* manage transit from this dirent */
DCACHE_LRU_LIST = BIT(18),
DCACHE_ENTRY_TYPE = (7 << 19), /* bits 19..21 are for storing type: */
DCACHE_MISS_TYPE = (0 << 19), /* Negative dentry */
DCACHE_WHITEOUT_TYPE = (1 << 19), /* Whiteout dentry (stop pathwalk) */
DCACHE_DIRECTORY_TYPE = (2 << 19), /* Normal directory */
DCACHE_AUTODIR_TYPE = (3 << 19), /* Lookupless directory (presumed automount) */
DCACHE_REGULAR_TYPE = (4 << 19), /* Regular file type */
DCACHE_SPECIAL_TYPE = (5 << 19), /* Other file type */
DCACHE_SYMLINK_TYPE = (6 << 19), /* Symlink */
DCACHE_NOKEY_NAME = BIT(22), /* Encrypted name encoded without key */
DCACHE_OP_REAL = BIT(23),
DCACHE_PAR_LOOKUP = BIT(24), /* being looked up (with parent locked shared) */
DCACHE_DENTRY_CURSOR = BIT(25),
DCACHE_NORCU = BIT(26), /* No RCU delay for freeing */
};
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
extern seqlock_t rename_lock;
/*
* These are the low-level FS interfaces to the dcache..
*/
extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
/* weird procfs mess; *NOT* exported */
extern struct dentry * d_splice_alias_ops(struct inode *, struct dentry *,
const struct dentry_operations *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
const struct qstr *name);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
extern void d_mark_tmpfile(struct file *, struct inode *);
extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
extern struct dentry *d_find_alias_rcu(struct inode *);
/* test whether we have any submounts in a subdir tree */
extern int path_has_submounts(const struct path *);
/*
* This adds the entry to the hash queues.
*/
extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
static inline unsigned d_count(const struct dentry *dentry)
{
return dentry->d_lockref.count;
}
ino_t d_parent_ino(struct dentry *dentry);
/*
* helper function for dentry_operations.d_dname() members
*/
extern __printf(3, 4)
char *dynamic_dname(char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *dentry_path_raw(const struct dentry *, char *, int);
extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
/**
* dget_dlock - get a reference to a dentry
* @dentry: dentry to get a reference to
*
* Given a live dentry, increment the reference count and return the dentry.
* Caller must hold @dentry->d_lock. Making sure that dentry is alive is
* caller's responsibility. There are many conditions sufficient to guarantee
* that; e.g. anything with non-negative refcount is alive, so's anything
* hashed, anything positive, anyone's parent, etc.
*/
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
dentry->d_lockref.count++;
return dentry;
}
/**
* dget - get a reference to a dentry
* @dentry: dentry to get a reference to
*
* Given a dentry or %NULL pointer increment the reference count
* if appropriate and return the dentry. A dentry will not be
* destroyed when it has references. Conversely, a dentry with
* no references can disappear for any number of reasons, starting
* with memory pressure. In other words, that primitive is
* used to clone an existing reference; using it on something with
* zero refcount is a bug.
*
* NOTE: it will spin if @dentry->d_lock is held. From the deadlock
* avoidance point of view it is equivalent to spin_lock()/increment
* refcount/spin_unlock(), so calling it under @dentry->d_lock is
* always a bug; so's calling it under ->d_lock on any of its descendants.
*
*/
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry)
lockref_get(&dentry->d_lockref);
return dentry;
}
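/*
* Typical usage (illustrative; "foo" is hypothetical): take an extra
* reference while stashing the pointer somewhere, and drop it again with
* dput() once the stored pointer is no longer needed:
*
*	foo->dentry = dget(dentry);
*	...
*	dput(foo->dentry);
*/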
extern struct dentry *dget_parent(struct dentry *dentry);
/**
* d_unhashed - is dentry hashed
* @dentry: entry to check
*
* Returns true if the dentry passed is not currently hashed.
*/
static inline int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
}
static inline int d_unlinked(const struct dentry *dentry)
{
return d_unhashed(dentry) && !IS_ROOT(dentry);
}
static inline int cant_mount(const struct dentry *dentry)
{
return (dentry->d_flags & DCACHE_CANT_MOUNT);
}
static inline void dont_mount(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_CANT_MOUNT;
spin_unlock(&dentry->d_lock);
}
extern void __d_lookup_unhash_wake(struct dentry *dentry);
static inline int d_in_lookup(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_PAR_LOOKUP;
}
static inline void d_lookup_done(struct dentry *dentry)
{
if (unlikely(d_in_lookup(dentry)))
__d_lookup_unhash_wake(dentry);
}
extern void dput(struct dentry *);
static inline bool d_managed(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_MANAGED_DENTRY;
}
static inline bool d_mountpoint(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_MOUNTED;
}
/*
* Directory cache entry type accessor functions.
*/
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
}
static inline bool d_is_whiteout(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
}
static inline bool d_can_lookup(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
}
static inline bool d_is_autodir(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
}
static inline bool d_is_dir(const struct dentry *dentry)
{
return d_can_lookup(dentry) || d_is_autodir(dentry);
}
static inline bool d_is_symlink(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
}
static inline bool d_is_reg(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
}
static inline bool d_is_special(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
}
static inline bool d_is_file(const struct dentry *dentry)
{
return d_is_reg(dentry) || d_is_special(dentry);
}
static inline bool d_is_negative(const struct dentry *dentry)
{
// TODO: check d_is_whiteout(dentry) also.
return d_is_miss(dentry);
}
static inline bool d_flags_negative(unsigned flags)
{
return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE;
}
static inline bool d_is_positive(const struct dentry *dentry)
{
return !d_is_negative(dentry);
}
/**
* d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
* @dentry: The dentry in question
*
* Returns true if the dentry represents either an absent name or a name that
* doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent
* a true miss, a whiteout that isn't represented by a 0,0 chardev or a
* fallthrough marker in an opaque directory.
*
* Note! (1) This should be used *only* by a filesystem to examine its own
* dentries. It should not be used to look at some other filesystem's
* dentries. (2) It should also be used in combination with d_inode() to get
* the inode. (3) The dentry may have something attached to ->d_lower and the
* type field of the flags may be set to something other than miss or whiteout.
*/
static inline bool d_really_is_negative(const struct dentry *dentry)
{
return dentry->d_inode == NULL;
}
/**
* d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
* @dentry: The dentry in question
*
* Returns true if the dentry represents a name that maps to an inode
* (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if
* that is represented on medium as a 0,0 chardev.
*
* Note! (1) This should be used *only* by a filesystem to examine its own
* dentries. It should not be used to look at some other filesystem's
* dentries. (2) It should also be used in combination with d_inode() to get
* the inode.
*/
static inline bool d_really_is_positive(const struct dentry *dentry)
{
return dentry->d_inode != NULL;
}
static inline int simple_positive(const struct dentry *dentry)
{
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
unsigned long vfs_pressure_ratio(unsigned long val);
/**
* d_inode - Get the actual inode of this dentry
* @dentry: The dentry to query
*
* This is the helper normal filesystems should use to get at their own inodes
* in their own dentries and ignore the layering superimposed upon them.
*/
static inline struct inode *d_inode(const struct dentry *dentry)
{
return dentry->d_inode;
}
/**
* d_inode_rcu - Get the actual inode of this dentry with READ_ONCE()
* @dentry: The dentry to query
*
* This is the helper normal filesystems should use to get at their own inodes
* in their own dentries and ignore the layering superimposed upon them.
*/
static inline struct inode *d_inode_rcu(const struct dentry *dentry)
{
return READ_ONCE(dentry->d_inode);
}
/**
* d_backing_inode - Get upper or lower inode we should be using
* @upper: The upper layer
*
* This is the helper that should be used to get at the inode that will be used
* if this dentry were to be opened as a file. The inode may be on the upper
* dentry or it may be on a lower dentry pinned by the upper.
*
* Normal filesystems should not use this to access their own inodes.
*/
static inline struct inode *d_backing_inode(const struct dentry *upper)
{
struct inode *inode = upper->d_inode;
return inode;
}
/**
* d_real - Return the real dentry
* @dentry: the dentry to query
* @type: the type of real dentry (data or metadata)
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
*
* See also: Documentation/filesystems/vfs.rst
*/
static inline struct dentry *d_real(struct dentry *dentry, enum d_real_type type)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
return dentry->d_op->d_real(dentry, type);
else
return dentry;
}
/**
* d_real_inode - Return the real inode hosting the data
* @dentry: The dentry to query
*
* If dentry is on a union/overlay, then return the underlying, real inode.
* Otherwise return d_inode().
*/
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
return d_inode(d_real((struct dentry *) dentry, D_REAL_DATA));
}
struct name_snapshot {
struct qstr name;
union shortname_store inline_name;
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
static inline struct dentry *d_first_child(const struct dentry *dentry)
{
return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib);
}
static inline struct dentry *d_next_sibling(const struct dentry *dentry)
{
return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
}
void set_default_d_op(struct super_block *, const struct dentry_operations *);
#endif /* __LINUX_DCACHE_H */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
*
* NOHZ implementation for low and high resolution timers
*
* Started by: Thomas Gleixner and Ingo Molnar
*/
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <asm/irq_regs.h>
#include "tick-internal.h"
#include <trace/events/timer.h>
/*
* Per-CPU nohz control structure
*/
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
struct tick_sched *tick_get_tick_sched(int cpu)
{
return &per_cpu(tick_cpu_sched, cpu);
}
/*
* The time when the last jiffy update happened. Write access must hold
* jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
* consistent view of jiffies and last_jiffies_update.
*/
static ktime_t last_jiffies_update;
/*
* Must be called with interrupts disabled !
*/
static void tick_do_update_jiffies64(ktime_t now)
{
unsigned long ticks = 1;
ktime_t delta, nextp;
/*
* 64-bit can do a quick check without holding the jiffies lock and
* without looking at the sequence count. The smp_load_acquire()
* pairs with the update done later in this function.
*
* 32-bit cannot do that because the store of 'tick_next_period'
* consists of two 32-bit stores, and the first store could be
* moved by the CPU to a random point in the future.
*/
if (IS_ENABLED(CONFIG_64BIT)) {
if (ktime_before(now, smp_load_acquire(&tick_next_period)))
return;
} else {
unsigned int seq;
/*
* Avoid contention on 'jiffies_lock' and protect the quick
* check with the sequence count.
*/
do {
seq = read_seqcount_begin(&jiffies_seq);
nextp = tick_next_period;
} while (read_seqcount_retry(&jiffies_seq, seq));
if (ktime_before(now, nextp))
return;
}
/* Quick check failed, i.e. update is required. */
raw_spin_lock(&jiffies_lock);
/*
* Re-evaluate with the lock held. Another CPU might have done the
* update already.
*/
if (ktime_before(now, tick_next_period)) {
raw_spin_unlock(&jiffies_lock);
return;
}
write_seqcount_begin(&jiffies_seq);
delta = ktime_sub(now, tick_next_period);
if (unlikely(delta >= TICK_NSEC)) {
/* Slow path for long idle sleep times */
s64 incr = TICK_NSEC;
ticks += ktime_divns(delta, incr);
last_jiffies_update = ktime_add_ns(last_jiffies_update,
incr * ticks);
} else {
last_jiffies_update = ktime_add_ns(last_jiffies_update,
TICK_NSEC);
}
/* Advance jiffies to complete the 'jiffies_seq' protected job */
jiffies_64 += ticks;
/* Keep the tick_next_period variable up to date */
nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);
if (IS_ENABLED(CONFIG_64BIT)) {
/*
* Pairs with smp_load_acquire() in the lockless quick
* check above, and ensures that the update to 'jiffies_64' is
* not reordered vs. the store to 'tick_next_period', neither
* by the compiler nor by the CPU.
*/
smp_store_release(&tick_next_period, nextp);
} else {
/*
* A plain store is good enough on 32-bit, as the quick check
* above is protected by the sequence count.
*/
tick_next_period = nextp;
}
/*
* Release the sequence count. calc_global_load() below is not
* protected by it, but 'jiffies_lock' needs to be held to prevent
* concurrent invocations.
*/
write_seqcount_end(&jiffies_seq);
calc_global_load();
raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
/*
* Initialize (if necessary) and return the time of the last jiffies update.
*/
static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
raw_spin_lock(&jiffies_lock);
write_seqcount_begin(&jiffies_seq);
/* Have we started the jiffies update yet ? */
if (last_jiffies_update == 0) {
u32 rem;
/*
* Ensure that the tick is aligned to a multiple of
* TICK_NSEC.
*/
div_u64_rem(tick_next_period, TICK_NSEC, &rem);
if (rem)
tick_next_period += TICK_NSEC - rem;
last_jiffies_update = tick_next_period;
}
period = last_jiffies_update;
write_seqcount_end(&jiffies_seq);
raw_spin_unlock(&jiffies_lock);
return period;
}
static inline int tick_sched_flag_test(struct tick_sched *ts,
unsigned long flag)
{
return !!(ts->flags & flag);
}
static inline void tick_sched_flag_set(struct tick_sched *ts,
unsigned long flag)
{
lockdep_assert_irqs_disabled();
ts->flags |= flag;
}
static inline void tick_sched_flag_clear(struct tick_sched *ts,
unsigned long flag)
{
lockdep_assert_irqs_disabled();
ts->flags &= ~flag;
}
#define MAX_STALLED_JIFFIES 5
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
int tick_cpu, cpu = smp_processor_id();
/*
* Check if the do_timer duty was dropped. We don't care about
* concurrency: This happens only when the CPU in charge went
* into a long sleep. If two CPUs happen to assign themselves to
* this duty, then the jiffies update is still serialized by
* 'jiffies_lock'.
*
* If nohz_full is enabled, this should not happen because the
* 'tick_do_timer_cpu' CPU never relinquishes.
*/
tick_cpu = READ_ONCE(tick_do_timer_cpu);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
WARN_ON_ONCE(tick_nohz_full_running);
#endif
WRITE_ONCE(tick_do_timer_cpu, cpu);
tick_cpu = cpu;
}
/* Check if jiffies need an update */
if (tick_cpu == cpu)
tick_do_update_jiffies64(now);
/*
* If the jiffies update stalled for too long (timekeeper in stop_machine()
* or VMEXIT'ed for several msecs), force an update.
*/
if (ts->last_tick_jiffies != jiffies) {
ts->stalled_jiffies = 0;
ts->last_tick_jiffies = READ_ONCE(jiffies);
} else {
if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
tick_do_update_jiffies64(now);
ts->stalled_jiffies = 0;
ts->last_tick_jiffies = READ_ONCE(jiffies);
}
}
if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
ts->got_idle_tick = 1;
}
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
/*
* When we are idle and the tick is stopped, we have to touch
* the watchdog as we might not schedule for a really long
* time. This happens on completely idle SMP systems while
* waiting on the login prompt. We also increment the "start of
* idle" jiffy stamp so the idle accounting adjustment we do
* when we go busy again does not account too many ticks.
*/
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) &&
tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
touch_softlockup_watchdog_sched();
if (is_idle_task(current))
ts->idle_jiffies++;
/*
* In case the current tick fired too early past its expected
* expiration, make sure we don't bypass the next clock reprogramming
* to the same deadline.
*/
ts->next_tick = 0;
}
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
}
/*
* We rearm the timer until we get disabled by the idle code.
* Called with interrupts disabled.
*/
static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
{
struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer);
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
tick_sched_do_timer(ts, now);
/*
* Do not call when we are not in IRQ context and have
* no valid 'regs' pointer
*/
if (regs)
tick_sched_handle(ts, regs);
else
ts->next_tick = 0;
/*
* In dynticks mode, tick reprogram is deferred:
* - to the idle task if in dynticks-idle
* - to IRQ exit if in full-dynticks.
*/
if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED)))
return HRTIMER_NORESTART;
hrtimer_forward(timer, now, TICK_NSEC);
return HRTIMER_RESTART;
}
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;
static bool check_tick_dependency(atomic_t *dep)
{
int val = atomic_read(dep);
if (val & TICK_DEP_MASK_POSIX_TIMER) {
trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
return true;
}
if (val & TICK_DEP_MASK_PERF_EVENTS) {
trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
return true;
}
if (val & TICK_DEP_MASK_SCHED) {
trace_tick_stop(0, TICK_DEP_MASK_SCHED);
return true;
}
if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
return true;
}
if (val & TICK_DEP_MASK_RCU) {
trace_tick_stop(0, TICK_DEP_MASK_RCU);
return true;
}
if (val & TICK_DEP_MASK_RCU_EXP) {
trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
return true;
}
return false;
}
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
lockdep_assert_irqs_disabled();
if (unlikely(!cpu_online(cpu)))
return false;
if (check_tick_dependency(&tick_dep_mask))
return false;
if (check_tick_dependency(&ts->tick_dep_mask))
return false;
if (check_tick_dependency(&current->tick_dep_mask))
return false;
if (check_tick_dependency(&current->signal->tick_dep_mask))
return false;
return true;
}
static void nohz_full_kick_func(struct irq_work *work)
{
/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
IRQ_WORK_INIT_HARD(nohz_full_kick_func);
/*
* Kick this CPU if it's full dynticks in order to force it to
* re-evaluate its dependency on the tick and restart it if necessary.
* This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
* is NMI safe.
*/
static void tick_nohz_full_kick(void)
{
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}
/*
* Kick the CPU if it's full dynticks in order to force it to
* re-evaluate its dependency on the tick and restart it if necessary.
*/
void tick_nohz_full_kick_cpu(int cpu)
{
if (!tick_nohz_full_cpu(cpu))
return;
irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}
static void tick_nohz_kick_task(struct task_struct *tsk)
{
int cpu;
/*
* If the task is not running, run_posix_cpu_timers()
* has nothing to elapse, and an IPI can then be optimized out.
*
* activate_task() STORE p->tick_dep_mask
* STORE p->on_rq
* __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or())
* LOCK rq->lock LOAD p->on_rq
* smp_mb__after_spin_lock()
* tick_nohz_task_switch()
* LOAD p->tick_dep_mask
*
* XXX given a task picks up the dependency on schedule(), should we
* only care about tasks that are currently on the CPU instead of all
* that are on the runqueue?
*
* That is, does this want to be: task_on_cpu() / task_curr()?
*/
if (!sched_task_on_rq(tsk))
return;
/*
* If the task concurrently migrates to another CPU,
* we guarantee it sees the new tick dependency upon
* schedule.
*
* set_task_cpu(p, cpu);
* STORE p->cpu = @cpu
* __schedule() (switch to task 'p')
* LOCK rq->lock
* smp_mb__after_spin_lock() STORE p->tick_dep_mask
* tick_nohz_task_switch() smp_mb() (atomic_fetch_or())
* LOAD p->tick_dep_mask LOAD p->cpu
*/
cpu = task_cpu(tsk);
preempt_disable();
if (cpu_online(cpu))
tick_nohz_full_kick_cpu(cpu);
preempt_enable();
}
/*
* Kick all full dynticks CPUs in order to force these to re-evaluate
* their dependency on the tick and restart it if necessary.
*/
static void tick_nohz_full_kick_all(void)
{
int cpu;
if (!tick_nohz_full_running)
return;
preempt_disable();
for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
tick_nohz_full_kick_cpu(cpu);
preempt_enable();
}
static void tick_nohz_dep_set_all(atomic_t *dep,
enum tick_dep_bits bit)
{
int prev;
prev = atomic_fetch_or(BIT(bit), dep);
if (!prev)
tick_nohz_full_kick_all();
}
/*
* Set a global tick dependency. Used by perf events that rely on freq and
* unstable clocks.
*/
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
tick_nohz_dep_set_all(&tick_dep_mask, bit);
}
void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &tick_dep_mask);
}
/*
* Set per-CPU tick dependency. Used by scheduler and perf events in order to
* manage event-throttling.
*/
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
int prev;
struct tick_sched *ts;
ts = per_cpu_ptr(&tick_cpu_sched, cpu);
prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
if (!prev) {
preempt_disable();
/* Perf needs local kick that is NMI safe */
if (cpu == smp_processor_id()) {
tick_nohz_full_kick();
} else {
/* Remote IRQ work not NMI-safe */
if (!WARN_ON_ONCE(in_nmi()))
tick_nohz_full_kick_cpu(cpu);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
/*
* Set a per-task tick dependency. RCU needs this. Also posix CPU timers
* in order to elapse per task timers.
*/
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);
void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
/*
* Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
* per process timers.
*/
void tick_nohz_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit)
{
int prev;
struct signal_struct *sig = tsk->signal;
prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
if (!prev) {
struct task_struct *t;
lockdep_assert_held(&tsk->sighand->siglock);
__for_each_thread(sig, t)
tick_nohz_kick_task(t);
}
}
void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}
/*
* Re-evaluate the need for the tick as we switch the current task.
* It might need the tick due to per task/process properties:
* perf events, posix CPU timers, ...
*/
void __tick_nohz_task_switch(void)
{
struct tick_sched *ts;
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
ts = this_cpu_ptr(&tick_cpu_sched);
if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
if (atomic_read(&current->tick_dep_mask) ||
atomic_read(&current->signal->tick_dep_mask))
tick_nohz_full_kick();
}
}
/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
cpumask_copy(tick_nohz_full_mask, cpumask);
tick_nohz_full_running = true;
}
bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
/*
* The 'tick_do_timer_cpu' CPU handles housekeeping duty (unbound
* timers, workqueues, timekeeping, ...) on behalf of full dynticks
* CPUs. It must remain online when nohz full is enabled.
*/
if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
return false;
return true;
}
static int tick_nohz_cpu_down(unsigned int cpu)
{
return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}
void __init tick_nohz_init(void)
{
int cpu, ret;
if (!tick_nohz_full_running)
return;
/*
* Full dynticks uses IRQ work to drive the tick rescheduling on safe
* locking contexts. But then we need IRQ work to raise its own
* interrupts to avoid circular dependency on the tick.
*/
if (!arch_irq_work_has_interrupt()) {
pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n");
cpumask_clear(tick_nohz_full_mask);
tick_nohz_full_running = false;
return;
}
if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
!IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
pr_warn("NO_HZ: Clearing %d from nohz_full range "
"for timekeeping\n", cpu);
cpumask_clear_cpu(cpu, tick_nohz_full_mask);
}
}
for_each_cpu(cpu, tick_nohz_full_mask)
ct_cpu_track_user(cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"kernel/nohz:predown", NULL,
tick_nohz_cpu_down);
WARN_ON(ret < 0);
pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
cpumask_pr_args(tick_nohz_full_mask));
}
#endif /* #ifdef CONFIG_NO_HZ_FULL */
/*
* NOHZ - aka dynamic tick functionality
*/
#ifdef CONFIG_NO_HZ_COMMON
/*
* NO HZ enabled ?
*/
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
* Enable / Disable tickless mode
*/
static int __init setup_tick_nohz(char *str)
{
return (kstrtobool(str, &tick_nohz_enabled) == 0);
}
__setup("nohz=", setup_tick_nohz);
bool tick_nohz_tick_stopped(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
}
bool tick_nohz_tick_stopped_cpu(int cpu)
{
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
}
/**
* tick_nohz_update_jiffies - update jiffies when idle was interrupted
* @now: current ktime_t
*
* Called from interrupt entry when the CPU was idle
*
* In case the sched_tick was stopped on this CPU, we have to check if jiffies
* must be updated. Otherwise an interrupt handler could use a stale jiffy
* value. We do this unconditionally on any CPU, as we don't know whether the
* CPU, which has the update task assigned, is in a long sleep.
*/
static void tick_nohz_update_jiffies(ktime_t now)
{
unsigned long flags;
__this_cpu_write(tick_cpu_sched.idle_waketime, now);
local_irq_save(flags);
tick_do_update_jiffies64(now);
local_irq_restore(flags);
touch_softlockup_watchdog_sched();
}
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
ktime_t delta;
if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)))
return;
delta = ktime_sub(now, ts->idle_entrytime);
write_seqcount_begin(&ts->idle_sleeptime_seq);
if (nr_iowait_cpu(smp_processor_id()) > 0)
ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
else
ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
ts->idle_entrytime = now;
tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE);
write_seqcount_end(&ts->idle_sleeptime_seq);
sched_clock_idle_wakeup_event();
}
static void tick_nohz_start_idle(struct tick_sched *ts)
{
write_seqcount_begin(&ts->idle_sleeptime_seq);
ts->idle_entrytime = ktime_get();
tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE);
write_seqcount_end(&ts->idle_sleeptime_seq);
sched_clock_idle_sleep_event();
}
static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
bool compute_delta, u64 *last_update_time)
{
ktime_t now, idle;
unsigned int seq;
if (!tick_nohz_active)
return -1;
now = ktime_get();
if (last_update_time)
*last_update_time = ktime_to_us(now);
do {
seq = read_seqcount_begin(&ts->idle_sleeptime_seq);
if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) {
ktime_t delta = ktime_sub(now, ts->idle_entrytime);
idle = ktime_add(*sleeptime, delta);
} else {
idle = *sleeptime;
}
} while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq));
return ktime_to_us(idle);
}
/**
* get_cpu_idle_time_us - get the total idle time of a CPU
* @cpu: CPU number to query
* @last_update_time: variable to store update time in. Do not update
* counters if NULL.
*
* Return the cumulative idle time (since boot) for a given
* CPU, in microseconds. Note that this is partially broken due to
* the counter of iowait tasks that can be remotely updated without
* any synchronization. Therefore it is possible to observe backward
* values within two consecutive reads.
*
* This time is measured via accounting rather than sampling,
* and is as accurate as ktime_get() is.
*
* Return: -1 if NOHZ is not enabled, else total idle time of the @cpu
*/
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
!nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
/**
* get_cpu_iowait_time_us - get the total iowait time of a CPU
* @cpu: CPU number to query
* @last_update_time: variable to store update time in. Do not update
* counters if NULL.
*
* Return the cumulative iowait time (since boot) for a given
* CPU, in microseconds. Note this is partially broken due to
* the counter of iowait tasks that can be remotely updated without
* any synchronization. Therefore it is possible to observe backward
* values within two consecutive reads.
*
* This time is measured via accounting rather than sampling,
* and is as accurate as ktime_get() is.
*
* Return: -1 if NOHZ is not enabled, else total iowait time of @cpu
*/
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
hrtimer_cancel(&ts->sched_timer);
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
/* Forward the time to expire in the future */
hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) {
hrtimer_start_expires(&ts->sched_timer,
HRTIMER_MODE_ABS_PINNED_HARD);
} else {
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
/*
* Reset to make sure the next tick stop doesn't get fooled by past
* cached clock deadline.
*/
ts->next_tick = 0;
}
static inline bool local_timer_softirq_pending(void)
{
return local_timers_pending() & BIT(TIMER_SOFTIRQ);
}
/*
* Read jiffies and the time when jiffies were updated last
*/
u64 get_jiffies_update(unsigned long *basej)
{
unsigned long basejiff;
unsigned int seq;
u64 basemono;
do {
seq = read_seqcount_begin(&jiffies_seq);
basemono = last_jiffies_update;
basejiff = jiffies;
} while (read_seqcount_retry(&jiffies_seq, seq));
*basej = basejiff;
return basemono;
}
/**
* tick_nohz_next_event() - return the clock monotonic based next event
* @ts: pointer to tick_sched struct
* @cpu: CPU number
*
* Return:
* *%0 - When the next event is a maximum of TICK_NSEC in the future
* and the tick is not stopped yet
* *%next_event - Next event based on clock monotonic
*/
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
u64 basemono, next_tick, delta, expires;
unsigned long basejiff;
int tick_cpu;
basemono = get_jiffies_update(&basejiff);
ts->last_jiffies = basejiff;
ts->timer_expires_base = basemono;
/*
* Keep the periodic tick, when RCU, architecture or irq_work
* requests it.
* Aside of that, check whether the local timer softirq is
* pending. If so, it's a bad idea to call get_next_timer_interrupt(),
* because there is an already expired timer, so it will request
* immediate expiry, which rearms the hardware timer with a
* minimal delta, which brings us back to this place
* immediately. Lather, rinse and repeat...
*/
if (rcu_needs_cpu() || arch_needs_cpu() ||
irq_work_needs_cpu() || local_timer_softirq_pending()) {
next_tick = basemono + TICK_NSEC;
} else {
/*
* Get the next pending timer. If high resolution
* timers are enabled this only takes the timer wheel
* timers into account. If high resolution timers are
* disabled this also looks at the next expiring
* hrtimer.
*/
next_tick = get_next_timer_interrupt(basejiff, basemono);
ts->next_timer = next_tick;
}
/* Make sure next_tick is never before basemono! */
if (WARN_ON_ONCE(basemono > next_tick))
next_tick = basemono;
/*
* If the tick is due in the next period, keep it ticking or
* force prod the timer.
*/
delta = next_tick - basemono;
if (delta <= (u64)TICK_NSEC) {
/*
* We've not stopped the tick yet, and there's a timer in the
* next period, so no point in stopping it either, bail.
*/
if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
ts->timer_expires = 0;
goto out;
}
}
/*
* If this CPU is the one which had the do_timer() duty last, we limit
* the sleep time to the timekeeping 'max_deferment' value.
* Otherwise we can sleep as long as we want.
*/
delta = timekeeping_max_deferment();
tick_cpu = READ_ONCE(tick_do_timer_cpu);
if (tick_cpu != cpu &&
(tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
delta = KTIME_MAX;
/* Calculate the next expiry time */
if (delta < (KTIME_MAX - basemono))
expires = basemono + delta;
else
expires = KTIME_MAX;
ts->timer_expires = min_t(u64, expires, next_tick);
out:
return ts->timer_expires;
}
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
unsigned long basejiff = ts->last_jiffies;
u64 basemono = ts->timer_expires_base;
bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
int tick_cpu;
u64 expires;
/* Make sure we won't be trying to stop it twice in a row. */
ts->timer_expires_base = 0;
/*
* Now the tick should be stopped definitely - so the timer base needs
* to be marked idle as well to not miss a newly queued timer.
*/
expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle);
if (expires > ts->timer_expires) {
/*
* This path could only happen when the first timer was removed
* between calculating the possible sleep length and now (when
* high resolution mode is not active, timer could also be a
* hrtimer).
*
* We have to stick to the original calculated expiry value to
* not stop the tick for too long with a shallow C-state (which
* was programmed by cpuidle because of an early next expiration
* value).
*/
expires = ts->timer_expires;
}
/* If the timer base is not idle, retain the not yet stopped tick. */
if (!timer_idle)
return;
/*
* If this CPU is the one which updates jiffies, then give up
* the assignment and let it be taken by the CPU which runs
* the tick timer next, which might be this CPU as well. If we
* don't drop this here, the jiffies might be stale and
* do_timer() never gets invoked. Keep track of the fact that it
* was the one which had the do_timer() duty last.
*/
tick_cpu = READ_ONCE(tick_do_timer_cpu);
if (tick_cpu == cpu) {
WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE);
tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST);
} else if (tick_cpu != TICK_DO_TIMER_NONE) {
tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST);
}
/* Skip reprogram of event if it's not changed */
if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) {
/* Sanity check: make sure clockevent is actually programmed */
if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
return;
WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu "
"timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick,
dev->next_event, hrtimer_active(&ts->sched_timer),
hrtimer_get_expires(&ts->sched_timer));
}
/*
* tick_nohz_stop_tick() can be called several times before
* tick_nohz_restart_sched_tick() is called. This happens when
* interrupts arrive which do not cause a reschedule. In the first
* call we save the current tick time, so we can restart the
* scheduler tick in tick_nohz_restart_sched_tick().
*/
if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
calc_load_nohz_start();
quiet_vmstat();
ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
tick_sched_flag_set(ts, TS_FLAG_STOPPED);
trace_tick_stop(1, TICK_DEP_MASK_NONE);
}
ts->next_tick = expires;
/*
* If the expiration time == KTIME_MAX, then we simply stop
* the tick timer.
*/
if (unlikely(expires == KTIME_MAX)) {
if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
hrtimer_cancel(&ts->sched_timer);
else
tick_program_event(KTIME_MAX, 1);
return;
}
if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) {
hrtimer_start(&ts->sched_timer, expires,
HRTIMER_MODE_ABS_PINNED_HARD);
} else {
hrtimer_set_expires(&ts->sched_timer, expires);
tick_program_event(expires, 1);
}
}
static void tick_nohz_retain_tick(struct tick_sched *ts)
{
ts->timer_expires_base = 0;
}
#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu)
{
if (tick_nohz_next_event(ts, cpu))
tick_nohz_stop_tick(ts, cpu);
else
tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
/* Update jiffies first */
tick_do_update_jiffies64(now);
/*
* Clear the timer idle flag, so we avoid IPIs on remote queueing and
* the clock forward checks in the enqueue path:
*/
timer_clear_idle();
calc_load_nohz_stop();
touch_softlockup_watchdog_sched();
/* Cancel the scheduled timer and restore the tick: */
tick_sched_flag_clear(ts, TS_FLAG_STOPPED);
tick_nohz_restart(ts, now);
}
static void __tick_nohz_full_update_tick(struct tick_sched *ts,
ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
int cpu = smp_processor_id();
if (can_stop_full_tick(cpu, ts))
tick_nohz_full_stop_tick(ts, cpu);
else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
tick_nohz_restart_sched_tick(ts, now);
#endif
}
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ))
return;
__tick_nohz_full_update_tick(ts, ktime_get());
}
/*
* A pending softirq outside an IRQ (or softirq disabled section) context
* should be waiting for ksoftirqd to handle it. Therefore we shouldn't
* reach this code due to the need_resched() early check in can_stop_idle_tick().
*
* However if we are between CPUHP_AP_SMPBOOT_THREADS and CPU_TEARDOWN_CPU on the
* cpu_down() process, softirqs can still be raised while ksoftirqd is parked,
* triggering the code below, since wakep_softirqd() is ignored.
*
*/
static bool report_idle_softirq(void)
{
static int ratelimit;
unsigned int pending = local_softirq_pending();
if (likely(!pending))
return false;
/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
if (!cpu_active(smp_processor_id())) {
pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
if (!pending)
return false;
}
/* On RT, softirq handling may be waiting on some lock */
if (local_bh_blocked())
return false;
if (ratelimit < 10) {
pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
pending);
ratelimit++;
}
return true;
}
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
WARN_ON_ONCE(cpu_is_offline(cpu));
if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ)))
return false;
if (need_resched())
return false;
if (unlikely(report_idle_softirq()))
return false;
if (tick_nohz_full_enabled()) {
int tick_cpu = READ_ONCE(tick_do_timer_cpu);
/*
* Keep the tick alive to guarantee timekeeping progression
* if there are full dynticks CPUs around
*/
if (tick_cpu == cpu)
return false;
/* Should not happen for nohz-full */
if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE))
return false;
}
return true;
}
/**
* tick_nohz_idle_stop_tick - stop the idle tick from the idle task
*
* When the next event is more than a tick into the future, stop the idle tick
*/
void tick_nohz_idle_stop_tick(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
int cpu = smp_processor_id();
ktime_t expires;
/*
* If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
* tick timer expiration time is known already.
*/
if (ts->timer_expires_base)
expires = ts->timer_expires;
else if (can_stop_idle_tick(cpu, ts))
expires = tick_nohz_next_event(ts, cpu);
else
return;
ts->idle_calls++;
if (expires > 0LL) {
int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
tick_nohz_stop_tick(ts, cpu);
ts->idle_sleeps++;
ts->idle_expires = expires;
if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
ts->idle_jiffies = ts->last_jiffies;
nohz_balance_enter_idle(cpu);
}
} else {
tick_nohz_retain_tick(ts);
}
}
void tick_nohz_idle_retain_tick(void)
{
tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
}
/**
* tick_nohz_idle_enter - prepare for entering idle on the current CPU
*
* Called when we start the idle loop.
*/
void tick_nohz_idle_enter(void)
{
struct tick_sched *ts;
lockdep_assert_irqs_enabled();
local_irq_disable();
ts = this_cpu_ptr(&tick_cpu_sched);
WARN_ON_ONCE(ts->timer_expires_base);
tick_sched_flag_set(ts, TS_FLAG_INIDLE);
tick_nohz_start_idle(ts);
local_irq_enable();
}
/**
* tick_nohz_irq_exit - Notify the tick about IRQ exit
*
* A timer may have been added/modified/deleted either by the current IRQ,
* or by another place using this IRQ as a notification. This IRQ may have
* also updated the RCU callback list. These events may require a
* re-evaluation of the next tick. Depending on the context:
*
* 1) If the CPU is idle and no resched is pending, just proceed with idle
* time accounting. The next tick will be re-evaluated on the next idle
* loop iteration.
*
* 2) If the CPU is nohz_full:
*
* 2.1) If there is any tick dependency, restart the tick if stopped.
*
* 2.2) If there is no tick dependency, (re-)evaluate the next tick and
* stop/update it accordingly.
*/
void tick_nohz_irq_exit(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
tick_nohz_start_idle(ts);
else
tick_nohz_full_update_tick(ts);
}
/**
* tick_nohz_idle_got_tick - Check whether or not the tick handler has run
*
* Return: %true if the tick handler has run, otherwise %false
*/
bool tick_nohz_idle_got_tick(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (ts->got_idle_tick) {
ts->got_idle_tick = 0;
return true;
}
return false;
}
/**
* tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
* or the tick, whichever expires first. Note that, if the tick has been
* stopped, it returns the next hrtimer.
*
* Called from power state control code with interrupts disabled
*
* Return: the next expiration time
*/
ktime_t tick_nohz_get_next_hrtimer(void)
{
return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}
/**
* tick_nohz_get_sleep_length - return the expected length of the current sleep
* @delta_next: duration until the next event if the tick cannot be stopped
*
* Called from power state control code with interrupts disabled.
*
* The return value of this function and/or the value returned by it through the
* @delta_next pointer can be negative which must be taken into account by its
* callers.
*
* Return: the expected length of the current sleep
*/
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
int cpu = smp_processor_id();
/*
* The idle entry time is expected to be a sufficient approximation of
* the current time at this point.
*/
ktime_t now = ts->idle_entrytime;
ktime_t next_event;
WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
*delta_next = ktime_sub(dev->next_event, now);
if (!can_stop_idle_tick(cpu, ts))
return *delta_next;
next_event = tick_nohz_next_event(ts, cpu);
if (!next_event)
return *delta_next;
/*
* If the next highres timer to expire is earlier than 'next_event', the
* idle governor needs to know that.
*/
next_event = min_t(u64, next_event,
hrtimer_next_event_without(&ts->sched_timer));
return ktime_sub(next_event, now);
}
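/*
 * Illustrative sketch (hypothetical caller, kept under "#if 0"): an idle
 * governor has to cope with the possibly negative values documented above
 * before converting the result into a sleep duration.
 */
#if 0
static u64 example_predicted_sleep_ns(void)
{
	ktime_t delta_next;
	ktime_t sleep = tick_nohz_get_sleep_length(&delta_next);

	if (sleep < 0)
		sleep = 0;	/* the next event is already due */

	return (u64)sleep;
}
#endif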
/**
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
* for a particular CPU.
* @cpu: target CPU number
*
* Called from the schedutil frequency scaling governor in scheduler context.
*
* Return: the current idle calls counter value for @cpu
*/
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
struct tick_sched *ts = tick_get_tick_sched(cpu);
return ts->idle_calls;
}
static void tick_nohz_account_idle_time(struct tick_sched *ts,
ktime_t now)
{
unsigned long ticks;
ts->idle_exittime = now;
if (vtime_accounting_enabled_this_cpu())
return;
/*
* We stopped the tick in idle. update_process_times() would miss the
* time we slept, as it only does one tick of accounting.
* Enforce that this is accounted to idle!
*/
ticks = jiffies - ts->idle_jiffies;
/*
* We might be one off. Do not randomly account a huge number of ticks!
*/
if (ticks && ticks < LONG_MAX)
account_idle_ticks(ticks);
}
void tick_nohz_idle_restart_tick(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
ktime_t now = ktime_get();
tick_nohz_restart_sched_tick(ts, now);
tick_nohz_account_idle_time(ts, now);
}
}
static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{
if (tick_nohz_full_cpu(smp_processor_id()))
__tick_nohz_full_update_tick(ts, now);
else
tick_nohz_restart_sched_tick(ts, now);
tick_nohz_account_idle_time(ts, now);
}
/**
* tick_nohz_idle_exit - Update the tick upon idle task exit
*
* When the idle task exits, update the tick depending on the
* following situations:
*
* 1) If the CPU is not in nohz_full mode (most cases), then
* restart the tick.
*
* 2) If the CPU is in nohz_full mode (corner case):
* 2.1) If the tick can be kept stopped (no tick dependencies)
* then re-evaluate the next tick and try to keep it stopped
* as long as possible.
* 2.2) If the tick has dependencies, restart the tick.
*
*/
void tick_nohz_idle_exit(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
bool idle_active, tick_stopped;
ktime_t now;
local_irq_disable();
WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
WARN_ON_ONCE(ts->timer_expires_base);
tick_sched_flag_clear(ts, TS_FLAG_INIDLE);
idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE);
tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
if (idle_active || tick_stopped)
now = ktime_get();
if (idle_active)
tick_nohz_stop_idle(ts, now);
if (tick_stopped)
tick_nohz_idle_update_tick(ts, now);
local_irq_enable();
}
/*
* In low-resolution mode, the tick handler must be implemented directly
* at the clockevent level. hrtimer can't be used instead, because its
* infrastructure actually relies on the tick itself as a backend in
* low-resolution mode (see hrtimer_run_queues()).
*/
static void tick_nohz_lowres_handler(struct clock_event_device *dev)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
dev->next_event = KTIME_MAX;
if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART))
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
static inline void tick_nohz_activate(struct tick_sched *ts)
{
if (!tick_nohz_enabled)
return;
tick_sched_flag_set(ts, TS_FLAG_NOHZ);
/* One update is enough */
if (!test_and_set_bit(0, &tick_nohz_active))
timers_update_nohz();
}
/**
* tick_nohz_switch_to_nohz - switch to NOHZ mode
*/
static void tick_nohz_switch_to_nohz(void)
{
if (!tick_nohz_enabled)
return;
if (tick_switch_to_oneshot(tick_nohz_lowres_handler))
return;
/*
* Recycle the hrtimer in 'ts', so we can share the
* highres code.
*/
tick_setup_sched_timer(false);
}
static inline void tick_nohz_irq_enter(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now;
if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE))
return;
now = ktime_get();
if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))
tick_nohz_stop_idle(ts, now);
/*
* If all CPUs are idle we may need to update a stale jiffies value.
* Note nohz_full is a special case: a timekeeper is guaranteed to stay
* alive but it might be busy looping with interrupts disabled in some
* rare case (typically stop machine). So we must make sure we have a
* last resort.
*/
if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
tick_nohz_update_jiffies(now);
}
#else
static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
* Called from irq_enter() to notify about the possible interruption of idle()
*/
void tick_irq_enter(void)
{
tick_check_oneshot_broadcast_this_cpu();
tick_nohz_irq_enter();
}
static int sched_skew_tick;
static int __init skew_tick(char *str)
{
get_option(&str, &sched_skew_tick);
return 0;
}
early_param("skew_tick", skew_tick);
/**
* tick_setup_sched_timer - setup the tick emulation timer
* @hrtimer: whether to use the hrtimer or not
*/
void tick_setup_sched_timer(bool hrtimer)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
/* Emulate tick processing via per-CPU hrtimers: */
hrtimer_setup(&ts->sched_timer, tick_nohz_handler, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer)
tick_sched_flag_set(ts, TS_FLAG_HIGHRES);
/* Get the next period (per-CPU) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
/* Offset the tick to avert 'jiffies_lock' contention. */
if (sched_skew_tick) {
u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}
hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer)
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
else
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts);
}
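/*
 * Worked example (illustrative only, not part of the kernel logic above):
 * with "skew_tick=1" the per-CPU offset computed in tick_setup_sched_timer()
 * spreads the CPUs over half a tick period.  Assuming HZ=1000
 * (TICK_NSEC = 1000000) and 4 possible CPUs, CPU0..CPU3 are skewed by
 * 0us, 125us, 250us and 375us respectively.
 */
static inline u64 example_skew_tick_offset(int cpu, unsigned int nr_cpus)
{
	u64 offset = TICK_NSEC >> 1;	/* spread the CPUs over half a tick */

	do_div(offset, nr_cpus);	/* per-CPU step, as in the code above */
	return offset * cpu;		/* this CPU's offset in nanoseconds */
}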
/*
* Shut down the tick and make sure the CPU won't try to retake the timekeeping
* duty before disabling IRQs in idle for the last time.
*/
void tick_sched_timer_dying(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t idle_sleeptime, iowait_sleeptime;
unsigned long idle_calls, idle_sleeps;
/* This must happen before hrtimers are migrated! */
if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
hrtimer_cancel(&ts->sched_timer);
idle_sleeptime = ts->idle_sleeptime;
iowait_sleeptime = ts->iowait_sleeptime;
idle_calls = ts->idle_calls;
idle_sleeps = ts->idle_sleeps;
memset(ts, 0, sizeof(*ts));
ts->idle_sleeptime = idle_sleeptime;
ts->iowait_sleeptime = iowait_sleeptime;
ts->idle_calls = idle_calls;
ts->idle_sleeps = idle_sleeps;
}
/*
* Async notification about clocksource changes
*/
void tick_clock_notify(void)
{
int cpu;
for_each_possible_cpu(cpu)
set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
/*
* Async notification about clock event changes
*/
void tick_oneshot_notify(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
set_bit(0, &ts->check_clocks);
}
/*
* Check if a change happened, which makes oneshot possible.
*
* Called cyclically from the hrtimer softirq (driven by the timer
* softirq). 'allow_nohz' signals that we can switch into low-res NOHZ
* mode, because high resolution timers are disabled (either compile
* or runtime). Called with interrupts disabled.
*/
int tick_check_oneshot_change(int allow_nohz)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (!test_and_clear_bit(0, &ts->check_clocks))
return 0;
if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
return 0;
if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
return 0;
if (!allow_nohz)
return 1;
tick_nohz_switch_to_nohz();
return 0;
}
/*
* Performance events:
*
* Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
*
* Data type definitions, declarations, prototypes.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H
#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>
/*
* Kernel-internal data types and definitions:
*/
#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
# include <linux/rhashtable-types.h>
# include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
#include <linux/static_call.h>
#include <linux/lockdep.h>
#include <asm/local.h>
struct perf_callchain_entry {
u64 nr;
u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
struct perf_callchain_entry *entry;
u32 max_stack;
u32 nr;
short contexts;
bool contexts_maxed;
};
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
unsigned long off, unsigned long len);
struct perf_raw_frag {
union {
struct perf_raw_frag *next;
unsigned long pad;
};
perf_copy_f copy;
void *data;
u32 size;
} __packed;
struct perf_raw_record {
struct perf_raw_frag frag;
u32 size;
};
static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
return frag->pad < sizeof(u64);
}
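/*
 * Illustrative sketch (hypothetical helper, not part of the upstream API):
 * a raw record can be assembled from several fragments chained through
 * perf_raw_frag::next.  The final fragment leaves 'next' unset, so the
 * 'pad' alias in the union stays below sizeof(u64) and
 * perf_raw_frag_last() terminates the walk; a NULL 'copy' callback is
 * assumed to mean the fragment data is copied verbatim from 'data'.
 */
static inline void example_init_two_frag_raw(struct perf_raw_record *raw,
					     void *hdr, u32 hdr_len,
					     void *payload, u32 payload_len,
					     struct perf_raw_frag *tail)
{
	*tail = (struct perf_raw_frag){
		.data = payload,
		.size = payload_len,
	};
	raw->frag = (struct perf_raw_frag){
		.next = tail,		/* more data follows */
		.data = hdr,
		.size = hdr_len,
	};
	/* raw->size is filled in later, e.g. by perf_sample_save_raw_data(). */
}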
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
* hw_idx: The low level index of raw branch records
* for the most recent branch.
* -1ULL means invalid/unknown.
*
* Note that nr can vary from sample to sample.
* Branches (to, from) are stored from most recent
* to least recent, i.e., entries[0] contains the most
* recent branch.
* The entries[] is an abstraction of raw branch records,
* which may not be stored in age order in HW, e.g. Intel LBR.
* The hw_idx is to expose the low level index of raw
* branch record for the most recent branch aka entries[0].
* The hw_idx index is between -1 (unknown) and max depth,
* which can be retrieved in /sys/devices/cpu/caps/branches.
* For the architectures whose raw branch records are
* already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
u64 nr;
u64 hw_idx;
struct perf_branch_entry entries[];
};
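/*
 * Illustrative sketch (hypothetical helper, not part of the upstream API):
 * walk a branch stack from the most recent branch (entries[0]) to the
 * oldest one, as described by the layout comment above.
 */
static inline u64 example_count_mispredicted(const struct perf_branch_stack *bs)
{
	u64 i, mispredicted = 0;

	for (i = 0; i < bs->nr; i++)	/* entries[0] is the newest branch */
		if (bs->entries[i].mispred)
			mispredicted++;

	return mispredicted;
}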
struct task_struct;
/*
* extra PMU register associated with an event
*/
struct hw_perf_event_extra {
u64 config; /* register value */
unsigned int reg; /* register address or index */
int alloc; /* extra register already allocated */
int idx; /* index in shared_regs->regs[] */
};
/**
* hw_perf_event::flag values
*
* PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
* usage.
*/
#define PERF_EVENT_FLAG_ARCH 0x0fffffff
#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
/**
* struct hw_perf_event - performance event hardware details:
*/
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
union {
struct { /* hardware */
u64 config;
u64 config1;
u64 last_tag;
u64 dyn_constraint;
unsigned long config_base;
unsigned long event_base;
int event_base_rdpmc;
int idx;
int last_cpu;
int flags;
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
};
struct { /* aux / Intel-PT */
u64 aux_config;
/*
* For AUX area events, aux_paused cannot be a state
* flag because it can be updated asynchronously to
* state.
*/
unsigned int aux_paused;
};
struct { /* software */
struct hrtimer hrtimer;
};
struct { /* tracepoint */
/* for tp_event->class */
struct list_head tp_list;
};
struct { /* amd_power */
u64 pwr_acc;
u64 ptsc;
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
* Crufty hack to avoid the chicken and egg
* problem hw_breakpoint has with context
* creation and event initialization.
*/
struct arch_hw_breakpoint info;
struct rhlist_head bp_list;
};
#endif
struct { /* amd_iommu */
u8 iommu_bank;
u8 iommu_cntr;
u16 padding;
u64 conf;
u64 conf1;
};
};
/*
* If the event is a per task event, this will point to the task in
* question. See the comment in perf_event_alloc().
*/
struct task_struct *target;
/*
* PMU would store hardware filter configuration
* here.
*/
void *addr_filters;
/* Last sync'ed generation of filters */
unsigned long addr_filters_gen;
/*
* hw_perf_event::state flags; used to track the PERF_EF_* state.
*/
/* the counter is stopped */
#define PERF_HES_STOPPED 0x01
/* event->count up-to-date */
#define PERF_HES_UPTODATE 0x02
#define PERF_HES_ARCH 0x04
int state;
/*
* The last observed hardware counter value, updated with a
* local64_cmpxchg() such that pmu::read() can be called nested.
*/
local64_t prev_count;
/*
* The period to start the next sample with.
*/
u64 sample_period;
union {
struct { /* Sampling */
/*
* The period we started this sample with.
*/
u64 last_period;
/*
* However much is left of the current period;
* note that this is a full 64bit value and
* allows for generation of periods longer
* than hardware might allow.
*/
local64_t period_left;
};
struct { /* Topdown events counting for context switch */
u64 saved_metric;
u64 saved_slots;
};
};
/*
* State for throttling the event, see __perf_event_overflow() and
* perf_adjust_freq_unthr_context().
*/
u64 interrupts_seq;
u64 interrupts;
/*
* State for freq target events, see __perf_event_overflow() and
* perf_adjust_freq_unthr_context().
*/
u64 freq_time_stamp;
u64 freq_count_stamp;
#endif /* CONFIG_PERF_EVENTS */
};
struct perf_event;
struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_ADD 0x1
/* txn to read event group from PMU */
#define PERF_PMU_TXN_READ 0x2
/**
* pmu::capabilities flags
*/
#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
#define PERF_PMU_CAP_NO_NMI 0x0002
#define PERF_PMU_CAP_AUX_NO_SG 0x0004
#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
#define PERF_PMU_CAP_EXCLUSIVE 0x0010
#define PERF_PMU_CAP_ITRACE 0x0020
#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
#define PERF_PMU_CAP_AUX_PAUSE 0x0200
#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
/**
* pmu::scope
*/
enum perf_pmu_scope {
PERF_PMU_SCOPE_NONE = 0,
PERF_PMU_SCOPE_CORE,
PERF_PMU_SCOPE_DIE,
PERF_PMU_SCOPE_CLUSTER,
PERF_PMU_SCOPE_PKG,
PERF_PMU_SCOPE_SYS_WIDE,
PERF_PMU_MAX_SCOPE,
};
struct perf_output_handle;
#define PMU_NULL_DEV ((void *)(~0UL))
/**
* struct pmu - generic performance monitoring unit
*/
struct pmu {
struct list_head entry;
spinlock_t events_lock;
struct list_head events;
struct module *module;
struct device *dev;
struct device *parent;
const struct attribute_group **attr_groups;
const struct attribute_group **attr_update;
const char *name;
int type;
/*
* various common per-pmu feature flags
*/
int capabilities;
/*
* PMU scope
*/
unsigned int scope;
struct perf_cpu_pmu_context * __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
/* number of address filters this PMU can do */
unsigned int nr_addr_filters;
/*
* Fully disable/enable this PMU, can be used to protect from the PMI
* as well as for lazy/batch writing of the MSRs.
*/
void (*pmu_enable) (struct pmu *pmu); /* optional */
void (*pmu_disable) (struct pmu *pmu); /* optional */
/*
* Try and initialize the event for this PMU.
*
* Returns:
* -ENOENT -- @event is not for this PMU
*
* -ENODEV -- @event is for this PMU but PMU not present
* -EBUSY -- @event is for this PMU but PMU temporarily unavailable
* -EINVAL -- @event is for this PMU but @event is not valid
* -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
* -EACCES -- @event is for this PMU, @event is valid, but no privileges
*
* 0 -- @event is for this PMU and valid
*
* Other error return values are allowed.
*/
int (*event_init) (struct perf_event *event);
/*
* Notification that the event was mapped or unmapped. Called
* in the context of the mapping task.
*/
void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
/*
* Flags for ->add()/->del()/->start()/->stop(). There are
* matching hw_perf_event::state flags.
*/
/* start the counter when adding */
#define PERF_EF_START 0x01
/* reload the counter when starting */
#define PERF_EF_RELOAD 0x02
/* update the counter when stopping */
#define PERF_EF_UPDATE 0x04
/* AUX area event, pause tracing */
#define PERF_EF_PAUSE 0x08
/* AUX area event, resume tracing */
#define PERF_EF_RESUME 0x10
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
* transaction, see the ->*_txn() methods.
*
* The add/del callbacks will reserve all hardware resources required
* to service the event, this includes any counter constraint
* scheduling etc.
*
* Called with IRQs disabled and the PMU disabled on the CPU the event
* is on.
*
* ->add() called without PERF_EF_START should result in the same state
* as ->add() followed by ->stop().
*
* ->del() must always PERF_EF_UPDATE stop an event. If it calls
* ->stop() that must deal with already being stopped without
* PERF_EF_UPDATE.
*/
int (*add) (struct perf_event *event, int flags);
void (*del) (struct perf_event *event, int flags);
/*
* Starts/Stops a counter present on the PMU.
*
* The PMI handler should stop the counter when perf_event_overflow()
* returns !0. ->start() will be used to continue.
*
* Also used to change the sample period.
*
* Called with IRQs disabled and the PMU disabled on the CPU the event
* is on -- will be called from NMI context when the PMU generates
* NMIs.
*
* ->stop() with PERF_EF_UPDATE will read the counter and update
* period/count values like ->read() would.
*
* ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
*
* ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not
* overlap another ->stop() with PERF_EF_PAUSE nor ->start() with
* PERF_EF_RESUME.
*
* ->start() with PERF_EF_RESUME will start as simply as possible but
* only if the counter is not otherwise stopped. Will not overlap
* another ->start() with PERF_EF_RESUME nor ->stop() with
* PERF_EF_PAUSE.
*
* Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other
* ->stop()/->start() invocations, just not itself.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
/*
* Updates the counter value of the event.
*
* For sampling capable PMUs this will also update the software period
* hw_perf_event::period_left field.
*/
void (*read) (struct perf_event *event);
/*
* Group events scheduling is treated as a transaction, add
* group events as a whole and perform one schedulability test.
* If the test fails, roll back the whole group
*
* Start the transaction, after this ->add() doesn't need to
* do schedulability tests.
*
* Optional.
*/
void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);
/*
* If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*
* Optional.
*/
int (*commit_txn) (struct pmu *pmu);
/*
* Will cancel the transaction, assumes ->del() is called
* for each successful ->add() during the transaction.
*
* Optional.
*/
void (*cancel_txn) (struct pmu *pmu);
/*
* Will return the value for perf_event_mmap_page::index for this event,
* if no implementation is provided it will default to 0 (see
* perf_event_idx_default).
*/
int (*event_idx) (struct perf_event *event); /* optional */
/*
* context-switches callback
*/
void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
struct task_struct *task, bool sched_in);
/*
* Kmem cache of PMU specific data
*/
struct kmem_cache *task_ctx_cache;
/*
* Set up pmu-private data structures for an AUX area
*/
void *(*setup_aux) (struct perf_event *event, void **pages,
int nr_pages, bool overwrite);
/* optional */
/*
* Free pmu-private AUX data structures
*/
void (*free_aux) (void *aux); /* optional */
/*
* Take a snapshot of the AUX buffer without touching the event
* state, so that preempting ->start()/->stop() callbacks does
* not interfere with their logic. Called in PMI context.
*
* Returns the size of AUX data copied to the output handle.
*
* Optional.
*/
long (*snapshot_aux) (struct perf_event *event,
struct perf_output_handle *handle,
unsigned long size);
/*
* Validate address range filters: make sure the HW supports the
* requested configuration and number of filters; return 0 if the
* supplied filters are valid, -errno otherwise.
*
* Runs in the context of the ioctl()ing process and is not serialized
* with the rest of the PMU callbacks.
*/
int (*addr_filters_validate) (struct list_head *filters);
/* optional */
/*
* Synchronize address range filter configuration:
* translate hw-agnostic filters into hardware configuration in
* event::hw::addr_filters.
*
* Runs as a part of filter sync sequence that is done in ->start()
* callback by calling perf_event_addr_filters_sync().
*
* May (and should) traverse event::addr_filters::list, for which its
* caller provides necessary serialization.
*/
void (*addr_filters_sync) (struct perf_event *event);
/* optional */
/*
* Check if event can be used for aux_output purposes for
* events of this PMU.
*
* Runs from perf_event_open(). Should return 0 for "no match"
* or non-zero for "match".
*/
int (*aux_output_match) (struct perf_event *event);
/* optional */
/*
* Skip programming this PMU on the given CPU. Typically needed for
* big.LITTLE things.
*/
bool (*filter) (struct pmu *pmu, int cpu); /* optional */
/*
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
*/
int (*check_period) (struct perf_event *event, u64 value); /* optional */
};
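/*
 * Illustrative sketch (hypothetical driver code, not part of this header,
 * kept under "#if 0"): the ->start_txn()/->commit_txn()/->cancel_txn()
 * contract described above lets a driver defer the group schedulability
 * test.  For a PERF_PMU_TXN_ADD transaction, ->add() only queues events
 * and the single test happens in ->commit_txn(); ->cancel_txn() undoes
 * the queued ->add() calls.  All example_* names are assumptions.
 */
#if 0
struct example_pmu {
	struct pmu	pmu;
	unsigned int	txn_flags;
	int		n_queued;	/* counters queued by ->add() */
};

static void example_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct example_pmu *ep = container_of(pmu, struct example_pmu, pmu);

	ep->txn_flags = txn_flags;
	if (txn_flags == PERF_PMU_TXN_ADD)
		perf_pmu_disable(pmu);		/* batch the hardware writes */
}

static int example_commit_txn(struct pmu *pmu)
{
	struct example_pmu *ep = container_of(pmu, struct example_pmu, pmu);

	if (ep->txn_flags != PERF_PMU_TXN_ADD)
		return 0;

	/* One schedulability test for the whole group. */
	if (ep->n_queued > 4)		/* assumed counter budget */
		return -EAGAIN;		/* ->cancel_txn() will clean up */

	ep->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}
#endif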
enum perf_addr_filter_action_t {
PERF_ADDR_FILTER_ACTION_STOP = 0,
PERF_ADDR_FILTER_ACTION_START,
PERF_ADDR_FILTER_ACTION_FILTER,
};
/**
* struct perf_addr_filter - address range filter definition
* @entry: event's filter list linkage
* @path: object file's path for file-based filters
* @offset: filter range offset
* @size: filter range size (size==0 means single address trigger)
* @action: filter/start/stop
*
* This is a hardware-agnostic filter configuration as specified by the user.
*/
struct perf_addr_filter {
struct list_head entry;
struct path path;
unsigned long offset;
unsigned long size;
enum perf_addr_filter_action_t action;
};
/**
* struct perf_addr_filters_head - container for address range filters
* @list: list of filters for this event
* @lock: spinlock that serializes accesses to the @list and event's
* (and its children's) filter generations.
* @nr_file_filters: number of file-based filters
*
* A child event will use parent's @list (and therefore @lock), so they are
* bundled together; see perf_event_addr_filters().
*/
struct perf_addr_filters_head {
struct list_head list;
raw_spinlock_t lock;
unsigned int nr_file_filters;
};
struct perf_addr_filter_range {
unsigned long start;
unsigned long size;
};
/*
* The normal states are:
*
*            ACTIVE    --.
*               ^        |
*               |        |
*      sched_{in,out}()  |
*               |        |
*               v        |
*         ,---> INACTIVE --+ <-.
*         |                |    |
*         |                {dis,en}able()
*         sched_in()       |    |
*         |          OFF  <--'  |
*         |                |    |
*         `---> ERROR ------'
*
* That is:
*
* sched_in: INACTIVE -> {ACTIVE,ERROR}
* sched_out: ACTIVE -> INACTIVE
* disable: {ACTIVE,INACTIVE} -> OFF
* enable: {OFF,ERROR} -> INACTIVE
*
* Where {OFF,ERROR} are disabled states.
*
* Then we have the {EXIT,REVOKED,DEAD} states which are various shades of
* defunct events:
*
* - EXIT means the task that the event was assigned to died, but child events
* still live, and further children can still be created. But the event
* itself will never be active again. It can only transition to
* {REVOKED,DEAD};
*
* - REVOKED means the PMU the event was associated with is gone; all
* functionality is stopped but the event is still alive. Can only
* transition to DEAD;
*
* - DEAD means the event really is dying: tearing down state and freeing bits.
*
*/
enum perf_event_state {
PERF_EVENT_STATE_DEAD = -5,
PERF_EVENT_STATE_REVOKED = -4, /* pmu gone, must not touch */
PERF_EVENT_STATE_EXIT = -3, /* task died, still inherit */
PERF_EVENT_STATE_ERROR = -2, /* scheduling error, can enable */
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
};
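/*
 * Illustrative sketch (hypothetical helper, not part of the upstream API):
 * per the comment above, EXIT, REVOKED and DEAD all describe defunct
 * events; only OFF/ERROR/INACTIVE/ACTIVE events can become active again.
 */
static inline bool example_event_state_is_defunct(enum perf_event_state state)
{
	return state <= PERF_EVENT_STATE_EXIT;	/* EXIT, REVOKED or DEAD */
}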
struct file;
struct perf_sample_data;
typedef void (*perf_overflow_handler_t)(struct perf_event *,
struct perf_sample_data *,
struct pt_regs *regs);
/*
* Event capabilities. For event_caps and groups caps.
*
* PERF_EV_CAP_SOFTWARE: Is a software event.
* PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
* from any CPU in the package where it is active.
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
* cannot be a group leader. If an event with this flag is detached from the
* group it is scheduled out and moved into an unrecoverable ERROR state.
* PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
* PMU scope where it is active.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define PERF_EV_CAP_SIBLING BIT(2)
#define PERF_EV_CAP_READ_SCOPE BIT(3)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
struct swevent_hlist {
struct hlist_head heads[SWEVENT_HLIST_SIZE];
struct rcu_head rcu_head;
};
#define PERF_ATTACH_CONTEXT 0x0001
#define PERF_ATTACH_GROUP 0x0002
#define PERF_ATTACH_TASK 0x0004
#define PERF_ATTACH_TASK_DATA 0x0008
#define PERF_ATTACH_GLOBAL_DATA 0x0010
#define PERF_ATTACH_SCHED_CB 0x0020
#define PERF_ATTACH_CHILD 0x0040
#define PERF_ATTACH_EXCLUSIVE 0x0080
#define PERF_ATTACH_CALLCHAIN 0x0100
#define PERF_ATTACH_ITRACE 0x0200
struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;
struct pmu_event_list {
raw_spinlock_t lock;
struct list_head list;
};
/*
* event->sibling_list is modified while holding both ctx->lock and ctx->mutex
* as such iteration must hold either lock. However, since ctx->lock is an IRQ
* safe lock, and is only held by the CPU doing the modification, having IRQs
* disabled is sufficient since it will hold-off the IPIs.
*/
#ifdef CONFIG_PROVE_LOCKING
# define lockdep_assert_event_ctx(event) \
WARN_ON_ONCE(__lockdep_enabled && \
(this_cpu_read(hardirqs_enabled) && \
lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
#else
# define lockdep_assert_event_ctx(event)
#endif
#define for_each_sibling_event(sibling, event) \
lockdep_assert_event_ctx(event); \
if ((event)->group_leader == (event)) \
list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
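/*
 * Illustrative usage sketch (hypothetical function, kept under "#if 0"
 * because struct perf_event is only defined below): sum the group leader
 * and every sibling while holding ctx->mutex, as the locking comment
 * above requires.
 */
#if 0
static u64 example_group_total_count(struct perf_event *leader)
{
	struct perf_event *sibling;
	u64 total = local64_read(&leader->count);

	for_each_sibling_event(sibling, leader)
		total += local64_read(&sibling->count);

	return total;
}
#endif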
/**
* struct perf_event - performance event kernel representation:
*/
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
/*
* entry onto perf_event_context::event_list;
* modifications require ctx->lock
* RCU safe iterations.
*/
struct list_head event_entry;
/*
* Locked for modification by both ctx->mutex and ctx->lock; holding
* either suffices for read.
*/
struct list_head sibling_list;
struct list_head active_list;
/*
* Node on the pinned or flexible tree located at the event context;
*/
struct rb_node group_node;
u64 group_index;
/*
* We need storage to track the entries in perf_pmu_migrate_context; we
* cannot use the event_entry because of RCU and we want to keep the
* group intact which avoids us using the other two entries.
*/
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
/* Not serialized. Only written during event initialization. */
int event_caps;
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
unsigned int group_generation;
struct perf_event *group_leader;
/*
* event->pmu will always point to pmu in which this event belongs.
* Whereas event->pmu_ctx->pmu may point to other pmu when group of
* different pmu events is created.
*/
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
/*
* These are the total time in nanoseconds that the event
* has been enabled (i.e. eligible to run, and the task has
* been scheduled in, if this is a per-task event)
* and running (scheduled onto the CPU), respectively.
*/
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
/*
* event->pmu_ctx points to perf_event_pmu_context in which the event
* is added. This pmu_ctx can be of other pmu for sw event when that
* sw event is part of a group which also contains non-sw events.
*/
struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
/*
* These accumulate total time (in nanoseconds) that children
* events have been enabled and running, respectively.
*/
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
/*
* Protect attach/detach and child_list:
*/
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
/* mmap bits */
struct mutex mmap_mutex;
refcount_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
unsigned long rcu_batches;
int rcu_pending;
/* poll related */
wait_queue_head_t waitq;
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
unsigned int pending_wakeup;
unsigned int pending_kill;
unsigned int pending_disable;
unsigned long pending_addr; /* SIGTRAP */
struct irq_work pending_irq;
struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
atomic_t event_limit;
/* address range filters */
struct perf_addr_filters_head addr_filters;
/* vma address array for file-based filters */
struct perf_addr_filter_range *addr_filter_ranges;
unsigned long addr_filters_gen;
/* for aux_output events */
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct rcu_head rcu_head;
struct pid_namespace *ns;
u64 id;
atomic64_t lost_samples;
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
struct bpf_prog *prog;
u64 bpf_cookie;
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
struct event_filter *filter;
# ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
# endif
#endif
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp; /* cgroup event is attach to */
#endif
#ifdef CONFIG_SECURITY
void *security;
#endif
struct list_head sb_list;
struct list_head pmu_list;
/*
* Certain events get forwarded to another pmu internally by over-
* writing the kernel copy of event->attr.type without the user being
* aware of it. event->orig_type contains the original 'type' requested
* by the user.
*/
u32 orig_type;
#endif /* CONFIG_PERF_EVENTS */
};
/*
* ,-----------------------[1:n]------------------------.
* V V
* perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
* | |
* `--[n:1]-> pmu <-[1:n]--'
*
*
* struct perf_event_pmu_context lifetime is refcount based and RCU freed
* (similar to perf_event_context). Locking is as if it were a member of
* perf_event_context; specifically:
*
* modification, both: ctx->mutex && ctx->lock
* reading, either: ctx->mutex || ctx->lock
*
* There is one exception to this; namely put_pmu_ctx() isn't always called
* with ctx->mutex held; this means that as long as we can guarantee the epc
* has events the above rules hold.
*
* Specifically, sys_perf_event_open()'s group_leader case depends on
* ctx->mutex pinning the configuration. Since we hold a reference on
* group_leader (through the filedesc) it can't go away, therefore its
* associated pmu_ctx must exist and cannot change due to ctx->mutex.
*
* perf_event holds a refcount on perf_event_context
* perf_event holds a refcount on perf_event_pmu_context
*/
struct perf_event_pmu_context {
struct pmu *pmu;
struct perf_event_context *ctx;
struct list_head pmu_ctx_entry;
struct list_head pinned_active;
struct list_head flexible_active;
/* Used to identify the per-cpu perf_event_pmu_context */
unsigned int embedded : 1;
unsigned int nr_events;
unsigned int nr_cgroups;
unsigned int nr_freq;
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
/*
* Set when one or more (plausibly active) events can't be scheduled
* due to PMU overcommit or PMU constraints, but tolerant of events
* that don't need to be active due to scheduling constraints, such
* as cgroups.
*/
int rotate_necessary;
};
static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
{
return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active);
}
struct perf_event_groups {
struct rb_root tree;
u64 index;
};
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
/*
* Protect the states of the events in the list,
* nr_active, and the list:
*/
raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
* the list you need to lock both the mutex and the spinlock.
*/
struct mutex mutex;
struct list_head pmu_ctx_list;
struct perf_event_groups pinned_groups;
struct perf_event_groups flexible_groups;
struct list_head event_list;
int nr_events;
int nr_user;
int is_active;
int nr_stat;
int nr_freq;
int rotate_disable;
refcount_t refcount; /* event <-> ctx */
struct task_struct *task;
/*
* Context clock, runs when context enabled.
*/
u64 time;
u64 timestamp;
u64 timeoffset;
/*
* These fields let us detect when two contexts have both
* been cloned (inherited) from a common ancestor.
*/
struct perf_event_context *parent_ctx;
u64 parent_gen;
u64 generation;
int pin_count;
#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
#endif
struct rcu_head rcu_head;
/*
* The count of events for which using the switch-out fast path
* should be avoided.
*
* Sum (event->pending_work + events with
* (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
*
* The SIGTRAP is targeted at ctx->task, so it won't do to change
* that until the signal is delivered.
*/
local_t nr_no_switch_fast;
};
/**
* struct perf_ctx_data - PMU specific data for a task
* @rcu_head: To avoid the race on free PMU specific data
* @refcount: To track users
* @global: To track system-wide users
* @ctx_cache: Kmem cache of PMU specific data
* @data: PMU specific data
*
* Currently, the struct is only used in Intel LBR call stack mode to
* save/restore the call stack of a task on context switches.
*
* The rcu_head is used to prevent races on freeing the data.
* The data is only allocated when Intel LBR call stack mode is enabled.
* The data will be freed when the mode is disabled.
* The content of the data will only be accessed in context switch, which
* should be protected by rcu_read_lock().
*
* Because of the alignment requirement of Intel Arch LBR, the Kmem cache
* is used to allocate the PMU specific data. The ctx_cache is to track
* the Kmem cache.
*
* Careful: Struct perf_ctx_data is added as a pointer in struct task_struct.
* When system-wide Intel LBR call stack mode is enabled, a buffer with
* constant size will be allocated for each task.
* Also, system memory consumption can further grow when the size of
* struct perf_ctx_data enlarges.
*/
struct perf_ctx_data {
struct rcu_head rcu_head;
refcount_t refcount;
int global;
struct kmem_cache *ctx_cache;
void *data;
};
struct perf_cpu_pmu_context {
struct perf_event_pmu_context epc;
struct perf_event_pmu_context *task_epc;
struct list_head sched_cb_entry;
int sched_cb_usage;
int active_oncpu;
int exclusive;
int pmu_disable_count;
raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
unsigned int hrtimer_active;
};
/**
* struct perf_event_cpu_context - per cpu event context structure
*/
struct perf_cpu_context {
struct perf_event_context ctx;
struct perf_event_context *task_ctx;
int online;
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
#endif
/*
* Per-CPU storage for iterators used in visit_groups_merge. The default
* storage is of size 2 to hold the CPU and any CPU event iterators.
*/
int heap_size;
struct perf_event **heap;
struct perf_event *heap_default[2];
};
struct perf_output_handle {
struct perf_event *event;
struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
union {
u64 flags; /* perf_output*() */
u64 aux_flags; /* perf_aux_output*() */
struct {
u64 skip_read : 1;
};
};
union {
void *addr;
unsigned long head;
};
int page;
};
struct bpf_perf_event_data_kern {
bpf_user_pt_regs_t *regs;
struct perf_sample_data *data;
struct perf_event *event;
};
#ifdef CONFIG_CGROUP_PERF
/*
* perf_cgroup_info keeps track of time_enabled for a cgroup.
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
u64 time;
u64 timestamp;
u64 timeoffset;
int active;
};
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info __percpu *info;
};
/*
* Must ensure cgroup is pinned (css_get) before calling
* this function. In other words, we cannot call this function
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
return container_of(task_css_check(task, perf_event_cgrp_id,
ctx ? lockdep_is_held(&ctx->lock)
: true),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
#ifdef CONFIG_PERF_EVENTS
extern struct perf_event_context *perf_cpu_task_ctx(void);
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern int perf_pmu_unregister(struct pmu *pmu);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_pmu_resched(struct pmu *pmu);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
struct task_struct *task,
perf_overflow_handler_t callback,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
extern int perf_event_read_local(struct perf_event *event, u64 *value,
u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
static inline bool branch_sample_no_flags(const struct perf_event *event)
{
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
}
static inline bool branch_sample_no_cycles(const struct perf_event *event)
{
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
}
static inline bool branch_sample_type(const struct perf_event *event)
{
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
}
static inline bool branch_sample_hw_index(const struct perf_event *event)
{
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}
static inline bool branch_sample_priv(const struct perf_event *event)
{
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
}
static inline bool branch_sample_counters(const struct perf_event *event)
{
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
}
static inline bool branch_sample_call_stack(const struct perf_event *event)
{
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}
struct perf_sample_data {
/*
* Fields set by perf_sample_data_init() unconditionally,
* group so as to minimize the cachelines touched.
*/
u64 sample_flags;
u64 period;
u64 dyn_size;
/*
* Fields commonly set by __perf_event_header__init_id(),
* group so as to minimize the cachelines touched.
*/
u64 type;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
u64 id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
/*
* The other fields, optionally {set,used} by
* perf_{prepare,output}_sample().
*/
u64 ip;
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
u64 *br_stack_cntr;
union perf_sample_weight weight;
union perf_mem_data_src data_src;
u64 txn;
struct perf_regs regs_user;
struct perf_regs regs_intr;
u64 stack_user_size;
u64 stream_id;
u64 cgroup;
u64 addr;
u64 phys_addr;
u64 data_page_size;
u64 code_page_size;
u64 aux_size;
} ____cacheline_aligned;
/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
PERF_MEM_S(LVL, NA) |\
PERF_MEM_S(SNOOP, NA) |\
PERF_MEM_S(LOCK, NA) |\
PERF_MEM_S(TLB, NA) |\
PERF_MEM_S(LVLNUM, NA))
static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{
/* remaining struct members initialized in perf_prepare_sample() */
data->sample_flags = PERF_SAMPLE_PERIOD;
data->period = period;
data->dyn_size = 0;
if (addr) {
data->addr = addr;
data->sample_flags |= PERF_SAMPLE_ADDR;
}
}
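/*
 * Illustrative sketch (hypothetical interrupt-handler code, kept under
 * "#if 0"): a typical PMU driver initializes a perf_sample_data with the
 * current period and hands it to perf_event_overflow(); a non-zero return
 * asks the driver to stop (throttle) the event.
 */
#if 0
static void example_handle_overflow(struct perf_event *event,
				    struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct hw_perf_event *hwc = &event->hw;

	perf_sample_data_init(&data, 0, hwc->last_period);

	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);	/* core asked us to throttle */
}
#endif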
static inline void perf_sample_save_callchain(struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
int size = 1;
if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
return;
if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_CALLCHAIN))
return;
data->callchain = perf_callchain(event, regs);
size += data->callchain->nr;
data->dyn_size += size * sizeof(u64);
data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
}
static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
struct perf_event *event,
struct perf_raw_record *raw)
{
struct perf_raw_frag *frag = &raw->frag;
u32 sum = 0;
int size;
if (!(event->attr.sample_type & PERF_SAMPLE_RAW))
return;
if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW))
return;
do {
sum += frag->size;
if (perf_raw_frag_last(frag))
break;
frag = frag->next;
} while (1);
size = round_up(sum + sizeof(u32), sizeof(u64));
raw->size = size - sizeof(u32);
frag->pad = raw->size - sum;
data->raw = raw;
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_RAW;
}
static inline bool has_branch_stack(struct perf_event *event)
{
return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
struct perf_event *event,
struct perf_branch_stack *brs,
u64 *brs_cntr)
{
int size = sizeof(u64); /* nr */
if (!has_branch_stack(event))
return;
if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_BRANCH_STACK))
return;
if (branch_sample_hw_index(event))
size += sizeof(u64);
brs->nr = min_t(u16, event->attr.sample_max_stack, brs->nr);
size += brs->nr * sizeof(struct perf_branch_entry);
/*
* The extension space for counters is appended after the
* struct perf_branch_stack. It is used to store the occurrences
* of events of each branch.
*/
if (brs_cntr)
size += brs->nr * sizeof(u64);
data->br_stack = brs;
data->br_stack_cntr = brs_cntr;
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
static inline u32 perf_sample_data_size(struct perf_sample_data *data,
struct perf_event *event)
{
u32 size = sizeof(struct perf_event_header);
size += event->header_size + event->id_header_size;
size += data->dyn_size;
return size;
}
/*
* Clear all bitfields in the perf_branch_entry.
* The to and from fields are not cleared because they are
* systematically modified by caller.
*/
static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
{
br->mispred = 0;
br->predicted = 0;
br->in_tx = 0;
br->abort = 0;
br->cycles = 0;
br->type = 0;
br->spec = PERF_BR_SPEC_NA;
br->reserved = 0;
}
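/*
 * Illustrative usage sketch (hypothetical driver code, not part of the
 * upstream API): the caller fills in 'from' and 'to' and then decorates
 * only the flags it actually knows, as the comment above prescribes.
 */
static inline void example_fill_branch_entry(struct perf_branch_entry *br,
					     u64 from, u64 to, bool mispred)
{
	perf_clear_branch_entry_bitfields(br);
	br->from = from;
	br->to = to;
	br->mispred = mispred;
	br->predicted = !mispred;
}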
extern void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
extern void perf_prepare_sample(struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);
extern void perf_prepare_header(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);
extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
extern void perf_event_output_forward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
static inline bool
is_default_overflow_handler(struct perf_event *event)
{
perf_overflow_handler_t overflow_handler = event->overflow_handler;
if (likely(overflow_handler == perf_event_output_forward))
return true;
if (unlikely(overflow_handler == perf_event_output_backward))
return true;
return false;
}
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample);
extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);
static inline bool event_has_any_exclude_flag(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
return attr->exclude_idle || attr->exclude_user ||
attr->exclude_kernel || attr->exclude_hv ||
attr->exclude_guest || attr->exclude_host;
}
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
}
/*
* Return 1 for a software event, 0 for a hardware event
*/
static inline int is_software_event(struct perf_event *event)
{
return event->event_caps & PERF_EV_CAP_SOFTWARE;
}
/*
* Return 1 for event in sw context, 0 for event in hw context
*/
static inline int in_software_context(struct perf_event *event)
{
return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
}
static inline int is_exclusive_pmu(struct pmu *pmu)
{
return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif
/*
* When generating a perf sample in-line, instead of from an interrupt /
* exception, we lack a pt_regs. This is typically used from software events
* like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
*
* We typically don't need a full set, but (for x86) do require:
* - ip for PERF_SAMPLE_IP
* - cs for user_mode() tests
* - sp for PERF_SAMPLE_CALLCHAIN
* - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
*
* NOTE: assumes @regs is otherwise already 0 filled; this is important for
* things like PERF_SAMPLE_REGS_INTR.
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
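/*
* Illustrative sketch (not part of this header): generating an in-line
* software sample with caller registers. The pt_regs must start out
* zero-filled, as the NOTE above requires; event_id and addr are
* hypothetical.
*
*   struct pt_regs regs;
*
*   memset(&regs, 0, sizeof(regs));
*   perf_fetch_caller_regs(&regs);
*   perf_sw_event(event_id, 1, &regs, addr);
*
* The per-CPU __perf_regs helpers below follow the same pattern.
*/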
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
if (static_key_false(&perf_swevent_enabled[event_id]))
__perf_sw_event(event_id, nr, regs, addr);
}
DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
/*
* 'Special' version for the scheduler; it hard-assumes no recursion,
* which is guaranteed because we never actually schedule inside other
* swevents, since those run with preemption disabled.
*/
static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
perf_fetch_caller_regs(regs);
___perf_sw_event(event_id, nr, regs, addr);
}
extern struct static_key_false perf_sched_events;
static __always_inline bool __perf_sw_enabled(int swevt)
{
return static_key_false(&perf_swevent_enabled[swevt]);
}
static inline void perf_event_task_migrate(struct task_struct *task)
{
if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
task->sched_migrated = 1;
}
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);
if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
task->sched_migrated) {
__perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
task->sched_migrated = 0;
}
}
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
__perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
#ifdef CONFIG_CGROUP_PERF
if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
perf_cgroup_from_task(prev, NULL) !=
perf_cgroup_from_task(next, NULL))
__perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
#endif
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
}
extern void perf_event_mmap(struct vm_area_struct *vma);
extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
#define PERF_GUEST_ACTIVE 0x01
#define PERF_GUEST_USER 0x02
struct perf_guest_info_callbacks {
unsigned int (*state)(void);
unsigned long (*get_ip)(void);
unsigned int (*handle_intel_pt_intr)(void);
};
#ifdef CONFIG_GUEST_PERF_EVENTS
extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
static inline unsigned int perf_guest_state(void)
{
return static_call(__perf_guest_state)();
}
static inline unsigned long perf_guest_get_ip(void)
{
return static_call(__perf_guest_get_ip)();
}
static inline unsigned int perf_guest_handle_intel_pt_intr(void)
{
return static_call(__perf_guest_handle_intel_pt_intr)();
}
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
#else /* !CONFIG_GUEST_PERF_EVENTS: */
static inline unsigned int perf_guest_state(void) { return 0; }
static inline unsigned long perf_guest_get_ip(void) { return 0; }
static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
#endif /* !CONFIG_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
extern void perf_event_text_poke(const void *addr,
const void *old_bytes, size_t old_len,
const void *new_bytes, size_t new_len);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
extern void put_callchain_entry(int rctx);
extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
struct perf_callchain_entry *entry = ctx->entry;
entry->ip[entry->nr++] = ip;
++ctx->contexts;
return 0;
} else {
ctx->contexts_maxed = true;
return -1; /* no more room, stop walking the stack */
}
}
static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
struct perf_callchain_entry *entry = ctx->entry;
entry->ip[entry->nr++] = ip;
++ctx->nr;
return 0;
} else {
return -1; /* no more room, stop walking the stack */
}
}
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_sample_rate;
extern void perf_sample_event_took(u64 sample_len_ns);
/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN 0
/* Finer grained perf_event_open(2) access control. */
#define PERF_SECURITY_CPU 1
#define PERF_SECURITY_KERNEL 2
#define PERF_SECURITY_TRACEPOINT 3
static inline int perf_is_paranoid(void)
{
return sysctl_perf_event_paranoid > -1;
}
extern int perf_allow_kernel(void);
static inline int perf_allow_cpu(void)
{
if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
return -EACCES;
return security_perf_event_open(PERF_SECURITY_CPU);
}
static inline int perf_allow_tracepoint(void)
{
if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
return -EPERM;
return security_perf_event_open(PERF_SECURITY_TRACEPOINT);
}
extern int perf_exclude_event(struct perf_event *event, struct pt_regs *regs);
extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx,
struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct perf_event *event,
struct pt_regs *regs);
#ifndef perf_arch_misc_flags
# define perf_arch_misc_flags(regs) \
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_arch_instruction_pointer(regs) instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif
#ifndef perf_arch_guest_misc_flags
static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs)
{
unsigned long guest_state = perf_guest_state();
if (!(guest_state & PERF_GUEST_ACTIVE))
return 0;
if (guest_state & PERF_GUEST_USER)
return PERF_RECORD_MISC_GUEST_USER;
else
return PERF_RECORD_MISC_GUEST_KERNEL;
}
# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)
#endif
static inline bool needs_branch_stack(struct perf_event *event)
{
return event->attr.branch_sample_type != 0;
}
static inline bool has_aux(struct perf_event *event)
{
return event->pmu && event->pmu->setup_aux;
}
static inline bool has_aux_action(struct perf_event *event)
{
return event->attr.aux_sample_size ||
event->attr.aux_pause ||
event->attr.aux_resume;
}
static inline bool is_write_backward(struct perf_event *event)
{
return !!event->attr.write_backward;
}
static inline bool has_addr_filter(struct perf_event *event)
{
return event->pmu->nr_addr_filters;
}
/*
* An inherited event uses parent's filters
*/
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
struct perf_addr_filters_head *ifh = &event->addr_filters;
if (event->parent)
ifh = &event->parent->addr_filters;
return ifh;
}
static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
{
/* Only the parent has fasync state */
if (event->parent)
event = event->parent;
return &event->fasync;
}
extern void perf_event_addr_filters_sync(struct perf_event *event);
extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_sample_data *data,
struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
struct perf_output_handle *handle,
unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size) { return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task) { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child,
u64 clone_flags) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
u64 *enabled, u64 *running)
{
return -EINVAL;
}
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
return -EINVAL;
}
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
bool unregister, const char *sym) { }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_text_poke(const void *addr,
const void *old_bytes,
size_t old_len,
const void *new_bytes,
size_t new_len) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
static inline int
perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; }
static inline u64
perf_event_pause(struct perf_event *event, bool reset) { return 0; }
static inline int
perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { return 0; }
#endif /* !CONFIG_PERF_EVENTS */
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void) { }
#endif
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
struct device_attribute attr;
u64 id;
const char *event_str;
};
struct perf_pmu_events_ht_attr {
struct device_attribute attr;
u64 id;
const char *event_str_ht;
const char *event_str_noht;
};
struct perf_pmu_events_hybrid_attr {
struct device_attribute attr;
u64 id;
const char *event_str;
u64 pmu_type;
};
struct perf_pmu_format_hybrid_attr {
struct device_attribute attr;
u64 pmu_type;
};
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
#define PMU_EVENT_ATTR(_name, _var, _id, _show) \
static struct perf_pmu_events_attr _var = { \
.attr = __ATTR(_name, 0444, _show, NULL), \
.id = _id, \
};
#define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
static struct perf_pmu_events_attr _var = { \
.attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
.id = 0, \
.event_str = _str, \
};
#define PMU_EVENT_ATTR_ID(_name, _show, _id) \
(&((struct perf_pmu_events_attr[]) { \
{ .attr = __ATTR(_name, 0444, _show, NULL), \
.id = _id, } \
})[0].attr.attr)
#define PMU_FORMAT_ATTR_SHOW(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
#define PMU_FORMAT_ATTR(_name, _format) \
PMU_FORMAT_ATTR_SHOW(_name, _format) \
\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
extern int perf_event_init_cpu(unsigned int cpu);
extern int perf_event_exit_cpu(unsigned int cpu);
#else
# define perf_event_init_cpu NULL
# define perf_event_exit_cpu NULL
#endif
extern void arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg,
u64 now);
/*
* Snapshot branch stack on software events.
*
* A branch stack can be very useful for understanding software events. For
* example, when a long function such as sys_perf_event_open() returns an
* errno, it is not obvious why the function failed; the branch stack can
* provide very helpful information in this type of scenario.
*
* For software events, the hardware branch recorder must be stopped
* quickly. Otherwise, its registers/buffer will be flushed with entries
* generated by the triggering event itself. A static call is therefore
* used to stop the hardware recorder.
*/
/*
* @cnt is the number of entries allocated in @entries.
* Returns the number of entries actually copied into @entries.
*/
typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
unsigned int cnt);
DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
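/*
* Illustrative sketch (not part of this header), assuming an architecture
* PMU has registered an implementation for the static call above; the
* entries[] buffer is hypothetical:
*
*   struct perf_branch_entry entries[16];
*   int copied;
*
*   copied = static_call(perf_snapshot_branch_stack)(entries,
*                                                    ARRAY_SIZE(entries));
*
* entries[0..copied-1] then hold the most recently taken branches.
*/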
#ifndef PERF_NEEDS_LOPWR_CB
static inline void perf_lopwr_cb(bool mode)
{
}
#endif
#endif /* _LINUX_PERF_EVENT_H */
// SPDX-License-Identifier: GPL-2.0
/*
* kobject.h - generic kernel object infrastructure.
*
* Copyright (c) 2002-2003 Patrick Mochel
* Copyright (c) 2002-2003 Open Source Development Labs
* Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2008 Novell Inc.
*
* Please read Documentation/core-api/kobject.rst before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
*/
#ifndef _KOBJECT_H_
#define _KOBJECT_H_
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/kobject_ns.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
#define UEVENT_NUM_ENVP 64 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
/* path to the userspace helper executed on an event */
extern char uevent_helper[];
#endif
/* counter to tag the uevent, read only except for the kobject core */
extern atomic64_t uevent_seqnum;
/*
* The actions here must match the index to the string array
* in lib/kobject_uevent.c
*
* Do not add new actions here without checking with the driver-core
* maintainers. Action strings are not meant to express subsystem
* or device specific properties. In most cases you want to send a
* kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event
* specific variables added to the event environment.
*/
enum kobject_action {
KOBJ_ADD,
KOBJ_REMOVE,
KOBJ_CHANGE,
KOBJ_MOVE,
KOBJ_ONLINE,
KOBJ_OFFLINE,
KOBJ_BIND,
KOBJ_UNBIND,
};
struct kobject {
const char *name;
struct list_head entry;
struct kobject *parent;
struct kset *kset;
const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
struct delayed_work release;
#endif
};
__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
__printf(2, 0) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
return kobj->name;
}
void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
__printf(3, 4) __must_check int kobject_add(struct kobject *kobj,
struct kobject *parent,
const char *fmt, ...);
__printf(4, 5) __must_check int kobject_init_and_add(struct kobject *kobj,
const struct kobj_type *ktype,
struct kobject *parent,
const char *fmt, ...);
void kobject_del(struct kobject *kobj);
struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent);
int __must_check kobject_rename(struct kobject *, const char *new_name);
int __must_check kobject_move(struct kobject *, struct kobject *);
struct kobject *kobject_get(struct kobject *kobj);
struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj);
void kobject_put(struct kobject *kobj);
const void *kobject_namespace(const struct kobject *kobj);
void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
char *kobject_get_path(const struct kobject *kobj, gfp_t flag);
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj);
const void *(*namespace)(const struct kobject *kobj);
void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
struct kobj_uevent_env {
char *argv[3];
char *envp[UEVENT_NUM_ENVP];
int envp_idx;
char buf[UEVENT_BUFFER_SIZE];
int buflen;
};
struct kset_uevent_ops {
int (* const filter)(const struct kobject *kobj);
const char *(* const name)(const struct kobject *kobj);
int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env);
};
struct kobj_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
char *buf);
ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count);
};
extern const struct sysfs_ops kobj_sysfs_ops;
struct sock;
/**
* struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
*
* A kset defines a group of kobjects. They can be individually
* different "types" but overall these kobjects all want to be grouped
* together and operated on in the same manner. ksets are used to
* define the attribute callbacks and other common events that happen to
* a kobject.
*
* @list: the list of all kobjects for this kset
* @list_lock: a lock for iterating over the kobjects
* @kobj: the embedded kobject for this kset (recursion, isn't it fun...)
* @uevent_ops: the set of uevent operations for this kset. These are
* called whenever a kobject has something happen to it so that the kset
* can add new environment variables, or filter out the uevents if so
* desired.
*/
struct kset {
struct list_head list;
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
} __randomize_layout;
void kset_init(struct kset *kset);
int __must_check kset_register(struct kset *kset);
void kset_unregister(struct kset *kset);
struct kset * __must_check kset_create_and_add(const char *name, const struct kset_uevent_ops *u,
struct kobject *parent_kobj);
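/*
* Illustrative sketch (not part of this header), loosely following
* samples/kobject/kset-example.c; example_kset, foo_kobj and foo_ktype are
* hypothetical:
*
*   example_kset = kset_create_and_add("example", NULL, kernel_kobj);
*   if (!example_kset)
*           return -ENOMEM;
*
*   foo_kobj->kset = example_kset;
*   ret = kobject_init_and_add(foo_kobj, &foo_ktype, NULL, "%s", "foo");
*   if (ret)
*           kobject_put(foo_kobj);
*/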
static inline struct kset *to_kset(struct kobject *kobj)
{
return kobj ? container_of(kobj, struct kset, kobj) : NULL;
}
static inline struct kset *kset_get(struct kset *k)
{
return k ? to_kset(kobject_get(&k->kobj)) : NULL;
}
static inline void kset_put(struct kset *k)
{
kobject_put(&k->kobj);
}
static inline const struct kobj_type *get_ktype(const struct kobject *kobj)
{
return kobj->ktype;
}
struct kobject *kset_find_obj(struct kset *, const char *);
/* The global /sys/kernel/ kobject for people to chain off of */
extern struct kobject *kernel_kobj;
/* The global /sys/kernel/mm/ kobject for people to chain off of */
extern struct kobject *mm_kobj;
/* The global /sys/hypervisor/ kobject for people to chain off of */
extern struct kobject *hypervisor_kobj;
/* The global /sys/power/ kobject for people to chain off of */
extern struct kobject *power_kobj;
/* The global /sys/firmware/ kobject for people to chain off of */
extern struct kobject *firmware_kobj;
int kobject_uevent(struct kobject *kobj, enum kobject_action action);
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
char *envp[]);
int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count);
__printf(2, 3)
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...);
#endif /* _KOBJECT_H_ */
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_RPS_H
#define _NET_RPS_H
#include <linux/types.h>
#include <linux/static_key.h>
#include <net/sock.h>
#include <net/hotdata.h>
#ifdef CONFIG_RPS
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
/*
* This structure holds an RPS map which can be of variable length. The
* map is an array of CPUs.
*/
struct rps_map {
unsigned int len;
struct rcu_head rcu;
u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
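/*
* Illustrative sketch (not part of this header): allocating a map that can
* hold n CPUs and recording them; n and cpu_ids[] are hypothetical.
*
*   struct rps_map *map;
*
*   map = kzalloc(RPS_MAP_SIZE(n), GFP_KERNEL);
*   if (!map)
*           return -ENOMEM;
*   map->len = n;
*   for (i = 0; i < n; i++)
*           map->cpus[i] = cpu_ids[i];
*/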
/*
* The rps_dev_flow structure contains the mapping of a flow to a CPU, the
* tail pointer for that CPU's input queue at the time of last enqueue, a
* hardware filter index, and the hash of the flow if aRFS is enabled.
*/
struct rps_dev_flow {
u16 cpu;
u16 filter;
unsigned int last_qtail;
#ifdef CONFIG_RFS_ACCEL
u32 hash;
#endif
};
#define RPS_NO_FILTER 0xffff
/*
* The rps_dev_flow_table structure contains a table of flow mappings.
*/
struct rps_dev_flow_table {
u8 log;
struct rcu_head rcu;
struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
* Each entry is a 32-bit value: the upper part holds the high-order bits
* of the flow hash, the lower part holds the CPU number.
* rps_cpu_mask is used to partition the space, depending on the number of
* possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1.
* For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
* meaning we use 32-6=26 bits for the hash; see the worked example after
* rps_record_sock_flow() below.
*/
struct rps_sock_flow_table {
struct rcu_head rcu;
u32 mask;
u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
#define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
u32 hash)
{
unsigned int index = hash & table->mask;
u32 val = hash & ~net_hotdata.rps_cpu_mask;
/* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
/* The following WRITE_ONCE() is paired with the READ_ONCE()
* here, and another one in get_rps_cpu().
*/
if (READ_ONCE(table->ents[index]) != val)
WRITE_ONCE(table->ents[index], val);
}
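/*
* Worked example for the computation above, assuming 64 possible CPUs
* (net_hotdata.rps_cpu_mask = 0x3f) and the current CPU being 5:
*
*   hash = 0xdeadbeef
*   val  = (0xdeadbeef & ~0x3f) | 5 = 0xdeadbec0 | 5 = 0xdeadbec5
*
* so the upper 26 bits of the entry keep the flow hash and the low 6 bits
* record the CPU that last processed the flow.
*/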
static inline void _sock_rps_record_flow_hash(__u32 hash)
{
struct rps_sock_flow_table *sock_flow_table;
if (!hash)
return;
rcu_read_lock();
sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (sock_flow_table)
rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
}
static inline void _sock_rps_record_flow(const struct sock *sk)
{
/* Reading sk->sk_rxhash might incur an expensive cache line
* miss.
*
* TCP_ESTABLISHED does cover almost all states where RFS
* might be useful, and is cheaper [1] than testing :
* IPv4: inet_sk(sk)->inet_daddr
* IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
* OR an additional socket flag
* [1] : sk_state and sk_prot are in the same cache line.
*/
if (sk->sk_state == TCP_ESTABLISHED) {
/* This READ_ONCE() is paired with the WRITE_ONCE()
* from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
*/
_sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
}
}
static inline void _sock_rps_delete_flow(const struct sock *sk)
{
struct rps_sock_flow_table *table;
u32 hash, index;
hash = READ_ONCE(sk->sk_rxhash);
if (!hash)
return;
rcu_read_lock();
table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (table) {
index = hash & table->mask;
if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
WRITE_ONCE(table->ents[index], RPS_NO_CPU);
}
rcu_read_unlock();
}
#endif /* CONFIG_RPS */
static inline bool rfs_is_needed(void)
{
#ifdef CONFIG_RPS
return static_branch_unlikely(&rfs_needed);
#else
return false;
#endif
}
static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
if (!rfs_is_needed())
return;
_sock_rps_record_flow_hash(hash);
#endif
}
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
if (!rfs_is_needed())
return;
_sock_rps_record_flow(sk);
#endif
}
static inline void sock_rps_delete_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
if (!rfs_is_needed())
return;
_sock_rps_delete_flow(sk);
#endif
}
static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
return ++sd->input_queue_tail;
#else
return 0;
#endif
}
static inline void rps_input_queue_tail_save(u32 *dest, u32 tail)
{
#ifdef CONFIG_RPS
WRITE_ONCE(*dest, tail);
#endif
}
static inline void rps_input_queue_head_add(struct softnet_data *sd, int val)
{
#ifdef CONFIG_RPS
WRITE_ONCE(sd->input_queue_head, sd->input_queue_head + val);
#endif
}
static inline void rps_input_queue_head_incr(struct softnet_data *sd)
{
rps_input_queue_head_add(sd, 1);
}
#endif /* _NET_RPS_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* fs/libfs.c
* Library for filesystems writers.
*/
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/exportfs.h>
#include <linux/iversion.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */
#include <linux/fs_context.h>
#include <linux/pseudo_fs.h>
#include <linux/fsnotify.h>
#include <linux/unicode.h>
#include <linux/fscrypt.h>
#include <linux/pidfs.h>
#include <linux/uaccess.h>
#include "internal.h"
int simple_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
return 0;
}
EXPORT_SYMBOL(simple_getattr);
int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
u64 id = huge_encode_dev(dentry->d_sb->s_dev);
buf->f_fsid = u64_to_fsid(id);
buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = PAGE_SIZE;
buf->f_namelen = NAME_MAX;
return 0;
}
EXPORT_SYMBOL(simple_statfs);
/*
* Retaining negative dentries for an in-memory filesystem just wastes
* memory and lookup time: arrange for them to be deleted immediately.
*/
int always_delete_dentry(const struct dentry *dentry)
{
return 1;
}
EXPORT_SYMBOL(always_delete_dentry);
/*
* Lookup the data. This is trivial - if the dentry didn't already
* exist, we know it is negative. Set d_op to delete negative dentries.
*/
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
if (!dentry->d_op && !(dentry->d_flags & DCACHE_DONTCACHE)) {
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_DONTCACHE;
spin_unlock(&dentry->d_lock);
}
if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
return NULL;
d_add(dentry, NULL);
return NULL;
}
EXPORT_SYMBOL(simple_lookup);
int dcache_dir_open(struct inode *inode, struct file *file)
{
file->private_data = d_alloc_cursor(file->f_path.dentry);
return file->private_data ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(dcache_dir_open);
int dcache_dir_close(struct inode *inode, struct file *file)
{
dput(file->private_data);
return 0;
}
EXPORT_SYMBOL(dcache_dir_close);
/* parent is locked at least shared */
/*
* Returns an element of the siblings' list.
* We are looking for the <count>th positive dentry after <p>; if
* found, the dentry is grabbed and returned to the caller.
* If no such element exists, NULL is returned.
*/
static struct dentry *scan_positives(struct dentry *cursor,
struct hlist_node **p,
loff_t count,
struct dentry *last)
{
struct dentry *dentry = cursor->d_parent, *found = NULL;
spin_lock(&dentry->d_lock);
while (*p) {
struct dentry *d = hlist_entry(*p, struct dentry, d_sib);
p = &d->d_sib.next;
// we must at least skip cursors, to avoid livelocks
if (d->d_flags & DCACHE_DENTRY_CURSOR)
continue;
if (simple_positive(d) && !--count) {
spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(d))
found = dget_dlock(d);
spin_unlock(&d->d_lock);
if (likely(found))
break;
count = 1;
}
if (need_resched()) {
if (!hlist_unhashed(&cursor->d_sib))
__hlist_del(&cursor->d_sib);
hlist_add_behind(&cursor->d_sib, &d->d_sib);
p = &cursor->d_sib.next;
spin_unlock(&dentry->d_lock);
cond_resched();
spin_lock(&dentry->d_lock);
}
}
spin_unlock(&dentry->d_lock);
dput(last);
return found;
}
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *dentry = file->f_path.dentry;
switch (whence) {
case SEEK_CUR:
offset += file->f_pos;
fallthrough;
case SEEK_SET:
if (offset >= 0)
break;
fallthrough;
default:
return -EINVAL;
}
if (offset != file->f_pos) {
struct dentry *cursor = file->private_data;
struct dentry *to = NULL;
inode_lock_shared(dentry->d_inode);
if (offset > 2)
to = scan_positives(cursor, &dentry->d_children.first,
offset - 2, NULL);
spin_lock(&dentry->d_lock);
hlist_del_init(&cursor->d_sib);
if (to)
hlist_add_behind(&cursor->d_sib, &to->d_sib);
spin_unlock(&dentry->d_lock);
dput(to);
file->f_pos = offset;
inode_unlock_shared(dentry->d_inode);
}
return offset;
}
EXPORT_SYMBOL(dcache_dir_lseek);
/*
* Directory is locked and all positive dentries in it are safe, since
* for ramfs-type trees they can't go away without unlink() or rmdir(),
* both impossible due to the lock on directory.
*/
int dcache_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *cursor = file->private_data;
struct dentry *next = NULL;
struct hlist_node **p;
if (!dir_emit_dots(file, ctx))
return 0;
if (ctx->pos == 2)
p = &dentry->d_children.first;
else
p = &cursor->d_sib.next;
while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
d_inode(next)->i_ino,
fs_umode_to_dtype(d_inode(next)->i_mode)))
break;
ctx->pos++;
p = &next->d_sib.next;
}
spin_lock(&dentry->d_lock);
hlist_del_init(&cursor->d_sib);
if (next)
hlist_add_before(&cursor->d_sib, &next->d_sib);
spin_unlock(&dentry->d_lock);
dput(next);
return 0;
}
EXPORT_SYMBOL(dcache_readdir);
ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
return -EISDIR;
}
EXPORT_SYMBOL(generic_read_dir);
const struct file_operations simple_dir_operations = {
.open = dcache_dir_open,
.release = dcache_dir_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
.iterate_shared = dcache_readdir,
.fsync = noop_fsync,
};
EXPORT_SYMBOL(simple_dir_operations);
const struct inode_operations simple_dir_inode_operations = {
.lookup = simple_lookup,
};
EXPORT_SYMBOL(simple_dir_inode_operations);
/* simple_offset_add() never assigns these to a dentry */
enum {
DIR_OFFSET_FIRST = 2, /* Find first real entry */
DIR_OFFSET_EOD = S32_MAX,
};
/* simple_offset_add() allocation range */
enum {
DIR_OFFSET_MIN = DIR_OFFSET_FIRST + 1,
DIR_OFFSET_MAX = DIR_OFFSET_EOD - 1,
};
static void offset_set(struct dentry *dentry, long offset)
{
dentry->d_fsdata = (void *)offset;
}
static long dentry2offset(struct dentry *dentry)
{
return (long)dentry->d_fsdata;
}
static struct lock_class_key simple_offset_lock_class;
/**
* simple_offset_init - initialize an offset_ctx
* @octx: directory offset map to be initialized
*
*/
void simple_offset_init(struct offset_ctx *octx)
{
mt_init_flags(&octx->mt, MT_FLAGS_ALLOC_RANGE);
lockdep_set_class(&octx->mt.ma_lock, &simple_offset_lock_class);
octx->next_offset = DIR_OFFSET_MIN;
}
/**
* simple_offset_add - Add an entry to a directory's offset map
* @octx: directory offset ctx to be updated
* @dentry: new dentry being added
*
* Returns zero on success. @octx and the dentry's offset are updated.
* Otherwise, a negative errno value is returned.
*/
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
{
unsigned long offset;
int ret;
if (dentry2offset(dentry) != 0)
return -EBUSY;
ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN,
DIR_OFFSET_MAX, &octx->next_offset,
GFP_KERNEL);
if (unlikely(ret < 0))
return ret == -EBUSY ? -ENOSPC : ret;
offset_set(dentry, offset);
return 0;
}
static int simple_offset_replace(struct offset_ctx *octx, struct dentry *dentry,
long offset)
{
int ret;
ret = mtree_store(&octx->mt, offset, dentry, GFP_KERNEL);
if (ret)
return ret;
offset_set(dentry, offset);
return 0;
}
/**
* simple_offset_remove - Remove an entry from a directory's offset map
* @octx: directory offset ctx to be updated
* @dentry: dentry being removed
*
*/
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
{
long offset;
offset = dentry2offset(dentry);
if (offset == 0)
return;
mtree_erase(&octx->mt, offset);
offset_set(dentry, 0);
}
/**
* simple_offset_rename - handle directory offsets for rename
* @old_dir: parent directory of source entry
* @old_dentry: dentry of source entry
* @new_dir: parent_directory of destination entry
* @new_dentry: dentry of destination
*
* Caller provides appropriate serialization.
*
* User space expects the directory offset value of the replaced
* (new) directory entry to be unchanged after a rename.
*
* Returns zero on success, a negative errno value on failure.
*/
int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
long new_offset = dentry2offset(new_dentry);
simple_offset_remove(old_ctx, old_dentry);
if (new_offset) {
offset_set(new_dentry, 0);
return simple_offset_replace(new_ctx, old_dentry, new_offset);
}
return simple_offset_add(new_ctx, old_dentry);
}
/**
* simple_offset_rename_exchange - exchange rename with directory offsets
* @old_dir: parent of dentry being moved
* @old_dentry: dentry being moved
* @new_dir: destination parent
* @new_dentry: destination dentry
*
* This API preserves the directory offset values. Caller provides
* appropriate serialization.
*
* Returns zero on success. Otherwise a negative errno is returned and the
* rename is rolled back.
*/
int simple_offset_rename_exchange(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry)
{
struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
long old_index = dentry2offset(old_dentry);
long new_index = dentry2offset(new_dentry);
int ret;
simple_offset_remove(old_ctx, old_dentry);
simple_offset_remove(new_ctx, new_dentry);
ret = simple_offset_replace(new_ctx, old_dentry, new_index);
if (ret)
goto out_restore;
ret = simple_offset_replace(old_ctx, new_dentry, old_index);
if (ret) {
simple_offset_remove(new_ctx, old_dentry);
goto out_restore;
}
ret = simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
if (ret) {
simple_offset_remove(new_ctx, old_dentry);
simple_offset_remove(old_ctx, new_dentry);
goto out_restore;
}
return 0;
out_restore:
(void)simple_offset_replace(old_ctx, old_dentry, old_index);
(void)simple_offset_replace(new_ctx, new_dentry, new_index);
return ret;
}
/**
* simple_offset_destroy - Release offset map
* @octx: directory offset ctx that is about to be destroyed
*
* During fs teardown (e.g. umount), a directory's offset map might still
* contain entries. mtree_destroy() cleans out anything that remains.
*/
void simple_offset_destroy(struct offset_ctx *octx)
{
mtree_destroy(&octx->mt);
}
/**
* offset_dir_llseek - Advance the read position of a directory descriptor
* @file: an open directory whose position is to be updated
* @offset: a byte offset
* @whence: enumerator describing the starting position for this update
*
* SEEK_END, SEEK_DATA, and SEEK_HOLE are not supported for directories.
*
* Returns the updated read position if successful; otherwise a
* negative errno is returned and the read position remains unchanged.
*/
static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
{
switch (whence) {
case SEEK_CUR:
offset += file->f_pos;
fallthrough;
case SEEK_SET:
if (offset >= 0)
break;
fallthrough;
default:
return -EINVAL;
}
return vfs_setpos(file, offset, LONG_MAX);
}
static struct dentry *find_positive_dentry(struct dentry *parent,
struct dentry *dentry,
bool next)
{
struct dentry *found = NULL;
spin_lock(&parent->d_lock);
if (next)
dentry = d_next_sibling(dentry);
else if (!dentry)
dentry = d_first_child(parent);
hlist_for_each_entry_from(dentry, d_sib) {
if (!simple_positive(dentry))
continue;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(dentry))
found = dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
if (likely(found))
break;
}
spin_unlock(&parent->d_lock);
return found;
}
static noinline_for_stack struct dentry *
offset_dir_lookup(struct dentry *parent, loff_t offset)
{
struct inode *inode = d_inode(parent);
struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
struct dentry *child, *found = NULL;
MA_STATE(mas, &octx->mt, offset, offset);
if (offset == DIR_OFFSET_FIRST)
found = find_positive_dentry(parent, NULL, false);
else {
rcu_read_lock();
child = mas_find_rev(&mas, DIR_OFFSET_MIN);
found = find_positive_dentry(parent, child, false);
rcu_read_unlock();
}
return found;
}
static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
return dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
inode->i_ino, fs_umode_to_dtype(inode->i_mode));
}
static void offset_iterate_dir(struct file *file, struct dir_context *ctx)
{
struct dentry *dir = file->f_path.dentry;
struct dentry *dentry;
dentry = offset_dir_lookup(dir, ctx->pos);
if (!dentry)
goto out_eod;
while (true) {
struct dentry *next;
ctx->pos = dentry2offset(dentry);
if (!offset_dir_emit(ctx, dentry))
break;
next = find_positive_dentry(dir, dentry, true);
dput(dentry);
if (!next)
goto out_eod;
dentry = next;
}
dput(dentry);
return;
out_eod:
ctx->pos = DIR_OFFSET_EOD;
}
/**
* offset_readdir - Emit entries starting at offset @ctx->pos
* @file: an open directory to iterate over
* @ctx: directory iteration context
*
* Caller must hold @file's i_rwsem to prevent insertion or removal of
* entries during this call.
*
* On entry, @ctx->pos contains an offset that represents the first entry
* to be read from the directory.
*
* The operation continues until there are no more entries to read, or
* until the ctx->actor indicates there is no more space in the caller's
* output buffer.
*
* On return, @ctx->pos contains an offset that will read the next entry
* in this directory when offset_readdir() is called again with @ctx.
* Caller places this value in the d_off field of the last entry in the
* user's buffer.
*
* Return values:
* %0 - Complete
*/
static int offset_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dir = file->f_path.dentry;
lockdep_assert_held(&d_inode(dir)->i_rwsem);
if (!dir_emit_dots(file, ctx))
return 0;
if (ctx->pos != DIR_OFFSET_EOD)
offset_iterate_dir(file, ctx);
return 0;
}
const struct file_operations simple_offset_dir_operations = {
.llseek = offset_dir_llseek,
.iterate_shared = offset_readdir,
.read = generic_read_dir,
.fsync = noop_fsync,
};
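/*
* Illustrative sketch (not part of this file): a filesystem that wants
* stable directory offsets (tmpfs does this) embeds a struct offset_ctx in
* its per-inode info, returns it from the directory inode_operations
* ->get_offset_ctx() hook, uses simple_offset_dir_operations as the
* directory file_operations, and calls simple_offset_add()/_remove()/
* _rename()/_destroy() from its create, unlink, rename and eviction paths.
* The MYFS_I() and myfs_get_offset_ctx() names below are hypothetical.
*
*   static struct offset_ctx *myfs_get_offset_ctx(struct inode *inode)
*   {
*           return &MYFS_I(inode)->dir_offsets;
*   }
*/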
struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
{
struct dentry *child = NULL, *d;
spin_lock(&parent->d_lock);
d = prev ? d_next_sibling(prev) : d_first_child(parent);
hlist_for_each_entry_from(d, d_sib) {
if (simple_positive(d)) {
spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(d))
child = dget_dlock(d);
spin_unlock(&d->d_lock);
if (likely(child))
break;
}
}
spin_unlock(&parent->d_lock);
dput(prev);
return child;
}
EXPORT_SYMBOL(find_next_child);
static void __simple_recursive_removal(struct dentry *dentry,
void (*callback)(struct dentry *),
bool locked)
{
struct dentry *this = dget(dentry);
while (true) {
struct dentry *victim = NULL, *child;
struct inode *inode = this->d_inode;
inode_lock_nested(inode, I_MUTEX_CHILD);
if (d_is_dir(this))
inode->i_flags |= S_DEAD;
while ((child = find_next_child(this, victim)) == NULL) {
// kill and ascend
// update metadata while it's still locked
inode_set_ctime_current(inode);
clear_nlink(inode);
inode_unlock(inode);
victim = this;
this = this->d_parent;
inode = this->d_inode;
if (!locked || victim != dentry)
inode_lock_nested(inode, I_MUTEX_CHILD);
if (simple_positive(victim)) {
d_invalidate(victim); // avoid lost mounts
if (callback)
callback(victim);
fsnotify_delete(inode, d_inode(victim), victim);
dput(victim); // unpin it
}
if (victim == dentry) {
inode_set_mtime_to_ts(inode,
inode_set_ctime_current(inode));
if (d_is_dir(dentry))
drop_nlink(inode);
if (!locked)
inode_unlock(inode);
dput(dentry);
return;
}
}
inode_unlock(inode);
this = child;
}
}
void simple_recursive_removal(struct dentry *dentry,
void (*callback)(struct dentry *))
{
return __simple_recursive_removal(dentry, callback, false);
}
EXPORT_SYMBOL(simple_recursive_removal);
/* caller holds parent directory with I_MUTEX_PARENT */
void locked_recursive_removal(struct dentry *dentry,
void (*callback)(struct dentry *))
{
return __simple_recursive_removal(dentry, callback, true);
}
EXPORT_SYMBOL(locked_recursive_removal);
static const struct super_operations simple_super_operations = {
.statfs = simple_statfs,
};
static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
{
struct pseudo_fs_context *ctx = fc->fs_private;
struct inode *root;
s->s_maxbytes = MAX_LFS_FILESIZE;
s->s_blocksize = PAGE_SIZE;
s->s_blocksize_bits = PAGE_SHIFT;
s->s_magic = ctx->magic;
s->s_op = ctx->ops ?: &simple_super_operations;
s->s_export_op = ctx->eops;
s->s_xattr = ctx->xattr;
s->s_time_gran = 1;
root = new_inode(s);
if (!root)
return -ENOMEM;
/*
* since this is the first inode, make it number 1. New inodes created
* after this must take care not to collide with it (by passing
* max_reserved of 1 to iunique).
*/
root->i_ino = 1;
root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
simple_inode_init_ts(root);
s->s_root = d_make_root(root);
if (!s->s_root)
return -ENOMEM;
set_default_d_op(s, ctx->dops);
return 0;
}
static int pseudo_fs_get_tree(struct fs_context *fc)
{
return get_tree_nodev(fc, pseudo_fs_fill_super);
}
static void pseudo_fs_free(struct fs_context *fc)
{
kfree(fc->fs_private);
}
static const struct fs_context_operations pseudo_fs_context_ops = {
.free = pseudo_fs_free,
.get_tree = pseudo_fs_get_tree,
};
/*
* Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
* will never be mountable)
*/
struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
unsigned long magic)
{
struct pseudo_fs_context *ctx;
ctx = kzalloc(sizeof(struct pseudo_fs_context), GFP_KERNEL);
if (likely(ctx)) {
ctx->magic = magic;
fc->fs_private = ctx;
fc->ops = &pseudo_fs_context_ops;
fc->sb_flags |= SB_NOUSER;
fc->global = true;
}
return ctx;
}
EXPORT_SYMBOL(init_pseudo);
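/*
* Illustrative sketch (not part of this file): a pseudo filesystem's
* ->init_fs_context() usually just calls init_pseudo() and overrides the
* fields it needs; MYFS_MAGIC and myfs_sops are hypothetical.
*
*   static int myfs_init_fs_context(struct fs_context *fc)
*   {
*           struct pseudo_fs_context *ctx = init_pseudo(fc, MYFS_MAGIC);
*
*           if (!ctx)
*                   return -ENOMEM;
*           ctx->ops = &myfs_sops;
*           return 0;
*   }
*/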
int simple_open(struct inode *inode, struct file *file)
{
if (inode->i_private)
file->private_data = inode->i_private;
return 0;
}
EXPORT_SYMBOL(simple_open);
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(old_dentry);
inode_set_mtime_to_ts(dir,
inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inc_nlink(inode);
ihold(inode);
dget(dentry);
d_instantiate(dentry, inode);
return 0;
}
EXPORT_SYMBOL(simple_link);
int simple_empty(struct dentry *dentry)
{
struct dentry *child;
int ret = 0;
spin_lock(&dentry->d_lock);
hlist_for_each_entry(child, &dentry->d_children, d_sib) {
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(child)) {
spin_unlock(&child->d_lock);
goto out;
}
spin_unlock(&child->d_lock);
}
ret = 1;
out:
spin_unlock(&dentry->d_lock);
return ret;
}
EXPORT_SYMBOL(simple_empty);
int simple_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
inode_set_mtime_to_ts(dir,
inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
drop_nlink(inode);
dput(dentry);
return 0;
}
EXPORT_SYMBOL(simple_unlink);
int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
if (!simple_empty(dentry))
return -ENOTEMPTY;
drop_nlink(d_inode(dentry));
simple_unlink(dir, dentry);
drop_nlink(dir);
return 0;
}
EXPORT_SYMBOL(simple_rmdir);
/**
* simple_rename_timestamp - update the various inode timestamps for rename
* @old_dir: old parent directory
* @old_dentry: dentry that is being renamed
* @new_dir: new parent directory
* @new_dentry: target for rename
*
* POSIX mandates that the old and new parent directories have their ctime and
* mtime updated, and that inodes of @old_dentry and @new_dentry (if any), have
* their ctime updated.
*/
void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *newino = d_inode(new_dentry);
inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
if (new_dir != old_dir)
inode_set_mtime_to_ts(new_dir,
inode_set_ctime_current(new_dir));
inode_set_ctime_current(d_inode(old_dentry));
if (newino)
inode_set_ctime_current(newino);
}
EXPORT_SYMBOL_GPL(simple_rename_timestamp);
int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
bool old_is_dir = d_is_dir(old_dentry);
bool new_is_dir = d_is_dir(new_dentry);
if (old_dir != new_dir && old_is_dir != new_is_dir) {
if (old_is_dir) {
drop_nlink(old_dir);
inc_nlink(new_dir);
} else {
drop_nlink(new_dir);
inc_nlink(old_dir);
}
}
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
return 0;
}
EXPORT_SYMBOL_GPL(simple_rename_exchange);
int simple_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
int they_are_dirs = d_is_dir(old_dentry);
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
if (flags & RENAME_EXCHANGE)
return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
if (d_really_is_positive(new_dentry)) {
simple_unlink(new_dir, new_dentry);
if (they_are_dirs) {
drop_nlink(d_inode(new_dentry));
drop_nlink(old_dir);
}
} else if (they_are_dirs) {
drop_nlink(old_dir);
inc_nlink(new_dir);
}
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
return 0;
}
EXPORT_SYMBOL(simple_rename);
/**
* simple_setattr - setattr for simple filesystem
* @idmap: idmap of the target mount
* @dentry: dentry
* @iattr: iattr structure
*
* Returns 0 on success, -error on failure.
*
* simple_setattr is a simple ->setattr implementation without a proper
* implementation of size changes.
*
* It can either be used for in-memory filesystems or special files
* on simple regular filesystems. Anything that needs to change on-disk
* or wire state on size changes needs its own setattr method.
*/
int simple_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int error;
error = setattr_prepare(idmap, dentry, iattr);
if (error)
return error;
if (iattr->ia_valid & ATTR_SIZE)
truncate_setsize(inode, iattr->ia_size);
setattr_copy(idmap, inode, iattr);
mark_inode_dirty(inode);
return 0;
}
EXPORT_SYMBOL(simple_setattr);
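/*
* Illustrative sketch (not part of this file): an in-memory filesystem can
* plug this helper straight into its inode_operations (the myfs_* name is
* hypothetical); ramfs does essentially the same thing.
*
*   static const struct inode_operations myfs_file_inode_operations = {
*           .setattr        = simple_setattr,
*           .getattr        = simple_getattr,
*   };
*/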
static int simple_read_folio(struct file *file, struct folio *folio)
{
folio_zero_range(folio, 0, folio_size(folio));
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
folio_unlock(folio);
return 0;
}
int simple_write_begin(const struct kiocb *iocb, struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata)
{
struct folio *folio;
folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
*foliop = folio;
if (!folio_test_uptodate(folio) && (len != folio_size(folio))) {
size_t from = offset_in_folio(folio, pos);
folio_zero_segments(folio, 0, from,
from + len, folio_size(folio));
}
return 0;
}
EXPORT_SYMBOL(simple_write_begin);
/**
* simple_write_end - .write_end helper for non-block-device FSes
* @iocb: kernel I/O control block
* @mapping: the address_space of the file being written
* @pos: position in the file where the write starts
* @len: the length originally requested by ->write_begin()
* @copied: the number of bytes actually copied into the folio
* @folio: the folio returned by ->write_begin()
* @fsdata: opaque data passed through from ->write_begin()
*
* simple_write_end does the minimum needed for updating a folio after
* writing is done. It has the same API signature as the .write_end of
* address_space_operations vector. So it can just be set onto .write_end for
* FSes that don't need any other processing. i_rwsem is assumed to be held
* exclusively.
* Block based filesystems should use generic_write_end().
* NOTE: Even though i_size might get updated by this function, mark_inode_dirty
* is not called, so a filesystem that actually does store data in .write_inode
* should extend on what's done here with a call to mark_inode_dirty() in the
* case that i_size has changed.
*
* Use *ONLY* with simple_read_folio()
*/
static int simple_write_end(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata)
{
struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied;
/* zero the stale part of the folio if we did a short copy */
if (!folio_test_uptodate(folio)) {
if (copied < len) {
size_t from = offset_in_folio(folio, pos);
folio_zero_range(folio, from + copied, len - copied);
}
folio_mark_uptodate(folio);
}
/*
* No need to use i_size_read() here, the i_size
* cannot change under us because we hold the i_rwsem.
*/
if (last_pos > inode->i_size)
i_size_write(inode, last_pos);
folio_mark_dirty(folio);
folio_unlock(folio);
folio_put(folio);
return copied;
}
/*
* Provides ramfs-style behavior: data in the pagecache, but no writeback.
*/
const struct address_space_operations ram_aops = {
.read_folio = simple_read_folio,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
.dirty_folio = noop_dirty_folio,
};
EXPORT_SYMBOL(ram_aops);
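/*
* Illustrative sketch (not part of this file): a ramfs-like filesystem
* selects these helpers when it initializes a new inode (the surrounding
* inode-creation function is hypothetical; the calls are the ones ramfs
* itself makes):
*
*   inode->i_mapping->a_ops = &ram_aops;
*   mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
*   mapping_set_unevictable(inode->i_mapping);
*/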
/*
* the inodes created here are not hashed. If you use iunique to generate
* unique inode values later for this filesystem, then you must take care
* to pass it an appropriate max_reserved value to avoid collisions.
*/
int simple_fill_super(struct super_block *s, unsigned long magic,
const struct tree_descr *files)
{
struct inode *inode;
struct dentry *dentry;
int i;
s->s_blocksize = PAGE_SIZE;
s->s_blocksize_bits = PAGE_SHIFT;
s->s_magic = magic;
s->s_op = &simple_super_operations;
s->s_time_gran = 1;
inode = new_inode(s);
if (!inode)
return -ENOMEM;
/*
* because the root inode is 1, the files array must not contain an
* entry at index 1
*/
inode->i_ino = 1;
inode->i_mode = S_IFDIR | 0755;
simple_inode_init_ts(inode);
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
set_nlink(inode, 2);
s->s_root = d_make_root(inode);
if (!s->s_root)
return -ENOMEM;
for (i = 0; !files->name || files->name[0]; i++, files++) {
if (!files->name)
continue;
/* warn if it tries to conflict with the root inode */
if (unlikely(i == 1))
printk(KERN_WARNING "%s: %s passed in a files array"
"with an index of 1!\n", __func__,
s->s_type->name);
dentry = d_alloc_name(s->s_root, files->name);
if (!dentry)
return -ENOMEM;
inode = new_inode(s);
if (!inode) {
dput(dentry);
return -ENOMEM;
}
inode->i_mode = S_IFREG | files->mode;
simple_inode_init_ts(inode);
inode->i_fop = files->ops;
inode->i_ino = i;
d_add(dentry, inode);
}
return 0;
}
EXPORT_SYMBOL(simple_fill_super);
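/*
* A sketch, not part of the original source, of a fill_super callback built on
* simple_fill_super(); EXAMPLEFS_MAGIC and examplefs_status_fops are
* hypothetical. Index 1 is skipped because the root directory owns inode 1,
* and the array is terminated by an entry whose name is "".
*/
#define EXAMPLEFS_MAGIC 0x4578616d /* hypothetical magic number */
extern const struct file_operations examplefs_status_fops; /* hypothetical */
static const struct tree_descr examplefs_files[] = {
[2] = { "status", &examplefs_status_fops, 0444 },
{ "" } /* terminator */
};
static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
return simple_fill_super(sb, EXAMPLEFS_MAGIC, examplefs_files);
}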
static DEFINE_SPINLOCK(pin_fs_lock);
int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
struct vfsmount *mnt = NULL;
spin_lock(&pin_fs_lock);
if (unlikely(!*mount)) {
spin_unlock(&pin_fs_lock);
mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
spin_lock(&pin_fs_lock);
if (!*mount)
*mount = mnt;
}
mntget(*mount);
++*count;
spin_unlock(&pin_fs_lock);
mntput(mnt);
return 0;
}
EXPORT_SYMBOL(simple_pin_fs);
void simple_release_fs(struct vfsmount **mount, int *count)
{
struct vfsmount *mnt;
spin_lock(&pin_fs_lock);
mnt = *mount;
if (!--*count)
*mount = NULL;
spin_unlock(&pin_fs_lock);
mntput(mnt);
}
EXPORT_SYMBOL(simple_release_fs);
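/*
* A sketch, not from the original source, of the usual pin/release pattern:
* a subsystem keeps one internal mount of its pseudo filesystem alive while
* any of its objects exist. examplefs_type and the counters are hypothetical.
*/
static struct file_system_type examplefs_type; /* hypothetical, registered elsewhere */
static struct vfsmount *examplefs_mount;
static int examplefs_mount_count;
static int examplefs_pin(void)
{
return simple_pin_fs(&examplefs_type, &examplefs_mount, &examplefs_mount_count);
}
static void examplefs_unpin(void)
{
simple_release_fs(&examplefs_mount, &examplefs_mount_count);
}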
/**
* simple_read_from_buffer - copy data from the buffer to user space
* @to: the user space buffer to read to
* @count: the maximum number of bytes to read
* @ppos: the current position in the buffer
* @from: the buffer to read from
* @available: the size of the buffer
*
* The simple_read_from_buffer() function reads up to @count bytes from the
* buffer @from at offset @ppos into the user space address starting at @to.
*
* On success, the number of bytes read is returned and the offset @ppos is
* advanced by this number, or a negative value is returned on error.
**/
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
const void *from, size_t available)
{
loff_t pos = *ppos;
size_t ret;
if (pos < 0)
return -EINVAL;
if (pos >= available || !count)
return 0;
if (count > available - pos)
count = available - pos;
ret = copy_to_user(to, from + pos, count);
if (ret == count)
return -EFAULT;
count -= ret;
*ppos = pos + count;
return count;
}
EXPORT_SYMBOL(simple_read_from_buffer);
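/*
* A minimal, hypothetical sketch of a ->read handler for a small read-only
* file backed by a constant kernel buffer; it leans entirely on
* simple_read_from_buffer() for bounds and offset handling.
*/
static ssize_t examplefs_version_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
static const char version[] = "examplefs 1.0\n";
return simple_read_from_buffer(buf, count, ppos, version, sizeof(version) - 1);
}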
/**
* simple_write_to_buffer - copy data from user space to the buffer
* @to: the buffer to write to
* @available: the size of the buffer
* @ppos: the current position in the buffer
* @from: the user space buffer to read from
* @count: the maximum number of bytes to read
*
* The simple_write_to_buffer() function reads up to @count bytes from the user
* space address starting at @from into the buffer @to at offset @ppos.
*
* On success, the number of bytes written is returned and the offset @ppos is
* advanced by this number, or a negative value is returned on error.
**/
ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count)
{
loff_t pos = *ppos;
size_t res;
if (pos < 0)
return -EINVAL;
if (pos >= available || !count)
return 0;
if (count > available - pos)
count = available - pos;
res = copy_from_user(to + pos, from, count);
if (res == count)
return -EFAULT;
count -= res;
*ppos = pos + count;
return count;
}
EXPORT_SYMBOL(simple_write_to_buffer);
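/*
* A matching hypothetical sketch of a ->write handler that copies user data
* into a fixed kernel buffer with simple_write_to_buffer(); the final byte is
* reserved so the buffer stays NUL-terminated.
*/
static char examplefs_cmd_buf[64];
static ssize_t examplefs_cmd_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return simple_write_to_buffer(examplefs_cmd_buf, sizeof(examplefs_cmd_buf) - 1,
ppos, buf, count);
}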
/**
* memory_read_from_buffer - copy data from the buffer
* @to: the kernel space buffer to read to
* @count: the maximum number of bytes to read
* @ppos: the current position in the buffer
* @from: the buffer to read from
* @available: the size of the buffer
*
* The memory_read_from_buffer() function reads up to @count bytes from the
* buffer @from at offset @ppos into the kernel space address starting at @to.
*
* On success, the number of bytes read is returned and the offset @ppos is
* advanced by this number, or a negative value is returned on error.
**/
ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available)
{
loff_t pos = *ppos;
if (pos < 0)
return -EINVAL;
if (pos >= available)
return 0;
if (count > available - pos)
count = available - pos;
memcpy(to, from + pos, count);
*ppos = pos + count;
return count;
}
EXPORT_SYMBOL(memory_read_from_buffer);
/*
* Transaction based IO.
* The file expects a single write which triggers the transaction, and then
* possibly a read which collects the result - which is stored in a
* file-local buffer.
*/
void simple_transaction_set(struct file *file, size_t n)
{
struct simple_transaction_argresp *ar = file->private_data;
BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
/*
* The barrier ensures that ar->size will really remain zero until
* ar->data is ready for reading.
*/
smp_mb();
ar->size = n;
}
EXPORT_SYMBOL(simple_transaction_set);
char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
struct simple_transaction_argresp *ar;
static DEFINE_SPINLOCK(simple_transaction_lock);
if (size > SIMPLE_TRANSACTION_LIMIT - 1)
return ERR_PTR(-EFBIG);
ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
if (!ar)
return ERR_PTR(-ENOMEM);
spin_lock(&simple_transaction_lock);
/* only one write allowed per open */
if (file->private_data) {
spin_unlock(&simple_transaction_lock);
free_page((unsigned long)ar);
return ERR_PTR(-EBUSY);
}
file->private_data = ar;
spin_unlock(&simple_transaction_lock);
if (copy_from_user(ar->data, buf, size))
return ERR_PTR(-EFAULT);
return ar->data;
}
EXPORT_SYMBOL(simple_transaction_get);
ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
struct simple_transaction_argresp *ar = file->private_data;
if (!ar)
return 0;
return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}
EXPORT_SYMBOL(simple_transaction_read);
int simple_transaction_release(struct inode *inode, struct file *file)
{
free_page((unsigned long)file->private_data);
return 0;
}
EXPORT_SYMBOL(simple_transaction_release);
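/*
* A hypothetical sketch, modelled on existing in-tree users, of file
* operations for a transaction file: a single write supplies the request and
* a later read returns the reply stored in the per-open page.
*/
static ssize_t examplefs_transaction_write(struct file *file,
const char __user *buf, size_t size, loff_t *pos)
{
char *data = simple_transaction_get(file, buf, size);
if (IS_ERR(data))
return PTR_ERR(data);
/* ... act on the request in data[] and overwrite it with the reply ... */
simple_transaction_set(file, strlen(data));
return size;
}
static const struct file_operations examplefs_transaction_fops = {
.write = examplefs_transaction_write,
.read = simple_transaction_read,
.release = simple_transaction_release,
.llseek = default_llseek,
};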
/* Simple attribute files */
struct simple_attr {
int (*get)(void *, u64 *);
int (*set)(void *, u64);
char get_buf[24]; /* enough to store a u64 and "\n\0" */
char set_buf[24];
void *data;
const char *fmt; /* format for read operation */
struct mutex mutex; /* protects access to these buffers */
};
/* simple_attr_open is called by an actual attribute open file operation
* to set the attribute specific access operations. */
int simple_attr_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt)
{
struct simple_attr *attr;
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
attr->get = get;
attr->set = set;
attr->data = inode->i_private;
attr->fmt = fmt;
mutex_init(&attr->mutex);
file->private_data = attr;
return nonseekable_open(inode, file);
}
EXPORT_SYMBOL_GPL(simple_attr_open);
int simple_attr_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
EXPORT_SYMBOL_GPL(simple_attr_release); /* GPL-only? This? Really? */
/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
struct simple_attr *attr;
size_t size;
ssize_t ret;
attr = file->private_data;
if (!attr->get)
return -EACCES;
ret = mutex_lock_interruptible(&attr->mutex);
if (ret)
return ret;
if (*ppos && attr->get_buf[0]) {
/* continued read */
size = strlen(attr->get_buf);
} else {
/* first read */
u64 val;
ret = attr->get(attr->data, &val);
if (ret)
goto out;
size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
attr->fmt, (unsigned long long)val);
}
ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
mutex_unlock(&attr->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(simple_attr_read);
/* interpret the buffer as a number to call the set function with */
static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf,
size_t len, loff_t *ppos, bool is_signed)
{
struct simple_attr *attr;
unsigned long long val;
size_t size;
ssize_t ret;
attr = file->private_data;
if (!attr->set)
return -EACCES;
ret = mutex_lock_interruptible(&attr->mutex);
if (ret)
return ret;
ret = -EFAULT;
size = min(sizeof(attr->set_buf) - 1, len);
if (copy_from_user(attr->set_buf, buf, size))
goto out;
attr->set_buf[size] = '\0';
if (is_signed)
ret = kstrtoll(attr->set_buf, 0, &val);
else
ret = kstrtoull(attr->set_buf, 0, &val);
if (ret)
goto out;
ret = attr->set(attr->data, val);
if (ret == 0)
ret = len; /* on success, claim we got the whole input */
out:
mutex_unlock(&attr->mutex);
return ret;
}
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
return simple_attr_write_xsigned(file, buf, len, ppos, false);
}
EXPORT_SYMBOL_GPL(simple_attr_write);
ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
return simple_attr_write_xsigned(file, buf, len, ppos, true);
}
EXPORT_SYMBOL_GPL(simple_attr_write_signed);
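/*
* A hypothetical sketch of how these helpers are usually consumed:
* DEFINE_SIMPLE_ATTRIBUTE() from <linux/fs.h> bundles simple_attr_open/read/
* write/release into a file_operations exposing one u64 value; the backing
* variable is passed via inode->i_private when the file is created.
*/
static u64 examplefs_threshold;
static int examplefs_threshold_get(void *data, u64 *val)
{
*val = *(u64 *)data;
return 0;
}
static int examplefs_threshold_set(void *data, u64 val)
{
*(u64 *)data = val;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(examplefs_threshold_fops, examplefs_threshold_get,
examplefs_threshold_set, "%llu\n");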
/**
* generic_encode_ino32_fh - generic export_operations->encode_fh function
* @inode: the object to encode
* @fh: where to store the file handle fragment
* @max_len: maximum length to store there (in 4 byte units)
* @parent: parent directory inode, if wanted
*
* This generic encode_fh function assumes that the 32 bit inode number
* is suitable for locating an inode, and that the generation number
* can be used to check that it is still valid. It places them in the
* filehandle fragment where export_decode_fh expects to find them.
*/
int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
struct inode *parent)
{
struct fid *fid = (void *)fh;
int len = *max_len;
int type = FILEID_INO32_GEN;
if (parent && (len < 4)) {
*max_len = 4;
return FILEID_INVALID;
} else if (len < 2) {
*max_len = 2;
return FILEID_INVALID;
}
len = 2;
fid->i32.ino = inode->i_ino;
fid->i32.gen = inode->i_generation;
if (parent) {
fid->i32.parent_ino = parent->i_ino;
fid->i32.parent_gen = parent->i_generation;
len = 4;
type = FILEID_INO32_GEN_PARENT;
}
*max_len = len;
return type;
}
EXPORT_SYMBOL_GPL(generic_encode_ino32_fh);
/**
* generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
* @sb: filesystem to do the file handle conversion on
* @fid: file handle to convert
* @fh_len: length of the file handle in bytes
* @fh_type: type of file handle
* @get_inode: filesystem callback to retrieve inode
*
* This function decodes @fid as long as it has one of the well-known
* Linux filehandle types and calls @get_inode on it to retrieve the
* inode for the object specified in the file handle.
*/
struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type, struct inode *(*get_inode)
(struct super_block *sb, u64 ino, u32 gen))
{
struct inode *inode = NULL;
if (fh_len < 2)
return NULL;
switch (fh_type) {
case FILEID_INO32_GEN:
case FILEID_INO32_GEN_PARENT:
inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
break;
}
return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_dentry);
/**
* generic_fh_to_parent - generic helper for the fh_to_parent export operation
* @sb: filesystem to do the file handle conversion on
* @fid: file handle to convert
* @fh_len: length of the file handle in bytes
* @fh_type: type of file handle
* @get_inode: filesystem callback to retrieve inode
*
* This function decodes @fid as long as it has one of the well-known
* Linux filehandle types and calls @get_inode on it to retrieve the
* inode for the _parent_ object, if one is present in the file handle,
* or NULL otherwise.
*/
struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type, struct inode *(*get_inode)
(struct super_block *sb, u64 ino, u32 gen))
{
struct inode *inode = NULL;
if (fh_len <= 2)
return NULL;
switch (fh_type) {
case FILEID_INO32_GEN_PARENT:
inode = get_inode(sb, fid->i32.parent_ino,
(fh_len > 3 ? fid->i32.parent_gen : 0));
break;
}
return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);
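/*
* A hypothetical sketch of an NFS export setup for a filesystem with 32-bit
* inode numbers; examplefs_nfs_get_inode() is assumed to look up an inode by
* number and check the generation before returning it.
*/
static struct inode *examplefs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gen);
static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type, examplefs_nfs_get_inode);
}
static struct dentry *examplefs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type, examplefs_nfs_get_inode);
}
static const struct export_operations examplefs_export_ops = {
.encode_fh = generic_encode_ino32_fh,
.fh_to_dentry = examplefs_fh_to_dentry,
.fh_to_parent = examplefs_fh_to_parent,
};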
/**
* __generic_file_fsync - generic fsync implementation for simple filesystems
*
* @file: file to synchronize
* @start: start offset in bytes
* @end: end offset in bytes (inclusive)
* @datasync: only synchronize essential metadata if true
*
* This is a generic implementation of the fsync method for simple
* filesystems which track all non-inode metadata in the buffers list
* hanging off the address_space structure.
*/
int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file->f_mapping->host;
int err;
int ret;
err = file_write_and_wait_range(file, start, end);
if (err)
return err;
inode_lock(inode);
ret = sync_mapping_buffers(inode->i_mapping);
if (!(inode->i_state & I_DIRTY_ALL))
goto out;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
goto out;
err = sync_inode_metadata(inode, 1);
if (ret == 0)
ret = err;
out:
inode_unlock(inode);
/* check and advance again to catch errors after syncing out buffers */
err = file_check_and_advance_wb_err(file);
if (ret == 0)
ret = err;
return ret;
}
EXPORT_SYMBOL(__generic_file_fsync);
/**
* generic_file_fsync - generic fsync implementation for simple filesystems
* with flush
* @file: file to synchronize
* @start: start offset in bytes
* @end: end offset in bytes (inclusive)
* @datasync: only synchronize essential metadata if true
*
* Like __generic_file_fsync(), but additionally flushes the underlying block
* device's volatile write cache once data and metadata have been written out.
*/
int generic_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file->f_mapping->host;
int err;
err = __generic_file_fsync(file, start, end, datasync);
if (err)
return err;
return blkdev_issue_flush(inode->i_sb->s_bdev);
}
EXPORT_SYMBOL(generic_file_fsync);
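/*
* A hypothetical sketch: a simple block-backed filesystem that keeps its
* metadata in buffers can often point ->fsync straight at this helper.
*/
static const struct file_operations examplefs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
};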
/**
* generic_check_addressable - Check addressability of file system
* @blocksize_bits: log of file system block size
* @num_blocks: number of blocks in file system
*
* Determine whether a file system with @num_blocks blocks (and a
* block size of 2**@blocksize_bits) is addressable by the sector_t
* and page cache of the system. Return 0 if so and -EFBIG otherwise.
*/
int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
u64 last_fs_block = num_blocks - 1;
u64 last_fs_page, max_bytes;
if (unlikely(num_blocks == 0))
return 0;
if (blocksize_bits < 9)
return -EINVAL;
if (check_shl_overflow(num_blocks, blocksize_bits, &max_bytes))
return -EFBIG;
/* last byte is at max_bytes - 1; round down to the page containing it */
last_fs_page = (max_bytes - 1) >> PAGE_SHIFT;
if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
(last_fs_page > (pgoff_t)(~0ULL))) {
return -EFBIG;
}
return 0;
}
EXPORT_SYMBOL(generic_check_addressable);
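/*
* A hypothetical sketch: a filesystem would typically run this check while
* parsing its on-disk superblock and refuse to mount volumes the running
* kernel cannot address.
*/
static int examplefs_check_geometry(struct super_block *sb, u64 total_blocks)
{
int err = generic_check_addressable(sb->s_blocksize_bits, total_blocks);
if (err)
pr_warn("examplefs: volume too large for this system\n");
return err;
}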
/*
* No-op implementation of ->fsync for in-memory filesystems.
*/
int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
return 0;
}
EXPORT_SYMBOL(noop_fsync);
ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
/*
* iomap based filesystems support direct I/O without need for
* this callback. However, it still needs to be set in
* the inode's ->i_mapping->a_ops so that open/fcntl know that direct I/O is
* generally supported.
*/
return -EINVAL;
}
EXPORT_SYMBOL_GPL(noop_direct_IO);
/* Because kfree isn't assignment-compatible with void(void*) ;-/ */
void kfree_link(void *p)
{
kfree(p);
}
EXPORT_SYMBOL(kfree_link);
struct inode *alloc_anon_inode(struct super_block *s)
{
static const struct address_space_operations anon_aops = {
.dirty_folio = noop_dirty_folio,
};
struct inode *inode = new_inode_pseudo(s);
if (!inode)
return ERR_PTR(-ENOMEM);
inode->i_ino = get_next_ino();
inode->i_mapping->a_ops = &anon_aops;
/*
* Mark the inode dirty from the very beginning,
* that way it will never be moved to the dirty
* list because mark_inode_dirty() will think
* that it already _is_ on the dirty list.
*/
inode->i_state = I_DIRTY;
/*
* Historically anonymous inodes don't have a type at all and
* userspace has come to rely on this.
*/
inode->i_mode = S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_flags |= S_PRIVATE | S_ANON_INODE;
simple_inode_init_ts(inode);
return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);
/**
* simple_nosetlease - generic helper for prohibiting leases
* @filp: file pointer
* @arg: type of lease to obtain
* @flp: new lease supplied for insertion
* @priv: private data for lm_setup operation
*
* Generic helper for filesystems that do not wish to allow leases to be set.
* All arguments are ignored and it just returns -EINVAL.
*/
int
simple_nosetlease(struct file *filp, int arg, struct file_lease **flp,
void **priv)
{
return -EINVAL;
}
EXPORT_SYMBOL(simple_nosetlease);
/**
* simple_get_link - generic helper to get the target of "fast" symlinks
* @dentry: not used here
* @inode: the symlink inode
* @done: not used here
*
* Generic helper for filesystems to use for symlink inodes where a pointer to
* the symlink target is stored in ->i_link. NOTE: this isn't normally called,
* since as an optimization the path lookup code uses any non-NULL ->i_link
* directly, without calling ->get_link(). But ->get_link() still must be set,
* to mark the inode_operations as being for a symlink.
*
* Return: the symlink target
*/
const char *simple_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *done)
{
return inode->i_link;
}
EXPORT_SYMBOL(simple_get_link);
const struct inode_operations simple_symlink_inode_operations = {
.get_link = simple_get_link,
};
EXPORT_SYMBOL(simple_symlink_inode_operations);
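/*
* A hypothetical sketch of creating a "fast" symlink that these operations
* serve: the target string lives in ->i_link for the lifetime of the inode,
* and freeing it on eviction remains the filesystem's responsibility.
*/
static int examplefs_init_symlink(struct inode *inode, const char *target)
{
inode->i_link = kstrdup(target, GFP_KERNEL);
if (!inode->i_link)
return -ENOMEM;
inode->i_mode = S_IFLNK | 0777;
inode->i_size = strlen(target);
inode->i_op = &simple_symlink_inode_operations;
return 0;
}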
/*
* Operations for a permanently empty directory.
*/
static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
return ERR_PTR(-ENOENT);
}
static int empty_dir_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
return -EPERM;
}
static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t size)
{
return -EOPNOTSUPP;
}
static const struct inode_operations empty_dir_inode_operations = {
.lookup = empty_dir_lookup,
.setattr = empty_dir_setattr,
.listxattr = empty_dir_listxattr,
};
static loff_t empty_dir_llseek(struct file *file, loff_t offset, int whence)
{
/* An empty directory has two entries . and .. at offsets 0 and 1 */
return generic_file_llseek_size(file, offset, whence, 2, 2);
}
static int empty_dir_readdir(struct file *file, struct dir_context *ctx)
{
dir_emit_dots(file, ctx);
return 0;
}
static const struct file_operations empty_dir_operations = {
.llseek = empty_dir_llseek,
.read = generic_read_dir,
.iterate_shared = empty_dir_readdir,
.fsync = noop_fsync,
};
void make_empty_dir_inode(struct inode *inode)
{
set_nlink(inode, 2);
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_rdev = 0;
inode->i_size = 0;
inode->i_blkbits = PAGE_SHIFT;
inode->i_blocks = 0;
inode->i_op = &empty_dir_inode_operations;
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &empty_dir_operations;
}
bool is_empty_dir_inode(struct inode *inode)
{
return (inode->i_fop == &empty_dir_operations) &&
(inode->i_op == &empty_dir_inode_operations);
}
#if IS_ENABLED(CONFIG_UNICODE)
/**
* generic_ci_d_compare - generic d_compare implementation for casefolding filesystems
* @dentry: dentry whose name we are checking against
* @len: len of name of dentry
* @str: str pointer to name of dentry
* @name: Name to compare against
*
* Return: 0 if names match, 1 if mismatch, or -ERRNO
*/
int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
const struct dentry *parent;
const struct inode *dir;
union shortname_store strbuf;
struct qstr qstr;
/*
* Attempt a case-sensitive match first. It is cheaper and
* should cover most lookups, including all the sane
* applications that expect a case-sensitive filesystem.
*
* This comparison is safe under RCU because the caller
* guarantees the consistency between str and len. See
* __d_lookup_rcu_op_compare() for details.
*/
if (len == name->len && !memcmp(str, name->name, len))
return 0;
parent = READ_ONCE(dentry->d_parent);
dir = READ_ONCE(parent->d_inode);
if (!dir || !IS_CASEFOLDED(dir))
return 1;
qstr.len = len;
qstr.name = str;
/*
* If the dentry name is stored in-line, then it may be concurrently
* modified by a rename. If this happens, the VFS will eventually retry
* the lookup, so it doesn't matter what ->d_compare() returns.
* However, it's unsafe to call utf8_strncasecmp() with an unstable
* string. Therefore, we have to copy the name into a temporary buffer.
* As above, len is guaranteed to match str, so the shortname case
* is exactly when str points to ->d_shortname.
*/
if (qstr.name == dentry->d_shortname.string) {
strbuf = dentry->d_shortname; // NUL is guaranteed to be in there
qstr.name = strbuf.string;
/* prevent compiler from optimizing out the temporary buffer */
barrier();
}
return utf8_strncasecmp(dentry->d_sb->s_encoding, name, &qstr);
}
EXPORT_SYMBOL(generic_ci_d_compare);
/**
* generic_ci_d_hash - generic d_hash implementation for casefolding filesystems
* @dentry: dentry of the parent directory
* @str: qstr of name whose hash we should fill in
*
* Return: 0 if hash was successful or unchanged, and -EINVAL on error
*/
int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
{
const struct inode *dir = READ_ONCE(dentry->d_inode);
struct super_block *sb = dentry->d_sb;
const struct unicode_map *um = sb->s_encoding;
int ret;
if (!dir || !IS_CASEFOLDED(dir))
return 0;
ret = utf8_casefold_hash(um, dentry, str);
if (ret < 0 && sb_has_strict_encoding(sb))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(generic_ci_d_hash);
static const struct dentry_operations generic_ci_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
#ifdef CONFIG_FS_ENCRYPTION
.d_revalidate = fscrypt_d_revalidate,
#endif
};
/**
* generic_ci_match() - Match a name (case-insensitively) with a dirent.
* This is a filesystem helper for comparison with directory entries.
* generic_ci_d_compare should be used in VFS' ->d_compare instead.
*
* @parent: Inode of the parent of the dirent under comparison
* @name: name under lookup.
* @folded_name: Optional pre-folded name under lookup
* @de_name: Dirent name.
* @de_name_len: dirent name length.
*
* Test whether a case-insensitive directory entry matches the filename
* being searched. If @folded_name is provided, it is used instead of
* recalculating the casefold of @name.
*
* Return: > 0 if the directory entry matches, 0 if it doesn't match, or
* < 0 on error.
*/
int generic_ci_match(const struct inode *parent,
const struct qstr *name,
const struct qstr *folded_name,
const u8 *de_name, u32 de_name_len)
{
const struct super_block *sb = parent->i_sb;
const struct unicode_map *um = sb->s_encoding;
struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
struct qstr dirent = QSTR_INIT(de_name, de_name_len);
int res = 0;
if (IS_ENCRYPTED(parent)) {
const struct fscrypt_str encrypted_name =
FSTR_INIT((u8 *) de_name, de_name_len);
if (WARN_ON_ONCE(!fscrypt_has_encryption_key(parent)))
return -EINVAL;
decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
if (!decrypted_name.name)
return -ENOMEM;
res = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name,
&decrypted_name);
if (res < 0) {
kfree(decrypted_name.name);
return res;
}
dirent.name = decrypted_name.name;
dirent.len = decrypted_name.len;
}
/*
* Attempt a case-sensitive match first. It is cheaper and
* should cover most lookups, including all the sane
* applications that expect a case-sensitive filesystem.
*/
if (dirent.len == name->len &&
!memcmp(name->name, dirent.name, dirent.len))
goto out;
if (folded_name->name)
res = utf8_strncasecmp_folded(um, folded_name, &dirent);
else
res = utf8_strncasecmp(um, name, &dirent);
out:
kfree(decrypted_name.name);
if (res < 0 && sb_has_strict_encoding(sb)) {
pr_err_ratelimited("Directory contains filename that is invalid UTF-8");
return 0;
}
return !res;
}
EXPORT_SYMBOL(generic_ci_match);
#endif
#ifdef CONFIG_FS_ENCRYPTION
static const struct dentry_operations generic_encrypted_dentry_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
#endif
/**
* generic_set_sb_d_ops - helper for choosing the set of
* filesystem-wide dentry operations for the enabled features
* @sb: superblock to be configured
*
* Filesystems supporting casefolding and/or fscrypt can call this
* helper at mount-time to configure default dentry_operations to the
* best set of dentry operations required for the enabled features.
* The helper must be called after these have been configured, but
* before the root dentry is created.
*/
void generic_set_sb_d_ops(struct super_block *sb)
{
#if IS_ENABLED(CONFIG_UNICODE)
if (sb->s_encoding) {
set_default_d_op(sb, &generic_ci_dentry_ops);
return;
}
#endif
#ifdef CONFIG_FS_ENCRYPTION
if (sb->s_cop) {
set_default_d_op(sb, &generic_encrypted_dentry_ops);
return;
}
#endif
}
EXPORT_SYMBOL(generic_set_sb_d_ops);
/**
* inode_maybe_inc_iversion - increments i_version
* @inode: inode with the i_version that should be updated
* @force: increment the counter even if it's not necessary?
*
* Every time the inode is modified, the i_version field must be seen to have
* changed by any observer.
*
* If "force" is set or the QUERIED flag is set, then ensure that we increment
* the value, and clear the queried flag.
*
* In the common case where neither is set, then we can return "false" without
* updating i_version.
*
* If this function returns false, and no other metadata has changed, then we
* can avoid logging the metadata.
*/
bool inode_maybe_inc_iversion(struct inode *inode, bool force)
{
u64 cur, new;
/*
* The i_version field is not strictly ordered with any other inode
* information, but the legacy inode_inc_iversion code used a spinlock
* to serialize increments.
*
* We add a full memory barrier to ensure that any de facto ordering
* with other state is preserved (either implicitly coming from cmpxchg
* or explicitly from smp_mb if we don't know upfront if we will execute
* the former).
*
* These barriers pair with inode_query_iversion().
*/
cur = inode_peek_iversion_raw(inode);
if (!force && !(cur & I_VERSION_QUERIED)) {
smp_mb();
cur = inode_peek_iversion_raw(inode);
}
do {
/* If flag is clear then we needn't do anything */
if (!force && !(cur & I_VERSION_QUERIED))
return false;
/* Since lowest bit is flag, add 2 to avoid it */
new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
return true;
}
EXPORT_SYMBOL(inode_maybe_inc_iversion);
/**
* inode_query_iversion - read i_version for later use
* @inode: inode from which i_version should be read
*
* Read the inode i_version counter. This should be used by callers that wish
* to store the returned i_version for later comparison. This will guarantee
* that a later query of the i_version will result in a different value if
* anything has changed.
*
* In this implementation, we fetch the current value, set the QUERIED flag and
* then try to swap it into place with a cmpxchg, if it wasn't already set. If
* that fails, we try again with the newly fetched value from the cmpxchg.
*/
u64 inode_query_iversion(struct inode *inode)
{
u64 cur, new;
bool fenced = false;
/*
* Memory barriers (implicit in cmpxchg, explicit in smp_mb) pair with
* inode_maybe_inc_iversion(), see that routine for more details.
*/
cur = inode_peek_iversion_raw(inode);
do {
/* If flag is already set, then no need to swap */
if (cur & I_VERSION_QUERIED) {
if (!fenced)
smp_mb();
break;
}
fenced = true;
new = cur | I_VERSION_QUERIED;
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
return cur >> I_VERSION_QUERIED_SHIFT;
}
EXPORT_SYMBOL(inode_query_iversion);
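/*
* A hypothetical sketch of the intended usage pattern: the filesystem bumps
* i_version whenever it changes the inode, but only needs to persist the new
* value if some observer (such as NFSD) has queried it; the observer side
* snapshots the counter with inode_query_iversion().
*/
static void examplefs_update_inode(struct inode *inode)
{
if (inode_maybe_inc_iversion(inode, false))
mark_inode_dirty(inode); /* a query was seen, the bump must reach disk */
}
static u64 examplefs_fetch_change_attr(struct inode *inode)
{
return inode_query_iversion(inode); /* sets QUERIED so later changes bump */
}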
ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
ssize_t direct_written, ssize_t buffered_written)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
loff_t pos = iocb->ki_pos - buffered_written;
loff_t end = iocb->ki_pos - 1;
int err;
/*
* If the buffered write fallback returned an error, we want to return
* the number of bytes which were written by direct I/O, or the error
* code if that was zero.
*
* Note that this differs from normal direct-io semantics, which will
* return -EFOO even if some bytes were written.
*/
if (unlikely(buffered_written < 0)) {
if (direct_written)
return direct_written;
return buffered_written;
}
/*
* We need to ensure that the page cache pages are written to disk and
* invalidated to preserve the expected O_DIRECT semantics.
*/
err = filemap_write_and_wait_range(mapping, pos, end);
if (err < 0) {
/*
* We don't know how much we wrote, so just return the number of
* bytes which were direct-written
*/
iocb->ki_pos -= buffered_written;
if (direct_written)
return direct_written;
return err;
}
invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
return direct_written + buffered_written;
}
EXPORT_SYMBOL_GPL(direct_write_fallback);
/**
* simple_inode_init_ts - initialize the timestamps for a new inode
* @inode: inode to be initialized
*
* When a new inode is created, most filesystems set the timestamps to the
* current time. Add a helper to do this.
*/
struct timespec64 simple_inode_init_ts(struct inode *inode)
{
struct timespec64 ts = inode_set_ctime_current(inode);
inode_set_atime_to_ts(inode, ts);
inode_set_mtime_to_ts(inode, ts);
return ts;
}
EXPORT_SYMBOL(simple_inode_init_ts);
struct dentry *stashed_dentry_get(struct dentry **stashed)
{
struct dentry *dentry;
guard(rcu)();
dentry = rcu_dereference(*stashed);
if (!dentry)
return NULL;
if (IS_ERR(dentry))
return dentry;
if (!lockref_get_not_dead(&dentry->d_lockref))
return NULL;
return dentry;
}
static struct dentry *prepare_anon_dentry(struct dentry **stashed,
struct super_block *sb,
void *data)
{
struct dentry *dentry;
struct inode *inode;
const struct stashed_operations *sops = sb->s_fs_info;
int ret;
inode = new_inode_pseudo(sb);
if (!inode) {
sops->put_data(data);
return ERR_PTR(-ENOMEM);
}
inode->i_flags |= S_IMMUTABLE;
inode->i_mode = S_IFREG;
simple_inode_init_ts(inode);
ret = sops->init_inode(inode, data);
if (ret < 0) {
iput(inode);
return ERR_PTR(ret);
}
/* Notice when this is changed. */
WARN_ON_ONCE(!S_ISREG(inode->i_mode));
dentry = d_alloc_anon(sb);
if (!dentry) {
iput(inode);
return ERR_PTR(-ENOMEM);
}
/* Store address of location where dentry's supposed to be stashed. */
dentry->d_fsdata = stashed;
/* @data is now owned by the fs */
d_instantiate(dentry, inode);
return dentry;
}
struct dentry *stash_dentry(struct dentry **stashed, struct dentry *dentry)
{
guard(rcu)();
for (;;) {
struct dentry *old;
/* Assume any old dentry was cleared out. */
old = cmpxchg(stashed, NULL, dentry);
if (likely(!old))
return dentry;
/* Check if somebody else installed a reusable dentry. */
if (lockref_get_not_dead(&old->d_lockref))
return old;
/* There's an old dead dentry there, try to take it over. */
if (likely(try_cmpxchg(stashed, &old, dentry)))
return dentry;
}
}
/**
* path_from_stashed - create path from stashed or new dentry
* @stashed: where to retrieve or stash dentry
* @mnt: mnt of the filesystems to use
* @data: data to store in inode->i_private
* @path: path to create
*
* The function tries to retrieve a stashed dentry from @stashed. If the dentry
* is still valid then it will be reused. If the dentry can't be reused, the function
* will allocate a new dentry and inode. It will then check again whether it
* can reuse an existing dentry in case one has been added in the meantime or
* update @stashed with the newly added dentry.
*
* Special-purpose helper for nsfs and pidfs.
*
* Return: On success zero and on failure a negative error is returned.
*/
int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
struct path *path)
{
struct dentry *dentry, *res;
const struct stashed_operations *sops = mnt->mnt_sb->s_fs_info;
/* See if dentry can be reused. */
res = stashed_dentry_get(stashed);
if (IS_ERR(res))
return PTR_ERR(res);
if (res) {
sops->put_data(data);
goto make_path;
}
/* Allocate a new dentry. */
dentry = prepare_anon_dentry(stashed, mnt->mnt_sb, data);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
/* Added a new dentry. @data is now owned by the filesystem. */
if (sops->stash_dentry)
res = sops->stash_dentry(stashed, dentry);
else
res = stash_dentry(stashed, dentry);
if (IS_ERR(res)) {
dput(dentry);
return PTR_ERR(res);
}
if (res != dentry)
dput(dentry);
make_path:
path->dentry = res;
path->mnt = mntget(mnt);
VFS_WARN_ON_ONCE(path->dentry->d_fsdata != stashed);
VFS_WARN_ON_ONCE(d_inode(path->dentry)->i_private != data);
return 0;
}
void stashed_dentry_prune(struct dentry *dentry)
{
struct dentry **stashed = dentry->d_fsdata;
struct inode *inode = d_inode(dentry);
if (WARN_ON_ONCE(!stashed))
return;
if (!inode)
return;
/*
* Only replace our own @dentry as someone else might've
* already cleared out @dentry and stashed their own
* dentry in there.
*/
cmpxchg(stashed, dentry, NULL);
}
/* parent must be held exclusive */
struct dentry *simple_start_creating(struct dentry *parent, const char *name)
{
struct dentry *dentry;
struct inode *dir = d_inode(parent);
inode_lock(dir);
if (unlikely(IS_DEADDIR(dir))) {
inode_unlock(dir);
return ERR_PTR(-ENOENT);
}
dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry)) {
inode_unlock(dir);
return dentry;
}
if (dentry->d_inode) {
dput(dentry);
inode_unlock(dir);
return ERR_PTR(-EEXIST);
}
return dentry;
}
EXPORT_SYMBOL(simple_start_creating);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filesystem access notification for Linux
*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*/
#ifndef __LINUX_FSNOTIFY_BACKEND_H
#define __LINUX_FSNOTIFY_BACKEND_H
#ifdef __KERNEL__
#include <linux/idr.h> /* inotify uses this */
#include <linux/fs.h> /* struct inode */
#include <linux/list.h>
#include <linux/path.h> /* struct path */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/mempool.h>
#include <linux/sched/mm.h>
/*
* IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
* convert between them. dnotify only needs conversion at watch creation
* so no perf loss there. fanotify isn't defined yet, so it can use the
* holes if it needs more events.
*/
#define FS_ACCESS 0x00000001 /* File was accessed */
#define FS_MODIFY 0x00000002 /* File was modified */
#define FS_ATTRIB 0x00000004 /* Metadata changed */
#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FS_OPEN 0x00000020 /* File was opened */
#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
#define FS_CREATE 0x00000100 /* Subfile was created */
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */
/*
* FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify
* which does not support FS_ERROR.
*/
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
/* #define FS_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */
#define FS_PRE_ACCESS 0x00100000 /* Pre-content access hook */
#define FS_MNT_ATTACH 0x01000000 /* Mount was attached */
#define FS_MNT_DETACH 0x02000000 /* Mount was detached */
#define FS_MNT_MOVE (FS_MNT_ATTACH | FS_MNT_DETACH)
/*
* Set on inode mark that cares about things that happen to its children.
* Always set for dnotify and inotify.
* Set on inode/sb/mount marks that care about parent/name info.
*/
#define FS_EVENT_ON_CHILD 0x08000000
#define FS_RENAME 0x10000000 /* File was renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
#define FS_ISDIR 0x40000000 /* event occurred against dir */
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
/*
* Directory entry modification events - reported only to directory
* where entry is modified and not to a watching parent.
* The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
* when a directory entry inside a child subdir changes.
*/
#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
/* Mount namespace events */
#define FSNOTIFY_MNT_EVENTS (FS_MNT_ATTACH | FS_MNT_DETACH)
/* Content events can be used to inspect file content */
#define FSNOTIFY_CONTENT_PERM_EVENTS (FS_OPEN_PERM | FS_OPEN_EXEC_PERM | \
FS_ACCESS_PERM)
/* Pre-content events can be used to fill file content */
#define FSNOTIFY_PRE_CONTENT_EVENTS (FS_PRE_ACCESS)
#define ALL_FSNOTIFY_PERM_EVENTS (FSNOTIFY_CONTENT_PERM_EVENTS | \
FSNOTIFY_PRE_CONTENT_EVENTS)
/*
* This is a list of all events that may get sent to a parent that is watching
* with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory.
*/
#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \
FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \
FS_OPEN | FS_OPEN_EXEC)
/*
* This is a list of all events that may get sent with the parent inode as the
* @to_tell argument of fsnotify().
* It may include events that can be sent to an inode/sb/mount mark, but cannot
* be sent to a parent watching children.
*/
#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD)
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
FSNOTIFY_MNT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
FS_ERROR)
/* Extra flags that may be reported with event or control handling of events */
#define ALL_FSNOTIFY_FLAGS (FS_ISDIR | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT)
#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark;
struct fsnotify_event_private_data;
struct fsnotify_fname;
struct fsnotify_iter_info;
struct mem_cgroup;
/*
* Each group must define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
*
* handle_event - main call for a group to handle an fs event
* @group: group to notify
* @mask: event type and flags
* @data: object that event happened on
* @data_type: type of object for fanotify_data_XXX() accessors
* @dir: optional directory associated with event -
* if @file_name is not NULL, this is the directory that
* @file_name is relative to
* @file_name: optional file name associated with event
* @cookie: inotify rename cookie
* @iter_info: array of marks from this group that are interested in the event
*
* handle_inode_event - simple variant of handle_event() for groups that only
* have inode marks and don't have ignore mask
* @mark: mark to notify
* @mask: event type and flags
* @inode: inode that event happened on
* @dir: optional directory associated with event -
* if @file_name is not NULL, this is the directory that
* @file_name is relative to.
* Either @inode or @dir must be non-NULL.
* @file_name: optional file name associated with event
* @cookie: inotify rename cookie
*
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
* MUST be holding a reference on each mark and that reference must be
* dropped in this function. inotify uses this function to send
* userspace messages that marks have been removed.
*/
struct fsnotify_ops {
int (*handle_event)(struct fsnotify_group *group, u32 mask,
const void *data, int data_type, struct inode *dir,
const struct qstr *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask,
struct inode *inode, struct inode *dir,
const struct qstr *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event);
/* called on final put+free to free memory */
void (*free_mark)(struct fsnotify_mark *mark);
};
/*
* all of the information about the original object we want to now send to
* a group. If you want to carry more info from the accessing task to the
* listener this structure is where you need to be adding fields.
*/
struct fsnotify_event {
struct list_head list;
};
/*
* fsnotify group priorities.
* Events are sent in order from highest priority to lowest priority.
*/
enum fsnotify_group_prio {
FSNOTIFY_PRIO_NORMAL = 0, /* normal notifiers, no permissions */
FSNOTIFY_PRIO_CONTENT, /* fanotify permission events */
FSNOTIFY_PRIO_PRE_CONTENT, /* fanotify pre-content events */
__FSNOTIFY_PRIO_NUM
};
/*
* A group is a "thing" that wants to receive notification about filesystem
* events. The mask holds the subset of event types this group cares about.
* refcnt on a group is up to the implementor and at any moment if it goes to 0
* everything will be cleaned up.
*/
struct fsnotify_group {
const struct fsnotify_ops *ops; /* how this group handles things */
/*
* How the refcnt is used is up to each group. When the refcnt hits 0
* fsnotify will clean up all of the resources associated with this group.
* As an example, the dnotify group will always have a refcnt=1 and that
* will never change. Inotify, on the other hand, has a group per
* inotify_init() and the refcnt will hit 0 only when that fd has been
* closed.
*/
refcount_t refcnt; /* things with interest in this group */
/* needed to send notification to userspace */
spinlock_t notification_lock; /* protect the notification_list */
struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
enum fsnotify_group_prio priority; /* priority for sending events */
bool shutdown; /* group is being shut down, don't queue more events */
#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
int flags;
unsigned int owner_flags; /* stored flags of mark_mutex owner */
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
atomic_t user_waits; /* Number of tasks waiting for user
* response */
struct list_head marks_list; /* all inode marks for this group */
struct fasync_struct *fsn_fa; /* async notification */
struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
struct mem_cgroup *memcg; /* memcg to charge allocations */
struct user_namespace *user_ns; /* user ns where group was created */
/* groups can define private fields here or use the void *private */
union {
void *private;
#ifdef CONFIG_INOTIFY_USER
struct inotify_group_private_data {
spinlock_t idr_lock;
struct idr idr;
struct ucounts *ucounts;
} inotify_data;
#endif
#ifdef CONFIG_FANOTIFY
struct fanotify_group_private_data {
/* Hash table of events for merge */
struct hlist_head *merge_hash;
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
int flags; /* flags from fanotify_init() */
int f_flags; /* event_f_flags from fanotify_init() */
struct ucounts *ucounts;
mempool_t error_events_pool;
/* chained on perm_group_list */
struct list_head perm_grp_list;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
};
/*
* These helpers are used to prevent deadlock when reclaiming inodes with
* evictable marks of the same group that is allocating a new mark.
*/
static inline void fsnotify_group_lock(struct fsnotify_group *group)
{
mutex_lock(&group->mark_mutex);
group->owner_flags = memalloc_nofs_save();
}
static inline void fsnotify_group_unlock(struct fsnotify_group *group)
{
memalloc_nofs_restore(group->owner_flags);
mutex_unlock(&group->mark_mutex);
}
static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
{
WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
}
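/*
* A hypothetical sketch of how a backend serialises changes to its mark list
* with these helpers; the NOFS save/restore inside them keeps allocations made
* under the mutex from recursing into reclaim of inodes holding this group's
* evictable marks.
*/
static void example_backend_forget_marks(struct fsnotify_group *group)
{
fsnotify_group_lock(group);
/* ... walk or modify group->marks_list here ... */
fsnotify_group_unlock(group);
}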
/* When calling fsnotify tell it if the data is a path or inode */
enum fsnotify_data_type {
FSNOTIFY_EVENT_NONE,
FSNOTIFY_EVENT_FILE_RANGE,
FSNOTIFY_EVENT_PATH,
FSNOTIFY_EVENT_INODE,
FSNOTIFY_EVENT_DENTRY,
FSNOTIFY_EVENT_MNT,
FSNOTIFY_EVENT_ERROR,
};
struct fs_error_report {
int error;
struct inode *inode;
struct super_block *sb;
};
struct file_range {
const struct path *path;
loff_t pos;
size_t count;
};
static inline const struct path *file_range_path(const struct file_range *range)
{
return range->path;
}
struct fsnotify_mnt {
const struct mnt_namespace *ns;
u64 mnt_id;
};
static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_INODE:
return (struct inode *)data;
case FSNOTIFY_EVENT_DENTRY:
return d_inode(data);
case FSNOTIFY_EVENT_PATH:
return d_inode(((const struct path *)data)->dentry);
case FSNOTIFY_EVENT_FILE_RANGE:
return d_inode(file_range_path(data)->dentry);
case FSNOTIFY_EVENT_ERROR:
return ((struct fs_error_report *)data)->inode;
default:
return NULL;
}
}
static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_DENTRY:
/* Non const is needed for dget() */
return (struct dentry *)data;
case FSNOTIFY_EVENT_PATH:
return ((const struct path *)data)->dentry;
case FSNOTIFY_EVENT_FILE_RANGE:
return file_range_path(data)->dentry;
default:
return NULL;
}
}
static inline const struct path *fsnotify_data_path(const void *data,
int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_PATH:
return data;
case FSNOTIFY_EVENT_FILE_RANGE:
return file_range_path(data);
default:
return NULL;
}
}
static inline struct super_block *fsnotify_data_sb(const void *data,
int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_INODE:
return ((struct inode *)data)->i_sb;
case FSNOTIFY_EVENT_DENTRY:
return ((struct dentry *)data)->d_sb;
case FSNOTIFY_EVENT_PATH:
return ((const struct path *)data)->dentry->d_sb;
case FSNOTIFY_EVENT_FILE_RANGE:
return file_range_path(data)->dentry->d_sb;
case FSNOTIFY_EVENT_ERROR:
return ((struct fs_error_report *) data)->sb;
default:
return NULL;
}
}
static inline const struct fsnotify_mnt *fsnotify_data_mnt(const void *data,
int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_MNT:
return data;
default:
return NULL;
}
}
static inline u64 fsnotify_data_mnt_id(const void *data, int data_type)
{
const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
return mnt_data ? mnt_data->mnt_id : 0;
}
static inline struct fs_error_report *fsnotify_data_error_report(
const void *data,
int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_ERROR:
return (struct fs_error_report *) data;
default:
return NULL;
}
}
static inline const struct file_range *fsnotify_data_file_range(
const void *data,
int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_FILE_RANGE:
return (struct file_range *)data;
default:
return NULL;
}
}
/*
* Index to merged marks iterator array that correlates to a type of watch.
* The type of watched object can be deduced from the iterator type, but not
* the other way around, because an event can match different watched objects
* of the same object type.
* For example, both parent and child are watching an object of type inode.
*/
enum fsnotify_iter_type {
FSNOTIFY_ITER_TYPE_INODE,
FSNOTIFY_ITER_TYPE_VFSMOUNT,
FSNOTIFY_ITER_TYPE_SB,
FSNOTIFY_ITER_TYPE_PARENT,
FSNOTIFY_ITER_TYPE_INODE2,
FSNOTIFY_ITER_TYPE_MNTNS,
FSNOTIFY_ITER_TYPE_COUNT
};
/* The type of object that a mark is attached to */
enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_ANY = -1,
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
FSNOTIFY_OBJ_TYPE_MNTNS,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
static inline bool fsnotify_valid_obj_type(unsigned int obj_type)
{
return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT);
}
struct fsnotify_iter_info {
struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT];
struct fsnotify_group *current_group;
unsigned int report_mask;
int srcu_idx;
};
static inline bool fsnotify_iter_should_report_type(
struct fsnotify_iter_info *iter_info, int iter_type)
{
return (iter_info->report_mask & (1U << iter_type));
}
static inline void fsnotify_iter_set_report_type(
struct fsnotify_iter_info *iter_info, int iter_type)
{
iter_info->report_mask |= (1U << iter_type);
}
static inline struct fsnotify_mark *fsnotify_iter_mark(
struct fsnotify_iter_info *iter_info, int iter_type)
{
if (fsnotify_iter_should_report_type(iter_info, iter_type))
return iter_info->marks[iter_type];
return NULL;
}
static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type,
struct fsnotify_mark **markp)
{
while (type < FSNOTIFY_ITER_TYPE_COUNT) {
*markp = fsnotify_iter_mark(iter, type);
if (*markp)
break;
type++;
}
return type;
}
#define FSNOTIFY_ITER_FUNCS(name, NAME) \
static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
struct fsnotify_iter_info *iter_info) \
{ \
return fsnotify_iter_mark(iter_info, FSNOTIFY_ITER_TYPE_##NAME); \
}
FSNOTIFY_ITER_FUNCS(inode, INODE)
FSNOTIFY_ITER_FUNCS(parent, PARENT)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
#define fsnotify_foreach_iter_type(type) \
for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++)
#define fsnotify_foreach_iter_mark_type(iter, mark, type) \
for (type = 0; \
type = fsnotify_iter_step(iter, type, &mark), \
type < FSNOTIFY_ITER_TYPE_COUNT; \
type++)
/*
* Inode/vfsmount/sb point to this structure which tracks all marks attached to
* the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this
* structure. We destroy this structure when there are no more marks attached
* to it. The structure is protected by fsnotify_mark_srcu.
*/
struct fsnotify_mark_connector {
spinlock_t lock;
unsigned char type; /* Type of object [lock] */
unsigned char prio; /* Highest priority group */
#define FSNOTIFY_CONN_FLAG_IS_WATCHED 0x01
#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
unsigned short flags; /* flags [lock] */
union {
/* Object pointer [lock] */
void *obj;
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
struct hlist_head list;
};
/*
* Container for per-sb fsnotify state (sb marks and more).
* Attached lazily on first marked object on the sb and freed when killing sb.
*/
struct fsnotify_sb_info {
struct fsnotify_mark_connector __rcu *sb_marks;
/*
* Number of inode/mount/sb objects that are being watched in this sb.
* Note that inodes objects are currently double-accounted.
*
* The value in watched_objects[prio] is the number of objects that are
* watched by groups of priority >= prio, so watched_objects[0] is the
* total number of watched objects in this sb.
*/
atomic_long_t watched_objects[__FSNOTIFY_PRIO_NUM];
};
static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb)
{
#ifdef CONFIG_FSNOTIFY
return READ_ONCE(sb->s_fsnotify_info);
#else
return NULL;
#endif
}
static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb)
{
return &fsnotify_sb_info(sb)->watched_objects[0];
}
/*
* A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
* These are flushed when an inode is evicted from core and may be flushed
* when the inode is modified (as seen by fsnotify_access). Some fsnotify
* users (such as dnotify) will flush these when the open fd is closed and not
* at inode eviction or modification.
*
* Text in brackets is showing the lock(s) protecting modifications of a
* particular entry. obj_lock means either inode->i_lock or
* mnt->mnt_root->d_lock depending on the mark type.
*/
struct fsnotify_mark {
/* Mask this mark is for [mark->lock, group->mark_mutex] */
__u32 mask;
/* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
refcount_t refcnt;
/* Group this mark is for. Set on mark creation, stable until last ref
* is dropped */
struct fsnotify_group *group;
/* List of marks by group->marks_list. Also reused for queueing
* mark into destroy_list when it's waiting for the end of SRCU period
* before it can be freed. [group->mark_mutex] */
struct list_head g_list;
/* Protects inode / mnt pointers, flags, masks */
spinlock_t lock;
/* List of marks for inode / vfsmount [connector->lock, mark ref] */
struct hlist_node obj_list;
/* Head of list of marks for an object [mark ref] */
struct fsnotify_mark_connector *connector;
/* Events types and flags to ignore [mark->lock, group->mark_mutex] */
__u32 ignore_mask;
/* General fsnotify mark flags */
#define FSNOTIFY_MARK_FLAG_ALIVE 0x0001
#define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002
/* inotify mark flags */
#define FSNOTIFY_MARK_FLAG_EXCL_UNLINK 0x0010
#define FSNOTIFY_MARK_FLAG_IN_ONESHOT 0x0020
/* fanotify mark flags */
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800
#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000
unsigned int flags; /* flags [mark->lock] */
};
#ifdef CONFIG_FSNOTIFY
/* called from the vfs helpers */
/* main fsnotify call to send events */
extern int fsnotify(__u32 mask, const void *data, int data_type,
struct inode *dir, const struct qstr *name,
struct inode *inode, u32 cookie);
extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
int data_type);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
extern void __fsnotify_mntns_delete(struct mnt_namespace *mntns);
extern void fsnotify_sb_free(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
extern void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt);
static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
{
/* FS_EVENT_ON_CHILD is set on marks that want parent/name info */
if (!(mask & FS_EVENT_ON_CHILD))
return 0;
/*
* This object might be watched by a mark that cares about parent/name
* info, does it care about the specific set of events that can be
* reported with parent/name info?
*/
return mask & FS_EVENTS_POSS_TO_PARENT;
}
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
__u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
/* FS_EVENT_ON_CHILD is set if the inode may care */
if (!(parent_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
return parent_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
* Update the dentry with a flag indicating the interest of its parent to receive
* filesystem events when those events happen to this dentry->d_inode.
*/
static inline void fsnotify_update_flags(struct dentry *dentry)
{
assert_spin_locked(&dentry->d_lock);
/*
* Serialisation of setting PARENT_WATCHED on the dentries is provided
* by d_lock. If inotify_inode_watched changes after we have taken
* d_lock, the following fsnotify_set_children_dentry_flags call will
* find our entry, so it will spin until we complete here, and update
* us with the new state.
*/
if (fsnotify_inode_watches_children(dentry->d_parent->d_inode))
dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
}
/* called from fsnotify listeners, such as fanotify or dnotify */
/* create a new group */
extern struct fsnotify_group *fsnotify_alloc_group(
const struct fsnotify_ops *ops,
int flags);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* group destruction begins, stop queuing new events */
extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
extern int fsnotify_fasync(int fd, struct file *file, int on);
/* Free event from memory */
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_insert_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct fsnotify_group *,
struct fsnotify_event *),
void (*insert)(struct fsnotify_group *,
struct fsnotify_event *));
static inline int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct fsnotify_group *,
struct fsnotify_event *))
{
return fsnotify_insert_event(group, event, merge, NULL);
}
/* Queue overflow event to a notification group */
static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
{
fsnotify_add_event(group, group->overflow_event, NULL);
}
static inline bool fsnotify_is_overflow_event(u32 mask)
{
return mask & FS_Q_OVERFLOW;
}
static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
assert_spin_locked(&group->notification_lock);
return list_empty(&group->notification_list);
}
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
/* Remove event queued in the notification list */
extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* functions used to manipulate the marks attached to inodes */
/*
* Canonical "ignore mask" including event flags.
*
* Note the subtle semantic difference from the legacy ->ignored_mask.
* ->ignored_mask traditionally only meant which events should be ignored,
* while ->ignore_mask also includes flags regarding the type of objects on
* which events should be ignored.
*/
static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark)
{
__u32 ignore_mask = mark->ignore_mask;
/* The event flags in ignore mask take effect */
if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
return ignore_mask;
/*
* Legacy behavior:
* - Always ignore events on dir
* - Ignore events on child if parent is watching children
*/
ignore_mask |= FS_ISDIR;
ignore_mask &= ~FS_EVENT_ON_CHILD;
ignore_mask |= mark->mask & FS_EVENT_ON_CHILD;
return ignore_mask;
}
/* Legacy ignored_mask - only event types to ignore */
static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark)
{
return mark->ignore_mask & ALL_FSNOTIFY_EVENTS;
}
/*
* Check if mask (or ignore mask) should be applied depending if victim is a
* directory and whether it is reported to a watching parent.
*/
static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir,
int iter_type)
{
/* Should mask be applied to a directory? */
if (is_dir && !(mask & FS_ISDIR))
return false;
/* Should mask be applied to a child? */
if (iter_type == FSNOTIFY_ITER_TYPE_PARENT &&
!(mask & FS_EVENT_ON_CHILD))
return false;
return true;
}
/*
* Effective ignore mask taking into account if event victim is a
* directory and whether it is reported to a watching parent.
*/
static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark,
bool is_dir, int iter_type)
{
__u32 ignore_mask = fsnotify_ignored_events(mark);
if (!ignore_mask)
return 0;
/* For non-dir and non-child, no need to consult the event flags */
if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT)
return ignore_mask;
ignore_mask = fsnotify_ignore_mask(mark);
if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type))
return 0;
return ignore_mask & ALL_FSNOTIFY_EVENTS;
}
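/*
 * Worked example (hypothetical mark, added for illustration): a legacy mark
 * with ignore_mask = FS_OPEN and without FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS
 * gets FS_ISDIR added by fsnotify_ignore_mask(), so an FS_OPEN event whose
 * victim is a directory passes fsnotify_mask_applicable() and is ignored.
 * The same event reported to a watching parent (FSNOTIFY_ITER_TYPE_PARENT)
 * is only ignored if the mark's own mask contains FS_EVENT_ON_CHILD, since
 * that is the only way FS_EVENT_ON_CHILD ends up in the legacy ignore mask.
 */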
/* Get mask for calculating object interest taking ignore mask into account */
static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
{
__u32 mask = mark->mask;
if (!fsnotify_ignored_events(mark))
return mask;
/* Interest in FS_MODIFY may be needed for clearing ignore mask */
if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
mask |= FS_MODIFY;
/*
* If mark is interested in ignoring events on children, the object must
* show interest in those events for fsnotify_parent() to notice it.
*/
return mask | mark->ignore_mask;
}
/* Get mask of events for a list of marks */
extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn);
extern void fsnotify_init_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* Find mark belonging to given group in the list of marks */
struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type,
struct fsnotify_group *group);
/* attach the mark to the object */
int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj,
unsigned int obj_type, int add_flags);
int fsnotify_add_mark_locked(struct fsnotify_mark *mark, void *obj,
unsigned int obj_type, int add_flags);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct inode *inode,
int add_flags)
{
return fsnotify_add_mark(mark, inode, FSNOTIFY_OBJ_TYPE_INODE,
add_flags);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
int add_flags)
{
return fsnotify_add_mark_locked(mark, inode, FSNOTIFY_OBJ_TYPE_INODE,
add_flags);
}
static inline struct fsnotify_mark *fsnotify_find_inode_mark(
struct inode *inode,
struct fsnotify_group *group)
{
return fsnotify_find_mark(inode, FSNOTIFY_OBJ_TYPE_INODE, group);
}
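/*
 * Usage sketch (illustrative only, not part of this API): a backend would
 * typically allocate a group once, then initialize a mark against that group
 * and attach it to the inode it wants to watch. Real backends allocate marks
 * from their own slab cache and free them from their fsnotify_ops callbacks;
 * "example_ops" and "example_watch_inode" below are hypothetical names.
 */
static inline int example_watch_inode(const struct fsnotify_ops *example_ops,
				      struct fsnotify_mark *mark,
				      struct inode *inode)
{
	struct fsnotify_group *group;
	int ret;

	group = fsnotify_alloc_group(example_ops, 0);
	if (IS_ERR(group))
		return PTR_ERR(group);

	fsnotify_init_mark(mark, group);
	mark->mask = FS_OPEN | FS_CLOSE_WRITE;

	ret = fsnotify_add_inode_mark(mark, inode, 0);
	/* the object list holds its own reference once the mark is attached */
	fsnotify_put_mark(mark);
	if (ret)
		fsnotify_destroy_group(group);
	return ret;
}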
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* detach mark from inode / mount list, group list, drop inode reference */
extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
/* free mark */
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* Wait until all marks queued for destruction are destroyed */
extern void fsnotify_wait_marks_destroyed(void);
/* Clear all of the marks of a group attached to a given object type */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
unsigned int obj_type);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
static inline void fsnotify_init_event(struct fsnotify_event *event)
{
INIT_LIST_HEAD(&event->list);
}
int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
size_t count);
#else
static inline int fsnotify_pre_content(const struct path *path,
const loff_t *ppos, size_t count)
{
return 0;
}
static inline int fsnotify(__u32 mask, const void *data, int data_type,
struct inode *dir, const struct qstr *name,
struct inode *inode, u32 cookie)
{
return 0;
}
static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask,
const void *data, int data_type)
{
return 0;
}
static inline void __fsnotify_inode_delete(struct inode *inode)
{}
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
static inline void fsnotify_sb_delete(struct super_block *sb)
{}
static inline void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
{}
static inline void fsnotify_sb_free(struct super_block *sb)
{}
static inline void fsnotify_update_flags(struct dentry *dentry)
{}
static inline u32 fsnotify_get_cookie(void)
{
return 0;
}
static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
static inline void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
{}
#endif /* CONFIG_FSNOTIFY */
#endif /* __KERNEL__ */
#endif /* __LINUX_FSNOTIFY_BACKEND_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/* Common capabilities, needed by capability.o.
*/
#include <linux/capability.h>
#include <linux/audit.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/ptrace.h>
#include <linux/xattr.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/mnt_idmapping.h>
#include <uapi/linux/lsm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/capability.h>
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
* However if fE is also set, then the intent is for only
* the file capabilities to be applied, and the setuid-root
* bit is left on either to change the uid (plausible) or
* to get full privilege on a kernel without file capabilities
* support. So in that case we do not raise capabilities.
*
* Warn if that happens, once per boot.
*/
static void warn_setuid_and_fcaps_mixed(const char *fname)
{
static int warned;
if (!warned) {
printk(KERN_INFO "warning: `%s' has both setuid-root and"
" effective capabilities. Therefore not raising all"
" capabilities.\n", fname);
warned = 1;
}
}
/**
* cap_capable_helper - Determine whether a task has a particular effective
* capability.
* @cred: The credentials to use
* @target_ns: The user namespace of the resource being accessed
* @cred_ns: The user namespace of the credentials
* @cap: The capability to check for
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
*
* See cap_capable for more details.
*/
static inline int cap_capable_helper(const struct cred *cred,
struct user_namespace *target_ns,
const struct user_namespace *cred_ns,
int cap)
{
struct user_namespace *ns = target_ns;
/* See if cred has the capability in the target user namespace
* by examining the target user namespace and all of the target
* user namespace's parents.
*/
for (;;) {
/* Do we have the necessary capabilities? */
if (likely(ns == cred_ns))
return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
/*
* If we're already at a lower level than we're looking for,
* we're done searching.
*/
if (ns->level <= cred_ns->level)
return -EPERM;
/*
* The owner of the user namespace in the parent of the
* user namespace has all caps.
*/
if ((ns->parent == cred_ns) && uid_eq(ns->owner, cred->euid))
return 0;
/*
* If you have a capability in a parent user ns, then you have
* it over all children user namespaces as well.
*/
ns = ns->parent;
}
/* We never get here */
}
/**
* cap_capable - Determine whether a task has a particular effective capability
* @cred: The credentials to use
* @target_ns: The user namespace of the resource being accessed
* @cap: The capability to check for
* @opts: Bitmask of options defined in include/linux/security.h (unused)
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
*
* NOTE WELL: cap_capable() has reverse semantics to the capable() call
* and friends. That is cap_capable() returns an int 0 when a task has
* a capability, while the kernel's capable(), has_ns_capability(),
* has_ns_capability_noaudit(), and has_capability_noaudit() return a
* bool true (1) for this case.
*/
int cap_capable(const struct cred *cred, struct user_namespace *target_ns,
int cap, unsigned int opts)
{
const struct user_namespace *cred_ns = cred->user_ns;
int ret = cap_capable_helper(cred, target_ns, cred_ns, cap);
trace_cap_capable(cred, target_ns, cred_ns, cap, ret);
return ret;
}
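/*
 * Illustrative sketch (not a kernel code path): because of the inverted
 * return convention described above, a hypothetical boolean wrapper around
 * cap_capable() would look like the helper below. Real callers should use
 * capable()/ns_capable() and friends instead.
 */
static inline bool example_cred_has_cap(const struct cred *cred,
					struct user_namespace *ns, int cap)
{
	return cap_capable(cred, ns, cap, CAP_OPT_NONE) == 0;
}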
/**
* cap_settime - Determine whether the current process may set the system clock
* @ts: The time to set
* @tz: The timezone to set
*
* Determine whether the current process may set the system clock and timezone
* information, returning 0 if permission granted, -ve if denied.
*/
int cap_settime(const struct timespec64 *ts, const struct timezone *tz)
{
if (!capable(CAP_SYS_TIME))
return -EPERM;
return 0;
}
/**
* cap_ptrace_access_check - Determine whether the current process may access
* another
* @child: The process to be accessed
* @mode: The mode of attachment.
*
* If we are in the same or an ancestor user_ns and have all the target
* task's capabilities, then ptrace access is allowed.
* If we have the ptrace capability to the target user_ns, then ptrace
* access is allowed.
* Else denied.
*
* Determine whether a process may access another, returning 0 if permission
* granted, -ve if denied.
*/
int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
int ret = 0;
const struct cred *cred, *child_cred;
const kernel_cap_t *caller_caps;
rcu_read_lock();
cred = current_cred();
child_cred = __task_cred(child);
if (mode & PTRACE_MODE_FSCREDS)
caller_caps = &cred->cap_effective;
else
caller_caps = &cred->cap_permitted;
if (cred->user_ns == child_cred->user_ns &&
cap_issubset(child_cred->cap_permitted, *caller_caps))
goto out;
if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
ret = -EPERM;
out:
rcu_read_unlock();
return ret;
}
/**
* cap_ptrace_traceme - Determine whether another process may trace the current
* @parent: The task proposed to be the tracer
*
* If parent is in the same or an ancestor user_ns and has all current's
* capabilities, then ptrace access is allowed.
* If parent has the ptrace capability to current's user_ns, then ptrace
* access is allowed.
* Else denied.
*
* Determine whether the nominated task is permitted to trace the current
* process, returning 0 if permission is granted, -ve if denied.
*/
int cap_ptrace_traceme(struct task_struct *parent)
{
int ret = 0;
const struct cred *cred, *child_cred;
rcu_read_lock();
cred = __task_cred(parent);
child_cred = current_cred();
if (cred->user_ns == child_cred->user_ns &&
cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
goto out;
if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
ret = -EPERM;
out:
rcu_read_unlock();
return ret;
}
/**
* cap_capget - Retrieve a task's capability sets
* @target: The task from which to retrieve the capability sets
* @effective: The place to record the effective set
* @inheritable: The place to record the inheritable set
* @permitted: The place to record the permitted set
*
* This function retrieves the capabilities of the nominated task and returns
* them to the caller.
*/
int cap_capget(const struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
const struct cred *cred;
/* Derived from kernel/capability.c:sys_capget. */
rcu_read_lock();
cred = __task_cred(target);
*effective = cred->cap_effective;
*inheritable = cred->cap_inheritable;
*permitted = cred->cap_permitted;
rcu_read_unlock();
return 0;
}
/*
* Determine whether the inheritable capabilities are limited to the old
* permitted set. Returns 1 if they are limited, 0 if they are not.
*/
static inline int cap_inh_is_capped(void)
{
/* they are so limited unless the current task has the CAP_SETPCAP
* capability
*/
if (cap_capable(current_cred(), current_cred()->user_ns,
CAP_SETPCAP, CAP_OPT_NONE) == 0)
return 0;
return 1;
}
/**
* cap_capset - Validate and apply proposed changes to current's capabilities
* @new: The proposed new credentials; alterations should be made here
* @old: The current task's current credentials
* @effective: A pointer to the proposed new effective capabilities set
* @inheritable: A pointer to the proposed new inheritable capabilities set
* @permitted: A pointer to the proposed new permitted capabilities set
*
* This function validates and applies a proposed mass change to the current
* process's capability sets. The changes are made to the proposed new
* credentials, and assuming no error, will be committed by the caller of LSM.
*/
int cap_capset(struct cred *new,
const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
if (cap_inh_is_capped() &&
!cap_issubset(*inheritable,
cap_combine(old->cap_inheritable,
old->cap_permitted)))
/* incapable of using this inheritable set */
return -EPERM;
if (!cap_issubset(*inheritable,
cap_combine(old->cap_inheritable,
old->cap_bset)))
/* no new pI capabilities outside bounding set */
return -EPERM;
/* verify restrictions on target's new Permitted set */
if (!cap_issubset(*permitted, old->cap_permitted))
return -EPERM;
/* verify the _new_Effective_ is a subset of the _new_Permitted_ */
if (!cap_issubset(*effective, *permitted))
return -EPERM;
new->cap_effective = *effective;
new->cap_inheritable = *inheritable;
new->cap_permitted = *permitted;
/*
* Mask off ambient bits that are no longer both permitted and
* inheritable.
*/
new->cap_ambient = cap_intersect(new->cap_ambient,
cap_intersect(*permitted,
*inheritable));
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EINVAL;
return 0;
}
/**
* cap_inode_need_killpriv - Determine if inode change affects privileges
* @dentry: The inode/dentry being changed, with the change marked ATTR_KILL_PRIV
*
* Determine if an inode having a change applied that's marked ATTR_KILL_PRIV
* affects the security markings on that inode, and if it does, whether
* inode_killpriv() should be invoked or the change rejected.
*
* Return: 1 if security.capability has a value, meaning inode_killpriv()
* is required, 0 otherwise, meaning inode_killpriv() is not required.
*/
int cap_inode_need_killpriv(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
int error;
error = __vfs_getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0);
return error > 0;
}
/**
* cap_inode_killpriv - Erase the security markings on an inode
*
* @idmap: idmap of the mount the inode was found from
* @dentry: The inode/dentry to alter
*
* Erase the privilege-enhancing security markings on an inode.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*
* Return: 0 if successful, -ve on error.
*/
int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry)
{
int error;
error = __vfs_removexattr(idmap, dentry, XATTR_NAME_CAPS);
if (error == -EOPNOTSUPP)
error = 0;
return error;
}
static bool rootid_owns_currentns(vfsuid_t rootvfsuid)
{
struct user_namespace *ns;
kuid_t kroot;
if (!vfsuid_valid(rootvfsuid))
return false;
kroot = vfsuid_into_kuid(rootvfsuid);
for (ns = current_user_ns();; ns = ns->parent) {
if (from_kuid(ns, kroot) == 0)
return true;
if (ns == &init_user_ns)
break;
}
return false;
}
static __u32 sansflags(__u32 m)
{
return m & ~VFS_CAP_FLAGS_EFFECTIVE;
}
static bool is_v2header(int size, const struct vfs_cap_data *cap)
{
if (size != XATTR_CAPS_SZ_2)
return false;
return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
}
static bool is_v3header(int size, const struct vfs_cap_data *cap)
{
if (size != XATTR_CAPS_SZ_3)
return false;
return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
}
/*
* getsecurity: We are called for security.* before any attempt to read the
* xattr from the inode itself.
*
* This gives us a chance to read the on-disk value and convert it. If we
* return -EOPNOTSUPP, then vfs_getxattr() will call the i_op handler.
*
* Note we are not called by vfs_getxattr_alloc(), but that is only called
* by the integrity subsystem, which really wants the unconverted values -
* so that's good.
*/
int cap_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode, const char *name, void **buffer,
bool alloc)
{
int size;
kuid_t kroot;
vfsuid_t vfsroot;
u32 nsmagic, magic;
uid_t root, mappedroot;
char *tmpbuf = NULL;
struct vfs_cap_data *cap;
struct vfs_ns_cap_data *nscap = NULL;
struct dentry *dentry;
struct user_namespace *fs_ns;
if (strcmp(name, "capability") != 0)
return -EOPNOTSUPP;
dentry = d_find_any_alias(inode);
if (!dentry)
return -EINVAL;
size = vfs_getxattr_alloc(idmap, dentry, XATTR_NAME_CAPS, &tmpbuf,
sizeof(struct vfs_ns_cap_data), GFP_NOFS);
dput(dentry);
/* gcc11 complains if we don't check for !tmpbuf */
if (size < 0 || !tmpbuf)
goto out_free;
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
if (is_v2header(size, cap)) {
root = 0;
} else if (is_v3header(size, cap)) {
nscap = (struct vfs_ns_cap_data *) tmpbuf;
root = le32_to_cpu(nscap->rootid);
} else {
size = -EINVAL;
goto out_free;
}
kroot = make_kuid(fs_ns, root);
/* If this is an idmapped mount shift the kuid. */
vfsroot = make_vfsuid(idmap, fs_ns, kroot);
/* If the root kuid maps to a valid uid in current ns, then return
* this as a nscap. */
mappedroot = from_kuid(current_user_ns(), vfsuid_into_kuid(vfsroot));
if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
size = sizeof(struct vfs_ns_cap_data);
if (alloc) {
if (!nscap) {
/* v2 -> v3 conversion */
nscap = kzalloc(size, GFP_ATOMIC);
if (!nscap) {
size = -ENOMEM;
goto out_free;
}
nsmagic = VFS_CAP_REVISION_3;
magic = le32_to_cpu(cap->magic_etc);
if (magic & VFS_CAP_FLAGS_EFFECTIVE)
nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
nscap->magic_etc = cpu_to_le32(nsmagic);
} else {
/* use allocated v3 buffer */
tmpbuf = NULL;
}
nscap->rootid = cpu_to_le32(mappedroot);
*buffer = nscap;
}
goto out_free;
}
if (!rootid_owns_currentns(vfsroot)) {
size = -EOVERFLOW;
goto out_free;
}
/* This comes from a parent namespace. Return as a v2 capability */
size = sizeof(struct vfs_cap_data);
if (alloc) {
if (nscap) {
/* v3 -> v2 conversion */
cap = kzalloc(size, GFP_ATOMIC);
if (!cap) {
size = -ENOMEM;
goto out_free;
}
magic = VFS_CAP_REVISION_2;
nsmagic = le32_to_cpu(nscap->magic_etc);
if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
magic |= VFS_CAP_FLAGS_EFFECTIVE;
memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
cap->magic_etc = cpu_to_le32(magic);
} else {
/* use unconverted v2 */
tmpbuf = NULL;
}
*buffer = cap;
}
out_free:
kfree(tmpbuf);
return size;
}
/**
* rootid_from_xattr - translate root uid of vfs caps
*
* @value: vfs caps value to read the root uid from
* @size: size of @value
* @task_ns: user namespace of the caller
*/
static vfsuid_t rootid_from_xattr(const void *value, size_t size,
struct user_namespace *task_ns)
{
const struct vfs_ns_cap_data *nscap = value;
uid_t rootid = 0;
if (size == XATTR_CAPS_SZ_3)
rootid = le32_to_cpu(nscap->rootid);
return VFSUIDT_INIT(make_kuid(task_ns, rootid));
}
static bool validheader(size_t size, const struct vfs_cap_data *cap)
{
return is_v2header(size, cap) || is_v3header(size, cap);
}
/**
* cap_convert_nscap - check vfs caps
*
* @idmap: idmap of the mount the inode was found from
* @dentry: used to retrieve inode to check permissions on
* @ivalue: vfs caps value which may be modified by this function
* @size: size of @ivalue
*
* User requested a write of security.capability. If needed, update the
* xattr to change from v2 to v3, or to fixup the v3 rootid.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*
* Return: On success, return the new size; on error, return < 0.
*/
int cap_convert_nscap(struct mnt_idmap *idmap, struct dentry *dentry,
const void **ivalue, size_t size)
{
struct vfs_ns_cap_data *nscap;
uid_t nsrootid;
const struct vfs_cap_data *cap = *ivalue;
__u32 magic, nsmagic;
struct inode *inode = d_backing_inode(dentry);
struct user_namespace *task_ns = current_user_ns(),
*fs_ns = inode->i_sb->s_user_ns;
kuid_t rootid;
vfsuid_t vfsrootid;
size_t newsize;
if (!*ivalue)
return -EINVAL;
if (!validheader(size, cap))
return -EINVAL;
if (!capable_wrt_inode_uidgid(idmap, inode, CAP_SETFCAP))
return -EPERM;
if (size == XATTR_CAPS_SZ_2 && (idmap == &nop_mnt_idmap))
if (ns_capable(inode->i_sb->s_user_ns, CAP_SETFCAP))
/* user is privileged, just write the v2 */
return size;
vfsrootid = rootid_from_xattr(*ivalue, size, task_ns);
if (!vfsuid_valid(vfsrootid))
return -EINVAL;
rootid = from_vfsuid(idmap, fs_ns, vfsrootid);
if (!uid_valid(rootid))
return -EINVAL;
nsrootid = from_kuid(fs_ns, rootid);
if (nsrootid == -1)
return -EINVAL;
newsize = sizeof(struct vfs_ns_cap_data);
nscap = kmalloc(newsize, GFP_ATOMIC);
if (!nscap)
return -ENOMEM;
nscap->rootid = cpu_to_le32(nsrootid);
nsmagic = VFS_CAP_REVISION_3;
magic = le32_to_cpu(cap->magic_etc);
if (magic & VFS_CAP_FLAGS_EFFECTIVE)
nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
nscap->magic_etc = cpu_to_le32(nsmagic);
memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
*ivalue = nscap;
return newsize;
}
/*
* Calculate the new process capability sets from the capability sets attached
* to a file.
*/
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
bool *effective,
bool *has_fcap)
{
struct cred *new = bprm->cred;
int ret = 0;
if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
*effective = true;
if (caps->magic_etc & VFS_CAP_REVISION_MASK)
*has_fcap = true;
/*
* pP' = (X & fP) | (pI & fI)
* The addition of pA' is handled later.
*/
new->cap_permitted.val =
(new->cap_bset.val & caps->permitted.val) |
(new->cap_inheritable.val & caps->inheritable.val);
if (caps->permitted.val & ~new->cap_permitted.val)
/* insufficient to execute correctly */
ret = -EPERM;
/*
* For legacy apps, with no internal support for recognizing they
* do not have enough capabilities, we return an error if they are
* missing some "forced" (aka file-permitted) capabilities.
*/
return *effective ? ret : 0;
}
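/*
 * Worked example (hypothetical capability values, for illustration only):
 * with a bounding set X = {CAP_NET_BIND_SERVICE, CAP_NET_RAW}, file
 * permitted fP = {CAP_NET_RAW}, process inheritable pI = {CAP_SYS_TIME} and
 * file inheritable fI = {}, the rule above gives
 * pP' = (X & fP) | (pI & fI) = {CAP_NET_RAW}, i.e. the exec picks up only
 * CAP_NET_RAW, and no -EPERM is returned because fP was fully honoured.
 */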
/**
* get_vfs_caps_from_disk - retrieve vfs caps from disk
*
* @idmap: idmap of the mount the inode was found from
* @dentry: dentry from which @inode is retrieved
* @cpu_caps: vfs capabilities
*
* Extract the on-exec-apply capability sets for an executable file.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*/
int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
const struct dentry *dentry,
struct cpu_vfs_cap_data *cpu_caps)
{
struct inode *inode = d_backing_inode(dentry);
__u32 magic_etc;
int size;
struct vfs_ns_cap_data data, *nscaps = &data;
struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
kuid_t rootkuid;
vfsuid_t rootvfsuid;
struct user_namespace *fs_ns;
memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
if (!inode)
return -ENODATA;
fs_ns = inode->i_sb->s_user_ns;
size = __vfs_getxattr((struct dentry *)dentry, inode,
XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
if (size == -ENODATA || size == -EOPNOTSUPP)
/* no data, that's ok */
return -ENODATA;
if (size < 0)
return size;
if (size < sizeof(magic_etc))
return -EINVAL;
cpu_caps->magic_etc = magic_etc = le32_to_cpu(caps->magic_etc);
rootkuid = make_kuid(fs_ns, 0);
switch (magic_etc & VFS_CAP_REVISION_MASK) {
case VFS_CAP_REVISION_1:
if (size != XATTR_CAPS_SZ_1)
return -EINVAL;
break;
case VFS_CAP_REVISION_2:
if (size != XATTR_CAPS_SZ_2)
return -EINVAL;
break;
case VFS_CAP_REVISION_3:
if (size != XATTR_CAPS_SZ_3)
return -EINVAL;
rootkuid = make_kuid(fs_ns, le32_to_cpu(nscaps->rootid));
break;
default:
return -EINVAL;
}
rootvfsuid = make_vfsuid(idmap, fs_ns, rootkuid);
if (!vfsuid_valid(rootvfsuid))
return -ENODATA;
/* Limit the caps to the mounter of the filesystem
* or the more limited uid specified in the xattr.
*/
if (!rootid_owns_currentns(rootvfsuid))
return -ENODATA;
cpu_caps->permitted.val = le32_to_cpu(caps->data[0].permitted);
cpu_caps->inheritable.val = le32_to_cpu(caps->data[0].inheritable);
/*
* Rev1 had just a single 32-bit word, later expanded
* to a second one for the high bits
*/
if ((magic_etc & VFS_CAP_REVISION_MASK) != VFS_CAP_REVISION_1) {
cpu_caps->permitted.val += (u64)le32_to_cpu(caps->data[1].permitted) << 32;
cpu_caps->inheritable.val += (u64)le32_to_cpu(caps->data[1].inheritable) << 32;
}
cpu_caps->permitted.val &= CAP_VALID_MASK;
cpu_caps->inheritable.val &= CAP_VALID_MASK;
cpu_caps->rootid = vfsuid_into_kuid(rootvfsuid);
return 0;
}
/*
* Attempt to get the on-exec apply capability sets for an executable file from
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
static int get_file_caps(struct linux_binprm *bprm, const struct file *file,
bool *effective, bool *has_fcap)
{
int rc = 0;
struct cpu_vfs_cap_data vcaps;
cap_clear(bprm->cred->cap_permitted);
if (!file_caps_enabled)
return 0;
if (!mnt_may_suid(file->f_path.mnt))
return 0;
/*
* This check is redundant with mnt_may_suid() but is kept to make
* explicit that capability bits are limited to s_user_ns and its
* descendants.
*/
if (!current_in_userns(file->f_path.mnt->mnt_sb->s_user_ns))
return 0;
rc = get_vfs_caps_from_disk(file_mnt_idmap(file),
file->f_path.dentry, &vcaps);
if (rc < 0) {
if (rc == -EINVAL)
printk(KERN_NOTICE "Invalid argument reading file caps for %s\n",
bprm->filename);
else if (rc == -ENODATA)
rc = 0;
goto out;
}
rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_fcap);
out:
if (rc)
cap_clear(bprm->cred->cap_permitted);
return rc;
}
static inline bool root_privileged(void) { return !issecure(SECURE_NOROOT); }
static inline bool __is_real(kuid_t uid, struct cred *cred)
{ return uid_eq(cred->uid, uid); }
static inline bool __is_eff(kuid_t uid, struct cred *cred)
{ return uid_eq(cred->euid, uid); }
static inline bool __is_suid(kuid_t uid, struct cred *cred)
{ return !__is_real(uid, cred) && __is_eff(uid, cred); }
/*
* handle_privileged_root - Handle case of privileged root
* @bprm: The execution parameters, including the proposed creds
* @has_fcap: Are any file capabilities set?
* @effective: Do we have effective root privilege?
* @root_uid: This namespace's root UID with respect to the initial user namespace
*
* Handle the case where root is privileged and hasn't been neutered by
* SECURE_NOROOT. If file capabilities are set, they won't be combined with
* set UID root and nothing is changed. If we are root, cap_permitted is
* updated. If we have become set UID root, the effective bit is set.
*/
static void handle_privileged_root(struct linux_binprm *bprm, bool has_fcap,
bool *effective, kuid_t root_uid)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
if (!root_privileged())
return;
/*
* If the legacy file capability is set, then don't set privs
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
if (has_fcap && __is_suid(root_uid, new)) {
warn_setuid_and_fcaps_mixed(bprm->filename);
return;
}
/*
* To support inheritance of root-permissions and suid-root
* executables under compatibility mode, we override the
* capability sets for the file.
*/
if (__is_eff(root_uid, new) || __is_real(root_uid, new)) {
/* pP' = (cap_bset & ~0) | (pI & ~0) */
new->cap_permitted = cap_combine(old->cap_bset,
old->cap_inheritable);
}
/*
* If only the real uid is 0, we do not set the effective bit.
*/
if (__is_eff(root_uid, new))
*effective = true;
}
#define __cap_gained(field, target, source) \
!cap_issubset(target->cap_##field, source->cap_##field)
#define __cap_grew(target, source, cred) \
!cap_issubset(cred->cap_##target, cred->cap_##source)
#define __cap_full(field, cred) \
cap_issubset(CAP_FULL_SET, cred->cap_##field)
/*
* 1) Audit candidate if current->cap_effective is set
*
* We do not bother to audit if 3 things are true:
* 1) cap_effective has all caps
* 2) we became root *OR* were already root
* 3) root is supposed to have all caps (SECURE_NOROOT)
* Since this is just a normal root execing a process.
*
* Number 1 above might fail if you don't have a full bset, but I think
* that is interesting information to audit.
*
* A number of other conditions require logging:
* 2) something prevented setuid root getting all caps
* 3) non-setuid root gets fcaps
* 4) non-setuid root gets ambient
*/
static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
kuid_t root, bool has_fcap)
{
bool ret = false;
if ((__cap_grew(effective, ambient, new) &&
!(__cap_full(effective, new) &&
(__is_eff(root, new) || __is_real(root, new)) &&
root_privileged())) ||
(root_privileged() &&
__is_suid(root, new) &&
!__cap_full(effective, new)) ||
(uid_eq(new->euid, old->euid) &&
((has_fcap &&
__cap_gained(permitted, new, old)) ||
__cap_gained(ambient, new, old))))
ret = true;
return ret;
}
/**
* cap_bprm_creds_from_file - Set up the proposed credentials for execve().
* @bprm: The execution parameters, including the proposed creds
* @file: The file to pull the credentials from
*
* Set up the proposed credentials for a new execution context being
* constructed by execve(). The proposed creds in @bprm->cred is altered,
* which won't take effect immediately.
*
* Return: 0 if successful, -ve on error.
*/
int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file)
{
/* Process setpcap binaries and capabilities for uid 0 */
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
bool effective = false, has_fcap = false, id_changed;
int ret;
kuid_t root_uid;
if (WARN_ON(!cap_ambient_invariant_ok(old)))
return -EPERM;
ret = get_file_caps(bprm, file, &effective, &has_fcap);
if (ret < 0)
return ret;
root_uid = make_kuid(new->user_ns, 0);
handle_privileged_root(bprm, has_fcap, &effective, root_uid);
/* if we have fs caps, clear dangerous personality flags */
if (__cap_gained(permitted, new, old))
bprm->per_clear |= PER_CLEAR_ON_SETID;
/* Don't let someone trace a set[ug]id/setpcap binary with the revised
* credentials unless they have the appropriate permit.
*
* In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
id_changed = !uid_eq(new->euid, old->euid) || !in_group_p(new->egid);
if ((id_changed || __cap_gained(permitted, new, old)) &&
((bprm->unsafe & ~LSM_UNSAFE_PTRACE) ||
!ptracer_capable(current, new->user_ns))) {
/* downgrade; they get no more than they had, and maybe less */
if (!ns_capable(new->user_ns, CAP_SETUID) ||
(bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
new->euid = new->uid;
new->egid = new->gid;
}
new->cap_permitted = cap_intersect(new->cap_permitted,
old->cap_permitted);
}
new->suid = new->fsuid = new->euid;
new->sgid = new->fsgid = new->egid;
/* File caps or setid cancels ambient. */
if (has_fcap || id_changed)
cap_clear(new->cap_ambient);
/*
* Now that we've computed pA', update pP' to give:
* pP' = (X & fP) | (pI & fI) | pA'
*/
new->cap_permitted = cap_combine(new->cap_permitted, new->cap_ambient);
/*
* Set pE' = (fE ? pP' : pA'). Because pA' is zero if fE is set,
* this is the same as pE' = (fE ? pP' : 0) | pA'.
*/
if (effective)
new->cap_effective = new->cap_permitted;
else
new->cap_effective = new->cap_ambient;
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EPERM;
if (nonroot_raised_pE(new, old, root_uid, has_fcap)) {
ret = audit_log_bprm_fcaps(bprm, new, old);
if (ret < 0)
return ret;
}
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EPERM;
/* Check for privilege-elevated exec. */
if (id_changed ||
!uid_eq(new->euid, old->uid) ||
!gid_eq(new->egid, old->gid) ||
(!__is_real(root_uid, new) &&
(effective ||
__cap_grew(permitted, ambient, new))))
bprm->secureexec = 1;
return 0;
}
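/*
 * Worked example (hypothetical, for illustration): an unprivileged task with
 * pA = {CAP_NET_RAW} execs a binary that has neither file capabilities nor
 * set[ug]id bits. has_fcap and id_changed stay false, so pA' = pA is kept,
 * pP' = 0 | pA' = {CAP_NET_RAW} and, with fE clear, pE' = pA' = {CAP_NET_RAW}:
 * ambient capabilities survive a plain exec, as intended.
 */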
/**
* cap_inode_setxattr - Determine whether an xattr may be altered
* @dentry: The inode/dentry being altered
* @name: The name of the xattr to be changed
* @value: The value that the xattr will be changed to
* @size: The size of value
* @flags: The replacement flag
*
* Determine whether an xattr may be altered or set on an inode, returning 0 if
* permission is granted, -ve if denied.
*
* This is used to make sure security xattrs don't get updated or set by those
* who aren't privileged to do so.
*/
int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct user_namespace *user_ns = dentry->d_sb->s_user_ns;
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
/*
* For XATTR_NAME_CAPS the check will be done in
* cap_convert_nscap(), called by setxattr()
*/
if (strcmp(name, XATTR_NAME_CAPS) == 0)
return 0;
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
/**
* cap_inode_removexattr - Determine whether an xattr may be removed
*
* @idmap: idmap of the mount the inode was found from
* @dentry: The inode/dentry being altered
* @name: The name of the xattr to be changed
*
* Determine whether an xattr may be removed from an inode, returning 0 if
* permission is granted, -ve if denied.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*
* This is used to make sure security xattrs don't get removed by those who
* aren't privileged to remove them.
*/
int cap_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
{
struct user_namespace *user_ns = dentry->d_sb->s_user_ns;
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
if (strcmp(name, XATTR_NAME_CAPS) == 0) {
/* security.capability gets namespaced */
struct inode *inode = d_backing_inode(dentry);
if (!inode)
return -EINVAL;
if (!capable_wrt_inode_uidgid(idmap, inode, CAP_SETFCAP))
return -EPERM;
return 0;
}
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
/*
* cap_emulate_setxuid() fixes the effective / permitted capabilities of
* a process after a call to setuid, setreuid, or setresuid.
*
* 1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
* {r,e,s}uid != 0, the permitted and effective capabilities are
* cleared.
*
* 2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
* capabilities of the process are cleared.
*
* 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
* capabilities are set to the permitted capabilities.
*
* fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should
* never happen.
*
* -astor
*
* cevans - New behaviour, Oct '99
* A process may, via prctl(), elect to keep its capabilities when it
* calls setuid() and switches away from uid==0. Both permitted and
* effective sets will be retained.
* Without this change, it was impossible for a daemon to drop only some
* of its privilege. The call to setuid(!=0) would drop all privileges!
* Keeping uid 0 is not an option because uid 0 owns too many vital
* files..
* Thanks to Olaf Kirch and Peter Benie for spotting this.
*/
static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
{
kuid_t root_uid = make_kuid(old->user_ns, 0);
if ((uid_eq(old->uid, root_uid) || uid_eq(old->euid, root_uid) ||
uid_eq(old->suid, root_uid)) &&
(!uid_eq(new->uid, root_uid) && !uid_eq(new->euid, root_uid) &&
!uid_eq(new->suid, root_uid))) {
if (!issecure(SECURE_KEEP_CAPS)) {
cap_clear(new->cap_permitted);
cap_clear(new->cap_effective);
}
/*
* Pre-ambient programs expect setresuid to nonroot followed
* by exec to drop capabilities. We should make sure that
* this remains the case.
*/
cap_clear(new->cap_ambient);
}
if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
cap_clear(new->cap_effective);
if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid))
new->cap_effective = new->cap_permitted;
}
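/*
 * Userspace sketch (illustrative only, not kernel code): the prctl() escape
 * hatch mentioned in the comment above. A root daemon that wants its
 * permitted set to survive switching to an unprivileged uid would do
 * roughly:
 *
 *	prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0);
 *	setuid(unprivileged_uid);
 *	// pP is retained; per the unconditional rule (2) implemented above,
 *	// pE is still cleared when euid leaves 0 and must be re-raised with
 *	// capset() before use.
 *
 * "unprivileged_uid" is a placeholder, not a real symbol.
 */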
/**
* cap_task_fix_setuid - Fix up the results of setuid() call
* @new: The proposed credentials
* @old: The current task's current credentials
* @flags: Indications of what has changed
*
* Fix up the results of setuid() call before the credential changes are
* actually applied.
*
* Return: 0 to grant the changes, -ve to deny them.
*/
int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
{
switch (flags) {
case LSM_SETID_RE:
case LSM_SETID_ID:
case LSM_SETID_RES:
/* juggle the capabilities to follow [RES]UID changes unless
* otherwise suppressed */
if (!issecure(SECURE_NO_SETUID_FIXUP))
cap_emulate_setxuid(new, old);
break;
case LSM_SETID_FS:
/* juggle the capabilities to follow FSUID changes, unless
* otherwise suppressed
*
* FIXME - is fsuser used for all CAP_FS_MASK capabilities?
* if not, we might be a bit too harsh here.
*/
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
kuid_t root_uid = make_kuid(old->user_ns, 0);
if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_drop_fs_set(new->cap_effective);
if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_raise_fs_set(new->cap_effective,
new->cap_permitted);
}
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Rationale: code calling task_setscheduler, task_setioprio, and
* task_setnice, assumes that
* . if capable(cap_sys_nice), then those actions should be allowed
* . if not capable(cap_sys_nice), but acting on your own processes,
* then those actions should be allowed
* This is insufficient now since you can call code without suid, but
* yet with increased caps.
* So we check for increased caps on the target process.
*/
static int cap_safe_nice(struct task_struct *p)
{
int is_subset, ret = 0;
rcu_read_lock();
is_subset = cap_issubset(__task_cred(p)->cap_permitted,
current_cred()->cap_permitted);
if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
ret = -EPERM;
rcu_read_unlock();
return ret;
}
/**
* cap_task_setscheduler - Determine if scheduler policy change is permitted
* @p: The task to affect
*
* Determine if the requested scheduler policy change is permitted for the
* specified task.
*
* Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setscheduler(struct task_struct *p)
{
return cap_safe_nice(p);
}
/**
* cap_task_setioprio - Determine if I/O priority change is permitted
* @p: The task to affect
* @ioprio: The I/O priority to set
*
* Determine if the requested I/O priority change is permitted for the specified
* task.
*
* Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setioprio(struct task_struct *p, int ioprio)
{
return cap_safe_nice(p);
}
/**
* cap_task_setnice - Determine if task priority change is permitted
* @p: The task to affect
* @nice: The nice value to set
*
* Determine if the requested task priority change is permitted for the
* specified task.
*
* Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setnice(struct task_struct *p, int nice)
{
return cap_safe_nice(p);
}
/*
* Implement PR_CAPBSET_DROP. Attempt to remove the specified capability from
* the current task's bounding set. Returns 0 on success, -ve on error.
*/
static int cap_prctl_drop(unsigned long cap)
{
struct cred *new;
if (!ns_capable(current_user_ns(), CAP_SETPCAP))
return -EPERM;
if (!cap_valid(cap))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
cap_lower(new->cap_bset, cap);
return commit_creds(new);
}
/**
* cap_task_prctl - Implement process control functions for this security module
* @option: The process control function requested
* @arg2: The argument data for this function
* @arg3: The argument data for this function
* @arg4: The argument data for this function
* @arg5: The argument data for this function
*
* Allow process control functions (sys_prctl()) to alter capabilities; may
* also deny access to other functions not otherwise implemented here.
*
* Return: 0 or +ve on success, -ENOSYS if this function is not implemented
* here, other -ve on error. If -ENOSYS is returned, sys_prctl() and other LSM
* modules will consider performing the function.
*/
int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
const struct cred *old = current_cred();
struct cred *new;
switch (option) {
case PR_CAPBSET_READ:
if (!cap_valid(arg2))
return -EINVAL;
return !!cap_raised(old->cap_bset, arg2);
case PR_CAPBSET_DROP:
return cap_prctl_drop(arg2);
/*
* The next four prctl's remain to assist with transitioning a
* system from legacy UID=0 based privilege (when filesystem
* capabilities are not in use) to a system using filesystem
* capabilities only - as the POSIX.1e draft intended.
*
* Note:
*
* PR_SET_SECUREBITS =
* issecure_mask(SECURE_KEEP_CAPS_LOCKED)
* | issecure_mask(SECURE_NOROOT)
* | issecure_mask(SECURE_NOROOT_LOCKED)
* | issecure_mask(SECURE_NO_SETUID_FIXUP)
* | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
*
* will ensure that the current process and all of its
* children will be locked into a pure
* capability-based-privilege environment.
*/
case PR_SET_SECUREBITS:
if ((((old->securebits & SECURE_ALL_LOCKS) >> 1)
& (old->securebits ^ arg2)) /*[1]*/
|| ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
/*
* [1] no changing of bits that are locked
* [2] no unlocking of locks
* [3] no setting of unsupported bits
*/
)
/* cannot change a locked bit */
return -EPERM;
/*
* Doing anything requires privilege (go read about the
* "sendmail capabilities bug"), except for unprivileged bits.
* Indeed, the SECURE_ALL_UNPRIVILEGED bits are not
* restrictions enforced by the kernel but by user space on
* itself.
*/
if (cap_capable(current_cred(), current_cred()->user_ns,
CAP_SETPCAP, CAP_OPT_NONE) != 0) {
const unsigned long unpriv_and_locks =
SECURE_ALL_UNPRIVILEGED |
SECURE_ALL_UNPRIVILEGED << 1;
const unsigned long changed = old->securebits ^ arg2;
/* For legacy reason, denies non-change. */
if (!changed)
return -EPERM;
/* Denies privileged changes. */
if (changed & ~unpriv_and_locks)
return -EPERM;
}
new = prepare_creds();
if (!new)
return -ENOMEM;
new->securebits = arg2;
return commit_creds(new);
case PR_GET_SECUREBITS:
return old->securebits;
case PR_GET_KEEPCAPS:
return !!issecure(SECURE_KEEP_CAPS);
case PR_SET_KEEPCAPS:
if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
return -EINVAL;
if (issecure(SECURE_KEEP_CAPS_LOCKED))
return -EPERM;
new = prepare_creds();
if (!new)
return -ENOMEM;
if (arg2)
new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
else
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
return commit_creds(new);
case PR_CAP_AMBIENT:
if (arg2 == PR_CAP_AMBIENT_CLEAR_ALL) {
if (arg3 | arg4 | arg5)
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
cap_clear(new->cap_ambient);
return commit_creds(new);
}
if (((!cap_valid(arg3)) | arg4 | arg5))
return -EINVAL;
if (arg2 == PR_CAP_AMBIENT_IS_SET) {
return !!cap_raised(current_cred()->cap_ambient, arg3);
} else if (arg2 != PR_CAP_AMBIENT_RAISE &&
arg2 != PR_CAP_AMBIENT_LOWER) {
return -EINVAL;
} else {
if (arg2 == PR_CAP_AMBIENT_RAISE &&
(!cap_raised(current_cred()->cap_permitted, arg3) ||
!cap_raised(current_cred()->cap_inheritable,
arg3) ||
issecure(SECURE_NO_CAP_AMBIENT_RAISE)))
return -EPERM;
new = prepare_creds();
if (!new)
return -ENOMEM;
if (arg2 == PR_CAP_AMBIENT_RAISE)
cap_raise(new->cap_ambient, arg3);
else
cap_lower(new->cap_ambient, arg3);
return commit_creds(new);
}
default:
/* No functionality available - continue with default */
return -ENOSYS;
}
}
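/*
 * Userspace sketch (illustrative only): exercising the prctl() options
 * handled above. Dropping a capability from the bounding set and locking
 * the process into the pure file-capability mode described in the
 * PR_SET_SECUREBITS comment would look roughly like:
 *
 *	prctl(PR_CAPBSET_DROP, CAP_NET_RAW, 0, 0, 0);
 *	prctl(PR_SET_SECUREBITS,
 *	      SECBIT_KEEP_CAPS_LOCKED | SECBIT_NOROOT |
 *	      SECBIT_NOROOT_LOCKED | SECBIT_NO_SETUID_FIXUP |
 *	      SECBIT_NO_SETUID_FIXUP_LOCKED, 0, 0, 0);
 *
 * Both calls require CAP_SETPCAP in the caller's user namespace, as enforced
 * by cap_prctl_drop() and the cap_capable() check above.
 */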
/**
* cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
* @mm: The VM space in which the new mapping is to be made
* @pages: The size of the mapping
*
* Determine whether the allocation of a new virtual mapping by the current
* task is permitted.
*
* Return: 0 if permission granted, negative error code if not.
*/
int cap_vm_enough_memory(struct mm_struct *mm, long pages)
{
return cap_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN,
CAP_OPT_NOAUDIT);
}
/**
* cap_mmap_addr - check if able to map given addr
* @addr: address attempting to be mapped
*
* If the process is attempting to map memory below dac_mmap_min_addr they need
* CAP_SYS_RAWIO. The other parameters to this function are unused by the
* capability security module.
*
* Return: 0 if this mapping should be allowed or -EPERM if not.
*/
int cap_mmap_addr(unsigned long addr)
{
int ret = 0;
if (addr < dac_mmap_min_addr) {
ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO,
CAP_OPT_NONE);
/* set PF_SUPERPRIV if it turns out we allow the low mmap */
if (ret == 0)
current->flags |= PF_SUPERPRIV;
}
return ret;
}
#ifdef CONFIG_SECURITY
static const struct lsm_id capability_lsmid = {
.name = "capability",
.id = LSM_ID_CAPABILITY,
};
static struct security_hook_list capability_hooks[] __ro_after_init = {
LSM_HOOK_INIT(capable, cap_capable),
LSM_HOOK_INIT(settime, cap_settime),
LSM_HOOK_INIT(ptrace_access_check, cap_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, cap_ptrace_traceme),
LSM_HOOK_INIT(capget, cap_capget),
LSM_HOOK_INIT(capset, cap_capset),
LSM_HOOK_INIT(bprm_creds_from_file, cap_bprm_creds_from_file),
LSM_HOOK_INIT(inode_need_killpriv, cap_inode_need_killpriv),
LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv),
LSM_HOOK_INIT(inode_getsecurity, cap_inode_getsecurity),
LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
LSM_HOOK_INIT(task_fix_setuid, cap_task_fix_setuid),
LSM_HOOK_INIT(task_prctl, cap_task_prctl),
LSM_HOOK_INIT(task_setscheduler, cap_task_setscheduler),
LSM_HOOK_INIT(task_setioprio, cap_task_setioprio),
LSM_HOOK_INIT(task_setnice, cap_task_setnice),
LSM_HOOK_INIT(vm_enough_memory, cap_vm_enough_memory),
};
static int __init capability_init(void)
{
security_add_hooks(capability_hooks, ARRAY_SIZE(capability_hooks),
&capability_lsmid);
return 0;
}
DEFINE_LSM(capability) = {
.name = "capability",
.order = LSM_ORDER_FIRST,
.init = capability_init,
};
#endif /* CONFIG_SECURITY */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
#include <linux/args.h>
#include <linux/array_size.h>
#include <linux/cleanup.h> /* for DEFINE_FREE() */
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
#include <linux/err.h> /* for ERR_PTR() */
#include <linux/errno.h> /* for E2BIG */
#include <linux/overflow.h> /* for check_mul_overflow() */
#include <linux/stdarg.h>
#include <uapi/linux/string.h>
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t) __realloc_size(2);
extern void *vmemdup_user(const void __user *, size_t) __realloc_size(2);
extern void *memdup_user_nul(const void __user *, size_t);
/**
* memdup_array_user - duplicate array from user space
* @src: source address in user space
* @n: number of array members to copy
* @size: size of one array member
*
* Return: an ERR_PTR() on failure. Result is physically
* contiguous, to be freed by kfree().
*/
static inline __realloc_size(2, 3)
void *memdup_array_user(const void __user *src, size_t n, size_t size)
{
size_t nbytes;
if (check_mul_overflow(n, size, &nbytes))
return ERR_PTR(-EOVERFLOW);
return memdup_user(src, nbytes);
}
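/*
 * Usage sketch (hypothetical caller, for illustration): duplicating an array
 * of u64 records handed in from userspace. The helper name and the __user
 * pointer are placeholders; the result is freed with kfree() on the normal
 * path and may be ERR_PTR(-EOVERFLOW), ERR_PTR(-EFAULT) or ERR_PTR(-ENOMEM).
 */
static inline u64 *example_copy_u64_array(const u64 __user *uptr, size_t n)
{
	return memdup_array_user(uptr, n, sizeof(u64));
}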
/**
* vmemdup_array_user - duplicate array from user space
* @src: source address in user space
* @n: number of array members to copy
* @size: size of one array member
*
* Return: an ERR_PTR() on failure. Result may be not
* physically contiguous. Use kvfree() to free.
*/
static inline __realloc_size(2, 3)
void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
{
size_t nbytes;
if (check_mul_overflow(n, size, &nbytes))
return ERR_PTR(-EOVERFLOW);
return vmemdup_user(src, nbytes);
}
/*
* Include machine specific inline routines
*/
#include <asm/string.h>
#ifndef __HAVE_ARCH_STRCPY
extern char * strcpy(char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
ssize_t sized_strscpy(char *, const char *, size_t);
/*
* The 2 argument style can only be used when dst is an array with a
* known size.
*/
#define __strscpy0(dst, src, ...) \
sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst) + \
__must_be_cstr(dst) + __must_be_cstr(src))
#define __strscpy1(dst, src, size) \
sized_strscpy(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
#define __strscpy_pad0(dst, src, ...) \
sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst) + \
__must_be_cstr(dst) + __must_be_cstr(src))
#define __strscpy_pad1(dst, src, size) \
sized_strscpy_pad(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
/**
* strscpy - Copy a C-string into a sized buffer
* @dst: Where to copy the string to
* @src: Where to copy the string from
* @...: Size of destination buffer (optional)
*
* Copy the source string @src, or as much of it as fits, into the
* destination @dst buffer. The behavior is undefined if the string
* buffers overlap. The destination @dst buffer is always NUL terminated,
* unless it's zero-sized.
*
* The size argument @... is only required when @dst is not an array, or
* when the copy needs to be smaller than sizeof(@dst).
*
* Preferred to strncpy() since it always returns a valid string, and
* doesn't unnecessarily force the tail of the destination buffer to be
* zero padded. If padding is desired please use strscpy_pad().
*
* Returns the number of characters copied in @dst (not including the
* trailing %NUL) or -E2BIG if the destination buffer size is 0 or the copy from @src was
* truncated.
*/
#define strscpy(dst, src, ...) \
CONCATENATE(__strscpy, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)
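/*
 * Usage sketch (illustrative only): the two-argument form is for a
 * destination that is a real array (the size is taken with sizeof), the
 * three-argument form for a plain pointer with an explicit size. The names
 * below are hypothetical.
 */
static inline ssize_t example_copy_label(char *label, size_t label_len,
					 const char *src)
{
	char name[16];
	ssize_t ret;

	ret = strscpy(name, src);		/* size inferred from the array */
	if (ret < 0)
		return ret;			/* -E2BIG: src did not fit */
	return strscpy(label, src, label_len);	/* explicit size for a pointer */
}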
#define sized_strscpy_pad(dest, src, count) ({ \
char *__dst = (dest); \
const char *__src = (src); \
const size_t __count = (count); \
ssize_t __wrote; \
\
__wrote = sized_strscpy(__dst, __src, __count); \
if (__wrote >= 0 && __wrote < __count) \
memset(__dst + __wrote + 1, 0, __count - __wrote - 1); \
__wrote; \
})
/**
* strscpy_pad() - Copy a C-string into a sized buffer
* @dst: Where to copy the string to
* @src: Where to copy the string from
* @...: Size of destination buffer
*
* Copy the string, or as much of it as fits, into the dest buffer. The
* behavior is undefined if the string buffers overlap. The destination
* buffer is always %NUL terminated, unless it's zero-sized.
*
* If the source string is shorter than the destination buffer, the
* remaining bytes in the buffer will be filled with %NUL bytes.
*
* For full explanation of why you may want to consider using the
* 'strscpy' functions please see the function docstring for strscpy().
*
* Returns:
* * The number of characters copied (not including the trailing %NULs)
* * -E2BIG if the destination buffer size is 0 or @src was truncated.
*/
#define strscpy_pad(dst, src, ...) \
CONCATENATE(__strscpy_pad, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
#ifndef __HAVE_ARCH_STRNCAT
extern char * strncat(char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRLCAT
extern size_t strlcat(char *, const char *, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRCMP
extern int strcmp(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
#ifndef __HAVE_ARCH_STRNCASECMP
extern int strncasecmp(const char *s1, const char *s2, size_t n);
#endif
#ifndef __HAVE_ARCH_STRCHR
extern char * strchr(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRCHRNUL
extern char * strchrnul(const char *,int);
#endif
extern char * strnchrnul(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRNCHR
extern char * strnchr(const char *, size_t, int);
#endif
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
extern char * __must_check skip_spaces(const char *);
extern char *strim(char *);
static inline __must_check char *strstrip(char *str)
{
return strim(str);
}
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *, const char *);
#endif
#ifndef __HAVE_ARCH_STRNSTR
extern char * strnstr(const char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
#endif
#ifndef __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRPBRK
extern char * strpbrk(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRSEP
extern char * strsep(char **,const char *);
#endif
#ifndef __HAVE_ARCH_STRSPN
extern __kernel_size_t strspn(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_STRCSPN
extern __kernel_size_t strcspn(const char *,const char *);
#endif
#ifndef __HAVE_ARCH_MEMSET
extern void * memset(void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMSET16
extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMSET32
extern void *memset32(uint32_t *, uint32_t, __kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMSET64
extern void *memset64(uint64_t *, uint64_t, __kernel_size_t);
#endif
static inline void *memset_l(unsigned long *p, unsigned long v,
__kernel_size_t n)
{
if (BITS_PER_LONG == 32)
return memset32((uint32_t *)p, v, n);
else
return memset64((uint64_t *)p, v, n);
}
static inline void *memset_p(void **p, void *v, __kernel_size_t n)
{
if (BITS_PER_LONG == 32)
return memset32((uint32_t *)p, (uintptr_t)v, n);
else
return memset64((uint64_t *)p, (uintptr_t)v, n);
}
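/*
* Illustrative sketch (hypothetical array names): memset_l() and memset_p()
* fill arrays of longs or pointers with a repeated word-sized value, using
* memset32() or memset64() depending on BITS_PER_LONG.
*
*	unsigned long marks[8];
*	void *slots[8];
*
*	memset_l(marks, ~0UL, ARRAY_SIZE(marks));
*	memset_p(slots, some_sentinel, ARRAY_SIZE(slots));
*/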
extern void **__memcat_p(void **a, void **b);
#define memcat_p(a, b) ({ \
BUILD_BUG_ON_MSG(!__same_type(*(a), *(b)), \
"type mismatch in memcat_p()"); \
(typeof(*a) *)__memcat_p((void **)(a), (void **)(b)); \
})
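/*
* Usage sketch (hypothetical attribute arrays): memcat_p() joins two
* NULL-terminated pointer arrays of the same element type into one newly
* allocated, NULL-terminated array, which is released with kfree().
*
*	struct attribute **all = memcat_p(base_attrs, extra_attrs);
*
*	if (!all)
*		return -ENOMEM;
*	// ... use 'all' ..., then kfree(all);
*/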
#ifndef __HAVE_ARCH_MEMCPY
extern void * memcpy(void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
extern void * memmove(void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
extern void * memscan(void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_BCMP
extern int bcmp(const void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}
#endif
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *str, char old, char new);
/**
* mem_is_zero - Check if an area of memory is all 0's.
* @s: The memory area
* @n: The size of the area
*
* Return: True if the area of memory is all 0's.
*/
static inline bool mem_is_zero(const void *s, size_t n)
{
return !memchr_inv(s, 0, n);
}
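/*
* Example (hypothetical field names): reject a request whose reserved
* area is not fully zeroed.
*
*	if (!mem_is_zero(req->reserved, sizeof(req->reserved)))
*		return -EINVAL;
*/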
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
__realloc_size(2, 3);
/* lib/argv_split.c */
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
DEFINE_FREE(argv_free, char **, if (!IS_ERR_OR_NULL(_T)) argv_free(_T))
/* lib/cmdline.c */
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);
extern bool sysfs_streq(const char *s1, const char *s2);
int match_string(const char * const *array, size_t n, const char *string);
int __sysfs_match_string(const char * const *array, size_t n, const char *s);
/**
* sysfs_match_string - matches given string in an array
* @_a: array of strings
* @_s: string to match with
*
* Helper for __sysfs_match_string(). Calculates the size of @_a automatically.
*/
#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
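/*
* Illustrative sysfs store-handler fragment (names hypothetical): map a
* token written by userspace to its index in a fixed string table.
*
*	static const char * const modes[] = { "off", "auto", "on" };
*	int idx = sysfs_match_string(modes, buf);
*
*	if (idx < 0)
*		return idx;	// negative errno when nothing matched
*/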
#ifdef CONFIG_BINARY_PRINTF
__printf(3, 0) int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
__printf(3, 0) int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
int ptr_to_hashval(const void *ptr, unsigned long *hashval_out);
size_t memweight(const void *ptr, size_t bytes);
/**
* memzero_explicit - Fill a region of memory (e.g. sensitive
* keying data) with 0s.
* @s: Pointer to the start of the area.
* @count: The size of the area.
*
* Note: usually using memset() is just fine (!), but in cases
* where clearing out _local_ data at the end of a scope is
* necessary, memzero_explicit() should be used instead in
* order to prevent the compiler from optimising away zeroing.
*
* memzero_explicit() doesn't need an arch-specific version as
* it simply ends up calling memset() internally.
*/
static inline void memzero_explicit(void *s, size_t count)
{
memset(s, 0, count);
barrier_data(s);
}
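/*
* Typical pattern (sketch, hypothetical key material): scrub a local
* buffer before it goes out of scope so the compiler cannot drop the
* clear as a dead store.
*
*	u8 key[32];
*
*	// ... derive and use 'key' ...
*	memzero_explicit(key, sizeof(key));
*/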
/**
* kbasename - return the last part of a pathname.
*
* @path: path to extract the filename from.
*/
static inline const char *kbasename(const char *path)
{
const char *tail = strrchr(path, '/');
return tail ? tail + 1 : path;
}
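/*
* Example: kbasename("/lib/firmware/card.bin") returns "card.bin", and a
* string containing no '/' at all is returned unchanged.
*/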
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
#include <linux/fortify-string.h>
#endif
#ifndef unsafe_memcpy
#define unsafe_memcpy(dst, src, bytes, justification) \
memcpy(dst, src, bytes)
#endif
void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
int pad);
/**
* strtomem_pad - Copy NUL-terminated string to non-NUL-terminated buffer
*
* @dest: Pointer to destination character array (marked as __nonstring)
* @src: Pointer to NUL-terminated string
* @pad: Padding character to fill any remaining bytes of @dest after copy
*
* This is a replacement for strncpy() uses where the destination is not
* a NUL-terminated string, but with bounds checking on the source size, and
* an explicit padding character. If padding is not required, use strtomem().
*
* Note that the size of @dest is not an argument, as the length of @dest
* must be discoverable by the compiler.
*/
#define strtomem_pad(dest, src, pad) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
__must_be_noncstr(dest) + \
ARRAY_SIZE(dest); \
const size_t _src_len = __must_be_cstr(src) + \
__builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
memcpy_and_pad(dest, _dest_len, src, \
strnlen(src, min(_src_len, _dest_len)), pad); \
} while (0)
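/*
* Sketch (hypothetical structure): fill a fixed-width, non-NUL-terminated
* label field from a C string, padding the rest of the field with spaces.
*
*	struct hdr { char label[8] __nonstring; };
*	struct hdr h;
*
*	strtomem_pad(h.label, "boot", ' ');
*/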
/**
* strtomem - Copy NUL-terminated string to non-NUL-terminated buffer
*
* @dest: Pointer to destination character array (marked as __nonstring)
* @src: Pointer to NUL-terminated string
*
* This is a replacement for strncpy() uses where the destination is not
* a NUL-terminated string, but with bounds checking on the source size, and
* without trailing padding. If padding is required, use strtomem_pad().
*
* Note that the size of @dest is not an argument, as the length of @dest
* must be discoverable by the compiler.
*/
#define strtomem(dest, src) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
__must_be_noncstr(dest) + \
ARRAY_SIZE(dest); \
const size_t _src_len = __must_be_cstr(src) + \
__builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
} while (0)
/**
* memtostr - Copy a possibly non-NUL-term string to a NUL-term string
* @dest: Pointer to destination NUL-terminated string
* @src: Pointer to character array (likely marked as __nonstring)
*
* This is a replacement for strncpy() uses where the source is not
* a NUL-terminated string.
*
* Note that sizes of @dest and @src must be known at compile-time.
*/
#define memtostr(dest, src) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
__must_be_cstr(dest) + \
ARRAY_SIZE(dest); \
const size_t _src_len = __must_be_noncstr(src) + \
__builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
!__builtin_constant_p(_src_len) || \
_dest_len == 0 || _dest_len == (size_t)-1 || \
_src_len == 0 || _src_len == (size_t)-1); \
memcpy(dest, src, _copy_len); \
dest[_copy_len] = '\0'; \
} while (0)
/**
* memtostr_pad - Copy a possibly non-NUL-term string to a NUL-term string
* with NUL padding in the destination
* @dest: Pointer to destination NUL-terminated string
* @src: Pointer to character array (likely marked as __nonstring)
*
* This is a replacement for strncpy() uses where the source is not
* a NUL-terminated string.
*
* Note that sizes of @dest and @src must be known at compile-time.
*/
#define memtostr_pad(dest, src) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
__must_be_cstr(dest) + \
ARRAY_SIZE(dest); \
const size_t _src_len = __must_be_noncstr(src) + \
__builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
!__builtin_constant_p(_src_len) || \
_dest_len == 0 || _dest_len == (size_t)-1 || \
_src_len == 0 || _src_len == (size_t)-1); \
memcpy(dest, src, _copy_len); \
memset(&dest[_copy_len], 0, _dest_len - _copy_len); \
} while (0)
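/*
* Sketch (hypothetical names), going the other direction: recover a
* NUL-terminated C string from a fixed-width field (such as 'h.label' in
* the sketch above) that may lack a terminator. memtostr_pad()
* additionally NUL-fills the tail of @dest.
*
*	char name[sizeof(h.label) + 1];
*
*	memtostr(name, h.label);	// 'name' is now NUL-terminated
*/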
/**
* memset_after - Set a value after a struct member to the end of a struct
*
* @obj: Address of target struct instance
* @v: Byte value to repeatedly write
* @member: after which struct member to start writing bytes
*
* This is good for clearing padding following the given member.
*/
#define memset_after(obj, v, member) \
({ \
u8 *__ptr = (u8 *)(obj); \
typeof(v) __val = (v); \
memset(__ptr + offsetofend(typeof(*(obj)), member), __val, \
sizeof(*(obj)) - offsetofend(typeof(*(obj)), member)); \
})
/**
* memset_startat - Set a value starting at a member to the end of a struct
*
* @obj: Address of target struct instance
* @v: Byte value to repeatedly write
* @member: struct member to start writing at
*
* Note that if there is padding between the prior member and the target
* member, memset_after() should be used to clear the prior padding.
*/
#define memset_startat(obj, v, member) \
({ \
u8 *__ptr = (u8 *)(obj); \
typeof(v) __val = (v); \
memset(__ptr + offsetof(typeof(*(obj)), member), __val, \
sizeof(*(obj)) - offsetof(typeof(*(obj)), member)); \
})
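/*
* Sketch (hypothetical structure): clear everything from the 'stats'
* member to the end of the structure, leaving the earlier fields alone;
* memset_after() instead starts just past the named member, which also
* covers any padding following it.
*
*	struct dev_state { spinlock_t lock; u32 flags; u64 stats[4]; };
*	struct dev_state s;
*
*	memset_startat(&s, 0, stats);	// clears stats[0..3]
*	memset_after(&s, 0, flags);	// clears padding after 'flags' too
*/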
/**
* str_has_prefix - Test if a string has a given prefix
* @str: The string to test
* @prefix: The string to see if @str starts with
*
* A common way to test a prefix of a string is to do:
* strncmp(str, prefix, sizeof(prefix) - 1)
*
* But this can lead to bugs due to typos, or if prefix is a pointer
* and not a constant. Instead use str_has_prefix().
*
* Returns:
* * strlen(@prefix) if @str starts with @prefix
* * 0 if @str does not start with @prefix
*/
static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
{
size_t len = strlen(prefix);
return strncmp(str, prefix, len) == 0 ? len : 0;
}
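/*
* Example (hypothetical option parsing): because str_has_prefix() returns
* the prefix length on a match, the caller can jump straight past it.
*
*	size_t n = str_has_prefix(arg, "debug=");
*
*	if (n)
*		level = arg + n;	// points just past "debug="
*/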
/**
* strstarts - does @str start with @prefix?
* @str: string to examine
* @prefix: prefix to look for.
*/
static inline bool strstarts(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}
#endif /* _LINUX_STRING_H_ */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Kernel Probes (KProbes)
*
* Copyright (C) IBM Corporation, 2002, 2004
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
* Probes initial implementation (includes suggestions from
* Rusty Russell).
* 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
* hlists and exceptions notifier as suggested by Andi Kleen.
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
* interface to access function arguments.
* 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
* exceptions notifier to be first on the priority list.
* 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
* <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
* <prasanna@in.ibm.com> added function-return probes.
*/
#define pr_fmt(fmt) "kprobes: " fmt
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>
#include <linux/cleanup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
#define kprobe_sysctls_init() do { } while (0)
#endif
static int kprobes_initialized;
/* 'kprobe_table' can be accessed by either:
* - normal hlist traversal and RCU add/del while 'kprobe_mutex' is held, or
* - RCU hlist traversal with preemption disabled (breakpoint handlers).
*/
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with 'kprobe_mutex' held */
static bool kprobes_all_disarmed;
/* This protects 'kprobe_table' and 'optimizing_list' */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
unsigned int __unused)
{
return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}
/*
* Blacklist -- list of 'struct kprobe_blacklist_entry' recording address
* ranges which kprobes cannot probe.
*/
static LIST_HEAD(kprobe_blacklist);
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* 'kprobe::ainsn.insn' points to the copy of the instruction to be
* single-stepped. x86_64, POWER4 and above have no-exec support and
* stepping on the instruction on a vmalloced/kmalloced/data page
* is a recipe for disaster
*/
struct kprobe_insn_page {
struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
struct kprobe_insn_cache *cache;
int nused;
int ngarbage;
char slot_used[];
};
static int slots_per_page(struct kprobe_insn_cache *c)
{
return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
enum kprobe_slot_state {
SLOT_CLEAN = 0,
SLOT_DIRTY = 1,
SLOT_USED = 2,
};
void __weak *alloc_insn_page(void)
{
/*
* Use execmem_alloc() so this page is within +/- 2GB of where the
* kernel image and loaded module images reside. This is required
* for most of the architectures.
* (e.g. x86-64 needs this to handle the %rip-relative fixups.)
*/
return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
}
static void free_insn_page(void *page)
{
execmem_free(page);
}
struct kprobe_insn_cache kprobe_insn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
.alloc = alloc_insn_page,
.free = free_insn_page,
.sym = KPROBE_INSN_PAGE_SYM,
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);
/**
* __get_insn_slot - Find a slot on an executable page for an instruction.
* @c: Pointer to kprobe instruction cache
*
* Description: Locates available slot on existing executable pages,
* allocates an executable page if there's no room on existing ones.
* Return: Pointer to instruction slot on success, NULL on failure.
*/
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
/* Since the slot array is not protected by rcu, we need a mutex */
guard(mutex)(&c->mutex);
do {
guard(rcu)();
list_for_each_entry_rcu(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
int i;
for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
return kip->insns + (i * c->insn_size);
}
}
/* kip->nused is broken. Fix it. */
kip->nused = slots_per_page(c);
WARN_ON(1);
}
}
/* If there are any garbage slots, collect it and try again. */
} while (c->nr_garbage && collect_garbage_slots(c) == 0);
/* All out of space. Need to allocate a new page. */
kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
if (!kip)
return NULL;
kip->insns = c->alloc();
if (!kip->insns) {
kfree(kip);
return NULL;
}
INIT_LIST_HEAD(&kip->list);
memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
kip->ngarbage = 0;
kip->cache = c;
list_add_rcu(&kip->list, &c->pages);
/* Record the perf ksymbol register event after adding the page */
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
PAGE_SIZE, false, c->sym);
return kip->insns;
}
/* Return true if all garbage was collected, otherwise false. */
static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
kip->slot_used[idx] = SLOT_CLEAN;
kip->nused--;
if (kip->nused != 0)
return false;
/*
* Page is no longer in use. Free it unless
* it's the last one. We keep the last one
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
if (!list_is_singular(&kip->list)) {
/*
* Record perf ksymbol unregister event before removing
* the page.
*/
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
(unsigned long)kip->insns, PAGE_SIZE, true,
kip->cache->sym);
list_del_rcu(&kip->list);
synchronize_rcu();
kip->cache->free(kip->insns);
kfree(kip);
}
return true;
}
static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip, *next;
/* Ensure no one is still executing on the garbage slots. */
synchronize_rcu();
list_for_each_entry_safe(kip, next, &c->pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
kip->ngarbage = 0; /* we will collect all garbage slots */
for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
break;
}
}
c->nr_garbage = 0;
return 0;
}
static long __find_insn_page(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, struct kprobe_insn_page **pkip)
{
struct kprobe_insn_page *kip = NULL;
long idx;
guard(rcu)();
list_for_each_entry_rcu(kip, &c->pages, list) {
idx = ((long)slot - (long)kip->insns) /
(c->insn_size * sizeof(kprobe_opcode_t));
if (idx >= 0 && idx < slots_per_page(c)) {
*pkip = kip;
return idx;
}
}
/* Could not find this slot. */
WARN_ON(1);
*pkip = NULL;
return -1;
}
void __free_insn_slot(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, int dirty)
{
struct kprobe_insn_page *kip = NULL;
long idx;
guard(mutex)(&c->mutex);
idx = __find_insn_page(c, slot, &kip);
/* Mark and sweep: this may sleep */
if (kip) {
/* Check double free */
WARN_ON(kip->slot_used[idx] != SLOT_USED);
if (dirty) {
kip->slot_used[idx] = SLOT_DIRTY;
kip->ngarbage++;
if (++c->nr_garbage > slots_per_page(c))
collect_garbage_slots(c);
} else {
collect_one_slot(kip, idx);
}
}
}
/*
* Check given address is on the page of kprobe instruction slots.
* This will be used for checking whether the address on a stack
* is on a text area or not.
*/
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
struct kprobe_insn_page *kip;
bool ret = false;
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
if (addr >= (unsigned long)kip->insns &&
addr < (unsigned long)kip->insns + PAGE_SIZE) {
ret = true;
break;
}
}
rcu_read_unlock();
return ret;
}
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
unsigned long *value, char *type, char *sym)
{
struct kprobe_insn_page *kip;
int ret = -ERANGE;
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
if ((*symnum)--)
continue;
strscpy(sym, c->sym, KSYM_NAME_LEN);
*type = 't';
*value = (unsigned long)kip->insns;
ret = 0;
break;
}
rcu_read_unlock();
return ret;
}
#ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
{
return alloc_insn_page();
}
void __weak free_optinsn_page(void *page)
{
free_insn_page(page);
}
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
.alloc = alloc_optinsn_page,
.free = free_optinsn_page,
.sym = KPROBE_OPTINSN_PAGE_SYM,
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */
.nr_garbage = 0,
};
#endif /* CONFIG_OPTPROBES */
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */
/* We have preemption disabled, so it is safe to use the __ versions. */
static inline void set_kprobe_instance(struct kprobe *kp)
{
__this_cpu_write(kprobe_instance, kp);
}
static inline void reset_kprobe_instance(void)
{
__this_cpu_write(kprobe_instance, NULL);
}
/*
* This routine is called either:
* - under the 'kprobe_mutex' - during kprobe_[un]register().
* OR
* - with preemption disabled - from architecture specific code.
*/
struct kprobe *get_kprobe(void *addr)
{
struct hlist_head *head;
struct kprobe *p;
head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
hlist_for_each_entry_rcu(p, head, hlist,
lockdep_is_held(&kprobe_mutex)) {
if (p->addr == addr)
return p;
}
return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
/* Return true if 'p' is an aggregator */
static inline bool kprobe_aggrprobe(struct kprobe *p)
{
return p->pre_handler == aggr_pre_handler;
}
/* Return true if 'p' is unused */
static inline bool kprobe_unused(struct kprobe *p)
{
return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
list_empty(&p->list);
}
/* Keep all fields in the kprobe consistent. */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}
#ifdef CONFIG_OPTPROBES
/* NOTE: This is protected by 'kprobe_mutex'. */
static bool kprobes_allow_optimization;
/*
* Call all 'kprobe::pre_handler' on the list, but ignore their return values.
* This must be called from arch-dep optimized caller.
*/
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
kp->pre_handler(kp, regs);
}
reset_kprobe_instance();
}
}
NOKPROBE_SYMBOL(opt_pre_handler);
/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
op = container_of(p, struct optimized_kprobe, kp);
arch_remove_optimized_kprobe(op);
arch_remove_kprobe(p);
kfree(op);
}
/* Return true if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
struct optimized_kprobe *op;
if (kprobe_aggrprobe(p)) {
op = container_of(p, struct optimized_kprobe, kp);
return arch_prepared_optinsn(&op->optinsn);
}
return 0;
}
/* Return true if the kprobe is disarmed. Note: p must be on hash list */
bool kprobe_disarmed(struct kprobe *p)
{
struct optimized_kprobe *op;
/* If the kprobe is not an aggr/opt probe, just report whether it is disabled. */
if (!kprobe_aggrprobe(p))
return kprobe_disabled(p);
op = container_of(p, struct optimized_kprobe, kp);
return kprobe_disabled(p) && list_empty(&op->list);
}
/* Return true if the probe is queued on (un)optimizing lists */
static bool kprobe_queued(struct kprobe *p)
{
struct optimized_kprobe *op;
if (kprobe_aggrprobe(p)) {
op = container_of(p, struct optimized_kprobe, kp);
if (!list_empty(&op->list))
return true;
}
return false;
}
/*
* Return an optimized kprobe whose optimizing code replaces
* instructions including 'addr' (excluding the breakpoint).
*/
static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
{
int i;
struct kprobe *p = NULL;
struct optimized_kprobe *op;
/* Don't check i == 0, since that is a breakpoint case. */
for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
p = get_kprobe(addr - i);
if (p && kprobe_optready(p)) {
op = container_of(p, struct optimized_kprobe, kp);
if (arch_within_optimized_kprobe(op, addr))
return p;
}
return NULL;
}
/* Optimization staging list, protected by 'kprobe_mutex' */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);
static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
/*
* Optimize (replace a breakpoint with a jump) kprobes listed on
* 'optimizing_list'.
*/
static void do_optimize_kprobes(void)
{
lockdep_assert_held(&text_mutex);
/*
* Optimization/unoptimization refers to 'online_cpus' via
* stop_machine(), while cpu-hotplug modifies 'online_cpus'; at the
* same time, 'text_mutex' is taken both by cpu-hotplug and here.
* That combination can deadlock (cpu-hotplug tries to lock
* 'text_mutex' while stop_machine() cannot proceed because
* 'online_cpus' has changed). To avoid this deadlock, the caller must
* hold the cpu-hotplug lock so that no hotplug happens outside of the
* 'text_mutex' critical section.
*/
lockdep_assert_cpus_held();
/* Optimization is never done while kprobes are disarmed. */
if (kprobes_all_disarmed || !kprobes_allow_optimization ||
list_empty(&optimizing_list))
return;
arch_optimize_kprobes(&optimizing_list);
}
/*
* Unoptimize (replace a jump with a breakpoint and remove the breakpoint
* if need) kprobes listed on 'unoptimizing_list'.
*/
static void do_unoptimize_kprobes(void)
{
struct optimized_kprobe *op, *tmp;
lockdep_assert_held(&text_mutex);
/* See comment in do_optimize_kprobes() */
lockdep_assert_cpus_held();
if (!list_empty(&unoptimizing_list))
arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
/* Switching from detour code to origin */
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
/* Disarm probes if marked disabled and not gone */
if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
arch_disarm_kprobe(&op->kp);
if (kprobe_unused(&op->kp)) {
/*
* Remove unused probes from hash list. After waiting
* for synchronization, these probes are reclaimed.
* (reclaiming is done by do_free_cleaned_kprobes().)
*/
hlist_del_rcu(&op->kp.hlist);
} else
list_del_init(&op->list);
}
}
/* Reclaim all kprobes on the 'freeing_list' */
static void do_free_cleaned_kprobes(void)
{
struct optimized_kprobe *op, *tmp;
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
list_del_init(&op->list);
if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
/*
* This must not happen, but if there is a kprobe
* still in use, keep it on kprobes hash list.
*/
continue;
}
free_aggr_kprobe(&op->kp);
}
}
/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}
/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
guard(mutex)(&kprobe_mutex);
scoped_guard(cpus_read_lock) {
guard(mutex)(&text_mutex);
/*
* Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
* kprobes before waiting for the quiescence period.
*/
do_unoptimize_kprobes();
/*
* Step 2: Wait for a quiescence period to ensure all potentially
* preempted tasks have scheduled normally. Because an optprobe
* may modify multiple instructions, there is a chance that the Nth
* instruction is preempted. In that case, such tasks could return
* to the 2nd-Nth byte of the jump instruction; this wait avoids that.
* Note that on a non-preemptive kernel, this is transparently converted
* to synchronize_sched() to wait for all interrupts to have completed.
*/
synchronize_rcu_tasks();
/* Step 3: Optimize kprobes after the quiescence period */
do_optimize_kprobes();
/* Step 4: Free cleaned kprobes after the quiescence period */
do_free_cleaned_kprobes();
}
/* Step 5: Kick optimizer again if needed */
if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
kick_kprobe_optimizer();
}
static void wait_for_kprobe_optimizer_locked(void)
{
lockdep_assert_held(&kprobe_mutex);
while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
mutex_unlock(&kprobe_mutex);
/* This will also make 'optimizing_work' execute immediately */
flush_delayed_work(&optimizing_work);
/* 'optimizing_work' might not have been queued yet, relax */
cpu_relax();
mutex_lock(&kprobe_mutex);
}
}
/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
guard(mutex)(&kprobe_mutex);
wait_for_kprobe_optimizer_locked();
}
bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
struct optimized_kprobe *_op;
list_for_each_entry(_op, &unoptimizing_list, list) {
if (op == _op)
return true;
}
return false;
}
/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
/* Check if the kprobe is disabled or not ready for optimization. */
if (!kprobe_optready(p) || !kprobes_allow_optimization ||
(kprobe_disabled(p) || kprobes_all_disarmed))
return;
/* kprobes with 'post_handler' can not be optimized */
if (p->post_handler)
return;
op = container_of(p, struct optimized_kprobe, kp);
/* Check that there are no other kprobes within the optimized instructions. */
if (arch_check_optimized_kprobe(op) < 0)
return;
/* Check if it is already optimized. */
if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
if (optprobe_queued_unopt(op)) {
/* This is under unoptimizing. Just dequeue the probe */
list_del_init(&op->list);
}
return;
}
op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
/*
* On the 'unoptimizing_list' and 'optimizing_list',
* 'op' must have OPTIMIZED flag
*/
if (WARN_ON_ONCE(!list_empty(&op->list)))
return;
list_add(&op->list, &optimizing_list);
kick_kprobe_optimizer();
}
/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
lockdep_assert_cpus_held();
arch_unoptimize_kprobe(op);
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}
/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
struct optimized_kprobe *op;
if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
return; /* This is not an optprobe nor optimized */
op = container_of(p, struct optimized_kprobe, kp);
if (!kprobe_optimized(p))
return;
if (!list_empty(&op->list)) {
if (optprobe_queued_unopt(op)) {
/* Queued in unoptimizing queue */
if (force) {
/*
* Forcibly unoptimize the kprobe here, and queue it
* in the freeing list for release afterwards.
*/
force_unoptimize_kprobe(op);
list_move(&op->list, &freeing_list);
}
} else {
/* Dequeue from the optimizing queue */
list_del_init(&op->list);
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}
return;
}
/* Optimized kprobe case */
if (force) {
/* Forcibly update the code: this is a special case */
force_unoptimize_kprobe(op);
} else {
list_add(&op->list, &unoptimizing_list);
kick_kprobe_optimizer();
}
}
/* Cancel unoptimizing for reusing */
static int reuse_unused_kprobe(struct kprobe *ap)
{
struct optimized_kprobe *op;
/*
* An unused kprobe MUST be in the middle of delayed unoptimizing (which
* means there is still a relative jump in place) and disabled.
*/
op = container_of(ap, struct optimized_kprobe, kp);
WARN_ON_ONCE(list_empty(&op->list));
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again. (remove from 'op->list') */
if (!kprobe_optready(ap))
return -EINVAL;
optimize_kprobe(ap);
return 0;
}
/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
op = container_of(p, struct optimized_kprobe, kp);
if (!list_empty(&op->list))
/* Dequeue from the (un)optimization queue */
list_del_init(&op->list);
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
if (kprobe_unused(p)) {
/*
* Unused kprobe is on unoptimizing or freeing list. We move it
* to freeing_list and let the kprobe_optimizer() remove it from
* the kprobe hash list and free it.
*/
if (optprobe_queued_unopt(op))
list_move(&op->list, &freeing_list);
}
/* Don't touch the code, because it is already freed. */
arch_remove_optimized_kprobe(op);
}
static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
if (!kprobe_ftrace(p))
arch_prepare_optimized_kprobe(op, p);
}
/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
op = container_of(p, struct optimized_kprobe, kp);
__prepare_optimized_kprobe(op, p);
}
/* Allocate new optimized_kprobe and try to prepare optimized instructions. */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
if (!op)
return NULL;
INIT_LIST_HEAD(&op->list);
op->kp.addr = p->addr;
__prepare_optimized_kprobe(op, p);
return &op->kp;
}
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
/*
* Prepare an optimized_kprobe and optimize it.
* NOTE: 'p' must be a normal registered kprobe.
*/
static void try_to_optimize_kprobe(struct kprobe *p)
{
struct kprobe *ap;
struct optimized_kprobe *op;
/* Impossible to optimize ftrace-based kprobe. */
if (kprobe_ftrace(p))
return;
/* For preparing optimization, jump_label_text_reserved() is called. */
guard(cpus_read_lock)();
guard(jump_label_lock)();
guard(mutex)(&text_mutex);
ap = alloc_aggr_kprobe(p);
if (!ap)
return;
op = container_of(ap, struct optimized_kprobe, kp);
if (!arch_prepared_optinsn(&op->optinsn)) {
/* If failed to setup optimizing, fallback to kprobe. */
arch_remove_optimized_kprobe(op);
kfree(op);
return;
}
init_aggr_kprobe(ap, p);
optimize_kprobe(ap); /* This just kicks optimizer thread. */
}
static void optimize_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
guard(mutex)(&kprobe_mutex);
/* If optimization is already allowed, just return. */
if (kprobes_allow_optimization)
return;
cpus_read_lock();
kprobes_allow_optimization = true;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry(p, head, hlist)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
cpus_read_unlock();
pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
}
#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
guard(mutex)(&kprobe_mutex);
/* If optimization is already prohibited, just return. */
if (!kprobes_allow_optimization)
return;
cpus_read_lock();
kprobes_allow_optimization = false;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry(p, head, hlist) {
if (!kprobe_disabled(p))
unoptimize_kprobe(p, false);
}
}
cpus_read_unlock();
/* Wait for unoptimizing completion. */
wait_for_kprobe_optimizer_locked();
pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}
static DEFINE_MUTEX(kprobe_sysctl_mutex);
static int sysctl_kprobes_optimization;
static int proc_kprobes_optimization_handler(const struct ctl_table *table,
int write, void *buffer,
size_t *length, loff_t *ppos)
{
int ret;
guard(mutex)(&kprobe_sysctl_mutex);
sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (sysctl_kprobes_optimization)
optimize_all_kprobes();
else
unoptimize_all_kprobes();
return ret;
}
static const struct ctl_table kprobe_sysctls[] = {
{
.procname = "kprobes-optimization",
.data = &sysctl_kprobes_optimization,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_kprobes_optimization_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
};
static void __init kprobe_sysctls_init(void)
{
register_sysctl_init("debug", kprobe_sysctls);
}
#endif /* CONFIG_SYSCTL */
/* Put a breakpoint for a probe. */
static void __arm_kprobe(struct kprobe *p)
{
struct kprobe *_p;
lockdep_assert_held(&text_mutex);
/* Find the overlapping optimized kprobes. */
_p = get_optimized_kprobe(p->addr);
if (unlikely(_p))
/* Fallback to unoptimized kprobe */
unoptimize_kprobe(_p, true);
arch_arm_kprobe(p);
optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
}
/* Remove the breakpoint of a probe. */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
struct kprobe *_p;
lockdep_assert_held(&text_mutex);
/* Try to unoptimize */
unoptimize_kprobe(p, kprobes_all_disarmed);
if (!kprobe_queued(p)) {
arch_disarm_kprobe(p);
/* If another kprobe was blocked, re-optimize it. */
_p = get_optimized_kprobe(p->addr);
if (unlikely(_p) && reopt)
optimize_kprobe(_p);
}
/*
* TODO: Since unoptimization and real disarming will be done by
* the worker thread, we cannot check here whether other probes were
* unoptimized because of this probe. They should be re-optimized
* by the worker thread.
*/
}
#else /* !CONFIG_OPTPROBES */
#define optimize_kprobe(p) do {} while (0)
#define unoptimize_kprobe(p, f) do {} while (0)
#define kill_optimized_kprobe(p) do {} while (0)
#define prepare_optimized_kprobe(p) do {} while (0)
#define try_to_optimize_kprobe(p) do {} while (0)
#define __arm_kprobe(p) arch_arm_kprobe(p)
#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
#define kprobe_disarmed(p) kprobe_disabled(p)
#define wait_for_kprobe_optimizer_locked() \
lockdep_assert_held(&kprobe_mutex)
static int reuse_unused_kprobe(struct kprobe *ap)
{
/*
* If optimized kprobes are NOT supported, the aggr kprobe is
* released at the same time that the last aggregated kprobe is
* unregistered.
* Thus there should be no chance to reuse an unused kprobe.
*/
WARN_ON_ONCE(1);
return -EINVAL;
}
static void free_aggr_kprobe(struct kprobe *p)
{
arch_remove_kprobe(p);
kfree(p);
}
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
.func = kprobe_ftrace_handler,
.flags = FTRACE_OPS_FL_SAVE_REGS,
};
static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
.func = kprobe_ftrace_handler,
.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;
bool kprobe_ftrace_disabled;
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
int *cnt)
{
int ret;
lockdep_assert_held(&kprobe_mutex);
ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
return ret;
if (*cnt == 0) {
ret = register_ftrace_function(ops);
if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
/*
* At this point, since ops is not registered, we should be safe from
* registering an empty filter.
*/
ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
return ret;
}
}
(*cnt)++;
return ret;
}
static int arm_kprobe_ftrace(struct kprobe *p)
{
bool ipmodify = (p->post_handler != NULL);
return __arm_kprobe_ftrace(p,
ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}
static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
int *cnt)
{
int ret;
lockdep_assert_held(&kprobe_mutex);
if (*cnt == 1) {
ret = unregister_ftrace_function(ops);
if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
return ret;
}
(*cnt)--;
ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
p->addr, ret);
return ret;
}
static int disarm_kprobe_ftrace(struct kprobe *p)
{
bool ipmodify = (p->post_handler != NULL);
return __disarm_kprobe_ftrace(p,
ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}
void kprobe_ftrace_kill(void)
{
kprobe_ftrace_disabled = true;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
return -ENODEV;
}
static inline int disarm_kprobe_ftrace(struct kprobe *p)
{
return -ENODEV;
}
#endif
static int prepare_kprobe(struct kprobe *p)
{
/* Must ensure p->addr is really on ftrace */
if (kprobe_ftrace(p))
return arch_prepare_kprobe_ftrace(p);
return arch_prepare_kprobe(p);
}
static int arm_kprobe(struct kprobe *kp)
{
if (unlikely(kprobe_ftrace(kp)))
return arm_kprobe_ftrace(kp);
guard(cpus_read_lock)();
guard(mutex)(&text_mutex);
__arm_kprobe(kp);
return 0;
}
static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
if (unlikely(kprobe_ftrace(kp)))
return disarm_kprobe_ftrace(kp);
guard(cpus_read_lock)();
guard(mutex)(&text_mutex);
__disarm_kprobe(kp, reopt);
return 0;
}
/*
* Aggregate handlers for multiple kprobes support - these handlers
* take care of invoking the individual kprobe handlers on p->list
*/
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
if (kp->pre_handler(kp, regs))
return 1;
}
reset_kprobe_instance();
}
return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);
static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->post_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
kp->post_handler(kp, regs, flags);
reset_kprobe_instance();
}
}
}
NOKPROBE_SYMBOL(aggr_post_handler);
/* Walks the list and increments 'nmissed' if 'p' has child probes. */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
struct kprobe *kp;
if (!kprobe_aggrprobe(p)) {
p->nmissed++;
} else {
list_for_each_entry_rcu(kp, &p->list, list)
kp->nmissed++;
}
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
static struct kprobe kprobe_busy = {
.addr = (void *) get_kprobe,
};
void kprobe_busy_begin(void)
{
struct kprobe_ctlblk *kcb;
preempt_disable();
__this_cpu_write(current_kprobe, &kprobe_busy);
kcb = get_kprobe_ctlblk();
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
}
void kprobe_busy_end(void)
{
__this_cpu_write(current_kprobe, NULL);
preempt_enable();
}
/* Add the new probe to 'ap->list'. */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
if (p->post_handler)
unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
list_add_rcu(&p->list, &ap->list);
if (p->post_handler && !ap->post_handler)
ap->post_handler = aggr_post_handler;
return 0;
}
/*
* Fill in the required fields of the aggregator kprobe. Replace the
* earlier kprobe in the hlist with the aggregator kprobe.
*/
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
/* Copy the insn slot of 'p' to 'ap'. */
copy_kprobe(p, ap);
flush_insn_slot(ap);
ap->addr = p->addr;
ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
ap->pre_handler = aggr_pre_handler;
/* We don't care about a kprobe which has already gone. */
if (p->post_handler && !kprobe_gone(p))
ap->post_handler = aggr_post_handler;
INIT_LIST_HEAD(&ap->list);
INIT_HLIST_NODE(&ap->hlist);
list_add_rcu(&p->list, &ap->list);
hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
* This registers the second or subsequent kprobe at the same address.
*/
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
int ret = 0;
struct kprobe *ap = orig_p;
scoped_guard(cpus_read_lock) {
/* For preparing optimization, jump_label_text_reserved() is called */
guard(jump_label_lock)();
guard(mutex)(&text_mutex);
if (!kprobe_aggrprobe(orig_p)) {
/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
ap = alloc_aggr_kprobe(orig_p);
if (!ap)
return -ENOMEM;
init_aggr_kprobe(ap, orig_p);
} else if (kprobe_unused(ap)) {
/* This probe is going to die. Rescue it */
ret = reuse_unused_kprobe(ap);
if (ret)
return ret;
}
if (kprobe_gone(ap)) {
/*
* We are attempting to insert a new probe at the same location as a
* probe that lived in a module vaddr area which has already been
* freed, so its instruction slot has already been
* released. We need a new slot for the new probe.
*/
ret = arch_prepare_kprobe(ap);
if (ret)
/*
* Even if we fail to allocate a new slot, we don't need to
* free 'ap'. It will be used next time, or
* freed by unregister_kprobe().
*/
return ret;
/* Prepare optimized instructions if possible. */
prepare_optimized_kprobe(ap);
/*
* Clear gone flag to prevent allocating new slot again, and
* set disabled flag because it is not armed yet.
*/
ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
| KPROBE_FLAG_DISABLED;
}
/* Copy the insn slot of 'p' to 'ap'. */
copy_kprobe(ap, p);
ret = add_new_kprobe(ap, p);
}
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed) {
/* Arm the breakpoint again. */
ret = arm_kprobe(ap);
if (ret) {
ap->flags |= KPROBE_FLAG_DISABLED;
list_del_rcu(&p->list);
synchronize_rcu();
}
}
}
return ret;
}
bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
/* The '__kprobes' functions and entry code must not be probed. */
return addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end;
}
static bool __within_kprobe_blacklist(unsigned long addr)
{
struct kprobe_blacklist_entry *ent;
if (arch_within_kprobe_blacklist(addr))
return true;
/*
* If 'kprobe_blacklist' is defined, check the address and
* reject any probe registration in the prohibited area.
*/
list_for_each_entry(ent, &kprobe_blacklist, list) {
if (addr >= ent->start_addr && addr < ent->end_addr)
return true;
}
return false;
}
bool within_kprobe_blacklist(unsigned long addr)
{
char symname[KSYM_NAME_LEN], *p;
if (__within_kprobe_blacklist(addr))
return true;
/* Check if the address is on a suffixed-symbol */
if (!lookup_symbol_name(addr, symname)) {
p = strchr(symname, '.');
if (!p)
return false;
*p = '\0';
addr = (unsigned long)kprobe_lookup_name(symname, 0);
if (addr)
return __within_kprobe_blacklist(addr);
}
return false;
}
/*
* arch_adjust_kprobe_addr - adjust the address
* @addr: symbol base address
* @offset: offset within the symbol
* @on_func_entry: was this @addr+@offset on the function entry
*
* Typically returns @addr + @offset, except for special cases where the
* function might be prefixed by a CFI landing pad, in that case any offset
* inside the landing pad is mapped to the first 'real' instruction of the
* symbol.
*
* Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
* instruction at +0.
*/
kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
unsigned long offset,
bool *on_func_entry)
{
*on_func_entry = !offset;
return (kprobe_opcode_t *)(addr + offset);
}
/*
* If 'symbol_name' is specified, look it up and add the 'offset'
* to it. This way, we can specify a relative address to a symbol.
* This returns encoded errors if it fails to look up symbol or invalid
* combination of parameters.
*/
static kprobe_opcode_t *
_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
unsigned long offset, bool *on_func_entry)
{
if ((symbol_name && addr) || (!symbol_name && !addr))
return ERR_PTR(-EINVAL);
if (symbol_name) {
/*
* Input: @sym + @offset
* Output: @addr + @offset
*
* NOTE: kprobe_lookup_name() does *NOT* fold the offset
* argument into its output!
*/
addr = kprobe_lookup_name(symbol_name, offset);
if (!addr)
return ERR_PTR(-ENOENT);
}
/*
* So here we have @addr + @offset, displace it into a new
* @addr' + @offset' where @addr' is the symbol start address.
*/
addr = (void *)addr + offset;
if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
return ERR_PTR(-ENOENT);
addr = (void *)addr - offset;
/*
* Then ask the architecture to re-combine them, taking care of
* magical function entry details while telling us if this was indeed
* at the start of the function.
*/
addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
if (!addr)
return ERR_PTR(-EINVAL);
return addr;
}
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
bool on_func_entry;
return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}
/*
* Check the 'p' is valid and return the aggregator kprobe
* at the same address.
*/
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
struct kprobe *ap, *list_p;
lockdep_assert_held(&kprobe_mutex);
ap = get_kprobe(p->addr);
if (unlikely(!ap))
return NULL;
if (p == ap)
return ap;
list_for_each_entry(list_p, &ap->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
return ap;
return NULL;
}
/*
* Warn and return error if the kprobe is being re-registered since
* there must be a software bug.
*/
static inline int warn_kprobe_rereg(struct kprobe *p)
{
guard(mutex)(&kprobe_mutex);
if (WARN_ON_ONCE(__get_valid_kprobe(p)))
return -EINVAL;
return 0;
}
static int check_ftrace_location(struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
if (ftrace_location(addr) == addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
p->flags |= KPROBE_FLAG_FTRACE;
#else
return -EINVAL;
#endif
}
return 0;
}
static bool is_cfi_preamble_symbol(unsigned long addr)
{
char symbuf[KSYM_NAME_LEN];
if (lookup_symbol_name(addr, symbuf))
return false;
return str_has_prefix(symbuf, "__cfi_") ||
str_has_prefix(symbuf, "__pfx_");
}
static int check_kprobe_address_safe(struct kprobe *p,
struct module **probed_mod)
{
int ret;
ret = check_ftrace_location(p);
if (ret)
return ret;
guard(jump_label_lock)();
/* Ensure the address is in a text area, and find the owning module if one exists. */
*probed_mod = NULL;
if (!core_kernel_text((unsigned long) p->addr)) {
guard(rcu)();
*probed_mod = __module_text_address((unsigned long) p->addr);
if (!(*probed_mod))
return -EINVAL;
/*
* We must hold a refcount of the probed module while updating
* its code to prohibit unexpected unloading.
*/
if (unlikely(!try_module_get(*probed_mod)))
return -ENOENT;
}
/* Ensure it is not in reserved area. */
if (in_gate_area_no_mm((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr) ||
is_cfi_preamble_symbol((unsigned long)p->addr)) {
module_put(*probed_mod);
return -EINVAL;
}
/* Get module refcount and reject __init functions for loaded modules. */
if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
/*
* If the module has already freed '.init.text', we can't insert
* kprobes there.
*/
if (within_module_init((unsigned long)p->addr, *probed_mod) &&
!module_is_coming(*probed_mod)) {
module_put(*probed_mod);
return -ENOENT;
}
}
return 0;
}
static int __register_kprobe(struct kprobe *p)
{
int ret;
struct kprobe *old_p;
guard(mutex)(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p)
/* Since this may unoptimize 'old_p', 'text_mutex' is taken inside. */
return register_aggr_kprobe(old_p, p);
scoped_guard(cpus_read_lock) {
/* Prevent text modification */
guard(mutex)(&text_mutex);
ret = prepare_kprobe(p);
if (ret)
return ret;
}
INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
ret = arm_kprobe(p);
if (ret) {
hlist_del_rcu(&p->hlist);
synchronize_rcu();
}
}
/* Try to optimize kprobe */
try_to_optimize_kprobe(p);
return 0;
}
int register_kprobe(struct kprobe *p)
{
int ret;
struct module *probed_mod;
kprobe_opcode_t *addr;
bool on_func_entry;
/* Canonicalize probe address from symbol */
addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
if (IS_ERR(addr))
return PTR_ERR(addr);
p->addr = addr;
ret = warn_kprobe_rereg(p);
if (ret)
return ret;
/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
if (on_func_entry)
p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
p->nmissed = 0;
INIT_LIST_HEAD(&p->list);
ret = check_kprobe_address_safe(p, &probed_mod);
if (ret)
return ret;
ret = __register_kprobe(p);
if (probed_mod)
module_put(probed_mod);
return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
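/*
* Minimal module-side usage sketch (symbol and handler names are
* hypothetical): place a probe on a function and log each hit from the
* pre-handler, then remove it on module exit.
*
*	static int my_pre(struct kprobe *p, struct pt_regs *regs)
*	{
*		pr_info("hit %pS\n", p->addr);
*		return 0;
*	}
*
*	static struct kprobe my_kp = {
*		.symbol_name = "kernel_clone",
*		.pre_handler = my_pre,
*	};
*
*	// module init: ret = register_kprobe(&my_kp);
*	// module exit: unregister_kprobe(&my_kp);
*/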
/* Check if all probes on the 'ap' are disabled. */
static bool aggr_kprobe_disabled(struct kprobe *ap)
{
struct kprobe *kp;
lockdep_assert_held(&kprobe_mutex);
list_for_each_entry(kp, &ap->list, list)
if (!kprobe_disabled(kp))
/*
* Since there is an active probe on the list,
* we can't disable this 'ap'.
*/
return false;
return true;
}
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
struct kprobe *orig_p;
int ret;
lockdep_assert_held(&kprobe_mutex);
/* Get an original kprobe for return */
orig_p = __get_valid_kprobe(p);
if (unlikely(orig_p == NULL))
return ERR_PTR(-EINVAL);
if (kprobe_disabled(p))
return orig_p;
/* Disable probe if it is a child probe */
if (p != orig_p)
p->flags |= KPROBE_FLAG_DISABLED;
/* Try to disarm and disable this/parent probe */
if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
/*
* Don't be lazy here. Even if 'kprobes_all_disarmed'
* is false, 'orig_p' might not have been armed yet.
* Note arm_all_kprobes() __tries__ to arm all kprobes
* on a best-effort basis.
*/
if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
ret = disarm_kprobe(orig_p, true);
if (ret) {
p->flags &= ~KPROBE_FLAG_DISABLED;
return ERR_PTR(ret);
}
}
orig_p->flags |= KPROBE_FLAG_DISABLED;
}
return orig_p;
}
/*
* Unregister a kprobe without a scheduler synchronization.
*/
static int __unregister_kprobe_top(struct kprobe *p)
{
struct kprobe *ap, *list_p;
/* Disable kprobe. This will disarm it if needed. */
ap = __disable_kprobe(p);
if (IS_ERR(ap))
return PTR_ERR(ap);
WARN_ON(ap != p && !kprobe_aggrprobe(ap));
/*
* If the probe is an independent (and non-optimized) kprobe
* (not an aggrprobe), the last kprobe on the aggrprobe, or
* kprobe is already disarmed, just remove from the hash list.
*/
if (ap == p ||
(list_is_singular(&ap->list) && kprobe_disarmed(ap))) {
/*
* !disarmed could happen if the probe is under delayed
* unoptimizing.
*/
hlist_del_rcu(&ap->hlist);
return 0;
}
/* If disabling probe has special handlers, update aggrprobe */
if (p->post_handler && !kprobe_gone(p)) {
list_for_each_entry(list_p, &ap->list, list) {
if ((list_p != p) && (list_p->post_handler))
break;
}
/* No other probe has post_handler */
if (list_entry_is_head(list_p, &ap->list, list)) {
/*
* For the kprobe-on-ftrace case, we keep the
* post_handler setting to identify this aggrprobe
* armed with kprobe_ipmodify_ops.
*/
if (!kprobe_ftrace(ap))
ap->post_handler = NULL;
}
}
/*
* Remove from the aggrprobe: this path will do nothing in
* __unregister_kprobe_bottom().
*/
list_del_rcu(&p->list);
if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
/*
* Try to optimize this probe again, because post
* handler may have been changed.
*/
optimize_kprobe(ap);
return 0;
}
static void __unregister_kprobe_bottom(struct kprobe *p)
{
struct kprobe *ap;
if (list_empty(&p->list))
/* This is an independent kprobe */
arch_remove_kprobe(p);
else if (list_is_singular(&p->list)) {
/* This is the last child of an aggrprobe */
ap = list_entry(p->list.next, struct kprobe, list);
list_del(&p->list);
free_aggr_kprobe(ap);
}
/* Otherwise, do nothing. */
}
int register_kprobes(struct kprobe **kps, int num)
{
int i, ret = 0;
if (num <= 0)
return -EINVAL;
for (i = 0; i < num; i++) {
ret = register_kprobe(kps[i]);
if (ret < 0) {
if (i > 0)
unregister_kprobes(kps, i);
break;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);
void unregister_kprobe(struct kprobe *p)
{
unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);
void unregister_kprobes(struct kprobe **kps, int num)
{
int i;
if (num <= 0)
return;
scoped_guard(mutex, &kprobe_mutex) {
for (i = 0; i < num; i++)
if (__unregister_kprobe_top(kps[i]) < 0)
kps[i]->addr = NULL;
}
synchronize_rcu();
for (i = 0; i < num; i++)
if (kps[i]->addr)
__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
int __weak kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
static struct notifier_block kprobe_exceptions_nb = {
.notifier_call = kprobe_exceptions_notify,
.priority = 0x7fffffff /* we need to be notified first */
};
#ifdef CONFIG_KRETPROBES
#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
/* callbacks for objpool of kretprobe instances */
static int kretprobe_init_inst(void *nod, void *context)
{
struct kretprobe_instance *ri = nod;
ri->rph = context;
return 0;
}
static int kretprobe_fini_pool(struct objpool_head *head, void *context)
{
kfree(context);
return 0;
}
static void free_rp_inst_rcu(struct rcu_head *head)
{
struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
struct kretprobe_holder *rph = ri->rph;
objpool_drop(ri, &rph->pool);
}
NOKPROBE_SYMBOL(free_rp_inst_rcu);
static void recycle_rp_inst(struct kretprobe_instance *ri)
{
struct kretprobe *rp = get_kretprobe(ri);
if (likely(rp))
objpool_push(ri, &rp->rph->pool);
else
call_rcu(&ri->rcu, free_rp_inst_rcu);
}
NOKPROBE_SYMBOL(recycle_rp_inst);
/*
* This function is called from delayed_put_task_struct() when a task is
* dead and cleaned up to recycle any kretprobe instances associated with
* this task. These left over instances represent probed functions that
* have been called but will never return.
*/
void kprobe_flush_task(struct task_struct *tk)
{
struct kretprobe_instance *ri;
struct llist_node *node;
/* Early boot, not yet initialized. */
if (unlikely(!kprobes_initialized))
return;
kprobe_busy_begin();
node = __llist_del_all(&tk->kretprobe_instances);
while (node) {
ri = container_of(node, struct kretprobe_instance, llist);
node = node->next;
recycle_rp_inst(ri);
}
kprobe_busy_end();
}
NOKPROBE_SYMBOL(kprobe_flush_task);
static inline void free_rp_inst(struct kretprobe *rp)
{
struct kretprobe_holder *rph = rp->rph;
if (!rph)
return;
rp->rph = NULL;
objpool_fini(&rph->pool);
}
/* This assumes 'tsk' is the current task or a task that is not running. */
static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
struct llist_node **cur)
{
struct kretprobe_instance *ri = NULL;
struct llist_node *node = *cur;
if (!node)
node = tsk->kretprobe_instances.first;
else
node = node->next;
while (node) {
ri = container_of(node, struct kretprobe_instance, llist);
if (ri->ret_addr != kretprobe_trampoline_addr()) {
*cur = node;
return ri->ret_addr;
}
node = node->next;
}
return NULL;
}
NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
/**
* kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
* @tsk: Target task
* @fp: A frame pointer
* @cur: a storage of the loop cursor llist_node pointer for next call
*
 * Find the correct return address modified by a kretprobe on @tsk and return
 * it as an unsigned long. If the return address is found, this returns that
 * address value; otherwise this returns 0.
 * The @tsk must be 'current' or a task which is not running. @fp is a hint
 * to get the correct return address - which is compared with the
 * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
 * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the
 * first call, but '@cur' itself must NOT be NULL.
*/
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
struct llist_node **cur)
{
struct kretprobe_instance *ri;
kprobe_opcode_t *ret;
if (WARN_ON_ONCE(!cur))
return 0;
do {
ret = __kretprobe_find_ret_addr(tsk, cur);
if (!ret)
break;
ri = container_of(*cur, struct kretprobe_instance, llist);
} while (ri->fp != fp);
return (unsigned long)ret;
}
NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
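/*
 * Illustrative sketch of the cursor protocol documented above, as used by an
 * unwinder-style caller ('frame_fp' is a hypothetical per-frame value):
 *
 *	struct llist_node *cur = NULL;	// must start out as NULL
 *	unsigned long ret_addr;
 *
 *	// One call per unwound frame, passing the same cursor back in so the
 *	// search resumes after the previously found instance. A return value
 *	// of 0 means no (further) kretprobe-modified return address exists.
 *	ret_addr = kretprobe_find_ret_addr(current, frame_fp, &cur);
 */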
void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
kprobe_opcode_t *correct_ret_addr)
{
/*
* Do nothing by default. Please fill this to update the fake return
* address on the stack with the correct one on each arch if possible.
*/
}
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
void *frame_pointer)
{
struct kretprobe_instance *ri = NULL;
struct llist_node *first, *node = NULL;
kprobe_opcode_t *correct_ret_addr;
struct kretprobe *rp;
/* Find correct address and all nodes for this frame. */
correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
if (!correct_ret_addr) {
		pr_err("kretprobe: Return address not found, not executing the handler. There may be a bug in the kernel.\n");
BUG_ON(1);
}
/*
* Set the return address as the instruction pointer, because if the
* user handler calls stack_trace_save_regs() with this 'regs',
* the stack trace will start from the instruction pointer.
*/
instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
/* Run the user handler of the nodes. */
first = current->kretprobe_instances.first;
while (first) {
ri = container_of(first, struct kretprobe_instance, llist);
if (WARN_ON_ONCE(ri->fp != frame_pointer))
break;
rp = get_kretprobe(ri);
if (rp && rp->handler) {
struct kprobe *prev = kprobe_running();
__this_cpu_write(current_kprobe, &rp->kp);
ri->ret_addr = correct_ret_addr;
rp->handler(ri, regs);
__this_cpu_write(current_kprobe, prev);
}
if (first == node)
break;
first = first->next;
}
arch_kretprobe_fixup_return(regs, correct_ret_addr);
/* Unlink all nodes for this frame. */
first = current->kretprobe_instances.first;
current->kretprobe_instances.first = node->next;
node->next = NULL;
/* Recycle free instances. */
while (first) {
ri = container_of(first, struct kretprobe_instance, llist);
first = first->next;
recycle_rp_inst(ri);
}
return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(__kretprobe_trampoline_handler);
/*
* This kprobe pre_handler is registered with every kretprobe. When probe
* hits it will set up the return probe.
*/
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe *rp = container_of(p, struct kretprobe, kp);
struct kretprobe_holder *rph = rp->rph;
struct kretprobe_instance *ri;
ri = objpool_pop(&rph->pool);
if (!ri) {
rp->nmissed++;
return 0;
}
if (rp->entry_handler && rp->entry_handler(ri, regs)) {
objpool_push(ri, &rph->pool);
return 0;
}
arch_prepare_kretprobe(ri, regs);
	__llist_add(&ri->llist, &current->kretprobe_instances);
return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
#else /* CONFIG_KRETPROBE_ON_RETHOOK */
/*
* This kprobe pre_handler is registered with every kretprobe. When probe
* hits it will set up the return probe.
*/
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe *rp = container_of(p, struct kretprobe, kp);
struct kretprobe_instance *ri;
struct rethook_node *rhn;
rhn = rethook_try_get(rp->rh);
if (!rhn) {
rp->nmissed++;
return 0;
}
ri = container_of(rhn, struct kretprobe_instance, node);
if (rp->entry_handler && rp->entry_handler(ri, regs))
rethook_recycle(rhn);
else
rethook_hook(rhn, regs, kprobe_ftrace(p));
return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
unsigned long ret_addr,
struct pt_regs *regs)
{
struct kretprobe *rp = (struct kretprobe *)data;
struct kretprobe_instance *ri;
struct kprobe_ctlblk *kcb;
	/* The data must NOT be NULL; if it is, the rethook data structure is broken. */
if (WARN_ON_ONCE(!data) || !rp->handler)
return;
__this_cpu_write(current_kprobe, &rp->kp);
kcb = get_kprobe_ctlblk();
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
ri = container_of(rh, struct kretprobe_instance, node);
rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
NOKPROBE_SYMBOL(kretprobe_rethook_handler);
#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
/**
* kprobe_on_func_entry() -- check whether given address is function entry
* @addr: Target address
* @sym: Target symbol name
* @offset: The offset from the symbol or the address
*
* This checks whether the given @addr+@offset or @sym+@offset is on the
* function entry address or not.
 * This returns 0 if it is the function entry, or -EINVAL if it is not.
 * It also returns -ENOENT if the symbol or address lookup fails.
 * The caller must pass either @addr or @sym (the other must be NULL);
 * otherwise this returns -EINVAL.
*/
int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
bool on_func_entry;
kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);
if (IS_ERR(kp_addr))
return PTR_ERR(kp_addr);
if (!on_func_entry)
return -EINVAL;
return 0;
}
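/*
 * Illustrative sketch: callers pass either an address or a symbol name (the
 * other must be NULL). For example, to verify that "vfs_read"+0 really is a
 * function entry before relying on an entry-only feature:
 *
 *	if (kprobe_on_func_entry(NULL, "vfs_read", 0))
 *		return -EINVAL;	// not a function entry, or lookup failed
 */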
int register_kretprobe(struct kretprobe *rp)
{
int ret;
int i;
void *addr;
ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
if (ret)
return ret;
/* If only 'rp->kp.addr' is specified, check reregistering kprobes */
if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
return -EINVAL;
if (kretprobe_blacklist_size) {
addr = kprobe_addr(&rp->kp);
if (IS_ERR(addr))
return PTR_ERR(addr);
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
if (kretprobe_blacklist[i].addr == addr)
return -EINVAL;
}
}
if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
return -E2BIG;
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0)
rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler,
sizeof(struct kretprobe_instance) +
rp->data_size, rp->maxactive);
if (IS_ERR(rp->rh))
return PTR_ERR(rp->rh);
rp->nmissed = 0;
/* Establish function entry probe point */
ret = register_kprobe(&rp->kp);
if (ret != 0) {
rethook_free(rp->rh);
rp->rh = NULL;
}
#else /* !CONFIG_KRETPROBE_ON_RETHOOK */
rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
if (!rp->rph)
return -ENOMEM;
if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size +
sizeof(struct kretprobe_instance), GFP_KERNEL,
rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) {
kfree(rp->rph);
rp->rph = NULL;
return -ENOMEM;
}
rcu_assign_pointer(rp->rph->rp, rp);
rp->nmissed = 0;
/* Establish function entry probe point */
ret = register_kprobe(&rp->kp);
if (ret != 0)
free_rp_inst(rp);
#endif
return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
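/*
 * Illustrative sketch (module-side usage, not part of this file); the probed
 * symbol and handler names are examples only:
 *
 *	static int example_ret_handler(struct kretprobe_instance *ri,
 *				       struct pt_regs *regs)
 *	{
 *		pr_info("returned %lu\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.handler	= example_ret_handler,
 *		.maxactive	= 20,	// <= 0 lets the default above apply
 *		.kp.symbol_name	= "kernel_clone",
 *	};
 *
 *	// module init:  register_kretprobe(&example_rp);
 *	// module exit:  unregister_kretprobe(&example_rp);
 */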
int register_kretprobes(struct kretprobe **rps, int num)
{
int ret = 0, i;
if (num <= 0)
return -EINVAL;
for (i = 0; i < num; i++) {
ret = register_kretprobe(rps[i]);
if (ret < 0) {
if (i > 0)
unregister_kretprobes(rps, i);
break;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);
void unregister_kretprobe(struct kretprobe *rp)
{
unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);
void unregister_kretprobes(struct kretprobe **rps, int num)
{
int i;
if (num <= 0)
return;
for (i = 0; i < num; i++) {
guard(mutex)(&kprobe_mutex);
if (__unregister_kprobe_top(&rps[i]->kp) < 0)
rps[i]->kp.addr = NULL;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
rethook_free(rps[i]->rh);
#else
rcu_assign_pointer(rps[i]->rph->rp, NULL);
#endif
}
synchronize_rcu();
for (i = 0; i < num; i++) {
if (rps[i]->kp.addr) {
__unregister_kprobe_bottom(&rps[i]->kp);
#ifndef CONFIG_KRETPROBE_ON_RETHOOK
free_rp_inst(rps[i]);
#endif
}
}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
int register_kretprobes(struct kretprobe **rps, int num)
{
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(register_kretprobes);
void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);
void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
struct kprobe *kp;
lockdep_assert_held(&kprobe_mutex);
/*
* The module is going away. We should disarm the kprobe which
* is using ftrace, because ftrace framework is still available at
* 'MODULE_STATE_GOING' notification.
*/
if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
disarm_kprobe_ftrace(p);
p->flags |= KPROBE_FLAG_GONE;
if (kprobe_aggrprobe(p)) {
/*
* If this is an aggr_kprobe, we have to list all the
* chained probes and mark them GONE.
*/
list_for_each_entry(kp, &p->list, list)
kp->flags |= KPROBE_FLAG_GONE;
p->post_handler = NULL;
kill_optimized_kprobe(p);
}
/*
* Here, we can remove insn_slot safely, because no thread calls
* the original probed function (which will be freed soon) any more.
*/
arch_remove_kprobe(p);
}
/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
struct kprobe *p;
guard(mutex)(&kprobe_mutex);
/* Disable this kprobe */
p = __disable_kprobe(kp);
return IS_ERR(p) ? PTR_ERR(p) : 0;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
int ret = 0;
struct kprobe *p;
guard(mutex)(&kprobe_mutex);
/* Check whether specified probe is valid. */
p = __get_valid_kprobe(kp);
if (unlikely(p == NULL))
return -EINVAL;
if (kprobe_gone(kp))
		/* This kprobe has gone; we can't enable it. */
return -EINVAL;
if (p != kp)
kp->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed && kprobe_disabled(p)) {
p->flags &= ~KPROBE_FLAG_DISABLED;
ret = arm_kprobe(p);
if (ret) {
p->flags |= KPROBE_FLAG_DISABLED;
if (p != kp)
kp->flags |= KPROBE_FLAG_DISABLED;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
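/*
 * Illustrative sketch: a registered kprobe can be disarmed and re-armed
 * without unregistering it ('example_kp' is a previously registered probe):
 *
 *	disable_kprobe(&example_kp);	// stays registered, stops firing
 *	...
 *	enable_kprobe(&example_kp);	// arm it again
 */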
/* Callers must NOT call this on the usual path; it is only for critical cases. */
void dump_kprobe(struct kprobe *kp)
{
pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);
int kprobe_add_ksym_blacklist(unsigned long entry)
{
struct kprobe_blacklist_entry *ent;
unsigned long offset = 0, size = 0;
if (!kernel_text_address(entry) ||
!kallsyms_lookup_size_offset(entry, &size, &offset))
return -EINVAL;
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
return -ENOMEM;
ent->start_addr = entry;
ent->end_addr = entry + size;
INIT_LIST_HEAD(&ent->list);
list_add_tail(&ent->list, &kprobe_blacklist);
return (int)size;
}
/* Add all symbols in given area into kprobe blacklist */
int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
{
unsigned long entry;
int ret = 0;
for (entry = start; entry < end; entry += ret) {
ret = kprobe_add_ksym_blacklist(entry);
if (ret < 0)
return ret;
if (ret == 0) /* In case of alias symbol */
ret = 1;
}
return 0;
}
int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
char *type, char *sym)
{
return -ERANGE;
}
int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
return 0;
#ifdef CONFIG_OPTPROBES
if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
return 0;
#endif
#endif
if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
return 0;
return -ERANGE;
}
int __init __weak arch_populate_kprobe_blacklist(void)
{
return 0;
}
/*
* Lookup and populate the kprobe_blacklist.
*
* Unlike the kretprobe blacklist, we'll need to determine
* the range of addresses that belong to the said functions,
* since a kprobe need not necessarily be at the beginning
* of a function.
*/
static int __init populate_kprobe_blacklist(unsigned long *start,
unsigned long *end)
{
unsigned long entry;
unsigned long *iter;
int ret;
for (iter = start; iter < end; iter++) {
entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
ret = kprobe_add_ksym_blacklist(entry);
if (ret == -EINVAL)
continue;
if (ret < 0)
return ret;
}
/* Symbols in '__kprobes_text' are blacklisted */
ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
(unsigned long)__kprobes_text_end);
if (ret)
return ret;
/* Symbols in 'noinstr' section are blacklisted */
ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
(unsigned long)__noinstr_text_end);
return ret ? : arch_populate_kprobe_blacklist();
}
#ifdef CONFIG_MODULES
/* Remove all symbols in given area from kprobe blacklist */
static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
{
struct kprobe_blacklist_entry *ent, *n;
list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
if (ent->start_addr < start || ent->start_addr >= end)
continue;
list_del(&ent->list);
kfree(ent);
}
}
static void kprobe_remove_ksym_blacklist(unsigned long entry)
{
kprobe_remove_area_blacklist(entry, entry + 1);
}
static void add_module_kprobe_blacklist(struct module *mod)
{
unsigned long start, end;
int i;
if (mod->kprobe_blacklist) {
for (i = 0; i < mod->num_kprobe_blacklist; i++)
kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
}
start = (unsigned long)mod->kprobes_text_start;
if (start) {
end = start + mod->kprobes_text_size;
kprobe_add_area_blacklist(start, end);
}
start = (unsigned long)mod->noinstr_text_start;
if (start) {
end = start + mod->noinstr_text_size;
kprobe_add_area_blacklist(start, end);
}
}
static void remove_module_kprobe_blacklist(struct module *mod)
{
unsigned long start, end;
int i;
if (mod->kprobe_blacklist) {
for (i = 0; i < mod->num_kprobe_blacklist; i++)
kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
}
start = (unsigned long)mod->kprobes_text_start;
if (start) {
end = start + mod->kprobes_text_size;
kprobe_remove_area_blacklist(start, end);
}
start = (unsigned long)mod->noinstr_text_start;
if (start) {
end = start + mod->noinstr_text_size;
kprobe_remove_area_blacklist(start, end);
}
}
/* Module notifier call back, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct module *mod = data;
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
int checkcore = (val == MODULE_STATE_GOING);
guard(mutex)(&kprobe_mutex);
if (val == MODULE_STATE_COMING)
add_module_kprobe_blacklist(mod);
if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
return NOTIFY_DONE;
/*
	 * When 'MODULE_STATE_GOING' is notified, both the module's '.text' and
	 * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is
	 * notified, only the '.init.text' section will be freed. We need to
	 * disable the kprobes that have been inserted into those sections.
*/
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry(p, head, hlist)
if (within_module_init((unsigned long)p->addr, mod) ||
(checkcore &&
within_module_core((unsigned long)p->addr, mod))) {
/*
				 * The vaddr at which this probe is installed will soon
				 * be vfreed but not synced to disk. Hence,
				 * disarming the breakpoint isn't needed.
*
* Note, this will also move any optimized probes
* that are pending to be removed from their
* corresponding lists to the 'freeing_list' and
* will not be touched by the delayed
* kprobe_optimizer() work handler.
*/
kill_kprobe(p);
}
}
if (val == MODULE_STATE_GOING)
remove_module_kprobe_blacklist(mod);
return NOTIFY_DONE;
}
static struct notifier_block kprobe_module_nb = {
.notifier_call = kprobes_module_callback,
.priority = 0
};
static int kprobe_register_module_notifier(void)
{
return register_module_notifier(&kprobe_module_nb);
}
#else
static int kprobe_register_module_notifier(void)
{
return 0;
}
#endif /* CONFIG_MODULES */
void kprobe_free_init_mem(void)
{
void *start = (void *)(&__init_begin);
void *end = (void *)(&__init_end);
struct hlist_head *head;
struct kprobe *p;
int i;
guard(mutex)(&kprobe_mutex);
/* Kill all kprobes on initmem because the target code has been freed. */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry(p, head, hlist) {
if (start <= (void *)p->addr && (void *)p->addr < end)
kill_kprobe(p);
}
}
}
static int __init init_kprobes(void)
{
int i, err;
/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
for (i = 0; i < KPROBE_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&kprobe_table[i]);
err = populate_kprobe_blacklist(__start_kprobe_blacklist,
__stop_kprobe_blacklist);
if (err)
pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
if (kretprobe_blacklist_size) {
/* lookup the function address from its name */
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
kretprobe_blacklist[i].addr =
kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
if (!kretprobe_blacklist[i].addr)
pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
kretprobe_blacklist[i].name);
}
}
/* By default, kprobes are armed */
kprobes_all_disarmed = false;
#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
/* Init 'kprobe_optinsn_slots' for allocation */
kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
err = arch_init_kprobes();
if (!err)
err = register_die_notifier(&kprobe_exceptions_nb);
if (!err)
err = kprobe_register_module_notifier();
kprobes_initialized = (err == 0);
kprobe_sysctls_init();
return err;
}
early_initcall(init_kprobes);
#if defined(CONFIG_OPTPROBES)
static int __init init_optprobes(void)
{
/*
* Enable kprobe optimization - this kicks the optimizer which
* depends on synchronize_rcu_tasks() and ksoftirqd, that is
* not spawned in early initcall. So delay the optimization.
*/
optimize_all_kprobes();
return 0;
}
subsys_initcall(init_optprobes);
#endif
#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
const char *sym, int offset, char *modname, struct kprobe *pp)
{
char *kprobe_type;
void *addr = p->addr;
if (p->pre_handler == pre_handler_kretprobe)
kprobe_type = "r";
else
kprobe_type = "k";
if (!kallsyms_show_value(pi->file->f_cred))
addr = NULL;
if (sym)
seq_printf(pi, "%px %s %s+0x%x %s ",
addr, kprobe_type, sym, offset,
(modname ? modname : " "));
else /* try to use %pS */
seq_printf(pi, "%px %s %pS ",
addr, kprobe_type, p->addr);
if (!pp)
pp = p;
seq_printf(pi, "%s%s%s%s\n",
(kprobe_gone(p) ? "[GONE]" : ""),
((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
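/*
 * For illustration, the resulting lines in the debugfs 'list' file look
 * roughly like the following (addresses are shown as zeroes when kallsyms
 * values are hidden from the reader):
 *
 *	ffffffff812a4c30 k vfs_read+0x0   [FTRACE]
 *	0000000000000000 r kernel_clone+0x0   [DISABLED]
 */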
static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}
static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
(*pos)++;
if (*pos >= KPROBE_TABLE_SIZE)
return NULL;
return pos;
}
static void kprobe_seq_stop(struct seq_file *f, void *v)
{
/* Nothing to do */
}
static int show_kprobe_addr(struct seq_file *pi, void *v)
{
struct hlist_head *head;
struct kprobe *p, *kp;
const char *sym;
unsigned int i = *(loff_t *) v;
unsigned long offset = 0;
char *modname, namebuf[KSYM_NAME_LEN];
head = &kprobe_table[i];
preempt_disable();
hlist_for_each_entry_rcu(p, head, hlist) {
sym = kallsyms_lookup((unsigned long)p->addr, NULL,
&offset, &modname, namebuf);
if (kprobe_aggrprobe(p)) {
list_for_each_entry_rcu(kp, &p->list, list)
report_probe(pi, kp, sym, offset, modname, p);
} else
report_probe(pi, p, sym, offset, modname, NULL);
}
preempt_enable();
return 0;
}
static const struct seq_operations kprobes_sops = {
.start = kprobe_seq_start,
.next = kprobe_seq_next,
.stop = kprobe_seq_stop,
.show = show_kprobe_addr
};
DEFINE_SEQ_ATTRIBUTE(kprobes);
/* kprobes/blacklist -- shows which functions cannot be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&kprobe_mutex);
return seq_list_start(&kprobe_blacklist, *pos);
}
static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
return seq_list_next(v, &kprobe_blacklist, pos);
}
static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
struct kprobe_blacklist_entry *ent =
list_entry(v, struct kprobe_blacklist_entry, list);
/*
	 * If '/proc/kallsyms' is not showing kernel addresses, we won't
* show them here either.
*/
if (!kallsyms_show_value(m->file->f_cred))
seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
(void *)ent->start_addr);
else
seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
(void *)ent->end_addr, (void *)ent->start_addr);
return 0;
}
static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
{
mutex_unlock(&kprobe_mutex);
}
static const struct seq_operations kprobe_blacklist_sops = {
.start = kprobe_blacklist_seq_start,
.next = kprobe_blacklist_seq_next,
.stop = kprobe_blacklist_seq_stop,
.show = kprobe_blacklist_seq_show,
};
DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
static int arm_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
guard(mutex)(&kprobe_mutex);
/* If kprobes are armed, just return */
if (!kprobes_all_disarmed)
return 0;
/*
* optimize_kprobe() called by arm_kprobe() checks
* kprobes_all_disarmed, so set kprobes_all_disarmed before
* arm_kprobe.
*/
kprobes_all_disarmed = false;
/* Arming kprobes doesn't optimize kprobe itself */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
/* Arm all kprobes on a best-effort basis */
hlist_for_each_entry(p, head, hlist) {
if (!kprobe_disabled(p)) {
err = arm_kprobe(p);
if (err) {
errors++;
ret = err;
}
total++;
}
}
}
if (errors)
pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
errors, total);
else
pr_info("Kprobes globally enabled\n");
return ret;
}
static int disarm_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
guard(mutex)(&kprobe_mutex);
/* If kprobes are already disarmed, just return */
if (kprobes_all_disarmed)
return 0;
kprobes_all_disarmed = true;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
/* Disarm all kprobes on a best-effort basis */
hlist_for_each_entry(p, head, hlist) {
if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
err = disarm_kprobe(p, false);
if (err) {
errors++;
ret = err;
}
total++;
}
}
}
if (errors)
pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
errors, total);
else
pr_info("Kprobes globally disabled\n");
	/* Wait until the optimizer has finished disarming all kprobes. */
wait_for_kprobe_optimizer_locked();
return ret;
}
/*
* XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when it
 * becomes available.
*/
static ssize_t read_enabled_file_bool(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[3];
if (!kprobes_all_disarmed)
buf[0] = '1';
else
buf[0] = '0';
buf[1] = '\n';
buf[2] = 0x00;
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_file_bool(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
bool enable;
int ret;
ret = kstrtobool_from_user(user_buf, count, &enable);
if (ret)
return ret;
ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
if (ret)
return ret;
return count;
}
static const struct file_operations fops_kp = {
.read = read_enabled_file_bool,
.write = write_enabled_file_bool,
.llseek = default_llseek,
};
static int __init debugfs_kprobe_init(void)
{
struct dentry *dir;
dir = debugfs_create_dir("kprobes", NULL);
debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
debugfs_create_file("blacklist", 0400, dir, NULL,
&kprobe_blacklist_fops);
return 0;
}
late_initcall(debugfs_kprobe_init);
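/*
 * Usage sketch for the files created above (from user space, assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/kprobes/list
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	// disarm_all_kprobes()
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	// arm_all_kprobes()
 */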
#endif /* CONFIG_DEBUG_FS */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Security plug functions
*
* Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
* Copyright (C) 2016 Mellanox Technologies
* Copyright (C) 2023 Microsoft Corporation <paul@paul-moore.com>
*/
#define pr_fmt(fmt) "LSM: " fmt
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/lsm_hooks.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/xattr.h>
#include <linux/msg.h>
#include <linux/overflow.h>
#include <linux/perf_event.h>
#include <linux/fs.h>
#include <net/flow.h>
#include <net/sock.h>
#define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX
/*
* Identifier for the LSM static calls.
* HOOK is an LSM hook as defined in linux/lsm_hookdefs.h
 * IDX is the index of the static call. 0 <= IDX < MAX_LSM_COUNT
*/
#define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX
/*
* Call the macro M for each LSM hook MAX_LSM_COUNT times.
*/
#define LSM_LOOP_UNROLL(M, ...) \
do { \
UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) \
} while (0)
#define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__)
/*
* These are descriptions of the reasons that can be passed to the
* security_locked_down() LSM hook. Placing this array here allows
* all security modules to use the same descriptions for auditing
* purposes.
*/
const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = {
[LOCKDOWN_NONE] = "none",
[LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
[LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
[LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
[LOCKDOWN_KEXEC] = "kexec of unsigned images",
[LOCKDOWN_HIBERNATION] = "hibernation",
[LOCKDOWN_PCI_ACCESS] = "direct PCI access",
[LOCKDOWN_IOPORT] = "raw io port access",
[LOCKDOWN_MSR] = "raw MSR access",
[LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
[LOCKDOWN_DEVICE_TREE] = "modifying device tree contents",
[LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
[LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
[LOCKDOWN_DEBUGFS] = "debugfs access",
[LOCKDOWN_XMON_WR] = "xmon write access",
[LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
[LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
[LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection",
[LOCKDOWN_INTEGRITY_MAX] = "integrity",
[LOCKDOWN_KCORE] = "/proc/kcore access",
[LOCKDOWN_KPROBES] = "use of kprobes",
[LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
[LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
[LOCKDOWN_PERF] = "unsafe use of perf",
[LOCKDOWN_TRACEFS] = "use of tracefs",
[LOCKDOWN_XMON_RW] = "xmon read and write access",
[LOCKDOWN_XFRM_SECRET] = "xfrm SA secret",
[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
};
static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
static struct kmem_cache *lsm_file_cache;
static struct kmem_cache *lsm_inode_cache;
char *lsm_names;
static struct lsm_blob_sizes blob_sizes __ro_after_init;
/* Boot-time LSM user choice */
static __initdata const char *chosen_lsm_order;
static __initdata const char *chosen_major_lsm;
static __initconst const char *const builtin_lsm_order = CONFIG_LSM;
/* Ordered list of LSMs to initialize. */
static __initdata struct lsm_info *ordered_lsms[MAX_LSM_COUNT + 1];
static __initdata struct lsm_info *exclusive;
#ifdef CONFIG_HAVE_STATIC_CALL
#define LSM_HOOK_TRAMP(NAME, NUM) \
&STATIC_CALL_TRAMP(LSM_STATIC_CALL(NAME, NUM))
#else
#define LSM_HOOK_TRAMP(NAME, NUM) NULL
#endif
/*
* Define static calls and static keys for each LSM hook.
*/
#define DEFINE_LSM_STATIC_CALL(NUM, NAME, RET, ...) \
DEFINE_STATIC_CALL_NULL(LSM_STATIC_CALL(NAME, NUM), \
*((RET(*)(__VA_ARGS__))NULL)); \
DEFINE_STATIC_KEY_FALSE(SECURITY_HOOK_ACTIVE_KEY(NAME, NUM));
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
LSM_DEFINE_UNROLL(DEFINE_LSM_STATIC_CALL, NAME, RET, __VA_ARGS__)
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
#undef DEFINE_LSM_STATIC_CALL
/*
* Initialise a table of static calls for each LSM hook.
* DEFINE_STATIC_CALL_NULL invocation above generates a key (STATIC_CALL_KEY)
* and a trampoline (STATIC_CALL_TRAMP) which are used to call
* __static_call_update when updating the static call.
*
 * The static calls table is used by early LSMs; some architectures can fault
 * on unaligned accesses, and the fault handling code may not be ready by then.
 * Thus, the static calls table should be aligned to avoid any unhandled faults
 * in early init.
*/
struct lsm_static_calls_table
static_calls_table __ro_after_init __aligned(sizeof(u64)) = {
#define INIT_LSM_STATIC_CALL(NUM, NAME) \
(struct lsm_static_call) { \
.key = &STATIC_CALL_KEY(LSM_STATIC_CALL(NAME, NUM)), \
.trampoline = LSM_HOOK_TRAMP(NAME, NUM), \
.active = &SECURITY_HOOK_ACTIVE_KEY(NAME, NUM), \
},
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
.NAME = { \
LSM_DEFINE_UNROLL(INIT_LSM_STATIC_CALL, NAME) \
},
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
#undef INIT_LSM_STATIC_CALL
};
static __initdata bool debug;
#define init_debug(...) \
do { \
if (debug) \
pr_info(__VA_ARGS__); \
} while (0)
static bool __init is_enabled(struct lsm_info *lsm)
{
if (!lsm->enabled)
return false;
return *lsm->enabled;
}
/* Mark an LSM's enabled flag. */
static int lsm_enabled_true __initdata = 1;
static int lsm_enabled_false __initdata = 0;
static void __init set_enabled(struct lsm_info *lsm, bool enabled)
{
/*
* When an LSM hasn't configured an enable variable, we can use
* a hard-coded location for storing the default enabled state.
*/
if (!lsm->enabled) {
if (enabled)
lsm->enabled = &lsm_enabled_true;
else
lsm->enabled = &lsm_enabled_false;
} else if (lsm->enabled == &lsm_enabled_true) {
if (!enabled)
lsm->enabled = &lsm_enabled_false;
} else if (lsm->enabled == &lsm_enabled_false) {
if (enabled)
lsm->enabled = &lsm_enabled_true;
} else {
*lsm->enabled = enabled;
}
}
/* Is an LSM already listed in the ordered LSMs list? */
static bool __init exists_ordered_lsm(struct lsm_info *lsm)
{
struct lsm_info **check;
for (check = ordered_lsms; *check; check++)
if (*check == lsm)
return true;
return false;
}
/* Append an LSM to the list of ordered LSMs to initialize. */
static int last_lsm __initdata;
static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
{
/* Ignore duplicate selections. */
if (exists_ordered_lsm(lsm))
return;
if (WARN(last_lsm == MAX_LSM_COUNT, "%s: out of LSM static calls!?\n", from))
return;
/* Enable this LSM, if it is not already set. */
if (!lsm->enabled)
lsm->enabled = &lsm_enabled_true;
ordered_lsms[last_lsm++] = lsm;
init_debug("%s ordered: %s (%s)\n", from, lsm->name,
is_enabled(lsm) ? "enabled" : "disabled");
}
/* Is an LSM allowed to be initialized? */
static bool __init lsm_allowed(struct lsm_info *lsm)
{
/* Skip if the LSM is disabled. */
if (!is_enabled(lsm))
return false;
/* Not allowed if another exclusive LSM already initialized. */
if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
init_debug("exclusive disabled: %s\n", lsm->name);
return false;
}
return true;
}
static void __init lsm_set_blob_size(int *need, int *lbs)
{
int offset;
if (*need <= 0)
return;
offset = ALIGN(*lbs, sizeof(void *));
*lbs = offset + *need;
*need = offset;
}
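/*
 * Worked example (illustrative numbers, 64-bit kernel): if the running total
 * in '*lbs' is 12 and a module requests 8 bytes via '*need', then:
 *
 *	offset = ALIGN(12, sizeof(void *)) = 16;   // module's data starts here
 *	*lbs   = 16 + 8 = 24;                      // total blob size grows
 *	*need  = 16;                               // offset handed back to module
 */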
static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
{
if (!needed)
return;
lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
lsm_set_blob_size(&needed->lbs_ib, &blob_sizes.lbs_ib);
/*
* The inode blob gets an rcu_head in addition to
* what the modules might need.
*/
if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
blob_sizes.lbs_inode = sizeof(struct rcu_head);
lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
lsm_set_blob_size(&needed->lbs_key, &blob_sizes.lbs_key);
lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
lsm_set_blob_size(&needed->lbs_perf_event, &blob_sizes.lbs_perf_event);
lsm_set_blob_size(&needed->lbs_sock, &blob_sizes.lbs_sock);
lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
lsm_set_blob_size(&needed->lbs_tun_dev, &blob_sizes.lbs_tun_dev);
lsm_set_blob_size(&needed->lbs_xattr_count,
&blob_sizes.lbs_xattr_count);
lsm_set_blob_size(&needed->lbs_bdev, &blob_sizes.lbs_bdev);
lsm_set_blob_size(&needed->lbs_bpf_map, &blob_sizes.lbs_bpf_map);
lsm_set_blob_size(&needed->lbs_bpf_prog, &blob_sizes.lbs_bpf_prog);
lsm_set_blob_size(&needed->lbs_bpf_token, &blob_sizes.lbs_bpf_token);
}
/* Prepare LSM for initialization. */
static void __init prepare_lsm(struct lsm_info *lsm)
{
int enabled = lsm_allowed(lsm);
/* Record enablement (to handle any following exclusive LSMs). */
set_enabled(lsm, enabled);
/* If enabled, do pre-initialization work. */
if (enabled) {
if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
exclusive = lsm;
init_debug("exclusive chosen: %s\n", lsm->name);
}
lsm_set_blob_sizes(lsm->blobs);
}
}
/* Initialize a given LSM, if it is enabled. */
static void __init initialize_lsm(struct lsm_info *lsm)
{
if (is_enabled(lsm)) {
int ret;
init_debug("initializing %s\n", lsm->name);
ret = lsm->init();
WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
}
}
/*
* Current index to use while initializing the lsm id list.
*/
u32 lsm_active_cnt __ro_after_init;
const struct lsm_id *lsm_idlist[MAX_LSM_COUNT];
/* Populate ordered LSMs list from comma-separated LSM name list. */
static void __init ordered_lsm_parse(const char *order, const char *origin)
{
struct lsm_info *lsm;
char *sep, *name, *next;
/* LSM_ORDER_FIRST is always first. */
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (lsm->order == LSM_ORDER_FIRST)
append_ordered_lsm(lsm, " first");
}
/* Process "security=", if given. */
if (chosen_major_lsm) {
struct lsm_info *major;
/*
* To match the original "security=" behavior, this
* explicitly does NOT fallback to another Legacy Major
* if the selected one was separately disabled: disable
* all non-matching Legacy Major LSMs.
*/
for (major = __start_lsm_info; major < __end_lsm_info;
major++) {
if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
strcmp(major->name, chosen_major_lsm) != 0) {
set_enabled(major, false);
init_debug("security=%s disabled: %s (only one legacy major LSM)\n",
chosen_major_lsm, major->name);
}
}
}
sep = kstrdup(order, GFP_KERNEL);
next = sep;
/* Walk the list, looking for matching LSMs. */
while ((name = strsep(&next, ",")) != NULL) {
bool found = false;
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (strcmp(lsm->name, name) == 0) {
if (lsm->order == LSM_ORDER_MUTABLE)
append_ordered_lsm(lsm, origin);
found = true;
}
}
if (!found)
init_debug("%s ignored: %s (not built into kernel)\n",
origin, name);
}
/* Process "security=", if given. */
if (chosen_major_lsm) {
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (exists_ordered_lsm(lsm))
continue;
if (strcmp(lsm->name, chosen_major_lsm) == 0)
append_ordered_lsm(lsm, "security=");
}
}
/* LSM_ORDER_LAST is always last. */
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (lsm->order == LSM_ORDER_LAST)
append_ordered_lsm(lsm, " last");
}
/* Disable all LSMs not in the ordered list. */
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (exists_ordered_lsm(lsm))
continue;
set_enabled(lsm, false);
init_debug("%s skipped: %s (not in requested order)\n",
origin, lsm->name);
}
kfree(sep);
}
static void __init lsm_static_call_init(struct security_hook_list *hl)
{
struct lsm_static_call *scall = hl->scalls;
int i;
for (i = 0; i < MAX_LSM_COUNT; i++) {
/* Update the first static call that is not used yet */
if (!scall->hl) {
__static_call_update(scall->key, scall->trampoline,
hl->hook.lsm_func_addr);
scall->hl = hl;
static_branch_enable(scall->active);
return;
}
scall++;
}
panic("%s - Ran out of static slots.\n", __func__);
}
static void __init lsm_early_cred(struct cred *cred);
static void __init lsm_early_task(struct task_struct *task);
static int lsm_append(const char *new, char **result);
static void __init report_lsm_order(void)
{
struct lsm_info **lsm, *early;
int first = 0;
pr_info("initializing lsm=");
/* Report each enabled LSM name, comma separated. */
for (early = __start_early_lsm_info;
early < __end_early_lsm_info; early++)
if (is_enabled(early))
pr_cont("%s%s", first++ == 0 ? "" : ",", early->name);
for (lsm = ordered_lsms; *lsm; lsm++)
if (is_enabled(*lsm))
pr_cont("%s%s", first++ == 0 ? "" : ",", (*lsm)->name);
pr_cont("\n");
}
static void __init ordered_lsm_init(void)
{
struct lsm_info **lsm;
if (chosen_lsm_order) {
if (chosen_major_lsm) {
pr_warn("security=%s is ignored because it is superseded by lsm=%s\n",
chosen_major_lsm, chosen_lsm_order);
chosen_major_lsm = NULL;
}
ordered_lsm_parse(chosen_lsm_order, "cmdline");
} else
ordered_lsm_parse(builtin_lsm_order, "builtin");
for (lsm = ordered_lsms; *lsm; lsm++)
prepare_lsm(*lsm);
report_lsm_order();
init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
init_debug("file blob size = %d\n", blob_sizes.lbs_file);
init_debug("ib blob size = %d\n", blob_sizes.lbs_ib);
init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
#ifdef CONFIG_KEYS
init_debug("key blob size = %d\n", blob_sizes.lbs_key);
#endif /* CONFIG_KEYS */
init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
init_debug("sock blob size = %d\n", blob_sizes.lbs_sock);
init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
init_debug("perf event blob size = %d\n", blob_sizes.lbs_perf_event);
init_debug("task blob size = %d\n", blob_sizes.lbs_task);
init_debug("tun device blob size = %d\n", blob_sizes.lbs_tun_dev);
init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count);
init_debug("bdev blob size = %d\n", blob_sizes.lbs_bdev);
init_debug("bpf map blob size = %d\n", blob_sizes.lbs_bpf_map);
init_debug("bpf prog blob size = %d\n", blob_sizes.lbs_bpf_prog);
init_debug("bpf token blob size = %d\n", blob_sizes.lbs_bpf_token);
/*
* Create any kmem_caches needed for blobs
*/
if (blob_sizes.lbs_file)
lsm_file_cache = kmem_cache_create("lsm_file_cache",
blob_sizes.lbs_file, 0,
SLAB_PANIC, NULL);
if (blob_sizes.lbs_inode)
lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
blob_sizes.lbs_inode, 0,
SLAB_PANIC, NULL);
lsm_early_cred((struct cred *) current->cred);
lsm_early_task(current);
for (lsm = ordered_lsms; *lsm; lsm++)
initialize_lsm(*lsm);
}
int __init early_security_init(void)
{
struct lsm_info *lsm;
for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
if (!lsm->enabled)
lsm->enabled = &lsm_enabled_true;
prepare_lsm(lsm);
initialize_lsm(lsm);
}
return 0;
}
/**
* security_init - initializes the security framework
*
* This should be called early in the kernel initialization sequence.
*/
int __init security_init(void)
{
struct lsm_info *lsm;
init_debug("legacy security=%s\n", chosen_major_lsm ? : " *unspecified*");
init_debug(" CONFIG_LSM=%s\n", builtin_lsm_order);
init_debug("boot arg lsm=%s\n", chosen_lsm_order ? : " *unspecified*");
/*
* Append the names of the early LSM modules now that kmalloc() is
* available
*/
for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
init_debug(" early started: %s (%s)\n", lsm->name,
is_enabled(lsm) ? "enabled" : "disabled");
if (lsm->enabled)
lsm_append(lsm->name, &lsm_names);
}
/* Load LSMs in specified order. */
ordered_lsm_init();
return 0;
}
/* Save user chosen LSM */
static int __init choose_major_lsm(char *str)
{
chosen_major_lsm = str;
return 1;
}
__setup("security=", choose_major_lsm);
/* Explicitly choose LSM initialization order. */
static int __init choose_lsm_order(char *str)
{
chosen_lsm_order = str;
return 1;
}
__setup("lsm=", choose_lsm_order);
/* Enable LSM order debugging. */
static int __init enable_debug(char *str)
{
debug = true;
return 1;
}
__setup("lsm.debug", enable_debug);
static bool match_last_lsm(const char *list, const char *lsm)
{
const char *last;
if (WARN_ON(!list || !lsm))
return false;
last = strrchr(list, ',');
if (last)
/* Pass the comma, strcmp() will check for '\0' */
last++;
else
last = list;
return !strcmp(last, lsm);
}
static int lsm_append(const char *new, char **result)
{
char *cp;
if (*result == NULL) {
*result = kstrdup(new, GFP_KERNEL);
if (*result == NULL)
return -ENOMEM;
} else {
/* Check if it is the last registered name */
if (match_last_lsm(*result, new))
return 0;
cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
if (cp == NULL)
return -ENOMEM;
kfree(*result);
*result = cp;
}
return 0;
}
/**
 * security_add_hooks - Add a module's hooks to the hook lists.
* @hooks: the hooks to add
* @count: the number of hooks to add
* @lsmid: the identification information for the security module
*
* Each LSM has to register its hooks with the infrastructure.
*/
void __init security_add_hooks(struct security_hook_list *hooks, int count,
const struct lsm_id *lsmid)
{
int i;
/*
* A security module may call security_add_hooks() more
* than once during initialization, and LSM initialization
* is serialized. Landlock is one such case.
* Look at the previous entry, if there is one, for duplication.
*/
if (lsm_active_cnt == 0 || lsm_idlist[lsm_active_cnt - 1] != lsmid) {
if (lsm_active_cnt >= MAX_LSM_COUNT)
panic("%s Too many LSMs registered.\n", __func__);
lsm_idlist[lsm_active_cnt++] = lsmid;
}
for (i = 0; i < count; i++) {
hooks[i].lsmid = lsmid;
lsm_static_call_init(&hooks[i]);
}
/*
	 * Don't try to append during early_security_init(); we'll come back
* and fix this up afterwards.
*/
if (slab_is_available()) {
if (lsm_append(lsmid->name, &lsm_names) < 0)
panic("%s - Cannot get early memory.\n", __func__);
}
}
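/*
 * Illustrative sketch of the registration pattern (the hook function, hook
 * table and lsm_id below are example names; a real LSM defines these in its
 * own initialization code and uses its assigned LSM ID):
 *
 *	static int example_file_open(struct file *file)
 *	{
 *		return 0;
 *	}
 *
 *	static struct security_hook_list example_hooks[] __ro_after_init = {
 *		LSM_HOOK_INIT(file_open, example_file_open),
 *	};
 *
 *	static const struct lsm_id example_lsmid = {
 *		.name	= "example",
 *		.id	= LSM_ID_UNDEF,	// placeholder for this sketch
 *	};
 *
 *	// called from the LSM's __init function:
 *	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
 *			   &example_lsmid);
 */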
int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
event, data);
}
EXPORT_SYMBOL(call_blocking_lsm_notifier);
int register_blocking_lsm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
nb);
}
EXPORT_SYMBOL(register_blocking_lsm_notifier);
int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
nb);
}
EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
/**
* lsm_blob_alloc - allocate a composite blob
* @dest: the destination for the blob
* @size: the size of the blob
* @gfp: allocation type
*
* Allocate a blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp)
{
if (size == 0) {
*dest = NULL;
return 0;
}
*dest = kzalloc(size, gfp);
if (*dest == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_cred_alloc - allocate a composite cred blob
* @cred: the cred that needs a blob
* @gfp: allocation type
*
* Allocate the cred blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
{
return lsm_blob_alloc(&cred->security, blob_sizes.lbs_cred, gfp);
}
/**
* lsm_early_cred - during initialization allocate a composite cred blob
* @cred: the cred that needs a blob
*
* Allocate the cred blob for all the modules
*/
static void __init lsm_early_cred(struct cred *cred)
{
int rc = lsm_cred_alloc(cred, GFP_KERNEL);
if (rc)
panic("%s: Early cred alloc failed.\n", __func__);
}
/**
* lsm_file_alloc - allocate a composite file blob
* @file: the file that needs a blob
*
* Allocate the file blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_file_alloc(struct file *file)
{
if (!lsm_file_cache) {
file->f_security = NULL;
return 0;
}
file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
if (file->f_security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_inode_alloc - allocate a composite inode blob
* @inode: the inode that needs a blob
* @gfp: allocation flags
*
* Allocate the inode blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_inode_alloc(struct inode *inode, gfp_t gfp)
{
if (!lsm_inode_cache) {
inode->i_security = NULL;
return 0;
}
inode->i_security = kmem_cache_zalloc(lsm_inode_cache, gfp);
if (inode->i_security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_task_alloc - allocate a composite task blob
* @task: the task that needs a blob
*
* Allocate the task blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_task_alloc(struct task_struct *task)
{
return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL);
}
/**
* lsm_ipc_alloc - allocate a composite ipc blob
* @kip: the ipc that needs a blob
*
* Allocate the ipc blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
{
return lsm_blob_alloc(&kip->security, blob_sizes.lbs_ipc, GFP_KERNEL);
}
#ifdef CONFIG_KEYS
/**
* lsm_key_alloc - allocate a composite key blob
* @key: the key that needs a blob
*
* Allocate the key blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_key_alloc(struct key *key)
{
return lsm_blob_alloc(&key->security, blob_sizes.lbs_key, GFP_KERNEL);
}
#endif /* CONFIG_KEYS */
/**
* lsm_msg_msg_alloc - allocate a composite msg_msg blob
* @mp: the msg_msg that needs a blob
*
 * Allocate the msg_msg blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_msg_msg_alloc(struct msg_msg *mp)
{
return lsm_blob_alloc(&mp->security, blob_sizes.lbs_msg_msg,
GFP_KERNEL);
}
/**
* lsm_bdev_alloc - allocate a composite block_device blob
* @bdev: the block_device that needs a blob
*
* Allocate the block_device blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_bdev_alloc(struct block_device *bdev)
{
return lsm_blob_alloc(&bdev->bd_security, blob_sizes.lbs_bdev,
GFP_KERNEL);
}
#ifdef CONFIG_BPF_SYSCALL
/**
* lsm_bpf_map_alloc - allocate a composite bpf_map blob
* @map: the bpf_map that needs a blob
*
* Allocate the bpf_map blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_bpf_map_alloc(struct bpf_map *map)
{
return lsm_blob_alloc(&map->security, blob_sizes.lbs_bpf_map, GFP_KERNEL);
}
/**
* lsm_bpf_prog_alloc - allocate a composite bpf_prog blob
* @prog: the bpf_prog that needs a blob
*
* Allocate the bpf_prog blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_bpf_prog_alloc(struct bpf_prog *prog)
{
return lsm_blob_alloc(&prog->aux->security, blob_sizes.lbs_bpf_prog, GFP_KERNEL);
}
/**
* lsm_bpf_token_alloc - allocate a composite bpf_token blob
* @token: the bpf_token that needs a blob
*
* Allocate the bpf_token blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_bpf_token_alloc(struct bpf_token *token)
{
return lsm_blob_alloc(&token->security, blob_sizes.lbs_bpf_token, GFP_KERNEL);
}
#endif /* CONFIG_BPF_SYSCALL */
/**
* lsm_early_task - during initialization allocate a composite task blob
* @task: the task that needs a blob
*
* Allocate the task blob for all the modules
*/
static void __init lsm_early_task(struct task_struct *task)
{
int rc = lsm_task_alloc(task);
if (rc)
panic("%s: Early task alloc failed.\n", __func__);
}
/**
* lsm_superblock_alloc - allocate a composite superblock blob
* @sb: the superblock that needs a blob
*
* Allocate the superblock blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_superblock_alloc(struct super_block *sb)
{
return lsm_blob_alloc(&sb->s_security, blob_sizes.lbs_superblock,
GFP_KERNEL);
}
/**
* lsm_fill_user_ctx - Fill a user space lsm_ctx structure
* @uctx: a userspace LSM context to be filled
* @uctx_len: available uctx size (input), used uctx size (output)
* @val: the new LSM context value
* @val_len: the size of the new LSM context value
* @id: LSM id
* @flags: LSM defined flags
*
 * Fill all of the fields in a userspace lsm_ctx structure. If @uctx is NULL,
 * simply calculate the required size to output via @uctx_len and return
 * success.
*
* Returns 0 on success, -E2BIG if userspace buffer is not large enough,
* -EFAULT on a copyout error, -ENOMEM if memory can't be allocated.
*/
int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len,
void *val, size_t val_len,
u64 id, u64 flags)
{
struct lsm_ctx *nctx = NULL;
size_t nctx_len;
int rc = 0;
nctx_len = ALIGN(struct_size(nctx, ctx, val_len), sizeof(void *));
if (nctx_len > *uctx_len) {
rc = -E2BIG;
goto out;
}
/* no buffer - return success/0 and set @uctx_len to the req size */
if (!uctx)
goto out;
nctx = kzalloc(nctx_len, GFP_KERNEL);
if (nctx == NULL) {
rc = -ENOMEM;
goto out;
}
nctx->id = id;
nctx->flags = flags;
nctx->len = nctx_len;
nctx->ctx_len = val_len;
memcpy(nctx->ctx, val, val_len);
if (copy_to_user(uctx, nctx, nctx_len))
rc = -EFAULT;
out:
kfree(nctx);
*uctx_len = nctx_len;
return rc;
}
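/*
 * Illustrative sketch of calling the helper above (variable names are
 * examples; a real caller is an LSM's getselfattr-style code):
 *
 *	u32 avail = *size;	// caller-supplied buffer size
 *	int rc;
 *
 *	rc = lsm_fill_user_ctx(uctx, &avail, value, value_len,
 *			       LSM_ID_UNDEF, 0);
 *	// rc == 0: 'avail' now holds the bytes used (or, if uctx was NULL,
 *	// the size required); rc == -E2BIG: the buffer was too small and
 *	// 'avail' holds the required size.
 */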
/*
* The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
* can be accessed with:
*
* LSM_RET_DEFAULT(<hook_name>)
*
* The macros below define static constants for the default value of each
* LSM hook.
*/
#define LSM_RET_DEFAULT(NAME) (NAME##_default)
#define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME)
#define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \
static const int __maybe_unused LSM_RET_DEFAULT(NAME) = (DEFAULT);
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME)
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
/*
* Hook list operation macros.
*
* call_void_hook:
* This is a hook that does not return a value.
*
* call_int_hook:
* This is a hook that returns a value.
*/
#define __CALL_STATIC_VOID(NUM, HOOK, ...) \
do { \
if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \
static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \
} \
} while (0);
#define call_void_hook(HOOK, ...) \
do { \
LSM_LOOP_UNROLL(__CALL_STATIC_VOID, HOOK, __VA_ARGS__); \
} while (0)
#define __CALL_STATIC_INT(NUM, R, HOOK, LABEL, ...) \
do { \
if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \
R = static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \
if (R != LSM_RET_DEFAULT(HOOK)) \
goto LABEL; \
} \
} while (0);
#define call_int_hook(HOOK, ...) \
({ \
__label__ OUT; \
int RC = LSM_RET_DEFAULT(HOOK); \
\
LSM_LOOP_UNROLL(__CALL_STATIC_INT, RC, HOOK, OUT, __VA_ARGS__); \
OUT: \
RC; \
})
#define lsm_for_each_hook(scall, NAME) \
for (scall = static_calls_table.NAME; \
scall - static_calls_table.NAME < MAX_LSM_COUNT; scall++) \
if (static_key_enabled(&scall->active->key))
/* Security operations */
/**
* security_binder_set_context_mgr() - Check if becoming binder ctx mgr is ok
* @mgr: task credentials of current binder process
*
* Check whether @mgr is allowed to be the binder context manager.
*
* Return: Return 0 if permission is granted.
*/
int security_binder_set_context_mgr(const struct cred *mgr)
{
return call_int_hook(binder_set_context_mgr, mgr);
}
/**
* security_binder_transaction() - Check if a binder transaction is allowed
* @from: sending process
* @to: receiving process
*
* Check whether @from is allowed to invoke a binder transaction call to @to.
*
* Return: Returns 0 if permission is granted.
*/
int security_binder_transaction(const struct cred *from,
const struct cred *to)
{
return call_int_hook(binder_transaction, from, to);
}
/**
* security_binder_transfer_binder() - Check if a binder transfer is allowed
* @from: sending process
* @to: receiving process
*
* Check whether @from is allowed to transfer a binder reference to @to.
*
* Return: Returns 0 if permission is granted.
*/
int security_binder_transfer_binder(const struct cred *from,
const struct cred *to)
{
return call_int_hook(binder_transfer_binder, from, to);
}
/**
* security_binder_transfer_file() - Check if a binder file xfer is allowed
* @from: sending process
* @to: receiving process
* @file: file being transferred
*
* Check whether @from is allowed to transfer @file to @to.
*
* Return: Returns 0 if permission is granted.
*/
int security_binder_transfer_file(const struct cred *from,
const struct cred *to, const struct file *file)
{
return call_int_hook(binder_transfer_file, from, to, file);
}
/**
* security_ptrace_access_check() - Check if tracing is allowed
* @child: target process
* @mode: PTRACE_MODE flags
*
* Check permission before allowing the current process to trace the @child
* process. Security modules may also want to perform a process tracing check
 * during an execve in the bprm_set_creds hook of binprm_security_ops if the
* process is being traced and its security attributes would be changed by the
* execve.
*
* Return: Returns 0 if permission is granted.
*/
int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
return call_int_hook(ptrace_access_check, child, mode);
}
/**
* security_ptrace_traceme() - Check if tracing is allowed
* @parent: tracing process
*
* Check that the @parent process has sufficient permission to trace the
* current process before allowing the current process to present itself to the
* @parent process for tracing.
*
* Return: Returns 0 if permission is granted.
*/
int security_ptrace_traceme(struct task_struct *parent)
{
return call_int_hook(ptrace_traceme, parent);
}
/**
* security_capget() - Get the capability sets for a process
* @target: target process
* @effective: effective capability set
* @inheritable: inheritable capability set
* @permitted: permitted capability set
*
* Get the @effective, @inheritable, and @permitted capability sets for the
* @target process. The hook may also perform permission checking to determine
* if the current process is allowed to see the capability sets of the @target
* process.
*
* Return: Returns 0 if the capability sets were successfully obtained.
*/
int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
return call_int_hook(capget, target, effective, inheritable, permitted);
}
/**
* security_capset() - Set the capability sets for a process
* @new: new credentials for the target process
* @old: current credentials of the target process
* @effective: effective capability set
* @inheritable: inheritable capability set
* @permitted: permitted capability set
*
* Set the @effective, @inheritable, and @permitted capability sets for the
* current process.
*
* Return: Returns 0 and update @new if permission is granted.
*/
int security_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
	return call_int_hook(capset, new, old, effective, inheritable,
			     permitted);
}
/**
* security_capable() - Check if a process has the necessary capability
* @cred: credentials to examine
* @ns: user namespace
* @cap: capability requested
* @opts: capability check options
*
* Check whether the @cred credentials have the @cap capability in the user
* namespace @ns. @cap contains the capability <include/linux/capability.h>.
* @opts contains options for the capable check <include/linux/security.h>.
*
* Return: Returns 0 if the capability is granted.
*/
int security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts)
{
	return call_int_hook(capable, cred, ns, cap, opts);
}
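/*
 * Illustrative sketch (not part of this file): most kernel code reaches this
 * hook indirectly through capable()/ns_capable(), but a direct check would
 * look roughly like the following; CAP_NET_ADMIN and CAP_OPT_NONE are just
 * example values.
 *
 *	if (security_capable(current_cred(), &init_user_ns,
 *			     CAP_NET_ADMIN, CAP_OPT_NONE) == 0) {
 *		// the caller may perform the privileged operation
 *	}
 */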
/**
* security_quotactl() - Check if a quotactl() syscall is allowed for this fs
* @cmds: commands
* @type: type
* @id: id
* @sb: filesystem
*
* Check whether the quotactl syscall is allowed for this @sb.
*
* Return: Returns 0 if permission is granted.
*/
int security_quotactl(int cmds, int type, int id, const struct super_block *sb)
{
return call_int_hook(quotactl, cmds, type, id, sb);
}
/**
* security_quota_on() - Check if QUOTAON is allowed for a dentry
* @dentry: dentry
*
* Check whether QUOTAON is allowed for @dentry.
*
* Return: Returns 0 if permission is granted.
*/
int security_quota_on(struct dentry *dentry)
{
return call_int_hook(quota_on, dentry);
}
/**
* security_syslog() - Check if accessing the kernel message ring is allowed
* @type: SYSLOG_ACTION_* type
*
* Check permission before accessing the kernel message ring or changing
* logging to the console. See the syslog(2) manual page for an explanation of
* the @type values.
*
* Return: Return 0 if permission is granted.
*/
int security_syslog(int type)
{
return call_int_hook(syslog, type);
}
/**
* security_settime64() - Check if changing the system time is allowed
* @ts: new time
* @tz: timezone
*
* Check permission to change the system time, struct timespec64 is defined in
* <include/linux/time64.h> and timezone is defined in <include/linux/time.h>.
*
* Return: Returns 0 if permission is granted.
*/
int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
{
return call_int_hook(settime, ts, tz);
}
/**
* security_vm_enough_memory_mm() - Check if allocating a new mem map is allowed
* @mm: mm struct
* @pages: number of pages
*
* Check permissions for allocating a new virtual mapping. If all LSMs return
* a positive value, __vm_enough_memory() will be called with cap_sys_admin
* set. If at least one LSM returns 0 or negative, __vm_enough_memory() will be
* called with cap_sys_admin cleared.
*
* Return: Returns 0 if permission is granted by the LSM infrastructure to the
* caller.
*/
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
struct lsm_static_call *scall;
int cap_sys_admin = 1;
int rc;
/*
* The module will respond with a positive value if it thinks the __vm_enough_memory()
* call should be made with the cap_sys_admin set. If all of the modules
* agree that it should be set it will. If any module thinks it should
* not be set it won't.
*/
lsm_for_each_hook(scall, vm_enough_memory) {
rc = scall->hl->hook.vm_enough_memory(mm, pages);
if (rc <= 0) {
cap_sys_admin = 0;
break;
}
}
	return __vm_enough_memory(mm, pages, cap_sys_admin);
}
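/*
 * Minimal sketch of the LSM side of this hook (a toy example, not a real
 * module): return a positive value to vote for calling __vm_enough_memory()
 * with cap_sys_admin set, and 0 or a negative value to have it cleared.
 *
 *	static int example_vm_enough_memory(struct mm_struct *mm, long pages)
 *	{
 *		if (capable(CAP_SYS_ADMIN))
 *			return 1;	// vote for cap_sys_admin
 *		return 0;		// vote against
 *	}
 */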
/**
* security_bprm_creds_for_exec() - Prepare the credentials for exec()
* @bprm: binary program information
*
* If the setup in prepare_exec_creds did not set up @bprm->cred->security
* properly for executing @bprm->file, update the LSM's portion of
* @bprm->cred->security to be what commit_creds needs to install for the new
* program. This hook may also optionally check permissions (e.g. for
* transitions between security domains). The hook must set @bprm->secureexec
* to 1 if AT_SECURE should be set to request libc enable secure mode. @bprm
* contains the linux_binprm structure.
*
* If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is
* set. The result must be the same as without this flag even if the execution
* will never really happen and @bprm will always be dropped.
*
* This hook must not change current->cred, only @bprm->cred.
*
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
return call_int_hook(bprm_creds_for_exec, bprm);
}
/**
* security_bprm_creds_from_file() - Update linux_binprm creds based on file
* @bprm: binary program information
* @file: associated file
*
* If @file is setpcap, suid, sgid or otherwise marked to change privilege upon
* exec, update @bprm->cred to reflect that change. This is called after
* finding the binary that will be executed without an interpreter. This
* ensures that the credentials will not be derived from a script that the
* binary will need to reopen, which when reopened may end up being a completely
* different file. This hook may also optionally check permissions (e.g. for
* transitions between security domains). The hook must set @bprm->secureexec
* to 1 if AT_SECURE should be set to request libc enable secure mode. The
* hook must add to @bprm->per_clear any personality flags that should be
* cleared from current->personality. @bprm contains the linux_binprm
* structure.
*
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file)
{
return call_int_hook(bprm_creds_from_file, bprm, file);
}
/**
* security_bprm_check() - Mediate binary handler search
* @bprm: binary program information
*
* This hook mediates the point when a search for a binary handler will begin.
* It allows a check against the @bprm->cred->security value which was set in
* the preceding creds_for_exec call. The argv list and envp list are reliably
* available in @bprm. This hook may be called multiple times during a single
* execve. @bprm contains the linux_binprm structure.
*
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_check(struct linux_binprm *bprm)
{
return call_int_hook(bprm_check_security, bprm);
}
/**
* security_bprm_committing_creds() - Install creds for a process during exec()
* @bprm: binary program information
*
* Prepare to install the new security attributes of a process being
* transformed by an execve operation, based on the old credentials pointed to
* by @current->cred and the information set in @bprm->cred by the
* bprm_creds_for_exec hook. @bprm points to the linux_binprm structure. This
* hook is a good place to perform state changes on the process such as closing
* open file descriptors to which access will no longer be granted when the
* attributes are changed. This is called immediately before commit_creds().
*/
void security_bprm_committing_creds(const struct linux_binprm *bprm)
{
call_void_hook(bprm_committing_creds, bprm);
}
/**
* security_bprm_committed_creds() - Tidy up after cred install during exec()
* @bprm: binary program information
*
* Tidy up after the installation of the new security attributes of a process
* being transformed by an execve operation. The new credentials have, by this
* point, been set to @current->cred. @bprm points to the linux_binprm
* structure. This hook is a good place to perform state changes on the
* process such as clearing out non-inheritable signal state. This is called
* immediately after commit_creds().
*/
void security_bprm_committed_creds(const struct linux_binprm *bprm)
{
call_void_hook(bprm_committed_creds, bprm);
}
/**
* security_fs_context_submount() - Initialise fc->security
* @fc: new filesystem context
* @reference: dentry reference for submount/remount
*
* Fill out the ->security field for a new fs_context.
*
* Return: Returns 0 on success or negative error code on failure.
*/
int security_fs_context_submount(struct fs_context *fc, struct super_block *reference)
{
return call_int_hook(fs_context_submount, fc, reference);
}
/**
* security_fs_context_dup() - Duplicate a fs_context LSM blob
* @fc: destination filesystem context
* @src_fc: source filesystem context
*
* Allocate and attach a security structure to fc->security. This pointer is
* initialised to NULL by the caller. @fc indicates the new filesystem context.
* @src_fc indicates the original filesystem context.
*
* Return: Returns 0 on success or a negative error code on failure.
*/
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
{
return call_int_hook(fs_context_dup, fc, src_fc);
}
/**
* security_fs_context_parse_param() - Configure a filesystem context
* @fc: filesystem context
* @param: filesystem parameter
*
* Userspace provided a parameter to configure a superblock. The LSM can
* consume the parameter or return it to the caller for use elsewhere.
*
* Return: If the parameter is used by the LSM it should return 0, if it is
* returned to the caller -ENOPARAM is returned, otherwise a negative
* error code is returned.
*/
int security_fs_context_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
struct lsm_static_call *scall;
int trc;
int rc = -ENOPARAM;
lsm_for_each_hook(scall, fs_context_parse_param) {
trc = scall->hl->hook.fs_context_parse_param(fc, param);
if (trc == 0)
rc = 0;
else if (trc != -ENOPARAM)
return trc;
}
return rc;
}
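/*
 * Minimal sketch of an LSM implementation of this hook (the "examplecontext"
 * mount option and the module are hypothetical): return -ENOPARAM for
 * options the LSM does not own so the VFS and other LSMs still see them.
 *
 *	static int example_fs_context_parse_param(struct fs_context *fc,
 *						  struct fs_parameter *param)
 *	{
 *		if (strcmp(param->key, "examplecontext") != 0)
 *			return -ENOPARAM;	// not ours, hand it back
 *		// consume the value and stash it via fc->security here
 *		return 0;
 *	}
 */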
/**
* security_sb_alloc() - Allocate a super_block LSM blob
* @sb: filesystem superblock
*
* Allocate and attach a security structure to the sb->s_security field. The
* s_security field is initialized to NULL when the structure is allocated.
* @sb contains the super_block structure to be modified.
*
* Return: Returns 0 if operation was successful.
*/
int security_sb_alloc(struct super_block *sb)
{
int rc = lsm_superblock_alloc(sb);
if (unlikely(rc))
return rc;
	rc = call_int_hook(sb_alloc_security, sb);
	if (unlikely(rc))
		security_sb_free(sb);
return rc;
}
/**
* security_sb_delete() - Release super_block LSM associated objects
* @sb: filesystem superblock
*
* Release objects tied to a superblock (e.g. inodes). @sb contains the
* super_block structure being released.
*/
void security_sb_delete(struct super_block *sb)
{
call_void_hook(sb_delete, sb);
}
/**
* security_sb_free() - Free a super_block LSM blob
* @sb: filesystem superblock
*
* Deallocate and clear the sb->s_security field. @sb contains the super_block
* structure to be modified.
*/
void security_sb_free(struct super_block *sb)
{
call_void_hook(sb_free_security, sb);
kfree(sb->s_security);
sb->s_security = NULL;
}
/**
* security_free_mnt_opts() - Free memory associated with mount options
* @mnt_opts: LSM processed mount options
*
* Free memory associated with @mnt_opts.
*/
void security_free_mnt_opts(void **mnt_opts)
{
if (!*mnt_opts)
return;
	call_void_hook(sb_free_mnt_opts, *mnt_opts);
	*mnt_opts = NULL;
}
EXPORT_SYMBOL(security_free_mnt_opts);
/**
* security_sb_eat_lsm_opts() - Consume LSM mount options
* @options: mount options
* @mnt_opts: LSM processed mount options
*
* Eat (scan @options) and save them in @mnt_opts.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
{
return call_int_hook(sb_eat_lsm_opts, options, mnt_opts);
}
EXPORT_SYMBOL(security_sb_eat_lsm_opts);
/**
* security_sb_mnt_opts_compat() - Check if new mount options are allowed
* @sb: filesystem superblock
* @mnt_opts: new mount options
*
* Determine if the new mount options in @mnt_opts are allowed given the
* existing mounted filesystem at @sb, the superblock being compared.
*
* Return: Returns 0 if options are compatible.
*/
int security_sb_mnt_opts_compat(struct super_block *sb,
void *mnt_opts)
{
return call_int_hook(sb_mnt_opts_compat, sb, mnt_opts);
}
EXPORT_SYMBOL(security_sb_mnt_opts_compat);
/**
* security_sb_remount() - Verify no incompatible mount changes during remount
* @sb: filesystem superblock
* @mnt_opts: (re)mount options
*
* Extracts security system specific mount options and verifies no changes are
* being made to those options.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_remount(struct super_block *sb,
void *mnt_opts)
{
return call_int_hook(sb_remount, sb, mnt_opts);
}
EXPORT_SYMBOL(security_sb_remount);
/**
* security_sb_kern_mount() - Check if a kernel mount is allowed
* @sb: filesystem superblock
*
* Mount this @sb if allowed by permissions.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_kern_mount(const struct super_block *sb)
{
return call_int_hook(sb_kern_mount, sb);
}
/**
* security_sb_show_options() - Output the mount options for a superblock
* @m: output file
* @sb: filesystem superblock
*
* Show (print on @m) mount options for this @sb.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_sb_show_options(struct seq_file *m, struct super_block *sb)
{
return call_int_hook(sb_show_options, m, sb);
}
/**
* security_sb_statfs() - Check if accessing fs stats is allowed
* @dentry: superblock handle
*
* Check permission before obtaining filesystem statistics for the mounted
* filesystem. @dentry is a handle on the superblock for the filesystem.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_statfs(struct dentry *dentry)
{
return call_int_hook(sb_statfs, dentry);
}
/**
* security_sb_mount() - Check permission for mounting a filesystem
* @dev_name: filesystem backing device
* @path: mount point
* @type: filesystem type
* @flags: mount flags
* @data: filesystem specific data
*
* Check permission before an object specified by @dev_name is mounted on the
* mount point named by @path. For an ordinary mount, @dev_name identifies a
* device if the file system type requires a device. For a remount
* (@flags & MS_REMOUNT), @dev_name is irrelevant. For a loopback/bind mount
* (@flags & MS_BIND), @dev_name identifies the pathname of the object being
* mounted.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data)
{
return call_int_hook(sb_mount, dev_name, path, type, flags, data);
}
/**
* security_sb_umount() - Check permission for unmounting a filesystem
* @mnt: mounted filesystem
* @flags: unmount flags
*
* Check permission before the @mnt file system is unmounted.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_umount(struct vfsmount *mnt, int flags)
{
return call_int_hook(sb_umount, mnt, flags);
}
/**
* security_sb_pivotroot() - Check permissions for pivoting the rootfs
* @old_path: new location for current rootfs
* @new_path: location of the new rootfs
*
* Check permission before pivoting the root filesystem.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_pivotroot(const struct path *old_path,
const struct path *new_path)
{
return call_int_hook(sb_pivotroot, old_path, new_path);
}
/**
* security_sb_set_mnt_opts() - Set the mount options for a filesystem
* @sb: filesystem superblock
* @mnt_opts: binary mount options
* @kern_flags: kernel flags (in)
* @set_kern_flags: kernel flags (out)
*
* Set the security relevant mount options used for a superblock.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sb_set_mnt_opts(struct super_block *sb,
void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
struct lsm_static_call *scall;
int rc = mnt_opts ? -EOPNOTSUPP : LSM_RET_DEFAULT(sb_set_mnt_opts);
lsm_for_each_hook(scall, sb_set_mnt_opts) {
rc = scall->hl->hook.sb_set_mnt_opts(sb, mnt_opts, kern_flags,
set_kern_flags);
if (rc != LSM_RET_DEFAULT(sb_set_mnt_opts))
break;
}
	return rc;
}
EXPORT_SYMBOL(security_sb_set_mnt_opts);
/**
* security_sb_clone_mnt_opts() - Duplicate superblock mount options
* @oldsb: source superblock
* @newsb: destination superblock
* @kern_flags: kernel flags (in)
* @set_kern_flags: kernel flags (out)
*
* Copy all security options from a given superblock to another.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
return call_int_hook(sb_clone_mnt_opts, oldsb, newsb,
kern_flags, set_kern_flags);
}
EXPORT_SYMBOL(security_sb_clone_mnt_opts);
/**
* security_move_mount() - Check permissions for moving a mount
* @from_path: source mount point
* @to_path: destination mount point
*
* Check permission before a mount is moved.
*
* Return: Returns 0 if permission is granted.
*/
int security_move_mount(const struct path *from_path,
const struct path *to_path)
{
return call_int_hook(move_mount, from_path, to_path);
}
/**
* security_path_notify() - Check if setting a watch is allowed
* @path: file path
* @mask: event mask
* @obj_type: file path type
*
* Check permissions before setting a watch on events as defined by @mask, on
* an object at @path, whose type is defined by @obj_type.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_notify(const struct path *path, u64 mask,
unsigned int obj_type)
{
return call_int_hook(path_notify, path, mask, obj_type);
}
/**
* security_inode_alloc() - Allocate an inode LSM blob
* @inode: the inode
* @gfp: allocation flags
*
* Allocate and attach a security structure to @inode->i_security. The
* i_security field is initialized to NULL when the inode structure is
* allocated.
*
* Return: Return 0 if operation was successful.
*/
int security_inode_alloc(struct inode *inode, gfp_t gfp)
{
	int rc = lsm_inode_alloc(inode, gfp);
	if (unlikely(rc))
		return rc;
	rc = call_int_hook(inode_alloc_security, inode);
	if (unlikely(rc))
		security_inode_free(inode);
	return rc;
}
static void inode_free_by_rcu(struct rcu_head *head)
{
/* The rcu head is at the start of the inode blob */
call_void_hook(inode_free_security_rcu, head);
kmem_cache_free(lsm_inode_cache, head);
}
/**
* security_inode_free() - Free an inode's LSM blob
* @inode: the inode
*
* Release any LSM resources associated with @inode, although due to the
* inode's RCU protections it is possible that the resources will not be
* fully released until after the current RCU grace period has elapsed.
*
* It is important for LSMs to note that despite being present in a call to
* security_inode_free(), @inode may still be referenced in a VFS path walk
* and calls to security_inode_permission() may be made during, or after,
* a call to security_inode_free(). For this reason the inode->i_security
* field is released via a call_rcu() callback and any LSMs which need to
* retain inode state for use in security_inode_permission() should only
* release that state in the inode_free_security_rcu() LSM hook callback.
*/
void security_inode_free(struct inode *inode)
{
call_void_hook(inode_free_security, inode);
if (!inode->i_security)
return;
call_rcu((struct rcu_head *)inode->i_security, inode_free_by_rcu);
}
/**
* security_dentry_init_security() - Perform dentry initialization
* @dentry: the dentry to initialize
* @mode: mode used to determine resource type
* @name: name of the last path component
* @xattr_name: name of the security/LSM xattr
* @lsmctx: pointer to the resulting LSM context
*
* Compute a context for a dentry as the inode is not yet available; this is
* used by NFSv4, which has no label backed by an EA. It is important to note
* that @xattr_name does not need to be freed by the caller, it is a static
* string.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name,
const char **xattr_name,
struct lsm_context *lsmctx)
{
return call_int_hook(dentry_init_security, dentry, mode, name,
xattr_name, lsmctx);
}
EXPORT_SYMBOL(security_dentry_init_security);
/**
* security_dentry_create_files_as() - Perform dentry initialization
* @dentry: the dentry to initialize
* @mode: mode used to determine resource type
* @name: name of the last path component
* @old: creds to use for LSM context calculations
* @new: creds to modify
*
* Compute a context for a dentry as the inode is not yet available and set
* that context in the passed in creds so that new files are created using that
* context. Context is calculated using the passed in creds and not the creds
* of the caller.
*
* Return: Returns 0 on success, error on failure.
*/
int security_dentry_create_files_as(struct dentry *dentry, int mode,
const struct qstr *name,
const struct cred *old, struct cred *new)
{
return call_int_hook(dentry_create_files_as, dentry, mode,
name, old, new);
}
EXPORT_SYMBOL(security_dentry_create_files_as);
/**
* security_inode_init_security() - Initialize an inode's LSM context
* @inode: the inode
* @dir: parent directory
* @qstr: last component of the pathname
* @initxattrs: callback function to write xattrs
* @fs_data: filesystem specific data
*
* Obtain the security attribute name suffix and value to set on a newly
* created inode and set up the incore security field for the new inode. This
* hook is called by the fs code as part of the inode creation transaction and
* provides for atomic labeling of the inode, unlike the post_create/mkdir/...
* hooks called by the VFS.
*
* The hook function is expected to populate the xattrs array by calling
* lsm_get_xattr_slot() to retrieve the slots reserved by the security module
* with the lbs_xattr_count field of the lsm_blob_sizes structure. For each
* slot, the hook function should set ->name to the attribute name suffix
* (e.g. selinux), allocate ->value (it will be freed by the caller) and set
* it to the attribute value, and set ->value_len to the length of the value.
* If the security module does not use security attributes or does not wish
* to put a security attribute on this particular inode, then it should
* return -EOPNOTSUPP to skip this processing.
*
* Return: Returns 0 if the LSM successfully initialized all of the inode
* security attributes that are required, negative values otherwise.
*/
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
const initxattrs initxattrs, void *fs_data)
{
struct lsm_static_call *scall;
struct xattr *new_xattrs = NULL;
int ret = -EOPNOTSUPP, xattr_count = 0;
if (unlikely(IS_PRIVATE(inode)))
return 0;
if (!blob_sizes.lbs_xattr_count)
return 0;
if (initxattrs) {
/* Allocate +1 as terminator. */
new_xattrs = kcalloc(blob_sizes.lbs_xattr_count + 1,
sizeof(*new_xattrs), GFP_NOFS);
if (!new_xattrs)
return -ENOMEM;
}
lsm_for_each_hook(scall, inode_init_security) {
ret = scall->hl->hook.inode_init_security(inode, dir, qstr, new_xattrs,
&xattr_count);
if (ret && ret != -EOPNOTSUPP)
goto out;
/*
* As documented in lsm_hooks.h, -EOPNOTSUPP in this context
* means that the LSM is not willing to provide an xattr, not
* that it wants to signal an error. Thus, continue to invoke
* the remaining LSMs.
*/
}
/* If initxattrs() is NULL, xattr_count is zero, skip the call. */
if (!xattr_count)
goto out;
ret = initxattrs(inode, new_xattrs, fs_data);
out:
for (; xattr_count > 0; xattr_count--)
kfree(new_xattrs[xattr_count - 1].value);
kfree(new_xattrs);
return (ret == -EOPNOTSUPP) ? 0 : ret;
}
EXPORT_SYMBOL(security_inode_init_security);
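/*
 * Minimal sketch of the LSM side of this hook (the module, label and xattr
 * suffix are hypothetical), using the lsm_get_xattr_slot() interface
 * described above:
 *
 *	static int example_inode_init_security(struct inode *inode,
 *					       struct inode *dir,
 *					       const struct qstr *qstr,
 *					       struct xattr *xattrs,
 *					       int *xattr_count)
 *	{
 *		struct xattr *xattr;
 *		char *label;
 *
 *		if (!xattrs)
 *			return 0;	// caller did not ask for xattrs
 *		label = kstrdup("example_label", GFP_NOFS);
 *		if (!label)
 *			return -ENOMEM;
 *		xattr = lsm_get_xattr_slot(xattrs, xattr_count);
 *		xattr->name = "example";	// suffix after "security."
 *		xattr->value = label;		// freed by the caller
 *		xattr->value_len = strlen(label) + 1;
 *		return 0;
 *	}
 */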
/**
* security_inode_init_security_anon() - Initialize an anonymous inode
* @inode: the inode
* @name: the anonymous inode class
* @context_inode: an optional related inode
*
* Set up the incore security field for the new anonymous inode and return
* whether the inode creation is permitted by the security module or not.
*
* Return: Returns 0 on success, -EACCES if the security module denies the
* creation of this inode, or another -errno upon other errors.
*/
int security_inode_init_security_anon(struct inode *inode,
const struct qstr *name,
const struct inode *context_inode)
{
return call_int_hook(inode_init_security_anon, inode, name,
context_inode);
}
#ifdef CONFIG_SECURITY_PATH
/**
* security_path_mknod() - Check if creating a special file is allowed
* @dir: parent directory
* @dentry: new file
* @mode: new file mode
* @dev: device number
*
* Check permissions when creating a file. Note that this hook is called even
* if mknod operation is being done for a regular file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_mknod(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_mknod, dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
/**
* security_path_post_mknod() - Update inode security after reg file creation
* @idmap: idmap of the mount
* @dentry: new file
*
* Update inode security field after a regular file has been created.
*/
void security_path_post_mknod(struct mnt_idmap *idmap, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
call_void_hook(path_post_mknod, idmap, dentry);
}
/**
* security_path_mkdir() - Check if creating a new directory is allowed
* @dir: parent directory
* @dentry: new directory
* @mode: new directory mode
*
* Check permissions to create a new directory in the existing directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_mkdir(const struct path *dir, struct dentry *dentry,
umode_t mode)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_mkdir, dir, dentry, mode);
}
EXPORT_SYMBOL(security_path_mkdir);
/**
* security_path_rmdir() - Check if removing a directory is allowed
* @dir: parent directory
* @dentry: directory to remove
*
* Check the permission to remove a directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_rmdir(const struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_rmdir, dir, dentry);
}
/**
* security_path_unlink() - Check if removing a hard link is allowed
* @dir: parent directory
* @dentry: file
*
* Check the permission to remove a hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_unlink(const struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_unlink, dir, dentry);
}
EXPORT_SYMBOL(security_path_unlink);
/**
* security_path_symlink() - Check if creating a symbolic link is allowed
* @dir: parent directory
* @dentry: symbolic link
* @old_name: file pathname
*
* Check the permission to create a symbolic link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_symlink, dir, dentry, old_name);
}
/**
* security_path_link - Check if creating a hard link is allowed
* @old_dentry: existing file
* @new_dir: new parent directory
* @new_dentry: new link
*
* Check permission before creating a new hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
return 0;
return call_int_hook(path_link, old_dentry, new_dir, new_dentry);
}
/**
* security_path_rename() - Check if renaming a file is allowed
* @old_dir: parent directory of the old file
* @old_dentry: the old file
* @new_dir: parent directory of the new file
* @new_dentry: the new file
* @flags: flags
*
* Check for permission to rename a file or directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
(d_is_positive(new_dentry) &&
IS_PRIVATE(d_backing_inode(new_dentry)))))
return 0;
return call_int_hook(path_rename, old_dir, old_dentry, new_dir,
new_dentry, flags);
}
EXPORT_SYMBOL(security_path_rename);
/**
* security_path_truncate() - Check if truncating a file is allowed
* @path: file
*
* Check permission before truncating the file indicated by path. Note that
* truncation permissions may also be checked based on already opened files,
* using the security_file_truncate() hook.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_truncate(const struct path *path)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(path_truncate, path);
}
/**
* security_path_chmod() - Check if changing the file's mode is allowed
* @path: file
* @mode: new mode
*
* Check for permission to change a mode of the file @path. The new mode is
* specified in @mode which is a bitmask of constants from
* <include/uapi/linux/stat.h>.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_chmod(const struct path *path, umode_t mode)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(path_chmod, path, mode);
}
/**
* security_path_chown() - Check if changing the file's owner/group is allowed
* @path: file
* @uid: file owner
* @gid: file group
*
* Check for permission to change owner/group of a file or directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(path_chown, path, uid, gid);
}
/**
* security_path_chroot() - Check if changing the root directory is allowed
* @path: directory
*
* Check for permission to change root directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_chroot(const struct path *path)
{
return call_int_hook(path_chroot, path);
}
#endif /* CONFIG_SECURITY_PATH */
/**
* security_inode_create() - Check if creating a file is allowed
* @dir: the parent directory
* @dentry: the file being created
* @mode: requested file mode
*
* Check permission to create a regular file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_create(struct inode *dir, struct dentry *dentry,
umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_create, dir, dentry, mode);
}
EXPORT_SYMBOL_GPL(security_inode_create);
/**
* security_inode_post_create_tmpfile() - Update inode security of new tmpfile
* @idmap: idmap of the mount
* @inode: inode of the new tmpfile
*
* Update inode security data after a tmpfile has been created.
*/
void security_inode_post_create_tmpfile(struct mnt_idmap *idmap,
struct inode *inode)
{
if (unlikely(IS_PRIVATE(inode)))
return;
call_void_hook(inode_post_create_tmpfile, idmap, inode);
}
/**
* security_inode_link() - Check if creating a hard link is allowed
* @old_dentry: existing file
* @dir: new parent directory
* @new_dentry: new link
*
* Check permission before creating a new hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
return 0;
return call_int_hook(inode_link, old_dentry, dir, new_dentry);
}
/**
* security_inode_unlink() - Check if removing a hard link is allowed
* @dir: parent directory
* @dentry: file
*
* Check the permission to remove a hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_unlink(struct inode *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_unlink, dir, dentry);
}
/**
* security_inode_symlink() - Check if creating a symbolic link is allowed
* @dir: parent directory
* @dentry: symbolic link
* @old_name: existing filename
*
* Check the permission to create a symbolic link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_symlink(struct inode *dir, struct dentry *dentry,
const char *old_name)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_symlink, dir, dentry, old_name);
}
/**
* security_inode_mkdir() - Check if creating a new directory is allowed
* @dir: parent directory
* @dentry: new directory
* @mode: new directory mode
*
* Check permissions to create a new directory in the existing directory
* associated with inode structure @dir.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_mkdir, dir, dentry, mode);
}
EXPORT_SYMBOL_GPL(security_inode_mkdir);
/**
* security_inode_rmdir() - Check if removing a directory is allowed
* @dir: parent directory
* @dentry: directory to be removed
*
* Check the permission to remove a directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_rmdir, dir, dentry);
}
/**
* security_inode_mknod() - Check if creating a special file is allowed
* @dir: parent directory
* @dentry: new file
* @mode: new file mode
* @dev: device number
*
* Check permissions when creating a special file (or a socket or a fifo file
* created via the mknod system call). Note that if mknod operation is being
* done for a regular file, then the create hook will be called and not this
* hook.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t dev)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_mknod, dir, dentry, mode, dev);
}
/**
* security_inode_rename() - Check if renaming a file is allowed
* @old_dir: parent directory of the old file
* @old_dentry: the old file
* @new_dir: parent directory of the new file
* @new_dentry: the new file
* @flags: flags
*
* Check for permission to rename a file or directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
(d_is_positive(new_dentry) &&
IS_PRIVATE(d_backing_inode(new_dentry)))))
return 0;
if (flags & RENAME_EXCHANGE) {
int err = call_int_hook(inode_rename, new_dir, new_dentry,
old_dir, old_dentry);
if (err)
return err;
}
return call_int_hook(inode_rename, old_dir, old_dentry,
new_dir, new_dentry);
}
/**
* security_inode_readlink() - Check if reading a symbolic link is allowed
* @dentry: link
*
* Check the permission to read the symbolic link.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_readlink(struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_readlink, dentry);
}
/**
* security_inode_follow_link() - Check if following a symbolic link is allowed
* @dentry: link dentry
* @inode: link inode
* @rcu: true if in RCU-walk mode
*
* Check permission to follow a symbolic link when looking up a pathname. If
* @rcu is true, @inode is not stable.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return call_int_hook(inode_follow_link, dentry, inode, rcu);
}
/**
* security_inode_permission() - Check if accessing an inode is allowed
* @inode: inode
* @mask: access mask
*
* Check permission before accessing an inode. This hook is called by the
* existing Linux permission function, so a security module can use it to
* provide additional checking for existing Linux permission checks. Notice
* that this hook is called when a file is opened (as well as many other
* operations), whereas the file_security_ops permission hook is called when
* the actual read/write operations are performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_permission(struct inode *inode, int mask)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return call_int_hook(inode_permission, inode, mask);
}
/**
* security_inode_setattr() - Check if setting file attributes is allowed
* @idmap: idmap of the mount
* @dentry: file
* @attr: new attributes
*
* Check permission before setting file attributes. Note that the kernel call
* to notify_change is performed from several locations, whenever file
* attributes change (such as when a file is truncated, chown/chmod operations,
* transferring disk quotas, etc).
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_setattr, idmap, dentry, attr);
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
/**
* security_inode_post_setattr() - Update the inode after a setattr operation
* @idmap: idmap of the mount
* @dentry: file
* @ia_valid: file attributes set
*
* Update inode security field after successful setting file attributes.
*/
void security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
int ia_valid)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
call_void_hook(inode_post_setattr, idmap, dentry, ia_valid);
}
/**
* security_inode_getattr() - Check if getting file attributes is allowed
* @path: file
*
* Check permission before obtaining file attributes.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_getattr(const struct path *path)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(inode_getattr, path);
}
/**
* security_inode_setxattr() - Check if setting file xattrs is allowed
* @idmap: idmap of the mount
* @dentry: file
* @name: xattr name
* @value: xattr value
* @size: size of xattr value
* @flags: flags
*
* This hook performs the desired permission checks before setting the extended
* attributes (xattrs) on @dentry. It is important to note that we have some
* additional logic before the main LSM implementation calls to detect if we
* need to perform an additional capability check at the LSM layer.
*
* Normally we enforce a capability check prior to executing the various LSM
* hook implementations, but if an LSM wants to avoid this capability check,
* it can register an 'inode_xattr_skipcap' hook and return a value of 1 for
* xattrs for which it wants to skip the capability check, leaving the LSM
* fully responsible for enforcing the access control for the specific xattr.
* If all of the enabled LSMs refrain from registering an
* 'inode_xattr_skipcap' hook, or return 0 (the default return value), the
* capability check is still performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_setxattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
int rc;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
/* enforce the capability checks at the lsm layer, if needed */
if (!call_int_hook(inode_xattr_skipcap, name)) {
rc = cap_inode_setxattr(dentry, name, value, size, flags);
if (rc)
return rc;
}
return call_int_hook(inode_setxattr, idmap, dentry, name, value, size,
flags);
}
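/*
 * Minimal sketch of the 'inode_xattr_skipcap' hook mentioned above (the
 * module and the "security.example" name are hypothetical): return 1 only
 * for the xattrs the LSM wants to police entirely on its own.
 *
 *	static int example_inode_xattr_skipcap(const char *name)
 *	{
 *		if (!strcmp(name, "security.example"))
 *			return 1;	// skip the cap_inode_*xattr() check
 *		return 0;		// keep the capability check
 *	}
 */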
/**
* security_inode_set_acl() - Check if setting posix acls is allowed
* @idmap: idmap of the mount
* @dentry: file
* @acl_name: acl name
* @kacl: acl struct
*
* Check permission before setting posix acls, the posix acls in @kacl are
* identified by @acl_name.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_set_acl(struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name,
struct posix_acl *kacl)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_set_acl, idmap, dentry, acl_name, kacl);
}
/**
* security_inode_post_set_acl() - Update inode security from posix acls set
* @dentry: file
* @acl_name: acl name
* @kacl: acl struct
*
* Update inode security data after successfully setting posix acls on @dentry.
* The posix acls in @kacl are identified by @acl_name.
*/
void security_inode_post_set_acl(struct dentry *dentry, const char *acl_name,
struct posix_acl *kacl)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
call_void_hook(inode_post_set_acl, dentry, acl_name, kacl);
}
/**
* security_inode_get_acl() - Check if reading posix acls is allowed
* @idmap: idmap of the mount
* @dentry: file
* @acl_name: acl name
*
* Check permission before getting posix acls, the posix acls are identified by
* @acl_name.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_get_acl(struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_get_acl, idmap, dentry, acl_name);
}
/**
* security_inode_remove_acl() - Check if removing a posix acl is allowed
* @idmap: idmap of the mount
* @dentry: file
* @acl_name: acl name
*
* Check permission before removing posix acls, the posix acls are identified
* by @acl_name.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_remove_acl(struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_remove_acl, idmap, dentry, acl_name);
}
/**
* security_inode_post_remove_acl() - Update inode security after rm posix acls
* @idmap: idmap of the mount
* @dentry: file
* @acl_name: acl name
*
* Update inode security data after successfully removing posix acls on
* @dentry in @idmap. The posix acls are identified by @acl_name.
*/
void security_inode_post_remove_acl(struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
call_void_hook(inode_post_remove_acl, idmap, dentry, acl_name);
}
/**
* security_inode_post_setxattr() - Update the inode after a setxattr operation
* @dentry: file
* @name: xattr name
* @value: xattr value
* @size: xattr value size
* @flags: flags
*
* Update inode security field after successful setxattr operation.
*/
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
}
/**
* security_inode_getxattr() - Check if xattr access is allowed
* @dentry: file
* @name: xattr name
*
* Check permission before obtaining the extended attributes identified by
* @name for @dentry.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_getxattr(struct dentry *dentry, const char *name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_getxattr, dentry, name);
}
/**
* security_inode_listxattr() - Check if listing xattrs is allowed
* @dentry: file
*
* Check permission before obtaining the list of extended attribute names for
* @dentry.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_listxattr(struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_listxattr, dentry);
}
/**
* security_inode_removexattr() - Check if removing an xattr is allowed
* @idmap: idmap of the mount
* @dentry: file
* @name: xattr name
*
* This hook performs the desired permission checks before removing the
* extended attribute (xattr) identified by @name from @dentry. It is
* important to note that we have some additional logic before the main LSM
* implementation calls to detect if we need to perform an additional
* capability check at the LSM layer.
*
* Normally we enforce a capability check prior to executing the various LSM
* hook implementations, but if an LSM wants to avoid this capability check,
* it can register an 'inode_xattr_skipcap' hook and return a value of 1 for
* xattrs for which it wants to skip the capability check, leaving the LSM
* fully responsible for enforcing the access control for the specific xattr.
* If all of the enabled LSMs refrain from registering an
* 'inode_xattr_skipcap' hook, or return 0 (the default return value), the
* capability check is still performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
{
int rc;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
/* enforce the capability checks at the lsm layer, if needed */
if (!call_int_hook(inode_xattr_skipcap, name)) {
rc = cap_inode_removexattr(idmap, dentry, name);
if (rc)
return rc;
}
return call_int_hook(inode_removexattr, idmap, dentry, name);
}
/**
* security_inode_post_removexattr() - Update the inode after a removexattr op
* @dentry: file
* @name: xattr name
*
* Update the inode after a successful removexattr operation.
*/
void security_inode_post_removexattr(struct dentry *dentry, const char *name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
call_void_hook(inode_post_removexattr, dentry, name);
}
/**
* security_inode_file_setattr() - check if setting fsxattr is allowed
* @dentry: file to set filesystem extended attributes on
* @fa: extended attributes to set on the inode
*
* Called when the file_setattr() syscall or the FS_IOC_FSSETXATTR ioctl() is
* called on an inode.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_file_setattr(struct dentry *dentry, struct file_kattr *fa)
{
return call_int_hook(inode_file_setattr, dentry, fa);
}
/**
* security_inode_file_getattr() - check if retrieving fsxattr is allowed
* @dentry: file to retrieve filesystem extended attributes from
* @fa: extended attributes to get
*
* Called when the file_getattr() syscall or the FS_IOC_FSGETXATTR ioctl() is
* called on an inode.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_file_getattr(struct dentry *dentry, struct file_kattr *fa)
{
return call_int_hook(inode_file_getattr, dentry, fa);
}
/**
* security_inode_need_killpriv() - Check if security_inode_killpriv() required
* @dentry: associated dentry
*
* Called when an inode has been changed to determine if
* security_inode_killpriv() should be called.
*
* Return: Return <0 on error to abort the inode change operation, return 0 if
* security_inode_killpriv() does not need to be called, return >0 if
* security_inode_killpriv() does need to be called.
*/
int security_inode_need_killpriv(struct dentry *dentry)
{
return call_int_hook(inode_need_killpriv, dentry);
}
/**
* security_inode_killpriv() - The setuid bit is removed, update LSM state
* @idmap: idmap of the mount
* @dentry: associated dentry
*
* The @dentry's setuid bit is being removed. Remove similar security labels.
* Called with the dentry->d_inode->i_mutex held.
*
* Return: Return 0 on success. If error is returned, then the operation
* causing setuid bit removal is failed.
*/
int security_inode_killpriv(struct mnt_idmap *idmap,
struct dentry *dentry)
{
return call_int_hook(inode_killpriv, idmap, dentry);
}
/**
* security_inode_getsecurity() - Get the xattr security label of an inode
* @idmap: idmap of the mount
* @inode: inode
* @name: xattr name
* @buffer: security label buffer
* @alloc: allocation flag
*
* Retrieve a copy of the extended attribute representation of the security
* label associated with @name for @inode via @buffer. Note that @name is the
* remainder of the attribute name after the security prefix has been removed.
* @alloc is used to specify if the call should return a value via the buffer
* or just the value length.
*
* Return: Returns size of buffer on success.
*/
int security_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode, const char *name,
void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
return LSM_RET_DEFAULT(inode_getsecurity);
return call_int_hook(inode_getsecurity, idmap, inode, name, buffer,
alloc);
}
/**
* security_inode_setsecurity() - Set the xattr security label of an inode
* @inode: inode
* @name: xattr name
* @value: security label
* @size: length of security label
* @flags: flags
*
* Set the security label associated with @name for @inode from the extended
* attribute value @value. @size indicates the size of the @value in bytes.
* @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. Note that @name is the
* remainder of the attribute name after the security. prefix has been removed.
*
* Return: Returns 0 on success.
*/
int security_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(inode)))
return LSM_RET_DEFAULT(inode_setsecurity);
return call_int_hook(inode_setsecurity, inode, name, value, size,
flags);
}
/**
* security_inode_listsecurity() - List the xattr security label names
* @inode: inode
* @buffer: buffer
* @buffer_size: size of buffer
*
* Copy the extended attribute names for the security labels associated with
* @inode into @buffer. The maximum size of @buffer is specified by
* @buffer_size. @buffer may be NULL to request the size of the buffer
* required.
*
* Return: Returns number of bytes used/required on success.
*/
int security_inode_listsecurity(struct inode *inode,
char *buffer, size_t buffer_size)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return call_int_hook(inode_listsecurity, inode, buffer, buffer_size);
}
EXPORT_SYMBOL(security_inode_listsecurity);
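/*
 * Illustrative usage sketch, based on the size-query behaviour described
 * above (the buffer names are hypothetical):
 *
 *	len = security_inode_listsecurity(inode, NULL, 0);	// size needed
 *	if (len > 0) {
 *		buf = kmalloc(len, GFP_KERNEL);
 *		if (buf)
 *			security_inode_listsecurity(inode, buf, len);
 *	}
 */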
/**
* security_inode_getlsmprop() - Get an inode's LSM data
* @inode: inode
* @prop: lsm specific information to return
*
* Get the lsm specific information associated with the node.
*/
void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop)
{
call_void_hook(inode_getlsmprop, inode, prop);
}
/**
* security_inode_copy_up() - Create new creds for an overlayfs copy-up op
* @src: union dentry of copy-up file
* @new: newly created creds
*
* A file is about to be copied up from lower layer to upper layer of overlay
* filesystem. Security module can prepare a set of new creds and modify as
* need be and return new creds. Caller will switch to new creds temporarily to
* create new file and release newly allocated creds.
*
* Return: Returns 0 on success or a negative error code on error.
*/
int security_inode_copy_up(struct dentry *src, struct cred **new)
{
return call_int_hook(inode_copy_up, src, new);
}
EXPORT_SYMBOL(security_inode_copy_up);
/**
* security_inode_copy_up_xattr() - Filter xattrs in an overlayfs copy-up op
* @src: union dentry of copy-up file
* @name: xattr name
*
* Filter the xattrs being copied up when a unioned file is copied up from a
* lower layer to the union/overlay layer. The caller is responsible for
* reading and writing the xattrs, this hook is merely a filter.
*
* Return: Returns 0 to accept the xattr, -ECANCELED to discard the xattr,
* -EOPNOTSUPP if the security module does not know about attribute,
* or a negative error code to abort the copy up.
*/
int security_inode_copy_up_xattr(struct dentry *src, const char *name)
{
int rc;
rc = call_int_hook(inode_copy_up_xattr, src, name);
if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
return rc;
return LSM_RET_DEFAULT(inode_copy_up_xattr);
}
EXPORT_SYMBOL(security_inode_copy_up_xattr);
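/*
 * Minimal sketch of an LSM implementation of this hook (the
 * "security.example" name is hypothetical), using the return convention
 * documented above:
 *
 *	static int example_inode_copy_up_xattr(struct dentry *src,
 *					       const char *name)
 *	{
 *		if (strcmp(name, "security.example") != 0)
 *			return -EOPNOTSUPP;	// unknown attribute, no opinion
 *		return -ECANCELED;	// drop it; the copy gets a fresh label
 *	}
 */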
/**
* security_inode_setintegrity() - Set the inode's integrity data
* @inode: inode
* @type: type of integrity, e.g. hash digest, signature, etc
* @value: the integrity value
* @size: size of the integrity value
*
* Register a verified integrity measurement of a inode with LSMs.
* LSMs should free the previously saved data if @value is NULL.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_inode_setintegrity(const struct inode *inode,
enum lsm_integrity_type type, const void *value,
size_t size)
{
return call_int_hook(inode_setintegrity, inode, type, value, size);
}
EXPORT_SYMBOL(security_inode_setintegrity);
/**
* security_kernfs_init_security() - Init LSM context for a kernfs node
* @kn_dir: parent kernfs node
* @kn: the kernfs node to initialize
*
* Initialize the security context of a newly created kernfs node based on its
* own and its parent's attributes.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
	return call_int_hook(kernfs_init_security, kn_dir, kn);
}
/**
* security_file_permission() - Check file permissions
* @file: file
* @mask: requested permissions
*
* Check file permissions before accessing an open file. This hook is called
* by various operations that read or write files. A security module can use
* this hook to perform additional checking on these operations, e.g. to
* revalidate permissions on use to support privilege bracketing or policy
* changes. Notice that this hook is used when the actual read/write
* operations are performed, whereas the inode_security_ops hook is called when
* a file is opened (as well as many other operations). Although this hook can
* be used to revalidate permissions for various system call operations that
* read or write files, it does not address the revalidation of permissions for
* memory-mapped files. Security modules must handle this separately if they
* need such revalidation.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_permission(struct file *file, int mask)
{
return call_int_hook(file_permission, file, mask);
}
/**
* security_file_alloc() - Allocate and init a file's LSM blob
* @file: the file
*
* Allocate and attach a security structure to the file->f_security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Return 0 if the hook is successful and permission is granted.
*/
int security_file_alloc(struct file *file)
{
	int rc = lsm_file_alloc(file);
	if (rc)
		return rc;
	rc = call_int_hook(file_alloc_security, file);
	if (unlikely(rc))
		security_file_free(file);
	return rc;
}
/**
* security_file_release() - Perform actions before releasing the file ref
* @file: the file
*
* Perform actions before releasing the last reference to a file.
*/
void security_file_release(struct file *file)
{
call_void_hook(file_release, file);
}
/**
* security_file_free() - Free a file's LSM blob
* @file: the file
*
* Deallocate and free any security structures stored in file->f_security.
*/
void security_file_free(struct file *file)
{
void *blob;
call_void_hook(file_free_security, file);
blob = file->f_security;
if (blob) {
file->f_security = NULL;
kmem_cache_free(lsm_file_cache, blob);
}
}
/**
* security_file_ioctl() - Check if an ioctl is allowed
* @file: associated file
* @cmd: ioctl cmd
* @arg: ioctl arguments
*
* Check permission for an ioctl operation on @file. Note that @arg sometimes
* represents a user space pointer; in other cases, it may be a simple integer
* value. When @arg represents a user space pointer, it should never be used
* by the security module.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return call_int_hook(file_ioctl, file, cmd, arg);
}
EXPORT_SYMBOL_GPL(security_file_ioctl);
/**
* security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode
* @file: associated file
* @cmd: ioctl cmd
* @arg: ioctl arguments
*
* Compat version of security_file_ioctl() that correctly handles 32-bit
* processes running on 64-bit kernels.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
return call_int_hook(file_ioctl_compat, file, cmd, arg);
}
EXPORT_SYMBOL_GPL(security_file_ioctl_compat);
static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
{
/*
* Do we have PROT_READ and does the application expect
* it to imply PROT_EXEC? If not, nothing to talk about...
*/
if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
return prot;
if (!(current->personality & READ_IMPLIES_EXEC))
return prot;
/*
* if that's an anonymous mapping, let it.
*/
if (!file)
return prot | PROT_EXEC;
/*
* ditto if it's not on noexec mount, except that on !MMU we need
* NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
*/
if (!path_noexec(&file->f_path)) {
#ifndef CONFIG_MMU
if (file->f_op->mmap_capabilities) {
unsigned caps = file->f_op->mmap_capabilities(file);
if (!(caps & NOMMU_MAP_EXEC))
return prot;
}
#endif
return prot | PROT_EXEC;
}
/* anything on noexec mount won't get PROT_EXEC */
return prot;
}
/**
* security_mmap_file() - Check if mmap'ing a file is allowed
* @file: file
* @prot: protection applied by the kernel
* @flags: flags
*
* Check permissions for a mmap operation. The @file may be NULL, e.g. if
* mapping anonymous memory.
*
* Return: Returns 0 if permission is granted.
*/
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
return call_int_hook(mmap_file, file, prot, mmap_prot(file, prot),
flags);
}
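/*
 * Minimal sketch of the LSM side of this hook (the module and the policy
 * helper are hypothetical): @reqprot is what the application asked for,
 * @prot is what the kernel will apply after the READ_IMPLIES_EXEC
 * adjustment made by mmap_prot() above.
 *
 *	static int example_mmap_file(struct file *file, unsigned long reqprot,
 *				     unsigned long prot, unsigned long flags)
 *	{
 *		if ((prot & PROT_EXEC) && file &&
 *		    example_file_is_untrusted(file))	// hypothetical check
 *			return -EACCES;
 *		return 0;
 *	}
 */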
/**
* security_mmap_addr() - Check if mmap'ing an address is allowed
* @addr: address
*
* Check permissions for a mmap operation at @addr.
*
* Return: Returns 0 if permission is granted.
*/
int security_mmap_addr(unsigned long addr)
{
return call_int_hook(mmap_addr, addr);
}
/**
* security_file_mprotect() - Check if changing memory protections is allowed
* @vma: memory region
* @reqprot: application requested protection
* @prot: protection applied by the kernel
*
* Check permissions before changing memory access permissions.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot)
{
return call_int_hook(file_mprotect, vma, reqprot, prot);
}
/**
* security_file_lock() - Check if a file lock is allowed
* @file: file
* @cmd: lock operation (e.g. F_RDLCK, F_WRLCK)
*
* Check permission before performing file locking operations. Note the hook
* mediates both flock and fcntl style locks.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_lock(struct file *file, unsigned int cmd)
{
return call_int_hook(file_lock, file, cmd);
}
/**
* security_file_fcntl() - Check if fcntl() op is allowed
* @file: file
* @cmd: fcntl command
* @arg: command argument
*
* Check permission before allowing the file operation specified by @cmd from
* being performed on the file @file. Note that @arg sometimes represents a
* user space pointer; in other cases, it may be a simple integer value. When
* @arg represents a user space pointer, it should never be used by the
* security module.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
return call_int_hook(file_fcntl, file, cmd, arg);
}
/**
* security_file_set_fowner() - Set the file owner info in the LSM blob
* @file: the file
*
* Save owner security information (typically from current->security) in
* file->f_security for later use by the send_sigiotask hook.
*
* This hook is called with file->f_owner.lock held.
*/
void security_file_set_fowner(struct file *file)
{
call_void_hook(file_set_fowner, file);
}
/**
* security_file_send_sigiotask() - Check if sending SIGIO/SIGURG is allowed
* @tsk: target task
* @fown: signal sender
* @sig: signal to be sent, SIGIO is sent if 0
*
* Check permission for the file owner @fown to send SIGIO or SIGURG to the
* process @tsk. Note that this hook is sometimes called from interrupt. Note
* that the fown_struct, @fown, is never outside the context of a struct file,
* so the file structure (and associated security information) can always be
* obtained: container_of(fown, struct file, f_owner).
*
* Return: Returns 0 if permission is granted.
*/
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig)
{
return call_int_hook(file_send_sigiotask, tsk, fown, sig);
}
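/*
* Illustrative sketch (hypothetical LSM, not part of this file): as noted
* above, @fown always lives inside a struct file, so the file and its LSM
* blob can be recovered without taking extra references:
*
* static int example_file_send_sigiotask(struct task_struct *tsk,
* struct fown_struct *fown, int sig)
* {
* struct file *file = container_of(fown, struct file, f_owner);
*
* return example_sigio_allowed(file, tsk, sig ?: SIGIO); // hypothetical
* }
*/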
/**
* security_file_receive() - Check if receiving a file via IPC is allowed
* @file: file being received
*
* This hook allows security modules to control the ability of a process to
* receive an open file descriptor via socket IPC.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_receive(struct file *file)
{
return call_int_hook(file_receive, file);
}
/**
* security_file_open() - Save open() time state for later use by the LSM
* @file: the file
*
* Save open-time permission checking state for later use upon file_permission,
* and recheck access if anything has changed since inode_permission.
*
* We can check if a file is opened for execution (e.g. execve(2) call), either
* directly or indirectly (e.g. ELF's ld.so) by checking file->f_flags &
* __FMODE_EXEC.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_open(struct file *file)
{
return call_int_hook(file_open, file);
}
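/*
* Illustrative sketch (hypothetical LSM, not part of this file): detecting
* an open-for-execution inside a file_open hook, as described above:
*
* static int example_file_open(struct file *file)
* {
* if (file->f_flags & __FMODE_EXEC) // execve(2) or e.g. ld.so mapping
* return example_check_exec(file); // hypothetical policy check
* return 0;
* }
*/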
/**
* security_file_post_open() - Evaluate a file after it has been opened
* @file: the file
* @mask: access mask
*
* Evaluate an opened file and the access mask requested with open(). The hook
* is useful for LSMs that require the file content to be available in order to
* make decisions.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_post_open(struct file *file, int mask)
{
return call_int_hook(file_post_open, file, mask);
}
EXPORT_SYMBOL_GPL(security_file_post_open);
/**
* security_file_truncate() - Check if truncating a file is allowed
* @file: file
*
* Check permission before truncating a file, i.e. using ftruncate. Note that
* truncation permission may also be checked based on the path, using the
* @path_truncate hook.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_truncate(struct file *file)
{
return call_int_hook(file_truncate, file);
}
/**
* security_task_alloc() - Allocate a task's LSM blob
* @task: the task
* @clone_flags: flags indicating what is being shared
*
* Handle allocation of task-related resources.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_task_alloc(struct task_struct *task, u64 clone_flags)
{
int rc = lsm_task_alloc(task);
if (rc)
return rc;
rc = call_int_hook(task_alloc, task, clone_flags);
if (unlikely(rc))
security_task_free(task);
return rc;
}
/**
* security_task_free() - Free a task's LSM blob and related resources
* @task: task
*
* Handle release of task-related resources. Note that this can be called from
* interrupt context.
*/
void security_task_free(struct task_struct *task)
{
call_void_hook(task_free, task);
kfree(task->security);
task->security = NULL;
}
/**
* security_cred_alloc_blank() - Allocate the min memory to allow cred_transfer
* @cred: credentials
* @gfp: gfp flags
*
* Only allocate sufficient memory and attach to @cred such that
* cred_transfer() will not get ENOMEM.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
int rc = lsm_cred_alloc(cred, gfp);
if (rc)
return rc;
rc = call_int_hook(cred_alloc_blank, cred, gfp);
if (unlikely(rc))
security_cred_free(cred);
return rc;
}
/**
* security_cred_free() - Free the cred's LSM blob and associated resources
* @cred: credentials
*
* Deallocate and clear the cred->security field in a set of credentials.
*/
void security_cred_free(struct cred *cred)
{
/*
* There is a failure case in prepare_creds() that
* may result in a call here with ->security being NULL.
*/
if (unlikely(cred->security == NULL))
return;
call_void_hook(cred_free, cred);
kfree(cred->security);
cred->security = NULL;
}
/**
* security_prepare_creds() - Prepare a new set of credentials
* @new: new credentials
* @old: original credentials
* @gfp: gfp flags
*
* Prepare a new set of credentials by copying the data from the old set.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
{
int rc = lsm_cred_alloc(new, gfp);
if (rc)
return rc;
rc = call_int_hook(cred_prepare, new, old, gfp);
if (unlikely(rc))
security_cred_free(new);
return rc;
}
/**
* security_transfer_creds() - Transfer creds
* @new: target credentials
* @old: original credentials
*
* Transfer data from original creds to new creds.
*/
void security_transfer_creds(struct cred *new, const struct cred *old)
{
call_void_hook(cred_transfer, new, old);
}
/**
* security_cred_getsecid() - Get the secid from a set of credentials
* @c: credentials
* @secid: secid value
*
* Retrieve the security identifier of the cred structure @c. In case of
* failure, @secid will be set to zero.
*/
void security_cred_getsecid(const struct cred *c, u32 *secid)
{
*secid = 0;
call_void_hook(cred_getsecid, c, secid);
}
EXPORT_SYMBOL(security_cred_getsecid);
/**
* security_cred_getlsmprop() - Get the LSM data from a set of credentials
* @c: credentials
* @prop: destination for the LSM data
*
* Retrieve the security data of the cred structure @c. In case of
* failure, @prop will be cleared.
*/
void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop)
{
lsmprop_init(prop);
call_void_hook(cred_getlsmprop, c, prop);
}
EXPORT_SYMBOL(security_cred_getlsmprop);
/**
* security_kernel_act_as() - Set the kernel credentials to act as secid
* @new: credentials
* @secid: secid
*
* Set the credentials for a kernel service to act as (subjective context).
* The current task must be the one that nominated @secid.
*
* Return: Returns 0 if successful.
*/
int security_kernel_act_as(struct cred *new, u32 secid)
{
return call_int_hook(kernel_act_as, new, secid);
}
/**
* security_kernel_create_files_as() - Set file creation context using an inode
* @new: target credentials
* @inode: reference inode
*
* Set the file creation context in a set of credentials to be the same as the
* objective context of the specified inode. The current task must be the one
* that nominated @inode.
*
* Return: Returns 0 if successful.
*/
int security_kernel_create_files_as(struct cred *new, struct inode *inode)
{
return call_int_hook(kernel_create_files_as, new, inode);
}
/**
* security_kernel_module_request() - Check if loading a module is allowed
* @kmod_name: module name
*
* Ability to trigger the kernel to automatically upcall to userspace for
* userspace to load a kernel module with the given name.
*
* Return: Returns 0 if successful.
*/
int security_kernel_module_request(char *kmod_name)
{
return call_int_hook(kernel_module_request, kmod_name);
}
/**
* security_kernel_read_file() - Read a file specified by userspace
* @file: file
* @id: file identifier
* @contents: true if security_kernel_post_read_file() will be called
*
* Read a file specified by userspace.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
bool contents)
{
return call_int_hook(kernel_read_file, file, id, contents);
}
EXPORT_SYMBOL_GPL(security_kernel_read_file);
/**
* security_kernel_post_read_file() - Read a file specified by userspace
* @file: file
* @buf: file contents
* @size: size of file contents
* @id: file identifier
*
* Read a file specified by userspace. This must be paired with a prior
* security_kernel_read_file() call that indicated this hook would also be
* called, see security_kernel_read_file() for more information.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id)
{
return call_int_hook(kernel_post_read_file, file, buf, size, id);
}
EXPORT_SYMBOL_GPL(security_kernel_post_read_file);
/**
* security_kernel_load_data() - Load data provided by userspace
* @id: data identifier
* @contents: true if security_kernel_post_load_data() will be called
*
* Load data provided by userspace.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
{
return call_int_hook(kernel_load_data, id, contents);
}
EXPORT_SYMBOL_GPL(security_kernel_load_data);
/**
* security_kernel_post_load_data() - Load userspace data from a non-file source
* @buf: data
* @size: size of data
* @id: data identifier
* @description: text description of data, specific to the id value
*
* Load data provided by a non-file source (usually userspace buffer). This
* must be paired with a prior security_kernel_load_data() call that indicated
* this hook would also be called, see security_kernel_load_data() for more
* information.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_post_load_data(char *buf, loff_t size,
enum kernel_load_data_id id,
char *description)
{
return call_int_hook(kernel_post_load_data, buf, size, id, description);
}
EXPORT_SYMBOL_GPL(security_kernel_post_load_data);
/**
* security_task_fix_setuid() - Update LSM with new user id attributes
* @new: updated credentials
* @old: credentials being replaced
* @flags: LSM_SETID_* flag values
*
* Update the module's state after setting one or more of the user identity
* attributes of the current process. The @flags parameter indicates which of
* the set*uid system calls invoked this hook. @new is the set of
* credentials that will be installed. Modifications should be made to this
* rather than to @current->cred.
*
* Return: Returns 0 on success.
*/
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
return call_int_hook(task_fix_setuid, new, old, flags);
}
/**
* security_task_fix_setgid() - Update LSM with new group id attributes
* @new: updated credentials
* @old: credentials being replaced
* @flags: LSM_SETID_* flag value
*
* Update the module's state after setting one or more of the group identity
* attributes of the current process. The @flags parameter indicates which of
* the set*gid system calls invoked this hook. @new is the set of credentials
* that will be installed. Modifications should be made to this rather than to
* @current->cred.
*
* Return: Returns 0 on success.
*/
int security_task_fix_setgid(struct cred *new, const struct cred *old,
int flags)
{
return call_int_hook(task_fix_setgid, new, old, flags);
}
/**
* security_task_fix_setgroups() - Update LSM with new supplementary groups
* @new: updated credentials
* @old: credentials being replaced
*
* Update the module's state after setting the supplementary group identity
* attributes of the current process. @new is the set of credentials that will
* be installed. Modifications should be made to this rather than to
* @current->cred.
*
* Return: Returns 0 on success.
*/
int security_task_fix_setgroups(struct cred *new, const struct cred *old)
{
return call_int_hook(task_fix_setgroups, new, old);
}
/**
* security_task_setpgid() - Check if setting the pgid is allowed
* @p: task being modified
* @pgid: new pgid
*
* Check permission before setting the process group identifier of the process
* @p to @pgid.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return call_int_hook(task_setpgid, p, pgid);
}
/**
* security_task_getpgid() - Check if getting the pgid is allowed
* @p: task
*
* Check permission before getting the process group identifier of the process
* @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getpgid(struct task_struct *p)
{
return call_int_hook(task_getpgid, p);
}
/**
* security_task_getsid() - Check if getting the session id is allowed
* @p: task
*
* Check permission before getting the session identifier of the process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getsid(struct task_struct *p)
{
return call_int_hook(task_getsid, p);
}
/**
* security_current_getlsmprop_subj() - Current task's subjective LSM data
* @prop: lsm specific information
*
* Retrieve the subjective security identifier of the current task and return
* it in @prop.
*/
void security_current_getlsmprop_subj(struct lsm_prop *prop)
{
lsmprop_init(prop);
call_void_hook(current_getlsmprop_subj, prop);
}
EXPORT_SYMBOL(security_current_getlsmprop_subj);
/**
* security_task_getlsmprop_obj() - Get a task's objective LSM data
* @p: target task
* @prop: lsm specific information
*
* Retrieve the objective security identifier of the task_struct in @p and
* return it in @prop.
*/
void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop)
{
lsmprop_init(prop);
call_void_hook(task_getlsmprop_obj, p, prop);
}
EXPORT_SYMBOL(security_task_getlsmprop_obj);
/**
* security_task_setnice() - Check if setting a task's nice value is allowed
* @p: target task
* @nice: nice value
*
* Check permission before setting the nice value of @p to @nice.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setnice(struct task_struct *p, int nice)
{
return call_int_hook(task_setnice, p, nice);
}
/**
* security_task_setioprio() - Check if setting a task's ioprio is allowed
* @p: target task
* @ioprio: ioprio value
*
* Check permission before setting the ioprio value of @p to @ioprio.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setioprio(struct task_struct *p, int ioprio)
{
return call_int_hook(task_setioprio, p, ioprio);
}
/**
* security_task_getioprio() - Check if getting a task's ioprio is allowed
* @p: task
*
* Check permission before getting the ioprio value of @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getioprio(struct task_struct *p)
{
return call_int_hook(task_getioprio, p);
}
/**
* security_task_prlimit() - Check if get/setting resources limits is allowed
* @cred: current task credentials
* @tcred: target task credentials
* @flags: LSM_PRLIMIT_* flag bits indicating a get/set/both
*
* Check permission before getting and/or setting the resource limits of
* another task.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
unsigned int flags)
{
return call_int_hook(task_prlimit, cred, tcred, flags);
}
/**
* security_task_setrlimit() - Check if setting a new rlimit value is allowed
* @p: target task's group leader
* @resource: resource whose limit is being set
* @new_rlim: new resource limit
*
* Check permission before setting the resource limits of process @p for
* @resource to @new_rlim. The old resource limit values can be examined by
* dereferencing (p->signal->rlim + resource).
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim)
{
return call_int_hook(task_setrlimit, p, resource, new_rlim);
}
/**
* security_task_setscheduler() - Check if setting sched policy/param is allowed
* @p: target task
*
* Check permission before setting scheduling policy and/or parameters of
* process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setscheduler(struct task_struct *p)
{
return call_int_hook(task_setscheduler, p);
}
/**
* security_task_getscheduler() - Check if getting scheduling info is allowed
* @p: target task
*
* Check permission before obtaining scheduling information for process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getscheduler(struct task_struct *p)
{
return call_int_hook(task_getscheduler, p);
}
/**
* security_task_movememory() - Check if moving memory is allowed
* @p: task
*
* Check permission before moving memory owned by process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_movememory(struct task_struct *p)
{
return call_int_hook(task_movememory, p);
}
/**
* security_task_kill() - Check if sending a signal is allowed
* @p: target process
* @info: signal information
* @sig: signal value
* @cred: credentials of the signal sender, NULL if @current
*
* Check permission before sending signal @sig to @p. @info can be NULL, the
* constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or
* SI_FROMKERNEL(info) is true, then the signal should be viewed as coming from
* the kernel and should typically be permitted. SIGIO signals are handled
* separately by the send_sigiotask hook in file_security_ops.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
return call_int_hook(task_kill, p, info, sig, cred);
}
/**
* security_task_prctl() - Check if a prctl op is allowed
* @option: operation
* @arg2: argument
* @arg3: argument
* @arg4: argument
* @arg5: argument
*
* Check permission before performing a process control operation on the
* current process.
*
* Return: Return -ENOSYS if no-one wanted to handle this op, any other value
* to cause prctl() to return immediately with that value.
*/
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int thisrc;
int rc = LSM_RET_DEFAULT(task_prctl);
struct lsm_static_call *scall;
lsm_for_each_hook(scall, task_prctl) {
thisrc = scall->hl->hook.task_prctl(option, arg2, arg3, arg4, arg5);
if (thisrc != LSM_RET_DEFAULT(task_prctl)) {
rc = thisrc;
if (thisrc != 0)
break;
}
}
return rc;
}
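/*
* Illustrative sketch (hypothetical LSM, not part of this file): the loop
* above keeps iterating while hooks return the default (-ENOSYS) and stops
* at the first hook that claims the operation, so a task_prctl hook should
* return -ENOSYS for options it does not recognise:
*
* static int example_task_prctl(int option, unsigned long arg2,
* unsigned long arg3, unsigned long arg4, unsigned long arg5)
* {
* if (option != EXAMPLE_PR_OPTION) // hypothetical prctl option
* return -ENOSYS; // not ours, let other LSMs or the core handle it
* return example_do_prctl(arg2); // 0 or -errno ends the walk
* }
*/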
/**
* security_task_to_inode() - Set the security attributes of a task's inode
* @p: task
* @inode: inode
*
* Set the security attributes for an inode based on an associated task's
* security attributes, e.g. for /proc/pid inodes.
*/
void security_task_to_inode(struct task_struct *p, struct inode *inode)
{
call_void_hook(task_to_inode, p, inode);
}
/**
* security_create_user_ns() - Check if creating a new userns is allowed
* @cred: prepared creds
*
* Check permission prior to creating a new user namespace.
*
* Return: Returns 0 if successful, otherwise < 0 error code.
*/
int security_create_user_ns(const struct cred *cred)
{
return call_int_hook(userns_create, cred);
}
/**
* security_ipc_permission() - Check if sysv ipc access is allowed
* @ipcp: ipc permission structure
* @flag: requested permissions
*
* Check permissions for access to IPC.
*
* Return: Returns 0 if permission is granted.
*/
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
{
return call_int_hook(ipc_permission, ipcp, flag);
}
/**
* security_ipc_getlsmprop() - Get the sysv ipc object LSM data
* @ipcp: ipc permission structure
* @prop: pointer to lsm information
*
* Get the lsm information associated with the ipc object.
*/
void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop)
{
lsmprop_init(prop);
call_void_hook(ipc_getlsmprop, ipcp, prop);
}
/**
* security_msg_msg_alloc() - Allocate a sysv ipc message LSM blob
* @msg: message structure
*
* Allocate and attach a security structure to the msg->security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Return 0 if operation was successful and permission is granted.
*/
int security_msg_msg_alloc(struct msg_msg *msg)
{
int rc = lsm_msg_msg_alloc(msg);
if (unlikely(rc))
return rc;
rc = call_int_hook(msg_msg_alloc_security, msg);
if (unlikely(rc))
security_msg_msg_free(msg);
return rc;
}
/**
* security_msg_msg_free() - Free a sysv ipc message LSM blob
* @msg: message structure
*
* Deallocate the security structure for this message.
*/
void security_msg_msg_free(struct msg_msg *msg)
{
call_void_hook(msg_msg_free_security, msg);
kfree(msg->security);
msg->security = NULL;
}
/**
* security_msg_queue_alloc() - Allocate a sysv ipc msg queue LSM blob
* @msq: sysv ipc permission structure
*
* Allocate and attach a security structure to @msq. The security field is
* initialized to NULL when the structure is first created.
*
* Return: Returns 0 if operation was successful and permission is granted.
*/
int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
int rc = lsm_ipc_alloc(msq);
if (unlikely(rc))
return rc;
rc = call_int_hook(msg_queue_alloc_security, msq);
if (unlikely(rc))
security_msg_queue_free(msq);
return rc;
}
/**
* security_msg_queue_free() - Free a sysv ipc msg queue LSM blob
* @msq: sysv ipc permission structure
*
* Deallocate the security field @msq->security for the message queue.
*/
void security_msg_queue_free(struct kern_ipc_perm *msq)
{
call_void_hook(msg_queue_free_security, msq);
kfree(msq->security);
msq->security = NULL;
}
/**
* security_msg_queue_associate() - Check if a msg queue operation is allowed
* @msq: sysv ipc permission structure
* @msqflg: operation flags
*
* Check permission when a message queue is requested through the msgget system
* call. This hook is only called when returning the message queue identifier
* for an existing message queue, not when a new message queue is created.
*
* Return: Return 0 if permission is granted.
*/
int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
{
return call_int_hook(msg_queue_associate, msq, msqflg);
}
/**
* security_msg_queue_msgctl() - Check if a msg queue operation is allowed
* @msq: sysv ipc permission structure
* @cmd: operation
*
* Check permission when a message control operation specified by @cmd is to be
* performed on the message queue with permissions in @msq.
*
* Return: Returns 0 if permission is granted.
*/
int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
{
return call_int_hook(msg_queue_msgctl, msq, cmd);
}
/**
* security_msg_queue_msgsnd() - Check if sending a sysv ipc message is allowed
* @msq: sysv ipc permission structure
* @msg: message
* @msqflg: operation flags
*
* Check permission before a message, @msg, is enqueued on the message queue
* with permissions specified in @msq.
*
* Return: Returns 0 if permission is granted.
*/
int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
struct msg_msg *msg, int msqflg)
{
return call_int_hook(msg_queue_msgsnd, msq, msg, msqflg);
}
/**
* security_msg_queue_msgrcv() - Check if receiving a sysv ipc msg is allowed
* @msq: sysv ipc permission structure
* @msg: message
* @target: target task
* @type: type of message requested
* @mode: operation flags
*
* Check permission before a message, @msg, is removed from the message queue.
* The @target task structure contains a pointer to the process that will be
* receiving the message (not equal to the current process when inline receives
* are being performed).
*
* Return: Returns 0 if permission is granted.
*/
int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
struct task_struct *target, long type, int mode)
{
return call_int_hook(msg_queue_msgrcv, msq, msg, target, type, mode);
}
/**
* security_shm_alloc() - Allocate a sysv shm LSM blob
* @shp: sysv ipc permission structure
*
* Allocate and attach a security structure to the @shp security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Returns 0 if operation was successful and permission is granted.
*/
int security_shm_alloc(struct kern_ipc_perm *shp)
{
int rc = lsm_ipc_alloc(shp);
if (unlikely(rc))
return rc;
rc = call_int_hook(shm_alloc_security, shp);
if (unlikely(rc))
security_shm_free(shp);
return rc;
}
/**
* security_shm_free() - Free a sysv shm LSM blob
* @shp: sysv ipc permission structure
*
* Deallocate the security structure @shp->security for the memory segment.
*/
void security_shm_free(struct kern_ipc_perm *shp)
{
call_void_hook(shm_free_security, shp);
kfree(shp->security);
shp->security = NULL;
}
/**
* security_shm_associate() - Check if a sysv shm operation is allowed
* @shp: sysv ipc permission structure
* @shmflg: operation flags
*
* Check permission when a shared memory region is requested through the shmget
* system call. This hook is only called when returning the shared memory
* region identifier for an existing region, not when a new shared memory
* region is created.
*
* Return: Returns 0 if permission is granted.
*/
int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
{
return call_int_hook(shm_associate, shp, shmflg);
}
/**
* security_shm_shmctl() - Check if a sysv shm operation is allowed
* @shp: sysv ipc permission structure
* @cmd: operation
*
* Check permission when a shared memory control operation specified by @cmd is
* to be performed on the shared memory region with permissions in @shp.
*
* Return: Return 0 if permission is granted.
*/
int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
return call_int_hook(shm_shmctl, shp, cmd);
}
/**
* security_shm_shmat() - Check if a sysv shm attach operation is allowed
* @shp: sysv ipc permission structure
* @shmaddr: address of memory region to attach
* @shmflg: operation flags
*
* Check permissions prior to allowing the shmat system call to attach the
* shared memory segment with permissions @shp to the data segment of the
* calling process. The attaching address is specified by @shmaddr.
*
* Return: Returns 0 if permission is granted.
*/
int security_shm_shmat(struct kern_ipc_perm *shp,
char __user *shmaddr, int shmflg)
{
return call_int_hook(shm_shmat, shp, shmaddr, shmflg);
}
/**
* security_sem_alloc() - Allocate a sysv semaphore LSM blob
* @sma: sysv ipc permission structure
*
* Allocate and attach a security structure to the @sma security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Returns 0 if operation was successful and permission is granted.
*/
int security_sem_alloc(struct kern_ipc_perm *sma)
{
int rc = lsm_ipc_alloc(sma);
if (unlikely(rc))
return rc;
rc = call_int_hook(sem_alloc_security, sma);
if (unlikely(rc))
security_sem_free(sma);
return rc;
}
/**
* security_sem_free() - Free a sysv semaphore LSM blob
* @sma: sysv ipc permission structure
*
* Deallocate security structure @sma->security for the semaphore.
*/
void security_sem_free(struct kern_ipc_perm *sma)
{
call_void_hook(sem_free_security, sma);
kfree(sma->security);
sma->security = NULL;
}
/**
* security_sem_associate() - Check if a sysv semaphore operation is allowed
* @sma: sysv ipc permission structure
* @semflg: operation flags
*
* Check permission when a semaphore is requested through the semget system
* call. This hook is only called when returning the semaphore identifier for
* an existing semaphore, not when a new one must be created.
*
* Return: Returns 0 if permission is granted.
*/
int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
{
return call_int_hook(sem_associate, sma, semflg);
}
/**
* security_sem_semctl() - Check if a sysv semaphore operation is allowed
* @sma: sysv ipc permission structure
* @cmd: operation
*
* Check permission when a semaphore operation specified by @cmd is to be
* performed on the semaphore.
*
* Return: Returns 0 if permission is granted.
*/
int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
{
return call_int_hook(sem_semctl, sma, cmd);
}
/**
* security_sem_semop() - Check if a sysv semaphore operation is allowed
* @sma: sysv ipc permission structure
* @sops: operations to perform
* @nsops: number of operations
* @alter: flag indicating changes will be made
*
* Check permissions before performing operations on members of the semaphore
* set. If the @alter flag is nonzero, the semaphore set may be modified.
*
* Return: Returns 0 if permission is granted.
*/
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter)
{
return call_int_hook(sem_semop, sma, sops, nsops, alter);
}
/**
* security_d_instantiate() - Populate an inode's LSM state based on a dentry
* @dentry: dentry
* @inode: inode
*
* Fill in @inode security information for a @dentry if allowed.
*/
void security_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (unlikely(inode && IS_PRIVATE(inode)))
return;
call_void_hook(d_instantiate, dentry, inode);
}
EXPORT_SYMBOL(security_d_instantiate);
/*
* Please keep this in sync with its counterpart in security/lsm_syscalls.c
*/
/**
* security_getselfattr - Read an LSM attribute of the current process.
* @attr: which attribute to return
* @uctx: the user-space destination for the information, or NULL
* @size: pointer to the size of space available to receive the data
* @flags: special handling options. LSM_FLAG_SINGLE indicates that only
* attributes associated with the LSM identified in the passed @uctx be
* reported.
*
* A NULL value for @uctx can be used to get both the number of attributes
* and the size of the data.
*
* Returns the number of attributes found on success, negative value
* on error. @size is reset to the total size of the data.
* If @size is insufficient to contain the data -E2BIG is returned.
*/
int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
u32 __user *size, u32 flags)
{
struct lsm_static_call *scall;
struct lsm_ctx lctx = { .id = LSM_ID_UNDEF, };
u8 __user *base = (u8 __user *)uctx;
u32 entrysize;
u32 total = 0;
u32 left;
bool toobig = false;
bool single = false;
int count = 0;
int rc;
if (attr == LSM_ATTR_UNDEF)
return -EINVAL;
if (size == NULL)
return -EINVAL;
if (get_user(left, size))
return -EFAULT;
if (flags) {
/*
* Only flag supported is LSM_FLAG_SINGLE
*/
if (flags != LSM_FLAG_SINGLE || !uctx)
return -EINVAL;
if (copy_from_user(&lctx, uctx, sizeof(lctx)))
return -EFAULT;
/*
* If the LSM ID isn't specified it is an error.
*/
if (lctx.id == LSM_ID_UNDEF)
return -EINVAL;
single = true;
}
/*
* In the usual case gather all the data from the LSMs.
* In the single case only get the data from the LSM specified.
*/
lsm_for_each_hook(scall, getselfattr) {
if (single && lctx.id != scall->hl->lsmid->id)
continue;
entrysize = left;
if (base)
uctx = (struct lsm_ctx __user *)(base + total);
rc = scall->hl->hook.getselfattr(attr, uctx, &entrysize, flags);
if (rc == -EOPNOTSUPP)
continue;
if (rc == -E2BIG) {
rc = 0;
left = 0;
toobig = true;
} else if (rc < 0)
return rc;
else
left -= entrysize;
total += entrysize;
count += rc;
if (single)
break;
}
if (put_user(total, size))
return -EFAULT;
if (toobig)
return -E2BIG;
if (count == 0)
return LSM_RET_DEFAULT(getselfattr);
return count;
}
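/*
* Userspace sketch (illustrative; assumes a kernel providing the
* lsm_get_self_attr(2) syscall and <linux/lsm.h>; error handling omitted):
* probe the required buffer size with a NULL buffer, then fetch the
* attributes. The second call returns the attribute count, and the entries
* are packed lsm_ctx records, each lsm_ctx->len bytes long.
*
* u32 size = 0;
* // First call: NULL buffer and size 0 fail with E2BIG but write the
* // total size needed back through &size.
* syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT, NULL, &size, 0);
* struct lsm_ctx *ctx = malloc(size);
* int count = syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT,
* ctx, &size, 0);
*/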
/*
* Please keep this in sync with its counterpart in security/lsm_syscalls.c
*/
/**
* security_setselfattr - Set an LSM attribute on the current process.
* @attr: which attribute to set
* @uctx: the user-space source for the information
* @size: the size of the data
* @flags: reserved for future use, must be 0
*
* Set an LSM attribute for the current process. The LSM, attribute
* and new value are included in @uctx.
*
* Returns 0 on success, -EINVAL if the input is inconsistent, -EFAULT
* if the user buffer is inaccessible, -E2BIG if size is too big, or an
* LSM specific failure.
*/
int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
u32 size, u32 flags)
{
struct lsm_static_call *scall;
struct lsm_ctx *lctx;
int rc = LSM_RET_DEFAULT(setselfattr);
u64 required_len;
if (flags)
return -EINVAL;
if (size < sizeof(*lctx))
return -EINVAL;
if (size > PAGE_SIZE)
return -E2BIG;
lctx = memdup_user(uctx, size);
if (IS_ERR(lctx))
return PTR_ERR(lctx);
if (size < lctx->len ||
check_add_overflow(sizeof(*lctx), lctx->ctx_len, &required_len) ||
lctx->len < required_len) {
rc = -EINVAL;
goto free_out;
}
lsm_for_each_hook(scall, setselfattr)
if ((scall->hl->lsmid->id) == lctx->id) {
rc = scall->hl->hook.setselfattr(attr, lctx, size, flags);
break;
}
free_out:
kfree(lctx);
return rc;
}
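/*
* Userspace sketch (illustrative; assumes the lsm_set_self_attr(2) syscall
* and a context string newctx valid for the chosen LSM): the buffer must
* satisfy the checks above, i.e. size >= lctx->len and
* lctx->len >= sizeof(struct lsm_ctx) + lctx->ctx_len.
*
* size_t ctx_len = strlen(newctx) + 1;
* size_t total = sizeof(struct lsm_ctx) + ctx_len;
* struct lsm_ctx *lctx = calloc(1, total);
* lctx->id = LSM_ID_SELINUX; // which LSM the attribute belongs to
* lctx->len = total; // whole entry, header included
* lctx->ctx_len = ctx_len; // payload that follows the header
* memcpy(lctx->ctx, newctx, ctx_len);
* syscall(__NR_lsm_set_self_attr, LSM_ATTR_CURRENT, lctx, total, 0);
*/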
/**
* security_getprocattr() - Read an attribute for a task
* @p: the task
* @lsmid: LSM identification
* @name: attribute name
* @value: attribute value
*
* Read attribute @name for task @p and store it into @value if allowed.
*
* Return: Returns the length of @value on success, a negative value otherwise.
*/
int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value)
{
struct lsm_static_call *scall;
lsm_for_each_hook(scall, getprocattr) {
if (lsmid != 0 && lsmid != scall->hl->lsmid->id)
continue;
return scall->hl->hook.getprocattr(p, name, value);
}
return LSM_RET_DEFAULT(getprocattr);
}
/**
* security_setprocattr() - Set an attribute for a task
* @lsmid: LSM identification
* @name: attribute name
* @value: attribute value
* @size: attribute value size
*
* Write (set) the current task's attribute @name to @value, size @size if
* allowed.
*
* Return: Returns bytes written on success, a negative value otherwise.
*/
int security_setprocattr(int lsmid, const char *name, void *value, size_t size)
{
struct lsm_static_call *scall;
lsm_for_each_hook(scall, setprocattr) {
if (lsmid != 0 && lsmid != scall->hl->lsmid->id)
continue;
return scall->hl->hook.setprocattr(name, value, size);
}
return LSM_RET_DEFAULT(setprocattr);
}
/**
* security_ismaclabel() - Check if the named attribute is a MAC label
* @name: full extended attribute name
*
* Check if the extended attribute specified by @name represents a MAC label.
*
* Return: Returns 1 if name is a MAC attribute otherwise returns 0.
*/
int security_ismaclabel(const char *name)
{
return call_int_hook(ismaclabel, name);
}
EXPORT_SYMBOL(security_ismaclabel);
/**
* security_secid_to_secctx() - Convert a secid to a secctx
* @secid: secid
* @cp: the LSM context
*
* Convert secid to security context. If @cp is NULL the length of the
* result will be returned, but no data will be returned. This
* does mean that the length could change between calls to check the length and
* the next call which actually allocates and returns the data.
*
* Return: Return length of data on success, error on failure.
*/
int security_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
return call_int_hook(secid_to_secctx, secid, cp);
}
EXPORT_SYMBOL(security_secid_to_secctx);
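/*
* In-kernel caller sketch (illustrative, not part of this file): the
* two-step pattern described above, probing the length before fetching the
* context, followed by the mandatory release. Note the length may change
* between the two calls.
*
* struct lsm_context ctx;
* int len = security_secid_to_secctx(secid, NULL); // length only
* if (len >= 0 && security_secid_to_secctx(secid, &ctx) >= 0) {
* // ... use ctx.context / ctx.len ...
* security_release_secctx(&ctx);
* }
*/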
/**
* security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx
* @prop: lsm specific information
* @cp: the LSM context
* @lsmid: which security module to report
*
* Convert a @prop entry to security context. If @cp is NULL the
* length of the result will be returned. This does mean that the
* length could change between calls to check the length and the
* next call which actually allocates and returns the @cp.
*
* @lsmid identifies which LSM should supply the context.
* A value of LSM_ID_UNDEF indicates that the first LSM supplying
* the hook should be used. This is used in cases where the
* ID of the supplying LSM is unambiguous.
*
* Return: Return length of data on success, error on failure.
*/
int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp,
int lsmid)
{
struct lsm_static_call *scall;
lsm_for_each_hook(scall, lsmprop_to_secctx) {
if (lsmid != LSM_ID_UNDEF && lsmid != scall->hl->lsmid->id)
continue;
return scall->hl->hook.lsmprop_to_secctx(prop, cp);
}
return LSM_RET_DEFAULT(lsmprop_to_secctx);
}
EXPORT_SYMBOL(security_lsmprop_to_secctx);
/**
* security_secctx_to_secid() - Convert a secctx to a secid
* @secdata: secctx
* @seclen: length of secctx
* @secid: secid
*
* Convert security context to secid.
*
* Return: Returns 0 on success, error on failure.
*/
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
*secid = 0;
return call_int_hook(secctx_to_secid, secdata, seclen, secid);
}
EXPORT_SYMBOL(security_secctx_to_secid);
/**
* security_release_secctx() - Free a secctx buffer
* @cp: the security context
*
* Release the security context.
*/
void security_release_secctx(struct lsm_context *cp)
{
call_void_hook(release_secctx, cp);
memset(cp, 0, sizeof(*cp));
}
EXPORT_SYMBOL(security_release_secctx);
/**
* security_inode_invalidate_secctx() - Invalidate an inode's security label
* @inode: inode
*
* Notify the security module that it must revalidate the security context of
* an inode.
*/
void security_inode_invalidate_secctx(struct inode *inode)
{
call_void_hook(inode_invalidate_secctx, inode);
}
EXPORT_SYMBOL(security_inode_invalidate_secctx);
/**
* security_inode_notifysecctx() - Notify the LSM of an inode's security label
* @inode: inode
* @ctx: secctx
* @ctxlen: length of secctx
*
* Notify the security module of what the security context of an inode should
* be. Initializes the incore security context managed by the security module
* for this inode. Example usage: NFS client invokes this hook to initialize
* the security context in its incore inode to the value provided by the server
* for the file when the server returned the file's attributes to the client.
* Must be called with inode->i_mutex locked.
*
* Return: Returns 0 on success, error on failure.
*/
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return call_int_hook(inode_notifysecctx, inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_notifysecctx);
/**
* security_inode_setsecctx() - Change the security label of an inode
* @dentry: inode
* @ctx: secctx
* @ctxlen: length of secctx
*
* Change the security context of an inode. Updates the incore security
* context managed by the security module and invokes the fs code as needed
* (via __vfs_setxattr_noperm) to update any backing xattrs that represent the
* context. Example usage: NFS server invokes this hook to change the security
* context in its incore inode and on the backing filesystem to a value
* provided by the client on a SETATTR operation. Must be called with
* inode->i_mutex locked.
*
* Return: Returns 0 on success, error on failure.
*/
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
return call_int_hook(inode_setsecctx, dentry, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_setsecctx);
/**
* security_inode_getsecctx() - Get the security label of an inode
* @inode: inode
* @cp: security context
*
* On success, returns 0 and fills out @cp with the security context
* for the given @inode.
*
* Return: Returns 0 on success, error on failure.
*/
int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp)
{
memset(cp, 0, sizeof(*cp));
return call_int_hook(inode_getsecctx, inode, cp);
}
EXPORT_SYMBOL(security_inode_getsecctx);
#ifdef CONFIG_WATCH_QUEUE
/**
* security_post_notification() - Check if a watch notification can be posted
* @w_cred: credentials of the task that set the watch
* @cred: credentials of the task which triggered the watch
* @n: the notification
*
* Check to see if a watch notification can be posted to a particular queue.
*
* Return: Returns 0 if permission is granted.
*/
int security_post_notification(const struct cred *w_cred,
const struct cred *cred,
struct watch_notification *n)
{
return call_int_hook(post_notification, w_cred, cred, n);
}
#endif /* CONFIG_WATCH_QUEUE */
#ifdef CONFIG_KEY_NOTIFICATIONS
/**
* security_watch_key() - Check if a task is allowed to watch for key events
* @key: the key to watch
*
* Check to see if a process is allowed to watch for event notifications from
* a key or keyring.
*
* Return: Returns 0 if permission is granted.
*/
int security_watch_key(struct key *key)
{
return call_int_hook(watch_key, key);
}
#endif /* CONFIG_KEY_NOTIFICATIONS */
#ifdef CONFIG_SECURITY_NETWORK
/**
* security_netlink_send() - Save info and check if netlink sending is allowed
* @sk: sending socket
* @skb: netlink message
*
* Save security information for a netlink message so that permission checking
* can be performed when the message is processed. The security information
* can be saved using the eff_cap field of the netlink_skb_parms structure.
* Also may be used to provide fine grained control over message transmission.
*
* Return: Returns 0 if the information was successfully saved and message is
* allowed to be transmitted.
*/
int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
return call_int_hook(netlink_send, sk, skb);
}
/**
* security_unix_stream_connect() - Check if a AF_UNIX stream is allowed
* @sock: originating sock
* @other: peer sock
* @newsk: new sock
*
* Check permissions before establishing a Unix domain stream connection
* between @sock and @other.
*
* The @unix_stream_connect and @unix_may_send hooks were necessary because
* Linux provides an alternative to the conventional file name space for Unix
* domain sockets. Whereas binding and connecting to sockets in the file name
* space is mediated by the typical file permissions (and caught by the mknod
* and permission hooks in inode_security_ops), binding and connecting to
* sockets in the abstract name space is completely unmediated. Sufficient
* control of Unix domain sockets in the abstract name space isn't possible
* using only the socket layer hooks, since we need to know the actual target
* socket, which is not looked up until we are inside the af_unix code.
*
* Return: Returns 0 if permission is granted.
*/
int security_unix_stream_connect(struct sock *sock, struct sock *other,
struct sock *newsk)
{
return call_int_hook(unix_stream_connect, sock, other, newsk);
}
EXPORT_SYMBOL(security_unix_stream_connect);
/**
* security_unix_may_send() - Check if AF_UNIX socket can send datagrams
* @sock: originating sock
* @other: peer sock
*
* Check permissions before connecting or sending datagrams from @sock to
* @other.
*
* The @unix_stream_connect and @unix_may_send hooks were necessary because
* Linux provides an alternative to the conventional file name space for Unix
* domain sockets. Whereas binding and connecting to sockets in the file name
* space is mediated by the typical file permissions (and caught by the mknod
* and permission hooks in inode_security_ops), binding and connecting to
* sockets in the abstract name space is completely unmediated. Sufficient
* control of Unix domain sockets in the abstract name space isn't possible
* using only the socket layer hooks, since we need to know the actual target
* socket, which is not looked up until we are inside the af_unix code.
*
* Return: Returns 0 if permission is granted.
*/
int security_unix_may_send(struct socket *sock, struct socket *other)
{
return call_int_hook(unix_may_send, sock, other);
}
EXPORT_SYMBOL(security_unix_may_send);
/**
* security_socket_create() - Check if creating a new socket is allowed
* @family: protocol family
* @type: communications type
* @protocol: requested protocol
* @kern: set to 1 if a kernel socket is requested
*
* Check permissions prior to creating a new socket.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_create(int family, int type, int protocol, int kern)
{
return call_int_hook(socket_create, family, type, protocol, kern);
}
/**
* security_socket_post_create() - Initialize a newly created socket
* @sock: socket
* @family: protocol family
* @type: communications type
* @protocol: requested protocol
* @kern: set to 1 if a kernel socket is requested
*
* This hook allows a module to update or allocate a per-socket security
* structure. Note that the security field was not added directly to the socket
* structure, but rather, the socket security information is stored in the
* associated inode. Typically, the inode alloc_security hook will allocate
* and attach security information to SOCK_INODE(sock)->i_security. This hook
* may be used to update the SOCK_INODE(sock)->i_security field with additional
* information that wasn't available when the inode was allocated.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
return call_int_hook(socket_post_create, sock, family, type,
protocol, kern);
}
/**
* security_socket_socketpair() - Check if creating a socketpair is allowed
* @socka: first socket
* @sockb: second socket
*
* Check permissions before creating a fresh pair of sockets.
*
* Return: Returns 0 if permission is granted and the connection was
* established.
*/
int security_socket_socketpair(struct socket *socka, struct socket *sockb)
{
return call_int_hook(socket_socketpair, socka, sockb);
}
EXPORT_SYMBOL(security_socket_socketpair);
/**
* security_socket_bind() - Check if a socket bind operation is allowed
* @sock: socket
* @address: requested bind address
* @addrlen: length of address
*
* Check permission before socket protocol layer bind operation is performed
* and the socket @sock is bound to the address specified in the @address
* parameter.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_bind(struct socket *sock,
struct sockaddr *address, int addrlen)
{
return call_int_hook(socket_bind, sock, address, addrlen);
}
/**
* security_socket_connect() - Check if a socket connect operation is allowed
* @sock: socket
* @address: address of remote connection point
* @addrlen: length of address
*
* Check permission before socket protocol layer connect operation attempts to
* connect socket @sock to a remote address, @address.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_connect(struct socket *sock,
struct sockaddr *address, int addrlen)
{
return call_int_hook(socket_connect, sock, address, addrlen);
}
/**
* security_socket_listen() - Check if a socket is allowed to listen
* @sock: socket
* @backlog: connection queue size
*
* Check permission before socket protocol layer listen operation.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_listen(struct socket *sock, int backlog)
{
return call_int_hook(socket_listen, sock, backlog);
}
/**
* security_socket_accept() - Check if a socket is allowed to accept connections
* @sock: listening socket
* @newsock: newly created connection socket
*
* Check permission before accepting a new connection. Note that the new
* socket, @newsock, has been created and some information copied to it, but
* the accept operation has not actually been performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_accept(struct socket *sock, struct socket *newsock)
{
return call_int_hook(socket_accept, sock, newsock);
}
/**
* security_socket_sendmsg() - Check if sending a message is allowed
* @sock: sending socket
* @msg: message to send
* @size: size of message
*
* Check permission before transmitting a message to another socket.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
{
return call_int_hook(socket_sendmsg, sock, msg, size);
}
/**
* security_socket_recvmsg() - Check if receiving a message is allowed
* @sock: receiving socket
* @msg: message to receive
* @size: size of message
* @flags: operational flags
*
* Check permission before receiving a message from a socket.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
int size, int flags)
{
return call_int_hook(socket_recvmsg, sock, msg, size, flags);
}
/**
* security_socket_getsockname() - Check if reading the socket addr is allowed
* @sock: socket
*
* Check permission before reading the local address (name) of the socket
* object.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_getsockname(struct socket *sock)
{
return call_int_hook(socket_getsockname, sock);
}
/**
* security_socket_getpeername() - Check if reading the peer's addr is allowed
* @sock: socket
*
* Check permission before reading the remote address (name) of the socket
* object.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_getpeername(struct socket *sock)
{
return call_int_hook(socket_getpeername, sock);
}
/**
* security_socket_getsockopt() - Check if reading a socket option is allowed
* @sock: socket
* @level: option's protocol level
* @optname: option name
*
* Check permissions before retrieving the options associated with socket
* @sock.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_getsockopt(struct socket *sock, int level, int optname)
{
return call_int_hook(socket_getsockopt, sock, level, optname);
}
/**
* security_socket_setsockopt() - Check if setting a socket option is allowed
* @sock: socket
* @level: option's protocol level
* @optname: option name
*
* Check permissions before setting the options associated with socket @sock.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_setsockopt(struct socket *sock, int level, int optname)
{
return call_int_hook(socket_setsockopt, sock, level, optname);
}
/**
* security_socket_shutdown() - Checks if shutting down the socket is allowed
* @sock: socket
* @how: flag indicating how sends and receives are handled
*
* Checks permission before all or part of a connection on the socket @sock is
* shut down.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_shutdown(struct socket *sock, int how)
{
return call_int_hook(socket_shutdown, sock, how);
}
/**
* security_sock_rcv_skb() - Check if an incoming network packet is allowed
* @sk: destination sock
* @skb: incoming packet
*
* Check permissions on incoming network packets. This hook is distinct from
* Netfilter's IP input hooks since it is the first time that the incoming
* sk_buff @skb has been associated with a particular socket, @sk. Must not
* sleep inside this hook because some callers hold spinlocks.
*
* Return: Returns 0 if permission is granted.
*/
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
return call_int_hook(socket_sock_rcv_skb, sk, skb);
}
EXPORT_SYMBOL(security_sock_rcv_skb);
/**
* security_socket_getpeersec_stream() - Get the remote peer label
* @sock: socket
* @optval: destination buffer
* @optlen: size of peer label copied into the buffer
* @len: maximum size of the destination buffer
*
* This hook allows the security module to provide peer socket security state
* for unix or connected tcp sockets to userspace via getsockopt SO_GETPEERSEC.
* For tcp sockets this can be meaningful if the socket is associated with an
* ipsec SA.
*
* Return: Returns 0 if all is well, otherwise, typical getsockopt return
* values.
*/
int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
sockptr_t optlen, unsigned int len)
{
return call_int_hook(socket_getpeersec_stream, sock, optval, optlen,
len);
}
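/*
* Userspace sketch (illustrative): the state exposed by this hook reaches
* applications through getsockopt(2); userspace spells the option
* SO_PEERSEC at the SOL_SOCKET level.
*
* char label[256];
* socklen_t len = sizeof(label);
* if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len) == 0)
* printf("peer label: %.*s\n", (int)len, label);
*/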
/**
* security_socket_getpeersec_dgram() - Get the remote peer label
* @sock: socket
* @skb: datagram packet
* @secid: remote peer label secid
*
* This hook allows the security module to provide peer socket security state
* for udp sockets on a per-packet basis to userspace via getsockopt
* SO_GETPEERSEC. The application must first have indicated the IP_PASSSEC
* option via setsockopt. It can then retrieve the security state returned by
* this hook for a packet via the SCM_SECURITY ancillary message type.
*
* Return: Returns 0 on success, error on failure.
*/
int security_socket_getpeersec_dgram(struct socket *sock,
struct sk_buff *skb, u32 *secid)
{
return call_int_hook(socket_getpeersec_dgram, sock, skb, secid);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
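/*
* Userspace sketch (illustrative; error handling omitted): enable
* per-datagram labels with IP_PASSSEC, then read the SCM_SECURITY ancillary
* data attached to each received datagram.
*
* int one = 1;
* setsockopt(fd, SOL_IP, IP_PASSSEC, &one, sizeof(one));
* // ... build a struct msghdr with msg_control space, then:
* recvmsg(fd, &msg, 0);
* for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
* if (c->cmsg_level == SOL_IP && c->cmsg_type == SCM_SECURITY)
* printf("label: %s\n", (char *)CMSG_DATA(c));
*/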
/**
* lsm_sock_alloc - allocate a composite sock blob
* @sock: the sock that needs a blob
* @gfp: allocation mode
*
* Allocate the sock blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_sock_alloc(struct sock *sock, gfp_t gfp)
{
return lsm_blob_alloc(&sock->sk_security, blob_sizes.lbs_sock, gfp);
}
/**
* security_sk_alloc() - Allocate and initialize a sock's LSM blob
* @sk: sock
* @family: protocol family
* @priority: gfp flags
*
* Allocate and attach a security structure to the sk->sk_security field, which
* is used to copy security attributes between local stream sockets.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
int rc = lsm_sock_alloc(sk, priority);
if (unlikely(rc))
return rc;
rc = call_int_hook(sk_alloc_security, sk, family, priority);
if (unlikely(rc))
security_sk_free(sk);
return rc;
}
/**
* security_sk_free() - Free the sock's LSM blob
* @sk: sock
*
* Deallocate security structure.
*/
void security_sk_free(struct sock *sk)
{
call_void_hook(sk_free_security, sk);
kfree(sk->sk_security);
sk->sk_security = NULL;
}
/**
* security_sk_clone() - Clone a sock's LSM state
* @sk: original sock
* @newsk: target sock
*
* Clone/copy security structure.
*/
void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
call_void_hook(sk_clone_security, sk, newsk);
}
EXPORT_SYMBOL(security_sk_clone);
/**
* security_sk_classify_flow() - Set a flow's secid based on socket
* @sk: original socket
* @flic: target flow
*
* Set the target flow's secid to socket's secid.
*/
void security_sk_classify_flow(const struct sock *sk, struct flowi_common *flic)
{
call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);
/**
* security_req_classify_flow() - Set a flow's secid based on request_sock
* @req: request_sock
* @flic: target flow
*
* Sets @flic's secid to @req's secid.
*/
void security_req_classify_flow(const struct request_sock *req,
struct flowi_common *flic)
{
call_void_hook(req_classify_flow, req, flic);
}
EXPORT_SYMBOL(security_req_classify_flow);
/**
* security_sock_graft() - Reconcile LSM state when grafting a sock on a socket
* @sk: sock being grafted
* @parent: target parent socket
*
* Sets @parent's inode secid to @sk's secid and updates @sk with any necessary
* LSM state from @parent.
*/
void security_sock_graft(struct sock *sk, struct socket *parent)
{
call_void_hook(sock_graft, sk, parent);
}
EXPORT_SYMBOL(security_sock_graft);
/**
* security_inet_conn_request() - Set request_sock state using incoming connect
* @sk: parent listening sock
* @skb: incoming connection
* @req: new request_sock
*
* Initialize the @req LSM state based on @sk and the incoming connect in @skb.
*
* Return: Returns 0 if permission is granted.
*/
int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return call_int_hook(inet_conn_request, sk, skb, req);
}
EXPORT_SYMBOL(security_inet_conn_request);
/**
* security_inet_csk_clone() - Set new sock LSM state based on request_sock
* @newsk: new sock
* @req: connection request_sock
*
* Set the LSM state of @newsk using the LSM state from @req.
*/
void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req)
{
call_void_hook(inet_csk_clone, newsk, req);
}
/**
* security_inet_conn_established() - Update sock's LSM state with connection
* @sk: sock
* @skb: connection packet
*
* Update @sk's LSM state to represent a new connection from @skb.
*/
void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb)
{
call_void_hook(inet_conn_established, sk, skb);
}
EXPORT_SYMBOL(security_inet_conn_established);
/**
* security_secmark_relabel_packet() - Check if setting a secmark is allowed
* @secid: new secmark value
*
* Check if the process should be allowed to relabel packets to @secid.
*
* Return: Returns 0 if permission is granted.
*/
int security_secmark_relabel_packet(u32 secid)
{
return call_int_hook(secmark_relabel_packet, secid);
}
EXPORT_SYMBOL(security_secmark_relabel_packet);
/**
* security_secmark_refcount_inc() - Increment the secmark labeling rule count
*
* Tells the LSM to increment the number of secmark labeling rules loaded.
*/
void security_secmark_refcount_inc(void)
{
call_void_hook(secmark_refcount_inc);
}
EXPORT_SYMBOL(security_secmark_refcount_inc);
/**
* security_secmark_refcount_dec() - Decrement the secmark labeling rule count
*
* Tells the LSM to decrement the number of secmark labeling rules loaded.
*/
void security_secmark_refcount_dec(void)
{
call_void_hook(secmark_refcount_dec);
}
EXPORT_SYMBOL(security_secmark_refcount_dec);
/**
* security_tun_dev_alloc_security() - Allocate a LSM blob for a TUN device
* @security: pointer to the LSM blob
*
* This hook allows a module to allocate a security structure for a TUN device,
* returning the pointer in @security.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_tun_dev_alloc_security(void **security)
{
int rc;
rc = lsm_blob_alloc(security, blob_sizes.lbs_tun_dev, GFP_KERNEL);
if (rc)
return rc;
rc = call_int_hook(tun_dev_alloc_security, *security);
if (rc) {
kfree(*security);
*security = NULL;
}
return rc;
}
EXPORT_SYMBOL(security_tun_dev_alloc_security);
/**
* security_tun_dev_free_security() - Free a TUN device LSM blob
* @security: LSM blob
*
* This hook allows a module to free the security structure for a TUN device.
*/
void security_tun_dev_free_security(void *security)
{
kfree(security);
}
EXPORT_SYMBOL(security_tun_dev_free_security);
/**
* security_tun_dev_create() - Check if creating a TUN device is allowed
*
* Check permissions prior to creating a new TUN device.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_create(void)
{
return call_int_hook(tun_dev_create);
}
EXPORT_SYMBOL(security_tun_dev_create);
/**
* security_tun_dev_attach_queue() - Check if attaching a TUN queue is allowed
* @security: TUN device LSM blob
*
* Check permissions prior to attaching to a TUN device queue.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_attach_queue(void *security)
{
return call_int_hook(tun_dev_attach_queue, security);
}
EXPORT_SYMBOL(security_tun_dev_attach_queue);
/**
* security_tun_dev_attach() - Update TUN device LSM state on attach
* @sk: associated sock
* @security: TUN device LSM blob
*
* This hook can be used by the module to update any security state associated
* with the TUN device's sock structure.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_attach(struct sock *sk, void *security)
{
return call_int_hook(tun_dev_attach, sk, security);
}
EXPORT_SYMBOL(security_tun_dev_attach);
/**
* security_tun_dev_open() - Update TUN device LSM state on open
* @security: TUN device LSM blob
*
* This hook can be used by the module to update any security state associated
* with the TUN device's security structure.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_open(void *security)
{
return call_int_hook(tun_dev_open, security);
}
EXPORT_SYMBOL(security_tun_dev_open);
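/*
 * Illustrative sketch, not part of the original file: the order in which a
 * TUN-like driver might use the hooks above when a device is created and a
 * sock is attached to it.  The helper name and the tun_sec pointer are
 * hypothetical.
 */
static int example_tun_create_and_attach(struct sock *sk, void **tun_sec)
{
int err;
err = security_tun_dev_create();
if (err)
return err;
err = security_tun_dev_alloc_security(tun_sec);
if (err)
return err;
err = security_tun_dev_attach(sk, *tun_sec);
if (err) {
security_tun_dev_free_security(*tun_sec);
*tun_sec = NULL;
}
return err;
}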
/**
* security_sctp_assoc_request() - Update the LSM on a SCTP association req
* @asoc: SCTP association
* @skb: packet requesting the association
*
* Passes the @asoc and @chunk->skb of the association INIT packet to the LSM.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return call_int_hook(sctp_assoc_request, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_request);
/**
* security_sctp_bind_connect() - Validate a list of addrs for a SCTP option
* @sk: socket
* @optname: SCTP option to validate
* @address: list of IP addresses to validate
* @addrlen: length of the address list
*
* Validate permissions required for each address associated with sock @sk.
* Depending on @optname, the addresses will be treated as either a connect or
* bind service. The @addrlen is calculated on each IPv4 and IPv6 address using
* sizeof(struct sockaddr_in) or sizeof(struct sockaddr_in6).
*
* Return: Returns 0 on success, error on failure.
*/
int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address, int addrlen)
{
return call_int_hook(sctp_bind_connect, sk, optname, address, addrlen);
}
EXPORT_SYMBOL(security_sctp_bind_connect);
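/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * validating a packed list of two IPv4 addresses for SCTP_SOCKOPT_BINDX_ADD.
 * The address list must already be laid out as consecutive sockaddr_in /
 * sockaddr_in6 entries, as described above; only the option name is a real
 * SCTP constant, the rest is made up for the example.
 */
static int example_sctp_bindx_check(struct sock *sk, struct sockaddr *addrs)
{
/* Two IPv4 entries, so @addrlen is the sum of their sockaddr_in sizes. */
return security_sctp_bind_connect(sk, SCTP_SOCKOPT_BINDX_ADD, addrs,
2 * sizeof(struct sockaddr_in));
}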
/**
* security_sctp_sk_clone() - Clone a SCTP sock's LSM state
* @asoc: SCTP association
* @sk: original sock
* @newsk: target sock
*
* Called whenever a new socket is created by accept(2) (i.e. a TCP style
* socket) or when a socket is 'peeled off', e.g. when userspace calls
* sctp_peeloff(3).
*/
void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk)
{
call_void_hook(sctp_sk_clone, asoc, sk, newsk);
}
EXPORT_SYMBOL(security_sctp_sk_clone);
/**
* security_sctp_assoc_established() - Update LSM state when assoc established
* @asoc: SCTP association
* @skb: packet establishing the association
*
* Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet to the
* security module.
*
* Return: Returns 0 if permission is granted.
*/
int security_sctp_assoc_established(struct sctp_association *asoc,
struct sk_buff *skb)
{
return call_int_hook(sctp_assoc_established, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_established);
/**
* security_mptcp_add_subflow() - Inherit the LSM label from the MPTCP socket
* @sk: the owning MPTCP socket
* @ssk: the new subflow
*
* Update the labeling for the given MPTCP subflow to match that of the
* owning MPTCP socket. This hook has to be called after the socket creation and
* initialization via the security_socket_create() and
* security_socket_post_create() LSM hooks.
*
* Return: Returns 0 on success or a negative error code on failure.
*/
int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
{
return call_int_hook(mptcp_add_subflow, sk, ssk);
}
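/*
 * Illustrative sketch, not part of the original file: a hypothetical MPTCP
 * path-manager helper labelling a freshly created subflow.  By the time this
 * runs, security_socket_create() and security_socket_post_create() must
 * already have been called for the subflow's socket, as required above.
 */
static int example_mptcp_label_subflow(struct sock *msk, struct sock *ssk)
{
return security_mptcp_add_subflow(msk, ssk);
}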
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
/**
* security_ib_pkey_access() - Check if access to an IB pkey is allowed
* @sec: LSM blob
* @subnet_prefix: subnet prefix of the port
* @pkey: IB pkey
*
* Check permission to access a pkey when modifying a QP.
*
* Return: Returns 0 if permission is granted.
*/
int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
{
return call_int_hook(ib_pkey_access, sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(security_ib_pkey_access);
/**
* security_ib_endport_manage_subnet() - Check if SMPs traffic is allowed
* @sec: LSM blob
* @dev_name: IB device name
* @port_num: port number
*
* Check permissions to send and receive SMPs on an end port.
*
* Return: Returns 0 if permission is granted.
*/
int security_ib_endport_manage_subnet(void *sec,
const char *dev_name, u8 port_num)
{
return call_int_hook(ib_endport_manage_subnet, sec, dev_name, port_num);
}
EXPORT_SYMBOL(security_ib_endport_manage_subnet);
/**
* security_ib_alloc_security() - Allocate an Infiniband LSM blob
* @sec: LSM blob
*
* Allocate a security structure for Infiniband objects.
*
* Return: Returns 0 on success, non-zero on failure.
*/
int security_ib_alloc_security(void **sec)
{
int rc;
rc = lsm_blob_alloc(sec, blob_sizes.lbs_ib, GFP_KERNEL);
if (rc)
return rc;
rc = call_int_hook(ib_alloc_security, *sec);
if (rc) {
kfree(*sec);
*sec = NULL;
}
return rc;
}
EXPORT_SYMBOL(security_ib_alloc_security);
/**
* security_ib_free_security() - Free an Infiniband LSM blob
* @sec: LSM blob
*
* Deallocate an Infiniband security structure.
*/
void security_ib_free_security(void *sec)
{
kfree(sec);
}
EXPORT_SYMBOL(security_ib_free_security);
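/*
 * Illustrative sketch, not part of the original file: a hypothetical IB core
 * helper that allocates per-object security state, checks pkey access with
 * it and releases it again.  The helper name and the call sequence are
 * assumptions made for the example.
 */
static int example_ib_check_pkey(u64 subnet_prefix, u16 pkey)
{
void *sec;
int err;
err = security_ib_alloc_security(&sec);
if (err)
return err;
err = security_ib_pkey_access(sec, subnet_prefix, pkey);
security_ib_free_security(sec);
return err;
}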
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/**
* security_xfrm_policy_alloc() - Allocate a xfrm policy LSM blob
* @ctxp: xfrm security context being added to the SPD
* @sec_ctx: security label provided by userspace
* @gfp: gfp flags
*
* Allocate a security structure to the xp->security field; the security field
* is initialized to NULL when the xfrm_policy is allocated.
*
* Return: Return 0 if operation was successful.
*/
int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *sec_ctx,
gfp_t gfp)
{
return call_int_hook(xfrm_policy_alloc_security, ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);
/**
* security_xfrm_policy_clone() - Clone xfrm policy LSM state
* @old_ctx: xfrm security context
* @new_ctxp: target xfrm security context
*
* Allocate a security structure in new_ctxp that contains the information from
* the old_ctx structure.
*
* Return: Return 0 if operation was successful.
*/
int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
return call_int_hook(xfrm_policy_clone_security, old_ctx, new_ctxp);
}
/**
* security_xfrm_policy_free() - Free a xfrm security context
* @ctx: xfrm security context
*
* Free LSM resources associated with @ctx.
*/
void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
call_void_hook(xfrm_policy_free_security, ctx);
}
EXPORT_SYMBOL(security_xfrm_policy_free);
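/*
 * Illustrative sketch, not part of the original file: a hypothetical SPD
 * update path allocating a policy context from a userspace label and
 * releasing it if a later step fails.  Everything except the
 * security_xfrm_policy_*() calls is made up for the example.
 */
static int example_spd_insert(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *uctx, bool later_step_failed)
{
int err;
err = security_xfrm_policy_alloc(ctxp, uctx, GFP_KERNEL);
if (err)
return err;
if (later_step_failed) {
security_xfrm_policy_free(*ctxp);
*ctxp = NULL;
return -EINVAL;
}
return 0;
}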
/**
* security_xfrm_policy_delete() - Check if deleting a xfrm policy is allowed
* @ctx: xfrm security context
*
* Authorize deletion of a SPD entry.
*
* Return: Returns 0 if permission is granted.
*/
int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
return call_int_hook(xfrm_policy_delete_security, ctx);
}
/**
* security_xfrm_state_alloc() - Allocate a xfrm state LSM blob
* @x: xfrm state being added to the SAD
* @sec_ctx: security label provided by userspace
*
* Allocate a security structure to the @x->security field; the security field
* is initialized to NULL when the xfrm_state is allocated. Set the context to
* correspond to @sec_ctx.
*
* Return: Return 0 if operation was successful.
*/
int security_xfrm_state_alloc(struct xfrm_state *x,
struct xfrm_user_sec_ctx *sec_ctx)
{
return call_int_hook(xfrm_state_alloc, x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);
/**
* security_xfrm_state_alloc_acquire() - Allocate a xfrm state LSM blob
* @x: xfrm state being added to the SAD
* @polsec: associated policy's security context
* @secid: secid from the flow
*
* Allocate a security structure to the x->security field; the security field
* is initialized to NULL when the xfrm_state is allocated. Set the context to
* correspond to secid.
*
* Return: Returns 0 if operation was successful.
*/
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
return call_int_hook(xfrm_state_alloc_acquire, x, polsec, secid);
}
/**
* security_xfrm_state_delete() - Check if deleting a xfrm state is allowed
* @x: xfrm state
*
* Authorize deletion of x->security.
*
* Return: Returns 0 if permission is granted.
*/
int security_xfrm_state_delete(struct xfrm_state *x)
{
return call_int_hook(xfrm_state_delete_security, x);
}
EXPORT_SYMBOL(security_xfrm_state_delete);
/**
* security_xfrm_state_free() - Free a xfrm state
* @x: xfrm state
*
* Deallocate x->security.
*/
void security_xfrm_state_free(struct xfrm_state *x)
{
call_void_hook(xfrm_state_free_security, x);
}
/**
* security_xfrm_policy_lookup() - Check if using a xfrm policy is allowed
* @ctx: target xfrm security context
* @fl_secid: flow secid used to authorize access
*
* Check permission when a flow selects a xfrm_policy for processing XFRMs on a
* packet. The hook is called when selecting either a per-socket policy or a
* generic xfrm policy.
*
* Return: Return 0 if permission is granted, -ESRCH otherwise, or -errno on
* other errors.
*/
int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return call_int_hook(xfrm_policy_lookup, ctx, fl_secid);
}
/**
* security_xfrm_state_pol_flow_match() - Check for a xfrm match
* @x: xfrm state to match
* @xp: xfrm policy to check for a match
* @flic: flow to check for a match.
*
* Check @xp and @flic for a match with @x.
*
* Return: Returns 1 if there is a match.
*/
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic)
{
struct lsm_static_call *scall;
int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);
/*
* Since this function is expected to return 0 or 1, the judgment
* becomes difficult if multiple LSMs supply this call. Fortunately,
* we can use the first LSM's judgment because currently only SELinux
* supplies this call.
*
* For speed optimization, we explicitly break the loop rather than
* using the macro.
*/
lsm_for_each_hook(scall, xfrm_state_pol_flow_match) {
rc = scall->hl->hook.xfrm_state_pol_flow_match(x, xp, flic);
break;
}
return rc;
}
/**
* security_xfrm_decode_session() - Determine the xfrm secid for a packet
* @skb: xfrm packet
* @secid: secid
*
* Decode the packet in @skb and return the security label in @secid.
*
* Return: Return 0 if all xfrms used have the same secid.
*/
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
{
return call_int_hook(xfrm_decode_session, skb, secid, 1);
}
void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic)
{
int rc = call_int_hook(xfrm_decode_session, skb, &flic->flowic_secid,
0);
BUG_ON(rc);
}
EXPORT_SYMBOL(security_skb_classify_flow);
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
#ifdef CONFIG_KEYS
/**
* security_key_alloc() - Allocate and initialize a kernel key LSM blob
* @key: key
* @cred: credentials
* @flags: allocation flags
*
* Permit allocation of a key and assign security data. Note that key does not
* have a serial number assigned at this point.
*
* Return: Return 0 if permission is granted, -ve error otherwise.
*/
int security_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
{
int rc = lsm_key_alloc(key);
if (unlikely(rc))
return rc;
rc = call_int_hook(key_alloc, key, cred, flags);
if (unlikely(rc))
security_key_free(key);
return rc;
}
/**
* security_key_free() - Free a kernel key LSM blob
* @key: key
*
* Notification of destruction; free security data.
*/
void security_key_free(struct key *key)
{
kfree(key->security);
key->security = NULL;
}
/**
* security_key_permission() - Check if a kernel key operation is allowed
* @key_ref: key reference
* @cred: credentials of actor requesting access
* @need_perm: requested permissions
*
* See whether a specific operational right is granted to a process on a key.
*
* Return: Return 0 if permission is granted, -ve error otherwise.
*/
int security_key_permission(key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm)
{
return call_int_hook(key_permission, key_ref, cred, need_perm);
}
/**
* security_key_getsecurity() - Get the key's security label
* @key: key
* @buffer: security label buffer
*
* Get a textual representation of the security context attached to a key for
* the purposes of honouring KEYCTL_GETSECURITY. This function allocates the
* storage for the NUL-terminated string and the caller should free it.
*
* Return: Returns the length of @buffer (including terminating NUL) or -ve if
* an error occurs. May also return 0 (and a NULL buffer pointer) if
* there is no security label assigned to the key.
*/
int security_key_getsecurity(struct key *key, char **buffer)
{
*buffer = NULL;
return call_int_hook(key_getsecurity, key, buffer);
}
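/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * honouring the ownership rule above - the returned string, if any, belongs
 * to the caller and must be freed with kfree().
 */
static int example_print_key_label(struct key *key)
{
char *label;
int len;
len = security_key_getsecurity(key, &label);
if (len > 0)
pr_info("key %d label: %s\n", key->serial, label);
/* kfree(NULL) is a no-op, so this is safe when no label was returned. */
kfree(label);
return len;
}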
/**
* security_key_post_create_or_update() - Notification of key create or update
* @keyring: keyring to which the key is linked to
* @key: created or updated key
* @payload: data used to instantiate or update the key
* @payload_len: length of payload
* @flags: key flags
* @create: flag indicating whether the key was created or updated
*
* Notify the caller of a key creation or update.
*/
void security_key_post_create_or_update(struct key *keyring, struct key *key,
const void *payload, size_t payload_len,
unsigned long flags, bool create)
{
call_void_hook(key_post_create_or_update, keyring, key, payload,
payload_len, flags, create);
}
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
/**
* security_audit_rule_init() - Allocate and init an LSM audit rule struct
* @field: audit action
* @op: rule operator
* @rulestr: rule context
* @lsmrule: receive buffer for audit rule struct
* @gfp: GFP flag used for kmalloc
*
* Allocate and initialize an LSM audit rule structure.
*
* Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of
* an invalid rule.
*/
int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
gfp_t gfp)
{
return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp);
}
/**
* security_audit_rule_known() - Check if an audit rule contains LSM fields
* @krule: audit rule
*
* Specifies whether given @krule contains any fields related to the current
* LSM.
*
* Return: Returns 1 in case of relation found, 0 otherwise.
*/
int security_audit_rule_known(struct audit_krule *krule)
{
return call_int_hook(audit_rule_known, krule);
}
/**
* security_audit_rule_free() - Free an LSM audit rule struct
* @lsmrule: audit rule struct
*
* Deallocate the LSM audit rule structure previously allocated by
* audit_rule_init().
*/
void security_audit_rule_free(void *lsmrule)
{
call_void_hook(audit_rule_free, lsmrule);
}
/**
* security_audit_rule_match() - Check if a label matches an audit rule
* @prop: security label
* @field: LSM audit field
* @op: matching operator
* @lsmrule: audit rule
*
* Determine if the given @prop matches a rule previously approved by
* security_audit_rule_known().
*
* Return: Returns 1 if @prop matches the rule, 0 if it does not, -ERRNO on
* failure.
*/
int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
void *lsmrule)
{
return call_int_hook(audit_rule_match, prop, field, op, lsmrule);
}
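/*
 * Illustrative sketch, not part of the original file: the init/match/free
 * sequence the audit subsystem drives for one LSM rule.  The field, operator
 * and rule string are placeholders; real callers take them from the audit
 * rule being loaded.
 */
static int example_audit_rule_cycle(struct lsm_prop *prop, u32 field, u32 op,
char *rulestr)
{
void *lsmrule = NULL;
int err;
err = security_audit_rule_init(field, op, rulestr, &lsmrule, GFP_KERNEL);
if (err)
return err;
err = security_audit_rule_match(prop, field, op, lsmrule);
security_audit_rule_free(lsmrule);
return err;
}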
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
/**
* security_bpf() - Check if the bpf syscall operation is allowed
* @cmd: command
* @attr: bpf attribute
* @size: size
* @kernel: whether or not call originated from kernel
*
* Do an initial check for all bpf syscalls after the attribute is copied into
* the kernel. The actual security module can implement its own rules to
* check the specific cmd it needs.
*
* Return: Returns 0 if permission is granted.
*/
int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
return call_int_hook(bpf, cmd, attr, size, kernel);
}
/**
* security_bpf_map() - Check if access to a bpf map is allowed
* @map: bpf map
* @fmode: mode
*
* Do a check when the kernel generates and returns a file descriptor for eBPF
* maps.
*
* Return: Returns 0 if permission is granted.
*/
int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
return call_int_hook(bpf_map, map, fmode);
}
/**
* security_bpf_prog() - Check if access to a bpf program is allowed
* @prog: bpf program
*
* Do a check when the kernel generates and returns a file descriptor for eBPF
* programs.
*
* Return: Returns 0 if permission is granted.
*/
int security_bpf_prog(struct bpf_prog *prog)
{
return call_int_hook(bpf_prog, prog);
}
/**
* security_bpf_map_create() - Check if BPF map creation is allowed
* @map: BPF map object
* @attr: BPF syscall attributes used to create BPF map
* @token: BPF token used to grant user access
* @kernel: whether or not call originated from kernel
*
* Do a check when the kernel creates a new BPF map. This is also the
* point where the LSM blob is allocated for LSMs that need one.
*
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
struct bpf_token *token, bool kernel)
{
int rc;
rc = lsm_bpf_map_alloc(map);
if (unlikely(rc))
return rc;
rc = call_int_hook(bpf_map_create, map, attr, token, kernel);
if (unlikely(rc))
security_bpf_map_free(map);
return rc;
}
/**
* security_bpf_prog_load() - Check if loading of BPF program is allowed
* @prog: BPF program object
* @attr: BPF syscall attributes used to create BPF program
* @token: BPF token used to grant user access to BPF subsystem
* @kernel: whether or not call originated from kernel
*
* Perform an access control check when the kernel loads a BPF program and
* allocates associated BPF program object. This hook is also responsible for
* allocating any required LSM state for the BPF program.
*
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token, bool kernel)
{
int rc;
rc = lsm_bpf_prog_alloc(prog);
if (unlikely(rc))
return rc;
rc = call_int_hook(bpf_prog_load, prog, attr, token, kernel);
if (unlikely(rc))
security_bpf_prog_free(prog);
return rc;
}
/**
* security_bpf_token_create() - Check if creating of BPF token is allowed
* @token: BPF token object
* @attr: BPF syscall attributes used to create BPF token
* @path: path pointing to BPF FS mount point from which BPF token is created
*
* Do a check when the kernel instantiates a new BPF token object from a BPF FS
* instance. This is also the point where an LSM blob can be allocated for LSMs.
*
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
const struct path *path)
{
int rc;
rc = lsm_bpf_token_alloc(token);
if (unlikely(rc))
return rc;
rc = call_int_hook(bpf_token_create, token, attr, path);
if (unlikely(rc))
security_bpf_token_free(token);
return rc;
}
/**
* security_bpf_token_cmd() - Check if BPF token is allowed to delegate
* requested BPF syscall command
* @token: BPF token object
* @cmd: BPF syscall command requested to be delegated by BPF token
*
* Do a check when the kernel decides whether the provided BPF token should
* allow delegation of the requested BPF syscall command.
*
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
{
return call_int_hook(bpf_token_cmd, token, cmd);
}
/**
* security_bpf_token_capable() - Check if BPF token is allowed to delegate
* requested BPF-related capability
* @token: BPF token object
* @cap: capabilities requested to be delegated by BPF token
*
* Do a check when the kernel decides whether the provided BPF token should
* allow delegation of the requested BPF-related capabilities.
*
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_token_capable(const struct bpf_token *token, int cap)
{
return call_int_hook(bpf_token_capable, token, cap);
}
/**
* security_bpf_map_free() - Free a bpf map's LSM blob
* @map: bpf map
*
* Clean up the security information stored inside bpf map.
*/
void security_bpf_map_free(struct bpf_map *map)
{
call_void_hook(bpf_map_free, map);
kfree(map->security);
map->security = NULL;
}
/**
* security_bpf_prog_free() - Free a BPF program's LSM blob
* @prog: BPF program struct
*
* Clean up the security information stored inside BPF program.
*/
void security_bpf_prog_free(struct bpf_prog *prog)
{
call_void_hook(bpf_prog_free, prog);
kfree(prog->aux->security);
prog->aux->security = NULL;
}
/**
* security_bpf_token_free() - Free a BPF token's LSM blob
* @token: BPF token struct
*
* Clean up the security information stored inside BPF token.
*/
void security_bpf_token_free(struct bpf_token *token)
{
call_void_hook(bpf_token_free, token);
kfree(token->security);
token->security = NULL;
}
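/*
 * Illustrative sketch, not part of the original file: the checks a
 * hypothetical delegation path might run against a freshly created BPF
 * token.  BPF_PROG_LOAD and CAP_BPF are real constants; the flow itself is
 * made up for the example.
 */
static int example_bpf_token_checks(struct bpf_token *token,
union bpf_attr *attr, const struct path *path)
{
int err;
err = security_bpf_token_create(token, attr, path);
if (err)
return err;
err = security_bpf_token_cmd(token, BPF_PROG_LOAD);
if (!err)
err = security_bpf_token_capable(token, CAP_BPF);
if (err)
security_bpf_token_free(token);
return err;
}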
#endif /* CONFIG_BPF_SYSCALL */
/**
* security_locked_down() - Check if a kernel feature is allowed
* @what: requested kernel feature
*
* Determine whether a kernel feature that potentially enables arbitrary code
* execution in kernel space should be permitted.
*
* Return: Returns 0 if permission is granted.
*/
int security_locked_down(enum lockdown_reason what)
{
return call_int_hook(locked_down, what);
}
EXPORT_SYMBOL(security_locked_down);
/**
* security_bdev_alloc() - Allocate a block device LSM blob
* @bdev: block device
*
* Allocate and attach a security structure to @bdev->bd_security. The
* security field is initialized to NULL when the bdev structure is
* allocated.
*
* Return: Return 0 if operation was successful.
*/
int security_bdev_alloc(struct block_device *bdev)
{
int rc = 0;
rc = lsm_bdev_alloc(bdev);
if (unlikely(rc))
return rc;
rc = call_int_hook(bdev_alloc_security, bdev);
if (unlikely(rc))
security_bdev_free(bdev);
return rc;
}
EXPORT_SYMBOL(security_bdev_alloc);
/**
* security_bdev_free() - Free a block device's LSM blob
* @bdev: block device
*
* Deallocate the bdev security structure and set @bdev->bd_security to NULL.
*/
void security_bdev_free(struct block_device *bdev)
{
if (!bdev->bd_security)
return;
call_void_hook(bdev_free_security, bdev);
kfree(bdev->bd_security);
bdev->bd_security = NULL;
}
EXPORT_SYMBOL(security_bdev_free);
/**
* security_bdev_setintegrity() - Set the device's integrity data
* @bdev: block device
* @type: type of integrity, e.g. hash digest, signature, etc
* @value: the integrity value
* @size: size of the integrity value
*
* Register a verified integrity measurement of a bdev with LSMs.
* LSMs should free the previously saved data if @value is NULL.
* Please note that the new hook should be invoked every time the security
* information is updated to keep these data current. For example, in dm-verity,
* if the mapping table is reloaded and configured to use a different dm-verity
* target with a new roothash and signing information, the previously stored
* data in the LSM blob will become obsolete. It is crucial to re-invoke the
* hook to refresh these data and ensure they are up to date. This necessity
* arises from the design of device-mapper, where a device-mapper device is
* first created, and then targets are subsequently loaded into it. These
* targets can be modified multiple times during the device's lifetime.
* Therefore, while the LSM blob is allocated during the creation of the block
* device, its actual contents are not initialized at this stage and can change
substantially over time. This includes alterations from data that the LSMs
trust to data they do not, making it essential to handle these changes
* correctly. Failure to address this dynamic aspect could potentially allow
* for bypassing LSM checks.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_bdev_setintegrity(struct block_device *bdev,
enum lsm_integrity_type type, const void *value,
size_t size)
{
return call_int_hook(bdev_setintegrity, bdev, type, value, size);
}
EXPORT_SYMBOL(security_bdev_setintegrity);
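/*
 * Illustrative sketch, not part of the original file: a dm-verity style
 * caller registering a new root digest after a table reload and clearing it
 * on teardown, as the comment above requires.  The helper names are
 * hypothetical and LSM_INT_DMVERITY_ROOTHASH is assumed to be the matching
 * lsm_integrity_type value.
 */
static int example_verity_update_roothash(struct block_device *bdev,
const u8 *digest, size_t digest_len)
{
return security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH,
digest, digest_len);
}
static void example_verity_clear_roothash(struct block_device *bdev)
{
/* A NULL value tells the LSMs to drop the previously saved digest. */
security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, NULL, 0);
}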
#ifdef CONFIG_PERF_EVENTS
/**
* security_perf_event_open() - Check if a perf event open is allowed
* @type: type of event
*
* Check whether the @type of perf_event_open syscall is allowed.
*
* Return: Returns 0 if permission is granted.
*/
int security_perf_event_open(int type)
{
return call_int_hook(perf_event_open, type);
}
/**
* security_perf_event_alloc() - Allocate a perf event LSM blob
* @event: perf event
*
* Allocate and save perf_event security info.
*
* Return: Returns 0 on success, error on failure.
*/
int security_perf_event_alloc(struct perf_event *event)
{
int rc;
rc = lsm_blob_alloc(&event->security, blob_sizes.lbs_perf_event,
GFP_KERNEL);
if (rc)
return rc;
rc = call_int_hook(perf_event_alloc, event);
if (rc) {
kfree(event->security);
event->security = NULL;
}
return rc;
}
/**
* security_perf_event_free() - Free a perf event LSM blob
* @event: perf event
*
* Release (free) perf_event security info.
*/
void security_perf_event_free(struct perf_event *event)
{
kfree(event->security);
event->security = NULL;
}
/**
* security_perf_event_read() - Check if reading a perf event label is allowed
* @event: perf event
*
* Read perf_event security info if allowed.
*
* Return: Returns 0 if permission is granted.
*/
int security_perf_event_read(struct perf_event *event)
{
return call_int_hook(perf_event_read, event);
}
/**
* security_perf_event_write() - Check if writing a perf event label is allowed
* @event: perf event
*
* Write perf_event security info if allowed.
*
* Return: Returns 0 if permission is granted.
*/
int security_perf_event_write(struct perf_event *event)
{
return call_int_hook(perf_event_write, event);
}
#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_IO_URING
/**
* security_uring_override_creds() - Check if overriding creds is allowed
* @new: new credentials
*
* Check if the current task, executing an io_uring operation, is allowed to
* override its credentials with @new.
*
* Return: Returns 0 if permission is granted.
*/
int security_uring_override_creds(const struct cred *new)
{
return call_int_hook(uring_override_creds, new);
}
/**
* security_uring_sqpoll() - Check if IORING_SETUP_SQPOLL is allowed
*
* Check whether the current task is allowed to spawn an io_uring polling thread
* (IORING_SETUP_SQPOLL).
*
* Return: Returns 0 if permission is granted.
*/
int security_uring_sqpoll(void)
{
return call_int_hook(uring_sqpoll);
}
/**
* security_uring_cmd() - Check if an io_uring passthrough command is allowed
* @ioucmd: command
*
* Check whether the file_operations uring_cmd is allowed to run.
*
* Return: Returns 0 if permission is granted.
*/
int security_uring_cmd(struct io_uring_cmd *ioucmd)
{
return call_int_hook(uring_cmd, ioucmd);
}
/**
* security_uring_allowed() - Check if io_uring_setup() is allowed
*
* Check whether the current task is allowed to call io_uring_setup().
*
* Return: Returns 0 if permission is granted.
*/
int security_uring_allowed(void)
{
return call_int_hook(uring_allowed);
}
#endif /* CONFIG_IO_URING */
/**
* security_initramfs_populated() - Notify LSMs that initramfs has been loaded
*
* Tells the LSMs the initramfs has been unpacked into the rootfs.
*/
void security_initramfs_populated(void)
{
call_void_hook(initramfs_populated);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IPC_NAMESPACE_H__
#define __IPC_NAMESPACE_H__
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/rwsem.h>
#include <linux/notifier.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/rhashtable-types.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
struct user_namespace;
struct ipc_ids {
int in_use;
unsigned short seq;
struct rw_semaphore rwsem;
struct idr ipcs_idr;
int max_idx;
int last_idx; /* For wrap around detection */
#ifdef CONFIG_CHECKPOINT_RESTORE
int next_id;
#endif
struct rhashtable key_ht;
};
struct ipc_namespace {
struct ipc_ids ids[3];
int sem_ctls[4];
int used_sems;
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
struct percpu_counter percpu_msg_bytes;
struct percpu_counter percpu_msg_hdrs;
size_t shm_ctlmax;
size_t shm_ctlall;
unsigned long shm_tot;
int shm_ctlmni;
/*
* Defines whether IPC_RMID is forced for _all_ shm segments regardless
* of shmctl()
*/
int shm_rmid_forced;
struct notifier_block ipcns_nb;
/* The kern_mount of the mqueuefs sb. We take a ref on it */
struct vfsmount *mq_mnt;
/* # queues in this ns, protected by mq_lock */
unsigned int mq_queues_count;
/* next fields are set through sysctl */
unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */
unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */
unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */
unsigned int mq_msg_default;
unsigned int mq_msgsize_default;
struct ctl_table_set mq_set;
struct ctl_table_header *mq_sysctls;
struct ctl_table_set ipc_set;
struct ctl_table_header *ipc_sysctls;
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct llist_node mnt_llist;
struct ns_common ns;
} __randomize_layout;
extern struct ipc_namespace init_ipc_ns;
extern spinlock_t mq_lock;
#ifdef CONFIG_SYSVIPC
extern void shm_destroy_orphaned(struct ipc_namespace *ns);
#else /* CONFIG_SYSVIPC */
static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
#endif /* CONFIG_SYSVIPC */
#ifdef CONFIG_POSIX_MQUEUE
extern int mq_init_ns(struct ipc_namespace *ns);
/*
* POSIX Message Queue default values:
*
* MIN_*: Lowest value an admin can set the maximum unprivileged limit to
* DFLT_*MAX: Default values for the maximum unprivileged limits
* DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
* an attribute to the open call and the queue must be created
* HARD_*: Highest value the maximums can be set to. These are enforced
* on CAP_SYS_RESOURCE apps as well making them inviolate (so make them
* suitably high)
*
* POSIX Requirements:
* Per app minimum openable message queues - 8. This does not map well
* to the fact that we limit the number of queues on a per namespace
* basis instead of a per app basis. So, make the default high enough
* that no given app should have a hard time opening 8 queues.
* Minimum maximum for HARD_MSGMAX - 32767. I bumped this to 65536.
* Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this. However,
* we have run into a situation where running applications in the wild
* require this to be at least 5MB, and preferably 10MB, so I set the
* value to 16MB in hopes that this user is the worst of the bunch and
* the new maximum will handle anyone else. I may have to revisit this
* in the future.
*/
#define DFLT_QUEUESMAX 256
#define MIN_MSGMAX 1
#define DFLT_MSG 10U
#define DFLT_MSGMAX 10
#define HARD_MSGMAX 65536
#define MIN_MSGSIZEMAX 128
#define DFLT_MSGSIZE 8192U
#define DFLT_MSGSIZEMAX 8192
#define HARD_MSGSIZEMAX (16*1024*1024)
#else
static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
#endif
#if defined(CONFIG_IPC_NS)
static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
{
return container_of(ns, struct ipc_namespace, ns);
}
extern struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns);
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
ns_ref_inc(ns);
return ns;
}
static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
{
if (ns) {
if (ns_ref_get(ns))
return ns;
}
return NULL;
}
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns)
{
if (flags & CLONE_NEWIPC)
return ERR_PTR(-EINVAL);
return ns;
}
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
return ns;
}
static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
{
return ns;
}
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
#endif
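/*
 * Illustrative sketch, not part of the original header: the expected
 * get_ipc_ns()/put_ipc_ns() pairing when a caller needs to hold a namespace
 * reference across a sleeping operation.  The helper name is hypothetical.
 */
static inline struct ipc_namespace *example_grab_ipc_ns(struct nsproxy *nsp)
{
/* The caller must drop this reference with put_ipc_ns(). */
return get_ipc_ns(nsp->ipc_ns);
}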
#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
void retire_mq_sysctls(struct ipc_namespace *ns);
bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
static inline void retire_mq_sysctls(struct ipc_namespace *ns)
{
}
static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
{
return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
#ifdef CONFIG_SYSVIPC_SYSCTL
bool setup_ipc_sysctls(struct ipc_namespace *ns);
void retire_ipc_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_SYSVIPC_SYSCTL */
static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
{
}
static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
{
return true;
}
#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Detect hard and soft lockups on a system
*
* started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
*
* Note: Most of this code is borrowed heavily from the original softlockup
* detector, so thanks to Ingo for the initial implementation.
* Some chunks also taken from the old x86-specific nmi watchdog code, thanks
* to those contributors as well.
*/
#define pr_fmt(fmt) "watchdog: " fmt
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/kvm_para.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <asm/irq_regs.h>
static DEFINE_MUTEX(watchdog_mutex);
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
# define WATCHDOG_HARDLOCKUP_DEFAULT 1
#else
# define WATCHDOG_HARDLOCKUP_DEFAULT 0
#endif
#define NUM_SAMPLE_PERIODS 5
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
static int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_thresh_next;
static int __read_mostly watchdog_hardlockup_available;
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */
/*
* Should we panic when a soft-lockup or hard-lockup occurs:
*/
unsigned int __read_mostly hardlockup_panic =
IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
#ifdef CONFIG_SYSFS
static unsigned int hardlockup_count;
static ssize_t hardlockup_count_show(struct kobject *kobj, struct kobj_attribute *attr,
char *page)
{
return sysfs_emit(page, "%u\n", hardlockup_count);
}
static struct kobj_attribute hardlockup_count_attr = __ATTR_RO(hardlockup_count);
static __init int kernel_hardlockup_sysfs_init(void)
{
sysfs_add_file_to_group(kernel_kobj, &hardlockup_count_attr.attr, NULL);
return 0;
}
late_initcall(kernel_hardlockup_sysfs_init);
#endif // CONFIG_SYSFS
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
* cases this function can be called to disable hard lockup detection. This
* function should only be executed once by the boot processor before the
* kernel command line parameters are parsed, because otherwise it is not
* possible to override this in hardlockup_panic_setup().
*/
void __init hardlockup_detector_disable(void)
{
watchdog_hardlockup_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
{
next:
if (!strncmp(str, "panic", 5))
hardlockup_panic = 1;
else if (!strncmp(str, "nopanic", 7))
hardlockup_panic = 0;
else if (!strncmp(str, "0", 1))
watchdog_hardlockup_user_enabled = 0;
else if (!strncmp(str, "1", 1))
watchdog_hardlockup_user_enabled = 1;
else if (!strncmp(str, "r", 1))
hardlockup_config_perf_event(str + 1);
while (*(str++)) {
if (*str == ',') {
str++;
goto next;
}
}
return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
static unsigned long hard_lockup_nmi_warn;
notrace void arch_touch_nmi_watchdog(void)
{
/*
* Using __raw here because some code paths have
* preemption enabled. If preemption is enabled
* then interrupts should be enabled too, in which
* case we shouldn't have to worry about the watchdog
* going off.
*/
raw_cpu_write(watchdog_hardlockup_touched, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
per_cpu(watchdog_hardlockup_touched, cpu) = true;
}
static bool is_hardlockup(unsigned int cpu)
{
int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
return true;
/*
* NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
* for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
* written/read by a single CPU.
*/
per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
return false;
}
static void watchdog_hardlockup_kick(void)
{
int new_interrupts;
new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
watchdog_buddy_check_hardlockup(new_interrupts);
}
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
if (per_cpu(watchdog_hardlockup_touched, cpu)) {
per_cpu(watchdog_hardlockup_touched, cpu) = false;
return;
}
/*
* Check for a hardlockup by making sure the CPU's timer
* interrupt is incrementing. The timer interrupt should have
* fired multiple times before we overflow'd. If it hasn't
* then this is a good indication the cpu is stuck
*/
if (is_hardlockup(cpu)) {
unsigned int this_cpu = smp_processor_id();
unsigned long flags;
#ifdef CONFIG_SYSFS
++hardlockup_count;
#endif
/* Only print hardlockups once. */
if (per_cpu(watchdog_hardlockup_warned, cpu))
return;
/*
* Prevent multiple hard-lockup reports if one cpu is already
* engaged in dumping all cpu back traces.
*/
if (sysctl_hardlockup_all_cpu_backtrace) {
if (test_and_set_bit_lock(0, &hard_lockup_nmi_warn))
return;
}
/*
* NOTE: we call printk_cpu_sync_get_irqsave() after printing
* the lockup message. While it would be nice to serialize
* that printout, we really want to make sure that if some
* other CPU somehow locked up while holding the lock associated
* with printk_cpu_sync_get_irqsave() that we can still at least
* get the message about the lockup out.
*/
pr_emerg("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu);
printk_cpu_sync_get_irqsave(flags);
print_modules();
print_irqtrace_events(current);
if (cpu == this_cpu) {
if (regs)
show_regs(regs);
else
dump_stack();
printk_cpu_sync_put_irqrestore(flags);
} else {
printk_cpu_sync_put_irqrestore(flags);
trigger_single_cpu_backtrace(cpu);
}
if (sysctl_hardlockup_all_cpu_backtrace) {
trigger_allbutcpu_cpu_backtrace(cpu);
if (!hardlockup_panic)
clear_bit_unlock(0, &hard_lockup_nmi_warn);
}
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
per_cpu(watchdog_hardlockup_warned, cpu) = true;
} else {
per_cpu(watchdog_hardlockup_warned, cpu) = false;
}
}
#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
static inline void watchdog_hardlockup_kick(void) { }
#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
/*
* These functions can be overridden based on the configured hardlockup detector.
*
* watchdog_hardlockup_enable/disable can be implemented to start and stop when
* the softlockup watchdog starts and stops. The detector must select the
* SOFTLOCKUP_DETECTOR Kconfig.
*/
void __weak watchdog_hardlockup_enable(unsigned int cpu) { }
void __weak watchdog_hardlockup_disable(unsigned int cpu) { }
/*
* Watchdog-detector specific API.
*
* Return 0 when hardlockup watchdog is available, negative value otherwise.
* Note that the negative value means that a delayed probe might
* succeed later.
*/
int __weak __init watchdog_hardlockup_probe(void)
{
return -ENODEV;
}
/**
* watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
*
* The reconfiguration steps are:
* watchdog_hardlockup_stop();
* update_variables();
* watchdog_hardlockup_start();
*/
void __weak watchdog_hardlockup_stop(void) { }
/**
* watchdog_hardlockup_start - Start the watchdog after reconfiguration
*
* Counterpart to watchdog_hardlockup_stop().
*
* The following variables have been updated in update_variables() and
* contain the currently valid configuration:
* - watchdog_enabled
* - watchdog_thresh
* - watchdog_cpumask
*/
void __weak watchdog_hardlockup_start(void) { }
/**
* lockup_detector_update_enable - Update the sysctl enable bit
*
* Caller needs to make sure that the hard watchdogs are off, so this
* can't race with watchdog_hardlockup_disable().
*/
static void lockup_detector_update_enable(void)
{
watchdog_enabled = 0;
if (!watchdog_user_enabled)
return;
if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
if (watchdog_softlockup_user_enabled)
watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
/*
* Delay the softlockup report when running known slow code.
* It does _not_ affect the timestamp of the last successful reschedule.
*/
#define SOFTLOCKUP_DELAY_REPORT ULONG_MAX
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif
static struct cpumask watchdog_allowed_mask __read_mostly;
/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);
static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;
#ifdef CONFIG_SYSFS
static unsigned int softlockup_count;
static ssize_t softlockup_count_show(struct kobject *kobj, struct kobj_attribute *attr,
char *page)
{
return sysfs_emit(page, "%u\n", softlockup_count);
}
static struct kobj_attribute softlockup_count_attr = __ATTR_RO(softlockup_count);
static __init int kernel_softlockup_sysfs_init(void)
{
sysfs_add_file_to_group(kernel_kobj, &softlockup_count_attr.attr, NULL);
return 0;
}
late_initcall(kernel_softlockup_sysfs_init);
#endif // CONFIG_SYSFS
/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static unsigned long soft_lockup_nmi_warn;
static int __init softlockup_panic_setup(char *str)
{
softlockup_panic = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
watchdog_user_enabled = 0;
return 1;
}
__setup("nowatchdog", nowatchdog_setup);
static int __init nosoftlockup_setup(char *str)
{
watchdog_softlockup_user_enabled = 0;
return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
static int __init watchdog_thresh_setup(char *str)
{
get_option(&str, &watchdog_thresh);
return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
#ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
enum stats_per_group {
STATS_SYSTEM,
STATS_SOFTIRQ,
STATS_HARDIRQ,
STATS_IDLE,
NUM_STATS_PER_GROUP,
};
static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] = {
CPUTIME_SYSTEM,
CPUTIME_SOFTIRQ,
CPUTIME_IRQ,
CPUTIME_IDLE,
};
static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
static DEFINE_PER_CPU(u8, cpustat_util[NUM_SAMPLE_PERIODS][NUM_STATS_PER_GROUP]);
static DEFINE_PER_CPU(u8, cpustat_tail);
/*
* We don't need nanosecond resolution. A granularity of 16ms is
* sufficient for our precision, allowing us to use u16 to store
* cpustats, which will roll over roughly every ~1000 seconds.
* 2^24 ~= 16 * 10^6
*/
static u16 get_16bit_precision(u64 data_ns)
{
/*
* 2^24ns ~= 16.8ms
* Round to the nearest multiple of 16.8 milliseconds.
*/
return (data_ns + (1 << 23)) >> 24LL;
}
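/*
 * Worked example (added for illustration, not in the original source): with
 * the default 4 s sample period, get_16bit_precision(4 * NSEC_PER_SEC) is
 * roughly 4e9 >> 24 ~= 238, so one full sample period corresponds to a delta
 * of about 238 in the stored u16 counters, and the counters wrap after
 * 65536 * 16.8 ms ~= 1100 s, matching the "~1000 seconds" note above.
 */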
static void update_cpustat(void)
{
int i;
u8 util;
u16 old_stat, new_stat;
struct kernel_cpustat kcpustat;
u64 *cpustat = kcpustat.cpustat;
u8 tail = __this_cpu_read(cpustat_tail);
u16 sample_period_16 = get_16bit_precision(sample_period);
kcpustat_cpu_fetch(&kcpustat, smp_processor_id());
for (i = 0; i < NUM_STATS_PER_GROUP; i++) {
old_stat = __this_cpu_read(cpustat_old[i]);
new_stat = get_16bit_precision(cpustat[tracked_stats[i]]);
util = DIV_ROUND_UP(100 * (new_stat - old_stat), sample_period_16);
/*
* Since we use 16-bit precision, the raw data will undergo
* integer division, which may sometimes result in data loss,
* and then result might exceed 100%. To avoid confusion,
* we enforce a 100% display cap when calculations exceed this threshold.
*/
if (util > 100)
util = 100;
__this_cpu_write(cpustat_util[tail][i], util);
__this_cpu_write(cpustat_old[i], new_stat);
}
__this_cpu_write(cpustat_tail, (tail + 1) % NUM_SAMPLE_PERIODS);
}
static void print_cpustat(void)
{
int i, group;
u8 tail = __this_cpu_read(cpustat_tail);
u64 sample_period_msecond = sample_period;
do_div(sample_period_msecond, NSEC_PER_MSEC);
/*
* Outputting the "watchdog" prefix on every line is redundant and not
* concise, and the original alarm information is sufficient for
* positioning in logs, hence here printk() is used instead of pr_crit().
*/
printk(KERN_CRIT "CPU#%d Utilization every %llums during lockup:\n",
smp_processor_id(), sample_period_msecond);
for (i = 0; i < NUM_SAMPLE_PERIODS; i++) {
group = (tail + i) % NUM_SAMPLE_PERIODS;
printk(KERN_CRIT "\t#%d: %3u%% system,\t%3u%% softirq,\t"
"%3u%% hardirq,\t%3u%% idle\n", i + 1,
__this_cpu_read(cpustat_util[group][STATS_SYSTEM]),
__this_cpu_read(cpustat_util[group][STATS_SOFTIRQ]),
__this_cpu_read(cpustat_util[group][STATS_HARDIRQ]),
__this_cpu_read(cpustat_util[group][STATS_IDLE]));
}
}
#define HARDIRQ_PERCENT_THRESH 50
#define NUM_HARDIRQ_REPORT 5
struct irq_counts {
int irq;
u32 counts;
};
static DEFINE_PER_CPU(bool, snapshot_taken);
/* Tabulate the most frequent interrupts. */
static void tabulate_irq_count(struct irq_counts *irq_counts, int irq, u32 counts, int rank)
{
int i;
struct irq_counts new_count = {irq, counts};
for (i = 0; i < rank; i++) {
if (counts > irq_counts[i].counts)
swap(new_count, irq_counts[i]);
}
}
/*
* If the hardirq time exceeds HARDIRQ_PERCENT_THRESH% of the sample_period,
* then the cause of softlockup might be interrupt storm. In this case, it
* would be useful to start interrupt counting.
*/
static bool need_counting_irqs(void)
{
u8 util;
int tail = __this_cpu_read(cpustat_tail);
tail = (tail + NUM_SAMPLE_PERIODS - 1) % NUM_SAMPLE_PERIODS;
util = __this_cpu_read(cpustat_util[tail][STATS_HARDIRQ]);
return util > HARDIRQ_PERCENT_THRESH;
}
static void start_counting_irqs(void)
{
if (!__this_cpu_read(snapshot_taken)) {
kstat_snapshot_irqs();
__this_cpu_write(snapshot_taken, true);
}
}
static void stop_counting_irqs(void)
{
__this_cpu_write(snapshot_taken, false);
}
static void print_irq_counts(void)
{
unsigned int i, count;
struct irq_counts irq_counts_sorted[NUM_HARDIRQ_REPORT] = {
{-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}
};
if (__this_cpu_read(snapshot_taken)) {
for_each_active_irq(i) {
count = kstat_get_irq_since_snapshot(i);
tabulate_irq_count(irq_counts_sorted, i, count, NUM_HARDIRQ_REPORT);
}
/*
* Outputting the "watchdog" prefix on every line is redundant and not
* concise, and the original alarm information is sufficient for
* positioning in logs, hence here printk() is used instead of pr_crit().
*/
printk(KERN_CRIT "CPU#%d Detect HardIRQ Time exceeds %d%%. Most frequent HardIRQs:\n",
smp_processor_id(), HARDIRQ_PERCENT_THRESH);
for (i = 0; i < NUM_HARDIRQ_REPORT; i++) {
if (irq_counts_sorted[i].irq == -1)
break;
printk(KERN_CRIT "\t#%u: %-10u\tirq#%d\n",
i + 1, irq_counts_sorted[i].counts,
irq_counts_sorted[i].irq);
}
/*
* If the hardirq time is less than HARDIRQ_PERCENT_THRESH% in the last
* sample_period, then we suspect the interrupt storm might be subsiding.
*/
if (!need_counting_irqs())
stop_counting_irqs();
}
}
static void report_cpu_status(void)
{
print_cpustat();
print_irq_counts();
}
#else
static inline void update_cpustat(void) { }
static inline void report_cpu_status(void) { }
static inline bool need_counting_irqs(void) { return false; }
static inline void start_counting_irqs(void) { }
static inline void stop_counting_irqs(void) { }
#endif
/*
* Hard-lockup warnings should be triggered after just a few seconds. Soft-
* lockups can have false positives under extreme conditions. So we generally
* want a higher threshold for soft lockups than for hard lockups. So we couple
* the thresholds with a factor: we make the soft threshold twice the amount of
* time the hard threshold is.
*/
static int get_softlockup_thresh(void)
{
return watchdog_thresh * 2;
}
/*
* Returns seconds, approximately. We don't need nanosecond
* resolution, and we don't need to waste time with a big divide when
* 2^30ns == 1.074s.
*/
static unsigned long get_timestamp(void)
{
return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
static void set_sample_period(void)
{
/*
* convert watchdog_thresh from seconds to ns
* the divide by 5 is to give hrtimer several chances (two
* or three with the current relation between the soft
* and hard thresholds) to increment before the
* hardlockup detector generates a warning
*/
sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / NUM_SAMPLE_PERIODS);
watchdog_update_hrtimer_threshold(sample_period);
}
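/*
 * Worked example (added for illustration, not in the original source): with
 * the default watchdog_thresh of 10, get_softlockup_thresh() returns 20 s,
 * so sample_period = 20 * (NSEC_PER_SEC / NUM_SAMPLE_PERIODS) = 4e9 ns and
 * the per-CPU hrtimer fires every 4 seconds.
 */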
static void update_report_ts(void)
{
__this_cpu_write(watchdog_report_ts, get_timestamp());
}
/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
__this_cpu_write(watchdog_touch_ts, get_timestamp());
update_report_ts();
}
/**
* touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
*
* Call when the scheduler may have stalled for legitimate reasons
* preventing the watchdog task from executing - e.g. the scheduler
* entering idle state. This should only be used for scheduler events.
* Use touch_softlockup_watchdog() for everything else.
*/
notrace void touch_softlockup_watchdog_sched(void)
{
/*
* Preemption can be enabled. It doesn't matter which CPU's watchdog
* report period gets restarted here, so use the raw_ operation.
*/
raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
notrace void touch_softlockup_watchdog(void)
{
touch_softlockup_watchdog_sched();
wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
int cpu;
/*
* watchdog_mutex cannot be taken here, as this might be called
* from (soft)interrupt context, so the access to
* watchdog_allowed_cpumask might race with a concurrent update.
*
* The watchdog time stamp can race against a concurrent real
* update as well, the only side effect might be a cycle delay for
* the softlockup check.
*/
for_each_cpu(cpu, &watchdog_allowed_mask) {
per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
wq_watchdog_touch(cpu);
}
}
void touch_softlockup_watchdog_sync(void)
{
__this_cpu_write(softlockup_touch_sync, true);
__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
static int is_softlockup(unsigned long touch_ts,
unsigned long period_ts,
unsigned long now)
{
if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
/*
* If period_ts has not been updated during a sample_period, then
* in the subsequent few sample_periods, period_ts might also not
* be updated, which could indicate a potential softlockup. In
* this case, if we suspect the cause of the potential softlockup
* might be interrupt storm, then we need to count the interrupts
* to find which interrupt is storming.
*/
if (time_after_eq(now, period_ts + get_softlockup_thresh() / NUM_SAMPLE_PERIODS) &&
need_counting_irqs())
start_counting_irqs();
/*
* A poorly behaving BPF scheduler can live-lock the system into
* soft lockups. Tell sched_ext to try ejecting the BPF
* scheduler when close to a soft lockup.
*/
if (time_after_eq(now, period_ts + get_softlockup_thresh() * 3 / 4))
scx_softlockup(now - touch_ts);
/* Warn about unreasonable delays. */
if (time_after(now, period_ts + get_softlockup_thresh()))
return now - touch_ts;
}
return 0;
}
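/*
 * Worked timeline (added for illustration, not in the original source): with
 * watchdog_thresh = 10 the threshold above is 20 s, so relative to period_ts
 * interrupt counting may start after 4 s (threshold / NUM_SAMPLE_PERIODS),
 * sched_ext is nudged after 15 s (3/4 of the threshold) and the softlockup
 * is reported once more than 20 s have passed without an update.
 */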
/* watchdog detector functions */
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
/*
* The watchdog feed function - touches the timestamp.
*
* It only runs once every sample_period seconds (4 seconds by
* default) to reset the softlockup timestamp. If this gets delayed
* for more than 2*watchdog_thresh seconds then the debug-printout
* triggers in watchdog_timer_fn().
*/
static int softlockup_fn(void *data)
{
update_touch_ts();
stop_counting_irqs();
complete(this_cpu_ptr(&softlockup_completion));
return 0;
}
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
unsigned long touch_ts, period_ts, now;
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
unsigned long flags;
if (!watchdog_enabled)
return HRTIMER_NORESTART;
/*
* pass the buddy check if a panic is in process
*/
if (panic_in_progress())
return HRTIMER_NORESTART;
watchdog_hardlockup_kick();
/* kick the softlockup detector */
if (completion_done(this_cpu_ptr(&softlockup_completion))) {
reinit_completion(this_cpu_ptr(&softlockup_completion));
stop_one_cpu_nowait(smp_processor_id(),
softlockup_fn, NULL,
this_cpu_ptr(&softlockup_stop_work));
}
/* .. and repeat */
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
/*
* Read the current timestamp first. It might become invalid anytime
* when a virtual machine is stopped by the host or when the watchdog
* is touched from NMI.
*/
now = get_timestamp();
/*
* If a virtual machine is stopped by the host it can look to
* the watchdog like a soft lockup. This function touches the watchdog.
*/
kvm_check_and_clear_guest_paused();
/*
* The stored timestamp is comparable with @now only when not touched.
* It might get touched anytime from NMI. Make sure that is_softlockup()
* uses the same (valid) value.
*/
period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));
update_cpustat();
/* Reset the interval when touched by known problematic code. */
if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
/*
* If the time stamp was touched atomically
* make sure the scheduler tick is up to date.
*/
__this_cpu_write(softlockup_touch_sync, false);
sched_clock_tick();
}
update_report_ts();
return HRTIMER_RESTART;
}
/* Check for a softlockup. */
touch_ts = __this_cpu_read(watchdog_touch_ts);
duration = is_softlockup(touch_ts, period_ts, now);
if (unlikely(duration)) {
#ifdef CONFIG_SYSFS
++softlockup_count;
#endif
/*
* Prevent multiple soft-lockup reports if one cpu is already
* engaged in dumping all cpu back traces.
*/
if (softlockup_all_cpu_backtrace) {
if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
return HRTIMER_RESTART;
}
/* Start period for the next softlockup warning. */
update_report_ts();
printk_cpu_sync_get_irqsave(flags);
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
report_cpu_status();
print_modules();
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
printk_cpu_sync_put_irqrestore(flags);
if (softlockup_all_cpu_backtrace) {
trigger_allbutcpu_cpu_backtrace(smp_processor_id());
if (!softlockup_panic)
clear_bit_unlock(0, &soft_lockup_nmi_warn);
}
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
if (softlockup_panic)
panic("softlockup: hung tasks");
}
return HRTIMER_RESTART;
}
static void watchdog_enable(unsigned int cpu)
{
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
struct completion *done = this_cpu_ptr(&softlockup_completion);
WARN_ON_ONCE(cpu != smp_processor_id());
init_completion(done);
complete(done);
/*
* Start the timer first to prevent the hardlockup watchdog triggering
* before the timer has a chance to fire.
*/
hrtimer_setup(hrtimer, watchdog_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
hrtimer_start(hrtimer, ns_to_ktime(sample_period),
HRTIMER_MODE_REL_PINNED_HARD);
/* Initialize timestamp */
update_touch_ts();
/* Enable the hardlockup detector */
if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
watchdog_hardlockup_enable(cpu);
}
static void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
WARN_ON_ONCE(cpu != smp_processor_id());
/*
 * Disable the hardlockup detector first. That prevents a large
 * delay between disabling the timer and disabling the hardlockup
 * detector from causing a false positive.
*/
watchdog_hardlockup_disable(cpu);
hrtimer_cancel(hrtimer);
wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
static int softlockup_stop_fn(void *data)
{
watchdog_disable(smp_processor_id());
return 0;
}
static void softlockup_stop_all(void)
{
int cpu;
if (!softlockup_initialized)
return;
for_each_cpu(cpu, &watchdog_allowed_mask)
smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
cpumask_clear(&watchdog_allowed_mask);
}
static int softlockup_start_fn(void *data)
{
watchdog_enable(smp_processor_id());
return 0;
}
static void softlockup_start_all(void)
{
int cpu;
cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
for_each_cpu(cpu, &watchdog_allowed_mask)
smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
int lockup_detector_online_cpu(unsigned int cpu)
{
if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
watchdog_enable(cpu);
return 0;
}
int lockup_detector_offline_cpu(unsigned int cpu)
{
if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
watchdog_disable(cpu);
return 0;
}
static void __lockup_detector_reconfigure(bool thresh_changed)
{
cpus_read_lock();
watchdog_hardlockup_stop();
softlockup_stop_all();
/*
 * To prevent watchdog_timer_fn from using the old interval and
 * the new watchdog_thresh at the same time, which could lead to
 * false softlockup reports, update watchdog_thresh only after the
 * softlockup detector has been stopped above.
*/
if (thresh_changed)
watchdog_thresh = READ_ONCE(watchdog_thresh_next);
set_sample_period();
lockup_detector_update_enable();
if (watchdog_enabled && watchdog_thresh)
softlockup_start_all();
watchdog_hardlockup_start();
cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
mutex_lock(&watchdog_mutex);
__lockup_detector_reconfigure(false);
mutex_unlock(&watchdog_mutex);
}
/*
* Create the watchdog infrastructure and configure the detector(s).
*/
static __init void lockup_detector_setup(void)
{
/*
* If sysctl is off and watchdog got disabled on the command line,
* nothing to do here.
*/
lockup_detector_update_enable();
if (!IS_ENABLED(CONFIG_SYSCTL) &&
!(watchdog_enabled && watchdog_thresh))
return;
mutex_lock(&watchdog_mutex);
__lockup_detector_reconfigure(false);
softlockup_initialized = true;
mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(bool thresh_changed)
{
cpus_read_lock();
watchdog_hardlockup_stop();
if (thresh_changed)
watchdog_thresh = READ_ONCE(watchdog_thresh_next);
lockup_detector_update_enable();
watchdog_hardlockup_start();
cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
__lockup_detector_reconfigure(false);
}
static inline void lockup_detector_setup(void)
{
__lockup_detector_reconfigure(false);
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
/**
* lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
*
* Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_power_off() function, which busy-loops forever.
*/
void lockup_detector_soft_poweroff(void)
{
watchdog_enabled = 0;
}
#ifdef CONFIG_SYSCTL
/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(bool thresh_changed)
{
/* Remove impossible cpus to keep sysctl output clean. */
cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
__lockup_detector_reconfigure(thresh_changed);
}
/*
* common function for watchdog, nmi_watchdog and soft_watchdog parameter
*
 * caller             | table->data points to            | 'which'
 * -------------------|----------------------------------|-------------------------------
 * proc_watchdog      | watchdog_user_enabled            | WATCHDOG_HARDLOCKUP_ENABLED |
 *                    |                                  | WATCHDOG_SOFTOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_nmi_watchdog  | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
*/
static int proc_watchdog_common(int which, const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err, old, *param = table->data;
mutex_lock(&watchdog_mutex);
old = *param;
if (!write) {
/*
* On read synchronize the userspace interface. This is a
* racy snapshot.
*/
*param = (watchdog_enabled & which) != 0;
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
*param = old;
} else {
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!err && old != READ_ONCE(*param))
proc_watchdog_update(false);
}
mutex_unlock(&watchdog_mutex);
return err;
}
/*
* /proc/sys/kernel/watchdog
*/
static int proc_watchdog(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
WATCHDOG_SOFTOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
/*
* /proc/sys/kernel/nmi_watchdog
*/
static int proc_nmi_watchdog(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!watchdog_hardlockup_available && write)
return -ENOTSUPP;
return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
/*
* /proc/sys/kernel/soft_watchdog
*/
static int proc_soft_watchdog(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
#endif
/*
* /proc/sys/kernel/watchdog_thresh
*/
static int proc_watchdog_thresh(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err, old;
mutex_lock(&watchdog_mutex);
watchdog_thresh_next = READ_ONCE(watchdog_thresh);
old = watchdog_thresh_next;
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!err && write && old != READ_ONCE(watchdog_thresh_next))
proc_watchdog_update(true);
mutex_unlock(&watchdog_mutex);
return err;
}
/*
* The cpumask is the mask of possible cpus that the watchdog can run
* on, not the mask of cpus it is actually running on. This allows the
* user to specify a mask that will include cpus that have not yet
* been brought online, if desired.
*/
static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err;
mutex_lock(&watchdog_mutex);
err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
if (!err && write)
proc_watchdog_update(false);
mutex_unlock(&watchdog_mutex);
return err;
}
static const int sixty = 60;
static const struct ctl_table watchdog_sysctls[] = {
{
.procname = "watchdog",
.data = &watchdog_user_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_watchdog,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "watchdog_thresh",
.data = &watchdog_thresh_next,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_watchdog_thresh,
.extra1 = SYSCTL_ZERO,
.extra2 = (void *)&sixty,
},
{
.procname = "watchdog_cpumask",
.data = &watchdog_cpumask_bits,
.maxlen = NR_CPUS,
.mode = 0644,
.proc_handler = proc_watchdog_cpumask,
},
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
{
.procname = "soft_watchdog",
.data = &watchdog_softlockup_user_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_soft_watchdog,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "softlockup_panic",
.data = &softlockup_panic,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#ifdef CONFIG_SMP
{
.procname = "softlockup_all_cpu_backtrace",
.data = &sysctl_softlockup_all_cpu_backtrace,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif /* CONFIG_SMP */
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
{
.procname = "hardlockup_panic",
.data = &hardlockup_panic,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#ifdef CONFIG_SMP
{
.procname = "hardlockup_all_cpu_backtrace",
.data = &sysctl_hardlockup_all_cpu_backtrace,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif /* CONFIG_SMP */
#endif
};
static struct ctl_table watchdog_hardlockup_sysctl[] = {
{
.procname = "nmi_watchdog",
.data = &watchdog_hardlockup_user_enabled,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_nmi_watchdog,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
};
static void __init watchdog_sysctl_init(void)
{
register_sysctl_init("kernel", watchdog_sysctls);
if (watchdog_hardlockup_available)
watchdog_hardlockup_sysctl[0].mode = 0644;
register_sysctl_init("kernel", watchdog_hardlockup_sysctl);
}
#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
static void __init lockup_detector_delay_init(struct work_struct *work);
static bool allow_lockup_detector_init_retry __initdata;
static struct work_struct detector_work __initdata =
__WORK_INITIALIZER(detector_work, lockup_detector_delay_init);
static void __init lockup_detector_delay_init(struct work_struct *work)
{
int ret;
ret = watchdog_hardlockup_probe();
if (ret) {
if (ret == -ENODEV)
pr_info("NMI not fully supported\n");
else
pr_info("Delayed init of the lockup detector failed: %d\n", ret);
pr_info("Hard watchdog permanently disabled\n");
return;
}
allow_lockup_detector_init_retry = false;
watchdog_hardlockup_available = true;
lockup_detector_setup();
}
/*
 * lockup_detector_retry_init - retry lockup detector init if possible.
 *
 * Retry hardlockup detector init. This is useful when the detector
 * requires functionality that only becomes available later during boot
 * on a particular platform.
*/
void __init lockup_detector_retry_init(void)
{
/* Must be called before late init calls */
if (!allow_lockup_detector_init_retry)
return;
schedule_work(&detector_work);
}
/*
 * Ensure that the optional delayed hardlockup init has completed before
 * the init code and memory are freed.
*/
static int __init lockup_detector_check(void)
{
/* Prevent any later retry. */
allow_lockup_detector_init_retry = false;
/* Make sure no work is pending. */
flush_work(&detector_work);
watchdog_sysctl_init();
return 0;
}
late_initcall_sync(lockup_detector_check);
void __init lockup_detector_init(void)
{
if (tick_nohz_full_enabled())
pr_info("Disabling watchdog on nohz_full cores by default\n");
cpumask_copy(&watchdog_cpumask,
housekeeping_cpumask(HK_TYPE_TIMER));
if (!watchdog_hardlockup_probe())
watchdog_hardlockup_available = true;
else
allow_lockup_detector_init_retry = true;
lockup_detector_setup();
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H
#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/static_call_types.h>
DECLARE_PER_CPU_CACHE_HOT(int, __preempt_count);
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
/*
* We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
* that a decrement hitting 0 means we can and should reschedule.
*/
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
/*
* We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
* that think a non-zero value indicates we cannot preempt.
*/
static __always_inline int preempt_count(void)
{
return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}
static __always_inline void preempt_count_set(int pc)
{
int old, new;
old = raw_cpu_read_4(__preempt_count);
do {
new = (old & PREEMPT_NEED_RESCHED) |
(pc & ~PREEMPT_NEED_RESCHED);
} while (!raw_cpu_try_cmpxchg_4(__preempt_count, &old, new));
}
/*
* must be macros to avoid header recursion hell
*/
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)
/*
* We fold the NEED_RESCHED bit into the preempt count such that
* preempt_enable() can decrement and test for needing to reschedule with a
* single instruction.
*
* We invert the actual bit, so that when the decrement hits 0 we know we both
* need to resched (the bit is cleared) and can resched (no preempt count).
*/
static __always_inline void set_preempt_need_resched(void)
{
raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}
static __always_inline void clear_preempt_need_resched(void)
{
raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}
static __always_inline bool test_preempt_need_resched(void)
{
return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}
/*
* The various preempt_count add/sub methods
*/
static __always_inline void __preempt_count_add(int val)
{
raw_cpu_add_4(__preempt_count, val);
}
static __always_inline void __preempt_count_sub(int val)
{
raw_cpu_add_4(__preempt_count, -val);
}
/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
static __always_inline bool __preempt_count_dec_and_test(void)
{
return GEN_UNARY_RMWcc("decl", __my_cpu_var(__preempt_count), e,
__percpu_arg([var]));
}
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(int preempt_offset)
{
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
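/*
 * Illustrative sketch only (not part of this header): how the generic
 * preempt_enable() path in <linux/preempt.h> is expected to combine the
 * helpers above under CONFIG_PREEMPTION, so that the common case is a
 * single decrement-and-test. The example_* name is hypothetical.
 */
#if 0
#define example_preempt_enable()					\
do {									\
	barrier();							\
	if (unlikely(__preempt_count_dec_and_test()))			\
		__preempt_schedule();					\
} while (0)
#endif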
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void preempt_schedule_thunk(void);
#define preempt_schedule_dynamic_enabled preempt_schedule_thunk
#define preempt_schedule_dynamic_disabled NULL
extern asmlinkage void preempt_schedule_notrace(void);
extern asmlinkage void preempt_schedule_notrace_thunk(void);
#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace_thunk
#define preempt_schedule_notrace_dynamic_disabled NULL
#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
#define __preempt_schedule() \
do { \
__STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule); \
asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
} while (0)
DECLARE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
#define __preempt_schedule_notrace() \
do { \
__STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule_notrace); \
asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
} while (0)
#else /* PREEMPT_DYNAMIC */
#define __preempt_schedule() \
asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT);
#define __preempt_schedule_notrace() \
asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT);
#endif /* PREEMPT_DYNAMIC */
#endif /* PREEMPTION */
#endif /* __ASM_PREEMPT_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* workqueue.h --- work queue handling for Linux.
*/
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H
#include <linux/alloc_tag.h>
#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>
/*
* The first word is the work queue pointer and the flags rolled into
* one
*/
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
enum work_bits {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */
WORK_STRUCT_PWQ_BIT, /* data points to pwq */
WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC_BIT, /* static initializer (debugobjects) */
#endif
WORK_STRUCT_FLAG_BITS,
/* color for workqueue flushing */
WORK_STRUCT_COLOR_SHIFT = WORK_STRUCT_FLAG_BITS,
WORK_STRUCT_COLOR_BITS = 4,
/*
* When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
* debugobjects turned off. This makes pwqs aligned to 256 bytes (512
* bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
*
 * MSB
 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
 *                     4 bits        4 or 5 bits
*/
WORK_STRUCT_PWQ_SHIFT = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,
/*
* data contains off-queue information when !WORK_STRUCT_PWQ.
*
 * MSB
 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
 *                  16 bits          1 bit        4 or 5 bits
*/
WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS,
WORK_OFFQ_BH_BIT = WORK_OFFQ_FLAG_SHIFT,
WORK_OFFQ_FLAG_END,
WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,
WORK_OFFQ_DISABLE_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
WORK_OFFQ_DISABLE_BITS = 16,
/*
* When a work item is off queue, the high bits encode off-queue flags
* and the last pool it was on. Cap pool ID to 31 bits and use the
* highest number to indicate that no pool is associated.
*/
WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};
enum work_flags {
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT,
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
#else
WORK_STRUCT_STATIC = 0,
#endif
};
enum wq_misc_consts {
WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS),
/* not bound to any CPU, prefer the local CPU */
WORK_CPU_UNBOUND = NR_CPUS,
/* bit mask for work_busy() return values */
WORK_BUSY_PENDING = 1 << 0,
WORK_BUSY_RUNNING = 1 << 1,
/* maximum string length for set_worker_desc() */
WORKER_DESC_LEN = 32,
};
/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH (1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))
#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
struct delayed_work {
struct work_struct work;
struct timer_list timer;
/* target workqueue and CPU ->timer uses to queue ->work */
struct workqueue_struct *wq;
int cpu;
};
struct rcu_work {
struct work_struct work;
struct rcu_head rcu;
/* target workqueue ->rcu uses to queue ->work */
struct workqueue_struct *wq;
};
enum wq_affn_scope {
WQ_AFFN_DFL, /* use system default */
WQ_AFFN_CPU, /* one pod per CPU */
WQ_AFFN_SMT, /* one pod per SMT */
WQ_AFFN_CACHE, /* one pod per LLC */
WQ_AFFN_NUMA, /* one pod per NUMA node */
WQ_AFFN_SYSTEM, /* one pod across the whole system */
WQ_AFFN_NR_TYPES,
};
/**
* struct workqueue_attrs - A struct for workqueue attributes.
*
* This can be used to change attributes of an unbound workqueue.
*/
struct workqueue_attrs {
/**
* @nice: nice level
*/
int nice;
/**
* @cpumask: allowed CPUs
*
* Work items in this workqueue are affine to these CPUs and not allowed
* to execute on other CPUs. A pool serving a workqueue must have the
* same @cpumask.
*/
cpumask_var_t cpumask;
/**
* @__pod_cpumask: internal attribute used to create per-pod pools
*
* Internal use only.
*
* Per-pod unbound worker pools are used to improve locality. Always a
* subset of ->cpumask. A workqueue can be associated with multiple
* worker pools with disjoint @__pod_cpumask's. Whether the enforcement
* of a pool's @__pod_cpumask is strict depends on @affn_strict.
*/
cpumask_var_t __pod_cpumask;
/**
* @affn_strict: affinity scope is strict
*
* If clear, workqueue will make a best-effort attempt at starting the
* worker inside @__pod_cpumask but the scheduler is free to migrate it
* outside.
*
* If set, workers are only allowed to run inside @__pod_cpumask.
*/
bool affn_strict;
/*
 * The fields below aren't properties of a worker_pool. They only modify
 * how :c:func:`apply_workqueue_attrs` selects pools and thus don't
* participate in pool hash calculations or equality comparisons.
*
* If @affn_strict is set, @cpumask isn't a property of a worker_pool
* either.
*/
/**
* @affn_scope: unbound CPU affinity scope
*
* CPU pods are used to improve execution locality of unbound work
* items. There are multiple pod types, one for each wq_affn_scope, and
* every CPU in the system belongs to one pod in every pod type. CPUs
* that belong to the same pod share the worker pool. For example,
* selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
* pool for each NUMA node.
*/
enum wq_affn_scope affn_scope;
/**
* @ordered: work items must be executed one by one in queueing order
*/
bool ordered;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
return container_of(work, struct delayed_work, work);
}
static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
return container_of(work, struct rcu_work, work);
}
struct execute_work {
struct work_struct work;
};
#ifdef CONFIG_LOCKDEP
/*
* NB: because we have to copy the lockdep_map, setting _key
* here is required, otherwise it could get initialised to the
* copy of the lockdep_map!
*/
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif
#define __WORK_INITIALIZER(n, f) { \
.data = WORK_DATA_STATIC_INIT(), \
.entry = { &(n).entry, &(n).entry }, \
.func = (f), \
__WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
}
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
.work = __WORK_INITIALIZER((n).work, (f)), \
.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
(tflags) | TIMER_IRQSAFE), \
}
#define DECLARE_WORK(n, f) \
struct work_struct n = __WORK_INITIALIZER(n, f)
#define DECLARE_DELAYED_WORK(n, f) \
struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
#define DECLARE_DEFERRABLE_WORK(n, f) \
struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
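/*
 * Illustrative sketch only (not part of this header): declaring a
 * statically initialized work item with DECLARE_WORK() and kicking it
 * from process context. The example_* names are hypothetical.
 */
#if 0
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}
static DECLARE_WORK(example_work, example_work_fn);

static void example_kick(void)
{
	schedule_work(&example_work);
}
#endif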
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
* initialize all of a work item in one go
*
* NOTE! No point in using "atomic_long_set()": using a direct
* assignment of the work data initializer allows the compiler
* to generate better code.
*/
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
#endif
#define __INIT_WORK(_work, _func, _onstack) \
do { \
static __maybe_unused struct lock_class_key __key; \
\
__INIT_WORK_KEY(_work, _func, _onstack, &__key); \
} while (0)
#define INIT_WORK(_work, _func) \
__INIT_WORK((_work), (_func), 0)
#define INIT_WORK_ONSTACK(_work, _func) \
__INIT_WORK((_work), (_func), 1)
#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
__INIT_WORK_KEY((_work), (_func), 1, _key)
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
__timer_init(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
do { \
INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
__timer_init_on_stack(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
#define INIT_DELAYED_WORK(_work, _func) \
__INIT_DELAYED_WORK(_work, _func, 0)
#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
#define INIT_DEFERRABLE_WORK(_work, _func) \
__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
#define INIT_RCU_WORK(_work, _func) \
INIT_WORK(&(_work)->work, (_func))
#define INIT_RCU_WORK_ONSTACK(_work, _func) \
INIT_WORK_ONSTACK(&(_work)->work, (_func))
/**
* work_pending - Find out whether a work item is currently pending
* @work: The work item in question
*/
#define work_pending(work) \
test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
/**
* delayed_work_pending - Find out whether a delayable work item is currently
* pending
* @w: The work item in question
*/
#define delayed_work_pending(w) \
work_pending(&(w)->work)
/*
* Workqueue flags and constants. For details, please refer to
* Documentation/core-api/workqueue.rst.
*/
enum wq_flags {
WQ_BH = 1 << 0, /* execute in bottom half (softirq) context */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */
/*
* Per-cpu workqueues are generally preferred because they tend to
* show better performance thanks to cache locality. Per-cpu
* workqueues exclude the scheduler from choosing the CPU to
* execute the worker threads, which has an unfortunate side effect
* of increasing power consumption.
*
* The scheduler considers a CPU idle if it doesn't have any task
* to execute and tries to keep idle cores idle to conserve power;
* however, for example, a per-cpu work item scheduled from an
* interrupt handler on an idle CPU will force the scheduler to
* execute the work item on that CPU breaking the idleness, which in
* turn may lead to more scheduling choices which are sub-optimal
* in terms of power consumption.
*
* Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
* but become unbound if workqueue.power_efficient kernel param is
 * specified. Per-cpu workqueues that are identified as contributing
 * significantly to power consumption are marked with this flag;
 * enabling the power_efficient mode then yields noticeable power
 * savings at the cost of a small performance penalty.
*
* http://thread.gmane.org/gmane.linux.kernel/1480396
*/
WQ_POWER_EFFICIENT = 1 << 7,
WQ_PERCPU = 1 << 8, /* bound to a specific cpu */
__WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
/* BH wq only allows the following flags */
__WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI | WQ_PERCPU,
};
enum wq_consts {
WQ_MAX_ACTIVE = 2048, /* I like 2048, better ideas? */
WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
/*
* Per-node default cap on min_active. Unless explicitly set, min_active
* is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
* workqueue_struct->min_active definition.
*/
WQ_DFL_MIN_ACTIVE = 8,
};
/*
* System-wide workqueues which are always present.
*
* system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
* Multi-CPU multi-threaded. There are users which expect relatively
* short queue flush time. Don't queue works which can run for too
* long.
*
* system_highpri_wq is similar to system_percpu_wq but for work items which
* require WQ_HIGHPRI.
*
* system_long_wq is similar to system_percpu_wq but may host long running
* works. Queue flushing might take relatively long.
*
 * system_dfl_wq is an unbound workqueue. Workers are not bound to
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
* resources are available.
*
* system_freezable_wq is equivalent to system_percpu_wq except that it's
* freezable.
*
* *_power_efficient_wq are inclined towards saving power and converted
* into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
* system_power_efficient_wq is identical to system_percpu_wq if
* 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
*
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
* are executed in the queueing CPU's BH context in the queueing order.
*/
extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */
extern struct workqueue_struct *system_percpu_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_dfl_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;
void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);
/**
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
* @max_active: max in-flight work items, 0 for default
* @...: args for @fmt
*
* For a per-cpu workqueue, @max_active limits the number of in-flight work
* items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
* executing at most one work item for the workqueue.
*
* For unbound workqueues, @max_active limits the number of in-flight work items
* for the whole system. e.g. @max_active of 16 indicates that there can be
* at most 16 work items executing for the workqueue in the whole system.
*
* As sharing the same active counter for an unbound workqueue across multiple
* NUMA nodes can be expensive, @max_active is distributed to each NUMA node
* according to the proportion of the number of online CPUs and enforced
* independently.
*
* Depending on online CPU distribution, a node may end up with per-node
* max_active which is significantly lower than @max_active, which can lead to
* deadlocks if the per-node concurrency limit is lower than the maximum number
* of interdependent work items for the workqueue.
*
* To guarantee forward progress regardless of online CPU distribution, the
* concurrency limit on every node is guaranteed to be equal to or greater than
* min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
* that the sum of per-node max_active's may be larger than @max_active.
*
* For detailed information on %WQ_\* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
__printf(1, 4) struct workqueue_struct *
alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
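/*
 * Illustrative sketch only (not part of this header): allocating an
 * unbound workqueue with the default max_active and queueing a work item
 * on it. The example_* names are hypothetical; teardown uses
 * destroy_workqueue().
 */
#if 0
static struct workqueue_struct *example_wq;

static int example_setup(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_submit(struct work_struct *work)
{
	/* @work must have been set up with INIT_WORK() beforehand. */
	queue_work(example_wq, work);
}

static void example_teardown(void)
{
	destroy_workqueue(example_wq);
}
#endif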
#ifdef CONFIG_LOCKDEP
/**
* alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
* @max_active: max in-flight work items, 0 for default
* @lockdep_map: user-defined lockdep_map
* @...: args for @fmt
*
 * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
* workqueues created with the same purpose and to avoid leaking a lockdep_map
* on each workqueue creation.
*
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
struct lockdep_map *lockdep_map, ...);
/**
* alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
* user-defined lockdep_map
*
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
* @lockdep_map: user-defined lockdep_map
* @args: args for @fmt
*
 * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
* Useful for workqueues created with the same purpose and to avoid leaking a
* lockdep_map on each workqueue creation.
*
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \
alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
1, lockdep_map, ##args))
#endif
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
* @args: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
* implemented as unbound workqueues with @max_active of one.
*
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_PERCPU, 1, (name))
#define create_freezable_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name) \
alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
#define from_work(var, callback_work, work_fieldname) \
container_of(callback_work, typeof(*var), work_fieldname)
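/*
 * Illustrative sketch only (not part of this header): using from_work()
 * in a work handler to recover the containing object. struct example_dev
 * and its fields are hypothetical.
 */
#if 0
struct example_dev {
	struct work_struct refresh_work;
	int refresh_count;
};

static void example_refresh_fn(struct work_struct *work)
{
	struct example_dev *dev = from_work(dev, work, refresh_work);

	dev->refresh_count++;	/* operate on the containing object */
}
#endif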
extern void destroy_workqueue(struct workqueue_struct *wq);
struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
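/*
 * Illustrative sketch only (not part of this header): switching an
 * unbound workqueue to a NUMA affinity scope through workqueue_attrs.
 * example_set_numa_scope() is a hypothetical helper and the target
 * workqueue is assumed to be WQ_UNBOUND.
 */
#if 0
static int example_set_numa_scope(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;
	attrs->affn_scope = WQ_AFFN_NUMA;
	ret = apply_workqueue_attrs(wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}
#endif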
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);
extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);
extern bool flush_rcu_work(struct rcu_work *rwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
/**
* queue_work - queue work on a workqueue
* @wq: workqueue to use
* @work: work to queue
*
* Returns %false if @work was already on a queue, %true otherwise.
*
* We queue the work to the CPU on which it was submitted, but if the CPU dies
* it can be processed by another CPU.
*
* Memory-ordering properties: If it returns %true, guarantees that all stores
* preceding the call to queue_work() in the program order will be visible from
* the CPU which will execute @work by the time such work executes, e.g.,
*
* { x is initially 0 }
*
* CPU0 CPU1
*
* WRITE_ONCE(x, 1); [ @work is being executed ]
* r0 = queue_work(wq, work); r1 = READ_ONCE(x);
*
* Forbids: r0 == true && r1 == 0
*/
static inline bool queue_work(struct workqueue_struct *wq,
struct work_struct *work)
{
return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
/**
* queue_delayed_work - queue work on a workqueue after delay
* @wq: workqueue to use
* @dwork: delayable work to queue
* @delay: number of jiffies to wait before queueing
*
* Equivalent to queue_delayed_work_on() but tries to use the local CPU.
*/
static inline bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
/**
* mod_delayed_work - modify delay of or queue a delayed work
* @wq: workqueue to use
* @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
* mod_delayed_work_on() on local CPU.
*/
static inline bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
/**
* schedule_work_on - put work task on a specific cpu
* @cpu: cpu to put the work task on
* @work: job to be done
*
* This puts a job on a specific cpu
*/
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, system_percpu_wq, work);
}
/**
* schedule_work - put work task in global workqueue
* @work: job to be done
*
* Returns %false if @work was already on the kernel-global workqueue and
* %true otherwise.
*
* This puts a job in the kernel-global workqueue if it was not already
* queued and leaves it in the same position on the kernel-global
* workqueue otherwise.
*
* Shares the same memory-ordering properties of queue_work(), cf. the
* DocBook header of queue_work().
*/
static inline bool schedule_work(struct work_struct *work)
{
return queue_work(system_percpu_wq, work);
}
/**
* enable_and_queue_work - Enable and queue a work item on a specific workqueue
* @wq: The target workqueue
* @work: The work item to be enabled and queued
*
* This function combines the operations of enable_work() and queue_work(),
* providing a convenient way to enable and queue a work item in a single call.
* It invokes enable_work() on @work and then queues it if the disable depth
* reached 0. Returns %true if the disable depth reached 0 and @work is queued,
* and %false otherwise.
*
* Note that @work is always queued when disable depth reaches zero. If the
* desired behavior is queueing only if certain events took place while @work is
* disabled, the user should implement the necessary state tracking and perform
* explicit conditional queueing after enable_work().
*/
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
struct work_struct *work)
{
if (enable_work(work)) {
queue_work(wq, work);
return true;
}
return false;
}
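/*
 * Illustrative sketch only (not part of this header): pausing a work item
 * around a reconfiguration with disable_work_sync() and resuming it with
 * enable_and_queue_work(). struct example_cfg and its fields are
 * hypothetical.
 */
#if 0
struct example_cfg {
	struct workqueue_struct *wq;
	struct work_struct refresh_work;
};

static void example_reconfigure(struct example_cfg *cfg)
{
	/* Block further executions and wait for any in-flight one. */
	disable_work_sync(&cfg->refresh_work);

	/* ... update the configuration here ... */

	/* Re-enable and kick once so the new configuration takes effect. */
	enable_and_queue_work(cfg->wq, &cfg->refresh_work);
}
#endif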
/*
* Detect attempt to flush system-wide workqueues at compile time when possible.
* Warn attempt to flush system-wide workqueues at runtime.
*
* See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
* for reasons and steps for converting system-wide workqueues into local workqueues.
*/
extern void __warn_flushing_systemwide_wq(void)
__compiletime_warning("Please avoid flushing system-wide workqueues.");
/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work() \
({ \
__warn_flushing_systemwide_wq(); \
__flush_workqueue(system_percpu_wq); \
})
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
\
if ((__builtin_constant_p(_wq == system_percpu_wq) && \
_wq == system_percpu_wq) || \
(__builtin_constant_p(_wq == system_highpri_wq) && \
_wq == system_highpri_wq) || \
(__builtin_constant_p(_wq == system_long_wq) && \
_wq == system_long_wq) || \
(__builtin_constant_p(_wq == system_dfl_wq) && \
_wq == system_dfl_wq) || \
(__builtin_constant_p(_wq == system_freezable_wq) && \
_wq == system_freezable_wq) || \
(__builtin_constant_p(_wq == system_power_efficient_wq) && \
_wq == system_power_efficient_wq) || \
(__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
_wq == system_freezable_power_efficient_wq)) \
__warn_flushing_systemwide_wq(); \
__flush_workqueue(_wq); \
})
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
* @dwork: job to be done
* @delay: number of jiffies to wait
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue on the specified CPU.
*/
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
}
/**
* schedule_delayed_work - put work task in global workqueue after delay
* @dwork: job to be done
* @delay: number of jiffies to wait or 0 for immediate execution
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue.
*/
static inline bool schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(system_percpu_wq, dwork, delay);
}
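/*
 * Illustrative sketch only (not part of this header): a simple periodic
 * poller built on schedule_delayed_work(), re-arming itself from its own
 * handler. The example_* names are hypothetical.
 */
#if 0
static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_work, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
	/* ... poll the device ... */
	schedule_delayed_work(&example_poll_work, HZ);	/* again in ~1s */
}

static void example_start_polling(void)
{
	schedule_delayed_work(&example_poll_work, 0);
}
#endif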
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
void *arg, struct lock_class_key *key);
/*
* A new key is defined for each caller to make sure the work
* associated with the function doesn't share its locking class.
*/
#define work_on_cpu(_cpu, _fn, _arg) \
({ \
static struct lock_class_key __key; \
\
work_on_cpu_key(_cpu, _fn, _arg, &__key); \
})
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */
#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */
#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif
void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
* Linux wait queue related types and methods
*/
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
typedef struct wait_queue_entry wait_queue_entry_t;
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_CUSTOM 0x04
#define WQ_FLAG_DONE 0x08
#define WQ_FLAG_PRIORITY 0x10
/*
* A single wait-queue entry structure:
*/
struct wait_queue_entry {
unsigned int flags;
void *private;
wait_queue_func_t func;
struct list_head entry;
};
struct wait_queue_head {
spinlock_t lock;
struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;
struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
*/
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
.private = tsk, \
.func = default_wake_function, \
.entry = { NULL, NULL } }
#define DECLARE_WAITQUEUE(name, tsk) \
struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.head = LIST_HEAD_INIT(name.head) }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
#define init_waitqueue_head(wq_head) \
do { \
static struct lock_class_key __key; \
\
__init_waitqueue_head((wq_head), #wq_head, &__key); \
} while (0)
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
wq_entry->flags = 0;
wq_entry->private = p;
wq_entry->func = default_wake_function;
}
static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
wq_entry->flags = 0;
wq_entry->private = NULL;
wq_entry->func = func;
}
/**
* waitqueue_active -- locklessly test for waiters on the queue
* @wq_head: the waitqueue to test for waiters
*
* returns true if the wait list is not empty
*
* NOTE: this function is lockless and requires care, incorrect usage _will_
* lead to sporadic and non-obvious failure.
*
* Use either while holding wait_queue_head::lock or when used for wakeups
* with an extra smp_mb() like::
*
* CPU0 - waker CPU1 - waiter
*
* for (;;) {
* @cond = true; prepare_to_wait(&wq_head, &wait, state);
* smp_mb(); // smp_mb() from set_current_state()
* if (waitqueue_active(wq_head)) if (@cond)
* wake_up(wq_head); break;
* schedule();
* }
* finish_wait(&wq_head, &wait);
*
* Because without the explicit smp_mb() it's possible for the
* waitqueue_active() load to get hoisted over the @cond store such that we'll
* observe an empty wait list while the waiter might not observe @cond.
*
* Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
* which (when the lock is uncontended) are of roughly equal cost.
*/
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
return !list_empty(&wq_head->head);
}
/**
* wq_has_single_sleeper - check if there is only one sleeper
* @wq_head: wait queue head
*
 * Returns true if @wq_head has only one sleeper on the list.
*
* Please refer to the comment for waitqueue_active.
*/
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
return list_is_singular(&wq_head->head);
}
/**
* wq_has_sleeper - check if there are any waiting processes
* @wq_head: wait queue head
*
 * Returns true if @wq_head has waiting processes
*
* Please refer to the comment for waitqueue_active.
*/
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
/*
* We need to be sure we are in sync with the
* add_wait_queue modifications to the wait queue.
*
* This memory barrier should be paired with one on the
* waiting side.
*/
smp_mb();
return waitqueue_active(wq_head);
}
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head,
struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
struct list_head *head = &wq_head->head;
struct wait_queue_entry *wq;
list_for_each_entry(wq, &wq_head->head, entry) {
if (!(wq->flags & WQ_FLAG_PRIORITY))
break;
head = &wq->entry;
}
list_add(&wq_entry->entry, head);
}
/*
* Used for wake-one threads:
*/
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(wq_head, wq_entry);
}
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
list_add_tail(&wq_entry->entry, &wq_head->head);
}
static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_entry_tail(wq_head, wq_entry);
}
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
list_del(&wq_entry->entry);
}
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
#define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE)
/*
* Wakeup macros to be used to report events to the targets.
*/
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_poll_on_current_cpu(x, m) \
__wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m) \
__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
/**
* wake_up_pollfree - signal that a polled waitqueue is going away
* @wq_head: the wait queue head
*
* In the very rare cases where a ->poll() implementation uses a waitqueue whose
* lifetime is tied to a task rather than to the 'struct file' being polled,
* this function must be called before the waitqueue is freed so that
* non-blocking polls (e.g. epoll) are notified that the queue is going away.
*
* The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
* an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
*/
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
/*
* For performance reasons, we don't always take the queue lock here.
* Therefore, we might race with someone removing the last entry from
* the queue, and proceed while they still hold the queue lock.
* However, rcu_read_lock() is required to be held in such cases, so we
* can safely proceed with an RCU-delayed free.
*/
if (waitqueue_active(wq_head))
__wake_up_pollfree(wq_head);
}
#define ___wait_cond_timeout(condition) \
({ \
bool __cond = (condition); \
if (__cond && !__ret) \
__ret = 1; \
__cond || !__ret; \
})
#define ___wait_is_interruptible(state) \
(!__builtin_constant_p(state) || \
(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
/*
* The below macro ___wait_event() has an explicit shadow of the __ret
* variable when used from the wait_event_*() macros.
*
* This is so that both can use the ___wait_cond_timeout() construct
* to wrap the condition.
*
* The type inconsistency of the wait_event_*() __ret variable is also
* on purpose; we use long where we can return timeout values and int
* otherwise.
*/
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
__label__ __out; \
struct wait_queue_entry __wq_entry; \
long __ret = ret; /* explicit shadow */ \
\
init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
for (;;) { \
long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
\
if (condition) \
break; \
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
goto __out; \
} \
\
cmd; \
\
if (condition) \
break; \
} \
finish_wait(&wq_head, &__wq_entry); \
__out: __ret; \
})
#define __wait_event(wq_head, condition) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
schedule())
/**
* wait_event - sleep until a condition gets true
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
* @condition evaluates to true. The @condition is checked each time
* the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*/
#define wait_event(wq_head, condition) \
do { \
might_sleep(); \
if (condition) \
break; \
__wait_event(wq_head, condition); \
} while (0)
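/*
 * Editor illustration: a minimal wait/wake pairing sketch ("my_wq" and
 * "done" are hypothetical names). Note the ordering required on the waker
 * side: update the condition first, then call wake_up().
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool done;
 *
 *	// waiter (process context, may sleep)
 *	wait_event(my_wq, done);
 *
 *	// waker
 *	done = true;		// change the condition ...
 *	wake_up(&my_wq);	// ... then wake the waitqueue
 */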
#define __io_wait_event(wq_head, condition) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
io_schedule())
/*
* io_wait_event() -- like wait_event() but with io_schedule()
*/
#define io_wait_event(wq_head, condition) \
do { \
might_sleep(); \
if (condition) \
break; \
__io_wait_event(wq_head, condition); \
} while (0)
#define __wait_event_freezable(wq_head, condition) \
___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \
0, 0, schedule())
/**
* wait_event_freezable - sleep (or freeze) until a condition gets true
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
* to system load) until the @condition evaluates to true. The
* @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*/
#define wait_event_freezable(wq_head, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_freezable(wq_head, condition); \
__ret; \
})
#define __wait_event_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_UNINTERRUPTIBLE, 0, timeout, \
__ret = schedule_timeout(__ret))
/**
* wait_event_timeout - sleep until a condition gets true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
* @condition evaluates to true. The @condition is checked each time
* the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* Returns:
* 0 if the @condition evaluated to %false after the @timeout elapsed,
* 1 if the @condition evaluated to %true after the @timeout elapsed,
* or the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed.
*/
#define wait_event_timeout(wq_head, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_timeout(wq_head, condition, timeout); \
__ret; \
})
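/*
 * Editor illustration: return-value handling sketch for wait_event_timeout()
 * (names hypothetical). Zero means the timeout expired with the condition
 * still false; a positive value means the condition became true.
 *
 *	long left = wait_event_timeout(my_wq, done, msecs_to_jiffies(100));
 *	if (!left)
 *		pr_warn("timed out, condition still false\n");
 *	else
 *		pr_info("condition true, %ld jiffies to spare\n", left);
 */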
#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
(TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \
__ret = schedule_timeout(__ret))
/*
* like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
* increasing load and is freezable.
*/
#define wait_event_freezable_timeout(wq_head, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
__ret; \
})
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
cmd1; schedule(); cmd2)
/*
* Just like wait_event_cmd(), except it sets exclusive flag
*/
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
do { \
if (condition) \
break; \
__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)
#define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
cmd1; schedule(); cmd2)
/**
* wait_event_cmd - sleep until a condition gets true
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @cmd1: the command will be executed before sleep
* @cmd2: the command will be executed after sleep
*
* The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
* @condition evaluates to true. The @condition is checked each time
* the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*/
#define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
do { \
if (condition) \
break; \
__wait_event_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)
#define __wait_event_interruptible(wq_head, condition) \
___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
schedule())
/**
* wait_event_interruptible - sleep until a condition gets true
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a
* signal and 0 if @condition evaluated to true.
*/
#define wait_event_interruptible(wq_head, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_interruptible(wq_head, condition); \
__ret; \
})
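/*
 * Editor illustration: signal-handling sketch (names hypothetical). A
 * typical read/ioctl path propagates -ERESTARTSYS so the signal can be
 * handled and the syscall restarted.
 *
 *	int ret = wait_event_interruptible(my_wq, done);
 *	if (ret)		// -ERESTARTSYS: interrupted by a signal
 *		return ret;
 *	// the condition is true here
 */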
#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_INTERRUPTIBLE, 0, timeout, \
__ret = schedule_timeout(__ret))
/**
* wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* Returns:
* 0 if the @condition evaluated to %false after the @timeout elapsed,
* 1 if the @condition evaluated to %true after the @timeout elapsed,
* the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
* interrupted by a signal.
*/
#define wait_event_interruptible_timeout(wq_head, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_interruptible_timeout(wq_head, \
condition, timeout); \
__ret; \
})
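/*
 * Editor illustration: three-way return handling sketch (names
 * hypothetical). Negative means a signal arrived, zero means the timeout
 * expired with the condition still false, positive means it became true.
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, done, HZ);
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// one second passed, still not done
 *	// done became true, ret jiffies were left
 */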
#define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
({ \
int __ret = 0; \
struct hrtimer_sleeper __t; \
\
hrtimer_setup_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
HRTIMER_MODE_REL); \
if ((timeout) != KTIME_MAX) { \
hrtimer_set_expires_range_ns(&__t.timer, timeout, \
current->timer_slack_ns); \
hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
} \
\
__ret = ___wait_event(wq_head, condition, state, 0, 0, \
if (!__t.task) { \
__ret = -ETIME; \
break; \
} \
schedule()); \
\
hrtimer_cancel(&__t.timer); \
destroy_hrtimer_on_stack(&__t.timer); \
__ret; \
})
/**
* wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, as a ktime_t
*
* The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
* @condition evaluates to true or the @timeout elapses.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function returns 0 if @condition became true, or -ETIME if the timeout
* elapsed.
*/
#define wait_event_hrtimeout(wq_head, condition, timeout) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
TASK_UNINTERRUPTIBLE); \
__ret; \
})
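/*
 * Editor illustration: high-resolution timeout sketch (names hypothetical).
 * The timeout is a ktime_t, so sub-jiffy waits are possible.
 *
 *	int ret = wait_event_hrtimeout(my_wq, done, ms_to_ktime(2));
 *	if (ret == -ETIME)
 *		pr_warn("condition not met within 2 ms\n");
 */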
/**
* wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, as a ktime_t
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function returns 0 if @condition became true, -ERESTARTSYS if it was
* interrupted by a signal, or -ETIME if the timeout elapsed.
*/
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
long __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_hrtimeout(wq, condition, timeout, \
TASK_INTERRUPTIBLE); \
__ret; \
})
#define __wait_event_interruptible_exclusive(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
schedule())
#define wait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_interruptible_exclusive(wq, condition); \
__ret; \
})
#define __wait_event_killable_exclusive(wq, condition) \
___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
schedule())
#define wait_event_killable_exclusive(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_killable_exclusive(wq, condition); \
__ret; \
})
#define __wait_event_freezable_exclusive(wq, condition) \
___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
schedule())
#define wait_event_freezable_exclusive(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_freezable_exclusive(wq, condition); \
__ret; \
})
/**
* wait_event_idle - wait for a condition without contributing to system load
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_IDLE) until the
* @condition evaluates to true.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
*/
#define wait_event_idle(wq_head, condition) \
do { \
might_sleep(); \
if (!(condition)) \
___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)
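/*
 * Editor illustration: load-neutral wait sketch (names hypothetical).
 * TASK_IDLE sleeps neither count towards the load average nor are they
 * interrupted by signals, which suits long-lived housekeeping kthreads.
 *
 *	while (!kthread_should_stop())
 *		wait_event_idle(my_wq, work_pending_flag || kthread_should_stop());
 */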
/**
* wait_event_idle_exclusive - wait for a condition without contributing to system load
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_IDLE) until the
* @condition evaluates to true.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so if other processes are waiting on the same list, a wake-up
* that reaches this process does not wake any further exclusive waiters.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
*/
#define wait_event_idle_exclusive(wq_head, condition) \
do { \
might_sleep(); \
if (!(condition)) \
___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)
#define __wait_event_idle_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_IDLE, 0, timeout, \
__ret = schedule_timeout(__ret))
/**
* wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_IDLE) until the
* @condition evaluates to true. The @condition is checked each time
* the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* Returns:
* 0 if the @condition evaluated to %false after the @timeout elapsed,
* 1 if the @condition evaluated to %true after the @timeout elapsed,
* or the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed.
*/
#define wait_event_idle_timeout(wq_head, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
__ret; \
})
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_IDLE, 1, timeout, \
__ret = schedule_timeout(__ret))
/**
* wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_IDLE) until the
* @condition evaluates to true. The @condition is checked each time
* the waitqueue @wq_head is woken up.
*
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so if other processes are waiting on the same list, a wake-up
* that reaches this process does not wake any further exclusive waiters.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* Returns:
* 0 if the @condition evaluated to %false after the @timeout elapsed,
* 1 if the @condition evaluated to %true after the @timeout elapsed,
* or the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed.
*/
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
__ret; \
})
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
int __ret; \
DEFINE_WAIT(__wait); \
if (exclusive) \
__wait.flags |= WQ_FLAG_EXCLUSIVE; \
do { \
__ret = fn(&(wq), &__wait); \
if (__ret) \
break; \
} while (!(condition)); \
__remove_wait_queue(&(wq), &__wait); \
__set_current_state(TASK_RUNNING); \
__ret; \
})
/**
* wait_event_interruptible_locked - sleep until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* It must be called with wq.lock being held. This spinlock is
* unlocked while sleeping but @condition testing is done while lock
* is held and when this macro exits the lock is held.
*
* The lock is locked/unlocked using spin_lock()/spin_unlock()
* functions which must match the way they are locked/unlocked outside
* of this macro.
*
* wake_up_locked() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a
* signal and 0 if @condition evaluated to true.
*/
#define wait_event_interruptible_locked(wq, condition) \
((condition) \
? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
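/*
 * Editor illustration: calling-convention sketch (names hypothetical).
 * The waitqueue's own lock doubles as the lock protecting the condition,
 * and it must be held around the call.
 *
 *	spin_lock(&my_wq.lock);
 *	ret = wait_event_interruptible_locked(my_wq, done);
 *	if (!ret)
 *		consume_event();	// still under my_wq.lock
 *	spin_unlock(&my_wq.lock);
 */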
/**
* wait_event_interruptible_locked_irq - sleep until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* It must be called with wq.lock being held. This spinlock is
* unlocked while sleeping but @condition testing is done while lock
* is held and when this macro exits the lock is held.
*
* The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
* functions which must match the way they are locked/unlocked outside
* of this macro.
*
* wake_up_locked() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a
* signal and 0 if @condition evaluated to true.
*/
#define wait_event_interruptible_locked_irq(wq, condition) \
((condition) \
? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
/**
* wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* It must be called with wq.lock being held. This spinlock is
* unlocked while sleeping but @condition testing is done while lock
* is held and when this macro exits the lock is held.
*
* The lock is locked/unlocked using spin_lock()/spin_unlock()
* functions which must match the way they are locked/unlocked outside
* of this macro.
*
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so if other processes are waiting on the same list, a wake-up
* that reaches this process does not wake any further exclusive waiters.
*
* wake_up_locked() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a
* signal and 0 if @condition evaluated to true.
*/
#define wait_event_interruptible_exclusive_locked(wq, condition) \
((condition) \
? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
/**
* wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* It must be called with wq.lock being held. This spinlock is
* unlocked while sleeping but @condition testing is done while lock
* is held and when this macro exits the lock is held.
*
* The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
* functions which must match the way they are locked/unlocked outside
* of this macro.
*
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so if other processes are waiting on the same list, a wake-up
* that reaches this process does not wake any further exclusive waiters.
*
* wake_up_locked() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a
* signal and 0 if @condition evaluated to true.
*/
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
((condition) \
? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
#define __wait_event_killable(wq, condition) \
___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
/**
* wait_event_killable - sleep until a condition gets true
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_KILLABLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a
* signal and 0 if @condition evaluated to true.
*/
#define wait_event_killable(wq_head, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_killable(wq_head, condition); \
__ret; \
})
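/*
 * Editor illustration: killable-wait sketch (names hypothetical). This is
 * useful where an uninterruptible wait could leave a task stuck forever,
 * yet ordinary signals must not abort the operation.
 *
 *	int ret = wait_event_killable(my_wq, done);
 *	if (ret)		// -ERESTARTSYS: a fatal signal is pending
 *		return ret;
 */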
#define __wait_event_state(wq, condition, state) \
___wait_event(wq, condition, state, 0, 0, schedule())
/**
* wait_event_state - sleep until a condition gets true
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @state: state to sleep in
*
* The process is put to sleep (@state) until the @condition evaluates to true
* or a signal is received (when allowed by @state). The @condition is checked
* each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a signal
* (when allowed by @state) and 0 if @condition evaluated to true.
*/
#define wait_event_state(wq_head, condition, state) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_state(wq_head, condition, state); \
__ret; \
})
#define __wait_event_state_exclusive(wq, condition, state) \
___wait_event(wq, condition, state, 1, 0, schedule())
#define wait_event_state_exclusive(wq, condition, state) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_state_exclusive(wq, condition, state); \
__ret; \
})
#define __wait_event_killable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_KILLABLE, 0, timeout, \
__ret = schedule_timeout(__ret))
/**
* wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_KILLABLE) until the
* @condition evaluates to true or a kill signal is received.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* Returns:
* 0 if the @condition evaluated to %false after the @timeout elapsed,
* 1 if the @condition evaluated to %true after the @timeout elapsed,
* the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
* interrupted by a kill signal.
*
* Only kill signals interrupt this process.
*/
#define wait_event_killable_timeout(wq_head, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_killable_timeout(wq_head, \
condition, timeout); \
__ret; \
})
#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
spin_unlock_irq(&lock); \
cmd; \
schedule(); \
spin_lock_irq(&lock))
/**
* wait_event_lock_irq_cmd - sleep until a condition gets true. The
* condition is checked under the lock. This
* is expected to be called with the lock
* taken.
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before cmd
* and schedule() and reacquired afterwards.
* @cmd: a command which is invoked outside the critical section before
* sleep
*
* The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
* @condition evaluates to true. The @condition is checked each time
* the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* This is supposed to be called while holding the lock. The lock is
* dropped before invoking the cmd and going to sleep and is reacquired
* afterwards.
*/
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
do { \
if (condition) \
break; \
__wait_event_lock_irq(wq_head, condition, lock, cmd); \
} while (0)
/**
* wait_event_lock_irq - sleep until a condition gets true. The
* condition is checked under the lock. This
* is expected to be called with the lock
* taken.
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before schedule()
* and reacquired afterwards.
*
* The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
* @condition evaluates to true. The @condition is checked each time
* the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* This is supposed to be called while holding the lock. The lock is
* dropped before going to sleep and is reacquired afterwards.
*/
#define wait_event_lock_irq(wq_head, condition, lock) \
do { \
if (condition) \
break; \
__wait_event_lock_irq(wq_head, condition, lock, ); \
} while (0)
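/*
 * Editor illustration: sketch for the *_lock_irq variants ("dev" and its
 * members are hypothetical). The condition is evaluated with dev->lock
 * held; the macro drops and reacquires it only around schedule().
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, !list_empty(&dev->queue), dev->lock);
 *	req = list_first_entry(&dev->queue, struct hypo_request, list);
 *	spin_unlock_irq(&dev->lock);
 */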
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
spin_unlock_irq(&lock); \
cmd; \
schedule(); \
spin_lock_irq(&lock))
/**
* wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
* The condition is checked under the lock. This is expected to
* be called with the lock taken.
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before cmd and
* schedule() and reacquired afterwards.
* @cmd: a command which is invoked outside the critical section before
* sleep
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received. The @condition is
* checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* This is supposed to be called while holding the lock. The lock is
* dropped before invoking the cmd and going to sleep and is reacquired
* afterwards.
*
* The macro will return -ERESTARTSYS if it was interrupted by a signal
* and 0 if @condition evaluated to true.
*/
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({ \
int __ret = 0; \
if (!(condition)) \
__ret = __wait_event_interruptible_lock_irq(wq_head, \
condition, lock, cmd); \
__ret; \
})
/**
* wait_event_interruptible_lock_irq - sleep until a condition gets true.
* The condition is checked under the lock. This is expected
* to be called with the lock taken.
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before schedule()
* and reacquired afterwards.
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received. The @condition is
* checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* This is supposed to be called while holding the lock. The lock is
* dropped before going to sleep and is reacquired afterwards.
*
* The macro will return -ERESTARTSYS if it was interrupted by a signal
* and 0 if @condition evaluated to true.
*/
#define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
({ \
int __ret = 0; \
if (!(condition)) \
__ret = __wait_event_interruptible_lock_irq(wq_head, \
condition, lock,); \
__ret; \
})
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
state, 0, timeout, \
spin_unlock_irq(&lock); \
__ret = schedule_timeout(__ret); \
spin_lock_irq(&lock));
/**
* wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
* true or a timeout elapses. The condition is checked under
* the lock. This is expected to be called with the lock taken.
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before schedule()
* and reacquired afterwards.
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received. The @condition is
* checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* This is supposed to be called while holding the lock. The lock is
* dropped before going to sleep and is reacquired afterwards.
*
* The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it was
* interrupted by a signal, or the remaining jiffies (at least 1) if the
* @condition evaluated to true before the @timeout elapsed.
*/
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_lock_irq_timeout( \
wq_head, condition, lock, timeout, \
TASK_INTERRUPTIBLE); \
__ret; \
})
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_lock_irq_timeout( \
wq_head, condition, lock, timeout, \
TASK_UNINTERRUPTIBLE); \
__ret; \
})
/*
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
#define DEFINE_WAIT_FUNC(name, function) \
struct wait_queue_entry name = { \
.private = current, \
.func = function, \
.entry = LIST_HEAD_INIT((name).entry), \
}
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
#define init_wait_func(wait, function) \
do { \
(wait)->private = current; \
(wait)->func = function; \
INIT_LIST_HEAD(&(wait)->entry); \
(wait)->flags = 0; \
} while (0)
#define init_wait(wait) init_wait_func(wait, autoremove_wake_function)
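/*
 * Editor illustration: an open-coded equivalent of wait_event() built from
 * the helpers declared above (names hypothetical). This is roughly what the
 * wait_event*() macros expand to, and is the pattern to use when the
 * sleep/wake logic does not fit a single condition expression.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */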
typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
#endif /* _LINUX_WAIT_H */
// SPDX-License-Identifier: GPL-2.0+
/*
* Universal/legacy driver for 8250/16550-type serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2001 Russell King.
*
* Supports:
* early_serial_setup() ports
* userspace-configurable "phantom" ports
* serial8250_register_8250_port() ports
*/
#include <linux/acpi.h>
#include <linux/hashtable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/nmi.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/irq.h>
#include "8250.h"
#define PASS_LIMIT 512
struct irq_info {
struct hlist_node node;
int irq;
spinlock_t lock; /* Protects list not the hash */
struct list_head *head;
};
#define IRQ_HASH_BITS 5 /* Can be adjusted later */
static DEFINE_HASHTABLE(irq_lists, IRQ_HASH_BITS);
static DEFINE_MUTEX(hash_mutex); /* Used to walk the hash */
/*
* This is the serial driver's interrupt routine.
*
* Arjan thinks the old way was overly complex, so it got simplified.
* Alan disagrees, saying that we need the complexity to handle the weird
* nature of ISA shared interrupts. (This is a special exception.)
*
* In order to handle ISA shared interrupts properly, we need to check
* that all ports have been serviced, and therefore the ISA interrupt
* line has been de-asserted.
*
* This means we need to loop through all ports, checking that they
* don't have an interrupt pending.
*/
static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
{
struct irq_info *i = dev_id;
struct list_head *l, *end = NULL;
int pass_counter = 0, handled = 0;
guard(spinlock)(&i->lock);
l = i->head;
do {
struct uart_8250_port *up = list_entry(l, struct uart_8250_port, list);
struct uart_port *port = &up->port;
if (port->handle_irq(port)) {
handled = 1;
end = NULL;
} else if (end == NULL)
end = l;
l = l->next;
if (l == i->head && pass_counter++ > PASS_LIMIT)
break;
} while (l != end);
return IRQ_RETVAL(handled);
}
/*
* To support ISA shared interrupts, we need to have one interrupt
* handler that ensures that the IRQ line has been deasserted
* before returning. Failing to do this will result in the IRQ
* line being stuck active, and, since ISA irqs are edge triggered,
* no more IRQs will be seen.
*/
static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
{
spin_lock_irq(&i->lock);
if (!list_empty(i->head)) {
if (i->head == &up->list)
i->head = i->head->next;
list_del(&up->list);
} else {
BUG_ON(i->head != &up->list);
i->head = NULL;
}
spin_unlock_irq(&i->lock);
/* List empty so throw away the hash node */
if (i->head == NULL) {
hlist_del(&i->node);
kfree(i);
}
}
/*
* Either:
* - find the corresponding info in the hashtable and return it, or
* - allocate a new one, add it to the hashtable and return it.
*/
static struct irq_info *serial_get_or_create_irq_info(const struct uart_8250_port *up)
{
struct irq_info *i;
guard(mutex)(&hash_mutex);
hash_for_each_possible(irq_lists, i, node, up->port.irq)
if (i->irq == up->port.irq)
return i;
i = kzalloc(sizeof(*i), GFP_KERNEL);
if (i == NULL)
return ERR_PTR(-ENOMEM);
spin_lock_init(&i->lock);
i->irq = up->port.irq;
hash_add(irq_lists, &i->node, i->irq);
return i;
}
static int serial_link_irq_chain(struct uart_8250_port *up)
{
struct irq_info *i;
int ret;
i = serial_get_or_create_irq_info(up);
if (IS_ERR(i))
return PTR_ERR(i);
scoped_guard(spinlock_irq, &i->lock) {
if (i->head) {
list_add(&up->list, i->head);
return 0;
}
INIT_LIST_HEAD(&up->list);
i->head = &up->list;
}
ret = request_irq(up->port.irq, serial8250_interrupt, up->port.irqflags, up->port.name, i);
if (ret < 0)
serial_do_unlink(i, up);
return ret;
}
static void serial_unlink_irq_chain(struct uart_8250_port *up)
{
struct irq_info *i;
guard(mutex)(&hash_mutex);
hash_for_each_possible(irq_lists, i, node, up->port.irq)
if (i->irq == up->port.irq) {
if (WARN_ON(i->head == NULL))
return;
if (list_empty(i->head))
free_irq(up->port.irq, i);
serial_do_unlink(i, up);
return;
}
WARN_ON(1);
}
/*
* This function is used to handle ports that do not have an
* interrupt. This doesn't work very well for 16450's, but gives
* barely passable results for a 16550A. (Although at the expense
* of much CPU overhead).
*/
static void serial8250_timeout(struct timer_list *t)
{
struct uart_8250_port *up = timer_container_of(up, t, timer);
up->port.handle_irq(&up->port);
mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
}
static void serial8250_backup_timeout(struct timer_list *t)
{
struct uart_8250_port *up = timer_container_of(up, t, timer);
unsigned int iir, ier = 0, lsr;
unsigned long flags;
uart_port_lock_irqsave(&up->port, &flags);
/*
* Must disable interrupts or else we risk racing with the interrupt
* based handler.
*/
if (up->port.irq) {
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, 0);
}
iir = serial_in(up, UART_IIR);
/*
* This should be a safe test for anyone who doesn't trust the
* IIR bits on their UART, but it's specifically designed for
* the "Diva" UART used on the management processor on many HP
* ia64 and parisc boxes.
*/
lsr = serial_lsr_in(up);
if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
(!kfifo_is_empty(&up->port.state->port.xmit_fifo) ||
up->port.x_char) &&
(lsr & UART_LSR_THRE)) {
iir &= ~(UART_IIR_ID | UART_IIR_NO_INT);
iir |= UART_IIR_THRI;
}
if (!(iir & UART_IIR_NO_INT))
serial8250_tx_chars(up);
if (up->port.irq)
serial_out(up, UART_IER, ier);
uart_port_unlock_irqrestore(&up->port, flags);
/* Standard timer interval plus 0.2s to keep the port running */
mod_timer(&up->timer,
jiffies + uart_poll_timeout(&up->port) + HZ / 5);
}
static void univ8250_setup_timer(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
/*
* The UART_BUG_THRE detection only gives an accurate result the first
* time the port is opened, so the value detected then needs to be preserved.
*/
if (up->bugs & UART_BUG_THRE) {
pr_debug("%s - using backup timer\n", port->name);
up->timer.function = serial8250_backup_timeout;
mod_timer(&up->timer, jiffies +
uart_poll_timeout(port) + HZ / 5);
}
/*
* If the "interrupt" for this port doesn't correspond with any
* hardware interrupt, we use a timer-based system. The original
* driver used to do this with IRQ0.
*/
if (!port->irq)
mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
}
static int univ8250_setup_irq(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
if (port->irq)
return serial_link_irq_chain(up);
return 0;
}
static void univ8250_release_irq(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
timer_delete_sync(&up->timer);
up->timer.function = serial8250_timeout;
if (port->irq)
serial_unlink_irq_chain(up);
}
const struct uart_ops *univ8250_port_base_ops;
struct uart_ops univ8250_port_ops;
static const struct uart_8250_ops univ8250_driver_ops = {
.setup_irq = univ8250_setup_irq,
.release_irq = univ8250_release_irq,
.setup_timer = univ8250_setup_timer,
};
static struct uart_8250_port serial8250_ports[UART_NR];
/**
* serial8250_get_port - retrieve struct uart_8250_port
* @line: serial line number
*
* This function retrieves struct uart_8250_port for the specific line.
* This struct *must* *not* be used to perform an 8250 or serial core operation
* which is not accessible otherwise. Its only purpose is to make the struct
* accessible to the runtime-pm callbacks for context suspend/restore.
* No locking is assumed here because runtime-pm suspend/resume callbacks
* should not be invoked while any other operation is being performed on
* the port.
*/
struct uart_8250_port *serial8250_get_port(int line)
{
return &serial8250_ports[line];
}
EXPORT_SYMBOL_GPL(serial8250_get_port);
static inline void serial8250_apply_quirks(struct uart_8250_port *up)
{
up->port.quirks |= skip_txen_test ? UPQ_NO_TXEN_TEST : 0;
}
struct uart_8250_port *serial8250_setup_port(int index)
{
struct uart_8250_port *up;
if (index >= UART_NR)
return NULL;
up = &serial8250_ports[index];
up->port.line = index;
up->port.port_id = index;
serial8250_init_port(up);
if (!univ8250_port_base_ops)
univ8250_port_base_ops = up->port.ops;
up->port.ops = &univ8250_port_ops;
timer_setup(&up->timer, serial8250_timeout, 0);
up->ops = &univ8250_driver_ops;
serial8250_set_defaults(up);
return up;
}
void __init serial8250_register_ports(struct uart_driver *drv, struct device *dev)
{
int i;
for (i = 0; i < nr_uarts; i++) {
struct uart_8250_port *up = &serial8250_ports[i];
if (up->port.type == PORT_8250_CIR)
continue;
if (up->port.dev)
continue;
up->port.dev = dev;
if (uart_console_registered(&up->port))
pm_runtime_get_sync(up->port.dev);
serial8250_apply_quirks(up);
uart_add_one_port(drv, &up->port);
}
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
static void univ8250_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_8250_port *up = &serial8250_ports[co->index];
serial8250_console_write(up, s, count);
}
static int univ8250_console_setup(struct console *co, char *options)
{
struct uart_8250_port *up;
struct uart_port *port;
int retval, i;
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
if (co->index < 0 || co->index >= UART_NR)
co->index = 0;
/*
* If the console is past the initial isa ports, init more ports up to
* co->index as needed and increment nr_uarts accordingly.
*/
for (i = nr_uarts; i <= co->index; i++) {
up = serial8250_setup_port(i);
if (!up)
return -ENODEV;
nr_uarts++;
}
port = &serial8250_ports[co->index].port;
/* link port to console */
uart_port_set_cons(port, co);
retval = serial8250_console_setup(port, options, false);
if (retval != 0)
uart_port_set_cons(port, NULL);
return retval;
}
static int univ8250_console_exit(struct console *co)
{
struct uart_port *port;
port = &serial8250_ports[co->index].port;
return serial8250_console_exit(port);
}
/**
* univ8250_console_match - non-standard console matching
* @co: registering console
* @name: name from console command line
* @idx: index from console command line
* @options: ptr to option string from console command line
*
* Only attempts to match console command lines of the form:
* console=uart[8250],io|mmio|mmio16|mmio32,<addr>[,<options>]
* console=uart[8250],0x<addr>[,<options>]
* This form is used to register an initial earlycon boot console and
* replace it with the serial8250_console at 8250 driver init.
*
* Performs console setup for a match (as required by the interface).
* If no <options> are specified, then assume the h/w is already setup.
*
* Returns 0 if console matches; otherwise non-zero to use default matching
*/
static int univ8250_console_match(struct console *co, char *name, int idx,
char *options)
{
char match[] = "uart"; /* 8250-specific earlycon name */
enum uart_iotype iotype;
resource_size_t addr;
int i;
if (strncmp(name, match, 4) != 0)
return -ENODEV;
if (uart_parse_earlycon(options, &iotype, &addr, &options))
return -ENODEV;
/* try to match the port specified on the command line */
for (i = 0; i < nr_uarts; i++) {
struct uart_port *port = &serial8250_ports[i].port;
if (port->iotype != iotype)
continue;
if ((iotype == UPIO_MEM || iotype == UPIO_MEM16 ||
iotype == UPIO_MEM32 || iotype == UPIO_MEM32BE)
&& (port->mapbase != addr))
continue;
if (iotype == UPIO_PORT && port->iobase != addr)
continue;
co->index = i;
uart_port_set_cons(port, co);
return serial8250_console_setup(port, options, true);
}
return -ENODEV;
}
static struct console univ8250_console = {
.name = "ttyS",
.write = univ8250_console_write,
.device = uart_console_device,
.setup = univ8250_console_setup,
.exit = univ8250_console_exit,
.match = univ8250_console_match,
.flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
.data = &serial8250_reg,
};
static int __init univ8250_console_init(void)
{
if (nr_uarts == 0)
return -ENODEV;
serial8250_isa_init_ports();
register_console(&univ8250_console);
return 0;
}
console_initcall(univ8250_console_init);
#define SERIAL8250_CONSOLE (&univ8250_console)
#else
#define SERIAL8250_CONSOLE NULL
#endif
struct uart_driver serial8250_reg = {
.owner = THIS_MODULE,
.driver_name = "serial",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.cons = SERIAL8250_CONSOLE,
};
/*
* early_serial_setup - early registration for 8250 ports
*
* Setup an 8250 port structure prior to console initialisation. Use
* after console initialisation will cause undefined behaviour.
*/
int __init early_serial_setup(struct uart_port *port)
{
struct uart_port *p;
if (port->line >= ARRAY_SIZE(serial8250_ports) || nr_uarts == 0)
return -ENODEV;
serial8250_isa_init_ports();
p = &serial8250_ports[port->line].port;
p->iobase = port->iobase;
p->membase = port->membase;
p->irq = port->irq;
p->irqflags = port->irqflags;
p->uartclk = port->uartclk;
p->fifosize = port->fifosize;
p->regshift = port->regshift;
p->iotype = port->iotype;
p->flags = port->flags;
p->mapbase = port->mapbase;
p->mapsize = port->mapsize;
p->private_data = port->private_data;
p->type = port->type;
p->line = port->line;
serial8250_set_defaults(up_to_u8250p(p));
if (port->serial_in)
p->serial_in = port->serial_in;
if (port->serial_out)
p->serial_out = port->serial_out;
if (port->handle_irq)
p->handle_irq = port->handle_irq;
return 0;
}
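/*
 * Editor illustration: a board-setup sketch (the base address, IRQ and
 * clock below are made-up values). Platform code fills in a struct
 * uart_port and registers it before console initialisation.
 *
 *	static struct uart_port early_port __initdata = {
 *		.iobase  = 0x3f8,
 *		.irq     = 4,
 *		.uartclk = 1843200,
 *		.iotype  = UPIO_PORT,
 *		.flags   = UPF_BOOT_AUTOCONF,
 *		.line    = 0,
 *	};
 *
 *	early_serial_setup(&early_port);
 */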
/**
* serial8250_suspend_port - suspend one serial port
* @line: serial line number
*
* Suspend one serial port.
*/
void serial8250_suspend_port(int line)
{
struct uart_8250_port *up = &serial8250_ports[line];
struct uart_port *port = &up->port;
if (!console_suspend_enabled && uart_console(port) &&
port->type != PORT_8250) {
unsigned char canary = 0xa5;
serial_out(up, UART_SCR, canary);
if (serial_in(up, UART_SCR) == canary)
up->canary = canary;
}
uart_suspend_port(&serial8250_reg, port);
}
EXPORT_SYMBOL(serial8250_suspend_port);
/**
* serial8250_resume_port - resume one serial port
* @line: serial line number
*
* Resume one serial port.
*/
void serial8250_resume_port(int line)
{
struct uart_8250_port *up = &serial8250_ports[line];
struct uart_port *port = &up->port;
up->canary = 0;
if (up->capabilities & UART_NATSEMI) {
/* Ensure it's still in high speed mode */
serial_port_out(port, UART_LCR, 0xE0);
ns16550a_goto_highspeed(up);
serial_port_out(port, UART_LCR, 0);
port->uartclk = 921600*16;
}
uart_resume_port(&serial8250_reg, port);
}
EXPORT_SYMBOL(serial8250_resume_port);
/*
* serial8250_register_8250_port and serial8250_unregister_port allows for
* 16x50 serial ports to be configured at run-time, to support PCMCIA
* modems and PCI multiport cards.
*/
static DEFINE_MUTEX(serial_mutex);
static struct uart_8250_port *serial8250_find_match_or_unused(const struct uart_port *port)
{
int i;
/*
* First, find a port entry which matches.
*/
for (i = 0; i < nr_uarts; i++)
if (uart_match_port(&serial8250_ports[i].port, port))
return &serial8250_ports[i];
/* try line number first if still available */
i = port->line;
if (i < nr_uarts && serial8250_ports[i].port.type == PORT_UNKNOWN &&
serial8250_ports[i].port.iobase == 0)
return &serial8250_ports[i];
/*
* We didn't find a matching entry, so look for the first
* free entry. We look for one which hasn't been previously
* used (indicated by zero iobase).
*/
for (i = 0; i < nr_uarts; i++)
if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
serial8250_ports[i].port.iobase == 0)
return &serial8250_ports[i];
/*
* That also failed. Last resort is to find any entry which
* doesn't have a real port associated with it.
*/
for (i = 0; i < nr_uarts; i++)
if (serial8250_ports[i].port.type == PORT_UNKNOWN)
return &serial8250_ports[i];
return NULL;
}
static void serial_8250_overrun_backoff_work(struct work_struct *work)
{
struct uart_8250_port *up = container_of(to_delayed_work(work), struct uart_8250_port,
overrun_backoff);
guard(uart_port_lock_irqsave)(&up->port);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
}
/**
* serial8250_register_8250_port - register a serial port
* @up: serial port template
*
* Configure the serial port specified by the request. If the
* port exists and is in use, it is hung up and unregistered
* first.
*
* The port is then probed and, if necessary, the IRQ is autodetected.
* If this fails, an error is returned.
*
* On success the port is ready to use and the line number is returned.
*/
int serial8250_register_8250_port(const struct uart_8250_port *up)
{
struct uart_8250_port *uart;
int ret;
if (up->port.uartclk == 0)
return -EINVAL;
guard(mutex)(&serial_mutex);
uart = serial8250_find_match_or_unused(&up->port);
if (!uart) {
/*
* If the port is past the initial isa ports, initialize a new
* port and increment nr_uarts accordingly.
*/
uart = serial8250_setup_port(nr_uarts);
if (!uart)
return -ENOSPC;
nr_uarts++;
}
/* Check if it is CIR already. We check this below again, see there why. */
if (uart->port.type == PORT_8250_CIR)
return -ENODEV;
if (uart->port.dev)
uart_remove_one_port(&serial8250_reg, &uart->port);
uart->port.ctrl_id = up->port.ctrl_id;
uart->port.port_id = up->port.port_id;
uart->port.iobase = up->port.iobase;
uart->port.membase = up->port.membase;
uart->port.irq = up->port.irq;
uart->port.irqflags = up->port.irqflags;
uart->port.uartclk = up->port.uartclk;
uart->port.fifosize = up->port.fifosize;
uart->port.regshift = up->port.regshift;
uart->port.iotype = up->port.iotype;
uart->port.flags = up->port.flags | UPF_BOOT_AUTOCONF;
uart->bugs = up->bugs;
uart->port.mapbase = up->port.mapbase;
uart->port.mapsize = up->port.mapsize;
uart->port.private_data = up->port.private_data;
uart->tx_loadsz = up->tx_loadsz;
uart->capabilities = up->capabilities;
uart->port.throttle = up->port.throttle;
uart->port.unthrottle = up->port.unthrottle;
uart->port.rs485_config = up->port.rs485_config;
uart->port.rs485_supported = up->port.rs485_supported;
uart->port.rs485 = up->port.rs485;
uart->rs485_start_tx = up->rs485_start_tx;
uart->rs485_stop_tx = up->rs485_stop_tx;
uart->lsr_save_mask = up->lsr_save_mask;
uart->dma = up->dma;
/* Take tx_loadsz from fifosize if it wasn't set separately */
if (uart->port.fifosize && !uart->tx_loadsz)
uart->tx_loadsz = uart->port.fifosize;
if (up->port.dev) {
uart->port.dev = up->port.dev;
ret = uart_get_rs485_mode(&uart->port);
if (ret)
goto err;
}
if (up->port.flags & UPF_FIXED_TYPE)
uart->port.type = up->port.type;
/*
* Only call mctrl_gpio_init(), if the device has no ACPI
* companion device
*/
if (!has_acpi_companion(uart->port.dev)) {
struct mctrl_gpios *gpios = mctrl_gpio_init(&uart->port, 0);
if (IS_ERR(gpios)) {
ret = PTR_ERR(gpios);
goto err;
} else {
uart->gpios = gpios;
}
}
serial8250_set_defaults(uart);
/* Possibly override default I/O functions. */
if (up->port.serial_in)
uart->port.serial_in = up->port.serial_in;
if (up->port.serial_out)
uart->port.serial_out = up->port.serial_out;
if (up->port.handle_irq)
uart->port.handle_irq = up->port.handle_irq;
/* Possibly override set_termios call */
if (up->port.set_termios)
uart->port.set_termios = up->port.set_termios;
if (up->port.set_ldisc)
uart->port.set_ldisc = up->port.set_ldisc;
if (up->port.get_mctrl)
uart->port.get_mctrl = up->port.get_mctrl;
if (up->port.set_mctrl)
uart->port.set_mctrl = up->port.set_mctrl;
if (up->port.get_divisor)
uart->port.get_divisor = up->port.get_divisor;
if (up->port.set_divisor)
uart->port.set_divisor = up->port.set_divisor;
if (up->port.startup)
uart->port.startup = up->port.startup;
if (up->port.shutdown)
uart->port.shutdown = up->port.shutdown;
if (up->port.pm)
uart->port.pm = up->port.pm;
if (up->port.handle_break)
uart->port.handle_break = up->port.handle_break;
if (up->dl_read)
uart->dl_read = up->dl_read;
if (up->dl_write)
uart->dl_write = up->dl_write;
/* Check the type (again)! It might have changed by the port.type assignment above. */
if (uart->port.type != PORT_8250_CIR) {
if (uart_console_registered(&uart->port))
pm_runtime_get_sync(uart->port.dev);
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
&uart->capabilities);
serial8250_apply_quirks(uart);
ret = uart_add_one_port(&serial8250_reg,
&uart->port);
if (ret)
goto err;
ret = uart->port.line;
} else {
dev_info(uart->port.dev,
"skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
uart->port.iobase,
(unsigned long long)uart->port.mapbase,
uart->port.irq);
ret = 0;
}
if (!uart->lsr_save_mask)
uart->lsr_save_mask = LSR_SAVE_FLAGS; /* Use default LSR mask */
/* Initialise interrupt backoff work if required */
if (up->overrun_backoff_time_ms > 0) {
uart->overrun_backoff_time_ms =
up->overrun_backoff_time_ms;
INIT_DELAYED_WORK(&uart->overrun_backoff,
serial_8250_overrun_backoff_work);
} else {
uart->overrun_backoff_time_ms = 0;
}
return ret;
err:
uart->port.dev = NULL;
return ret;
}
EXPORT_SYMBOL(serial8250_register_8250_port);
/**
* serial8250_unregister_port - remove a 16x50 serial port at runtime
* @line: serial line number
*
* Remove one serial port. This may not be called from interrupt
* context. We hand the port back to the original low level driver.
*/
void serial8250_unregister_port(int line)
{
struct uart_8250_port *uart = &serial8250_ports[line];
guard(mutex)(&serial_mutex);
if (uart->em485) {
guard(uart_port_lock_irqsave)(&uart->port);
serial8250_em485_destroy(uart);
}
uart_remove_one_port(&serial8250_reg, &uart->port);
if (serial8250_isa_devs) {
uart->port.flags &= ~UPF_BOOT_AUTOCONF;
uart->port.type = PORT_UNKNOWN;
uart->port.dev = &serial8250_isa_devs->dev;
uart->port.port_id = line;
uart->capabilities = 0;
serial8250_init_port(uart);
serial8250_apply_quirks(uart);
uart_add_one_port(&serial8250_reg, &uart->port);
} else {
uart->port.dev = NULL;
}
}
EXPORT_SYMBOL(serial8250_unregister_port);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>
#include <linux/static_call.h>
#include <asm/cpuid/api.h>
#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/i8259.h>
#include <asm/msr.h>
#include <asm/topology.h>
#include <asm/uv/uv.h>
#include <asm/sev.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);
#define KHZ 1000
/*
* TSC can be unstable due to cpufreq or due to unsynced TSCs
*/
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;
static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
int tsc_clocksource_reliable;
static int __read_mostly tsc_force_recalibrate;
static struct clocksource_base art_base_clk = {
.id = CSID_X86_ART,
};
static bool have_art;
struct cyc2ns {
struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
seqcount_latch_t seq; /* 32 + 4 = 36 */
}; /* fits one cacheline */
static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
static int __init tsc_early_khz_setup(char *buf)
{
return kstrtouint(buf, 0, &tsc_early_khz);
}
early_param("tsc_early_khz", tsc_early_khz_setup);
__always_inline void __cyc2ns_read(struct cyc2ns_data *data)
{
int seq, idx;
do {
seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
idx = seq & 1;
data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
} while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
}
__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
{
preempt_disable_notrace();
__cyc2ns_read(data);
}
__always_inline void cyc2ns_read_end(void)
{
preempt_enable_notrace();
}
/*
* Accelerators for sched_clock()
* convert from cycles(64bits) => nanoseconds (64bits)
* basic equation:
* ns = cycles / (freq / ns_per_sec)
* ns = cycles * (ns_per_sec / freq)
* ns = cycles * (10^9 / (cpu_khz * 10^3))
* ns = cycles * (10^6 / cpu_khz)
*
* Then we use scaling math (suggested by george@mvista.com) to get:
* ns = cycles * (10^6 * SC / cpu_khz) / SC
* ns = cycles * cyc2ns_scale / SC
*
* And since SC is a constant power of two, we can convert the div
* into a shift. The larger SC is, the more accurate the conversion, but
* cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
* (64-bit result) can be used.
*
* We can use khz divisor instead of mhz to keep a better precision.
* (mathieu.desnoyers@polymtl.ca)
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
struct cyc2ns_data data;
unsigned long long ns;
__cyc2ns_read(&data);
ns = data.cyc2ns_offset;
ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
return ns;
}
static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
unsigned long long ns;
preempt_disable_notrace();
ns = __cycles_2_ns(cyc);
preempt_enable_notrace();
return ns;
}
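/*
 * Editor illustration: a worked instance of the scaling math described
 * above (all numbers are assumed for the example). For a 2 GHz TSC
 * (tsc_khz = 2,000,000) with cyc2ns_shift = 31, one cycle is 0.5 ns, so
 * clocks_calc_mult_shift() yields roughly
 *
 *	cyc2ns_mul = 0.5 * 2^31 = 0x40000000
 *
 * and converting e.g. 4,000,000,000 cycles (before adding cyc2ns_offset)
 * gives
 *
 *	mul_u64_u32_shr(4000000000ULL, 0x40000000, 31) = 2,000,000,000 ns
 *
 * i.e. 2 seconds, as expected for two seconds' worth of 2 GHz cycles.
 */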
static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
unsigned long long ns_now;
struct cyc2ns_data data;
struct cyc2ns *c2n;
ns_now = cycles_2_ns(tsc_now);
/*
* Compute a new multiplier as per the above comment and ensure our
* time function is continuous; see the comment near struct
* cyc2ns_data.
*/
clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
NSEC_PER_MSEC, 0);
/*
* cyc2ns_shift is exported via arch_perf_update_userpage() where it is
* not expected to be greater than 31 due to the original published
* conversion algorithm shifting a 32-bit value (now specifies a 64-bit
* value) - refer to the perf_event_mmap_page documentation in perf_event.h.
*/
if (data.cyc2ns_shift == 32) {
data.cyc2ns_shift = 31;
data.cyc2ns_mul >>= 1;
}
data.cyc2ns_offset = ns_now -
mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
c2n = per_cpu_ptr(&cyc2ns, cpu);
write_seqcount_latch_begin(&c2n->seq);
c2n->data[0] = data;
write_seqcount_latch(&c2n->seq);
c2n->data[1] = data;
write_seqcount_latch_end(&c2n->seq);
}
static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
unsigned long flags;
local_irq_save(flags);
sched_clock_idle_sleep_event();
if (khz)
__set_cyc2ns_scale(khz, cpu, tsc_now);
sched_clock_idle_wakeup_event();
local_irq_restore(flags);
}
/*
* Initialize cyc2ns for boot cpu
*/
static void __init cyc2ns_init_boot_cpu(void)
{
struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
seqcount_latch_init(&c2n->seq);
__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}
/*
* Secondary CPUs do not run through tsc_init(), so set up
* all the scale factors for all CPUs, assuming the same
* speed as the bootup CPU.
*/
static void __init cyc2ns_init_secondary_cpus(void)
{
unsigned int cpu, this_cpu = smp_processor_id();
struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
struct cyc2ns_data *data = c2n->data;
for_each_possible_cpu(cpu) {
if (cpu != this_cpu) {
seqcount_latch_init(&c2n->seq);
c2n = per_cpu_ptr(&cyc2ns, cpu);
c2n->data[0] = data[0];
c2n->data[1] = data[1];
}
}
}
/*
* Scheduler clock - returns current time in nanosec units.
*/
noinstr u64 native_sched_clock(void)
{
if (static_branch_likely(&__use_tsc)) {
u64 tsc_now = rdtsc();
/* return the value in ns */
return __cycles_2_ns(tsc_now);
}
/*
* Fall back to jiffies if there's no TSC available:
* ( But note that we still use it if the TSC is marked
* unstable. We do this because unlike Time Of Day,
* the scheduler clock tolerates small errors and it's
* very important for it to be as fast as the platform
* can achieve it. )
*/
/* No locking but a rare wrong value is not a big deal: */
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
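/*
 * For the jiffies fallback above, each jiffy accounts for 10^9 / HZ ns; with
 * HZ = 250, for example, that is 4,000,000 ns (4 ms) per tick, so the
 * scheduler clock still advances, just at tick granularity.
 */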
/*
* Generate a sched_clock if you already have a TSC value.
*/
u64 native_sched_clock_from_tsc(u64 tsc)
{
return cycles_2_ns(tsc);
}
/*
 * We need to define a real function for sched_clock(), to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
noinstr u64 sched_clock_noinstr(void)
{
return paravirt_sched_clock();
}
bool using_native_sched_clock(void)
{
return static_call_query(pv_sched_clock) == native_sched_clock;
}
#else
u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
bool using_native_sched_clock(void) { return true; }
#endif
notrace u64 sched_clock(void)
{
u64 now;
preempt_disable_notrace();
now = sched_clock_noinstr();
preempt_enable_notrace();
	return now;
}
int check_tsc_unstable(void)
{
return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
mark_tsc_unstable("boot parameter notsc");
return 1;
}
#else
/*
* disable flag for tsc. Takes effect by clearing the TSC cpu flag
* in cpu/common.c
*/
int __init notsc_setup(char *str)
{
setup_clear_cpu_cap(X86_FEATURE_TSC);
return 1;
}
#endif
__setup("notsc", notsc_setup);
static int no_sched_irq_time;
static int no_tsc_watchdog;
static int tsc_as_watchdog;
static int __init tsc_setup(char *str)
{
if (!strcmp(str, "reliable"))
tsc_clocksource_reliable = 1;
if (!strncmp(str, "noirqtime", 9))
no_sched_irq_time = 1;
if (!strcmp(str, "unstable"))
mark_tsc_unstable("boot parameter");
if (!strcmp(str, "nowatchdog")) {
no_tsc_watchdog = 1;
if (tsc_as_watchdog)
pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n",
__func__);
tsc_as_watchdog = 0;
}
if (!strcmp(str, "recalibrate"))
tsc_force_recalibrate = 1;
if (!strcmp(str, "watchdog")) {
if (no_tsc_watchdog)
pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n",
__func__);
else
tsc_as_watchdog = 1;
}
return 1;
}
__setup("tsc=", tsc_setup);
#define MAX_RETRIES 5
#define TSC_DEFAULT_THRESHOLD 0x20000
/*
* Read TSC and the reference counters. Take care of any disturbances
*/
static u64 tsc_read_refs(u64 *p, int hpet)
{
u64 t1, t2;
u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
int i;
for (i = 0; i < MAX_RETRIES; i++) {
t1 = get_cycles();
if (hpet)
*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
else
*p = acpi_pm_read_early();
t2 = get_cycles();
if ((t2 - t1) < thresh)
return t2;
}
return ULLONG_MAX;
}
/*
* Calculate the TSC frequency from HPET reference
*/
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
u64 tmp;
if (hpet2 < hpet1)
hpet2 += 0x100000000ULL;
hpet2 -= hpet1;
tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
do_div(tmp, 1000000);
deltatsc = div64_u64(deltatsc, tmp);
return (unsigned long) deltatsc;
}
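/*
 * Units sketch for calc_hpet_ref() above (hypothetical numbers): the caller
 * passes in deltatsc already multiplied by 10^6 (TSC cycles * 10^6),
 * hpet2 * HPET_PERIOD is the elapsed time in femtoseconds, and dividing by
 * 10^6 turns that into nanoseconds. (cycles * 10^6) / ns then yields kHz,
 * e.g. 24,000,000 cycles over a 10 ms (10^7 ns) HPET interval gives
 * 2.4 * 10^13 / 10^7 = 2,400,000 kHz, i.e. a 2.4 GHz TSC.
 */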
/*
* Calculate the TSC frequency from PMTimer reference
*/
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
u64 tmp;
if (!pm1 && !pm2)
return ULONG_MAX;
if (pm2 < pm1)
pm2 += (u64)ACPI_PM_OVRRUN;
pm2 -= pm1;
tmp = pm2 * 1000000000LL;
do_div(tmp, PMTMR_TICKS_PER_SEC);
do_div(deltatsc, tmp);
return (unsigned long) deltatsc;
}
#define CAL_MS 10
#define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS 1000
#define CAL2_MS 50
#define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS 5000
/*
* Try to calibrate the TSC against the Programmable
* Interrupt Timer and return the frequency of the TSC
* in kHz.
*
* Return ULONG_MAX on failure to calibrate.
*/
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
u64 tsc, t1, t2, delta;
unsigned long tscmin, tscmax;
int pitcnt;
if (!has_legacy_pic()) {
/*
* Relies on tsc_early_delay_calibrate() to have given us semi
* usable udelay(), wait for the same 50ms we would have with
* the PIT loop below.
*/
udelay(10 * USEC_PER_MSEC);
udelay(10 * USEC_PER_MSEC);
udelay(10 * USEC_PER_MSEC);
udelay(10 * USEC_PER_MSEC);
udelay(10 * USEC_PER_MSEC);
return ULONG_MAX;
}
/* Set the Gate high, disable speaker */
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
/*
* Setup CTC channel 2* for mode 0, (interrupt on terminal
* count mode), binary count. Set the latch register to 50ms
* (LSB then MSB) to begin countdown.
*/
outb(0xb0, 0x43);
outb(latch & 0xff, 0x42);
outb(latch >> 8, 0x42);
tsc = t1 = t2 = get_cycles();
pitcnt = 0;
tscmax = 0;
tscmin = ULONG_MAX;
while ((inb(0x61) & 0x20) == 0) {
t2 = get_cycles();
delta = t2 - tsc;
tsc = t2;
if ((unsigned long) delta < tscmin)
tscmin = (unsigned int) delta;
if ((unsigned long) delta > tscmax)
tscmax = (unsigned int) delta;
pitcnt++;
}
/*
* Sanity checks:
*
* If we were not able to read the PIT more than loopmin
* times, then we have been hit by a massive SMI
*
* If the maximum is 10 times larger than the minimum,
* then we got hit by an SMI as well.
*/
if (pitcnt < loopmin || tscmax > 10 * tscmin)
return ULONG_MAX;
/* Calculate the PIT value */
delta = t2 - t1;
do_div(delta, ms);
return delta;
}
/*
* This reads the current MSB of the PIT counter, and
* checks if we are running on sufficiently fast and
* non-virtualized hardware.
*
* Our expectations are:
*
* - the PIT is running at roughly 1.19MHz
*
* - each IO is going to take about 1us on real hardware,
* but we allow it to be much faster (by a factor of 10) or
* _slightly_ slower (ie we allow up to a 2us read+counter
* update - anything else implies an unacceptably slow CPU
* or PIT for the fast calibration to work).
*
* - with 256 PIT ticks to read the value, we have 214us to
* see the same MSB (and overhead like doing a single TSC
* read per MSB value etc).
*
* - We're doing 2 reads per loop (LSB, MSB), and we expect
* them each to take about a microsecond on real hardware.
* So we expect a count value of around 100. But we'll be
* generous, and accept anything over 50.
*
* - if the PIT is stuck, and we see *many* more reads, we
* return early (and the next caller of pit_expect_msb()
* then considers it a failure when it doesn't see the
* next expected value).
*
* These expectations mean that we know that we have seen the
* transition from one expected value to another with a fairly
* high accuracy, and we didn't miss any events. We can thus
* use the TSC value at the transitions to calculate a pretty
* good value for the TSC frequency.
*/
static inline int pit_verify_msb(unsigned char val)
{
/* Ignore LSB */
inb(0x42);
return inb(0x42) == val;
}
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
int count;
u64 tsc = 0, prev_tsc = 0;
for (count = 0; count < 50000; count++) {
if (!pit_verify_msb(val))
break;
prev_tsc = tsc;
tsc = get_cycles();
}
*deltap = get_cycles() - prev_tsc;
*tscp = tsc;
/*
* We require _some_ success, but the quality control
* will be based on the error terms on the TSC values.
*/
return count > 5;
}
/*
* How many MSB values do we want to see? We aim for
* a maximum error rate of 500ppm (in practice the
* real error is much smaller), but refuse to spend
* more than 50ms on it.
*/
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
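/*
 * For reference (derived from the constants above, assuming the standard
 * PIT_TICK_RATE of 1193182 Hz): MAX_QUICK_PIT_ITERATIONS works out to
 * 50 * 1193182 / 1000 / 256 = 233 MSB steps, each step being 256 PIT ticks
 * or roughly 214us, so the full budget is about 50ms. The ">> 11" test below
 * accepts an error of at most delta/2048, i.e. just under 500 ppm.
 */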
static unsigned long quick_pit_calibrate(void)
{
int i;
u64 tsc, delta;
unsigned long d1, d2;
if (!has_legacy_pic())
return 0;
/* Set the Gate high, disable speaker */
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
/*
* Counter 2, mode 0 (one-shot), binary count
*
* NOTE! Mode 2 decrements by two (and then the
* output is flipped each time, giving the same
* final output frequency as a decrement-by-one),
* so mode 0 is much better when looking at the
* individual counts.
*/
outb(0xb0, 0x43);
/* Start at 0xffff */
outb(0xff, 0x42);
outb(0xff, 0x42);
/*
* The PIT starts counting at the next edge, so we
* need to delay for a microsecond. The easiest way
* to do that is to just read back the 16-bit counter
* once from the PIT.
*/
pit_verify_msb(0);
if (pit_expect_msb(0xff, &tsc, &d1)) {
for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
if (!pit_expect_msb(0xff-i, &delta, &d2))
break;
delta -= tsc;
/*
* Extrapolate the error and fail fast if the error will
* never be below 500 ppm.
*/
if (i == 1 &&
d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
return 0;
/*
* Iterate until the error is less than 500 ppm
*/
if (d1+d2 >= delta >> 11)
continue;
/*
* Check the PIT one more time to verify that
* all TSC reads were stable wrt the PIT.
*
* This also guarantees serialization of the
* last cycle read ('d2') in pit_expect_msb.
*/
if (!pit_verify_msb(0xfe - i))
break;
goto success;
}
}
pr_info("Fast TSC calibration failed\n");
return 0;
success:
/*
* Ok, if we get here, then we've seen the
* MSB of the PIT decrement 'i' times, and the
* error has shrunk to less than 500 ppm.
*
* As a result, we can depend on there not being
* any odd delays anywhere, and the TSC reads are
* reliable (within the error).
*
* kHz = ticks / time-in-seconds / 1000;
* kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
* kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
*/
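	/*
	 * Hypothetical example of the formula above: if the MSB decremented
	 * i = 233 times (roughly 50ms at PIT_TICK_RATE = 1193182 Hz) and the
	 * TSC advanced by about 120,000,000 cycles in that window, the result
	 * is 120,000,000 * 1193182 / (233 * 256 * 1000), which is roughly
	 * 2,400,000 kHz, i.e. a 2.4 GHz TSC.
	 */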
delta *= PIT_TICK_RATE;
do_div(delta, i*256*1000);
pr_info("Fast TSC calibration using PIT\n");
return delta;
}
/**
* native_calibrate_tsc - determine TSC frequency
* Determine TSC frequency via CPUID, else return 0.
*/
unsigned long native_calibrate_tsc(void)
{
unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
unsigned int crystal_khz;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return 0;
eax_denominator = ebx_numerator = ecx_hz = edx = 0;
/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
if (ebx_numerator == 0 || eax_denominator == 0)
return 0;
crystal_khz = ecx_hz / 1000;
/*
* Denverton SoCs don't report crystal clock, and also don't support
* CPUID_LEAF_FREQ for the calculation below, so hardcode the 25MHz
* crystal clock.
*/
if (crystal_khz == 0 &&
boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
crystal_khz = 25000;
/*
* TSC frequency reported directly by CPUID is a "hardware reported"
* frequency and is the most accurate one we have so far. This
* is considered a known frequency.
*/
if (crystal_khz != 0)
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
/*
* Some Intel SoCs like Skylake and Kabylake don't report the crystal
* clock, but we can easily calculate it to a high degree of accuracy
* by considering the crystal ratio and the CPU speed.
*/
if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= CPUID_LEAF_FREQ) {
unsigned int eax_base_mhz, ebx, ecx, edx;
cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx, &ecx, &edx);
crystal_khz = eax_base_mhz * 1000 *
eax_denominator / ebx_numerator;
}
if (crystal_khz == 0)
return 0;
/*
* For Atom SoCs TSC is the only reliable clocksource.
* Mark TSC reliable so no watchdog on it.
*/
if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
#ifdef CONFIG_X86_LOCAL_APIC
/*
* The local APIC appears to be fed by the core crystal clock
* (which sounds entirely sensible). We can set the global
* lapic_timer_period here to avoid having to calibrate the APIC
* timer later.
*/
lapic_timer_period = crystal_khz * 1000 / HZ;
#endif
return crystal_khz * ebx_numerator / eax_denominator;
}
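/*
 * Example of the CPUID.15H relationship used above (hypothetical values, not
 * taken from any specific part): a 24 MHz crystal (crystal_khz = 24000) with
 * a TSC/crystal ratio of 200/2 yields 24000 * 200 / 2 = 2,400,000 kHz, i.e.
 * a 2.4 GHz TSC. detect_art() later reads the same ratio from this leaf for
 * ART-to-TSC conversion.
 */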
static unsigned long cpu_khz_from_cpuid(void)
{
unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
if (boot_cpu_data.cpuid_level < CPUID_LEAF_FREQ)
return 0;
eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
return eax_base_mhz * 1000;
}
/*
* calibrate cpu using pit, hpet, and ptimer methods. They are available
* later in boot after acpi is initialized.
*/
static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
unsigned long flags, latch, ms;
int hpet = is_hpet_enabled(), i, loopmin;
/*
* Run 5 calibration loops to get the lowest frequency value
* (the best estimate). We use two different calibration modes
* here:
*
* 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
* load a timeout of 50ms. We read the time right after we
* started the timer and wait until the PIT count down reaches
* zero. In each wait loop iteration we read the TSC and check
* the delta to the previous read. We keep track of the min
* and max values of that delta. The delta is mostly defined
* by the IO time of the PIT access, so we can detect when
* any disturbance happened between the two reads. If the
* maximum time is significantly larger than the minimum time,
* then we discard the result and have another try.
*
* 2) Reference counter. If available we use the HPET or the
* PMTIMER as a reference to check the sanity of that value.
* We use separate TSC readouts and check inside of the
* reference read for any possible disturbance. We discard
* disturbed values here as well. We do that around the PIT
* calibration delay loop as we have to wait for a certain
* amount of time anyway.
*/
/* Preset PIT loop values */
latch = CAL_LATCH;
ms = CAL_MS;
loopmin = CAL_PIT_LOOPS;
for (i = 0; i < 3; i++) {
unsigned long tsc_pit_khz;
/*
* Read the start value and the reference count of
* hpet/pmtimer when available. Then do the PIT
* calibration, which will take at least 50ms, and
* read the end value.
*/
local_irq_save(flags);
tsc1 = tsc_read_refs(&ref1, hpet);
tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
tsc2 = tsc_read_refs(&ref2, hpet);
local_irq_restore(flags);
/* Pick the lowest PIT TSC calibration so far */
tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
/* hpet or pmtimer available ? */
if (ref1 == ref2)
continue;
/* Check, whether the sampling was disturbed */
if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
continue;
tsc2 = (tsc2 - tsc1) * 1000000LL;
if (hpet)
tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
else
tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
/* Check the reference deviation */
delta = ((u64) tsc_pit_min) * 100;
do_div(delta, tsc_ref_min);
/*
* If both calibration results are inside a 10% window
* then we can be sure, that the calibration
* succeeded. We break out of the loop right away. We
* use the reference value, as it is more precise.
*/
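		/*
		 * For instance (hypothetical numbers): tsc_pit_min = 2,390,000
		 * and tsc_ref_min = 2,400,000 give delta = 2,390,000 * 100 /
		 * 2,400,000 = 99, which lies inside the 90..110 window, so the
		 * HPET/PMTIMER value is accepted.
		 */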
if (delta >= 90 && delta <= 110) {
pr_info("PIT calibration matches %s. %d loops\n",
hpet ? "HPET" : "PMTIMER", i + 1);
return tsc_ref_min;
}
/*
* Check whether PIT failed more than once. This
* happens in virtualized environments. We need to
* give the virtual PC a slightly longer timeframe for
* the HPET/PMTIMER to make the result precise.
*/
if (i == 1 && tsc_pit_min == ULONG_MAX) {
latch = CAL2_LATCH;
ms = CAL2_MS;
loopmin = CAL2_PIT_LOOPS;
}
}
/*
* Now check the results.
*/
if (tsc_pit_min == ULONG_MAX) {
/* PIT gave no useful value */
pr_warn("Unable to calibrate against PIT\n");
/* We don't have an alternative source, disable TSC */
if (!hpet && !ref1 && !ref2) {
pr_notice("No reference (HPET/PMTIMER) available\n");
return 0;
}
/* The alternative source failed as well, disable TSC */
if (tsc_ref_min == ULONG_MAX) {
pr_warn("HPET/PMTIMER calibration failed\n");
return 0;
}
/* Use the alternative source */
pr_info("using %s reference calibration\n",
hpet ? "HPET" : "PMTIMER");
return tsc_ref_min;
}
/* We don't have an alternative source, use the PIT calibration value */
if (!hpet && !ref1 && !ref2) {
pr_info("Using PIT calibration value\n");
return tsc_pit_min;
}
/* The alternative source failed, use the PIT calibration value */
if (tsc_ref_min == ULONG_MAX) {
pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
return tsc_pit_min;
}
/*
* The calibration values differ too much. In doubt, we use
* the PIT value as we know that there are PMTIMERs around
* running at double speed. At least we let the user know:
*/
pr_warn("PIT calibration deviates from %s: %lu %lu\n",
hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
pr_info("Using PIT calibration value\n");
return tsc_pit_min;
}
/**
* native_calibrate_cpu_early - can calibrate the cpu early in boot
*/
unsigned long native_calibrate_cpu_early(void)
{
unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
if (!fast_calibrate)
fast_calibrate = cpu_khz_from_msr();
if (!fast_calibrate) {
local_irq_save(flags);
fast_calibrate = quick_pit_calibrate();
local_irq_restore(flags);
}
return fast_calibrate;
}
/**
* native_calibrate_cpu - calibrate the cpu
*/
static unsigned long native_calibrate_cpu(void)
{
unsigned long tsc_freq = native_calibrate_cpu_early();
if (!tsc_freq)
tsc_freq = pit_hpet_ptimer_calibrate_cpu();
return tsc_freq;
}
void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
unsigned long cpu_khz_old = cpu_khz;
if (!boot_cpu_has(X86_FEATURE_TSC))
return;
cpu_khz = x86_platform.calibrate_cpu();
tsc_khz = x86_platform.calibrate_tsc();
if (tsc_khz == 0)
tsc_khz = cpu_khz;
else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
cpu_khz = tsc_khz;
cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
cpu_khz_old, cpu_khz);
#endif
}
EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);
static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void)
{
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
cyc2ns_suspend = sched_clock();
}
/*
* Even on processors with invariant TSC, the TSC gets reset in some of the
* ACPI system sleep states. And on some systems the BIOS seems to reinit the
* TSC to an arbitrary value (still sync'd across cpus) during resume from such
* sleep states. To cope with this, recompute the cyc2ns_offset for each cpu so
* that sched_clock() continues from the point where it was left off during
* suspend.
*/
void tsc_restore_sched_clock_state(void)
{
unsigned long long offset;
unsigned long flags;
int cpu;
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
local_irq_save(flags);
/*
* We're coming out of suspend, there's no concurrency yet; don't
* bother being nice about the RCU stuff, just write to both
* data fields.
*/
this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
offset = cyc2ns_suspend - sched_clock();
for_each_possible_cpu(cpu) {
per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
}
local_irq_restore(flags);
}
#ifdef CONFIG_CPU_FREQ
/*
* Frequency scaling support. Adjust the TSC based timer when the CPU frequency
* changes.
*
* NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
* as unstable and give up in those cases.
*
* Should fix up last_tsc too. Currently gettimeofday in the
* first tick after the change will be slightly wrong.
*/
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
if (num_online_cpus() > 1) {
mark_tsc_unstable("cpufreq changes on SMP");
return 0;
}
if (!ref_freq) {
ref_freq = freq->old;
loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
tsc_khz_ref = tsc_khz;
}
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
boot_cpu_data.loops_per_jiffy =
cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
mark_tsc_unstable("cpufreq changes");
set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
}
return 0;
}
static struct notifier_block time_cpufreq_notifier_block = {
.notifier_call = time_cpufreq_notifier
};
static int __init cpufreq_register_tsc_scaling(void)
{
if (!boot_cpu_has(X86_FEATURE_TSC))
return 0;
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
return 0;
cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
return 0;
}
core_initcall(cpufreq_register_tsc_scaling);
#endif /* CONFIG_CPU_FREQ */
#define ART_MIN_DENOMINATOR (1)
/*
* If ART is present detect the numerator:denominator to convert to TSC
*/
static void __init detect_art(void)
{
unsigned int unused;
if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return;
/*
* Don't enable ART in a VM; non-stop TSC and TSC_ADJUST are required,
* and TSC counter resets must not occur asynchronously.
*/
if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
!boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
tsc_async_resets)
return;
cpuid(CPUID_LEAF_TSC, &art_base_clk.denominator,
&art_base_clk.numerator, &art_base_clk.freq_khz, &unused);
art_base_clk.freq_khz /= KHZ;
if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
return;
rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
/* Make this sticky over multiple CPU init calls */
setup_force_cpu_cap(X86_FEATURE_ART);
}
/* clocksource code */
static void tsc_resume(struct clocksource *cs)
{
tsc_verify_tsc_adjust(true);
}
/*
* We used to compare the TSC to the cycle_last value in the clocksource
* structure to avoid a nasty time-warp. This can be observed in a
* very small window right after one CPU updated cycle_last under
* xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
* is smaller than the cycle_last reference value due to a TSC which
* is slightly behind. This delta is nowhere else observable, but in
* that case it results in a forward time jump in the range of hours
* due to the unsigned delta calculation of the time keeping core
* code, which is necessary to support wrapping clocksources like pm
* timer.
*
* This sanity check is now done in the core timekeeping code by
* checking the result of read_tsc() - cycle_last for being negative.
* That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
*/
static u64 read_tsc(struct clocksource *cs)
{
return (u64)rdtsc_ordered();
}
static void tsc_cs_mark_unstable(struct clocksource *cs)
{
if (tsc_unstable)
return;
tsc_unstable = 1;
if (using_native_sched_clock())
clear_sched_clock_stable();
disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to clocksource watchdog\n");
}
static void tsc_cs_tick_stable(struct clocksource *cs)
{
if (tsc_unstable)
return;
if (using_native_sched_clock())
sched_clock_tick_stable();
}
static int tsc_cs_enable(struct clocksource *cs)
{
vclocks_set_used(VDSO_CLOCKMODE_TSC);
return 0;
}
/*
* .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
*/
static struct clocksource clocksource_tsc_early = {
.name = "tsc-early",
.rating = 299,
.uncertainty_margin = 32 * NSEC_PER_MSEC,
.read = read_tsc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_MUST_VERIFY,
.id = CSID_X86_TSC_EARLY,
.vdso_clock_mode = VDSO_CLOCKMODE_TSC,
.enable = tsc_cs_enable,
.resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable,
.list = LIST_HEAD_INIT(clocksource_tsc_early.list),
};
/*
* Must mark VALID_FOR_HRES early such that when we unregister tsc_early
* this one will immediately take over. We will only register if TSC has
* been found good.
*/
static struct clocksource clocksource_tsc = {
.name = "tsc",
.rating = 300,
.read = read_tsc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_VALID_FOR_HRES |
CLOCK_SOURCE_MUST_VERIFY |
CLOCK_SOURCE_VERIFY_PERCPU,
.id = CSID_X86_TSC,
.vdso_clock_mode = VDSO_CLOCKMODE_TSC,
.enable = tsc_cs_enable,
.resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable,
.list = LIST_HEAD_INIT(clocksource_tsc.list),
};
void mark_tsc_unstable(char *reason)
{
if (tsc_unstable)
return;
tsc_unstable = 1;
if (using_native_sched_clock())
clear_sched_clock_stable();
disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to %s\n", reason);
clocksource_mark_unstable(&clocksource_tsc_early);
clocksource_mark_unstable(&clocksource_tsc);
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static void __init tsc_disable_clocksource_watchdog(void)
{
clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
bool tsc_clocksource_watchdog_disabled(void)
{
return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) &&
tsc_as_watchdog && !no_tsc_watchdog;
}
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
if (is_geode_lx()) {
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
unsigned long res_low, res_high;
rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
/* Geode_LX - the OLPC CPU has a very reliable TSC */
if (res_low & RTSC_SUSP)
tsc_clocksource_reliable = 1;
}
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
/*
* Disable the clocksource watchdog when the system has:
* - TSC running at constant frequency
* - TSC which does not stop in C-States
* - the TSC_ADJUST register which allows detection of even minimal
*   modifications
* - not more than four packages
*/
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
topology_max_packages() <= 4)
tsc_disable_clocksource_watchdog();
}
/*
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
*/
int unsynchronized_tsc(void)
{
if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
return 1;
#ifdef CONFIG_SMP
if (apic_is_clustered_box())
return 1;
#endif
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
return 0;
if (tsc_clocksource_reliable)
return 0;
/*
* Intel systems are normally all synchronized.
* Exceptions must mark TSC as unstable:
*/
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
/* assume multi socket systems are not synchronized: */
if (topology_max_packages() > 1)
return 1;
}
return 0;
}
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
* tsc_refine_calibration_work - Further refine tsc freq calibration
* @work: ignored.
*
* This function uses delayed work over a period of a
* second to further refine the TSC freq value. Since this is
* timer based, instead of loop based, we don't block the boot
* process while this longer calibration is done.
*
* If there are any calibration anomalies (too many SMIs, etc),
* or the refined calibration is off by more than 1% from the fast early
* calibration, we throw out the new calibration and use the
* early calibration.
*/
static void tsc_refine_calibration_work(struct work_struct *work)
{
static u64 tsc_start = ULLONG_MAX, ref_start;
static int hpet;
u64 tsc_stop, ref_stop, delta;
unsigned long freq;
int cpu;
/* Don't bother refining TSC on unstable systems */
if (tsc_unstable)
goto unreg;
/*
* Since the work is started early in boot, we may be
* delayed the first time we expire. So set the workqueue
* again once we know timers are working.
*/
if (tsc_start == ULLONG_MAX) {
restart:
/*
* Only set hpet once, to avoid mixing hardware
* if the hpet becomes enabled later.
*/
hpet = is_hpet_enabled();
tsc_start = tsc_read_refs(&ref_start, hpet);
schedule_delayed_work(&tsc_irqwork, HZ);
return;
}
tsc_stop = tsc_read_refs(&ref_stop, hpet);
/* hpet or pmtimer available ? */
if (ref_start == ref_stop)
goto out;
/* Check, whether the sampling was disturbed */
if (tsc_stop == ULLONG_MAX)
goto restart;
delta = tsc_stop - tsc_start;
delta *= 1000000LL;
if (hpet)
freq = calc_hpet_ref(delta, ref_start, ref_stop);
else
freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
/* Will hit this only if tsc_force_recalibrate has been set */
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
/* Warn if the deviation exceeds 500 ppm */
if (abs(tsc_khz - freq) > (tsc_khz >> 11)) {
pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n");
pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n",
(unsigned long)tsc_khz / 1000,
(unsigned long)tsc_khz % 1000);
}
pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n",
hpet ? "HPET" : "PM_TIMER",
(unsigned long)freq / 1000,
(unsigned long)freq % 1000);
return;
}
/* Make sure we're within 1% */
if (abs(tsc_khz - freq) > tsc_khz/100)
goto out;
tsc_khz = freq;
pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
(unsigned long)tsc_khz / 1000,
(unsigned long)tsc_khz % 1000);
/* Inform the TSC deadline clockevent devices about the recalibration */
lapic_update_tsc_freq();
/* Update the sched_clock() rate to match the clocksource one */
for_each_possible_cpu(cpu)
set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
out:
if (tsc_unstable)
goto unreg;
if (boot_cpu_has(X86_FEATURE_ART)) {
have_art = true;
clocksource_tsc.base = &art_base_clk;
}
clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
clocksource_unregister(&clocksource_tsc_early);
}
static int __init init_tsc_clocksource(void)
{
if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
return 0;
if (tsc_unstable) {
clocksource_unregister(&clocksource_tsc_early);
return 0;
}
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
/*
* When TSC frequency is known (retrieved via MSR or CPUID), we skip
* the refined calibration and directly register it as a clocksource.
*/
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
if (boot_cpu_has(X86_FEATURE_ART)) {
have_art = true;
clocksource_tsc.base = &art_base_clk;
}
clocksource_register_khz(&clocksource_tsc, tsc_khz);
clocksource_unregister(&clocksource_tsc_early);
if (!tsc_force_recalibrate)
return 0;
}
schedule_delayed_work(&tsc_irqwork, 0);
return 0;
}
/*
* We use device_initcall here, to ensure we run after the hpet
* is fully initialized, which may occur at fs_initcall time.
*/
device_initcall(init_tsc_clocksource);
static bool __init determine_cpu_tsc_frequencies(bool early)
{
/* Make sure that cpu and tsc are not already calibrated */
WARN_ON(cpu_khz || tsc_khz);
if (early) {
cpu_khz = x86_platform.calibrate_cpu();
if (tsc_early_khz) {
tsc_khz = tsc_early_khz;
} else {
tsc_khz = x86_platform.calibrate_tsc();
clocksource_tsc.freq_khz = tsc_khz;
}
} else {
/* We should not be here with non-native cpu calibration */
WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
cpu_khz = pit_hpet_ptimer_calibrate_cpu();
}
/*
* Trust non-zero tsc_khz as authoritative,
* and use it to sanity check cpu_khz,
* which will be off if system timer is off.
*/
if (tsc_khz == 0)
tsc_khz = cpu_khz;
else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
cpu_khz = tsc_khz;
if (tsc_khz == 0)
return false;
pr_info("Detected %lu.%03lu MHz processor\n",
(unsigned long)cpu_khz / KHZ,
(unsigned long)cpu_khz % KHZ);
if (cpu_khz != tsc_khz) {
pr_info("Detected %lu.%03lu MHz TSC",
(unsigned long)tsc_khz / KHZ,
(unsigned long)tsc_khz % KHZ);
}
return true;
}
static unsigned long __init get_loops_per_jiffy(void)
{
u64 lpj = (u64)tsc_khz * KHZ;
do_div(lpj, HZ);
return lpj;
}
static void __init tsc_enable_sched_clock(void)
{
loops_per_jiffy = get_loops_per_jiffy();
use_tsc_delay();
/* Sanitize TSC ADJUST before cyc2ns gets initialized */
tsc_store_and_check_tsc_adjust(true);
cyc2ns_init_boot_cpu();
static_branch_enable(&__use_tsc);
}
void __init tsc_early_init(void)
{
if (!boot_cpu_has(X86_FEATURE_TSC))
return;
/* Don't change UV TSC multi-chassis synchronization */
if (is_early_uv_system())
return;
snp_secure_tsc_init();
if (!determine_cpu_tsc_frequencies(true))
return;
tsc_enable_sched_clock();
}
void __init tsc_init(void)
{
if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return;
}
/*
* native_calibrate_cpu_early can only calibrate using methods that are
* available early in boot.
*/
if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
x86_platform.calibrate_cpu = native_calibrate_cpu;
if (!tsc_khz) {
/* We failed to determine frequencies earlier, try again */
if (!determine_cpu_tsc_frequencies(false)) {
mark_tsc_unstable("could not calculate TSC khz");
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return;
}
tsc_enable_sched_clock();
}
cyc2ns_init_secondary_cpus();
if (!no_sched_irq_time)
enable_sched_clock_irqtime();
lpj_fine = get_loops_per_jiffy();
check_system_tsc_reliable();
if (unsynchronized_tsc()) {
mark_tsc_unstable("TSCs unsynchronized");
return;
}
if (tsc_clocksource_reliable || no_tsc_watchdog)
tsc_disable_clocksource_watchdog();
clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();
}
#ifdef CONFIG_SMP
/*
* Check whether existing calibration data can be reused.
*/
unsigned long calibrate_delay_is_known(void)
{
int sibling, cpu = smp_processor_id();
int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
const struct cpumask *mask = topology_core_cpumask(cpu);
/*
* If TSC has constant frequency and TSC is synchronized across
* sockets then reuse CPU0 calibration.
*/
if (constant_tsc && !tsc_unstable)
return cpu_data(0).loops_per_jiffy;
/*
* If TSC has constant frequency and TSC is not synchronized across
* sockets and this is not the first CPU in the socket, then reuse
* the calibration value of an already online CPU on that socket.
*
* This assumes that CONSTANT_TSC is consistent for all CPUs in a
* socket.
*/
if (!constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
if (sibling < nr_cpu_ids)
return cpu_data(sibling).loops_per_jiffy;
return 0;
}
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/mm/swap.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
/*
* This file contains the default values for the operation of the
* Linux VM subsystem. Fine-tuning documentation can be found in
* Documentation/admin-guide/sysctl/vm.rst.
* Started 18.12.91
* Swap aging added 23.2.95, Stephen Tweedie.
* Buffermem limits added 12.3.98, Rik van Riel.
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>
/* How many pages do we try to swap or page in/out together? As a power of 2 */
int page_cluster;
static const int page_cluster_max = 31;
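/*
 * Worked example: a typical vm.page-cluster value of 3 means up to
 * 2^3 = 8 pages per swap cluster; page_cluster_max (31) just caps the sysctl
 * so the shift stays within a sane range.
 */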
struct cpu_fbatches {
/*
* The following folio batches are grouped together because they are protected
* by disabling preemption (and interrupts remain enabled).
*/
local_lock_t lock;
struct folio_batch lru_add;
struct folio_batch lru_deactivate_file;
struct folio_batch lru_deactivate;
struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
struct folio_batch lru_activate;
#endif
/* Protecting the following batches which require disabling interrupts */
local_lock_t lock_irq;
struct folio_batch lru_move_tail;
};
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
.lock = INIT_LOCAL_LOCK(lock),
.lock_irq = INIT_LOCAL_LOCK(lock_irq),
};
static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
unsigned long *flagsp)
{
if (folio_test_lru(folio)) {
folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
lruvec_del_folio(*lruvecp, folio);
__folio_clear_lru_flags(folio);
}
}
/*
* This path almost never happens for VM activity - pages are normally freed
* in batches. But it gets used by networking - and for compound pages.
*/
static void page_cache_release(struct folio *folio)
{
struct lruvec *lruvec = NULL;
unsigned long flags;
__page_cache_release(folio, &lruvec, &flags);
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
}
void __folio_put(struct folio *folio)
{
if (unlikely(folio_is_zone_device(folio))) {
free_zone_device_folio(folio);
return;
}
if (folio_test_hugetlb(folio)) {
free_huge_folio(folio);
return;
}
page_cache_release(folio);
folio_unqueue_deferred_split(folio);
mem_cgroup_uncharge(folio);
free_frozen_pages(&folio->page, folio_order(folio));
}
EXPORT_SYMBOL(__folio_put);
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
static void lru_add(struct lruvec *lruvec, struct folio *folio)
{
int was_unevictable = folio_test_clear_unevictable(folio);
long nr_pages = folio_nr_pages(folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
/*
* Is an smp_mb__after_atomic() still required here, before
* folio_evictable() tests the mlocked flag, to rule out the possibility
* of stranding an evictable folio on an unevictable LRU? I think
* not, because __munlock_folio() only clears the mlocked flag
* while the LRU lock is held.
*
* (That is not true of __page_cache_release(), and not necessarily
* true of folios_put(): but those only clear the mlocked flag after
* folio_put_testzero() has excluded any other users of the folio.)
*/
if (folio_evictable(folio)) {
if (was_unevictable)
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
folio_clear_active(folio);
folio_set_unevictable(folio);
/*
* folio->mlock_count = !!folio_test_mlocked(folio)?
* But that leaves __mlock_folio() in doubt whether another
* actor has already counted the mlock or not. Err on the
* safe side, underestimate, let page reclaim fix it, rather
* than leaving a page on the unevictable LRU indefinitely.
*/
folio->mlock_count = 0;
if (!was_unevictable)
__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
}
lruvec_add_folio(lruvec, folio);
trace_mm_lru_insertion(folio);
}
static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
int i;
struct lruvec *lruvec = NULL;
unsigned long flags = 0;
for (i = 0; i < folio_batch_count(fbatch); i++) {
struct folio *folio = fbatch->folios[i];
/* block memcg migration while the folio moves between lru */
if (move_fn != lru_add && !folio_test_clear_lru(folio))
continue;
folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
move_fn(lruvec, folio);
folio_set_lru(folio);
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
folios_put(fbatch);
}
static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
struct folio *folio, move_fn_t move_fn, bool disable_irq)
{
unsigned long flags;
	folio_get(folio);
	if (disable_irq)
		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
	else
		local_lock(&cpu_fbatches.lock);
	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
	    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
	if (disable_irq)
		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
	else
		local_unlock(&cpu_fbatches.lock);
}
#define folio_batch_add_and_move(folio, op) \
__folio_batch_add_and_move( \
&cpu_fbatches.op, \
folio, \
op, \
offsetof(struct cpu_fbatches, op) >= \
offsetof(struct cpu_fbatches, lock_irq) \
)
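/*
 * Note on the offsetof() test above: batches laid out after lock_irq in
 * struct cpu_fbatches (currently only lru_move_tail) get disable_irq == true,
 * while those declared before it (lru_add, lru_deactivate_file,
 * lru_deactivate, lru_lazyfree, lru_activate) are added under the plain local
 * lock. So folio_batch_add_and_move(folio, lru_move_tail) takes the IRQ-safe
 * path and folio_batch_add_and_move(folio, lru_add) does not.
 */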
static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
{
if (folio_test_unevictable(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
lruvec_add_folio_tail(lruvec, folio);
__count_vm_events(PGROTATED, folio_nr_pages(folio));
}
/*
* Writeback is about to end against a folio which has been marked for
* immediate reclaim. If it still appears to be reclaimable, move it
* to the tail of the inactive list.
*
* folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
*/
void folio_rotate_reclaimable(struct folio *folio)
{
if (folio_test_locked(folio) || folio_test_dirty(folio) ||
folio_test_unevictable(folio) || !folio_test_lru(folio))
return;
folio_batch_add_and_move(folio, lru_move_tail);
}
void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
unsigned int nr_io, unsigned int nr_rotated)
__releases(lruvec->lru_lock)
{
unsigned long cost;
/*
* Reflect the relative cost of incurring IO and spending CPU
* time on rotations. This doesn't attempt to make a precise
* comparison, it just says: if reloads are about comparable
* between the LRU lists, or rotations are overwhelmingly
* different between them, adjust scan balance for CPU work.
*/
cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
if (!cost) {
spin_unlock_irq(&lruvec->lru_lock);
return;
}
for (;;) {
unsigned long lrusize;
/* Record cost event */
if (file)
lruvec->file_cost += cost;
else
lruvec->anon_cost += cost;
/*
* Decay previous events
*
* Because workloads change over time (and to avoid
* overflow) we keep these statistics as a floating
* average, which ends up weighing recent refaults
* more than old ones.
*/
lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
lruvec_page_state(lruvec, NR_ACTIVE_FILE);
if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
lruvec->file_cost /= 2;
lruvec->anon_cost /= 2;
}
spin_unlock_irq(&lruvec->lru_lock);
lruvec = parent_lruvec(lruvec);
if (!lruvec)
break;
spin_lock_irq(&lruvec->lru_lock);
}
}
void lru_note_cost_refault(struct folio *folio)
{
struct lruvec *lruvec;
lruvec = folio_lruvec_lock_irq(folio);
lru_note_cost_unlock_irq(lruvec, folio_is_file_lru(folio),
folio_nr_pages(folio), 0);
}
static void lru_activate(struct lruvec *lruvec, struct folio *folio)
{
long nr_pages = folio_nr_pages(folio);
if (folio_test_active(folio) || folio_test_unevictable(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_set_active(folio);
lruvec_add_folio(lruvec, folio);
trace_mm_lru_activate(folio);
__count_vm_events(PGACTIVATE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
}
#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_activate);
}
void folio_activate(struct folio *folio)
{
if (folio_test_active(folio) || folio_test_unevictable(folio) ||
!folio_test_lru(folio))
return;
folio_batch_add_and_move(folio, lru_activate);
}
#else
static inline void folio_activate_drain(int cpu)
{
}
void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
if (!folio_test_clear_lru(folio))
return;
lruvec = folio_lruvec_lock_irq(folio);
lru_activate(lruvec, folio);
unlock_page_lruvec_irq(lruvec);
folio_set_lru(folio);
}
#endif
static void __lru_cache_activate_folio(struct folio *folio)
{
struct folio_batch *fbatch;
int i;
local_lock(&cpu_fbatches.lock);
fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
/*
* Search backwards on the optimistic assumption that the folio being
* activated has just been added to this batch. Note that only
* the local batch is examined as a !LRU folio could be in the
* process of being released, reclaimed, migrated or on a remote
* batch that is currently being drained. Furthermore, marking
* a remote batch's folio active potentially hits a race where
* a folio is marked active just after it is added to the inactive
* list causing accounting errors and BUG_ON checks to trigger.
*/
for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
struct folio *batch_folio = fbatch->folios[i];
if (batch_folio == folio) {
folio_set_active(folio);
break;
}
}
local_unlock(&cpu_fbatches.lock);
}
#ifdef CONFIG_LRU_GEN
static void lru_gen_inc_refs(struct folio *folio)
{
unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
if (folio_test_unevictable(folio))
return;
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio)) {
set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return;
}
do {
if ((old_flags & LRU_REFS_MASK) == LRU_REFS_MASK) {
if (!folio_test_workingset(folio))
folio_set_workingset(folio);
return;
}
new_flags = old_flags + BIT(LRU_REFS_PGOFF);
} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
}
static bool lru_gen_clear_refs(struct folio *folio)
{
struct lru_gen_folio *lrugen;
int gen = folio_lru_gen(folio);
int type = folio_is_file_lru(folio);
if (gen < 0)
return true;
set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
lrugen = &folio_lruvec(folio)->lrugen;
/* whether this can be done without shuffling under the LRU lock */
return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type]));
}
#else /* !CONFIG_LRU_GEN */
static void lru_gen_inc_refs(struct folio *folio)
{
}
static bool lru_gen_clear_refs(struct folio *folio)
{
return false;
}
#endif /* CONFIG_LRU_GEN */
/**
* folio_mark_accessed - Mark a folio as having seen activity.
* @folio: The folio to mark.
*
* This function will perform one of the following transitions:
*
* * inactive,unreferenced -> inactive,referenced
* * inactive,referenced -> active,unreferenced
* * active,unreferenced -> active,referenced
*
* When a newly allocated folio is not yet visible, so safe for non-atomic ops,
* __folio_set_referenced() may be substituted for folio_mark_accessed().
*/
void folio_mark_accessed(struct folio *folio)
{
if (folio_test_dropbehind(folio))
return;
if (lru_gen_enabled()) {
lru_gen_inc_refs(folio);
return;
}
if (!folio_test_referenced(folio)) {
folio_set_referenced(folio);
} else if (folio_test_unevictable(folio)) {
/*
* Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
* this list is never rotated or maintained, so marking an
* unevictable page accessed has no effect.
*/
} else if (!folio_test_active(folio)) {
/*
* If the folio is on the LRU, queue it for activation via
* cpu_fbatches.lru_activate. Otherwise, assume the folio is in a
* folio_batch, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
if (folio_test_lru(folio))
folio_activate(folio);
else
__lru_cache_activate_folio(folio);
folio_clear_referenced(folio);
workingset_activation(folio);
}
if (folio_test_idle(folio))
folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);
/**
* folio_add_lru - Add a folio to an LRU list.
* @folio: The folio to be added to the LRU.
*
* Queue the folio for addition to the LRU. The decision on whether
* to add the page to the [in]active [file|anon] list is deferred until the
* folio_batch is drained. This gives a chance for the caller of folio_add_lru()
* to have the folio added to the active list using folio_mark_accessed().
*/
void folio_add_lru(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_test_active(folio) &&
			folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
/* see the comment in lru_gen_folio_seq() */
if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
folio_set_active(folio);
folio_batch_add_and_move(folio, lru_add);
}
EXPORT_SYMBOL(folio_add_lru);
/**
* folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
* @folio: The folio to be added to the LRU.
* @vma: VMA in which the folio is mapped.
*
* If the VMA is mlocked, @folio is added to the unevictable list.
* Otherwise, it is treated the same way as folio_add_lru().
*/
void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
{
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
mlock_new_folio(folio);
else
folio_add_lru(folio);
}
/*
* If the folio cannot be invalidated, it is moved to the
* inactive list to speed up its reclaim. It is moved to the
* head of the list, rather than the tail, to give the flusher
* threads some time to write it out, as this is much more
* effective than the single-page writeout from reclaim.
*
* If the folio isn't mapped and dirty/writeback, the folio
* could be reclaimed asap using the reclaim flag.
*
* 1. active, mapped folio -> none
* 2. active, dirty/writeback folio -> inactive, head, reclaim
* 3. inactive, mapped folio -> none
* 4. inactive, dirty/writeback folio -> inactive, head, reclaim
* 5. inactive, clean -> inactive, tail
* 6. Others -> none
*
* In 4, it moves to the head of the inactive list so the folio is
* written out by flusher threads as this is much more efficient
* than the single-page writeout from reclaim.
*/
static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
bool active = folio_test_active(folio) || lru_gen_enabled();
long nr_pages = folio_nr_pages(folio);
if (folio_test_unevictable(folio))
return;
/* Some processes are using the folio */
if (folio_mapped(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
folio_clear_referenced(folio);
if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
/*
* Setting the reclaim flag could race with
* folio_end_writeback() and confuse readahead. But the
* race window is _really_ small and it's not a critical
* problem.
*/
lruvec_add_folio(lruvec, folio);
folio_set_reclaim(folio);
} else {
/*
* The folio's writeback ended while it was in the batch.
* We move that folio to the tail of the inactive list.
*/
lruvec_add_folio_tail(lruvec, folio);
__count_vm_events(PGROTATED, nr_pages);
}
if (active) {
__count_vm_events(PGDEACTIVATE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
nr_pages);
}
}
static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
{
long nr_pages = folio_nr_pages(folio);
if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
folio_clear_referenced(folio);
lruvec_add_folio(lruvec, folio);
__count_vm_events(PGDEACTIVATE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
{
long nr_pages = folio_nr_pages(folio);
if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
folio_test_swapcache(folio) || folio_test_unevictable(folio))
return;
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
if (lru_gen_enabled())
lru_gen_clear_refs(folio);
else
folio_clear_referenced(folio);
/*
* Lazyfree folios are clean anonymous folios. They have
* the swapbacked flag cleared, to distinguish them from normal
* anonymous folios.
*/
folio_clear_swapbacked(folio);
lruvec_add_folio(lruvec, folio);
__count_vm_events(PGLAZYFREE, nr_pages);
count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
}
/*
* Drain pages out of the cpu's folio_batch.
* Either "cpu" is the current CPU, and preemption has already been
* disabled; or "cpu" is being hot-unplugged, and is already dead.
*/
void lru_add_drain_cpu(int cpu)
{
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
struct folio_batch *fbatch = &fbatches->lru_add;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_add);
fbatch = &fbatches->lru_move_tail;
/* Disabling interrupts below acts as a compiler barrier. */
if (data_race(folio_batch_count(fbatch))) {
unsigned long flags;
/* No harm done if a racing interrupt already did this */
local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
folio_batch_move_lru(fbatch, lru_move_tail);
local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
}
fbatch = &fbatches->lru_deactivate_file;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_deactivate_file);
fbatch = &fbatches->lru_deactivate;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_deactivate);
fbatch = &fbatches->lru_lazyfree;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_lazyfree);
folio_activate_drain(cpu);
}
/**
* deactivate_file_folio() - Deactivate a file folio.
* @folio: Folio to deactivate.
*
* This function hints to the VM that @folio is a good reclaim candidate,
* for example if its invalidation fails due to the folio being dirty
* or under writeback.
*
* Context: Caller holds a reference on the folio.
*/
void deactivate_file_folio(struct folio *folio)
{
/* Deactivating an unevictable folio will not accelerate reclaim */
if (folio_test_unevictable(folio) || !folio_test_lru(folio))
return;
if (lru_gen_enabled() && lru_gen_clear_refs(folio))
return;
folio_batch_add_and_move(folio, lru_deactivate_file);
}
/*
* folio_deactivate - deactivate a folio
* @folio: folio to deactivate
*
* folio_deactivate() moves @folio to the inactive list if @folio was on the
* active list and was not unevictable. This is done to accelerate the
* reclaim of @folio.
*/
void folio_deactivate(struct folio *folio)
{
if (folio_test_unevictable(folio) || !folio_test_lru(folio))
return;
if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
return;
folio_batch_add_and_move(folio, lru_deactivate);
}
/**
* folio_mark_lazyfree - make an anon folio lazyfree
* @folio: folio to deactivate
*
* folio_mark_lazyfree() moves @folio to the inactive file list.
* This is done to accelerate the reclaim of @folio.
*/
void folio_mark_lazyfree(struct folio *folio)
{
if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
!folio_test_lru(folio) ||
folio_test_swapcache(folio) || folio_test_unevictable(folio))
return;
folio_batch_add_and_move(folio, lru_lazyfree);
}
void lru_add_drain(void)
{
local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
local_unlock(&cpu_fbatches.lock);
mlock_drain_local();
}
/*
* In the SMP case this is called from per-cpu workqueue context, so
* lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same cpu.
* In the !SMP case this isn't a problem since there is only one core
* and the locks disable preemption.
*/
static void lru_add_and_bh_lrus_drain(void)
{
local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
local_unlock(&cpu_fbatches.lock);
invalidate_bh_lrus_cpu();
mlock_drain_local();
}
void lru_add_drain_cpu_zone(struct zone *zone)
{
local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
drain_local_pages(zone);
local_unlock(&cpu_fbatches.lock);
mlock_drain_local();
}
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_and_bh_lrus_drain();
}
static bool cpu_needs_drain(unsigned int cpu)
{
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
/* Check these in order of likelihood that they're not zero */
return folio_batch_count(&fbatches->lru_add) ||
folio_batch_count(&fbatches->lru_move_tail) ||
folio_batch_count(&fbatches->lru_deactivate_file) ||
folio_batch_count(&fbatches->lru_deactivate) ||
folio_batch_count(&fbatches->lru_lazyfree) ||
folio_batch_count(&fbatches->lru_activate) ||
need_mlock_drain(cpu) ||
has_bh_in_lru(cpu, NULL);
}
/*
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
* kworkers being shut down before our page_alloc_cpu_dead callback is
* executed on the offlined cpu.
* Calling this function with cpu hotplug locks held can actually lead
* to obscure indirect dependencies via WQ context.
*/
static inline void __lru_add_drain_all(bool force_all_cpus)
{
/*
* lru_drain_gen - Global pages generation number
*
* (A) Definition: global lru_drain_gen = x implies that all generations
* 0 < n <= x are already *scheduled* for draining.
*
* This is an optimization for the highly-contended use case where a
* user space workload keeps constantly generating a flow of pages for
* each CPU.
*/
static unsigned int lru_drain_gen;
static struct cpumask has_work;
static DEFINE_MUTEX(lock);
unsigned cpu, this_gen;
/*
* Make sure nobody triggers this path before mm_percpu_wq is fully
* initialized.
*/
if (WARN_ON(!mm_percpu_wq))
return;
/*
* Guarantee folio_batch counter stores visible by this CPU
* are visible to other CPUs before loading the current drain
* generation.
*/
smp_mb();
/*
* (B) Locally cache global LRU draining generation number
*
* The read barrier ensures that the counter is loaded before the mutex
* is taken. It pairs with smp_mb() inside the mutex critical section
* at (D).
*/
this_gen = smp_load_acquire(&lru_drain_gen);
/* It helps everyone if we do our own local drain immediately. */
lru_add_drain();
mutex_lock(&lock);
/*
* (C) Exit the draining operation if a newer generation, from another
* lru_add_drain_all(), was already scheduled for draining. Check (A).
*/
if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
goto done;
/*
* (D) Increment global generation number
*
* Pairs with smp_load_acquire() at (B), outside of the critical
* section. Use a full memory barrier to guarantee that the
* new global drain generation number is stored before loading
* folio_batch counters.
*
* This pairing must be done here, before the for_each_online_cpu loop
* below which drains the page vectors.
*
* Let x, y, and z represent some system CPU numbers, where x < y < z.
* Assume CPU #z is in the middle of the for_each_online_cpu loop
* below and has already reached CPU #y's per-cpu data. CPU #x comes
* along, adds some pages to its per-cpu vectors, then calls
* lru_add_drain_all().
*
* If the paired barrier is done at any later step, e.g. after the
* loop, CPU #x will just exit at (C) and miss flushing out all of its
* added pages.
*/
WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
smp_mb();
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
if (cpu_needs_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
queue_work_on(cpu, mm_percpu_wq, work);
__cpumask_set_cpu(cpu, &has_work);
}
}
for_each_cpu(cpu, &has_work)
flush_work(&per_cpu(lru_add_drain_work, cpu));
done:
mutex_unlock(&lock);
}
void lru_add_drain_all(void)
{
__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
lru_add_drain();
}
#endif /* CONFIG_SMP */
atomic_t lru_disable_count = ATOMIC_INIT(0);
/*
* lru_cache_disable() needs to be called before we start compiling
* a list of folios to be migrated using folio_isolate_lru().
 * It drains folios on the LRU cache and then disables it on all cpus until
 * lru_cache_enable() is called.
*
* Must be paired with a call to lru_cache_enable().
*/
void lru_cache_disable(void)
{
atomic_inc(&lru_disable_count);
/*
* Readers of lru_disable_count are protected by either disabling
* preemption or rcu_read_lock:
*
* preempt_disable, local_irq_disable [bh_lru_lock()]
* rcu_read_lock [rt_spin_lock CONFIG_PREEMPT_RT]
* preempt_disable [local_lock !CONFIG_PREEMPT_RT]
*
* Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
* preempt_disable() regions of code. So any CPU which sees
* lru_disable_count = 0 will have exited the critical
* section when synchronize_rcu() returns.
*/
synchronize_rcu_expedited();
#ifdef CONFIG_SMP
__lru_add_drain_all(true);
#else
lru_add_and_bh_lrus_drain();
#endif
}
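/*
 * Illustrative sketch, not part of the original source: the pairing
 * described above. A caller compiling a migration list typically
 * brackets its folio_isolate_lru() calls like this (error handling
 * omitted; folio and folio_list are assumed to exist in the caller):
 *
 *	lru_cache_disable();
 *	if (folio_isolate_lru(folio))
 *		list_add_tail(&folio->lru, &folio_list);
 *	lru_cache_enable();
 */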
/**
* folios_put_refs - Reduce the reference count on a batch of folios.
* @folios: The folios.
* @refs: The number of refs to subtract from each folio.
*
* Like folio_put(), but for a batch of folios. This is more efficient
* than writing the loop yourself as it will optimise the locks which need
* to be taken if the folios are freed. The folios batch is returned
* empty and ready to be reused for another batch; there is no need
* to reinitialise it. If @refs is NULL, we subtract one from each
* folio refcount.
*
* Context: May be called in process or interrupt context, but not in NMI
* context. May be called while holding a spinlock.
*/
void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
{
int i, j;
struct lruvec *lruvec = NULL;
unsigned long flags = 0;
for (i = 0, j = 0; i < folios->nr; i++) {
struct folio *folio = folios->folios[i];
unsigned int nr_refs = refs ? refs[i] : 1;
if (is_huge_zero_folio(folio))
continue;
if (folio_is_zone_device(folio)) {
if (lruvec) {
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
if (folio_ref_sub_and_test(folio, nr_refs))
free_zone_device_folio(folio);
continue;
}
if (!folio_ref_sub_and_test(folio, nr_refs))
continue;
/* hugetlb has its own memcg */
if (folio_test_hugetlb(folio)) {
if (lruvec) {
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
free_huge_folio(folio);
continue;
}
folio_unqueue_deferred_split(folio);
__page_cache_release(folio, &lruvec, &flags);
if (j != i)
folios->folios[j] = folio;
j++;
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
if (!j) {
folio_batch_reinit(folios);
return;
}
folios->nr = j;
mem_cgroup_uncharge_folios(folios);
free_unref_folios(folios);
}
EXPORT_SYMBOL(folios_put_refs);
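/*
 * Illustrative sketch, not part of the original source: dropping a batch
 * of folios where an entry may hold more than one reference. The refs[]
 * value of 2 is an assumption for the example; the batch is left empty
 * and reusable after the call.
 *
 *	struct folio_batch fbatch;
 *	unsigned int refs[PAGEVEC_SIZE];
 *
 *	folio_batch_init(&fbatch);
 *	refs[folio_batch_count(&fbatch)] = 2;
 *	folio_batch_add(&fbatch, folio);
 *	folios_put_refs(&fbatch, refs);
 */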
/**
* release_pages - batched put_page()
* @arg: array of pages to release
* @nr: number of pages
*
* Decrement the reference count on all the pages in @arg. If it
* fell to zero, remove the page from the LRU and free it.
*
* Note that the argument can be an array of pages, encoded pages,
* or folio pointers. We ignore any encoded bits, and turn any of
* them into just a folio that gets free'd.
*/
void release_pages(release_pages_arg arg, int nr)
{
struct folio_batch fbatch;
int refs[PAGEVEC_SIZE];
struct encoded_page **encoded = arg.encoded_pages;
int i;
folio_batch_init(&fbatch);
for (i = 0; i < nr; i++) {
/* Turn any of the argument types into a folio */
struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));
/* Is our next entry actually "nr_pages" -> "nr_refs" ? */
refs[fbatch.nr] = 1;
if (unlikely(encoded_page_flags(encoded[i]) &
ENCODED_PAGE_BIT_NR_PAGES_NEXT))
refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
if (folio_batch_add(&fbatch, folio) > 0)
continue;
folios_put_refs(&fbatch, refs);
}
if (fbatch.nr)
folios_put_refs(&fbatch, refs);
}
EXPORT_SYMBOL(release_pages);
/*
* The folios which we're about to release may be in the deferred lru-addition
* queues. That would prevent them from really being freed right now. That's
* OK from a correctness point of view but is inefficient - those folios may be
* cache-warm and we want to give them back to the page allocator ASAP.
*
* So __folio_batch_release() will drain those queues here.
* folio_batch_move_lru() calls folios_put() directly to avoid
* mutual recursion.
*/
void __folio_batch_release(struct folio_batch *fbatch)
{
if (!fbatch->percpu_pvec_drained) {
lru_add_drain();
fbatch->percpu_pvec_drained = true;
}
folios_put(fbatch);
}
EXPORT_SYMBOL(__folio_batch_release);
/**
* folio_batch_remove_exceptionals() - Prune non-folios from a batch.
* @fbatch: The batch to prune
*
* find_get_entries() fills a batch with both folios and shadow/swap/DAX
* entries. This function prunes all the non-folio entries from @fbatch
* without leaving holes, so that it can be passed on to folio-only batch
* operations.
*/
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
unsigned int i, j;
for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
struct folio *folio = fbatch->folios[i];
if (!xa_is_value(folio))
fbatch->folios[j++] = folio;
}
fbatch->nr = j;
}
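/*
 * Illustrative sketch, not part of the original source: pruning value
 * entries after a mixed lookup so the batch can be handed to folio-only
 * helpers. The find_get_entries() signature shown is assumed from recent
 * kernels and the index range is an assumption for the example.
 *
 *	struct folio_batch fbatch;
 *	pgoff_t index = 0, indices[PAGEVEC_SIZE];
 *
 *	folio_batch_init(&fbatch);
 *	while (find_get_entries(mapping, &index, ULONG_MAX, &fbatch, indices)) {
 *		folio_batch_remove_exceptionals(&fbatch);
 *		folio_batch_release(&fbatch);
 *	}
 */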
static const struct ctl_table swap_sysctl_table[] = {
{
.procname = "page-cluster",
.data = &page_cluster,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = (void *)&page_cluster_max,
}
};
/*
* Perform any setup for the swap system
*/
void __init swap_setup(void)
{
unsigned long megs = PAGES_TO_MB(totalram_pages());
/* Use a smaller cluster for small-memory machines */
if (megs < 16)
page_cluster = 2;
else
page_cluster = 3;
/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
*/
register_sysctl_init("vm", swap_sysctl_table);
}
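/*
 * Note (illustrative, not part of the original source): page_cluster is
 * exposed as /proc/sys/vm/page-cluster via the table above. Swap
 * readahead reads up to 2^page_cluster pages at a time, so the defaults
 * chosen here mean 4 pages on small-memory machines and 8 pages
 * otherwise.
 */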
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux network device link state notification
*
* Author:
* Stefan Rompf <sux@loplof.de>
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include "dev.h"
enum lw_bits {
LW_URGENT = 0,
};
static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;
static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
static unsigned int default_operstate(const struct net_device *dev)
{
if (netif_testing(dev))
return IF_OPER_TESTING;
/* Some uppers (DSA) have additional sources for being down, so
* first check whether lower is indeed the source of its down state.
*/
if (!netif_carrier_ok(dev)) {
struct net_device *peer;
int iflink;
/* If called from netdev_run_todo()/linkwatch_sync_dev(),
* dev_net(dev) can be already freed, and RTNL is not held.
*/
if (dev->reg_state <= NETREG_REGISTERED)
iflink = dev_get_iflink(dev);
else
iflink = dev->ifindex;
if (iflink == dev->ifindex)
return IF_OPER_DOWN;
ASSERT_RTNL();
peer = __dev_get_by_index(dev_net(dev), iflink);
if (!peer)
return IF_OPER_DOWN;
return netif_carrier_ok(peer) ? IF_OPER_DOWN :
IF_OPER_LOWERLAYERDOWN;
}
if (netif_dormant(dev))
return IF_OPER_DORMANT;
return IF_OPER_UP;
}
static void rfc2863_policy(struct net_device *dev)
{
unsigned int operstate = default_operstate(dev);
if (operstate == READ_ONCE(dev->operstate))
return;
switch(dev->link_mode) {
case IF_LINK_MODE_TESTING:
if (operstate == IF_OPER_UP)
operstate = IF_OPER_TESTING;
break;
case IF_LINK_MODE_DORMANT:
if (operstate == IF_OPER_UP)
operstate = IF_OPER_DORMANT;
break;
case IF_LINK_MODE_DEFAULT:
default:
break;
}
WRITE_ONCE(dev->operstate, operstate);
}
void linkwatch_init_dev(struct net_device *dev)
{
/* Handle pre-registration link state changes */
if (!netif_carrier_ok(dev) || netif_dormant(dev) ||
netif_testing(dev))
		rfc2863_policy(dev);
}
static bool linkwatch_urgent_event(struct net_device *dev)
{
if (!netif_running(dev))
return false;
if (dev->ifindex != dev_get_iflink(dev))
return true;
if (netif_is_lag_port(dev) || netif_is_lag_master(dev))
return true;
return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}
static void linkwatch_add_event(struct net_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&lweventlist_lock, flags);
if (list_empty(&dev->link_watch_list)) {
list_add_tail(&dev->link_watch_list, &lweventlist);
netdev_hold(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
}
static void linkwatch_schedule_work(int urgent)
{
unsigned long delay = linkwatch_nextevent - jiffies;
if (test_bit(LW_URGENT, &linkwatch_flags))
return;
/* Minimise down-time: drop delay for up event. */
if (urgent) {
if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
return;
delay = 0;
}
/* If we wrap around we'll delay it by at most HZ. */
if (delay > HZ)
delay = 0;
/*
* If urgent, schedule immediate execution; otherwise, don't
* override the existing timer.
*/
if (test_bit(LW_URGENT, &linkwatch_flags))
mod_delayed_work(system_dfl_wq, &linkwatch_work, 0);
else
queue_delayed_work(system_dfl_wq, &linkwatch_work, delay);
}
static void linkwatch_do_dev(struct net_device *dev)
{
/*
* Make sure the above read is complete since it can be
* rewritten as soon as we clear the bit below.
*/
smp_mb__before_atomic();
/* We are about to handle this device,
* so new events can be accepted
*/
clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
rfc2863_policy(dev);
if (dev->flags & IFF_UP) {
if (netif_carrier_ok(dev))
dev_activate(dev);
else
dev_deactivate(dev);
netif_state_change(dev);
}
/* Note: our callers are responsible for calling netdev_tracker_free().
* This is the reason we use __dev_put() instead of dev_put().
*/
__dev_put(dev);
}
static void __linkwatch_run_queue(int urgent_only)
{
#define MAX_DO_DEV_PER_LOOP 100
int do_dev = MAX_DO_DEV_PER_LOOP;
/* Use a local list here since we add non-urgent
* events back to the global one when called with
* urgent_only=1.
*/
LIST_HEAD(wrk);
/* Give urgent case more budget */
if (urgent_only)
do_dev += MAX_DO_DEV_PER_LOOP;
/*
* Limit the number of linkwatch events to one
* per second so that a runaway driver does not
* cause a storm of messages on the netlink
* socket. This limit does not apply to up events
* while the device qdisc is down.
*/
if (!urgent_only)
linkwatch_nextevent = jiffies + HZ;
/* Limit wrap-around effect on delay. */
else if (time_after(linkwatch_nextevent, jiffies + HZ))
linkwatch_nextevent = jiffies;
clear_bit(LW_URGENT, &linkwatch_flags);
spin_lock_irq(&lweventlist_lock);
list_splice_init(&lweventlist, &wrk);
while (!list_empty(&wrk) && do_dev > 0) {
struct net_device *dev;
dev = list_first_entry(&wrk, struct net_device, link_watch_list);
list_del_init(&dev->link_watch_list);
if (!netif_device_present(dev) ||
(urgent_only && !linkwatch_urgent_event(dev))) {
list_add_tail(&dev->link_watch_list, &lweventlist);
continue;
}
/* We must free netdev tracker under
* the spinlock protection.
*/
netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
spin_unlock_irq(&lweventlist_lock);
netdev_lock_ops(dev);
linkwatch_do_dev(dev);
netdev_unlock_ops(dev);
do_dev--;
spin_lock_irq(&lweventlist_lock);
}
/* Add the remaining work back to lweventlist */
list_splice_init(&wrk, &lweventlist);
if (!list_empty(&lweventlist))
linkwatch_schedule_work(0);
spin_unlock_irq(&lweventlist_lock);
}
static bool linkwatch_clean_dev(struct net_device *dev)
{
unsigned long flags;
bool clean = false;
spin_lock_irqsave(&lweventlist_lock, flags);
if (!list_empty(&dev->link_watch_list)) {
list_del_init(&dev->link_watch_list);
clean = true;
/* We must release netdev tracker under
* the spinlock protection.
*/
netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
return clean;
}
void __linkwatch_sync_dev(struct net_device *dev)
{
netdev_ops_assert_locked(dev);
if (linkwatch_clean_dev(dev))
linkwatch_do_dev(dev);
}
void linkwatch_sync_dev(struct net_device *dev)
{
if (linkwatch_clean_dev(dev)) {
netdev_lock_ops(dev);
linkwatch_do_dev(dev);
netdev_unlock_ops(dev);
}
}
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
__linkwatch_run_queue(0);
}
static void linkwatch_event(struct work_struct *dummy)
{
rtnl_lock();
__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
rtnl_unlock();
}
void linkwatch_fire_event(struct net_device *dev)
{
bool urgent = linkwatch_urgent_event(dev);
if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
linkwatch_add_event(dev);
} else if (!urgent)
return;
linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);
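/*
 * Illustrative sketch, not part of the original source: drivers do not
 * normally call linkwatch_fire_event() directly; the carrier helpers do
 * it on their behalf. A driver's link-change interrupt handler might
 * simply do:
 *
 *	if (link_up)
 *		netif_carrier_on(netdev);
 *	else
 *		netif_carrier_off(netdev);
 *
 * netif_carrier_on()/netif_carrier_off() toggle the carrier state and
 * fire the linkwatch event that eventually runs rfc2863_policy() above.
 */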
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/mm/memory.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
/*
* demand-loading started 01.12.91 - seems it is high on the list of
* things wanted, and it should be easy to implement. - Linus
*/
/*
* Ok, demand-loading was easy, shared pages a little bit tricker. Shared
* pages started 02.12.91, seems to work. - Linus.
*
* Tested sharing by executing about 30 /bin/sh: under the old kernel it
* would have taken more than the 6M I have free, but it worked well as
* far as I could see.
*
* Also corrected some "invalidate()"s - I wasn't doing enough of them.
*/
/*
* Real VM (paging to/from disk) started 18.12.91. Much more work and
* thought has to go into this. Oh, well..
* 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
* Found it. Everything seems to work now.
* 20.12.91 - Ok, making the swap-device changeable like the root.
*/
/*
* 05.04.94 - Multi-page memory management added for v1.1.
* Idea by Alex Bligh (alex@cconcepts.co.uk)
*
* 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
* (Gerhard.Wichert@pdb.siemens.de)
*
* Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
*/
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/shmem_fs.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <trace/events/kmem.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"
#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);
/*
* Return true if the original pte was a uffd-wp pte marker (so the pte was
* wr-protected).
*/
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
if (!userfaultfd_wp(vmf->vma))
return false;
if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
return false;
return pte_marker_uffd_wp(vmf->orig_pte);
}
/*
* Randomize the address space (stacks, mmaps, brk, etc.).
*
* ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
* as ancient (libc5 based) binaries can segfault. )
*/
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
1;
#else
2;
#endif
static const struct ctl_table mmu_sysctl_table[] = {
{
.procname = "randomize_va_space",
.data = &randomize_va_space,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
};
static int __init init_mm_sysctl(void)
{
register_sysctl_init("kernel", mmu_sysctl_table);
return 0;
}
subsys_initcall(init_mm_sysctl);
#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
/*
* Transitioning a PTE from 'old' to 'young' can be expensive on
* some architectures, even if it's performed in hardware. By
* default, "false" means prefaulted entries will be 'young'.
*/
return false;
}
#endif
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
return 1;
}
__setup("norandmaps", disable_randmaps);
unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);
unsigned long highest_memmap_pfn __read_mostly;
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
static int __init init_zero_pfn(void)
{
zero_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
early_initcall(init_zero_pfn);
void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
trace_rss_stat(mm, member);
}
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
pgtable_t token = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free_tlb(tlb, token, addr);
mm_dec_nr_ptes(tlb->mm);
}
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pmd_t *pmd;
unsigned long next;
unsigned long start;
start = addr;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
start &= PUD_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PUD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
mm_dec_nr_pmds(tlb->mm);
}
static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pud_t *pud;
unsigned long next;
unsigned long start;
start = addr;
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
start &= P4D_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= P4D_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pud = pud_offset(p4d, start);
p4d_clear(p4d);
pud_free_tlb(tlb, pud, start);
mm_dec_nr_puds(tlb->mm);
}
static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
p4d_t *p4d;
unsigned long next;
unsigned long start;
start = addr;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d))
continue;
free_pud_range(tlb, p4d, addr, next, floor, ceiling);
} while (p4d++, addr = next, addr != end);
start &= PGDIR_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PGDIR_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
p4d = p4d_offset(pgd, start);
pgd_clear(pgd);
p4d_free_tlb(tlb, p4d, start);
}
/**
* free_pgd_range - Unmap and free page tables in the range
* @tlb: the mmu_gather containing pending TLB flush info
* @addr: virtual address start
* @end: virtual address end
* @floor: lowest address boundary
* @ceiling: highest address boundary
*
* This function tears down all user-level page tables in the
* specified virtual address range [@addr..@end). It is part of
* the memory unmap flow.
*/
void free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
unsigned long next;
/*
* The next few lines have given us lots of grief...
*
* Why are we testing PMD* at this top level? Because often
* there will be no work to do at all, and we'd prefer not to
* go all the way down to the bottom just to discover that.
*
* Why all these "- 1"s? Because 0 represents both the bottom
* of the address space and the top of it (using -1 for the
* top wouldn't help much: the masks would do the wrong thing).
* The rule is that addr 0 and floor 0 refer to the bottom of
 * the address space, but end 0 and ceiling 0 refer to the top.
* Comparisons need to use "end - 1" and "ceiling - 1" (though
* that end 0 case should be mythical).
*
* Wherever addr is brought up or ceiling brought down, we must
* be careful to reject "the opposite 0" before it confuses the
* subsequent tests. But what about where end is brought down
* by PMD_SIZE below? no, end can't go down to 0 there.
*
* Whereas we round start (addr) and ceiling down, by different
* masks at different levels, in order to test whether a table
* now has no other vmas using it, so can be freed, we don't
* bother to round floor or end up - the tests don't need that.
*/
addr &= PMD_MASK;
if (addr < floor) {
addr += PMD_SIZE;
if (!addr)
return;
}
if (ceiling) {
ceiling &= PMD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
end -= PMD_SIZE;
if (addr > end - 1)
return;
/*
	 * We add page table cache pages with PAGE_SIZE
	 * (see pte_free_tlb()), so flush the tlb if we need to.
*/
tlb_change_page_size(tlb, PAGE_SIZE);
pgd = pgd_offset(tlb->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
}
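/*
 * Worked example for the "- 1" comparisons above (illustrative, not part
 * of the original source): with ceiling == 0 meaning "top of the address
 * space", a plain "end > ceiling" comparison would always fail. Using
 * "end - 1 > ceiling - 1" instead, ceiling - 1 wraps around to ~0UL, so
 * any end compares as "below the top" and the tables are freed as
 * intended.
 */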
void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *vma, unsigned long floor,
unsigned long ceiling, bool mm_wr_locked)
{
struct unlink_vma_file_batch vb;
tlb_free_vmas(tlb);
do {
unsigned long addr = vma->vm_start;
struct vm_area_struct *next;
/*
* Note: USER_PGTABLES_CEILING may be passed as ceiling and may
* be 0. This will underflow and is okay.
*/
next = mas_find(mas, ceiling - 1);
if (unlikely(xa_is_zero(next)))
next = NULL;
/*
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
*/
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
unlink_file_vma_batch_init(&vb);
unlink_file_vma_batch_add(&vb, vma);
/*
* Optimization: gather nearby vmas into one call down
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
vma = next;
next = mas_find(mas, ceiling - 1);
if (unlikely(xa_is_zero(next)))
next = NULL;
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
unlink_file_vma_batch_add(&vb, vma);
}
unlink_file_vma_batch_final(&vb);
free_pgd_range(tlb, addr, vma->vm_end,
floor, next ? next->vm_start : ceiling);
vma = next;
} while (vma);
}
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
mm_inc_nr_ptes(mm);
/*
* Ensure all pte setup (eg. pte page lock and page clearing) are
* visible before the pte is made visible to other CPUs by being
* put into page tables.
*
* The other side of the story is the pointer chasing in the page
* table walking code (when walking the page table without locking;
* ie. most of the time). Fortunately, these data accesses consist
* of a chain of data-dependent loads, meaning most CPUs (alpha
* being the notable exception) will already guarantee loads are
* seen in-order. See the alpha page table accessors for the
* smp_rmb() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
pmd_populate(mm, pmd, *pte);
*pte = NULL;
}
	spin_unlock(ptl);
}
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
pgtable_t new = pte_alloc_one(mm);
if (!new)
return -ENOMEM;
pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}
int __pte_alloc_kernel(pmd_t *pmd)
{
pte_t *new = pte_alloc_one_kernel(&init_mm);
if (!new)
return -ENOMEM;
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
smp_wmb(); /* See comment in pmd_install() */
pmd_populate_kernel(&init_mm, pmd, new);
new = NULL;
}
spin_unlock(&init_mm.page_table_lock);
if (new)
pte_free_kernel(&init_mm, new);
return 0;
}
static inline void init_rss_vec(int *rss)
{
memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}
static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
int i;
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}
static bool is_bad_page_map_ratelimited(void)
{
static unsigned long resume;
static unsigned long nr_shown;
static unsigned long nr_unshown;
/*
* Allow a burst of 60 reports, then keep quiet for that minute;
* or allow a steady drip of one report per second.
*/
if (nr_shown == 60) {
if (time_before(jiffies, resume)) {
nr_unshown++;
return true;
}
if (nr_unshown) {
pr_alert("BUG: Bad page map: %lu messages suppressed\n",
nr_unshown);
nr_unshown = 0;
}
nr_shown = 0;
}
if (nr_shown++ == 0)
resume = jiffies + 60 * HZ;
return false;
}
static void __print_bad_page_map_pgtable(struct mm_struct *mm, unsigned long addr)
{
unsigned long long pgdv, p4dv, pudv, pmdv;
p4d_t p4d, *p4dp;
pud_t pud, *pudp;
pmd_t pmd, *pmdp;
pgd_t *pgdp;
/*
* Although this looks like a fully lockless pgtable walk, it is not:
* see locking requirements for print_bad_page_map().
*/
pgdp = pgd_offset(mm, addr);
pgdv = pgd_val(*pgdp);
if (!pgd_present(*pgdp) || pgd_leaf(*pgdp)) {
pr_alert("pgd:%08llx\n", pgdv);
return;
}
p4dp = p4d_offset(pgdp, addr);
p4d = p4dp_get(p4dp);
p4dv = p4d_val(p4d);
if (!p4d_present(p4d) || p4d_leaf(p4d)) {
pr_alert("pgd:%08llx p4d:%08llx\n", pgdv, p4dv);
return;
}
pudp = pud_offset(p4dp, addr);
pud = pudp_get(pudp);
pudv = pud_val(pud);
if (!pud_present(pud) || pud_leaf(pud)) {
pr_alert("pgd:%08llx p4d:%08llx pud:%08llx\n", pgdv, p4dv, pudv);
return;
}
pmdp = pmd_offset(pudp, addr);
pmd = pmdp_get(pmdp);
pmdv = pmd_val(pmd);
/*
* Dumping the PTE would be nice, but it's tricky with CONFIG_HIGHPTE,
* because the table should already be mapped by the caller and
* doing another map would be bad. print_bad_page_map() should
* already take care of printing the PTE.
*/
pr_alert("pgd:%08llx p4d:%08llx pud:%08llx pmd:%08llx\n", pgdv,
p4dv, pudv, pmdv);
}
/*
* This function is called to print an error when a bad page table entry (e.g.,
* corrupted page table entry) is found. For example, we might have a
* PFN-mapped pte in a region that doesn't allow it.
*
* The calling function must still handle the error.
*
* This function must be called during a proper page table walk, as it will
* re-walk the page table to dump information: the caller MUST prevent page
* table teardown (by holding mmap, vma or rmap lock) and MUST hold the leaf
* page table lock.
*/
static void print_bad_page_map(struct vm_area_struct *vma,
unsigned long addr, unsigned long long entry, struct page *page,
enum pgtable_level level)
{
struct address_space *mapping;
pgoff_t index;
if (is_bad_page_map_ratelimited())
return;
mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
index = linear_page_index(vma, addr);
pr_alert("BUG: Bad page map in process %s %s:%08llx", current->comm,
pgtable_level_to_str(level), entry);
__print_bad_page_map_pgtable(vma->vm_mm, addr);
if (page)
dump_page(page, "bad page map");
pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
pr_alert("file:%pD fault:%ps mmap:%ps mmap_prepare: %ps read_folio:%ps\n",
vma->vm_file,
vma->vm_ops ? vma->vm_ops->fault : NULL,
vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
vma->vm_file ? vma->vm_file->f_op->mmap_prepare : NULL,
mapping ? mapping->a_ops->read_folio : NULL);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#define print_bad_pte(vma, addr, pte, page) \
print_bad_page_map(vma, addr, pte_val(pte), page, PGTABLE_LEVEL_PTE)
/**
* __vm_normal_page() - Get the "struct page" associated with a page table entry.
* @vma: The VMA mapping the page table entry.
* @addr: The address where the page table entry is mapped.
* @pfn: The PFN stored in the page table entry.
* @special: Whether the page table entry is marked "special".
* @level: The page table level for error reporting purposes only.
* @entry: The page table entry value for error reporting purposes only.
*
* "Special" mappings do not wish to be associated with a "struct page" (either
* it doesn't exist, or it exists but they don't want to touch it). In this
* case, NULL is returned here. "Normal" mappings do have a struct page and
* are ordinarily refcounted.
*
* Page mappings of the shared zero folios are always considered "special", as
* they are not ordinarily refcounted: neither the refcount nor the mapcount
* of these folios is adjusted when mapping them into user page tables.
* Selected page table walkers (such as GUP) can still identify mappings of the
* shared zero folios and work with the underlying "struct page".
*
* There are 2 broad cases. Firstly, an architecture may define a "special"
* page table entry bit, such as pte_special(), in which case this function is
* trivial. Secondly, an architecture may not have a spare page table
* entry bit, which requires a more complicated scheme, described below.
*
* With CONFIG_FIND_NORMAL_PAGE, we might have the "special" bit set on
* page table entries that actually map "normal" pages: however, that page
* cannot be looked up through the PFN stored in the page table entry, but
* instead will be looked up through vm_ops->find_normal_page(). So far, this
* only applies to PTEs.
*
* A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
* special mapping (even if there are underlying and valid "struct pages").
* COWed pages of a VM_PFNMAP are always normal.
*
* The way we recognize COWed pages within VM_PFNMAP mappings is through the
* rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
* set, and the vm_pgoff will point to the first PFN mapped: thus every special
* mapping will always honor the rule
*
* pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
*
* And for normal mappings this is false.
*
* This restricts such mappings to be a linear translation from virtual address
* to pfn. To get around this restriction, we allow arbitrary mappings so long
* as the vma is not a COW mapping; in that case, we know that all ptes are
* special (because none can have been COWed).
*
*
* In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
*
* VM_MIXEDMAP mappings can likewise contain memory with or without "struct
* page" backing, however the difference is that _all_ pages with a struct
* page (that is, those where pfn_valid is true, except the shared zero
* folios) are refcounted and considered normal pages by the VM.
*
* The disadvantage is that pages are refcounted (which can be slower and
* simply not an option for some PFNMAP users). The advantage is that we
* don't have to follow the strict linearity rule of PFNMAP mappings in
* order to support COWable mappings.
*
* Return: Returns the "struct page" if this is a "normal" mapping. Returns
* NULL if this is a "special" mapping.
*/
static inline struct page *__vm_normal_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, bool special,
unsigned long long entry, enum pgtable_level level)
{
if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
if (unlikely(special)) {
#ifdef CONFIG_FIND_NORMAL_PAGE
if (vma->vm_ops && vma->vm_ops->find_normal_page)
return vma->vm_ops->find_normal_page(vma, addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
			if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
				return NULL;
if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
return NULL;
print_bad_page_map(vma, addr, entry, NULL, level);
return NULL;
}
/*
* With CONFIG_ARCH_HAS_PTE_SPECIAL, any special page table
* mappings (incl. shared zero folios) are marked accordingly.
*/
} else {
if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
/* If it has a "struct page", it's "normal". */
if (!pfn_valid(pfn))
return NULL;
} else {
unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
/* Only CoW'ed anon folios are "normal". */
if (pfn == vma->vm_pgoff + off)
return NULL;
if (!is_cow_mapping(vma->vm_flags))
return NULL;
}
}
if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
return NULL;
}
if (unlikely(pfn > highest_memmap_pfn)) {
/* Corrupted page table entry. */
print_bad_page_map(vma, addr, entry, NULL, level);
return NULL;
}
/*
* NOTE! We still have PageReserved() pages in the page tables.
* For example, VDSO mappings can cause them to exist.
*/
	VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
	return pfn_to_page(pfn);
}
/**
* vm_normal_page() - Get the "struct page" associated with a PTE
* @vma: The VMA mapping the @pte.
* @addr: The address where the @pte is mapped.
* @pte: The PTE.
*
* Get the "struct page" associated with a PTE. See __vm_normal_page()
* for details on "normal" and "special" mappings.
*
* Return: Returns the "struct page" if this is a "normal" mapping. Returns
* NULL if this is a "special" mapping.
*/
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
{
return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte),
pte_val(pte), PGTABLE_LEVEL_PTE);
}
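/*
 * Illustrative sketch, not part of the original source: a minimal PTE
 * walk that uses vm_normal_page() to skip "special" mappings. Locking
 * and error handling are reduced to the bare minimum for the example.
 *
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 *	if (pte) {
 *		struct page *page = vm_normal_page(vma, addr, ptep_get(pte));
 *
 *		if (page)
 *			... operate on the normal page ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */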
/**
* vm_normal_folio() - Get the "struct folio" associated with a PTE
* @vma: The VMA mapping the @pte.
* @addr: The address where the @pte is mapped.
* @pte: The PTE.
*
* Get the "struct folio" associated with a PTE. See __vm_normal_page()
* for details on "normal" and "special" mappings.
*
* Return: Returns the "struct folio" if this is a "normal" mapping. Returns
* NULL if this is a "special" mapping.
*/
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
{
struct page *page = vm_normal_page(vma, addr, pte);
if (page)
return page_folio(page);
return NULL;
}
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
/**
* vm_normal_page_pmd() - Get the "struct page" associated with a PMD
* @vma: The VMA mapping the @pmd.
* @addr: The address where the @pmd is mapped.
* @pmd: The PMD.
*
* Get the "struct page" associated with a PTE. See __vm_normal_page()
* for details on "normal" and "special" mappings.
*
* Return: Returns the "struct page" if this is a "normal" mapping. Returns
* NULL if this is a "special" mapping.
*/
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd)
{
return __vm_normal_page(vma, addr, pmd_pfn(pmd), pmd_special(pmd),
pmd_val(pmd), PGTABLE_LEVEL_PMD);
}
/**
* vm_normal_folio_pmd() - Get the "struct folio" associated with a PMD
* @vma: The VMA mapping the @pmd.
* @addr: The address where the @pmd is mapped.
* @pmd: The PMD.
*
* Get the "struct folio" associated with a PTE. See __vm_normal_page()
* for details on "normal" and "special" mappings.
*
* Return: Returns the "struct folio" if this is a "normal" mapping. Returns
* NULL if this is a "special" mapping.
*/
struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd)
{
struct page *page = vm_normal_page_pmd(vma, addr, pmd);
if (page)
return page_folio(page);
return NULL;
}
/**
* vm_normal_page_pud() - Get the "struct page" associated with a PUD
* @vma: The VMA mapping the @pud.
* @addr: The address where the @pud is mapped.
* @pud: The PUD.
*
* Get the "struct page" associated with a PUD. See __vm_normal_page()
* for details on "normal" and "special" mappings.
*
* Return: Returns the "struct page" if this is a "normal" mapping. Returns
* NULL if this is a "special" mapping.
*/
struct page *vm_normal_page_pud(struct vm_area_struct *vma,
unsigned long addr, pud_t pud)
{
return __vm_normal_page(vma, addr, pud_pfn(pud), pud_special(pud),
pud_val(pud), PGTABLE_LEVEL_PUD);
}
#endif
/**
* restore_exclusive_pte - Restore a device-exclusive entry
* @vma: VMA covering @address
* @folio: the mapped folio
* @page: the mapped folio page
* @address: the virtual address
* @ptep: pte pointer into the locked page table mapping the folio page
* @orig_pte: pte value at @ptep
*
* Restore a device-exclusive non-swap entry to an ordinary present pte.
*
* The folio and the page table must be locked, and MMU notifiers must have
* been called to invalidate any (exclusive) device mappings.
*
* Locking the folio makes sure that anybody who just converted the pte to
* a device-exclusive entry can map it into the device to make forward
* progress without others converting it back until the folio was unlocked.
*
* If the folio lock ever becomes an issue, we can stop relying on the folio
* lock; it might make some scenarios with heavy thrashing less likely to
* make forward progress, but these scenarios might not be valid use cases.
*
* Note that the folio lock does not protect against all cases of concurrent
* page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers
* must use MMU notifiers to sync against any concurrent changes.
*/
static void restore_exclusive_pte(struct vm_area_struct *vma,
struct folio *folio, struct page *page, unsigned long address,
pte_t *ptep, pte_t orig_pte)
{
pte_t pte;
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
if (pte_swp_soft_dirty(orig_pte))
pte = pte_mksoft_dirty(pte);
if (pte_swp_uffd_wp(orig_pte))
pte = pte_mkuffd_wp(pte);
if ((vma->vm_flags & VM_WRITE) &&
can_change_pte_writable(vma, address, pte)) {
if (folio_test_dirty(folio))
pte = pte_mkdirty(pte);
pte = pte_mkwrite(pte, vma);
}
set_pte_at(vma->vm_mm, address, ptep, pte);
/*
* No need to invalidate - it was non-present before. However
* secondary CPUs may have mappings that need invalidating.
*/
update_mmu_cache(vma, address, ptep);
}
/*
* Tries to restore an exclusive pte if the page lock can be acquired without
* sleeping.
*/
static int try_restore_exclusive_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, pte_t orig_pte)
{
struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
struct folio *folio = page_folio(page);
if (folio_trylock(folio)) {
restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
folio_unlock(folio);
return 0;
}
return -EBUSY;
}
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
* covered by this vma.
*/
static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
vm_flags_t vm_flags = dst_vma->vm_flags;
pte_t orig_pte = ptep_get(src_pte);
pte_t pte = orig_pte;
struct folio *folio;
struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (likely(!non_swap_entry(entry))) {
if (swap_duplicate(entry) < 0)
return -EIO;
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
if (list_empty(&dst_mm->mmlist))
list_add(&dst_mm->mmlist,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
/* Mark the swap entry as shared. */
if (pte_swp_exclusive(orig_pte)) {
pte = pte_swp_clear_exclusive(orig_pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		folio = pfn_swap_entry_folio(entry);

		rss[mm_counter(folio)]++;

		if (!is_readable_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
/*
* COW mappings require pages in both parent and child
* to be set to read. A previously exclusive entry is
* now shared.
*/
entry = make_readable_migration_entry(
swp_offset(entry));
pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(orig_pte))
pte = pte_swp_mksoft_dirty(pte);
if (pte_swp_uffd_wp(orig_pte))
pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
} else if (is_device_private_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
folio = page_folio(page);
/*
* Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
* respect.
*
* We will likely want to have some new rss counters
* for unaddressable pages, at some point. But for now
* keep things as they are.
*/
folio_get(folio);
rss[mm_counter(folio)]++;
/* Cannot fail as these pages cannot get pinned. */
folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);
/*
* We do not preserve soft-dirty information, because so
* far, checkpoint/restore is the only feature that
* requires that. And checkpoint/restore does not work
* when a device driver is involved (you cannot easily
* save and restore device driver state).
*/
if (is_writable_device_private_entry(entry) &&
is_cow_mapping(vm_flags)) {
entry = make_readable_device_private_entry(
swp_offset(entry));
pte = swp_entry_to_pte(entry);
if (pte_swp_uffd_wp(orig_pte))
pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
} else if (is_device_exclusive_entry(entry)) {
/*
* Make device exclusive entries present by restoring the
* original entry then copying as for a present pte. Device
* exclusive entries currently only support private writable
* (ie. COW) mappings.
*/
VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
return -EBUSY;
return -ENOENT;
} else if (is_pte_marker_entry(entry)) {
pte_marker marker = copy_pte_marker(entry, dst_vma);
if (marker)
set_pte_at(dst_mm, addr, dst_pte,
make_pte_marker(marker));
return 0;
}
if (!userfaultfd_wp(dst_vma))
pte = pte_swp_clear_uffd_wp(pte);
set_pte_at(dst_mm, addr, dst_pte, pte);
return 0;
}
/*
* Copy a present and normal page.
*
* NOTE! The usual case is that this isn't required;
* instead, the caller can just increase the page refcount
* and re-use the pte the traditional way.
*
* And if we need a pre-allocated page but don't yet have
* one, return a negative error to let the preallocation
* code know so that it can do so outside the page table
* lock.
*/
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
struct folio **prealloc, struct page *page)
{
struct folio *new_folio;
pte_t pte;
new_folio = *prealloc;
if (!new_folio)
return -EAGAIN;
/*
* We have a prealloc page, all good! Take it
* over and copy the page & arm it.
*/
if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
return -EHWPOISON;
*prealloc = NULL;
__folio_mark_uptodate(new_folio);
folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, dst_vma);
rss[MM_ANONPAGES]++;
/* All done, just insert the new page copy in the child */
	pte = folio_mk_pte(new_folio, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
/* Uffd-wp needs to be delivered to dest pte as well */
pte = pte_mkuffd_wp(pte);
set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
return 0;
}
static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
pte_t pte, unsigned long addr, int nr)
{
struct mm_struct *src_mm = src_vma->vm_mm;
	/* If it's a COW mapping, write protect it in both processes. */
	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
		wrprotect_ptes(src_mm, addr, src_pte, nr);
		pte = pte_wrprotect(pte);
}
/* If it's a shared mapping, mark it clean in the child. */
if (src_vma->vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
if (!userfaultfd_wp(dst_vma))
pte = pte_clear_uffd_wp(pte);
set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
}
/*
* Copy one present PTE, trying to batch-process subsequent PTEs that map
* consecutive pages of the same folio by copying them as well.
*
* Returns -EAGAIN if one preallocated page is required to copy the next PTE.
* Otherwise, returns the number of copied PTEs (at least 1).
*/
static inline int
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
int max_nr, int *rss, struct folio **prealloc)
{
fpb_t flags = FPB_MERGE_WRITE;
struct page *page;
struct folio *folio;
int err, nr;
page = vm_normal_page(src_vma, addr, pte);
if (unlikely(!page))
		goto copy_pte;

	folio = page_folio(page);
/*
* If we likely have to copy, just don't bother with batching. Make
* sure that the common "small folio" case is as fast as possible
* by keeping the batching logic separate.
*/
	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
		if (!(src_vma->vm_flags & VM_SHARED))
flags |= FPB_RESPECT_DIRTY;
if (vma_soft_dirty_enabled(src_vma))
flags |= FPB_RESPECT_SOFT_DIRTY;
nr = folio_pte_batch_flags(folio, src_vma, src_pte, &pte, max_nr, flags);
folio_ref_add(folio, nr);
		if (folio_test_anon(folio)) {
			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
nr, dst_vma, src_vma))) {
folio_ref_sub(folio, nr);
return -EAGAIN;
}
rss[MM_ANONPAGES] += nr;
VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
} else {
			folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
			rss[mm_counter_file(folio)] += nr;
}
__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
addr, nr);
return nr;
}
	folio_get(folio);
	if (folio_test_anon(folio)) {
/*
* If this page may have been pinned by the parent process,
* copy the page immediately for the child so that we'll always
* guarantee the pinned page won't be randomly replaced in the
* future.
*/
if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
/* Page may be pinned, we have to copy. */
folio_put(folio);
err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
addr, rss, prealloc, page);
return err ? err : 1;
}
		rss[MM_ANONPAGES]++;
		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
} else {
		folio_dup_file_rmap_pte(folio, page, dst_vma);
		rss[mm_counter_file(folio)]++;
}
copy_pte:
	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
	return 1;
}
static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
struct folio *new_folio;
if (need_zero)
new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
else
		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);

	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		return NULL;
}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}
static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
unsigned long end)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct mm_struct *src_mm = src_vma->vm_mm;
pte_t *orig_src_pte, *orig_dst_pte;
pte_t *src_pte, *dst_pte;
pmd_t dummy_pmdval;
pte_t ptent;
spinlock_t *src_ptl, *dst_ptl;
int progress, max_nr, ret = 0;
int rss[NR_MM_COUNTERS];
swp_entry_t entry = (swp_entry_t){0};
struct folio *prealloc = NULL;
int nr;
again:
progress = 0;
init_rss_vec(rss);
/*
* copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
* error handling here, assume that exclusive mmap_lock on dst and src
* protects anon from unexpected THP transitions; with shmem and file
* protected by mmap_lock-less collapse skipping areas with anon_vma
* (whereas vma_needs_copy() skips areas without anon_vma). A rework
* can remove such assumptions later, but this is good enough for now.
*/
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
}
/*
* We already hold the exclusive mmap_lock, the copy_pte_range() and
* retract_page_tables() are using vma->anon_vma to be exclusive, so
* the PTE page is stable, and there is no need to get pmdval and do
* pmd_same() check.
*/
src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
&src_ptl);
	if (!src_pte) {
		pte_unmap_unlock(dst_pte, dst_ptl);
/* ret == 0 */
goto out;
}
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
orig_src_pte = src_pte;
orig_dst_pte = dst_pte;
arch_enter_lazy_mmu_mode();
do {
nr = 1;
/*
* We are holding two locks at this point - either of them
* could generate latencies in another task on another CPU.
*/
if (progress >= 32) {
progress = 0;
if (need_resched() || spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
break;
}
ptent = ptep_get(src_pte);
if (pte_none(ptent)) {
progress++;
continue;
}
		if (unlikely(!pte_present(ptent))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
dst_pte, src_pte,
dst_vma, src_vma,
addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(ptep_get(src_pte));
				break;
} else if (ret == -EBUSY) {
break;
} else if (!ret) {
progress += 8;
continue;
}
ptent = ptep_get(src_pte);
VM_WARN_ON_ONCE(!pte_present(ptent));
/*
* Device exclusive entry restored, continue by copying
* the now present pte.
*/
WARN_ON_ONCE(ret != -ENOENT);
}
/* copy_present_ptes() will clear `*prealloc' if consumed */
max_nr = (end - addr) / PAGE_SIZE;
ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
ptent, addr, max_nr, rss, &prealloc);
/*
* If we need a pre-allocated page for this pte, drop the
* locks, allocate, and try again.
* If copy failed due to hwpoison in source page, break out.
*/
if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
break;
if (unlikely(prealloc)) {
/*
* pre-alloc page cannot be reused by next time so as
* to strictly follow mempolicy (e.g., alloc_page_vma()
* will allocate page according to address). This
* could only happen if one pinned pte changed.
*/
			folio_put(prealloc);
			prealloc = NULL;
}
nr = ret;
		progress += 8 * nr;
	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
addr != end);
arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_src_pte, src_ptl);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
ret = -ENOMEM;
goto out;
}
		entry.val = 0;
	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
		goto out;
	} else if (ret == -EAGAIN) {
prealloc = folio_prealloc(src_mm, src_vma, addr, false);
if (!prealloc)
return -ENOMEM;
} else if (ret < 0) {
VM_WARN_ON_ONCE(1);
}
/* We've captured and resolved the error. Reset, try again. */
ret = 0;
if (addr != end)
goto again;
out:
	if (unlikely(prealloc))
		folio_put(prealloc);
return ret;
}
static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
unsigned long end)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct mm_struct *src_mm = src_vma->vm_mm;
pmd_t *src_pmd, *dst_pmd;
unsigned long next;
	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
return -ENOMEM;
src_pmd = pmd_offset(src_pud, addr);
do {
next = pmd_addr_end(addr, end);
if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)) {
int err;
VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
addr, dst_vma, src_vma);
if (err == -ENOMEM)
return -ENOMEM;
if (!err)
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(src_pmd))
continue;
if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
addr, next))
return -ENOMEM;
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
return 0;
}
static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
unsigned long end)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct mm_struct *src_mm = src_vma->vm_mm;
pud_t *src_pud, *dst_pud;
unsigned long next;
	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
return -ENOMEM;
src_pud = pud_offset(src_p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_trans_huge(*src_pud)) {
int err;
VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
err = copy_huge_pud(dst_mm, src_mm,
dst_pud, src_pud, addr, src_vma);
if (err == -ENOMEM)
return -ENOMEM;
if (!err)
continue;
/* fall through */
}
if (pud_none_or_clear_bad(src_pud))
continue;
if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
addr, next))
return -ENOMEM;
} while (dst_pud++, src_pud++, addr = next, addr != end);
return 0;
}
static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
unsigned long end)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
p4d_t *src_p4d, *dst_p4d;
unsigned long next;
	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
return -ENOMEM;
src_p4d = p4d_offset(src_pgd, addr);
do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
continue;
if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
addr, next))
return -ENOMEM;
} while (dst_p4d++, src_p4d++, addr = next, addr != end);
return 0;
}
/*
* Return true if the vma needs to copy the pgtable during this fork(). Return
* false when we can speed up fork() by allowing lazy page faults later until
* when the child accesses the memory range.
*/
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
/*
* Always copy pgtables when dst_vma has uffd-wp enabled even if it's
* file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
* contains uffd-wp protection information, that's something we can't
* retrieve from page cache, and skip copying will lose those info.
*/
if (userfaultfd_wp(dst_vma))
return true;
if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
return true;
if (src_vma->anon_vma)
return true;
/*
* Don't copy ptes where a page fault will fill them correctly. Fork
* becomes much lighter when there are big shared or private readonly
* mappings. The tradeoff is that copy_page_range is more efficient
* than faulting.
*/
return false;
}
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
pgd_t *src_pgd, *dst_pgd;
unsigned long addr = src_vma->vm_start;
unsigned long end = src_vma->vm_end;
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct mm_struct *src_mm = src_vma->vm_mm;
struct mmu_notifier_range range;
unsigned long next;
bool is_cow;
int ret;
	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
/*
* We need to invalidate the secondary MMU mappings only when
* there could be a permission downgrade on the ptes of the
* parent mm. And a permission downgrade will only happen if
* is_cow_mapping() returns true.
*/
is_cow = is_cow_mapping(src_vma->vm_flags);
if (is_cow) {
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, src_mm, addr, end);
mmu_notifier_invalidate_range_start(&range);
/*
* Disabling preemption is not needed for the write side, as
* the read side doesn't spin, but goes to the mmap_lock.
*
* Use the raw variant of the seqcount_t write API to avoid
* lockdep complaining about preemptibility.
*/
vma_assert_write_locked(src_vma);
raw_write_seqcount_begin(&src_mm->write_protect_seq);
}
ret = 0;
dst_pgd = pgd_offset(dst_mm, addr);
src_pgd = pgd_offset(src_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(src_pgd))
continue;
if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
addr, next))) {
ret = -ENOMEM;
break;
}
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
if (is_cow) {
raw_write_seqcount_end(&src_mm->write_protect_seq);
mmu_notifier_invalidate_range_end(&range);
}
return ret;
}
/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
/* By default, zap all pages */
if (!details || details->reclaim_pt)
return true;
/* Or, we zap COWed pages only if the caller wants to */
return details->even_cows;
}
/* Decide whether we should zap this folio, given the folio pointer */
static inline bool should_zap_folio(struct zap_details *details,
struct folio *folio)
{
/* If we can make a decision without *folio.. */
if (should_zap_cows(details))
return true;
/* Otherwise we should only zap non-anon folios */
return !folio_test_anon(folio);
}
static inline bool zap_drop_markers(struct zap_details *details)
{
if (!details)
return false;
return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}
/*
* This function makes sure that we'll replace the none pte with an uffd-wp
* swap special pte marker when necessary. Must be called with the pgtable
* lock held.
*
* Returns true if any uffd-wp pte was installed, false otherwise.
*/
static inline bool
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte, int nr,
struct zap_details *details, pte_t pteval)
{
bool was_installed = false;
#ifdef CONFIG_PTE_MARKER_UFFD_WP
/* Zap on anonymous always means dropping everything */
if (vma_is_anonymous(vma))
return false;
if (zap_drop_markers(details))
return false;
for (;;) {
/* the PFN in the PTE is irrelevant. */
if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
was_installed = true;
if (--nr == 0)
break;
pte++;
addr += PAGE_SIZE;
}
#endif
return was_installed;
}
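/*
* Zap a batch of present ptes that all map pages of the same folio: clear the
* ptes, transfer accessed/dirty state to the folio, adjust the rss counters,
* queue the TLB flushes and drop the folio's rmap references.
*/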
static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
struct vm_area_struct *vma, struct folio *folio,
struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
unsigned long addr, struct zap_details *details, int *rss,
bool *force_flush, bool *force_break, bool *any_skipped)
{
struct mm_struct *mm = tlb->mm;
bool delay_rmap = false;
if (!folio_test_anon(folio)) {
ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
if (pte_dirty(ptent)) {
folio_mark_dirty(folio);
if (tlb_delay_rmap(tlb)) {
delay_rmap = true;
*force_flush = true;
}
}
if (pte_young(ptent) && likely(vma_has_recency(vma)))
folio_mark_accessed(folio);
rss[mm_counter(folio)] -= nr;
} else {
/* We don't need up-to-date accessed/dirty bits. */
clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
rss[MM_ANONPAGES] -= nr;
}
/* Checking a single PTE in a batch is sufficient. */
arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entries(tlb, pte, nr, addr);
if (unlikely(userfaultfd_pte_wp(vma, ptent)))
*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
nr, details, ptent);
if (!delay_rmap) {
folio_remove_rmap_ptes(folio, page, nr, vma);
if (unlikely(folio_mapcount(folio) < 0))
print_bad_pte(vma, addr, ptent, page);
}
if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
*force_flush = true;
*force_break = true;
}
}
/*
* Zap or skip at least one present PTE, trying to batch-process subsequent
* PTEs that map consecutive pages of the same folio.
*
* Returns the number of processed (skipped or zapped) PTEs (at least 1).
*/
static inline int zap_present_ptes(struct mmu_gather *tlb,
struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
unsigned int max_nr, unsigned long addr,
struct zap_details *details, int *rss, bool *force_flush,
bool *force_break, bool *any_skipped)
{
struct mm_struct *mm = tlb->mm;
struct folio *folio;
struct page *page;
int nr;
page = vm_normal_page(vma, addr, ptent);
if (!page) {
/* We don't need up-to-date accessed/dirty bits. */
ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entry(tlb, pte, addr);
if (userfaultfd_pte_wp(vma, ptent))
*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
pte, 1, details, ptent);
ksm_might_unmap_zero_page(mm, ptent);
return 1;
}
folio = page_folio(page);
if (unlikely(!should_zap_folio(details, folio))) {
*any_skipped = true;
return 1;
}
/*
* Make sure that the common "small folio" case is as fast as possible
* by keeping the batching logic separate.
*/
if (unlikely(folio_test_large(folio) && max_nr != 1)) {
nr = folio_pte_batch(folio, pte, ptent, max_nr);
zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
addr, details, rss, force_flush,
force_break, any_skipped);
return nr;
}
zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
details, rss, force_flush, force_break, any_skipped);
return 1;
}
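/*
* Zap up to @max_nr non-present ptes starting at @pte/@addr. Device
* private/exclusive entries, genuine swap entries, migration entries and the
* various pte markers each get their own handling. Returns the number of
* ptes processed (at least 1).
*/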
static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
unsigned int max_nr, unsigned long addr,
struct zap_details *details, int *rss, bool *any_skipped)
{
swp_entry_t entry;
int nr = 1;
*any_skipped = true;
entry = pte_to_swp_entry(ptent);
if (is_device_private_entry(entry) ||
is_device_exclusive_entry(entry)) {
struct page *page = pfn_swap_entry_to_page(entry);
struct folio *folio = page_folio(page);
if (unlikely(!should_zap_folio(details, folio)))
return 1;
/*
* Both device private and device exclusive mappings
* should only work with anonymous pages so far, so we
* don't need to consider the uffd-wp bit when zapping.
* For more information,
* see zap_install_uffd_wp_if_needed().
*/
WARN_ON_ONCE(!vma_is_anonymous(vma));
rss[mm_counter(folio)]--;
folio_remove_rmap_pte(folio, page, vma);
folio_put(folio);
} else if (!non_swap_entry(entry)) {
/* Genuine swap entries, hence private anon pages */
if (!should_zap_cows(details))
return 1;
nr = swap_pte_batch(pte, max_nr, ptent);
rss[MM_SWAPENTS] -= nr;
free_swap_and_cache_nr(entry, nr);
} else if (is_migration_entry(entry)) {
struct folio *folio = pfn_swap_entry_folio(entry);
if (!should_zap_folio(details, folio))
return 1;
rss[mm_counter(folio)]--;
} else if (pte_marker_entry_uffd_wp(entry)) {
/*
* For anon: always drop the marker; for file: only
* drop the marker if explicitly requested.
*/
if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
return 1;
} else if (is_guard_swp_entry(entry)) {
/*
* Ordinary zapping should not remove guard PTE
* markers. Only do so if we should remove PTE markers
* in general.
*/
if (!zap_drop_markers(details))
return 1;
} else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
if (!should_zap_cows(details))
return 1;
} else {
/* We should have covered all the swap entry types */
pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
WARN_ON_ONCE(1);
}
clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
return nr;
}
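/*
* Zap one run of ptes: skip over any leading none ptes, then hand a present
* entry to zap_present_ptes() or a non-present one to zap_nonpresent_ptes().
* Returns the total number of ptes consumed (skipped plus zapped).
*/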
static inline int do_zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, unsigned long end,
struct zap_details *details, int *rss,
bool *force_flush, bool *force_break,
bool *any_skipped)
{
pte_t ptent = ptep_get(pte);
int max_nr = (end - addr) / PAGE_SIZE;
int nr = 0;
/* Skip all consecutive none ptes */
if (pte_none(ptent)) {
for (nr = 1; nr < max_nr; nr++) {
ptent = ptep_get(pte + nr);
if (!pte_none(ptent))
break;
}
max_nr -= nr;
if (!max_nr)
return nr;
pte += nr;
addr += nr * PAGE_SIZE;
}
if (pte_present(ptent))
nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
details, rss, force_flush, force_break,
any_skipped);
else
nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
details, rss, any_skipped);
return nr;
}
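/*
* Walk and zap the pte level under one pmd: map and lock the pte table, zap
* entries in batches (rescheduling and flushing the TLB as needed), and try
* to free the pte page itself once the whole table has been emptied.
* Returns the address the walk stopped at.
*/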
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
bool force_flush = false, force_break = false;
struct mm_struct *mm = tlb->mm;
int rss[NR_MM_COUNTERS];
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
pmd_t pmdval;
unsigned long start = addr;
bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
bool direct_reclaim = true;
int nr;
retry:
tlb_change_page_size(tlb, PAGE_SIZE);
init_rss_vec(rss);
start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return addr;
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
do {
bool any_skipped = false;
if (need_resched()) {
direct_reclaim = false;
break;
}
nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
&force_flush, &force_break, &any_skipped);
if (any_skipped)
can_reclaim_pt = false;
if (unlikely(force_break)) {
addr += nr * PAGE_SIZE;
direct_reclaim = false;
break;
}
} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
/*
* Fast path: try to hold the pmd lock and unmap the PTE page.
*
* If the pte lock was released midway (retry case), or if the attempt
* to hold the pmd lock failed, then we need to recheck all pte entries
* to ensure they are still none, thereby preventing the pte entries
* from being repopulated by another thread.
*/
if (can_reclaim_pt && direct_reclaim && addr == end)
direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
add_mm_rss_vec(mm, rss);
arch_leave_lazy_mmu_mode();
/* Do the actual TLB flush before dropping ptl */
if (force_flush) {
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_rmaps(tlb, vma);
}
pte_unmap_unlock(start_pte, ptl);
/*
* If we forced a TLB flush (either due to running out of
* batch buffers or because we needed to flush dirty TLB
* entries before releasing the ptl), free the batched
* memory too. Come back again if we didn't do everything.
*/
if (force_flush)
tlb_flush_mmu(tlb);
if (addr != end) {
cond_resched();
force_flush = false;
force_break = false;
goto retry;
}
if (can_reclaim_pt) {
if (direct_reclaim)
free_pte(mm, start, tlb, pmdval);
else
try_to_free_pte(mm, pmd, start, tlb);
}
return addr;
}
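/*
* Walk the pmd level for one pud: split or zap transparent huge pmds, then
* zap the pte range underneath each remaining pmd entry.
*/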
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pmd_t *pmd;
unsigned long next;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE)
__split_huge_pmd(vma, pmd, addr, false);
else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
addr = next;
continue;
}
/* fall through */
} else if (details && details->single_folio &&
folio_test_pmd_mappable(details->single_folio) &&
next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
/*
* Take and drop THP pmd lock so that we cannot return
* prematurely, while zap_huge_pmd() has cleared *pmd,
* but not yet decremented compound_mapcount().
*/
spin_unlock(ptl);
}
if (pmd_none(*pmd)) {
addr = next;
continue;
}
addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
if (addr != next)
pmd--;
} while (pmd++, cond_resched(), addr != end);
return addr;
}
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, p4d_t *p4d,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pud_t *pud;
unsigned long next;
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_trans_huge(*pud)) {
if (next - addr != HPAGE_PUD_SIZE) {
mmap_assert_locked(tlb->mm);
split_huge_pud(vma, pud, addr);
} else if (zap_huge_pud(tlb, vma, pud, addr))
goto next;
/* fall through */
}
if (pud_none_or_clear_bad(pud))
continue;
next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
cond_resched();
} while (pud++, addr = next, addr != end);
return addr;
}
static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
p4d_t *p4d;
unsigned long next;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d))
continue;
next = zap_pud_range(tlb, vma, p4d, addr, next, details);
} while (p4d++, addr = next, addr != end);
return addr;
}
void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pgd_t *pgd;
unsigned long next;
BUG_ON(addr >= end);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
}
static void unmap_single_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr,
struct zap_details *details, bool mm_wr_locked)
{
unsigned long start = max(vma->vm_start, start_addr);
unsigned long end;
if (start >= vma->vm_end)
return;
end = min(vma->vm_end, end_addr);
if (end <= vma->vm_start)
return;
if (vma->vm_file)
uprobe_munmap(vma, start, end);
if (start != end) {
if (unlikely(is_vm_hugetlb_page(vma))) {
/*
* It is undesirable to test vma->vm_file as it
* should be non-null for valid hugetlb area.
* However, vm_file will be NULL in the error
* cleanup path of mmap_region. When
* hugetlbfs ->mmap method fails,
* mmap_region() nullifies vma->vm_file
* before calling this function to clean up.
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
*/
if (vma->vm_file) {
zap_flags_t zap_flags = details ?
details->zap_flags : 0;
__unmap_hugepage_range(tlb, vma, start, end,
NULL, zap_flags);
}
} else
unmap_page_range(tlb, vma, start, end, details);
}
}
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlb: address of the caller's struct mmu_gather
* @mas: the maple state
* @vma: the starting vma
* @start_addr: virtual address at which to start unmapping
* @end_addr: virtual address at which to end unmapping
* @tree_end: The maximum index to check
* @mm_wr_locked: whether the caller holds the mmap_lock for writing
*
* Unmap all pages in the vma list.
*
* Only addresses between `start' and `end' will be unmapped.
*
* The VMA list must be sorted in ascending virtual address order.
*
* unmap_vmas() assumes that the caller will flush the whole unmapped address
* range after unmap_vmas() returns. So the only responsibility here is to
* ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules.
*/
void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr, unsigned long tree_end,
bool mm_wr_locked)
{
struct mmu_notifier_range range;
struct zap_details details = {
.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
/* Careful - we need to zap private pages too! */
.even_cows = true,
};
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
do {
unsigned long start = start_addr;
unsigned long end = end_addr;
hugetlb_zap_begin(vma, &start, &end);
unmap_single_vma(tlb, vma, start, end, &details,
mm_wr_locked);
hugetlb_zap_end(vma, &details);
vma = mas_find(mas, tree_end - 1);
} while (vma && likely(!xa_is_zero(vma)));
mmu_notifier_invalidate_range_end(&range);
}
/**
* zap_page_range_single_batched - remove user pages in a given range
* @tlb: pointer to the caller's struct mmu_gather
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to remove
* @size: number of bytes to remove
* @details: details of shared cache invalidation
*
* @tlb shouldn't be NULL. The range must fit into one VMA. If @vma is for
* hugetlb, @tlb is flushed and re-initialized by this function.
*/
void zap_page_range_single_batched(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
const unsigned long end = address + size;
struct mmu_notifier_range range;
VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
hugetlb_zap_begin(vma, &range.start, &range.end);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
/*
* unmap 'address-end' not 'range.start-range.end' as range
* could have been expanded for hugetlb pmd sharing.
*/
unmap_single_vma(tlb, vma, address, end, details, false);
mmu_notifier_invalidate_range_end(&range);
if (is_vm_hugetlb_page(vma)) {
/*
* flush tlb and free resources before hugetlb_zap_end(), to
* avoid concurrent page faults' allocation failure.
*/
tlb_finish_mmu(tlb);
hugetlb_zap_end(vma, details);
tlb_gather_mmu(tlb, vma->vm_mm);
}
}
/**
* zap_page_range_single - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to zap
* @size: number of bytes to zap
* @details: details of shared cache invalidation
*
* The range must fit into one VMA.
*/
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
struct mmu_gather tlb;
tlb_gather_mmu(&tlb, vma->vm_mm);
zap_page_range_single_batched(&tlb, vma, address, size, details);
tlb_finish_mmu(&tlb);
}
/**
* zap_vma_ptes - remove ptes mapping the vma
* @vma: vm_area_struct holding ptes to be zapped
* @address: starting address of pages to zap
* @size: number of bytes to zap
*
* This function only unmaps ptes assigned to VM_PFNMAP vmas.
*
* The entire address range must be fully contained within the vma.
*
*/
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (!range_in_vma(vma, address, address + size) ||
!(vma->vm_flags & VM_PFNMAP))
return;
zap_page_range_single(vma, address, size, NULL);
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
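/* Walk down to the pmd covering @addr, allocating page-table levels as needed. */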
static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
p4d = p4d_alloc(mm, pgd, addr);
if (!p4d)
return NULL;
pud = pud_alloc(mm, p4d, addr);
if (!pud)
return NULL;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return NULL;
VM_BUG_ON(pmd_trans_huge(*pmd));
return pmd;
}
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pmd_t *pmd = walk_to_pmd(mm, addr);
if (!pmd)
return NULL;
return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
{
VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
/*
* Whoever wants to forbid the zeropage after some zeropages
* might already have been mapped has to scan the page tables and
* bail out on any zeropages. Zeropages in COW mappings can
* be unshared using FAULT_FLAG_UNSHARE faults.
*/
if (mm_forbids_zeropage(vma->vm_mm))
return false;
/* zeropages in COW mappings are common and unproblematic. */
if (is_cow_mapping(vma->vm_flags))
return true;
/* Mappings that do not allow for writable PTEs are unproblematic. */
if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
return true;
/*
* Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
* find the shared zeropage and longterm-pin it, which would
* be problematic as soon as the zeropage gets replaced by a different
* page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
* now differ from what GUP looked up. FSDAX is incompatible with
* FOLL_LONGTERM and VM_IO is incompatible with GUP completely (see
* check_vma_flags).
*/
return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
(vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
}
static int validate_page_before_insert(struct vm_area_struct *vma,
struct page *page)
{
struct folio *folio = page_folio(page);
if (!folio_ref_count(folio))
return -EINVAL;
if (unlikely(is_zero_folio(folio))) {
if (!vm_mixed_zeropage_allowed(vma))
return -EINVAL;
return 0;
}
if (folio_test_anon(folio) || page_has_type(page))
return -EINVAL;
flush_dcache_folio(folio);
return 0;
}
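/*
* Insert @page at @addr with @prot; the pte is already mapped and locked by
* the caller. A populated pte fails with -EBUSY unless @mkwrite is set, in
* which case an existing pte for the same pfn is upgraded in place instead.
*/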
static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, struct page *page,
pgprot_t prot, bool mkwrite)
{
struct folio *folio = page_folio(page);
pte_t pteval = ptep_get(pte);
if (!pte_none(pteval)) {
if (!mkwrite)
return -EBUSY;
/* see insert_pfn(). */
if (pte_pfn(pteval) != page_to_pfn(page)) {
WARN_ON_ONCE(!is_zero_pfn(pte_pfn(pteval)));
return -EFAULT;
}
pteval = maybe_mkwrite(pteval, vma);
pteval = pte_mkyoung(pteval);
if (ptep_set_access_flags(vma, addr, pte, pteval, 1))
update_mmu_cache(vma, addr, pte);
return 0;
}
/* Ok, finally just insert the thing.. */
pteval = mk_pte(page, prot);
if (unlikely(is_zero_folio(folio))) {
pteval = pte_mkspecial(pteval);
} else {
folio_get(folio);
pteval = mk_pte(page, prot);
if (mkwrite) {
pteval = pte_mkyoung(pteval);
pteval = maybe_mkwrite(pte_mkdirty(pteval), vma);
}
inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
folio_add_file_rmap_pte(folio, page, vma);
}
set_pte_at(vma->vm_mm, addr, pte, pteval);
return 0;
}
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page, pgprot_t prot, bool mkwrite)
{
int retval;
pte_t *pte;
spinlock_t *ptl;
retval = validate_page_before_insert(vma, page);
if (retval)
goto out;
retval = -ENOMEM;
pte = get_locked_pte(vma->vm_mm, addr, &ptl);
if (!pte)
goto out;
retval = insert_page_into_pte_locked(vma, pte, addr, page, prot,
mkwrite);
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, struct page *page, pgprot_t prot)
{
int err;
err = validate_page_before_insert(vma, page);
if (err)
return err;
return insert_page_into_pte_locked(vma, pte, addr, page, prot, false);
}
/* insert_pages() amortizes the cost of spinlock operations
* when inserting pages in a loop.
*/
static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num, pgprot_t prot)
{
pmd_t *pmd = NULL;
pte_t *start_pte, *pte;
spinlock_t *pte_lock;
struct mm_struct *const mm = vma->vm_mm;
unsigned long curr_page_idx = 0;
unsigned long remaining_pages_total = *num;
unsigned long pages_to_write_in_pmd;
int ret;
more:
ret = -EFAULT;
pmd = walk_to_pmd(mm, addr);
if (!pmd)
goto out;
pages_to_write_in_pmd = min_t(unsigned long,
remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
/* Allocate the PTE if necessary; takes PMD lock once only. */
ret = -ENOMEM;
if (pte_alloc(mm, pmd))
goto out;
while (pages_to_write_in_pmd) {
int pte_idx = 0;
const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
if (!start_pte) {
ret = -EFAULT;
goto out;
}
for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
int err = insert_page_in_batch_locked(vma, pte,
addr, pages[curr_page_idx], prot);
if (unlikely(err)) {
pte_unmap_unlock(start_pte, pte_lock);
ret = err;
remaining_pages_total -= pte_idx;
goto out;
}
addr += PAGE_SIZE;
++curr_page_idx;
}
pte_unmap_unlock(start_pte, pte_lock);
pages_to_write_in_pmd -= batch_size;
remaining_pages_total -= batch_size;
}
if (remaining_pages_total)
goto more;
ret = 0;
out:
*num = remaining_pages_total;
return ret;
}
/**
* vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
* @vma: user vma to map to
* @addr: target start user address of these pages
* @pages: source kernel pages
* @num: in: number of pages to map. out: number of pages that were *not*
* mapped. (0 means all pages were successfully mapped).
*
* Preferred over vm_insert_page() when inserting multiple pages.
*
* In case of error, we may have mapped a subset of the provided
* pages. It is the caller's responsibility to account for this case.
*
* The same restrictions apply as in vm_insert_page().
*/
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num)
{
const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
if (addr < vma->vm_start || end_addr >= vma->vm_end)
return -EFAULT;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vm_flags_set(vma, VM_MIXEDMAP);
}
/* Defer page refcount checking till we're about to map that page. */
return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_pages);
/**
* vm_insert_page - insert single page into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @page: source kernel page
*
* This allows drivers to insert individual pages they've allocated
* into a user vma. The zeropage is supported in some VMAs,
* see vm_mixed_zeropage_allowed().
*
* The page has to be a nice clean _individual_ kernel allocation.
* If you allocate a compound page, you need to have marked it as
* such (__GFP_COMP), or manually just split the page up yourself
* (see split_page()).
*
* NOTE! Traditionally this was done with "remap_pfn_range()" which
* took an arbitrary page protection parameter. This doesn't allow
* that. Your vma protection will have to be set up correctly, which
* means that if you want a shared writable mapping, you'd better
* ask for a shared writable mapping!
*
* The page does not need to be reserved.
*
* Usually this function is called from f_op->mmap() handler
* under mm->mmap_lock write-lock, so it can change vma->vm_flags.
* Caller must set VM_MIXEDMAP on vma if it wants to call this
* function from other places, for example from page-fault handler.
*
* Return: %0 on success, negative error code otherwise.
*/
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vm_flags_set(vma, VM_MIXEDMAP);
}
return insert_page(vma, addr, page, vma->vm_page_prot, false);
}
EXPORT_SYMBOL(vm_insert_page);
/*
* __vm_map_pages - maps range of kernel pages into user vma
* @vma: user vma to map to
* @pages: pointer to array of source kernel pages
* @num: number of pages in page array
* @offset: user's requested vm_pgoff
*
* This allows drivers to map range of kernel pages into a user vma.
* The zeropage is supported in some VMAs, see
* vm_mixed_zeropage_allowed().
*
* Return: 0 on success and error code otherwise.
*/
static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num, unsigned long offset)
{
unsigned long count = vma_pages(vma);
unsigned long uaddr = vma->vm_start;
int ret, i;
/* Fail if the user requested offset is beyond the end of the object */
if (offset >= num)
return -ENXIO;
/* Fail if the user requested size exceeds available object size */
if (count > num - offset)
return -ENXIO;
for (i = 0; i < count; i++) {
ret = vm_insert_page(vma, uaddr, pages[offset + i]);
if (ret < 0)
return ret;
uaddr += PAGE_SIZE;
}
return 0;
}
/**
* vm_map_pages - map a range of kernel pages starting at a non-zero offset
* @vma: user vma to map to
* @pages: pointer to array of source kernel pages
* @num: number of pages in page array
*
* Maps an object consisting of @num pages, catering for the user's
* requested vm_pgoff
*
* If we fail to insert any page into the vma, the function will return
* immediately leaving any previously inserted pages present. Callers
* from the mmap handler may immediately return the error as their caller
* will destroy the vma, removing any successfully inserted pages. Other
* callers should make their own arrangements for calling unmap_region().
*
* Context: Process context. Called by mmap handlers.
* Return: 0 on success and error code otherwise.
*/
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num)
{
return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
}
EXPORT_SYMBOL(vm_map_pages);
/**
* vm_map_pages_zero - map a range of kernel pages starting at offset zero
* @vma: user vma to map to
* @pages: pointer to array of source kernel pages
* @num: number of pages in page array
*
* Similar to vm_map_pages(), except that it explicitly sets the offset
* to 0. This function is intended for the drivers that did not consider
* vm_pgoff.
*
* Context: Process context. Called by mmap handlers.
* Return: 0 on success and error code otherwise.
*/
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
unsigned long num)
{
return __vm_map_pages(vma, pages, num, 0);
}
EXPORT_SYMBOL(vm_map_pages_zero);
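/*
* Insert a special (no struct page) pte mapping @pfn at @addr. An
* already-present pte is left untouched unless @mkwrite is set, in which case
* an entry for the same pfn is upgraded to a young, writable, dirty one.
*/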
static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t prot, bool mkwrite)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte, entry;
spinlock_t *ptl;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
return VM_FAULT_OOM;
entry = ptep_get(pte);
if (!pte_none(entry)) {
if (mkwrite) {
/*
* For read faults on private mappings the PFN passed
* in may not match the PFN we have mapped if the
* mapped PFN is a writeable COW page. In the mkwrite
* case we are creating a writable PTE for a shared
* mapping and we expect the PFNs to match. If they
* don't match, we are likely racing with block
* allocation and mapping invalidation so just skip the
* update.
*/
if (pte_pfn(entry) != pfn) {
WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
goto out_unlock;
}
entry = pte_mkyoung(entry);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, addr, pte, entry, 1))
update_mmu_cache(vma, addr, pte);
}
goto out_unlock;
}
/* Ok, finally just insert the thing.. */
entry = pte_mkspecial(pfn_pte(pfn, prot));
if (mkwrite) {
entry = pte_mkyoung(entry);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
}
set_pte_at(mm, addr, pte, entry);
update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
out_unlock:
pte_unmap_unlock(pte, ptl);
return VM_FAULT_NOPAGE;
}
/**
* vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
* @vma: user vma to map to
* @addr: target user address of this page
* @pfn: source kernel pfn
* @pgprot: pgprot flags for the inserted page
*
* This is exactly like vmf_insert_pfn(), except that it allows drivers
* to override pgprot on a per-page basis.
*
* This only makes sense for IO mappings, and it makes no sense for
* COW mappings. In general, using multiple vmas is preferable;
* vmf_insert_pfn_prot should only be used if using multiple VMAs is
* impractical.
*
* pgprot typically only differs from @vma->vm_page_prot when drivers set
* caching- and encryption bits different than those of @vma->vm_page_prot,
* because the caching- or encryption mode may not be known at mmap() time.
*
* This is ok as long as @vma->vm_page_prot is not used by the core vm
* to set caching and encryption bits for those vmas (except for COW pages).
* This is ensured by core vm only modifying these page table entries using
* functions that don't touch caching- or encryption bits, using pte_modify()
* if needed. (See for example mprotect()).
*
* Also when new page-table entries are created, this is only done using the
* fault() callback, and never using the value of vma->vm_page_prot,
* except for page-table entries that point to anonymous pages as the result
* of COW.
*
* Context: Process context. May allocate using %GFP_KERNEL.
* Return: vm_fault_t value.
*/
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot)
{
/*
* Technically, architectures with pte_special can avoid all these
* restrictions (same for remap_pfn_range). However we would like
* consistency in testing and feature parity among all, so we should
* try to keep these invariants in place for everybody.
*/
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
if (!pfn_modify_allowed(pfn, pgprot))
return VM_FAULT_SIGBUS;
pfnmap_setup_cachemode_pfn(pfn, &pgprot);
return insert_pfn(vma, addr, pfn, pgprot, false);
}
EXPORT_SYMBOL(vmf_insert_pfn_prot);
/**
* vmf_insert_pfn - insert single pfn into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @pfn: source kernel pfn
*
* Similar to vm_insert_page, this allows drivers to insert individual pages
* they've allocated into a user vma. Same comments apply.
*
* This function should only be called from a vm_ops->fault handler, and
* in that case the handler should return the result of this function.
*
* vma cannot be a COW mapping.
*
* As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB.
*
* Context: Process context. May allocate using %GFP_KERNEL.
* Return: vm_fault_t value.
*/
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vmf_insert_pfn);
static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn,
bool mkwrite)
{
if (unlikely(is_zero_pfn(pfn)) &&
(mkwrite || !vm_mixed_zeropage_allowed(vma)))
return false;
/* these checks mirror the abort conditions in vm_normal_page */
if (vma->vm_flags & VM_MIXEDMAP)
return true;
if (is_zero_pfn(pfn))
return true;
return false;
}
static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, bool mkwrite)
{
pgprot_t pgprot = vma->vm_page_prot;
int err;
if (!vm_mixed_ok(vma, pfn, mkwrite))
return VM_FAULT_SIGBUS;
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
pfnmap_setup_cachemode_pfn(pfn, &pgprot);
if (!pfn_modify_allowed(pfn, pgprot))
return VM_FAULT_SIGBUS;
/*
* If we don't have pte special, then we have to use the pfn_valid()
* based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
* refcount the page if pfn_valid is true (hence insert_page rather
* than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
* without pte special, it would then be refcounted as a normal page.
*/
if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) {
struct page *page;
/*
* At this point we are committed to insert_page()
* regardless of whether the caller specified flags that
* result in pfn_t_has_page() == false.
*/
page = pfn_to_page(pfn);
err = insert_page(vma, addr, page, pgprot, mkwrite);
} else {
return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
}
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err < 0 && err != -EBUSY)
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
bool write)
{
pgprot_t pgprot = vmf->vma->vm_page_prot;
unsigned long addr = vmf->address;
int err;
if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end)
return VM_FAULT_SIGBUS;
err = insert_page(vmf->vma, addr, page, pgprot, write);
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err < 0 && err != -EBUSY)
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
return __vm_insert_mixed(vma, addr, pfn, false);
}
EXPORT_SYMBOL(vmf_insert_mixed);
/*
* If the insertion of PTE failed because someone else already added a
* different entry in the mean time, we treat that as success as we assume
* the same entry was actually inserted.
*/
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
return __vm_insert_mixed(vma, addr, pfn, true);
}
/*
* Maps a range of physical memory into the requested pages. The old
* mappings are removed. Any references to nonexistent pages result
* in null mappings (currently treated as "copy-on-access").
*/
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pte_t *pte, *mapped_pte;
spinlock_t *ptl;
int err = 0;
mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(ptep_get(pte)));
if (!pfn_modify_allowed(pfn, prot)) {
err = -EACCES;
break;
}
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(mapped_pte, ptl);
return err;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
int err;
pfn -= addr >> PAGE_SHIFT;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
VM_BUG_ON(pmd_trans_huge(*pmd));
do {
next = pmd_addr_end(addr, end);
err = remap_pte_range(mm, pmd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
return err;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
int err;
pfn -= addr >> PAGE_SHIFT;
pud = pud_alloc(mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
err = remap_pmd_range(mm, pud, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
return err;
} while (pud++, addr = next, addr != end);
return 0;
}
static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
p4d_t *p4d;
unsigned long next;
int err;
pfn -= addr >> PAGE_SHIFT;
p4d = p4d_alloc(mm, pgd, addr);
if (!p4d)
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
err = remap_pud_range(mm, p4d, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
return err;
} while (p4d++, addr = next, addr != end);
return 0;
}
static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + PAGE_ALIGN(size);
struct mm_struct *mm = vma->vm_mm;
int err;
if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
return -EINVAL;
/*
* Physically remapped pages are special. Tell the
* rest of the world about it:
* VM_IO tells people not to look at these pages
* (accesses can have side effects).
* VM_PFNMAP tells the core MM that the base pages are just
* raw PFN mappings, and do not have a "struct page" associated
* with them.
* VM_DONTEXPAND
* Disable vma merging and expanding with mremap().
* VM_DONTDUMP
* Omit vma from core dump, even when VM_IO turned off.
*
* There's a horrible special case to handle copy-on-write
* behaviour that some programs depend on. We mark the "original"
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
}
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
do {
next = pgd_addr_end(addr, end);
err = remap_p4d_range(mm, pgd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
return err;
} while (pgd++, addr = next, addr != end);
return 0;
}
/*
* Variant of remap_pfn_range that does not call track_pfn_remap. The caller
* must have pre-validated the caching bits of the pgprot_t.
*/
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
if (!error)
return 0;
/*
* A partial pfn range mapping is dangerous: it does not
* maintain page reference counts, and callers may free
* pages due to the error. So zap it early.
*/
zap_page_range_single(vma, addr, size, NULL);
return error;
}
#ifdef __HAVE_PFNMAP_TRACKING
static inline struct pfnmap_track_ctx *pfnmap_track_ctx_alloc(unsigned long pfn,
unsigned long size, pgprot_t *prot)
{
struct pfnmap_track_ctx *ctx;
if (pfnmap_track(pfn, size, prot))
return ERR_PTR(-EINVAL);
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (unlikely(!ctx)) {
pfnmap_untrack(pfn, size);
return ERR_PTR(-ENOMEM);
}
ctx->pfn = pfn;
ctx->size = size;
kref_init(&ctx->kref);
return ctx;
}
void pfnmap_track_ctx_release(struct kref *ref)
{
struct pfnmap_track_ctx *ctx = container_of(ref, struct pfnmap_track_ctx, kref);
pfnmap_untrack(ctx->pfn, ctx->size);
kfree(ctx);
}
#endif /* __HAVE_PFNMAP_TRACKING */
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
* @addr: target page aligned user address to start at
* @pfn: page frame number of kernel physical memory address
* @size: size of mapping area
* @prot: page protection flags for this mapping
*
* Note: this is only safe if the mm semaphore is held when called.
*
* Return: %0 on success, negative error code otherwise.
*/
#ifdef __HAVE_PFNMAP_TRACKING
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
struct pfnmap_track_ctx *ctx = NULL;
int err;
size = PAGE_ALIGN(size);
/*
* If we cover the full VMA, we'll perform actual tracking, and
* remember to untrack when the last reference to our tracking
* context from a VMA goes away. We'll keep tracking the whole pfn
* range even during VMA splits and partial unmapping.
*
* If we only cover parts of the VMA, we'll only setup the cachemode
* in the pgprot for the pfn range.
*/
if (addr == vma->vm_start && addr + size == vma->vm_end) {
if (vma->pfnmap_track_ctx)
return -EINVAL;
ctx = pfnmap_track_ctx_alloc(pfn, size, &prot);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
} else if (pfnmap_setup_cachemode(pfn, size, &prot)) {
return -EINVAL;
}
err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
if (ctx) {
if (err)
kref_put(&ctx->kref, pfnmap_track_ctx_release);
else
vma->pfnmap_track_ctx = ctx;
}
return err;
}
#else
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
return remap_pfn_range_notrack(vma, addr, pfn, size, prot);
}
#endif
EXPORT_SYMBOL(remap_pfn_range);
/**
* vm_iomap_memory - remap memory to userspace
* @vma: user vma to map to
* @start: start of the physical memory to be mapped
* @len: size of area
*
* This is a simplified io_remap_pfn_range() for common driver use. The
* driver just needs to give us the physical memory range to be mapped,
* we'll figure out the rest from the vma information.
*
* NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
* whatever write-combining details or similar.
*
* Return: %0 on success, negative error code otherwise.
*/
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
unsigned long vm_len, pfn, pages;
/* Check that the physical memory area passed in looks valid */
if (start + len < start)
return -EINVAL;
/*
* You *really* shouldn't map things that aren't page-aligned,
* but we've historically allowed it because IO memory might
* just have smaller alignment.
*/
len += start & ~PAGE_MASK;
pfn = start >> PAGE_SHIFT;
pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
if (pfn + pages < pfn)
return -EINVAL;
/* We start the mapping 'vm_pgoff' pages into the area */
if (vma->vm_pgoff > pages)
return -EINVAL;
pfn += vma->vm_pgoff;
pages -= vma->vm_pgoff;
/* Can we fit all of the mapping? */
vm_len = vma->vm_end - vma->vm_start;
if (vm_len >> PAGE_SHIFT > pages)
return -EINVAL;
/* Ok, let it rip */
return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);
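/*
* Lowest level of the apply_to_page_range() machinery: map (and with @create,
* allocate) the pte table under one pmd and invoke @fn on each pte, or only
* on non-none ptes when not creating.
*/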
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data, bool create,
pgtbl_mod_mask *mask)
{
pte_t *pte, *mapped_pte;
int err = 0;
spinlock_t *ptl;
if (create) {
mapped_pte = pte = (mm == &init_mm) ?
pte_alloc_kernel_track(pmd, addr, mask) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
} else {
mapped_pte = pte = (mm == &init_mm) ?
pte_offset_kernel(pmd, addr) :
pte_offset_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -EINVAL;
}
arch_enter_lazy_mmu_mode();
if (fn) {
do {
if (create || !pte_none(ptep_get(pte))) {
err = fn(pte, addr, data);
if (err)
break;
}
} while (pte++, addr += PAGE_SIZE, addr != end);
}
*mask |= PGTBL_PTE_MODIFIED;
arch_leave_lazy_mmu_mode();
if (mm != &init_mm)
pte_unmap_unlock(mapped_pte, ptl);
return err;
}
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data, bool create,
pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
int err = 0;
BUG_ON(pud_leaf(*pud));
if (create) {
pmd = pmd_alloc_track(mm, pud, addr, mask);
if (!pmd)
return -ENOMEM;
} else {
pmd = pmd_offset(pud, addr);
}
do {
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) && !create)
continue;
if (WARN_ON_ONCE(pmd_leaf(*pmd)))
return -EINVAL;
if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
if (!create)
continue;
pmd_clear_bad(pmd);
}
err = apply_to_pte_range(mm, pmd, addr, next,
fn, data, create, mask);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}
static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data, bool create,
pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
int err = 0;
if (create) {
pud = pud_alloc_track(mm, p4d, addr, mask);
if (!pud)
return -ENOMEM;
} else {
pud = pud_offset(p4d, addr);
}
do {
next = pud_addr_end(addr, end);
if (pud_none(*pud) && !create)
continue;
if (WARN_ON_ONCE(pud_leaf(*pud)))
return -EINVAL;
if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
if (!create)
continue;
pud_clear_bad(pud);
}
err = apply_to_pmd_range(mm, pud, addr, next,
fn, data, create, mask);
if (err)
break;
} while (pud++, addr = next, addr != end);
return err;
}
static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data, bool create,
pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
int err = 0;
if (create) {
p4d = p4d_alloc_track(mm, pgd, addr, mask);
if (!p4d)
return -ENOMEM;
} else {
p4d = p4d_offset(pgd, addr);
}
do {
next = p4d_addr_end(addr, end);
if (p4d_none(*p4d) && !create)
continue;
if (WARN_ON_ONCE(p4d_leaf(*p4d)))
return -EINVAL;
if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
if (!create)
continue;
p4d_clear_bad(p4d);
}
err = apply_to_pud_range(mm, p4d, addr, next,
fn, data, create, mask);
if (err)
break;
} while (p4d++, addr = next, addr != end);
return err;
}
static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn,
void *data, bool create)
{
pgd_t *pgd;
unsigned long start = addr, next;
unsigned long end = addr + size;
pgtbl_mod_mask mask = 0;
int err = 0;
if (WARN_ON(addr >= end))
return -EINVAL;
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none(*pgd) && !create)
continue;
if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
err = -EINVAL;
break;
}
if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
if (!create)
continue;
pgd_clear_bad(pgd);
}
err = apply_to_p4d_range(mm, pgd, addr, next,
fn, data, create, &mask);
if (err)
break;
} while (pgd++, addr = next, addr != end);
if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
arch_sync_kernel_mappings(start, start + size);
return err;
}
/*
* Scan a region of virtual memory, filling in page tables as necessary
* and calling a provided function on each leaf page table.
*/
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn, void *data)
{
return __apply_to_page_range(mm, addr, size, fn, data, true);
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
/*
* Scan a region of virtual memory, calling a provided function on
* each leaf page table where it exists.
*
* Unlike apply_to_page_range, this does _not_ fill in page tables
* where they are absent.
*/
int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn, void *data)
{
return __apply_to_page_range(mm, addr, size, fn, data, false);
}
/*
* handle_pte_fault chooses page fault handler according to an entry which was
* read non-atomically. Before making any commitment, on those architectures
* or configurations (e.g. i386 with PAE) which might give a mix of unmatched
* parts, do_swap_page must check under lock before unmapping the pte and
* proceeding (but do_wp_page is only called after already making such a check;
* and do_anonymous_page can safely check later on).
*/
static inline int pte_unmap_same(struct vm_fault *vmf)
{
int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
if (sizeof(pte_t) > sizeof(unsigned long)) {
spin_lock(vmf->ptl);
same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
spin_unlock(vmf->ptl);
}
#endif
pte_unmap(vmf->pte);
vmf->pte = NULL;
return same;
}
/*
* Return:
* 0: copy succeeded
* -EHWPOISON: copy failed due to hwpoison in source page
* -EAGAIN: copy failed (some other reason)
*/
static inline int __wp_page_copy_user(struct page *dst, struct page *src,
struct vm_fault *vmf)
{
int ret;
void *kaddr;
void __user *uaddr;
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = vmf->address;
if (likely(src)) {
if (copy_mc_user_highpage(dst, src, addr, vma))
return -EHWPOISON;
return 0;
}
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
kaddr = kmap_local_page(dst);
pagefault_disable();
uaddr = (void __user *)(addr & PAGE_MASK);
/*
* On architectures with software "accessed" bits, we would
* take a double page fault, so mark it accessed here.
*/
vmf->pte = NULL;
if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
pte_t entry;
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
/*
* Another thread has already handled the fault;
* just update the local tlb.
*/
if (vmf->pte)
update_mmu_tlb(vma, addr, vmf->pte);
ret = -EAGAIN;
goto pte_unlock;
}
entry = pte_mkyoung(vmf->orig_pte);
if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
}
/*
* This really shouldn't fail, because the page is there
* in the page tables. But it might just be unreadable,
* in which case we just give up and fill the result with
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
if (vmf->pte)
goto warn;
/* Re-validate under PTL if the page is still mapped */
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
/* The PTE changed under us, update local tlb */
if (vmf->pte)
update_mmu_tlb(vma, addr, vmf->pte);
ret = -EAGAIN;
goto pte_unlock;
}
/*
* The same page can be mapped back since last copy attempt.
* Try to copy again under PTL.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
/*
* Warn in case there is some obscure use-case.
*/
warn:
WARN_ON_ONCE(1);
clear_page(kaddr);
}
}
ret = 0;
pte_unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
pagefault_enable();
kunmap_local(kaddr);
flush_dcache_page(dst);
return ret;
}
static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
{
struct file *vm_file = vma->vm_file;
if (vm_file)
return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
/*
* Special mappings (e.g. VDSO) do not have any file so fake
* a default GFP_KERNEL for them.
*/
return GFP_KERNEL;
}
/*
* Notify the address space that the page is about to become writable so that
* it can prohibit this or wait for the page to get into an appropriate state.
*
* We do this without the lock held, so that it can sleep if it needs to.
*/
static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
{
vm_fault_t ret;
unsigned int old_flags = vmf->flags;
vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
if (vmf->vma->vm_file &&
IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
return VM_FAULT_SIGBUS;
ret = vmf->vma->vm_ops->page_mkwrite(vmf);
/* Restore original flags so that caller is not surprised */
vmf->flags = old_flags;
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
return ret;
if (unlikely(!(ret & VM_FAULT_LOCKED))) {
folio_lock(folio);
if (!folio->mapping) {
folio_unlock(folio);
return 0; /* retry */
}
ret |= VM_FAULT_LOCKED;
} else
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
return ret;
}
/*
* Handle dirtying of a page in shared file mapping on a write fault.
*
* The function expects the page to be locked and unlocks it.
*/
static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping;
struct folio *folio = page_folio(vmf->page);
bool dirtied;
bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
dirtied = folio_mark_dirty(folio);
VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
/*
* Take a local copy of the address_space - folio.mapping may be zeroed
* by truncate after folio_unlock(). The address_space itself remains
* pinned by vma->vm_file's reference. We rely on folio_unlock()'s
* release semantics to prevent the compiler from undoing this copying.
*/
mapping = folio_raw_mapping(folio);
folio_unlock(folio);
if (!page_mkwrite)
file_update_time(vma->vm_file);
/*
* Throttle page dirtying rate down to writeback speed.
*
* mapping may be NULL here because some device drivers do not
* set page.mapping but still dirty their pages
*
* Drop the mmap_lock before waiting on IO, if we can. The file
* is pinning the mapping, as per above.
*/
if ((dirtied || page_mkwrite) && mapping) {
struct file *fpin;
fpin = maybe_unlock_mmap_for_io(vmf, NULL);
balance_dirty_pages_ratelimited(mapping);
if (fpin) {
fput(fpin);
return VM_FAULT_COMPLETED;
}
}
return 0;
}
/*
* Handle write page faults for pages that can be reused in the current vma
*
* This can happen either because the mapping is shared (VM_SHARED), or
* because ours is the last remaining reference to the page. In either
* case, all we need to do here is to mark the page as writable and update
* any related book-keeping.
*/
static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
__releases(vmf->ptl)
{
struct vm_area_struct *vma = vmf->vma;
pte_t entry;
VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
if (folio) {
VM_BUG_ON(folio_test_anon(folio) &&
!PageAnonExclusive(vmf->page));
/*
* Clear the folio's cpupid information as the existing
* information potentially belongs to a now completely
* unrelated process.
*/
folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
}
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
entry = pte_mkyoung(vmf->orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
pte_unmap_unlock(vmf->pte, vmf->ptl);
count_vm_event(PGREUSE);
}
/*
* We could add a bitflag somewhere, but for now, we know that all
* vm_ops that have a ->map_pages have been audited and don't need
* the mmap_lock to be held.
*/
static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
return 0;
vma_end_read(vma);
return VM_FAULT_RETRY;
}
/**
* __vmf_anon_prepare - Prepare to handle an anonymous fault.
* @vmf: The vm_fault descriptor passed from the fault handler.
*
* When preparing to insert an anonymous page into a VMA from a
* fault handler, call this function rather than anon_vma_prepare().
* If this vma does not already have an associated anon_vma and we are
* only protected by the per-VMA lock, the caller must retry with the
* mmap_lock held. __anon_vma_prepare() will look at adjacent VMAs to
* determine if this VMA can share its anon_vma, and that's not safe to
* do with only the per-VMA lock held for this VMA.
*
* Return: 0 if fault handling can proceed. Any other value should be
* returned to the caller.
*/
vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret = 0;
if (likely(vma->anon_vma))
return 0;
if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
if (!mmap_read_trylock(vma->vm_mm))
return VM_FAULT_RETRY;
}
if (__anon_vma_prepare(vma))
ret = VM_FAULT_OOM;
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
mmap_read_unlock(vma->vm_mm);
return ret;
}
/*
* Handle the case of a page which we actually need to copy to a new page,
* either due to COW or unsharing.
*
* Called with mmap_lock locked and the old page referenced, but
* without the ptl held.
*
* High level logic flow:
*
* - Allocate a page, copy the content of the old page to the new one.
* - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
* - Take the PTL. If the pte changed, bail out and release the allocated page
* - If the pte is still the way we remember it, update the page table and all
* relevant references. This includes dropping the reference the page-table
* held to the old page, as well as updating the rmap.
* - In any case, unlock the PTL and drop the reference we took to the old page.
*/
static vm_fault_t wp_page_copy(struct vm_fault *vmf)
{
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
struct folio *old_folio = NULL;
struct folio *new_folio = NULL;
pte_t entry;
int page_copied = 0;
struct mmu_notifier_range range;
vm_fault_t ret;
bool pfn_is_zero;
delayacct_wpcopy_start();
if (vmf->page)
old_folio = page_folio(vmf->page);
ret = vmf_anon_prepare(vmf);
if (unlikely(ret))
goto out;
pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
if (!new_folio)
goto oom;
if (!pfn_is_zero) {
int err;
err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
if (err) {
/*
* COW failed; if the fault has meanwhile been resolved
* by someone else, that's fine. If not, userspace will
* re-fault on the same address and we will handle the
* fault on the second attempt.
* The -EHWPOISON case will not be retried.
*/
folio_put(new_folio);
if (old_folio)
folio_put(old_folio);
delayacct_wpcopy_end();
return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
}
kmsan_copy_page_meta(&new_folio->page, vmf->page);
}
__folio_mark_uptodate(new_folio);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
vmf->address & PAGE_MASK,
(vmf->address & PAGE_MASK) + PAGE_SIZE);
mmu_notifier_invalidate_range_start(&range);
/*
* Re-check the pte - we dropped the lock
*/
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
if (old_folio) {
if (!folio_test_anon(old_folio)) {
dec_mm_counter(mm, mm_counter_file(old_folio));
inc_mm_counter(mm, MM_ANONPAGES);
}
} else {
ksm_might_unmap_zero_page(mm, vmf->orig_pte);
inc_mm_counter(mm, MM_ANONPAGES);
}
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
entry = folio_mk_pte(new_folio, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
if (unlikely(unshare)) {
if (pte_soft_dirty(vmf->orig_pte))
entry = pte_mksoft_dirty(entry);
if (pte_uffd_wp(vmf->orig_pte))
entry = pte_mkuffd_wp(entry);
} else {
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
}
/*
* Clear the pte entry and flush it first, before updating the
* pte with the new entry, to keep TLBs on different CPUs in
* sync. This code used to set the new PTE then flush TLBs, but
* that left a window where the new PTE could be loaded into
* some TLBs while the old PTE remains in others.
*/
ptep_clear_flush(vma, vmf->address, vmf->pte);
folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, vma);
BUG_ON(unshare && pte_write(entry));
set_pte_at(mm, vmf->address, vmf->pte, entry);
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
if (old_folio) {
/*
* Only after switching the pte to the new page may
* we remove the mapcount here. Otherwise another
* process may come and find the rmap count decremented
* before the pte is switched to the new page, and
* "reuse" the old page writing into it while our pte
* here still points into it and can be read by other
* threads.
*
* The critical issue is to order this
* folio_remove_rmap_pte() with the ptep_clear_flush
* above. Those stores are ordered by (if nothing else)
* the barrier present in the atomic_add_negative
* in folio_remove_rmap_pte();
*
* Then the TLB flush in ptep_clear_flush ensures that
* no process can access the old page before the
* decremented mapcount is visible. And the old page
* cannot be reused until after the decremented
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
folio_remove_rmap_pte(old_folio, vmf->page, vma);
}
/* Free the old page.. */
new_folio = old_folio;
page_copied = 1;
pte_unmap_unlock(vmf->pte, vmf->ptl);
} else if (vmf->pte) {
update_mmu_tlb(vma, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
}
mmu_notifier_invalidate_range_end(&range);
if (new_folio)
folio_put(new_folio);
if (old_folio) {
if (page_copied)
free_swap_cache(old_folio);
folio_put(old_folio);
}
delayacct_wpcopy_end();
return 0;
oom:
ret = VM_FAULT_OOM;
out:
if (old_folio)
folio_put(old_folio);
delayacct_wpcopy_end();
return ret;
}
/**
* finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
* writeable once the page is prepared
*
* @vmf: structure describing the fault
* @folio: the folio of vmf->page
*
* This function handles all that is needed to finish a write page fault in a
* shared mapping due to PTE being read-only once the mapped page is prepared.
* It handles locking of PTE and modifying it.
*
* The function expects the page to be locked or other protection against
* concurrent faults / writeback (such as DAX radix tree locks).
*
* Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
* we acquired PTE lock.
*/
static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
{
WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (!vmf->pte)
return VM_FAULT_NOPAGE;
/*
* We might have raced with another page fault while we released the
* pte_offset_map_lock.
*/
if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
return VM_FAULT_NOPAGE;
}
wp_page_reuse(vmf, folio);
return 0;
}
/*
* Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
* mapping
*/
static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
vm_fault_t ret;
pte_unmap_unlock(vmf->pte, vmf->ptl);
ret = vmf_can_call_fault(vmf);
if (ret)
return ret;
vmf->flags |= FAULT_FLAG_MKWRITE;
ret = vma->vm_ops->pfn_mkwrite(vmf);
if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
return ret;
return finish_mkwrite_fault(vmf, NULL);
}
wp_page_reuse(vmf, NULL);
return 0;
}
static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
__releases(vmf->ptl)
{
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret = 0;
folio_get(folio);
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
vm_fault_t tmp;
pte_unmap_unlock(vmf->pte, vmf->ptl);
tmp = vmf_can_call_fault(vmf);
if (tmp) {
folio_put(folio);
return tmp;
}
tmp = do_page_mkwrite(vmf, folio);
if (unlikely(!tmp || (tmp &
(VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
folio_put(folio);
return tmp;
}
tmp = finish_mkwrite_fault(vmf, folio);
if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
folio_unlock(folio);
folio_put(folio);
return tmp;
}
} else {
wp_page_reuse(vmf, folio);
folio_lock(folio);
}
ret |= fault_dirty_shared_page(vmf);
folio_put(folio);
return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
struct vm_area_struct *vma)
{
bool exclusive = false;
/* Let's just free up a large folio if only a single page is mapped. */
if (folio_large_mapcount(folio) <= 1)
return false;
/*
* The assumption for anonymous folios is that each page can only get
* mapped once into each MM. The only exception are KSM folios, which
* are always small.
*
* Each taken mapcount must be paired with exactly one taken reference,
* whereby the refcount must be incremented before the mapcount when
* mapping a page, and the refcount must be decremented after the
* mapcount when unmapping a page.
*
* If all folio references are from mappings, and all mappings are in
* the page tables of this MM, then this folio is exclusive to this MM.
*/
if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
return false;
VM_WARN_ON_ONCE(folio_test_ksm(folio));
if (unlikely(folio_test_swapcache(folio))) {
/*
* Note: freeing up the swapcache will fail if some PTEs are
* still swap entries.
*/
if (!folio_trylock(folio))
return false;
folio_free_swap(folio);
folio_unlock(folio);
}
if (folio_large_mapcount(folio) != folio_ref_count(folio))
return false;
/* Stabilize the mapcount vs. refcount and recheck. */
folio_lock_large_mapcount(folio);
VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_ref_count(folio), folio);
if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
goto unlock;
if (folio_large_mapcount(folio) != folio_ref_count(folio))
goto unlock;
VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_nr_pages(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_entire_mapcount(folio), folio);
VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id &&
folio_mm_id(folio, 1) != vma->vm_mm->mm_id);
/*
* Do we need the folio lock? Likely not. If there would have been
* references from page migration/swapout, we would have detected
* an additional folio reference and never ended up here.
*/
exclusive = true;
unlock:
folio_unlock_large_mapcount(folio);
return exclusive;
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
struct vm_area_struct *vma)
{
BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static bool wp_can_reuse_anon_folio(struct folio *folio,
struct vm_area_struct *vma)
{
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio))
return __wp_can_reuse_large_anon_folio(folio, vma);
/*
* We have to verify under folio lock: these early checks are
* just an optimization to avoid locking the folio and freeing
* the swapcache if there is little hope that we can reuse.
*
* KSM doesn't necessarily raise the folio refcount.
*/
if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
return false;
if (!folio_test_lru(folio))
/*
* We cannot easily detect+handle references from
* remote LRU caches or references to LRU folios.
*/
lru_add_drain();
if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
return false;
if (!folio_trylock(folio))
return false;
if (folio_test_swapcache(folio))
folio_free_swap(folio);
if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
folio_unlock(folio);
return false;
}
/*
* Ok, we've got the only folio reference from our mapping
* and the folio is locked, it's dark out, and we're wearing
* sunglasses. Hit it.
*/
folio_move_anon_rmap(folio, vma);
folio_unlock(folio);
return true;
}
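/*
 * Illustrative example: a small anon folio that is mapped exactly once,
 * is not KSM and is not in the swapcache holds only the reference taken
 * by that single mapping, so after the trylock folio_ref_count() == 1
 * and the folio is reused in place. Any additional reference (a GUP
 * pin, a remote LRU cache, a swapcache entry that could not be freed)
 * pushes the count above 1 and forces the copy path instead.
 */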
/*
* This routine handles present pages, when
* * users try to write to a shared page (FAULT_FLAG_WRITE)
* * GUP wants to take a R/O pin on a possibly shared anonymous page
* (FAULT_FLAG_UNSHARE)
*
* It is done by copying the page to a new address and decrementing the
* shared-page counter for the old page.
*
* Note that this routine assumes that the protection checks have been
* done by the caller (the low-level page fault routine in most cases).
* Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
* done any necessary COW.
*
* In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
* though the page will change only once the write actually happens. This
* avoids a few races, and potentially makes it more efficient.
*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), with pte both mapped and locked.
* We return with mmap_lock still held, but pte unmapped and unlocked.
*/
static vm_fault_t do_wp_page(struct vm_fault *vmf)
__releases(vmf->ptl)
{
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
struct vm_area_struct *vma = vmf->vma;
struct folio *folio = NULL;
pte_t pte;
if (likely(!unshare)) {
if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
if (!userfaultfd_wp_async(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return handle_userfault(vmf, VM_UFFD_WP);
}
/*
* Nothing needed (cache flush, TLB invalidations,
* etc.) because we're only removing the uffd-wp bit,
* which is completely invisible to the user.
*/
pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
/*
* Update this to be prepared for following up CoW
* handling
*/
vmf->orig_pte = pte;
}
/*
* Userfaultfd write-protect can defer flushes. Ensure the TLB
* is flushed in this case before copying.
*/
if (unlikely(userfaultfd_wp(vmf->vma) &&
mm_tlb_flush_pending(vmf->vma->vm_mm)))
flush_tlb_page(vmf->vma, vmf->address);
}
vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
if (vmf->page)
folio = page_folio(vmf->page);
/*
* Shared mapping: we are guaranteed to have VM_WRITE and
* FAULT_FLAG_WRITE set at this point.
*/
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
/*
* VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
* VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called.
*
* We should not cow pages in a shared writeable mapping.
* Just mark the pages writable and/or call ops->pfn_mkwrite.
*/
if (!vmf->page || is_fsdax_page(vmf->page)) {
vmf->page = NULL;
return wp_pfn_shared(vmf);
}
return wp_page_shared(vmf, folio);
}
/*
* Private mapping: create an exclusive anonymous page copy if reuse
* is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
*
* If we encounter a page that is marked exclusive, we must reuse
* the page without further checks.
*/
if (folio && folio_test_anon(folio) &&
    (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
if (!PageAnonExclusive(vmf->page))
SetPageAnonExclusive(vmf->page);
if (unlikely(unshare)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
wp_page_reuse(vmf, folio);
return 0;
}
/*
* Ok, we need to copy. Oh, well..
*/
if (folio)
folio_get(folio);
pte_unmap_unlock(vmf->pte, vmf->ptl);
#ifdef CONFIG_KSM
if (folio && folio_test_ksm(folio))
count_vm_event(COW_KSM);
#endif
return wp_page_copy(vmf);
}
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
unsigned long start_addr, unsigned long end_addr,
struct zap_details *details)
{
zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}
static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
pgoff_t first_index,
pgoff_t last_index,
struct zap_details *details)
{
struct vm_area_struct *vma;
pgoff_t vba, vea, zba, zea;
vma_interval_tree_foreach(vma, root, first_index, last_index) {
vba = vma->vm_pgoff;
vea = vba + vma_pages(vma) - 1;
zba = max(first_index, vba);
zea = min(last_index, vea);
unmap_mapping_range_vma(vma,
((zba - vba) << PAGE_SHIFT) + vma->vm_start,
((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
details);
}
}
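/*
 * Worked example of the pgoff-to-address arithmetic above (numbers are
 * illustrative): a VMA with vm_pgoff == 100 covering 50 pages spans
 * file pages 100..149 (vba..vea). Zapping file pages 120..200 clamps
 * to zba == 120 and zea == 149, so the virtual range passed down is
 * vm_start + 20 pages up to (but not including) vm_start + 50 pages.
 */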
/**
* unmap_mapping_folio() - Unmap single folio from processes.
* @folio: The locked folio to be unmapped.
*
* Unmap this folio from any userspace process which still has it mmaped.
* Typically, for efficiency, the range of nearby pages has already been
* unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
* truncation or invalidation holds the lock on a folio, it may find that
* the page has been remapped again: and then uses unmap_mapping_folio()
* to unmap it finally.
*/
void unmap_mapping_folio(struct folio *folio)
{
struct address_space *mapping = folio->mapping;
struct zap_details details = { };
pgoff_t first_index;
pgoff_t last_index;
VM_BUG_ON(!folio_test_locked(folio));
first_index = folio->index;
last_index = folio_next_index(folio) - 1;
details.even_cows = false;
details.single_folio = folio;
details.zap_flags = ZAP_FLAG_DROP_MARKER;
i_mmap_lock_read(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
unmap_mapping_range_tree(&mapping->i_mmap, first_index,
last_index, &details);
i_mmap_unlock_read(mapping);
}
/**
* unmap_mapping_pages() - Unmap pages from processes.
* @mapping: The address space containing pages to be unmapped.
* @start: Index of first page to be unmapped.
* @nr: Number of pages to be unmapped. 0 to unmap to end of file.
* @even_cows: Whether to unmap even private COWed pages.
*
* Unmap the pages in this address space from any userspace process which
* has them mmaped. Generally, you want to remove COWed pages as well when
* a file is being truncated, but not when invalidating pages from the page
* cache.
*/
void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
pgoff_t nr, bool even_cows)
{
struct zap_details details = { };
pgoff_t first_index = start;
pgoff_t last_index = start + nr - 1;
details.even_cows = even_cows;
if (last_index < first_index)
last_index = ULONG_MAX;
i_mmap_lock_read(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
unmap_mapping_range_tree(&mapping->i_mmap, first_index,
last_index, &details);
i_mmap_unlock_read(mapping);
}
EXPORT_SYMBOL_GPL(unmap_mapping_pages);
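/*
 * Worked example of the nr == 0 convention (illustrative): start == 5
 * with nr == 0 gives last_index == 4, which is below first_index, so
 * last_index is widened to ULONG_MAX and everything from page 5 to the
 * end of the file is unmapped.
 */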
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified
* address_space corresponding to the specified byte range in the underlying
* file.
*
* @mapping: the address space containing mmaps to be unmapped.
* @holebegin: byte in first page to unmap, relative to the start of
* the underlying file. This will be rounded down to a PAGE_SIZE
* boundary. Note that this is different from truncate_pagecache(), which
* must keep the partial page. In contrast, we must get rid of
* partial pages.
* @holelen: size of prospective hole in bytes. This will be rounded
* up to a PAGE_SIZE boundary. A holelen of zero truncates to the
* end of the file.
* @even_cows: 1 when truncating a file, unmap even private COWed pages;
* but 0 when invalidating pagecache, don't throw away private data.
*/
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Check for overflow. */
if (sizeof(holelen) > sizeof(hlen)) {
long long holeend =
(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (holeend & ~(long long)ULONG_MAX)
hlen = ULONG_MAX - hba + 1;
}
unmap_mapping_pages(mapping, hba, hlen, even_cows);
}
EXPORT_SYMBOL(unmap_mapping_range);
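/*
 * Worked example of the rounding above (illustrative, 4K pages):
 * holebegin == 6000 rounds down to hba == 1 (byte 6000 lies in page 1)
 * and holelen == 100 rounds up to hlen == 1, so exactly one page is
 * unmapped. A holelen of 0 yields hlen == 0, which unmap_mapping_pages()
 * treats as "unmap to the end of the file".
 */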
/*
* Restore a potential device exclusive pte to a working pte entry
*/
static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
{
struct folio *folio = page_folio(vmf->page);
struct vm_area_struct *vma = vmf->vma;
struct mmu_notifier_range range;
vm_fault_t ret;
/*
* We need a reference to lock the folio because we don't hold
* the PTL so a racing thread can remove the device-exclusive
* entry and unmap it. If the folio is free the entry must
* have been removed already. If it happens to have already
* been re-allocated after being freed all we do is lock and
* unlock it.
*/
if (!folio_try_get(folio))
return 0;
ret = folio_lock_or_retry(folio, vmf);
if (ret) {
folio_put(folio);
return ret;
}
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0,
vma->vm_mm, vmf->address & PAGE_MASK,
(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
mmu_notifier_invalidate_range_start(&range);
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
restore_exclusive_pte(vma, folio, vmf->page, vmf->address,
vmf->pte, vmf->orig_pte);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
folio_unlock(folio);
folio_put(folio);
mmu_notifier_invalidate_range_end(&range);
return 0;
}
static inline bool should_try_to_free_swap(struct folio *folio,
struct vm_area_struct *vma,
unsigned int fault_flags)
{
if (!folio_test_swapcache(folio))
return false;
if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
folio_test_mlocked(folio))
return true;
/*
* If we want to map a page that's in the swapcache writable, we
* have to detect via the refcount if we're really the exclusive
* user. Try freeing the swapcache to get rid of the swapcache
* reference only in case it's likely that we'll be the exclusive user.
*/
return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
folio_ref_count(folio) == (1 + folio_nr_pages(folio));
}
static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
{
vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (!vmf->pte)
return 0;
/*
* Be careful so that we will only recover a special uffd-wp pte into a
* none pte. Otherwise it means the pte could have changed, so retry.
*
* This should also cover the case where e.g. the pte changed
* quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
* So is_pte_marker() check is not enough to safely drop the pte.
*/
if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
static vm_fault_t do_pte_missing(struct vm_fault *vmf)
{
if (vma_is_anonymous(vmf->vma))
return do_anonymous_page(vmf);
else
return do_fault(vmf);
}
/*
* This is actually a page-missing access, but with uffd-wp special pte
* installed. It means this pte was wr-protected before being unmapped.
*/
static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
{
/*
* Just in case there're leftover special ptes even after the region
* got unregistered - we can simply clear them.
*/
if (unlikely(!userfaultfd_wp(vmf->vma)))
return pte_marker_clear(vmf);
return do_pte_missing(vmf);
}
static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
{
swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
unsigned long marker = pte_marker_get(entry);
/*
* PTE markers should never be empty. If anything weird happened,
* the best thing to do is to kill the process along with its mm.
*/
if (WARN_ON_ONCE(!marker))
return VM_FAULT_SIGBUS;
/* Higher priority than uffd-wp when data corrupted */
if (marker & PTE_MARKER_POISONED)
return VM_FAULT_HWPOISON;
/* Hitting a guard page is always a fatal condition. */
if (marker & PTE_MARKER_GUARD)
return VM_FAULT_SIGSEGV;
if (pte_marker_entry_uffd_wp(entry))
return pte_marker_handle_uffd_wp(vmf);
/* This is an unknown pte marker */
return VM_FAULT_SIGBUS;
}
static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct folio *folio;
swp_entry_t entry;
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
if (!folio)
return NULL;
entry = pte_to_swp_entry(vmf->orig_pte);
if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
GFP_KERNEL, entry)) {
folio_put(folio);
return NULL;
}
return folio;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* Check if the PTEs within a range are contiguous swap entries
* and have consistent swapcache, zeromap.
*/
static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
{
unsigned long addr;
swp_entry_t entry;
int idx;
pte_t pte;
addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
idx = (vmf->address - addr) / PAGE_SIZE;
pte = ptep_get(ptep);
if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
return false;
entry = pte_to_swp_entry(pte);
if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
return false;
/*
* swap_read_folio() can't handle a large folio whose pages are backed
* by a mix of different backends; such hybrid cases are likely rare
* corner cases. Similar checks might be added once zswap supports
* large folios.
*/
if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
return false;
if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
return false;
return true;
}
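/*
 * Worked example (illustrative): for nr_pages == 16 and a fault 5 pages
 * into the naturally aligned 16-page range, idx == 5, so the first PTE
 * of the range must equal orig_pte with its swap offset moved back by 5,
 * and the 16 PTEs starting there must form one contiguous swap-entry
 * batch with consistent zeromap and swapcache state.
 */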
static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
unsigned long addr,
unsigned long orders)
{
int order, nr;
order = highest_order(orders);
/*
* To swap in a THP with nr pages, we require that its first swap_offset
* is aligned with that number, as it was when the THP was swapped out.
* This helps filter out most invalid entries.
*/
while (orders) {
nr = 1 << order;
if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
break;
order = next_order(&orders, order);
}
return orders;
}
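/*
 * Worked example of the alignment filter above (illustrative numbers):
 * with the faulting address at page 12 of the address space and a swap
 * offset of 20, order 4 fails the check (12 % 16 == 12, 20 % 16 == 4)
 * and is dropped, while order 3 matches (12 % 8 == 20 % 8 == 4), so the
 * loop stops there and order 3 plus the remaining lower orders survive
 * (lower orders match automatically once a higher one does).
 */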
static struct folio *alloc_swap_folio(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long orders;
struct folio *folio;
unsigned long addr;
swp_entry_t entry;
spinlock_t *ptl;
pte_t *pte;
gfp_t gfp;
int order;
/*
* If uffd is active for the vma we need per-page fault fidelity to
* maintain the uffd semantics.
*/
if (unlikely(userfaultfd_armed(vma)))
goto fallback;
/*
* A large swapped out folio could be partially or fully in zswap. We
* lack handling for such cases, so fallback to swapping in order-0
* folio.
*/
if (!zswap_never_enabled())
goto fallback;
entry = pte_to_swp_entry(vmf->orig_pte);
/*
* Get a list of all the (large) orders below PMD_ORDER that are enabled
* and suitable for swapping THP.
*/
orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
BIT(PMD_ORDER) - 1);
orders = thp_vma_suitable_orders(vma, vmf->address, orders);
orders = thp_swap_suitable_orders(swp_offset(entry),
vmf->address, orders);
if (!orders)
goto fallback;
pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
vmf->address & PMD_MASK, &ptl);
if (unlikely(!pte))
goto fallback;
/*
* For do_swap_page, find the highest order where the aligned range is
* completely swap entries with contiguous swap offsets.
*/
order = highest_order(orders);
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
break;
order = next_order(&orders, order);
}
pte_unmap_unlock(pte, ptl);
/* Try allocating the highest of the remaining orders. */
gfp = vma_thp_gfp_mask(vma);
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
folio = vma_alloc_folio(gfp, order, vma, addr);
if (folio) {
if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
gfp, entry))
return folio;
count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
folio_put(folio);
}
count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
order = next_order(&orders, order);
}
fallback:
return __alloc_swap_folio(vmf);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static struct folio *alloc_swap_folio(struct vm_fault *vmf)
{
return __alloc_swap_folio(vmf);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
*
* We return with the mmap_lock locked or unlocked in the same cases
* as does filemap_fault().
*/
vm_fault_t do_swap_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct folio *swapcache, *folio = NULL;
DECLARE_WAITQUEUE(wait, current);
struct page *page;
struct swap_info_struct *si = NULL;
rmap_t rmap_flags = RMAP_NONE;
bool need_clear_cache = false;
bool exclusive = false;
swp_entry_t entry;
pte_t pte;
vm_fault_t ret = 0;
void *shadow = NULL;
int nr_pages;
unsigned long page_idx;
unsigned long address;
pte_t *ptep;
if (!pte_unmap_same(vmf))
goto out;
entry = pte_to_swp_entry(vmf->orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (is_migration_entry(entry)) {
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);
} else if (is_device_exclusive_entry(entry)) {
vmf->page = pfn_swap_entry_to_page(entry);
ret = remove_device_exclusive_entry(vmf);
} else if (is_device_private_entry(entry)) {
if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
/*
* migrate_to_ram is not yet ready to operate
* under VMA lock.
*/
vma_end_read(vma);
ret = VM_FAULT_RETRY;
goto out;
}
vmf->page = pfn_swap_entry_to_page(entry);
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (unlikely(!vmf->pte ||
!pte_same(ptep_get(vmf->pte),
vmf->orig_pte)))
goto unlock;
/*
* Get a page reference while we know the page can't be
* freed.
*/
if (trylock_page(vmf->page)) {
struct dev_pagemap *pgmap;
get_page(vmf->page);
pte_unmap_unlock(vmf->pte, vmf->ptl);
pgmap = page_pgmap(vmf->page);
ret = pgmap->ops->migrate_to_ram(vmf);
unlock_page(vmf->page);
put_page(vmf->page);
} else {
pte_unmap_unlock(vmf->pte, vmf->ptl);
}
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else if (is_pte_marker_entry(entry)) {
ret = handle_pte_marker(vmf);
} else {
print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
ret = VM_FAULT_SIGBUS;
}
goto out;
}
/* Prevent swapoff from happening to us. */
si = get_swap_device(entry);
if (unlikely(!si))
goto out;
folio = swap_cache_get_folio(entry);
if (folio)
swap_update_readahead(folio, vma, vmf->address);
swapcache = folio;
if (!folio) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
/* skip swapcache */
folio = alloc_swap_folio(vmf);
if (folio) {
__folio_set_locked(folio);
__folio_set_swapbacked(folio);
nr_pages = folio_nr_pages(folio);
if (folio_test_large(folio))
entry.val = ALIGN_DOWN(entry.val, nr_pages);
/*
* Prevent parallel swapin from proceeding with
* the cache flag. Otherwise, another thread
* may finish swapin first, free the entry, and
* swapout reusing the same entry. It's
* undetectable as pte_same() returns true due
* to entry reuse.
*/
if (swapcache_prepare(entry, nr_pages)) {
/*
* Relax a bit to prevent rapid
* repeated page faults.
*/
add_wait_queue(&swapcache_wq, &wait);
schedule_timeout_uninterruptible(1);
remove_wait_queue(&swapcache_wq, &wait);
goto out_page;
}
need_clear_cache = true;
memcg1_swapin(entry, nr_pages);
shadow = swap_cache_get_shadow(entry);
if (shadow)
workingset_refault(folio, shadow);
folio_add_lru(folio);
/* To provide entry to swap_read_folio() */
folio->swap = entry;
swap_read_folio(folio, NULL);
folio->private = NULL;
}
} else {
folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
vmf);
swapcache = folio;
}
if (!folio) {
/*
* Back out if somebody else faulted in this pte
* while we released the pte lock.
*/
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (likely(vmf->pte &&
pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
ret = VM_FAULT_OOM;
goto unlock;
}
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
}
ret |= folio_lock_or_retry(folio, vmf);
if (ret & VM_FAULT_RETRY)
goto out_release;
page = folio_file_page(folio, swp_offset(entry));
if (swapcache) {
/*
* Make sure folio_free_swap() or swapoff did not release the
* swapcache from under us. The page pin, and pte_same test
* below, are not enough to exclude that. Even if it is still
* swapcache, we need to check that the page's swap has not
* changed.
*/
if (unlikely(!folio_matches_swap_entry(folio, entry)))
goto out_page;
if (unlikely(PageHWPoison(page))) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
* owner processes (which may be unknown at hwpoison time)
*/
ret = VM_FAULT_HWPOISON;
goto out_page;
}
/*
* KSM sometimes has to copy on read faults, for example, if
* folio->index of non-ksm folios would be nonlinear inside the
* anon VMA -- the ksm flag is lost on actual swapout.
*/
folio = ksm_might_need_to_copy(folio, vma, vmf->address);
if (unlikely(!folio)) {
ret = VM_FAULT_OOM;
folio = swapcache;
goto out_page;
} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
ret = VM_FAULT_HWPOISON;
folio = swapcache;
goto out_page;
}
if (folio != swapcache)
page = folio_page(folio, 0);
/*
* If we want to map a page that's in the swapcache writable, we
* have to detect via the refcount if we're really the exclusive
* owner. Try removing the extra reference from the local LRU
* caches if required.
*/
if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
!folio_test_ksm(folio) && !folio_test_lru(folio))
lru_add_drain();
}
folio_throttle_swaprate(folio, GFP_KERNEL);
/*
* Back out if somebody else already faulted in this pte.
*/
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
goto out_nomap;
if (unlikely(!folio_test_uptodate(folio))) {
ret = VM_FAULT_SIGBUS;
goto out_nomap;
}
/* allocated large folios for SWP_SYNCHRONOUS_IO */
if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
unsigned long nr = folio_nr_pages(folio);
unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
pte_t *folio_ptep = vmf->pte - idx;
pte_t folio_pte = ptep_get(folio_ptep);
if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
goto out_nomap;
page_idx = idx;
address = folio_start;
ptep = folio_ptep;
goto check_folio;
}
nr_pages = 1;
page_idx = 0;
address = vmf->address;
ptep = vmf->pte;
if (folio_test_large(folio) && folio_test_swapcache(folio)) {
int nr = folio_nr_pages(folio);
unsigned long idx = folio_page_idx(folio, page);
unsigned long folio_start = address - idx * PAGE_SIZE;
unsigned long folio_end = folio_start + nr * PAGE_SIZE;
pte_t *folio_ptep;
pte_t folio_pte;
if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
goto check_folio;
if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
goto check_folio;
folio_ptep = vmf->pte - idx;
folio_pte = ptep_get(folio_ptep);
if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
goto check_folio;
page_idx = idx;
address = folio_start;
ptep = folio_ptep;
nr_pages = nr;
entry = folio->swap;
page = &folio->page;
}
check_folio:
/*
* PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
* must never point at an anonymous page in the swapcache that is
* PG_anon_exclusive. Sanity check that this holds and especially, that
* no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
* check after taking the PT lock and making sure that nobody
* concurrently faulted in this page and set PG_anon_exclusive.
*/
BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
/*
* Check under PT lock (to protect against concurrent fork() sharing
* the swap entry concurrently) for certainly exclusive pages.
*/
if (!folio_test_ksm(folio)) {
exclusive = pte_swp_exclusive(vmf->orig_pte);
if (folio != swapcache) {
/*
* We have a fresh page that is not exposed to the
* swapcache -> certainly exclusive.
*/
exclusive = true;
} else if (exclusive && folio_test_writeback(folio) &&
data_race(si->flags & SWP_STABLE_WRITES)) {
/*
* This is tricky: not all swap backends support
* concurrent page modifications while under writeback.
*
* So if we stumble over such a page in the swapcache
* we must not set the page exclusive, otherwise we can
* map it writable without further checks and modify it
* while still under writeback.
*
* For these problematic swap backends, simply drop the
* exclusive marker: this is perfectly fine as we start
* writeback only if we fully unmapped the page and
* there are no unexpected references on the page after
* unmapping succeeded. After fully unmapped, no
* further GUP references (FOLL_GET and FOLL_PIN) can
* appear, so dropping the exclusive marker and mapping
* it only R/O is fine.
*/
exclusive = false;
}
}
/*
* Some architectures may have to restore extra metadata to the page
* when reading from swap. This metadata may be indexed by swap entry
* so this must be called before swap_free().
*/
arch_swap_restore(folio_swap(entry, folio), folio);
/*
* Remove the swap entry and conditionally try to free up the swapcache.
* We're already holding a reference on the page but haven't mapped it
* yet.
*/
swap_free_nr(entry, nr_pages);
if (should_try_to_free_swap(folio, vma, vmf->flags))
folio_free_swap(folio);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
pte = mk_pte(page, vma->vm_page_prot);
if (pte_swp_soft_dirty(vmf->orig_pte))
pte = pte_mksoft_dirty(pte);
if (pte_swp_uffd_wp(vmf->orig_pte))
pte = pte_mkuffd_wp(pte);
/*
* Same logic as in do_wp_page(); however, optimize for pages that are
* certainly not shared either because we just allocated them without
* exposing them to the swapcache or because the swap entry indicates
* exclusivity.
*/
if (!folio_test_ksm(folio) &&
(exclusive || folio_ref_count(folio) == 1)) {
if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
!pte_needs_soft_dirty_wp(vma, pte)) {
pte = pte_mkwrite(pte, vma);
if (vmf->flags & FAULT_FLAG_WRITE) {
pte = pte_mkdirty(pte);
vmf->flags &= ~FAULT_FLAG_WRITE;
}
}
rmap_flags |= RMAP_EXCLUSIVE;
}
folio_ref_add(folio, nr_pages - 1);
flush_icache_pages(vma, page, nr_pages);
vmf->orig_pte = pte_advance_pfn(pte, page_idx);
/* ksm created a completely new copy */
if (unlikely(folio != swapcache && swapcache)) {
folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
} else if (!folio_test_anon(folio)) {
/*
* We currently only expect small !anon folios which are either
* fully exclusive or fully shared, or newly allocated large
* folios which are fully exclusive. If we ever get large
* folios within swapcache here, we have to be careful.
*/
VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
} else {
folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
rmap_flags);
}
VM_BUG_ON(!folio_test_anon(folio) ||
(pte_write(pte) && !PageAnonExclusive(page)));
set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
arch_do_swap_page_nr(vma->vm_mm, vma, address,
pte, pte, nr_pages);
folio_unlock(folio);
if (folio != swapcache && swapcache) {
/*
* Hold the lock to avoid the swap entry to be reused
* until we take the PT lock for the pte_same() check
* (to avoid false positives from pte_same). For
* further safety release the lock after the swap_free
* so that the swap count won't change under a
* parallel locked swapcache.
*/
folio_unlock(swapcache);
folio_put(swapcache);
}
if (vmf->flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(vmf);
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
goto out;
}
/* No need to invalidate - it was non-present before */
update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
/* Clear the swap cache pin for direct swapin after PTL unlock */
if (need_clear_cache) {
swapcache_clear(si, entry, nr_pages);
if (waitqueue_active(&swapcache_wq))
wake_up(&swapcache_wq);
}
if (si)
put_swap_device(si);
return ret;
out_nomap:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
out_page:
folio_unlock(folio);
out_release:
folio_put(folio);
if (folio != swapcache && swapcache) {
folio_unlock(swapcache);
folio_put(swapcache);
}
if (need_clear_cache) {
swapcache_clear(si, entry, nr_pages);
if (waitqueue_active(&swapcache_wq))
wake_up(&swapcache_wq);
}
if (si)
put_swap_device(si);
return ret;
}
static bool pte_range_none(pte_t *pte, int nr_pages)
{
int i;
for (i = 0; i < nr_pages; i++) {
if (!pte_none(ptep_get_lockless(pte + i)))
return false;
}
return true;
}
static struct folio *alloc_anon_folio(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long orders;
struct folio *folio;
unsigned long addr;
pte_t *pte;
gfp_t gfp;
int order;
/*
* If uffd is active for the vma we need per-page fault fidelity to
* maintain the uffd semantics.
*/
if (unlikely(userfaultfd_armed(vma)))
goto fallback;
/*
* Get a list of all the (large) orders below PMD_ORDER that are enabled
* for this vma. Then filter out the orders that can't be allocated over
* the faulting address and still be fully contained in the vma.
*/
orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
BIT(PMD_ORDER) - 1);
orders = thp_vma_suitable_orders(vma, vmf->address, orders);
if (!orders)
goto fallback;
pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
if (!pte)
return ERR_PTR(-EAGAIN);
/*
* Find the highest order where the aligned range is completely
* pte_none(). Note that all remaining orders will be completely
* pte_none().
*/
order = highest_order(orders);
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
if (pte_range_none(pte + pte_index(addr), 1 << order))
break;
order = next_order(&orders, order);
}
pte_unmap(pte);
if (!orders)
goto fallback;
/* Try allocating the highest of the remaining orders. */
gfp = vma_thp_gfp_mask(vma);
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
folio = vma_alloc_folio(gfp, order, vma, addr);
if (folio) {
if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
folio_put(folio);
goto next;
}
folio_throttle_swaprate(folio, gfp);
/*
* When a folio is not zeroed during allocation
* (__GFP_ZERO not used) or user folios require special
* handling, folio_zero_user() is used to make sure
* that the page corresponding to the faulting address
* will be hot in the cache after zeroing.
*/
if (user_alloc_needs_zeroing())
folio_zero_user(folio, vmf->address);
return folio;
}
next:
count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
order = next_order(&orders, order);
}
fallback:
#endif
return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
}
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_lock still held, but pte unmapped and unlocked.
*/
static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
struct folio *folio;
vm_fault_t ret = 0;
int nr_pages = 1;
pte_t entry;
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
/*
* Use pte_alloc() instead of pte_alloc_map(), so that OOM can
* be distinguished from a transient failure of pte_offset_map().
*/
if (pte_alloc(vma->vm_mm, vmf->pmd))
return VM_FAULT_OOM;
/* Use the zero-page for reads */
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
vma->vm_page_prot));
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (!vmf->pte)
goto unlock;
if (vmf_pte_changed(vmf)) {
update_mmu_tlb(vma, vmf->address, vmf->pte);
goto unlock;
}
ret = check_stable_address_space(vma->vm_mm);
if (ret)
goto unlock;
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
goto setpte;
}
/* Allocate our own private page. */
ret = vmf_anon_prepare(vmf);
if (ret)
return ret;
/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
folio = alloc_anon_folio(vmf);
if (IS_ERR(folio))
return 0;
if (!folio)
goto oom;
nr_pages = folio_nr_pages(folio);
addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
/*
* The memory barrier inside __folio_mark_uptodate makes sure that
* preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__folio_mark_uptodate(folio);
entry = folio_mk_pte(folio, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry), vma);
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
if (!vmf->pte)
goto release;
if (nr_pages == 1 && vmf_pte_changed(vmf)) {
update_mmu_tlb(vma, addr, vmf->pte);
goto release;
} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
goto release;
}
ret = check_stable_address_space(vma->vm_mm);
if (ret)
goto release;
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
folio_put(folio);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
folio_ref_add(folio, nr_pages - 1);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
setpte:
if (vmf_orig_pte_uffd_wp(vmf))
entry = pte_mkuffd_wp(entry);
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
/* No need to invalidate - it was non-present before */
update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
release:
folio_put(folio);
goto unlock;
oom:
return VM_FAULT_OOM;
}
/*
* The mmap_lock must have been held on entry, and may have been
* released depending on flags and vma->vm_ops->fault() return value.
* See filemap_fault() and __lock_page_retry().
*/
static vm_fault_t __do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct folio *folio;
vm_fault_t ret;
/*
* Preallocate pte before we take page_lock because this might lead to
* deadlocks for memcg reclaim which waits for pages under writeback:
* lock_page(A)
* SetPageWriteback(A)
* unlock_page(A)
* lock_page(B)
* lock_page(B)
* pte_alloc_one
* shrink_folio_list
* wait_on_page_writeback(A)
* SetPageWriteback(B)
* unlock_page(B)
* # flush A, B to clear the writeback
*/
if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
ret = vma->vm_ops->fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
VM_FAULT_DONE_COW)))
return ret;
folio = page_folio(vmf->page);
if (unlikely(PageHWPoison(vmf->page))) {
vm_fault_t poisonret = VM_FAULT_HWPOISON;
if (ret & VM_FAULT_LOCKED) {
if (page_mapped(vmf->page))
unmap_mapping_folio(folio);
/* Retry if a clean folio was removed from the cache. */
if (mapping_evict_folio(folio->mapping, folio))
poisonret = VM_FAULT_NOPAGE;
folio_unlock(folio);
}
folio_put(folio);
vmf->page = NULL;
return poisonret;
}
if (unlikely(!(ret & VM_FAULT_LOCKED)))
folio_lock(folio);
else
VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void deposit_prealloc_pte(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
/*
* We are going to consume the prealloc table,
* count that as nr_ptes.
*/
mm_inc_nr_ptes(vma->vm_mm);
vmf->prealloc_pte = NULL;
}
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
{
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
pmd_t entry;
vm_fault_t ret = VM_FAULT_FALLBACK;
/*
* It is too late to allocate a small folio, we already have a large
* folio in the pagecache: especially s390 KVM cannot tolerate any
* PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any
* PMD mappings if THPs are disabled. As we already have a THP,
* behave as if we are forcing a collapse.
*/
if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags,
/* forced_collapse=*/ true))
return ret;
if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return ret;
if (folio_order(folio) != HPAGE_PMD_ORDER)
return ret;
page = &folio->page;
/*
* Just backoff if any subpage of a THP is corrupted otherwise
* the corrupted page may be mapped by PMD silently to escape the
* check. This kind of THP just can be PTE mapped. Access to
* the corrupted subpage should trigger SIGBUS as expected.
*/
if (unlikely(folio_test_has_hwpoisoned(folio)))
return ret;
/*
* Archs like ppc64 need additional space to store information
* related to pte entry. Use the preallocated table for that.
*/
if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_none(*vmf->pmd)))
goto out;
flush_icache_pages(vma, page, HPAGE_PMD_NR);
entry = folio_mk_pmd(folio, vma->vm_page_prot);
if (write)
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
folio_add_file_rmap_pmd(folio, page, vma);
/*
* deposit and withdraw with pmd lock held
*/
if (arch_needs_pgtable_deposit())
deposit_prealloc_pte(vmf);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
update_mmu_cache_pmd(vma, haddr, vmf->pmd);
/* fault is handled */
ret = 0;
count_vm_event(THP_FILE_MAPPED);
out:
spin_unlock(vmf->ptl);
return ret;
}
#else
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
{
return VM_FAULT_FALLBACK;
}
#endif
/**
* set_pte_range - Set a range of PTEs to point to pages in a folio.
* @vmf: Fault description.
* @folio: The folio that contains @page.
* @page: The first page to create a PTE for.
* @nr: The number of PTEs to create.
* @addr: The first address to create a PTE for.
*/
void set_pte_range(struct vm_fault *vmf, struct folio *folio,
struct page *page, unsigned int nr, unsigned long addr)
{
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
pte_t entry;
flush_icache_pages(vma, page, nr);
entry = mk_pte(page, vma->vm_page_prot);
if (prefault && arch_wants_old_prefaulted_pte())
entry = pte_mkold(entry);
else
entry = pte_sw_mkyoung(entry);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
else if (pte_write(entry) && folio_test_dirty(folio))
entry = pte_mkdirty(entry);
if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
entry = pte_mkuffd_wp(entry);
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
VM_BUG_ON_FOLIO(nr != 1, folio);
folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
} else {
folio_add_file_rmap_ptes(folio, page, nr, vma);
}
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
}
static bool vmf_pte_changed(struct vm_fault *vmf)
{
if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
return !pte_none(ptep_get(vmf->pte));
}
/**
* finish_fault - finish page fault once we have prepared the page to fault
*
* @vmf: structure describing the fault
*
* This function handles all that is needed to finish a page fault once the
* page to fault in is prepared. It handles locking of PTEs, inserts PTE for
* given page, adds reverse page mapping, handles memcg charges and LRU
* addition.
*
* The function expects the page to be locked and on success it consumes a
* reference of a page being mapped (for the PTE which maps it).
*
* Return: %0 on success, %VM_FAULT_ code in case of error.
*/
vm_fault_t finish_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page;
struct folio *folio;
vm_fault_t ret;
bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
!(vma->vm_flags & VM_SHARED);
int type, nr_pages;
unsigned long addr;
bool needs_fallback = false;
fallback:
addr = vmf->address;
/* Did we COW the page? */
if (is_cow)
page = vmf->cow_page;
else
page = vmf->page;
folio = page_folio(page);
/*
* check even for read faults because we might have lost our CoWed
* page
*/
if (!(vma->vm_flags & VM_SHARED)) {
ret = check_stable_address_space(vma->vm_mm);
if (ret)
return ret;
}
if (!needs_fallback && vma->vm_file) {
struct address_space *mapping = vma->vm_file->f_mapping;
pgoff_t file_end;
file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
/*
* Do not allow mapping with PTEs beyond i_size, nor with a PMD
* across i_size, to preserve SIGBUS semantics.
*
* Make an exception for shmem/tmpfs, which has long been
* intentionally mapped with PMDs across i_size.
*/
needs_fallback = !shmem_mapping(mapping) &&
file_end < folio_next_index(folio);
}
if (pmd_none(*vmf->pmd)) {
if (!needs_fallback && folio_test_pmd_mappable(folio)) {
ret = do_set_pmd(vmf, folio, page);
if (ret != VM_FAULT_FALLBACK)
return ret;
}
if (vmf->prealloc_pte)
pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
return VM_FAULT_OOM;
}
nr_pages = folio_nr_pages(folio);
/* Using per-page fault to maintain the uffd semantics */
if (unlikely(userfaultfd_armed(vma)) || unlikely(needs_fallback)) {
nr_pages = 1;
} else if (nr_pages > 1) {
pgoff_t idx = folio_page_idx(folio, page);
/* The page offset of vmf->address within the VMA. */
pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
/* The index of the entry in the pagetable for fault page. */
pgoff_t pte_off = pte_index(vmf->address);
/*
* Fall back to per-page faults if the folio in the page cache
* extends beyond the VMA limits or the PMD page table limits.
*/
if (unlikely(vma_off < idx ||
vma_off + (nr_pages - idx) > vma_pages(vma) ||
pte_off < idx ||
pte_off + (nr_pages - idx) > PTRS_PER_PTE)) {
nr_pages = 1;
} else {
/* Now we can set mappings for the whole large folio. */
addr = vmf->address - idx * PAGE_SIZE;
page = &folio->page;
}
}
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
addr, &vmf->ptl);
if (!vmf->pte)
return VM_FAULT_NOPAGE;
/* Re-check under ptl */
if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
update_mmu_tlb(vma, addr, vmf->pte);
ret = VM_FAULT_NOPAGE;
goto unlock;
} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
needs_fallback = true;
pte_unmap_unlock(vmf->pte, vmf->ptl);
goto fallback;
}
folio_ref_add(folio, nr_pages - 1);
set_pte_range(vmf, folio, page, nr_pages, addr);
type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
add_mm_counter(vma->vm_mm, type, nr_pages);
ret = 0;
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
}
static unsigned long fault_around_pages __read_mostly =
65536 >> PAGE_SHIFT;
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
{
*val = fault_around_pages << PAGE_SHIFT;
return 0;
}
/*
* fault_around_bytes must be rounded down to the nearest page order as it's
* what do_fault_around() expects to see.
*/
static int fault_around_bytes_set(void *data, u64 val)
{
if (val / PAGE_SIZE > PTRS_PER_PTE)
return -EINVAL;
/*
* The minimum value is 1 page, however this results in no fault-around
* at all. See should_fault_around().
*/
val = max(val, PAGE_SIZE);
fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
return 0;
}
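/*
 * Worked example (illustrative, 4K pages): writing 20000 to
 * fault_around_bytes is clamped up to at least PAGE_SIZE and then
 * rounded down to the nearest power of two, 16384, giving
 * fault_around_pages == 4. A value amounting to more than
 * PTRS_PER_PTE pages is rejected with -EINVAL.
 */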
DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
static int __init fault_around_debugfs(void)
{
debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
&fault_around_bytes_fops);
return 0;
}
late_initcall(fault_around_debugfs);
#endif
/*
* do_fault_around() tries to map few pages around the fault address. The hope
* is that the pages will be needed soon and this will lower the number of
* faults to handle.
*
* It uses vm_ops->map_pages() to map the pages, which skips the page if it's
* not ready to be mapped: not up-to-date, locked, etc.
*
* This function doesn't cross VMA or page table boundaries, in order to call
* map_pages() and acquire a PTE lock only once.
*
* fault_around_pages defines how many pages we'll try to map.
* do_fault_around() expects it to be set to a power of two less than or equal
* to PTRS_PER_PTE.
*
* The virtual address of the area that we map is naturally aligned to
* fault_around_pages * PAGE_SIZE rounded down to the machine page size
* (and therefore to page order). This way it's easier to guarantee
* that we don't cross page table boundaries.
*/
static vm_fault_t do_fault_around(struct vm_fault *vmf)
{
pgoff_t nr_pages = READ_ONCE(fault_around_pages);
pgoff_t pte_off = pte_index(vmf->address);
/* The page offset of vmf->address within the VMA. */
pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
pgoff_t from_pte, to_pte;
vm_fault_t ret;
/* The PTE offset of the start address, clamped to the VMA. */
from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
pte_off - min(pte_off, vma_off));
/* The PTE offset of the end address, clamped to the VMA and PTE. */
to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
pte_off + vma_pages(vmf->vma) - vma_off) - 1;
if (pmd_none(*vmf->pmd)) {
vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
rcu_read_lock();
ret = vmf->vma->vm_ops->map_pages(vmf,
vmf->pgoff + from_pte - pte_off,
vmf->pgoff + to_pte - pte_off);
rcu_read_unlock();
return ret;
}
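/*
 * Worked example of the clamping above (illustrative numbers,
 * fault_around_pages == 16): a fault at PTE index 35 of its page table
 * (pte_off == 35), 3 pages into a 10-page VMA (vma_off == 3), gives
 * from_pte == max(32, 35 - 3) == 32 and
 * to_pte == min(48, PTRS_PER_PTE, 35 + 10 - 3) - 1 == 41, so at most
 * ten PTEs are populated and neither the VMA nor the page table
 * boundary is crossed.
 */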
/* Return true if we should do read fault-around, false otherwise */
static inline bool should_fault_around(struct vm_fault *vmf)
{
/* No ->map_pages? No way to fault around... */
if (!vmf->vma->vm_ops->map_pages)
return false;
if (uffd_disable_fault_around(vmf->vma))
return false;
/* A single page implies no faulting 'around' at all. */
return fault_around_pages > 1;
}
static vm_fault_t do_read_fault(struct vm_fault *vmf)
{
vm_fault_t ret = 0;
struct folio *folio;
/*
* Let's call ->map_pages() first and use ->fault() as fallback
* if page by the offset is not ready to be mapped (cold cache or
* something).
*/
if (should_fault_around(vmf)) {
ret = do_fault_around(vmf);
if (ret)
return ret;
}
ret = vmf_can_call_fault(vmf);
if (ret)
return ret;
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
ret |= finish_fault(vmf);
folio = page_folio(vmf->page);
folio_unlock(folio);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
folio_put(folio);
return ret;
}
static vm_fault_t do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct folio *folio;
vm_fault_t ret;
ret = vmf_can_call_fault(vmf);
if (!ret)
ret = vmf_anon_prepare(vmf);
if (ret)
return ret;
folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
if (!folio)
return VM_FAULT_OOM;
vmf->cow_page = &folio->page;
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
if (ret & VM_FAULT_DONE_COW)
return ret;
if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
ret = VM_FAULT_HWPOISON;
goto unlock;
}
__folio_mark_uptodate(folio);
ret |= finish_fault(vmf);
unlock:
unlock_page(vmf->page);
put_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
return ret;
uncharge_out:
folio_put(folio);
return ret;
}
static vm_fault_t do_shared_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret, tmp;
struct folio *folio;
ret = vmf_can_call_fault(vmf);
if (ret)
return ret;
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
folio = page_folio(vmf->page);
/*
* Check if the backing address space wants to know that the page is
* about to become writable
*/
if (vma->vm_ops->page_mkwrite) {
folio_unlock(folio);
tmp = do_page_mkwrite(vmf, folio);
if (unlikely(!tmp ||
(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
folio_put(folio);
return tmp;
}
}
ret |= finish_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
VM_FAULT_RETRY))) {
folio_unlock(folio);
folio_put(folio);
return ret;
}
ret |= fault_dirty_shared_page(vmf);
return ret;
}
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults).
* The mmap_lock may have been released depending on flags and our
* return value. See filemap_fault() and __folio_lock_or_retry().
* If mmap_lock is released, vma may become invalid (for example
* by other thread calling munmap()).
*/
static vm_fault_t do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *vm_mm = vma->vm_mm;
vm_fault_t ret;
/*
* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
*/
if (!vma->vm_ops->fault) {
vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (unlikely(!vmf->pte))
ret = VM_FAULT_SIGBUS;
else {
/*
* Make sure this is not a temporary clearing of pte
* by holding ptl and checking again. A R/M/W update
* of pte involves: take ptl, clearing the pte so that
* we don't have concurrent modification by hardware
* followed by an update.
*/
if (unlikely(pte_none(ptep_get(vmf->pte))))
ret = VM_FAULT_SIGBUS;
else
ret = VM_FAULT_NOPAGE;
pte_unmap_unlock(vmf->pte, vmf->ptl);
}
} else if (!(vmf->flags & FAULT_FLAG_WRITE))
ret = do_read_fault(vmf);
else if (!(vma->vm_flags & VM_SHARED))
ret = do_cow_fault(vmf);
else
ret = do_shared_fault(vmf);
/* preallocated pagetable is unused: free it */
if (vmf->prealloc_pte) {
pte_free(vm_mm, vmf->prealloc_pte);
vmf->prealloc_pte = NULL;
}
return ret;
}
int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
unsigned long addr, int *flags,
bool writable, int *last_cpupid)
{
struct vm_area_struct *vma = vmf->vma;
/*
* Avoid grouping on RO pages in general. RO pages shouldn't hurt as
* much anyway since they can be in shared cache state. This misses
* the case where a mapping is writable but the process never writes
* to it but pte_write gets cleared during protection updates and
* pte_dirty has unpredictable behaviour between PTE scan updates,
* background writeback, dirty balancing and application behaviour.
*/
if (!writable)
*flags |= TNF_NO_GROUP;
/*
* Flag if the folio is shared between multiple address spaces. This
* is later used when determining whether to group tasks together
*/
if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
*flags |= TNF_SHARED;
/*
* For memory tiering mode, cpupid of slow memory page is used
* to record page access time. So use default value.
*/
if (folio_use_access_time(folio))
*last_cpupid = (-1 & LAST_CPUPID_MASK);
else
*last_cpupid = folio_last_cpupid(folio);
/* Record the current PID accessing the VMA */
vma_set_access_pid_bit(vma);
count_vm_numa_event(NUMA_HINT_FAULTS);
#ifdef CONFIG_NUMA_BALANCING
count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
#endif
if (folio_nid(folio) == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
*flags |= TNF_FAULT_LOCAL;
}
return mpol_misplaced(folio, vmf, addr);
}
static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
unsigned long fault_addr, pte_t *fault_pte,
bool writable)
{
pte_t pte, old_pte;
old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
pte = pte_modify(old_pte, vma->vm_page_prot);
pte = pte_mkyoung(pte);
if (writable)
pte = pte_mkwrite(pte, vma);
ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
}
static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
struct folio *folio, pte_t fault_pte,
bool ignore_writable, bool pte_write_upgrade)
{
int nr = pte_pfn(fault_pte) - folio_pfn(folio);
unsigned long start, end, addr = vmf->address;
unsigned long addr_start = addr - (nr << PAGE_SHIFT);
unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
pte_t *start_ptep;
/* Stay within the VMA and within the page table. */
start = max3(addr_start, pt_start, vma->vm_start);
end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
vma->vm_end);
start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
/* Restore all PTEs' mapping of the large folio */
for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
pte_t ptent = ptep_get(start_ptep);
bool writable = false;
if (!pte_present(ptent) || !pte_protnone(ptent))
continue;
if (pfn_folio(pte_pfn(ptent)) != folio)
continue;
if (!ignore_writable) {
ptent = pte_modify(ptent, vma->vm_page_prot);
writable = pte_write(ptent);
if (!writable && pte_write_upgrade &&
can_change_pte_writable(vma, addr, ptent))
writable = true;
}
numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
}
}
static vm_fault_t do_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct folio *folio = NULL;
int nid = NUMA_NO_NODE;
bool writable = false, ignore_writable = false;
bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
int last_cpupid;
int target_nid;
pte_t pte, old_pte;
int flags = 0, nr_pages;
/*
* The pte cannot be used safely until we verify, while holding the page
* table lock, that its contents have not changed during fault handling.
*/
spin_lock(vmf->ptl);
/* Read the live PTE from the page tables: */
old_pte = ptep_get(vmf->pte);
if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
pte = pte_modify(old_pte, vma->vm_page_prot);
/*
* Detect now whether the PTE could be writable; this information
* is only valid while holding the PT lock.
*/
writable = pte_write(pte);
if (!writable && pte_write_upgrade &&
can_change_pte_writable(vma, vmf->address, pte))
writable = true;
folio = vm_normal_folio(vma, vmf->address, pte);
if (!folio || folio_is_zone_device(folio))
goto out_map;
nid = folio_nid(folio);
nr_pages = folio_nr_pages(folio);
target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
writable, &last_cpupid);
if (target_nid == NUMA_NO_NODE)
goto out_map;
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
flags |= TNF_MIGRATE_FAIL;
goto out_map;
}
/* The folio is isolated and isolation code holds a folio reference. */
pte_unmap_unlock(vmf->pte, vmf->ptl);
writable = false;
ignore_writable = true;
/* Migrate to the requested node */
if (!migrate_misplaced_folio(folio, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
}
flags |= TNF_MIGRATE_FAIL;
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (unlikely(!vmf->pte))
return 0;
if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
out_map:
/*
* Make it present again, depending on how arch implements
* non-accessible ptes, some can allow access by kernel mode.
*/
if (folio && folio_test_large(folio))
numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
pte_write_upgrade);
else
numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
writable);
pte_unmap_unlock(vmf->pte, vmf->ptl);
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
}
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
if (vma_is_anonymous(vma))
return do_huge_pmd_anonymous_page(vmf);
if (vma->vm_ops->huge_fault)
return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
return VM_FAULT_FALLBACK;
}
/* `inline' is required to avoid gcc 4.1.2 build error */
static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
vm_fault_t ret;
if (vma_is_anonymous(vma)) {
if (likely(!unshare) &&
userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
if (userfaultfd_wp_async(vmf->vma))
goto split;
return handle_userfault(vmf, VM_UFFD_WP);
}
return do_huge_pmd_wp_page(vmf);
}
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
if (vma->vm_ops->huge_fault) {
ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
}
}
split:
/* COW or write-notify handled on pte level: split pmd. */
__split_huge_pmd(vma, vmf->pmd, vmf->address, false);
return VM_FAULT_FALLBACK;
}
static vm_fault_t create_huge_pud(struct vm_fault *vmf)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
struct vm_area_struct *vma = vmf->vma;
/* No support for anonymous transparent PUD pages yet */
if (vma_is_anonymous(vma))
return VM_FAULT_FALLBACK;
if (vma->vm_ops->huge_fault)
return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
return VM_FAULT_FALLBACK;
}
static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret;
/* No support for anonymous transparent PUD pages yet */
if (vma_is_anonymous(vma))
goto split;
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
if (vma->vm_ops->huge_fault) {
ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
}
}
split:
/* COW or write-notify not handled on PUD level: split pud.*/
__split_huge_pud(vma, vmf->pud, vmf->address);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
return VM_FAULT_FALLBACK;
}
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
* RISC architectures). The early dirtying is also good on the i386.
*
* There is also a hook called "update_mmu_cache()" that architectures
* with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs).
*
* We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
* concurrent faults).
*
* The mmap_lock may have been released depending on flags and our return value.
* See filemap_fault() and __folio_lock_or_retry().
*/
static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
{
pte_t entry;
if (unlikely(pmd_none(*vmf->pmd))) {
/*
* Leave __pte_alloc() until later: because vm_ops->fault may
* want to allocate huge page, and if we expose page table
* for an instant, it will be difficult to retract from
* concurrent faults and from rmap lookups.
*/
vmf->pte = NULL;
vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
} else {
pmd_t dummy_pmdval;
/*
* A regular pmd is established and it can't morph into a huge
* pmd by anon khugepaged, since that takes mmap_lock in write
* mode; but shmem or file collapse to THP could still morph
* it into a huge pmd: just retry later if so.
*
* Use the maywrite version to indicate that vmf->pte may be
* modified, but since we will use pte_same() to detect the
* change of the !pte_none() entry, there is no need to recheck
* the pmdval. Here we choose to pass a dummy variable instead
* of NULL, which helps new users think about why this place is
* special.
*/
vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
vmf->address, &dummy_pmdval,
&vmf->ptl);
if (unlikely(!vmf->pte))
return 0;
vmf->orig_pte = ptep_get_lockless(vmf->pte);
vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
if (pte_none(vmf->orig_pte)) {
pte_unmap(vmf->pte);
vmf->pte = NULL;
}
}
if (!vmf->pte)
return do_pte_missing(vmf);
if (!pte_present(vmf->orig_pte))
return do_swap_page(vmf);
if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
return do_numa_page(vmf);
spin_lock(vmf->ptl);
entry = vmf->orig_pte;
if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
goto unlock;
}
if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
if (!pte_write(entry))
return do_wp_page(vmf);
else if (likely(vmf->flags & FAULT_FLAG_WRITE))
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
vmf->flags & FAULT_FLAG_WRITE)) {
update_mmu_cache_range(vmf, vmf->vma, vmf->address,
vmf->pte, 1);
} else {
/* Skip spurious TLB flush for retried page fault */
if (vmf->flags & FAULT_FLAG_TRIED)
goto unlock;
/*
* This is needed only for protection faults but the arch code
* is not yet telling us if this is a protection fault or not.
* This still avoids useless tlb flushes for .text page faults
* with threads.
*/
if (vmf->flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
vmf->pte);
}
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
/*
* On entry, we hold either the VMA lock or the mmap_lock
* (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
* the result, the mmap_lock is not held on exit. See filemap_fault()
* and __folio_lock_or_retry().
*/
static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
struct vm_fault vmf = {
.vma = vma,
.address = address & PAGE_MASK,
.real_address = address,
.flags = flags,
.pgoff = linear_page_index(vma, address),
.gfp_mask = __get_fault_gfp_mask(vma),
};
struct mm_struct *mm = vma->vm_mm;
vm_flags_t vm_flags = vma->vm_flags;
pgd_t *pgd;
p4d_t *p4d;
vm_fault_t ret;
pgd = pgd_offset(mm, address);
p4d = p4d_alloc(mm, pgd, address);
if (!p4d)
return VM_FAULT_OOM;
vmf.pud = pud_alloc(mm, p4d, address);
if (!vmf.pud)
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
pud_t orig_pud = *vmf.pud;
barrier();
if (pud_trans_huge(orig_pud)) {
/*
* TODO once we support anonymous PUDs: NUMA case and
* FAULT_FLAG_UNSHARE handling.
*/
if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
ret = wp_huge_pud(&vmf, orig_pud);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
huge_pud_set_accessed(&vmf, orig_pud);
return 0;
}
}
}
vmf.pmd = pmd_alloc(mm, vmf.pud, address);
if (!vmf.pmd)
return VM_FAULT_OOM;
/* Huge pud page fault raced with pmd_alloc? */
if (pud_trans_unstable(vmf.pud))
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(vmf.orig_pmd));
if (is_pmd_migration_entry(vmf.orig_pmd))
pmd_migration_entry_wait(mm, vmf.pmd);
return 0;
}
if (pmd_trans_huge(vmf.orig_pmd)) {
if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
return do_huge_pmd_numa_page(&vmf);
if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
!pmd_write(vmf.orig_pmd)) {
ret = wp_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
huge_pmd_set_accessed(&vmf);
return 0;
}
}
}
return handle_pte_fault(&vmf);
}
/**
* mm_account_fault - Do page fault accounting
* @mm: mm from which memcg should be extracted. It can be NULL.
* @regs: the pt_regs struct pointer. When set to NULL, will skip accounting
* of perf event counters, but we'll still do the per-task accounting to
* the task who triggered this page fault.
* @address: the faulted address.
* @flags: the fault flags.
* @ret: the fault retcode.
*
* This will take care of most of the page fault accounting. Meanwhile, it
* will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
* updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
* still be in per-arch page fault handlers at the entry of page fault.
*/
static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
unsigned long address, unsigned int flags,
vm_fault_t ret)
{
bool major;
/* Incomplete faults will be accounted upon completion. */
if (ret & VM_FAULT_RETRY)
return;
/*
* To preserve the behavior of older kernels, PGFAULT counters record
* both successful and failed faults, as opposed to perf counters,
* which ignore failed cases.
*/
count_vm_event(PGFAULT);
count_memcg_event_mm(mm, PGFAULT);
/*
* Do not account for unsuccessful faults (e.g. when the address wasn't
* valid). That includes arch_vma_access_permitted() failing before
* reaching here. So this is not a "this many hardware page faults"
* counter. We should use the hw profiling for that.
*/
if (ret & VM_FAULT_ERROR)
return;
/*
* We define the fault as a major fault when the final successful fault
* is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
* handle it immediately previously).
*/
major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
if (major)
current->maj_flt++;
else
current->min_flt++;
/*
* If the fault is done for GUP, regs will be NULL. We only do the
* accounting for the per thread fault counters who triggered the
* fault, and we skip the perf event updates.
*/
if (!regs)
return;
if (major)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
else
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
#ifdef CONFIG_LRU_GEN
static void lru_gen_enter_fault(struct vm_area_struct *vma)
{
/* the LRU algorithm only applies to accesses with recency */
current->in_lru_fault = vma_has_recency(vma);
}
static void lru_gen_exit_fault(void)
{
current->in_lru_fault = false;
}
#else
static void lru_gen_enter_fault(struct vm_area_struct *vma)
{
}
static void lru_gen_exit_fault(void)
{
}
#endif /* CONFIG_LRU_GEN */
static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
unsigned int *flags)
{
if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
return VM_FAULT_SIGSEGV;
/*
* FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
* just treat it like an ordinary read-fault otherwise.
*/
if (!is_cow_mapping(vma->vm_flags))
*flags &= ~FAULT_FLAG_UNSHARE;
} else if (*flags & FAULT_FLAG_WRITE) {
/* Write faults on read-only mappings are impossible ... */
if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
return VM_FAULT_SIGSEGV;
/* ... and FOLL_FORCE only applies to COW mappings. */
if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
!is_cow_mapping(vma->vm_flags)))
return VM_FAULT_SIGSEGV;
}
#ifdef CONFIG_PER_VMA_LOCK
/*
* Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
* the assumption that lock is dropped on VM_FAULT_RETRY.
*/
if (WARN_ON_ONCE((*flags &
(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
(FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
return VM_FAULT_SIGSEGV;
#endif
return 0;
}
/*
* By the time we get here, we already hold either the VMA lock or the
* mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
*
* The mmap_lock may have been released depending on flags and our
* return value. See filemap_fault() and __folio_lock_or_retry().
*/
vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, struct pt_regs *regs)
{
/* If the fault handler drops the mmap_lock, vma may be freed */
struct mm_struct *mm = vma->vm_mm;
vm_fault_t ret;
bool is_droppable;
__set_current_state(TASK_RUNNING);
ret = sanitize_fault_flags(vma, &flags);
if (ret)
goto out;
if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
flags & FAULT_FLAG_INSTRUCTION,
flags & FAULT_FLAG_REMOTE)) {
ret = VM_FAULT_SIGSEGV;
goto out;
}
is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.
*/
if (flags & FAULT_FLAG_USER)
mem_cgroup_enter_user_fault();
lru_gen_enter_fault(vma);
if (unlikely(is_vm_hugetlb_page(vma)))
ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
else
ret = __handle_mm_fault(vma, address, flags);
/*
* Warning: It is no longer safe to dereference vma-> after this point,
* because mmap_lock might have been dropped by __handle_mm_fault(), so
* vma might be destroyed from underneath us.
*/
lru_gen_exit_fault();
/* If the mapping is droppable, then errors due to OOM aren't fatal. */
if (is_droppable)
ret &= ~VM_FAULT_OOM;
if (flags & FAULT_FLAG_USER) {
mem_cgroup_exit_user_fault();
/*
* The task may have entered a memcg OOM situation but
* if the allocation error was handled gracefully (no
* VM_FAULT_OOM), there is no need to kill anything.
* Just clean up the OOM state peacefully.
*/
if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
mem_cgroup_oom_synchronize(false);
}
out:
mm_account_fault(mm, regs, address, flags, ret);
return ret;
}
EXPORT_SYMBOL_GPL(handle_mm_fault);
#ifndef __PAGETABLE_P4D_FOLDED
/*
* Allocate p4d page table.
* We've already handled the fast-path in-line.
*/
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
p4d_t *new = p4d_alloc_one(mm, address);
if (!new)
return -ENOMEM;
spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) { /* Another has populated it */
p4d_free(mm, new);
} else {
smp_wmb(); /* See comment in pmd_install() */
pgd_populate(mm, pgd, new);
}
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_P4D_FOLDED */
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
* We've already handled the fast-path in-line.
*/
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
{
pud_t *new = pud_alloc_one(mm, address);
if (!new)
return -ENOMEM;
spin_lock(&mm->page_table_lock);
if (!p4d_present(*p4d)) {
mm_inc_nr_puds(mm);
smp_wmb(); /* See comment in pmd_install() */
p4d_populate(mm, p4d, new);
} else /* Another has populated it */
pud_free(mm, new);
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
/*
* Allocate page middle directory.
* We've already handled the fast-path in-line.
*/
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
spinlock_t *ptl;
pmd_t *new = pmd_alloc_one(mm, address);
if (!new)
return -ENOMEM;
ptl = pud_lock(mm, pud);
if (!pud_present(*pud)) {
mm_inc_nr_pmds(mm);
smp_wmb(); /* See comment in pmd_install() */
pud_populate(mm, pud, new);
} else { /* Another has populated it */
pmd_free(mm, new);
}
spin_unlock(ptl);
return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
spinlock_t *lock, pte_t *ptep,
pgprot_t pgprot, unsigned long pfn_base,
unsigned long addr_mask, bool writable,
bool special)
{
args->lock = lock;
args->ptep = ptep;
args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
args->addr_mask = addr_mask;
args->pgprot = pgprot;
args->writable = writable;
args->special = special;
}
static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
{
#ifdef CONFIG_LOCKDEP
struct file *file = vma->vm_file;
struct address_space *mapping = file ? file->f_mapping : NULL;
if (mapping)
lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
lockdep_is_held(&vma->vm_mm->mmap_lock));
else
lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
#endif
}
/**
* follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
* @args: Pointer to struct @follow_pfnmap_args
*
* The caller needs to setup args->vma and args->address to point to the
* virtual address as the target of such lookup. On a successful return,
* the results will be put into other output fields.
*
* After the caller has finished using the fields, it must invoke
* follow_pfnmap_end() to properly release the locks and resources
* of such a lookup request.
*
* Between the start() and end() calls, the results in @args will be valid
* as proper locks will be held. After end() is called, the fields in
* @follow_pfnmap_args must not be accessed any further. Further use of
* such information after end() may require proper synchronization by the
* caller with page table updates, otherwise it can create a security bug.
*
* If the PTE maps a refcounted page, callers are responsible to protect
* against invalidation with MMU notifiers; otherwise access to the PFN at
* a later point in time can trigger use-after-free.
*
* Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
* should be taken for read, and the mmap semaphore cannot be released
* before the end() is invoked.
*
* This function must not be used to modify PTE content.
*
* Return: zero on success, negative otherwise.
*/
int follow_pfnmap_start(struct follow_pfnmap_args *args)
{
struct vm_area_struct *vma = args->vma;
unsigned long address = args->address;
struct mm_struct *mm = vma->vm_mm;
spinlock_t *lock;
pgd_t *pgdp;
p4d_t *p4dp, p4d;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep, pte;
pfnmap_lockdep_assert(vma);
if (unlikely(address < vma->vm_start || address >= vma->vm_end))
goto out;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out;
retry:
pgdp = pgd_offset(mm, address);
if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
goto out;
p4dp = p4d_offset(pgdp, address);
p4d = READ_ONCE(*p4dp);
if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
goto out;
pudp = pud_offset(p4dp, address);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
goto out;
if (pud_leaf(pud)) {
lock = pud_lock(mm, pudp);
if (!unlikely(pud_leaf(pud))) {
spin_unlock(lock);
goto retry;
}
pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
pud_pfn(pud), PUD_MASK, pud_write(pud),
pud_special(pud));
return 0;
}
pmdp = pmd_offset(pudp, address);
pmd = pmdp_get_lockless(pmdp);
if (pmd_leaf(pmd)) {
lock = pmd_lock(mm, pmdp);
if (!unlikely(pmd_leaf(pmd))) {
spin_unlock(lock);
goto retry;
}
pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
pmd_special(pmd));
return 0;
}
ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
if (!ptep)
goto out;
pte = ptep_get(ptep);
if (!pte_present(pte))
goto unlock;
pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
pte_pfn(pte), PAGE_MASK, pte_write(pte),
pte_special(pte));
return 0;
unlock:
pte_unmap_unlock(ptep, lock);
out:
return -EINVAL;
}
EXPORT_SYMBOL_GPL(follow_pfnmap_start);
/**
* follow_pfnmap_end(): End a follow_pfnmap_start() process
* @args: Pointer to struct @follow_pfnmap_args
*
* Must be paired with follow_pfnmap_start(). See the start() function
* above for more information.
*/
void follow_pfnmap_end(struct follow_pfnmap_args *args)
{
if (args->lock)
spin_unlock(args->lock);
if (args->ptep)
pte_unmap(args->ptep);
}
EXPORT_SYMBOL_GPL(follow_pfnmap_end);
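/*
 * Minimal usage sketch (hypothetical caller; the fields of 'args' are only
 * valid between the start() and end() calls):
 *
 *	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
 *
 *	if (follow_pfnmap_start(&args))
 *		return -EINVAL;
 *	consume(args.pfn, args.pgprot);
 *	follow_pfnmap_end(&args);
 *
 * consume() stands in for whatever the caller does with the results;
 * generic_access_phys() below is a real in-tree user of this pattern.
 */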
#ifdef CONFIG_HAVE_IOREMAP_PROT
/**
* generic_access_phys - generic implementation for iomem mmap access
* @vma: the vma to access
* @addr: userspace address, not relative offset within @vma
* @buf: buffer to read/write
* @len: length of transfer
* @write: set to FOLL_WRITE when writing, otherwise reading
*
* This is a generic implementation for &vm_operations_struct.access for an
* iomem mapping. This callback is used by access_process_vm() when the @vma is
* not page based.
*/
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
resource_size_t phys_addr;
pgprot_t prot = __pgprot(0);
void __iomem *maddr;
int offset = offset_in_page(addr);
int ret = -EINVAL;
bool writable;
struct follow_pfnmap_args args = { .vma = vma, .address = addr };
retry:
if (follow_pfnmap_start(&args))
return -EINVAL;
prot = args.pgprot;
phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
writable = args.writable;
follow_pfnmap_end(&args);
if ((write & FOLL_WRITE) && !writable)
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (!maddr)
return -ENOMEM;
if (follow_pfnmap_start(&args))
goto out_unmap;
if ((pgprot_val(prot) != pgprot_val(args.pgprot)) ||
(phys_addr != (args.pfn << PAGE_SHIFT)) ||
(writable != args.writable)) {
follow_pfnmap_end(&args);
iounmap(maddr);
goto retry;
}
if (write)
memcpy_toio(maddr + offset, buf, len);
else
memcpy_fromio(buf, maddr + offset, len);
ret = len;
follow_pfnmap_end(&args);
out_unmap:
iounmap(maddr);
return ret;
}
EXPORT_SYMBOL_GPL(generic_access_phys);
#endif
/*
* Access another process' address space as given in mm.
*/
static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags)
{
void *old_buf = buf;
int write = gup_flags & FOLL_WRITE;
if (mmap_read_lock_killable(mm))
return 0;
/* Untag the address before looking up the VMA */
addr = untagged_addr_remote(mm, addr);
/* Avoid triggering the temporary warning in __get_user_pages */
if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
return 0;
/* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, offset;
void *maddr;
struct folio *folio;
struct vm_area_struct *vma = NULL;
struct page *page = get_user_page_vma_remote(mm, addr,
gup_flags, &vma);
if (IS_ERR(page)) {
/* We might need to expand the stack to access it */
vma = vma_lookup(mm, addr);
if (!vma) {
vma = expand_stack(mm, addr);
/* mmap_lock was dropped on failure */
if (!vma)
return buf - old_buf;
/* Try again if stack expansion worked */
continue;
}
/*
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
*/
bytes = 0;
#ifdef CONFIG_HAVE_IOREMAP_PROT
if (vma->vm_ops && vma->vm_ops->access)
bytes = vma->vm_ops->access(vma, addr, buf,
len, write);
#endif
if (bytes <= 0)
break;
} else {
folio = page_folio(page);
bytes = len;
offset = addr & (PAGE_SIZE-1);
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
folio_mark_dirty_lock(folio);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
folio_release_kmap(folio, maddr);
}
len -= bytes;
buf += bytes;
addr += bytes;
}
mmap_read_unlock(mm);
return buf - old_buf;
}
/**
* access_remote_vm - access another process' address space
* @mm: the mm_struct of the target address space
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
* @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*
* Return: number of bytes copied from source to destination.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags)
{
return __access_remote_vm(mm, addr, buf, len, gup_flags);
}
/*
* Access another process' address space.
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags)
{
struct mm_struct *mm;
int ret;
mm = get_task_mm(tsk);
if (!mm)
return 0;
ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
mmput(mm);
return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);
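/*
 * Usage sketch (hypothetical task and buffer; a short remote read):
 *
 *	char buf[64];
 *	int copied = access_process_vm(tsk, addr, buf, sizeof(buf), 0);
 *
 * The return value is the number of bytes actually transferred, which may
 * be less than requested if the remote range is only partially accessible.
 */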
#ifdef CONFIG_BPF_SYSCALL
/*
* Copy a string from another process's address space as given in mm.
* If there is any error return -EFAULT.
*/
static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags)
{
void *old_buf = buf;
int err = 0;
*(char *)buf = '\0';
if (mmap_read_lock_killable(mm))
return -EFAULT;
addr = untagged_addr_remote(mm, addr);
/* Avoid triggering the temporary warning in __get_user_pages */
if (!vma_lookup(mm, addr)) {
err = -EFAULT;
goto out;
}
while (len) {
int bytes, offset, retval;
void *maddr;
struct folio *folio;
struct page *page;
struct vm_area_struct *vma = NULL;
page = get_user_page_vma_remote(mm, addr, gup_flags, &vma);
if (IS_ERR(page)) {
/*
* Treat as a total failure for now until we decide how
* to handle the CONFIG_HAVE_IOREMAP_PROT case and
* stack expansion.
*/
*(char *)buf = '\0';
err = -EFAULT;
goto out;
}
folio = page_folio(page);
bytes = len;
offset = addr & (PAGE_SIZE - 1);
if (bytes > PAGE_SIZE - offset)
bytes = PAGE_SIZE - offset;
maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);
retval = strscpy(buf, maddr + offset, bytes);
if (retval >= 0) {
/* Found the end of the string */
buf += retval;
folio_release_kmap(folio, maddr);
break;
}
buf += bytes - 1;
/*
* Because strscpy always NUL terminates we need to
* copy the last byte in the page if we are going to
* load more pages
*/
if (bytes != len) {
addr += bytes - 1;
copy_from_user_page(vma, page, addr, buf, maddr + (PAGE_SIZE - 1), 1);
buf += 1;
addr += 1;
}
len -= bytes;
folio_release_kmap(folio, maddr);
}
out:
mmap_read_unlock(mm);
if (err)
return err;
return buf - old_buf;
}
/**
* copy_remote_vm_str - copy a string from another process's address space.
* @tsk: the task of the target address space
* @addr: start address to read from
* @buf: destination buffer
* @len: number of bytes to copy
* @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*
* Return: number of bytes copied from @addr (source) to @buf (destination);
* not including the trailing NUL. The buffer is always left NUL-terminated.
* On any error, return -EFAULT.
*/
int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags)
{
struct mm_struct *mm;
int ret;
if (unlikely(len == 0))
return 0;
mm = get_task_mm(tsk);
if (!mm) {
*(char *)buf = '\0';
return -EFAULT;
}
ret = __copy_remote_vm_str(mm, addr, buf, len, gup_flags);
mmput(mm);
return ret;
}
EXPORT_SYMBOL_GPL(copy_remote_vm_str);
#endif /* CONFIG_BPF_SYSCALL */
/*
* Print the name of a VMA.
*/
void print_vma_addr(char *prefix, unsigned long ip)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
/*
* we might be running from an atomic context so we cannot sleep
*/
if (!mmap_read_trylock(mm))
return;
vma = vma_lookup(mm, ip);
if (vma && vma->vm_file) {
struct file *f = vma->vm_file;
ip -= vma->vm_start;
ip += vma->vm_pgoff << PAGE_SHIFT;
printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
vma->vm_start,
vma->vm_end - vma->vm_start);
}
mmap_read_unlock(mm);
}
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
if (pagefault_disabled())
return;
__might_sleep(file, line);
if (current->mm)
might_lock_read(&current->mm->mmap_lock);
}
EXPORT_SYMBOL(__might_fault);
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
* Process all subpages of the specified huge page with the specified
* operation. The target subpage will be processed last to keep its
* cache lines hot.
*/
static inline int process_huge_page(
unsigned long addr_hint, unsigned int nr_pages,
int (*process_subpage)(unsigned long addr, int idx, void *arg),
void *arg)
{
int i, n, base, l, ret;
unsigned long addr = addr_hint &
~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
/* Process target subpage last to keep its cache lines hot */
might_sleep();
n = (addr_hint - addr) / PAGE_SIZE;
if (2 * n <= nr_pages) {
/* If target subpage in first half of huge page */
base = 0;
l = n;
/* Process subpages at the end of huge page */
for (i = nr_pages - 1; i >= 2 * n; i--) {
cond_resched();
ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
if (ret)
return ret;
}
} else {
/* If target subpage in second half of huge page */
base = nr_pages - 2 * (nr_pages - n);
l = nr_pages - n;
/* Process subpages at the beginning of the huge page */
for (i = 0; i < base; i++) {
cond_resched();
ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
if (ret)
return ret;
}
}
/*
* Process remaining subpages in left-right-left-right pattern
* towards the target subpage
*/
for (i = 0; i < l; i++) {
int left_idx = base + i;
int right_idx = base + 2 * l - 1 - i;
cond_resched();
ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
if (ret)
return ret;
cond_resched();
ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
if (ret)
return ret;
}
return 0;
}
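/*
 * Worked example (hypothetical values): for nr_pages == 8 with addr_hint
 * pointing at subpage 2, n == 2, so base == 0 and l == 2.  The first loop
 * handles subpages 7, 6, 5, 4 and the left-right loop then handles
 * 0, 3, 1, 2, leaving the target subpage (2) for last so that its cache
 * lines are the hottest when the faulting task resumes.
 */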
static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
unsigned int nr_pages)
{
unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
int i;
might_sleep();
for (i = 0; i < nr_pages; i++) {
cond_resched();
clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
}
}
static int clear_subpage(unsigned long addr, int idx, void *arg)
{
struct folio *folio = arg;
clear_user_highpage(folio_page(folio, idx), addr);
return 0;
}
/**
* folio_zero_user - Zero a folio which will be mapped to userspace.
* @folio: The folio to zero.
* @addr_hint: The address that will be accessed, or the base address if unclear.
*/
void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
unsigned int nr_pages = folio_nr_pages(folio);
if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
clear_gigantic_page(folio, addr_hint, nr_pages);
else
process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
}
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
unsigned long addr_hint,
struct vm_area_struct *vma,
unsigned int nr_pages)
{
unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
struct page *dst_page;
struct page *src_page;
int i;
for (i = 0; i < nr_pages; i++) {
dst_page = folio_page(dst, i);
src_page = folio_page(src, i);
cond_resched();
if (copy_mc_user_highpage(dst_page, src_page,
addr + i*PAGE_SIZE, vma))
return -EHWPOISON;
}
return 0;
}
struct copy_subpage_arg {
struct folio *dst;
struct folio *src;
struct vm_area_struct *vma;
};
static int copy_subpage(unsigned long addr, int idx, void *arg)
{
struct copy_subpage_arg *copy_arg = arg;
struct page *dst = folio_page(copy_arg->dst, idx);
struct page *src = folio_page(copy_arg->src, idx);
if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
return -EHWPOISON;
return 0;
}
int copy_user_large_folio(struct folio *dst, struct folio *src,
unsigned long addr_hint, struct vm_area_struct *vma)
{
unsigned int nr_pages = folio_nr_pages(dst);
struct copy_subpage_arg arg = {
.dst = dst,
.src = src,
.vma = vma,
};
if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}
long copy_folio_from_user(struct folio *dst_folio,
const void __user *usr_src,
bool allow_pagefault)
{
void *kaddr;
unsigned long i, rc = 0;
unsigned int nr_pages = folio_nr_pages(dst_folio);
unsigned long ret_val = nr_pages * PAGE_SIZE;
struct page *subpage;
for (i = 0; i < nr_pages; i++) {
subpage = folio_page(dst_folio, i);
kaddr = kmap_local_page(subpage);
if (!allow_pagefault)
pagefault_disable();
rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
if (!allow_pagefault)
pagefault_enable();
kunmap_local(kaddr);
ret_val -= (PAGE_SIZE - rc);
if (rc)
break;
flush_dcache_page(subpage);
cond_resched();
}
return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
static struct kmem_cache *page_ptl_cachep;
void __init ptlock_cache_init(void)
{
page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
SLAB_PANIC, NULL);
}
bool ptlock_alloc(struct ptdesc *ptdesc)
{
spinlock_t *ptl;
ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
if (!ptl)
return false;
ptdesc->ptl = ptl;
return true;
}
void ptlock_free(struct ptdesc *ptdesc)
{
if (ptdesc->ptl)
kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
}
#endif
void vma_pgtable_walk_begin(struct vm_area_struct *vma)
{
if (is_vm_hugetlb_page(vma))
hugetlb_vma_lock_read(vma);
}
void vma_pgtable_walk_end(struct vm_area_struct *vma)
{
if (is_vm_hugetlb_page(vma))
hugetlb_vma_unlock_read(vma);
}
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
* eXtensible Arrays
* Copyright (c) 2017 Microsoft Corporation
* Author: Matthew Wilcox <willy@infradead.org>
*
* See Documentation/core-api/xarray.rst for how to use the XArray.
*/
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>
struct list_lru;
/*
* The bottom two bits of the entry determine how the XArray interprets
* the contents:
*
* 00: Pointer entry
* 10: Internal entry
* x1: Value entry or tagged pointer
*
* Attempting to store internal entries in the XArray is a bug.
*
* Most internal entries are pointers to the next node in the tree.
* The following internal entries have a special meaning:
*
* 0-62: Sibling entries
* 256: Retry entry
* 257: Zero entry
*
* Errors are also represented as internal entries, but use the negative
* space (-4094 to -2). They're never stored in the slots array; only
* returned by the normal API.
*/
#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1)
/**
* xa_mk_value() - Create an XArray entry from an integer.
* @v: Value to store in XArray.
*
* Context: Any context.
* Return: An entry suitable for storing in the XArray.
*/
static inline void *xa_mk_value(unsigned long v)
{
WARN_ON((long)v < 0);
return (void *)((v << 1) | 1);
}
/**
* xa_to_value() - Get value stored in an XArray entry.
* @entry: XArray entry.
*
* Context: Any context.
* Return: The value stored in the XArray entry.
*/
static inline unsigned long xa_to_value(const void *entry)
{
return (unsigned long)entry >> 1;
}
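/*
 * Worked example (illustrative only): xa_mk_value(42) produces the entry
 * (void *)((42 << 1) | 1) == (void *)0x55; xa_is_value() is true for it
 * because the low bit is set, and xa_to_value() shifts that bit back out
 * and returns 42.
 */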
/**
* xa_is_value() - Determine if an entry is a value.
* @entry: XArray entry.
*
* Context: Any context.
* Return: True if the entry is a value, false if it is a pointer.
*/
static inline bool xa_is_value(const void *entry)
{
return (unsigned long)entry & 1;
}
/**
* xa_tag_pointer() - Create an XArray entry for a tagged pointer.
* @p: Plain pointer.
* @tag: Tag value (0, 1 or 3).
*
* If the user of the XArray prefers, they can tag their pointers instead
* of storing value entries. Three tags are available (0, 1 and 3).
* These are distinct from the xa_mark_t as they are not replicated up
* through the array and cannot be searched for.
*
* Context: Any context.
* Return: An XArray entry.
*/
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
return (void *)((unsigned long)p | tag);
}
/**
* xa_untag_pointer() - Turn an XArray entry into a plain pointer.
* @entry: XArray entry.
*
* If you have stored a tagged pointer in the XArray, call this function
* to get the untagged version of the pointer.
*
* Context: Any context.
* Return: A pointer.
*/
static inline void *xa_untag_pointer(void *entry)
{
return (void *)((unsigned long)entry & ~3UL);
}
/**
* xa_pointer_tag() - Get the tag stored in an XArray entry.
* @entry: XArray entry.
*
* If you have stored a tagged pointer in the XArray, call this function
* to get the tag of that pointer.
*
* Context: Any context.
* Return: A tag.
*/
static inline unsigned int xa_pointer_tag(void *entry)
{
return (unsigned long)entry & 3UL;
}
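/*
 * Usage sketch for tagged pointers (hypothetical 'obj' pointer, tag 1;
 * 'obj' must be at least 4-byte aligned so the two low bits are free):
 *
 *	xa_store(&xa, index, xa_tag_pointer(obj, 1), GFP_KERNEL);
 *	entry = xa_load(&xa, index);
 *	obj = xa_untag_pointer(entry);
 *	tag = xa_pointer_tag(entry);
 */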
/*
* xa_mk_internal() - Create an internal entry.
* @v: Value to turn into an internal entry.
*
* Internal entries are used for a number of purposes. Entries 0-255 are
* used for sibling entries (only 0-62 are used by the current code). 256
* is used for the retry entry. 257 is used for the reserved / zero entry.
* Negative internal entries are used to represent errnos. Node pointers
* are also tagged as internal entries in some situations.
*
* Context: Any context.
* Return: An XArray internal entry corresponding to this value.
*/
static inline void *xa_mk_internal(unsigned long v)
{
return (void *)((v << 2) | 2);
}
/*
* xa_to_internal() - Extract the value from an internal entry.
* @entry: XArray entry.
*
* Context: Any context.
* Return: The value which was stored in the internal entry.
*/
static inline unsigned long xa_to_internal(const void *entry)
{
return (unsigned long)entry >> 2;
}
/*
* xa_is_internal() - Is the entry an internal entry?
* @entry: XArray entry.
*
* Context: Any context.
* Return: %true if the entry is an internal entry.
*/
static inline bool xa_is_internal(const void *entry)
{
return ((unsigned long)entry & 3) == 2;
}
#define XA_ZERO_ENTRY xa_mk_internal(257)
/**
* xa_is_zero() - Is the entry a zero entry?
* @entry: Entry retrieved from the XArray
*
* The normal API will return NULL as the contents of a slot containing
* a zero entry. You can only see zero entries by using the advanced API.
*
* Return: %true if the entry is a zero entry.
*/
static inline bool xa_is_zero(const void *entry)
{
return unlikely(entry == XA_ZERO_ENTRY);
}
/**
* xa_is_err() - Report whether an XArray operation returned an error
* @entry: Result from calling an XArray function
*
* If an XArray operation cannot complete an operation, it will return
* a special value indicating an error. This function tells you
* whether an error occurred; xa_err() tells you which error occurred.
*
* Context: Any context.
* Return: %true if the entry indicates an error.
*/
static inline bool xa_is_err(const void *entry)
{
return unlikely(xa_is_internal(entry) &&
entry >= xa_mk_internal(-MAX_ERRNO));
}
/**
* xa_err() - Turn an XArray result into an errno.
* @entry: Result from calling an XArray function.
*
* If an XArray operation cannot complete an operation, it will return
* a special pointer value which encodes an errno. This function extracts
* the errno from the pointer value, or returns 0 if the pointer does not
* represent an errno.
*
* Context: Any context.
* Return: A negative errno or 0.
*/
static inline int xa_err(void *entry)
{
/* xa_to_internal() would not do sign extension. */
if (xa_is_err(entry))
return (long)entry >> 2;
return 0;
}
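/*
 * Typical error-handling sketch (hypothetical store of 'item'):
 *
 *	void *curr = xa_store(&xa, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(curr))
 *		return xa_err(curr);
 *
 * On success xa_store() returns the previous entry at that index; on
 * failure it returns an internal error entry that xa_err() converts to
 * an errno.
 */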
/**
* struct xa_limit - Represents a range of IDs.
* @min: The lowest ID to allocate (inclusive).
* @max: The maximum ID to allocate (inclusive).
*
* This structure is used either directly or via the XA_LIMIT() macro
* to communicate the range of IDs that are valid for allocation.
* Three common ranges are predefined for you:
* * xa_limit_32b - [0 - UINT_MAX]
* * xa_limit_31b - [0 - INT_MAX]
* * xa_limit_16b - [0 - USHRT_MAX]
*/
struct xa_limit {
u32 max;
u32 min;
};
#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }
#define xa_limit_32b XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b XA_LIMIT(0, INT_MAX)
#define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
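/*
 * Example (hypothetical range): XA_LIMIT(1, 99) restricts ID allocation to
 * the inclusive range [1, 99] when passed as the limit argument of
 * __xa_alloc(), declared later in this header.
 */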
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0 ((__force xa_mark_t)0U)
#define XA_MARK_1 ((__force xa_mark_t)1U)
#define XA_MARK_2 ((__force xa_mark_t)2U)
#define XA_PRESENT ((__force xa_mark_t)8U)
#define XA_MARK_MAX XA_MARK_2
#define XA_FREE_MARK XA_MARK_0
enum xa_lock_type {
XA_LOCK_IRQ = 1,
XA_LOCK_BH = 2,
};
/*
* Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
* and we remain compatible with that.
*/
#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U)
#define XA_FLAGS_ACCOUNT ((__force gfp_t)32U)
#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
(__force unsigned)(mark)))
/* ALLOC is for a normal 0-based alloc. ALLOC1 is for an 1-based alloc */
#define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)
/**
* struct xarray - The anchor of the XArray.
* @xa_lock: Lock that protects the contents of the XArray.
*
* To use the xarray, define it statically or embed it in your data structure.
* It is a very small data structure, so it does not usually make sense to
* allocate it separately and keep a pointer to it in your data structure.
*
* You may use the xa_lock to protect your own data structures as well.
*/
/*
* If all of the entries in the array are NULL, @xa_head is a NULL pointer.
* If the only non-NULL entry in the array is at index 0, @xa_head is that
* entry. If any other entry in the array is non-NULL, @xa_head points
* to an @xa_node.
*/
struct xarray {
spinlock_t xa_lock;
/* private: The rest of the data structure is not to be used directly. */
gfp_t xa_flags;
void __rcu * xa_head;
};
#define XARRAY_INIT(name, flags) { \
.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
.xa_flags = flags, \
.xa_head = NULL, \
}
/**
* DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
* @name: A string that names your XArray.
* @flags: XA_FLAG values.
*
* This is intended for file scope definitions of XArrays. It declares
* and initialises an empty XArray with the chosen name and flags. It is
* equivalent to calling xa_init_flags() on the array, but it does the
* initialisation at compiletime instead of runtime.
*/
#define DEFINE_XARRAY_FLAGS(name, flags) \
struct xarray name = XARRAY_INIT(name, flags)
/**
* DEFINE_XARRAY() - Define an XArray.
* @name: A string that names your XArray.
*
* This is intended for file scope definitions of XArrays. It declares
* and initialises an empty XArray with the chosen name. It is equivalent
* to calling xa_init() on the array, but it does the initialisation at
* compiletime instead of runtime.
*/
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
/**
* DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
* @name: A string that names your XArray.
*
* This is intended for file scope definitions of allocating XArrays.
* See also DEFINE_XARRAY().
*/
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
/**
* DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
* @name: A string that names your XArray.
*
* This is intended for file scope definitions of allocating XArrays.
* See also DEFINE_XARRAY().
*/
#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)
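/*
 * Minimal usage sketch (hypothetical 'my_objects' array and 'obj' pointer):
 *
 *	DEFINE_XARRAY(my_objects);
 *
 *	xa_store(&my_objects, 5, obj, GFP_KERNEL);
 *	obj = xa_load(&my_objects, 5);
 *	xa_erase(&my_objects, 5);
 *
 * xa_store() and xa_erase() take the xa_lock internally; xa_load() only
 * needs the RCU read lock, which it also takes itself.
 */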
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);
/**
* xa_init_flags() - Initialise an empty XArray with flags.
* @xa: XArray.
* @flags: XA_FLAG values.
*
* If you need to initialise an XArray with special flags (eg you need
* to take the lock from interrupt context), use this function instead
* of xa_init().
*
* Context: Any context.
*/
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
{
spin_lock_init(&xa->xa_lock);
xa->xa_flags = flags;
xa->xa_head = NULL;
}
/**
* xa_init() - Initialise an empty XArray.
* @xa: XArray.
*
* An empty XArray is full of NULL entries.
*
* Context: Any context.
*/
static inline void xa_init(struct xarray *xa)
{
xa_init_flags(xa, 0);
}
/**
* xa_empty() - Determine if an array has any present entries.
* @xa: XArray.
*
* Context: Any context.
* Return: %true if the array contains only NULL pointers.
*/
static inline bool xa_empty(const struct xarray *xa)
{
return xa->xa_head == NULL;
}
/**
* xa_marked() - Inquire whether any entry in this array has a mark set
* @xa: Array
* @mark: Mark value
*
* Context: Any context.
* Return: %true if any entry has this mark set.
*/
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
return xa->xa_flags & XA_FLAGS_MARK(mark);
}
/**
* xa_for_each_range() - Iterate over a portion of an XArray.
* @xa: XArray.
* @index: Index of @entry.
* @entry: Entry retrieved from array.
* @start: First index to retrieve from array.
* @last: Last index to retrieve from array.
*
* During the iteration, @entry will have the value of the entry stored
* in @xa at @index. You may modify @index during the iteration if you
* want to skip or reprocess indices. It is safe to modify the array
* during the iteration. At the end of the iteration, @entry will be set
* to NULL and @index will have a value less than or equal to max.
*
* xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
* to handle your own locking with xas_for_each(), and if you have to unlock
* after each iteration, it will also end up being O(n.log(n)).
* xa_for_each_range() will spin if it hits a retry entry; if you intend to
* see retry entries, you should use the xas_for_each() iterator instead.
* The xas_for_each() iterator will expand into more inline code than
* xa_for_each_range().
*
* Context: Any context. Takes and releases the RCU lock.
*/
#define xa_for_each_range(xa, index, entry, start, last) \
for (index = start, \
entry = xa_find(xa, &index, last, XA_PRESENT); \
entry; \
entry = xa_find_after(xa, &index, last, XA_PRESENT))
/**
* xa_for_each_start() - Iterate over a portion of an XArray.
* @xa: XArray.
* @index: Index of @entry.
* @entry: Entry retrieved from array.
* @start: First index to retrieve from array.
*
* During the iteration, @entry will have the value of the entry stored
* in @xa at @index. You may modify @index during the iteration if you
* want to skip or reprocess indices. It is safe to modify the array
* during the iteration. At the end of the iteration, @entry will be set
* to NULL and @index will have a value less than or equal to max.
*
* xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
* to handle your own locking with xas_for_each(), and if you have to unlock
* after each iteration, it will also end up being O(n.log(n)).
* xa_for_each_start() will spin if it hits a retry entry; if you intend to
* see retry entries, you should use the xas_for_each() iterator instead.
* The xas_for_each() iterator will expand into more inline code than
* xa_for_each_start().
*
* Context: Any context. Takes and releases the RCU lock.
*/
#define xa_for_each_start(xa, index, entry, start) \
xa_for_each_range(xa, index, entry, start, ULONG_MAX)
/**
* xa_for_each() - Iterate over present entries in an XArray.
* @xa: XArray.
* @index: Index of @entry.
* @entry: Entry retrieved from array.
*
* During the iteration, @entry will have the value of the entry stored
* in @xa at @index. You may modify @index during the iteration if you want
* to skip or reprocess indices. It is safe to modify the array during the
* iteration. At the end of the iteration, @entry will be set to NULL and
* @index will have a value less than or equal to max.
*
* xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
* to handle your own locking with xas_for_each(), and if you have to unlock
* after each iteration, it will also end up being O(n.log(n)). xa_for_each()
* will spin if it hits a retry entry; if you intend to see retry entries,
* you should use the xas_for_each() iterator instead. The xas_for_each()
* iterator will expand into more inline code than xa_for_each().
*
* Context: Any context. Takes and releases the RCU lock.
*/
#define xa_for_each(xa, index, entry) \
xa_for_each_start(xa, index, entry, 0)
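/*
 * Example usage (illustrative sketch): walk every present entry in an
 * XArray.  The example_xa_dump() helper is hypothetical; xa_for_each()
 * takes and releases the RCU lock for each lookup as documented above.
 */
static inline void example_xa_dump(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		pr_info("index %lu: entry %p\n", index, entry);
}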
/**
* xa_for_each_marked() - Iterate over marked entries in an XArray.
* @xa: XArray.
* @index: Index of @entry.
* @entry: Entry retrieved from array.
* @filter: Selection criterion.
*
* During the iteration, @entry will have the value of the entry stored
* in @xa at @index. The iteration will skip all entries in the array
* which do not match @filter. You may modify @index during the iteration
* if you want to skip or reprocess indices. It is safe to modify the array
* during the iteration. At the end of the iteration, @entry will be set to
* NULL and @index will have a value less than or equal to max.
*
* xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
* You have to handle your own locking with xas_for_each_marked(), and if you have
* to unlock after each iteration, it will also end up being O(n.log(n)).
* xa_for_each_marked() will spin if it hits a retry entry; if you intend to
* see retry entries, you should use the xas_for_each_marked() iterator
* instead. The xas_for_each_marked() iterator will expand into more inline
* code than xa_for_each_marked().
*
* Context: Any context. Takes and releases the RCU lock.
*/
#define xa_for_each_marked(xa, index, entry, filter) \
for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
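/*
 * Example usage (illustrative sketch): set a mark on one entry and then
 * visit only marked entries.  example_walk_marked() is hypothetical;
 * XA_MARK_0 is one of the three marks every XArray provides.
 */
static inline void example_walk_marked(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	if (xa_load(xa, 5))
		xa_set_mark(xa, 5, XA_MARK_0);

	xa_for_each_marked(xa, index, entry, XA_MARK_0)
		pr_info("marked entry at index %lu\n", index);
}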
#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
#define xa_lock_nested(xa, subclass) \
spin_lock_nested(&(xa)->xa_lock, subclass)
#define xa_lock_bh_nested(xa, subclass) \
spin_lock_bh_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irq_nested(xa, subclass) \
spin_lock_irq_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irqsave_nested(xa, flags, subclass) \
spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
/*
* Versions of the normal API which require the caller to hold the
* xa_lock. If the GFP flags allow it, they will drop the lock to
* allocate memory, then reacquire it afterwards. These functions
* may also re-enable interrupts if the XArray flags indicate the
* locking should be interrupt safe.
*/
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
int __must_check __xa_insert(struct xarray *, unsigned long index,
void *entry, gfp_t);
int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
struct xa_limit, gfp_t);
int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
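/*
 * Example usage (illustrative sketch): the __xa_*() variants let a caller
 * combine several operations under a single hold of the xa_lock.  The
 * example_swap() helper is hypothetical and error handling is elided.
 */
static inline void example_swap(struct xarray *xa, unsigned long a,
				unsigned long b)
{
	void *first, *second;

	xa_lock(xa);
	first = __xa_erase(xa, a);
	second = __xa_erase(xa, b);
	/* GFP_ATOMIC prevents __xa_store() from dropping the lock to allocate. */
	__xa_store(xa, a, second, GFP_ATOMIC);
	__xa_store(xa, b, first, GFP_ATOMIC);
	xa_unlock(xa);
}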
/**
* xa_store_bh() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* This function is like calling xa_store() except it disables softirqs
* while holding the array lock.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;
might_alloc(gfp);
xa_lock_bh(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_bh(xa);
return curr;
}
/**
* xa_store_irq() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* This function is like calling xa_store() except it disables interrupts
* while holding the array lock.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
* Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;
might_alloc(gfp);
xa_lock_irq(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_irq(xa);
return curr;
}
/**
* xa_erase_bh() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
*
* After this function returns, loading from @index will return %NULL.
* If the index is part of a multi-index entry, all indices will be erased
* and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: The entry which used to be at this index.
*/
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
void *entry;
xa_lock_bh(xa);
entry = __xa_erase(xa, index);
xa_unlock_bh(xa);
return entry;
}
/**
* xa_erase_irq() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
*
* After this function returns, loading from @index will return %NULL.
* If the index is part of a multi-index entry, all indices will be erased
* and none of the entries will be part of a multi-index entry.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
* Return: The entry which used to be at this index.
*/
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
void *entry;
xa_lock_irq(xa);
entry = __xa_erase(xa, index);
xa_unlock_irq(xa);
return entry;
}
/**
* xa_cmpxchg() - Conditionally replace an entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
* @entry: New value to place in array.
* @gfp: Memory allocation flags.
*
* If the entry at @index is the same as @old, replace it with @entry.
* If the return value is equal to @old, then the exchange was successful.
*
* Context: Any context. Takes and releases the xa_lock. May sleep
* if the @gfp flags permit.
* Return: The old value at this index or xa_err() if an error happened.
*/
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
void *curr;
might_alloc(gfp);
xa_lock(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock(xa);
return curr;
}
/**
* xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
* @entry: New value to place in array.
* @gfp: Memory allocation flags.
*
* This function is like calling xa_cmpxchg() except it disables softirqs
* while holding the array lock.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: The old value at this index or xa_err() if an error happened.
*/
static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
void *curr;
might_alloc(gfp);
xa_lock_bh(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock_bh(xa);
return curr;
}
/**
* xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
* @entry: New value to place in array.
* @gfp: Memory allocation flags.
*
* This function is like calling xa_cmpxchg() except it disables interrupts
* while holding the array lock.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: The old value at this index or xa_err() if an error happened.
*/
static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
void *curr;
might_alloc(gfp);
xa_lock_irq(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock_irq(xa);
return curr;
}
/**
* xa_insert() - Store this entry in the XArray unless another entry is
* already present.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
* if no entry is present. Inserting will fail if a reserved entry is
* present, even though loading from this index will return NULL.
*
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
static inline int __must_check xa_insert(struct xarray *xa,
unsigned long index, void *entry, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock(xa);
return err;
}
/**
* xa_insert_bh() - Store this entry in the XArray unless another entry is
* already present.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
* if no entry is present. Inserting will fail if a reserved entry is
* present, even though loading from this index will return NULL.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
static inline int __must_check xa_insert_bh(struct xarray *xa,
unsigned long index, void *entry, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock_bh(xa);
return err;
}
/**
* xa_insert_irq() - Store this entry in the XArray unless another entry is
* already present.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
* if no entry is present. Inserting will fail if a reserved entry is
* present, even though loading from this index will return NULL.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
static inline int __must_check xa_insert_irq(struct xarray *xa,
unsigned long index, void *entry, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_insert(xa, index, entry, gfp);
xa_unlock_irq(xa);
return err;
}
/**
* xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
* Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
* set in xa_init_flags().
*
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
* -EBUSY if there are no free entries in @limit.
*/
static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock(xa);
return err;
}
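/*
 * Example usage (illustrative sketch): allocate an ID for a new object.
 * struct example_obj and example_register() are hypothetical; the array
 * must have been initialised with xa_init_flags(xa, XA_FLAGS_ALLOC).
 */
struct example_obj {
	u32 id;
};

static inline int example_register(struct xarray *xa, struct example_obj *obj)
{
	/* On success, obj->id holds the allocated index. */
	return xa_alloc(xa, &obj->id, obj, xa_limit_32b, GFP_KERNEL);
}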
/**
* xa_alloc_bh() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
* Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
* set in xa_init_flags().
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
* -EBUSY if there are no free entries in @limit.
*/
static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock_bh(xa);
return err;
}
/**
* xa_alloc_irq() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
* Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
* set in xa_init_flags().
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
* -EBUSY if there are no free entries in @limit.
*/
static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock_irq(xa);
return err;
}
/**
* xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of allocated ID.
* @next: Pointer to next ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
* Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
* set in xa_init_flags().
*
* Note that callers interested in whether wrapping has occurred should
* use __xa_alloc_cyclic() instead.
*
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock(xa);
return err < 0 ? err : 0;
}
/**
* xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of allocated ID.
* @next: Pointer to next ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
* Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
* set in xa_init_flags().
*
* Note that callers interested in whether wrapping has occurred should
* use __xa_alloc_cyclic() instead.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock_bh(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_bh(xa);
return err < 0 ? err : 0;
}
/**
* xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of allocated ID.
* @next: Pointer to next ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
* Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
* set in xa_init_flags().
*
* Note that callers interested in whether wrapping has occurred should
* use __xa_alloc_cyclic() instead.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
int err;
might_alloc(gfp);
xa_lock_irq(xa);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_irq(xa);
return err < 0 ? err : 0;
}
/**
* xa_reserve() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
*
* Ensures there is somewhere to store an entry at @index in the array.
* If there is already something stored at @index, this function does
* nothing. If there was nothing there, the entry is marked as reserved.
* Loading from a reserved entry returns a %NULL pointer.
*
* If you do not use the entry that you have reserved, call xa_release()
* or xa_erase() to free any unnecessary memory.
*
* Context: Any context. Takes and releases the xa_lock.
* May sleep if the @gfp flags permit.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}
/**
* xa_reserve_bh() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
*
* A softirq-disabling version of xa_reserve().
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}
/**
* xa_reserve_irq() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
*
* An interrupt-disabling version of xa_reserve().
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}
/**
* xa_release() - Release a reserved entry.
* @xa: XArray.
* @index: Index of entry.
*
* After calling xa_reserve(), you can call this function to release the
* reservation. If the entry at @index has been stored to, this function
* will do nothing.
*/
static inline void xa_release(struct xarray *xa, unsigned long index)
{
xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
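/*
 * Example usage (illustrative sketch): reserve an index up front so a later
 * store at that index does not need to allocate, then either publish the
 * object or return the reservation.  example_publish() is hypothetical.
 */
static inline int example_publish(struct xarray *xa, unsigned long index,
				  void *object, bool commit)
{
	int err = xa_reserve(xa, index, GFP_KERNEL);

	if (err)
		return err;
	if (commit)
		return xa_err(xa_store(xa, index, object, GFP_KERNEL));
	xa_release(xa, index);
	return 0;
}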
/* Everything below here is the Advanced API. Proceed with caution. */
/*
* The xarray is constructed out of a set of 'chunks' of pointers. Choosing
* the best chunk size requires some tradeoffs. A power of two recommends
* itself so that we can walk the tree based purely on shifts and masks.
* Generally, the larger the better; as the number of slots per level of the
* tree increases, the less tall the tree needs to be. But that needs to be
* balanced against the memory consumption of each node. On a 64-bit system,
* xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
* doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
*/
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT (IS_ENABLED(CONFIG_BASE_SMALL) ? 4 : 6)
#endif
#define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS 3
#define XA_MARK_LONGS BITS_TO_LONGS(XA_CHUNK_SIZE)
/*
* @count is the count of every non-NULL element in the ->slots array
* whether that is a value entry, a retry entry, a user pointer,
* a sibling entry or a pointer to the next level of the tree.
* @nr_values is the count of every element in ->slots which is
* either a value entry or a sibling of a value entry.
*/
struct xa_node {
unsigned char shift; /* Bits remaining in each slot */
unsigned char offset; /* Slot offset in parent */
unsigned char count; /* Total entry count */
unsigned char nr_values; /* Value entry count */
struct xa_node __rcu *parent; /* NULL at top of tree */
struct xarray *array; /* The array we belong to */
union {
struct list_head private_list; /* For tree user */
struct rcu_head rcu_head; /* Used when freeing node */
};
void __rcu *slots[XA_CHUNK_SIZE];
union {
unsigned long tags[XA_MAX_MARKS][XA_MARK_LONGS];
unsigned long marks[XA_MAX_MARKS][XA_MARK_LONGS];
};
};
void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);
#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do { \
if (x) { \
xa_dump(xa); \
BUG(); \
} \
} while (0)
#define XA_NODE_BUG_ON(node, x) do { \
if (x) { \
if (node) xa_dump_node(node); \
BUG(); \
} \
} while (0)
#else
#define XA_BUG_ON(xa, x) do { } while (0)
#define XA_NODE_BUG_ON(node, x) do { } while (0)
#endif
/* Private */
static inline void *xa_head(const struct xarray *xa)
{
return rcu_dereference_check(xa->xa_head,
lockdep_is_held(&xa->xa_lock));
}
/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
return rcu_dereference_protected(xa->xa_head,
lockdep_is_held(&xa->xa_lock));
}
/* Private */
static inline void *xa_entry(const struct xarray *xa,
const struct xa_node *node, unsigned int offset)
{
XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
return rcu_dereference_check(node->slots[offset],
lockdep_is_held(&xa->xa_lock));
}
/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
const struct xa_node *node, unsigned int offset)
{
XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
return rcu_dereference_protected(node->slots[offset],
lockdep_is_held(&xa->xa_lock));
}
/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
const struct xa_node *node)
{
return rcu_dereference_check(node->parent,
lockdep_is_held(&xa->xa_lock));
}
/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
const struct xa_node *node)
{
return rcu_dereference_protected(node->parent,
lockdep_is_held(&xa->xa_lock));
}
/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
return (void *)((unsigned long)node | 2);
}
/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
return (struct xa_node *)((unsigned long)entry - 2);
}
/* Private */
static inline bool xa_is_node(const void *entry)
{
return xa_is_internal(entry) && (unsigned long)entry > 4096;
}
/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
return xa_mk_internal(offset);
}
/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
return xa_to_internal(entry);
}
/**
* xa_is_sibling() - Is the entry a sibling entry?
* @entry: Entry retrieved from the XArray
*
* Return: %true if the entry is a sibling entry.
*/
static inline bool xa_is_sibling(const void *entry)
{
return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}
#define XA_RETRY_ENTRY xa_mk_internal(256)
/**
* xa_is_retry() - Is the entry a retry entry?
* @entry: Entry retrieved from the XArray
*
* Return: %true if the entry is a retry entry.
*/
static inline bool xa_is_retry(const void *entry)
{
return unlikely(entry == XA_RETRY_ENTRY);
}
/**
* xa_is_advanced() - Is the entry only permitted for the advanced API?
* @entry: Entry to be stored in the XArray.
*
* Return: %true if the entry cannot be stored by the normal API.
*/
static inline bool xa_is_advanced(const void *entry)
{
return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}
/**
* typedef xa_update_node_t - A callback function from the XArray.
* @node: The node which is being processed
*
* This function is called every time the XArray updates the count of
* present and value entries in a node. It allows advanced users to
* maintain the private_list in the node.
*
* Context: The xa_lock is held and interrupts may be disabled.
* Implementations should not drop the xa_lock, nor re-enable
* interrupts.
*/
typedef void (*xa_update_node_t)(struct xa_node *node);
void xa_delete_node(struct xa_node *, xa_update_node_t);
/*
* The xa_state is opaque to its users. It contains various different pieces
* of state involved in the current operation on the XArray. It should be
* declared on the stack and passed between the various internal routines.
* The various elements in it should not be accessed directly, but only
* through the provided accessor functions. The below documentation is for
* the benefit of those working on the code, not for users of the XArray.
*
* @xa_node usually points to the xa_node containing the slot we're operating
* on (and @xa_offset is the offset in the slots array). If there is a
* single entry in the array at index 0, there are no allocated xa_nodes to
* point to, and so we store %NULL in @xa_node. @xa_node is set to
* the value %XAS_RESTART if the xa_state is not walked to the correct
* position in the tree of nodes for this operation. If an error occurs
* during an operation, it is set to an %XAS_ERROR value. If we run off the
* end of the allocated nodes, it is set to %XAS_BOUNDS.
*/
struct xa_state {
struct xarray *xa;
unsigned long xa_index;
unsigned char xa_shift;
unsigned char xa_sibs;
unsigned char xa_offset;
unsigned char xa_pad; /* Helps gcc generate better code */
struct xa_node *xa_node;
struct xa_node *xa_alloc;
xa_update_node_t xa_update;
struct list_lru *xa_lru;
};
/*
* We encode errnos in the xas->xa_node. If an error has happened, we need to
* drop the lock to fix it, and once we've done so the xa_state is invalid.
*/
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS ((struct xa_node *)1UL)
#define XAS_RESTART ((struct xa_node *)3UL)
#define __XA_STATE(array, index, shift, sibs) { \
.xa = array, \
.xa_index = index, \
.xa_shift = shift, \
.xa_sibs = sibs, \
.xa_offset = 0, \
.xa_pad = 0, \
.xa_node = XAS_RESTART, \
.xa_alloc = NULL, \
.xa_update = NULL, \
.xa_lru = NULL, \
}
/**
* XA_STATE() - Declare an XArray operation state.
* @name: Name of this operation state (usually xas).
* @array: Array to operate on.
* @index: Initial index of interest.
*
* Declare and initialise an xa_state on the stack.
*/
#define XA_STATE(name, array, index) \
struct xa_state name = __XA_STATE(array, index, 0, 0)
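/*
 * Example usage (illustrative sketch): the canonical advanced-API store
 * loop.  xas_nomem() allocates memory with the lock dropped and asks us to
 * retry; example_xas_store() is a hypothetical wrapper.
 */
static inline int example_xas_store(struct xarray *xa, unsigned long index,
				    void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}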
/**
* XA_STATE_ORDER() - Declare an XArray operation state.
* @name: Name of this operation state (usually xas).
* @array: Array to operate on.
* @index: Initial index of interest.
* @order: Order of entry.
*
* Declare and initialise an xa_state on the stack. This variant of
* XA_STATE() allows you to specify the 'order' of the element you
* want to operate on.
*/
#define XA_STATE_ORDER(name, array, index, order) \
struct xa_state name = __XA_STATE(array, \
(index >> order) << order, \
order - (order % XA_CHUNK_SHIFT), \
(1U << (order % XA_CHUNK_SHIFT)) - 1)
#define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
#define xas_trylock(xas) xa_trylock((xas)->xa)
#define xas_lock(xas) xa_lock((xas)->xa)
#define xas_unlock(xas) xa_unlock((xas)->xa)
#define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
xa_unlock_irqrestore((xas)->xa, flags)
/**
* xas_error() - Return an errno stored in the xa_state.
* @xas: XArray operation state.
*
* Return: 0 if no error has been noted. A negative errno if one has.
*/
static inline int xas_error(const struct xa_state *xas)
{
return xa_err(xas->xa_node);
}
/**
* xas_set_err() - Note an error in the xa_state.
* @xas: XArray operation state.
* @err: Negative error number.
*
* Only call this function with a negative @err; zero or positive errors
* will probably not behave the way you think they should. If you want
* to clear the error from an xa_state, use xas_reset().
*/
static inline void xas_set_err(struct xa_state *xas, long err)
{
xas->xa_node = XA_ERROR(err);
}
/**
* xas_invalid() - Is the xas in a retry or error state?
* @xas: XArray operation state.
*
* Return: %true if the xas cannot be used for operations.
*/
static inline bool xas_invalid(const struct xa_state *xas)
{
return (unsigned long)xas->xa_node & 3;
}
/**
* xas_valid() - Is the xas a valid cursor into the array?
* @xas: XArray operation state.
*
* Return: %true if the xas can be used for operations.
*/
static inline bool xas_valid(const struct xa_state *xas)
{
return !xas_invalid(xas);
}
/**
* xas_is_node() - Does the xas point to a node?
* @xas: XArray operation state.
*
* Return: %true if the xas currently references a node.
*/
static inline bool xas_is_node(const struct xa_state *xas)
{
return xas_valid(xas) && xas->xa_node;
}
/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
return ((unsigned long)node & 3) || !node;
}
/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
return (unsigned long)node & 2;
}
/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
return node <= XAS_RESTART;
}
/**
* xas_reset() - Reset an XArray operation state.
* @xas: XArray operation state.
*
* Resets the error or walk state of the @xas so future walks of the
* array will start from the root. Use this if you have dropped the
* xarray lock and want to reuse the xa_state.
*
* Context: Any context.
*/
static inline void xas_reset(struct xa_state *xas)
{
xas->xa_node = XAS_RESTART;
}
/**
* xas_retry() - Retry the operation if appropriate.
* @xas: XArray operation state.
* @entry: Entry from xarray.
*
* The advanced functions may sometimes return an internal entry, such as
* a retry entry or a zero entry. This function sets up the @xas to restart
* the walk from the head of the array if needed.
*
* Context: Any context.
* Return: true if the operation needs to be retried.
*/
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
if (xa_is_zero(entry))
return true;
if (!xa_is_retry(entry))
return false;
xas_reset(xas);
return true;
}
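/*
 * Example usage (illustrative sketch): a lockless lookup which restarts the
 * walk whenever it observes a retry entry.  example_rcu_load() is
 * hypothetical.
 */
static inline void *example_rcu_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}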
void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);
bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);
bool xas_nomem(struct xa_state *, gfp_t);
void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);
void xas_create_range(struct xa_state *);
#ifdef CONFIG_XARRAY_MULTI
int xa_get_order(struct xarray *, unsigned long index);
int xas_get_order(struct xa_state *xas);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
unsigned int xas_try_split_min_order(unsigned int order);
#else
static inline int xa_get_order(struct xarray *xa, unsigned long index)
{
return 0;
}
static inline int xas_get_order(struct xa_state *xas)
{
return 0;
}
static inline void xas_split(struct xa_state *xas, void *entry,
unsigned int order)
{
xas_store(xas, entry);
}
static inline void xas_split_alloc(struct xa_state *xas, void *entry,
unsigned int order, gfp_t gfp)
{
}
static inline void xas_try_split(struct xa_state *xas, void *entry,
unsigned int order)
{
}
static inline unsigned int xas_try_split_min_order(unsigned int order)
{
return 0;
}
#endif
/**
* xas_reload() - Refetch an entry from the xarray.
* @xas: XArray operation state.
*
* Use this function to check that a previously loaded entry still has
* the same value. This is useful for the lockless pagecache lookup where
* we walk the array with only the RCU lock to protect us, lock the page,
* then check that the page hasn't moved since we looked it up.
*
* The caller guarantees that @xas is still valid. If it may be in an
* error or restart state, call xas_load() instead.
*
* Return: The entry at this location in the xarray.
*/
static inline void *xas_reload(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
void *entry;
char offset;
if (!node)
return xa_head(xas->xa);
if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
entry = xa_entry(xas->xa, node, offset);
if (!xa_is_sibling(entry))
return entry;
offset = xa_to_sibling(entry);
} else {
offset = xas->xa_offset;
}
return xa_entry(xas->xa, node, offset);
}
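/*
 * Example usage (illustrative sketch) of the "load, pin, reload" pattern
 * described above.  The tryget/put callbacks stand in for the caller's
 * reference-counting helpers; example_pinned_load() is hypothetical.
 */
static inline void *example_pinned_load(struct xarray *xa, unsigned long index,
					bool (*tryget)(void *),
					void (*put)(void *))
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	entry = xas_load(&xas);
	if (xas_retry(&xas, entry))
		goto repeat;
	if (entry && !tryget(entry))
		goto repeat;
	/* Check that the slot still holds the object we just pinned. */
	if (entry && xas_reload(&xas) != entry) {
		put(entry);
		goto repeat;
	}
	rcu_read_unlock();
	return entry;
}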
/**
* xas_set() - Set up XArray operation state for a different index.
* @xas: XArray operation state.
* @index: New index into the XArray.
*
* Move the operation state to refer to a different index. This will
* have the effect of starting a walk from the top; see xas_next()
* to move to an adjacent index.
*/
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
xas->xa_index = index;
xas->xa_node = XAS_RESTART;
}
/**
* xas_advance() - Skip over sibling entries.
* @xas: XArray operation state.
* @index: Index of last sibling entry.
*
* Move the operation state to refer to the last sibling entry.
* This is useful for loops that normally want to see sibling
* entries but sometimes want to skip them. Use xas_set() if you
* want to move to an index which is not part of this entry.
*/
static inline void xas_advance(struct xa_state *xas, unsigned long index)
{
unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;
xas->xa_index = index;
xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
}
/**
* xas_set_order() - Set up XArray operation state for a multislot entry.
* @xas: XArray operation state.
* @index: Target of the operation.
* @order: Entry occupies 2^@order indices.
*/
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
xas->xa_node = XAS_RESTART;
#else
BUG_ON(order > 0);
xas_set(xas, index);
#endif
}
/**
* xas_set_update() - Set up XArray operation state for a callback.
* @xas: XArray operation state.
* @update: Function to call when updating a node.
*
* The XArray can notify a caller after it has updated an xa_node.
* This is advanced functionality and is only needed by the page
* cache and swap cache.
*/
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
xas->xa_update = update;
}
static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
{
xas->xa_lru = lru;
}
/**
* xas_next_entry() - Advance iterator to next present entry.
* @xas: XArray operation state.
* @max: Highest index to return.
*
* xas_next_entry() is an inline function to optimise xarray traversal for
* speed. It is equivalent to calling xas_find(), and will call xas_find()
* for all the hard cases.
*
* Return: The next present entry after the one currently referred to by @xas.
*/
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
struct xa_node *node = xas->xa_node;
void *entry;
if (unlikely(xas_not_node(node) || node->shift ||
xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
return xas_find(xas, max);
do {
if (unlikely(xas->xa_index >= max))
return xas_find(xas, max);
if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
return xas_find(xas, max);
entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
if (unlikely(xa_is_internal(entry)))
return xas_find(xas, max);
xas->xa_offset++;
xas->xa_index++;
} while (!entry);
return entry;
}
/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
xa_mark_t mark)
{
unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
unsigned int offset = xas->xa_offset;
if (advance)
offset++;
if (XA_CHUNK_SIZE == BITS_PER_LONG) {
if (offset < XA_CHUNK_SIZE) {
unsigned long data = *addr & (~0UL << offset);
if (data)
return __ffs(data);
}
return XA_CHUNK_SIZE;
}
return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}
/**
* xas_next_marked() - Advance iterator to next marked entry.
* @xas: XArray operation state.
* @max: Highest index to return.
* @mark: Mark to search for.
*
* xas_next_marked() is an inline function to optimise xarray traversal for
* speed. It is equivalent to calling xas_find_marked(), and will call
* xas_find_marked() for all the hard cases.
*
* Return: The next marked entry after the one currently referred to by @xas.
*/
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
void *entry;
unsigned int offset;
if (unlikely(xas_not_node(node) || node->shift))
return xas_find_marked(xas, max, mark);
offset = xas_find_chunk(xas, true, mark);
xas->xa_offset = offset;
xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
if (xas->xa_index > max)
return NULL;
if (offset == XA_CHUNK_SIZE)
return xas_find_marked(xas, max, mark);
entry = xa_entry(xas->xa, node, offset);
if (!entry)
return xas_find_marked(xas, max, mark);
return entry;
}
/*
* If iterating while holding a lock, drop the lock and reschedule
* every %XA_CHECK_SCHED loops.
*/
enum {
XA_CHECK_SCHED = 4096,
};
/**
* xas_for_each() - Iterate over a range of an XArray.
* @xas: XArray operation state.
* @entry: Entry retrieved from the array.
* @max: Maximum index to retrieve from array.
*
* The loop body will be executed for each entry present in the xarray
* between the current xas position and @max. @entry will be set to
* the entry retrieved from the xarray. It is safe to delete entries
* from the array in the loop body. You should hold either the RCU lock
* or the xa_lock while iterating. If you need to drop the lock, call
* xas_pause() first.
*/
#define xas_for_each(xas, entry, max) \
for (entry = xas_find(xas, max); entry; \
entry = xas_next_entry(xas, max))
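/*
 * Example usage (illustrative sketch): iterate under the RCU lock, dropping
 * it every XA_CHECK_SCHED entries via xas_pause() as recommended above.
 * example_count_entries() is hypothetical; cond_resched() comes from
 * <linux/sched.h>.
 */
static inline unsigned long example_count_entries(struct xarray *xa,
						  unsigned long max)
{
	XA_STATE(xas, xa, 0);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, max) {
		if (xas_retry(&xas, entry))
			continue;
		if (++count % XA_CHECK_SCHED)
			continue;
		xas_pause(&xas);
		rcu_read_unlock();
		cond_resched();
		rcu_read_lock();
	}
	rcu_read_unlock();

	return count;
}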
/**
* xas_for_each_marked() - Iterate over a range of an XArray.
* @xas: XArray operation state.
* @entry: Entry retrieved from the array.
* @max: Maximum index to retrieve from array.
* @mark: Mark to search for.
*
* The loop body will be executed for each marked entry in the xarray
* between the current xas position and @max. @entry will be set to
* the entry retrieved from the xarray. It is safe to delete entries
* from the array in the loop body. You should hold either the RCU lock
* or the xa_lock while iterating. If you need to drop the lock, call
* xas_pause() first.
*/
#define xas_for_each_marked(xas, entry, max, mark) \
for (entry = xas_find_marked(xas, max, mark); entry; \
entry = xas_next_marked(xas, max, mark))
/**
* xas_for_each_conflict() - Iterate over a range of an XArray.
* @xas: XArray operation state.
* @entry: Entry retrieved from the array.
*
* The loop body will be executed for each entry in the XArray that
* lies within the range specified by @xas. If the loop terminates
* normally, @entry will be %NULL. The user may break out of the loop,
* which will leave @entry set to the conflicting entry. The caller
* may also call xas_set_err() to exit the loop while setting an error
* to record the reason.
*/
#define xas_for_each_conflict(xas, entry) \
while ((entry = xas_find_conflict(xas)))
void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);
/**
* xas_prev() - Move iterator to previous index.
* @xas: XArray operation state.
*
* If the @xas was in an error state, it will remain in an error state
* and this function will return %NULL. If the @xas has never been walked,
* it will have the effect of calling xas_load(). Otherwise one will be
* subtracted from the index and the state will be walked to the correct
* location in the array for the next operation.
*
* If the iterator was referencing index 0, this function wraps
* around to %ULONG_MAX.
*
* Return: The entry at the new index. This may be %NULL or an internal
* entry.
*/
static inline void *xas_prev(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
if (unlikely(xas_not_node(node) || node->shift ||
xas->xa_offset == 0))
return __xas_prev(xas);
xas->xa_index--;
xas->xa_offset--;
return xa_entry(xas->xa, node, xas->xa_offset);
}
/**
* xas_next() - Move state to next index.
* @xas: XArray operation state.
*
* If the @xas was in an error state, it will remain in an error state
* and this function will return %NULL. If the @xas has never been walked,
* it will have the effect of calling xas_load(). Otherwise one will be
* added to the index and the state will be walked to the correct
* location in the array for the next operation.
*
* If the iterator was referencing index %ULONG_MAX, this function wraps
* around to 0.
*
* Return: The entry at the new index. This may be %NULL or an internal
* entry.
*/
static inline void *xas_next(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
if (unlikely(xas_not_node(node) || node->shift ||
xas->xa_offset == XA_CHUNK_MASK))
return __xas_next(xas);
xas->xa_index++;
xas->xa_offset++;
return xa_entry(xas->xa, node, xas->xa_offset);
}
#endif /* _LINUX_XARRAY_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H
/*
* Interface between the scheduler and various task lifetime (fork()/exit())
* functionality:
*/
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
struct task_struct;
struct rusage;
union thread_union;
struct css_set;
/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL
struct kernel_clone_args {
u64 flags;
int __user *pidfd;
int __user *child_tid;
int __user *parent_tid;
const char *name;
int exit_signal;
u32 kthread:1;
u32 io_thread:1;
u32 user_worker:1;
u32 no_files:1;
unsigned long stack;
unsigned long stack_size;
unsigned long tls;
pid_t *set_tid;
/* Number of elements in *set_tid */
size_t set_tid_size;
int cgroup;
int idle;
int (*fn)(void *);
void *fn_arg;
struct cgroup *cgrp;
struct css_set *cset;
unsigned int kill_seq;
};
/*
* This serializes "schedule()" and also protects
* the run-queue from deletions/modifications (but
* _adding_ to the beginning of the run-queue has
* a separate lock).
*/
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
extern union thread_union init_thread_union;
extern struct task_struct init_task;
extern int lockdep_tasklist_lock_is_held(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(u64 clone_flags, struct task_struct *p);
extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_cancel_fork(struct task_struct *p);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
void __noreturn make_task_dead(int signr);
extern void mm_cache_init(void);
extern void proc_caches_init(void);
extern void fork_init(void);
extern void release_task(struct task_struct * p);
extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
extern void flush_thread(void);
#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern __noreturn void do_group_exit(int);
extern void exit_files(struct task_struct *);
extern void exit_itimers(struct task_struct *);
extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *copy_process(struct pid *pid, int trace, int node,
struct kernel_clone_args *args);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
unsigned long flags);
extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
extern void free_task(struct task_struct *tsk);
/* sched_exec is called by processes performing an exec */
extern void sched_exec(void);
static inline struct task_struct *get_task_struct(struct task_struct *t)
{
refcount_inc(&t->usage);
return t;
}
static inline struct task_struct *tryget_task_struct(struct task_struct *t)
{
return refcount_inc_not_zero(&t->usage) ? t : NULL;
}
extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
static inline void put_task_struct(struct task_struct *t)
{
if (!refcount_dec_and_test(&t->usage))
return;
/*
* Under PREEMPT_RT, we can't call __put_task_struct
* in atomic context because it will indirectly
* acquire sleeping locks. The same is true if the
* current process has a mutex enqueued (blocked on
* a PI chain).
*
* In !RT, it is always safe to call __put_task_struct().
* Though, in order to simplify the code, resort to the
* deferred call too.
*
* call_rcu() will schedule __put_task_struct_rcu_cb()
* to be called in process context.
*
* __put_task_struct() is called when
* refcount_dec_and_test(&t->usage) succeeds.
*
* This means that it can't "conflict" with
* put_task_struct_rcu_user() which abuses ->rcu the same
* way; rcu_users has a reference so task->usage can't be
* zero after rcu_users 1 -> 0 transition.
*
* delayed_free_task() also uses ->rcu, but it is only called
* when it fails to fork a process. Therefore, there is no
* way it can conflict with __put_task_struct().
*/
call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
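/*
 * Example usage (illustrative sketch): hold a reference for as long as a
 * task pointer is stored.  struct example_work, example_attach() and
 * example_detach() are hypothetical.
 */
struct example_work {
	struct task_struct *owner;
};

static inline void example_attach(struct example_work *w,
				  struct task_struct *task)
{
	w->owner = get_task_struct(task);	/* take a reference */
}

static inline void example_detach(struct example_work *w)
{
	put_task_struct(w->owner);	/* freeing may be deferred via RCU */
	w->owner = NULL;
}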
DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
static inline void put_task_struct_many(struct task_struct *t, int nr)
{
if (refcount_sub_and_test(nr, &t->usage))
__put_task_struct(t);
}
void put_task_struct_rcu_user(struct task_struct *task);
/* Free all architecture-specific resources held by a thread. */
void release_thread(struct task_struct *dead_task);
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
* If an architecture has not declared a thread_struct whitelist we
* must assume something there may need to be copied to userspace.
*/
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{
*offset = 0;
/* Handle dynamically sized thread_struct. */
*size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
return NULL;
}
#endif
/*
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
* ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
* Nests inside of read_lock(&tasklist_lock). It must not be nested with
* write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
{
spin_unlock(&p->alloc_lock);
}
DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
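/*
 * Example usage (illustrative sketch): task_lock() stabilises ->comm and the
 * other fields listed above while they are read.  example_get_comm() is
 * hypothetical; the kernel's get_task_comm() exists for real users.
 */
static inline void example_get_comm(char *buf, size_t size,
				    struct task_struct *task)
{
	task_lock(task);
	strscpy(buf, task->comm, size);
	task_unlock(task);
}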
#endif /* _LINUX_SCHED_TASK_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Hash: Hash algorithms under the crypto API
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*/
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
/* Set this bit for virtual address instead of SG list. */
#define CRYPTO_AHASH_REQ_VIRT 0x00000001
#define CRYPTO_AHASH_REQ_PRIVATE \
CRYPTO_AHASH_REQ_VIRT
struct crypto_ahash;
/**
* DOC: Message Digest Algorithm Definitions
*
* These data structures define modular message digest algorithm
* implementations, managed via crypto_register_ahash(),
* crypto_register_shash(), crypto_unregister_ahash() and
* crypto_unregister_shash().
*/
/*
* struct hash_alg_common - define properties of message digest
* @digestsize: Size of the result of the transformation. A buffer of this size
* must be available to the @final and @finup calls, so they can
* store the resulting hash into it. For various predefined sizes,
* search include/crypto/ using
* git grep _DIGEST_SIZE include/crypto.
* @statesize: Size of the block for partial state of the transformation. A
* buffer of this size must be passed to the @export function as it
* will save the partial state of the transformation into it. On the
* other side, the @import function will load the state from a
* buffer of this size as well.
* @base: Start of data structure of cipher algorithm. The common data
* structure of crypto_alg contains information common to all ciphers.
* The hash_alg_common data structure now adds the hash-specific
* information.
*/
#define HASH_ALG_COMMON { \
unsigned int digestsize; \
unsigned int statesize; \
\
struct crypto_alg base; \
}
struct hash_alg_common HASH_ALG_COMMON;
struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
union {
struct scatterlist *src;
const u8 *svirt;
};
u8 *result;
struct scatterlist sg_head[2];
crypto_completion_t saved_complete;
void *saved_data;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/**
* struct ahash_alg - asynchronous message digest definition
* @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point. Driver code
* implementation must not use req->result.
* @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
* function must not finalize the HASH transformation by calculating the
* final message digest as this only adds more data into the
* transformation. This function shall not modify the transformation
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point. Driver must not use
* req->result.
* For block-only algorithms, @update must return the number
* of bytes to store in the API partial block buffer.
* @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
* point unless hardware requires it to finish the transformation
* (then the data buffered by the device driver is processed).
* @finup: **[optional]** Combination of @update and @final. This function is effectively a
* combination of @update and @final calls issued in sequence. As some
* hardware cannot do @update and @final separately, this callback was
* added to allow such hardware to be used at least by IPsec. Data
* processing can happen synchronously [SHASH] or asynchronously [AHASH]
* at this point.
* @digest: Combination of @init and @update and @final. This function
* effectively behaves as the entire chain of operations, @init,
* @update and @final issued in sequence. Just like @finup, this was
* added for hardware which cannot do even the @finup, but can only do
* the whole transformation in one run. Data processing can happen
* synchronously [SHASH] or asynchronously [AHASH] at this point.
* @setkey: Set optional key used by the hashing algorithm. Intended to push
* optional key used by the hashing algorithm from upper layers into
* the driver. This function can store the key in the transformation
* context or can outright program it into the hardware. In the former
* case, one must be careful to program the key into the hardware at
* appropriate time and one must be careful that .setkey() can be
* called multiple times during the existence of the transformation
* object. Not all hashing algorithms do implement this function as it
* is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
* implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
* this function. This function must be called before any other of the
* @init, @update, @final, @finup, @digest is called. No data
* processing happens at this point.
* @export: Export partial state of the transformation. This function dumps the
* entire state of the ongoing transformation into a provided block of
* data so it can be @import 'ed back later on. This is useful in case
* you want to save partial result of the transformation after
* processing certain amount of data and reload this partial result
* multiple times later on for multiple re-use. No data processing
* happens at this point. Driver must not use req->result.
* @import: Import partial state of the transformation. This function loads the
* entire state of the ongoing transformation from a provided block of
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
* @export_core: Export partial state without partial block. Only defined
* for algorithms that are not block-only.
* @import_core: Import partial state without partial block. Only defined
* for algorithms that are not block-only.
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
* time, right after the transformation context was
* allocated. In case the cryptographic hardware has
* some special requirements which need to be handled
* by software, this function shall check for the precise
* requirement of the transformation and put any software
* fallbacks in place.
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
* @clone_tfm: Copy transform into new object, may allocate memory.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
int (*export_core)(struct ahash_request *req, void *out);
int (*import_core)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_ahash *tfm);
void (*exit_tfm)(struct crypto_ahash *tfm);
int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
struct hash_alg_common halg;
};
struct shash_desc {
struct crypto_shash *tfm;
void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
};
#define HASH_MAX_DIGESTSIZE 64
/*
* The size of a core hash state and a partial block. The final byte
* is the length of the partial block.
*/
#define HASH_STATE_AND_BLOCK(state, block) ((state) + (block) + 1)
/* Worst case is sha3-224. */
#define HASH_MAX_STATESIZE HASH_STATE_AND_BLOCK(200, 144)
/* This needs to match arch/s390/crypto/sha.h. */
#define S390_SHA_CTX_SIZE 216
/*
* Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc'
* containing a 'struct s390_sha_ctx'.
*/
#define SHA3_224_S390_DESCSIZE HASH_STATE_AND_BLOCK(S390_SHA_CTX_SIZE, 144)
#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + \
SHA3_224_S390_DESCSIZE)
#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \
HASH_MAX_DESCSIZE)
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
__aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
#define HASH_REQUEST_ON_STACK(name, _tfm) \
char __##name##_req[sizeof(struct ahash_request) + \
MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
struct ahash_request *name = \
ahash_request_on_stack_init(__##name##_req, (_tfm))
#define HASH_REQUEST_CLONE(name, gfp) \
hash_request_clone(name, sizeof(__##name##_req), gfp)
#define CRYPTO_HASH_STATESIZE(coresize, blocksize) (coresize + blocksize + 1)
/**
* struct shash_alg - synchronous message digest definition
* @init: see struct ahash_alg
* @update: see struct ahash_alg
* @final: see struct ahash_alg
* @finup: see struct ahash_alg
* @digest: see struct ahash_alg
* @export: see struct ahash_alg
* @import: see struct ahash_alg
* @export_core: see struct ahash_alg
* @import_core: see struct ahash_alg
* @setkey: see struct ahash_alg
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
* time, right after the transformation context was
* allocated. In case the cryptographic hardware has
* some special requirements which need to be handled
* by software, this function shall check for the precise
* requirement of the transformation and put any software
* fallbacks in place.
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
* @clone_tfm: Copy transform into new object, may allocate memory.
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
* @halg: see struct hash_alg_common
* @HASH_ALG_COMMON: see struct hash_alg_common
*/
struct shash_alg {
int (*init)(struct shash_desc *desc);
int (*update)(struct shash_desc *desc, const u8 *data,
unsigned int len);
int (*final)(struct shash_desc *desc, u8 *out);
int (*finup)(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
int (*digest)(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
int (*export)(struct shash_desc *desc, void *out);
int (*import)(struct shash_desc *desc, const void *in);
int (*export_core)(struct shash_desc *desc, void *out);
int (*import_core)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
void (*exit_tfm)(struct crypto_shash *tfm);
int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);
unsigned int descsize;
union {
struct HASH_ALG_COMMON;
struct hash_alg_common halg;
};
};
#undef HASH_ALG_COMMON
struct crypto_ahash {
bool using_shash; /* Underlying algorithm is shash, not ahash */
unsigned int statesize;
unsigned int reqsize;
struct crypto_tfm base;
};
struct crypto_shash {
struct crypto_tfm base;
};
/**
* DOC: Asynchronous Message Digest API
*
* The asynchronous message digest API is used with the ciphers of type
* CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
*
* The asynchronous cipher operation discussion provided for the
* CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
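/*
* Example (illustrative sketch, not part of this header): a one-shot message
* digest over a linearly mapped kernel buffer using the asynchronous API.
* The algorithm name "sha256" and the function name are placeholders;
* sg_init_one() comes from <linux/scatterlist.h>, and DECLARE_CRYPTO_WAIT(),
* crypto_req_done() and crypto_wait_req() from <linux/crypto.h> are used to
* wait for completion of a potentially asynchronous implementation.
*
*	static int example_ahash_sha256(const void *data, unsigned int len,
*					u8 *out)
*	{
*		struct crypto_ahash *tfm;
*		struct ahash_request *req;
*		struct scatterlist sg;
*		DECLARE_CRYPTO_WAIT(wait);
*		int err;
*
*		tfm = crypto_alloc_ahash("sha256", 0, 0);
*		if (IS_ERR(tfm))
*			return PTR_ERR(tfm);
*
*		req = ahash_request_alloc(tfm, GFP_KERNEL);
*		if (!req) {
*			crypto_free_ahash(tfm);
*			return -ENOMEM;
*		}
*
*		sg_init_one(&sg, data, len);
*		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
*					   CRYPTO_TFM_REQ_MAY_SLEEP,
*					   crypto_req_done, &wait);
*		ahash_request_set_crypt(req, &sg, out, len);
*
*		err = crypto_wait_req(crypto_ahash_digest(req), &wait);
*
*		ahash_request_free(req);
*		crypto_free_ahash(tfm);
*		return err;
*	}
*/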
static inline bool ahash_req_on_stack(struct ahash_request *req)
{
return crypto_req_on_stack(&req->base);
}
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
}
/**
* crypto_alloc_ahash() - allocate ahash cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* ahash cipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for an ahash. The returned struct
* crypto_ahash is the cipher handle that is required for any subsequent
* API invocation for that ahash.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask);
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
}
/**
* crypto_free_ahash() - zeroize and free the ahash handle
* @tfm: cipher handle to be freed
*
* If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
}
/**
* crypto_has_ahash() - Search for the availability of an ahash.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* ahash
* @type: specifies the type of the ahash
* @mask: specifies the mask for the ahash
*
* Return: true when the ahash is known to the kernel crypto API; false
* otherwise
*/
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask);
static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
{
return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
}
static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
{
return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}
/**
* crypto_ahash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
*
* The block size for the message digest cipher referenced with the cipher
* handle is returned.
*
* Return: block size of cipher
*/
static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
}
static inline struct hash_alg_common *__crypto_hash_alg_common(
struct crypto_alg *alg)
{
return container_of(alg, struct hash_alg_common, base);
}
static inline struct hash_alg_common *crypto_hash_alg_common(
struct crypto_ahash *tfm)
{
return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
}
/**
* crypto_ahash_digestsize() - obtain message digest size
* @tfm: cipher handle
*
* The size for the message digest created by the message digest cipher
* referenced with the cipher handle is returned.
*
* Return: message digest size of cipher
*/
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
return crypto_hash_alg_common(tfm)->digestsize;
}
/**
* crypto_ahash_statesize() - obtain size of the ahash state
* @tfm: cipher handle
*
* Return the size of the ahash state. With the crypto_ahash_export()
* function, the caller can export the state into a buffer whose size is
* defined with this function.
*
* Return: size of the ahash state
*/
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
return tfm->statesize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
{
return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
}
static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
}
static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}
/**
* crypto_ahash_reqtfm() - obtain cipher handle from request
* @req: asynchronous request handle that contains the reference to the ahash
* cipher handle
*
* Return the ahash cipher handle that is registered with the asynchronous
* request handle ahash_request.
*
* Return: ahash cipher handle
*/
static inline struct crypto_ahash *crypto_ahash_reqtfm(
struct ahash_request *req)
{
return __crypto_ahash_cast(req->base.tfm);
}
/**
* crypto_ahash_reqsize() - obtain size of the request data structure
* @tfm: cipher handle
*
* Return: size of the request data
*/
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
return tfm->reqsize;
}
static inline void *ahash_request_ctx(struct ahash_request *req)
{
return req->__ctx;
}
/**
* crypto_ahash_setkey - set key for cipher handle
* @tfm: cipher handle
* @key: buffer holding the key
* @keylen: length of the key in bytes
*
* The caller provided key is set for the ahash cipher. The cipher
* handle must point to a keyed hash in order for this function to succeed.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
* needed to perform the cipher operation
*
* This function is a "short-hand" for the function calls of
* crypto_ahash_update and crypto_ahash_final. The parameters have the same
* meaning as discussed for those separate functions.
*
* Return: see crypto_ahash_final()
*/
int crypto_ahash_finup(struct ahash_request *req);
/**
* crypto_ahash_final() - calculate message digest
* @req: reference to the ahash_request handle that holds all information
* needed to perform the cipher operation
*
* Finalize the message digest operation and create the message digest
* based on all data added to the cipher handle. The message digest is placed
* into the output buffer registered with the ahash_request handle.
*
* Return:
* 0 if the message digest was successfully calculated;
* -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
static inline int crypto_ahash_final(struct ahash_request *req)
{
req->nbytes = 0;
return crypto_ahash_finup(req);
}
/**
* crypto_ahash_digest() - calculate message digest for a buffer
* @req: reference to the ahash_request handle that holds all information
* needed to perform the cipher operation
*
* This function is a "short-hand" for the function calls of crypto_ahash_init,
* crypto_ahash_update and crypto_ahash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
* Return: see crypto_ahash_final()
*/
int crypto_ahash_digest(struct ahash_request *req);
/**
* crypto_ahash_export() - extract current message digest state
* @req: reference to the ahash_request handle whose state is exported
* @out: output buffer of sufficient size that can hold the hash state
*
* This function exports the hash state of the ahash_request handle into the
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_ahash_statesize()).
*
* Return: 0 if the export was successful; < 0 if an error occurred
*/
int crypto_ahash_export(struct ahash_request *req, void *out);
/**
* crypto_ahash_import() - import message digest state
* @req: reference to ahash_request handle the state is imported into
* @in: buffer holding the state
*
* This function imports the hash state into the ahash_request handle from the
* input buffer. That buffer should have been generated with the
* crypto_ahash_export function.
*
* Return: 0 if the import was successful; < 0 if an error occurred
*/
int crypto_ahash_import(struct ahash_request *req, const void *in);
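/*
* Example (illustrative sketch, not part of this header): suspending and
* later resuming an in-progress hash with crypto_ahash_export() and
* crypto_ahash_import(). "req", "wait" and the registered source data are
* assumed to be set up as in the allocation example above; the fixed-size
* buffer assumes HASH_MAX_STATESIZE is an upper bound for
* crypto_ahash_statesize().
*
*	u8 state[HASH_MAX_STATESIZE];
*	int err;
*
*	err = crypto_wait_req(crypto_ahash_init(req), &wait);
*	if (!err)
*		err = crypto_wait_req(crypto_ahash_update(req), &wait);
*	if (!err)
*		err = crypto_ahash_export(req, state);	// save partial state
*
*	// ... later, possibly after reusing the request for other work ...
*
*	if (!err)
*		err = crypto_ahash_import(req, state);	// restore partial state
*	if (!err)
*		err = crypto_wait_req(crypto_ahash_finup(req), &wait);
*/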
/**
* crypto_ahash_init() - (re)initialize message digest handle
* @req: ahash_request handle that already is initialized with all necessary
* data using the ahash_request_* API functions
*
* The call (re-)initializes the message digest referenced by the ahash_request
* handle. Any potentially existing state created by previous operations is
* discarded.
*
* Return: see crypto_ahash_final()
*/
int crypto_ahash_init(struct ahash_request *req);
/**
* crypto_ahash_update() - add data to message digest for processing
* @req: ahash_request handle that was previously initialized with the
* crypto_ahash_init call.
*
* Updates the message digest state of the &ahash_request handle. The input data
* is pointed to by the scatter/gather list registered in the &ahash_request
* handle.
*
* Return: see crypto_ahash_final()
*/
int crypto_ahash_update(struct ahash_request *req);
/**
* DOC: Asynchronous Hash Request Handle
*
* The &ahash_request data structure contains all pointers to data
* required for the asynchronous cipher operation. This includes the cipher
* handle (which can be used by multiple &ahash_request instances), pointers
* to the input data and the message digest output buffer, the asynchronous
* callback function, etc. It acts as a handle to the ahash_request_* API
* calls in a similar way as the ahash handle does to the crypto_ahash_* API
* calls.
*/
/**
* ahash_request_set_tfm() - update cipher handle reference in request
* @req: request handle to be modified
* @tfm: cipher handle that shall be added to the request handle
*
* Allow the caller to replace the existing ahash handle in the request
* data structure with a different one.
*/
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}
/**
* ahash_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
* @gfp: memory allocation flag that is handed to kmalloc by the API call.
*
* Allocate the request data structure that must be used with the ahash
* message digest API calls. During the allocation, the provided ahash handle
* is registered in the request data structure.
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct ahash_request *ahash_request_alloc_noprof(
struct crypto_ahash *tfm, gfp_t gfp)
{
struct ahash_request *req;
req = kmalloc_noprof(sizeof(struct ahash_request) +
crypto_ahash_reqsize(tfm), gfp);
if (likely(req))
ahash_request_set_tfm(req, tfm);
return req;
}
#define ahash_request_alloc(...) alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))
/**
* ahash_request_free() - zeroize and free the request data structure
* @req: request data structure cipher handle to be freed
*/
void ahash_request_free(struct ahash_request *req);
static inline void ahash_request_zero(struct ahash_request *req)
{
memzero_explicit(req, sizeof(*req) +
crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
}
static inline struct ahash_request *ahash_request_cast(
struct crypto_async_request *req)
{
return container_of(req, struct ahash_request, base);
}
/**
* ahash_request_set_callback() - set asynchronous callback function
* @req: request handle
* @flags: specify zero or an ORing of the flags
* CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may backlog and
* increase the wait queue beyond the initial maximum size;
* CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
* @compl: callback function pointer to be registered with the request handle
* @data: The data pointer refers to memory that is not used by the kernel
* crypto API, but provided to the callback function for it to use. Here,
* the caller can provide a reference to memory the callback function can
* operate on. As the callback function is invoked asynchronously to the
* related functionality, it may need to access data structures of the
* related functionality which can be referenced using this pointer. The
* callback function can access the memory via the "data" field in the
* &crypto_async_request data structure provided to the callback function.
*
* This function allows setting the callback function that is triggered once
* the cipher operation completes.
*
* The callback function is registered with the &ahash_request handle and
* must comply with the following template::
*
* void callback_function(struct crypto_async_request *req, int error)
*/
static inline void ahash_request_set_callback(struct ahash_request *req,
u32 flags,
crypto_completion_t compl,
void *data)
{
flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
crypto_request_set_callback(&req->base, flags, compl, data);
}
/**
* ahash_request_set_crypt() - set data buffers
* @req: ahash_request handle to be updated
* @src: source scatter/gather list
* @result: buffer that is filled with the message digest -- the caller must
* ensure that the buffer has sufficient space by, for example, calling
* crypto_ahash_digestsize()
* @nbytes: number of bytes to process from the source scatter/gather list
*
* By using this call, the caller references the source scatter/gather list.
* The source scatter/gather list points to the data the message digest is to
* be calculated for.
*/
static inline void ahash_request_set_crypt(struct ahash_request *req,
struct scatterlist *src, u8 *result,
unsigned int nbytes)
{
req->src = src;
req->nbytes = nbytes;
req->result = result;
req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
}
/**
* ahash_request_set_virt() - set virtual address data buffers
* @req: ahash_request handle to be updated
* @src: source virtual address
* @result: buffer that is filled with the message digest -- the caller must
* ensure that the buffer has sufficient space by, for example, calling
* crypto_ahash_digestsize()
* @nbytes: number of bytes to process from the source virtual address
*
* By using this call, the caller references the source virtual address.
* The source virtual address points to the data the message digest is to
* be calculated for.
*/
static inline void ahash_request_set_virt(struct ahash_request *req,
const u8 *src, u8 *result,
unsigned int nbytes)
{
req->svirt = src;
req->nbytes = nbytes;
req->result = result;
req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
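/*
* Example (illustrative sketch, not part of this header): hashing a buffer
* known only by its virtual address with an on-stack request. This assumes a
* synchronous implementation (allocated with the CRYPTO_ALG_ASYNC mask bit,
* see also ahash_is_async()); the algorithm name and buffer names are
* placeholders.
*
*	struct crypto_ahash *tfm;
*	int err;
*
*	tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
*	if (IS_ERR(tfm))
*		return PTR_ERR(tfm);
*
*	{
*		HASH_REQUEST_ON_STACK(req, tfm);
*
*		ahash_request_set_virt(req, data, out, len);
*		err = crypto_ahash_digest(req);
*	}
*
*	crypto_free_ahash(tfm);
*/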
/**
* DOC: Synchronous Message Digest API
*
* The synchronous message digest API is used with the ciphers of type
* CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
*
* The message digest API is able to maintain state information for the
* caller.
*
* The synchronous message digest API can store user-related context in its
* shash_desc request data structure.
*/
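/*
* Example (illustrative sketch, not part of this header): a one-shot
* synchronous digest with an on-stack descriptor. The algorithm name
* "sha256" and the function name are placeholders; SHASH_DESC_ON_STACK() is
* defined earlier in this header.
*
*	static int example_shash_sha256(const void *data, unsigned int len,
*					u8 *out)
*	{
*		struct crypto_shash *tfm;
*		int err;
*
*		tfm = crypto_alloc_shash("sha256", 0, 0);
*		if (IS_ERR(tfm))
*			return PTR_ERR(tfm);
*
*		{
*			SHASH_DESC_ON_STACK(desc, tfm);
*
*			desc->tfm = tfm;
*			err = crypto_shash_digest(desc, data, len, out);
*			shash_desc_zero(desc);
*		}
*
*		crypto_free_shash(tfm);
*		return err;
*	}
*/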
/**
* crypto_alloc_shash() - allocate message digest handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* message digest cipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for a message digest. The returned &struct
* crypto_shash is the cipher handle that is required for any subsequent
* API invocation for that message digest.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);
int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
return &tfm->base;
}
/**
* crypto_free_shash() - zeroize and free the message digest handle
* @tfm: cipher handle to be freed
*
* If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}
static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
{
return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
}
static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
{
return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
}
/**
* crypto_shash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
*
* The block size for the message digest cipher referenced with the cipher
* handle is returned.
*
* Return: block size of cipher
*/
static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
}
static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
{
return container_of(alg, struct shash_alg, base);
}
static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
{
return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
}
/**
* crypto_shash_digestsize() - obtain message digest size
* @tfm: cipher handle
*
* The size for the message digest created by the message digest cipher
* referenced with the cipher handle is returned.
*
* Return: digest size of cipher
*/
static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
{
return crypto_shash_alg(tfm)->digestsize;
}
static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
{
return crypto_shash_alg(tfm)->statesize;
}
static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
{
return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
}
static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
{
crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
}
static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
{
crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
}
/**
* crypto_shash_descsize() - obtain the operational state size
* @tfm: cipher handle
*
* The size of the operational state the cipher needs during operation is
* returned for the hash referenced with the cipher handle. This size is
* required to calculate the memory requirements so that the caller can
* allocate sufficient memory for the operational state.
*
* The operational state is defined with struct shash_desc where the size of
* that data structure is to be calculated as
* sizeof(struct shash_desc) + crypto_shash_descsize(alg)
*
* Return: size of the operational state
*/
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
return crypto_shash_alg(tfm)->descsize;
}
static inline void *shash_desc_ctx(struct shash_desc *desc)
{
return desc->__ctx;
}
/**
* crypto_shash_setkey() - set key for message digest
* @tfm: cipher handle
* @key: buffer holding the key
* @keylen: length of the key in bytes
*
* The caller provided key is set for the keyed message digest cipher. The
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
* Context: Softirq or process context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
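/*
* Example (illustrative sketch, not part of this header): a keyed digest
* such as HMAC, computed incrementally over two buffers. The algorithm name
* "hmac(sha256)" and the buffer names are placeholders; the key must be set
* before the descriptor is initialized.
*
*	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
*	if (IS_ERR(tfm))
*		return PTR_ERR(tfm);
*
*	err = crypto_shash_setkey(tfm, key, keylen);
*	if (!err) {
*		SHASH_DESC_ON_STACK(desc, tfm);
*
*		desc->tfm = tfm;
*		err = crypto_shash_init(desc);
*		if (!err)
*			err = crypto_shash_update(desc, part1, len1);
*		if (!err)
*			err = crypto_shash_finup(desc, part2, len2, out);
*		shash_desc_zero(desc);
*	}
*
*	crypto_free_shash(tfm);
*/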
/**
* crypto_shash_digest() - calculate message digest for buffer
* @desc: see crypto_shash_final()
* @data: see crypto_shash_update()
* @len: see crypto_shash_update()
* @out: see crypto_shash_final()
*
* This function is a "short-hand" for the function calls of crypto_shash_init,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
* Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
/**
* crypto_shash_tfm_digest() - calculate message digest for buffer
* @tfm: hash transformation object
* @data: see crypto_shash_update()
* @len: see crypto_shash_update()
* @out: see crypto_shash_final()
*
* This is a simplified version of crypto_shash_digest() for users who don't
* want to allocate their own hash descriptor (shash_desc). Instead,
* crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash)
* directly, and it allocates a hash descriptor on the stack internally.
* Note that this stack allocation may be fairly large.
*
* Context: Softirq or process context.
* Return: 0 on success; < 0 if an error occurred.
*/
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out);
int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
unsigned int len, u8 *out);
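/*
* Example (illustrative sketch): the descriptor handling shown above can be
* avoided entirely for a simple one-shot digest; "tfm", "data", "len" and
* "out" are placeholders.
*
*	err = crypto_shash_tfm_digest(tfm, data, len, out);
*/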
/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
* @out: output buffer of sufficient size that can hold the hash state
*
* This function exports the hash state of the operational state handle into the
* caller-allocated output buffer out which must have sufficient size (e.g. as
* reported by crypto_shash_statesize()).
*
* Context: Softirq or process context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
int crypto_shash_export(struct shash_desc *desc, void *out);
/**
* crypto_shash_import() - import operational state
* @desc: reference to the operational state handle the state is imported into
* @in: buffer holding the state
*
* This function imports the hash state into the operational state handle from
* the input buffer. That buffer should have been generated with the
* crypto_shash_export function.
*
* Context: Softirq or process context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
int crypto_shash_import(struct shash_desc *desc, const void *in);
/**
* crypto_shash_init() - (re)initialize message digest
* @desc: operational state handle that is already filled
*
* The call (re-)initializes the message digest referenced by the
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
* Context: Softirq or process context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
int crypto_shash_init(struct shash_desc *desc);
/**
* crypto_shash_finup() - calculate message digest of buffer
* @desc: see crypto_shash_final()
* @data: see crypto_shash_update()
* @len: see crypto_shash_update()
* @out: see crypto_shash_final()
*
* This function is a "short-hand" for the function calls of
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate functions.
*
* Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
/**
* crypto_shash_update() - add data to message digest for processing
* @desc: operational state handle that is already initialized
* @data: input data to be added to the message digest
* @len: length of the input data
*
* Updates the message digest state of the operational state handle.
*
* Context: Softirq or process context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return crypto_shash_finup(desc, data, len, NULL);
}
/**
* crypto_shash_final() - calculate message digest
* @desc: operational state handle that is already filled with data
* @out: output buffer filled with the message digest
*
* Finalize the message digest operation and create the message digest
* based on all data added to the cipher handle. The message digest is placed
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
* Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
return crypto_shash_finup(desc, NULL, 0, out);
}
static inline void shash_desc_zero(struct shash_desc *desc)
{
memzero_explicit(desc,
sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}
static inline bool ahash_is_async(struct crypto_ahash *tfm)
{
return crypto_tfm_is_async(&tfm->base);
}
static inline struct ahash_request *ahash_request_on_stack_init(
char *buf, struct crypto_ahash *tfm)
{
struct ahash_request *req = (void *)buf;
crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
return req;
}
static inline struct ahash_request *ahash_request_clone(
struct ahash_request *req, size_t total, gfp_t gfp)
{
return container_of(crypto_request_clone(&req->base, total, gfp),
struct ahash_request, base);
}
#endif /* _CRYPTO_HASH_H */
// SPDX-License-Identifier: GPL-2.0
// Generated by scripts/atomic/gen-atomic-instrumented.sh
// DO NOT MODIFY THIS FILE DIRECTLY
/*
* This file provides atomic operations with explicit instrumentation (e.g.
* KASAN, KCSAN), which should be used unless it is necessary to avoid
* instrumentation. Where it is necessary to avoid instrumentation, the
* raw_atomic*() operations should be used.
*/
#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/instrumented.h>
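/*
* Example (illustrative sketch, not part of this header): a simple event
* counter built from the instrumented operations declared below; the names
* are placeholders.
*
*	static atomic_t example_events = ATOMIC_INIT(0);
*
*	static void example_record_event(void)
*	{
*		atomic_inc(&example_events);	// relaxed ordering is sufficient here
*	}
*
*	static int example_read_events(void)
*	{
*		return atomic_read(&example_events);
*	}
*/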
/**
* atomic_read() - atomic load with relaxed ordering
* @v: pointer to atomic_t
*
* Atomically loads the value of @v with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_read() there.
*
* Return: The value loaded from @v.
*/
static __always_inline int
atomic_read(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return raw_atomic_read(v);
}
/**
* atomic_read_acquire() - atomic load with acquire ordering
* @v: pointer to atomic_t
*
* Atomically loads the value of @v with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_read_acquire() there.
*
* Return: The value loaded from @v.
*/
static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return raw_atomic_read_acquire(v);
}
/**
* atomic_set() - atomic set with relaxed ordering
* @v: pointer to atomic_t
* @i: int value to assign
*
* Atomically sets @v to @i with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_set() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_set(atomic_t *v, int i)
{
instrument_atomic_write(v, sizeof(*v));
raw_atomic_set(v, i);
}
/**
* atomic_set_release() - atomic set with release ordering
* @v: pointer to atomic_t
* @i: int value to assign
*
* Atomically sets @v to @i with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_set_release() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
kcsan_release();
instrument_atomic_write(v, sizeof(*v));
raw_atomic_set_release(v, i);
}
/**
* atomic_add() - atomic add with relaxed ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_add(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_add(i, v);
}
/**
* atomic_add_return() - atomic add with full ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_return(i, v);
}
/**
* atomic_add_return_acquire() - atomic add with acquire ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_return_acquire(i, v);
}
/**
* atomic_add_return_release() - atomic add with release ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_return_release(i, v);
}
/**
* atomic_add_return_relaxed() - atomic add with relaxed ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_return_relaxed(i, v);
}
/**
* atomic_fetch_add() - atomic add with full ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_add() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_add(i, v);
}
/**
* atomic_fetch_add_acquire() - atomic add with acquire ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_add_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_add_acquire(i, v);
}
/**
* atomic_fetch_add_release() - atomic add with release ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_add_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_add_release(i, v);
}
/**
* atomic_fetch_add_relaxed() - atomic add with relaxed ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_add_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_add_relaxed(i, v);
}
/**
* atomic_sub() - atomic subtract with relaxed ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_sub() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_sub(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_sub(i, v);
}
/**
* atomic_sub_return() - atomic subtract with full ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_sub_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_sub_return(i, v);
}
/**
* atomic_sub_return_acquire() - atomic subtract with acquire ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_sub_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_sub_return_acquire(i, v);
}
/**
* atomic_sub_return_release() - atomic subtract with release ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_sub_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_sub_return_release(i, v);
}
/**
* atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_sub_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_sub_return_relaxed(i, v);
}
/**
* atomic_fetch_sub() - atomic subtract with full ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_sub(i, v);
}
/**
* atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_sub_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_sub_acquire(i, v);
}
/**
* atomic_fetch_sub_release() - atomic subtract with release ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_sub_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_sub_release(i, v);
}
/**
* atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_sub_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_sub_relaxed(i, v);
}
/**
* atomic_inc() - atomic increment with relaxed ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_inc() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_inc(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_inc(v);
}
/**
* atomic_inc_return() - atomic increment with full ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_inc_return(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_inc_return(v);
}
/**
* atomic_inc_return_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_inc_return_acquire(v);
}
/**
* atomic_inc_return_release() - atomic increment with release ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_inc_return_release(v);
}
/**
* atomic_inc_return_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_inc_return_relaxed(v);
}
/**
* atomic_fetch_inc() - atomic increment with full ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_inc() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_inc(v);
}
/**
* atomic_fetch_inc_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_inc_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_inc_acquire(v);
}
/**
* atomic_fetch_inc_release() - atomic increment with release ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_inc_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_inc_release(v);
}
/**
* atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_inc_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_inc_relaxed(v);
}
/**
* atomic_dec() - atomic decrement with relaxed ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_dec() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_dec(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_dec(v);
}
/**
* atomic_dec_return() - atomic decrement with full ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_dec_return(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_dec_return(v);
}
/**
* atomic_dec_return_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_dec_return_acquire(v);
}
/**
* atomic_dec_return_release() - atomic decrement with release ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_dec_return_release(v);
}
/**
* atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_dec_return_relaxed(v);
}
/**
* atomic_fetch_dec() - atomic decrement with full ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_dec() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_dec(v);
}
/**
* atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_dec_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_dec_acquire(v);
}
/**
* atomic_fetch_dec_release() - atomic decrement with release ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_dec_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_dec_release(v);
}
/**
* atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_dec_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_dec_relaxed(v);
}
/**
* atomic_and() - atomic bitwise AND with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_and() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_and(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_and(i, v);
}
/**
* atomic_fetch_and() - atomic bitwise AND with full ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_and() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_and(i, v);
}
/**
* atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_and_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_and_acquire(i, v);
}
/**
* atomic_fetch_and_release() - atomic bitwise AND with release ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_and_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_and_release(i, v);
}
/**
* atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_and_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_and_relaxed(i, v);
}
/**
* atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_andnot() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_andnot(i, v);
}
/**
* atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & ~@i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_andnot() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_andnot(i, v);
}
/**
* atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & ~@i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_andnot_acquire(i, v);
}
/**
* atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & ~@i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_andnot_release(i, v);
}
/**
* atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_andnot_relaxed(i, v);
}
/**
* atomic_or() - atomic bitwise OR with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_or() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_or(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_or(i, v);
}
/**
* atomic_fetch_or() - atomic bitwise OR with full ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v | @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_or() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_or(i, v);
}
/**
* atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v | @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_or_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_or_acquire(i, v);
}
/**
* atomic_fetch_or_release() - atomic bitwise OR with release ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v | @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_or_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_or_release(i, v);
}
/**
* atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_or_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_or_relaxed(i, v);
}
/**
* atomic_xor() - atomic bitwise XOR with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_xor() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_xor(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_xor(i, v);
}
/**
* atomic_fetch_xor() - atomic bitwise XOR with full ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v ^ @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_xor() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_xor(i, v);
}
/**
* atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v ^ @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_xor_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_xor_acquire(i, v);
}
/**
* atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v ^ @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_xor_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_xor_release(i, v);
}
/**
* atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
* @i: int value
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_xor_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_xor_relaxed(i, v);
}
/**
* atomic_xchg() - atomic exchange with full ordering
* @v: pointer to atomic_t
* @new: int value to assign
*
* Atomically updates @v to @new with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_xchg() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_xchg(atomic_t *v, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_xchg(v, new);
}
/**
* atomic_xchg_acquire() - atomic exchange with acquire ordering
* @v: pointer to atomic_t
* @new: int value to assign
*
* Atomically updates @v to @new with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_xchg_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_xchg_acquire(atomic_t *v, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_xchg_acquire(v, new);
}
/**
* atomic_xchg_release() - atomic exchange with release ordering
* @v: pointer to atomic_t
* @new: int value to assign
*
* Atomically updates @v to @new with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_xchg_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_xchg_release(atomic_t *v, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_xchg_release(v, new);
}
/**
* atomic_xchg_relaxed() - atomic exchange with relaxed ordering
* @v: pointer to atomic_t
* @new: int value to assign
*
* Atomically updates @v to @new with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_xchg_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_xchg_relaxed(v, new);
}
/**
* atomic_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic_t
* @old: int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_cmpxchg(v, old, new);
}
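/*
 * Illustrative sketch (not part of this header): a one-shot state claim
 * with atomic_cmpxchg(), where only the caller that observes IDLE may
 * proceed. The 'state' variable and IDLE/BUSY values are hypothetical;
 * for retry loops, atomic_try_cmpxchg() below usually reads better.
 *
 *	enum { IDLE, BUSY };
 *	static atomic_t state = ATOMIC_INIT(IDLE);
 *
 *	static bool claim(void)
 *	{
 *		return atomic_cmpxchg(&state, IDLE, BUSY) == IDLE;
 *	}
 */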
/**
* atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic_t
* @old: int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_cmpxchg_acquire(v, old, new);
}
/**
* atomic_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic_t
* @old: int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_cmpxchg_release(v, old, new);
}
/**
* atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic_t
* @old: int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_cmpxchg_relaxed(v, old, new);
}
/**
* atomic_try_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic_t
* @old: pointer to int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg(v, old, new);
}
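/*
 * Illustrative sketch (not part of this header): the usual retry-loop
 * idiom with atomic_try_cmpxchg(). Because @old is updated on failure,
 * the loop does not need to re-read @v each iteration. The bounded
 * counter helper below is hypothetical.
 *
 *	static bool inc_below(atomic_t *v, int limit)
 *	{
 *		int old = atomic_read(v);
 *
 *		do {
 *			if (old >= limit)
 *				return false;
 *		} while (!atomic_try_cmpxchg(v, &old, old + 1));
 *
 *		return true;
 *	}
 */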
/**
* atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic_t
* @old: pointer to int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg_acquire(v, old, new);
}
/**
* atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic_t
* @old: pointer to int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg_release(v, old, new);
}
/**
* atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic_t
* @old: pointer to int value to compare with
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg_relaxed(v, old, new);
}
/**
* atomic_sub_and_test() - atomic subtract and test if zero with full ordering
* @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_sub_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_sub_and_test(i, v);
}
/**
* atomic_dec_and_test() - atomic decrement and test if zero with full ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_dec_and_test(v);
}
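/*
 * Illustrative sketch (not part of this header): a minimal reference-count
 * release built on atomic_dec_and_test(); the full ordering ensures all
 * prior accesses to the object happen before it is freed. The 'struct obj'
 * is hypothetical; new code should normally prefer refcount_t for this.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refs))
 *			kfree(o);
 *	}
 */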
/**
* atomic_inc_and_test() - atomic increment and test if zero with full ordering
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_inc_and_test(v);
}
/**
* atomic_add_negative() - atomic add and test if negative with full ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_negative() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_negative(i, v);
}
/**
* atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_negative_acquire() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_add_negative_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_negative_acquire(i, v);
}
/**
* atomic_add_negative_release() - atomic add and test if negative with release ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_negative_release() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_add_negative_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_negative_release(i, v);
}
/**
* atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
* @i: int value to add
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_add_negative_relaxed() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_add_negative_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_negative_relaxed(i, v);
}
/**
* atomic_fetch_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic_t
* @a: int value to add
* @u: int value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there.
*
* Return: The original value of @v.
*/
static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_fetch_add_unless(v, a, u);
}
/**
* atomic_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic_t
* @a: int value to add
* @u: int value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_add_unless() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_add_unless(v, a, u);
}
/**
* atomic_inc_not_zero() - atomic increment unless zero with full ordering
* @v: pointer to atomic_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_inc_not_zero(v);
}
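/*
 * Illustrative sketch (not part of this header): atomic_inc_not_zero() is
 * the classic "take a reference only if the object is still live" operation
 * used in lookup paths, paired with a dec_and_test() based release. The
 * 'struct obj' with an atomic_t refs member is hypothetical.
 *
 *	static struct obj *obj_tryget(struct obj *o)
 *	{
 *		if (!o || !atomic_inc_not_zero(&o->refs))
 *			return NULL;
 *		return o;
 *	}
 */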
/**
* atomic_inc_unless_negative() - atomic increment unless negative with full ordering
* @v: pointer to atomic_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_inc_unless_negative(v);
}
/**
* atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
* @v: pointer to atomic_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_dec_unless_positive(v);
}
/**
* atomic_dec_if_positive() - atomic decrement if positive with full ordering
* @v: pointer to atomic_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there.
*
* Return: The old value of (@v - 1), regardless of whether @v was updated.
*/
static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_dec_if_positive(v);
}
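/*
 * Illustrative sketch (not part of this header): atomic_dec_if_positive()
 * suits "consume a token if one is available" schemes. A non-negative
 * return value means a token was taken; the 'tokens' counter is
 * hypothetical.
 *
 *	static bool take_token(atomic_t *tokens)
 *	{
 *		return atomic_dec_if_positive(tokens) >= 0;
 *	}
 */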
/**
* atomic64_read() - atomic load with relaxed ordering
* @v: pointer to atomic64_t
*
* Atomically loads the value of @v with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_read() there.
*
* Return: The value loaded from @v.
*/
static __always_inline s64
atomic64_read(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return raw_atomic64_read(v);
}
/**
* atomic64_read_acquire() - atomic load with acquire ordering
* @v: pointer to atomic64_t
*
* Atomically loads the value of @v with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_read_acquire() there.
*
* Return: The value loaded from @v.
*/
static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return raw_atomic64_read_acquire(v);
}
/**
* atomic64_set() - atomic set with relaxed ordering
* @v: pointer to atomic64_t
* @i: s64 value to assign
*
* Atomically sets @v to @i with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_set() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
{
instrument_atomic_write(v, sizeof(*v));
raw_atomic64_set(v, i);
}
/**
* atomic64_set_release() - atomic set with release ordering
* @v: pointer to atomic64_t
* @i: s64 value to assign
*
* Atomically sets @v to @i with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_set_release() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
kcsan_release();
instrument_atomic_write(v, sizeof(*v));
raw_atomic64_set_release(v, i);
}
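/*
 * Illustrative sketch (not part of this header): pairing a release store
 * with an acquire load so that data written before the store is visible
 * to a reader that observes the flag. The 'data'/'ready' pair below is
 * hypothetical.
 *
 *	static u64 data;
 *	static atomic64_t ready = ATOMIC64_INIT(0);
 *
 *	static void producer(u64 val)
 *	{
 *		data = val;
 *		atomic64_set_release(&ready, 1);
 *	}
 *
 *	static bool consumer(u64 *out)
 *	{
 *		if (!atomic64_read_acquire(&ready))
 *			return false;
 *		*out = data;
 *		return true;
 *	}
 */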
/**
* atomic64_add() - atomic add with relaxed ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_add(i, v);
}
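/*
 * Illustrative sketch (not part of this header): atomic64_t is a natural
 * fit for statistics that may overflow 32 bits, such as byte counters;
 * a relaxed add is sufficient when only the final value matters. The
 * 'rx_bytes' counter is hypothetical.
 *
 *	static atomic64_t rx_bytes = ATOMIC64_INIT(0);
 *
 *	static void account_rx(size_t len)
 *	{
 *		atomic64_add(len, &rx_bytes);
 *	}
 *
 *	static s64 read_rx_bytes(void)
 *	{
 *		return atomic64_read(&rx_bytes);
 *	}
 */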
/**
* atomic64_add_return() - atomic add with full ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_return(i, v);
}
/**
* atomic64_add_return_acquire() - atomic add with acquire ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_return_acquire(i, v);
}
/**
* atomic64_add_return_release() - atomic add with release ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_return_release(i, v);
}
/**
* atomic64_add_return_relaxed() - atomic add with relaxed ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_return_relaxed(i, v);
}
/**
* atomic64_fetch_add() - atomic add with full ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_add() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_add(i, v);
}
/**
* atomic64_fetch_add_acquire() - atomic add with acquire ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_add_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_add_acquire(i, v);
}
/**
* atomic64_fetch_add_release() - atomic add with release ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_add_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_add_release(i, v);
}
/**
* atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_add_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_add_relaxed(i, v);
}
/**
* atomic64_sub() - atomic subtract with relaxed ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_sub() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_sub(i, v);
}
/**
* atomic64_sub_return() - atomic subtract with full ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_sub_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_sub_return(i, v);
}
/**
* atomic64_sub_return_acquire() - atomic subtract with acquire ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_sub_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_sub_return_acquire(i, v);
}
/**
* atomic64_sub_return_release() - atomic subtract with release ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_sub_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_sub_return_release(i, v);
}
/**
* atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_sub_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_sub_return_relaxed(i, v);
}
/**
* atomic64_fetch_sub() - atomic subtract with full ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_sub() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_sub(i, v);
}
/**
* atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_sub_acquire(i, v);
}
/**
* atomic64_fetch_sub_release() - atomic subtract with release ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_sub_release(i, v);
}
/**
* atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_sub_relaxed(i, v);
}
/**
* atomic64_inc() - atomic increment with relaxed ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_inc(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_inc(v);
}
/**
* atomic64_inc_return() - atomic increment with full ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_inc_return(v);
}
/**
* atomic64_inc_return_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_inc_return_acquire(v);
}
/**
* atomic64_inc_return_release() - atomic increment with release ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_inc_return_release(v);
}
/**
* atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_inc_return_relaxed(v);
}
/**
* atomic64_fetch_inc() - atomic increment with full ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_inc() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_inc(v);
}
/**
* atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_inc_acquire(v);
}
/**
* atomic64_fetch_inc_release() - atomic increment with release ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_inc_release(v);
}
/**
* atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_inc_relaxed(v);
}
/**
* atomic64_dec() - atomic decrement with relaxed ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_dec(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_dec(v);
}
/**
* atomic64_dec_return() - atomic decrement with full ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_dec_return(v);
}
/**
* atomic64_dec_return_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_dec_return_acquire(v);
}
/**
* atomic64_dec_return_release() - atomic decrement with release ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_dec_return_release(v);
}
/**
* atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_dec_return_relaxed(v);
}
/**
* atomic64_fetch_dec() - atomic decrement with full ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_dec() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_dec(v);
}
/**
* atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_dec_acquire(v);
}
/**
* atomic64_fetch_dec_release() - atomic decrement with release ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_dec_release(v);
}
/**
* atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_dec_relaxed(v);
}
/**
* atomic64_and() - atomic bitwise AND with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_and() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_and(i, v);
}
/**
* atomic64_fetch_and() - atomic bitwise AND with full ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_and() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_and(i, v);
}
/**
* atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_and_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_and_acquire(i, v);
}
/**
* atomic64_fetch_and_release() - atomic bitwise AND with release ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_and_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_and_release(i, v);
}
/**
* atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_and_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_and_relaxed(i, v);
}
/**
* atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_andnot() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_andnot(i, v);
}
/**
* atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & ~@i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_andnot(i, v);
}
/**
* atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & ~@i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_andnot_acquire(i, v);
}
/**
* atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & ~@i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_andnot_release(i, v);
}
/**
* atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_andnot_relaxed(i, v);
}
/**
* atomic64_or() - atomic bitwise OR with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_or() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_or(i, v);
}
/**
* atomic64_fetch_or() - atomic bitwise OR with full ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v | @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_or() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_or(i, v);
}
/**
* atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v | @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_or_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_or_acquire(i, v);
}
/**
* atomic64_fetch_or_release() - atomic bitwise OR with release ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v | @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_or_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_or_release(i, v);
}
/**
* atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_or_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_or_relaxed(i, v);
}
/**
* atomic64_xor() - atomic bitwise XOR with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_xor() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic64_xor(i, v);
}
/**
* atomic64_fetch_xor() - atomic bitwise XOR with full ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v ^ @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_xor() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_xor(i, v);
}
/**
* atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v ^ @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_xor_acquire(i, v);
}
/**
* atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v ^ @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_xor_release(i, v);
}
/**
* atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
* @i: s64 value
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_xor_relaxed(i, v);
}
/**
* atomic64_xchg() - atomic exchange with full ordering
* @v: pointer to atomic64_t
* @new: s64 value to assign
*
* Atomically updates @v to @new with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_xchg() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_xchg(v, new);
}
/**
* atomic64_xchg_acquire() - atomic exchange with acquire ordering
* @v: pointer to atomic64_t
* @new: s64 value to assign
*
* Atomically updates @v to @new with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_xchg_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_xchg_acquire(v, new);
}
/**
* atomic64_xchg_release() - atomic exchange with release ordering
* @v: pointer to atomic64_t
* @new: s64 value to assign
*
* Atomically updates @v to @new with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_xchg_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_xchg_release(v, new);
}
/**
* atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
* @v: pointer to atomic64_t
* @new: s64 value to assign
*
* Atomically updates @v to @new with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_xchg_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_xchg_relaxed(v, new);
}
/**
* atomic64_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic64_t
* @old: s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_cmpxchg(v, old, new);
}
/**
* atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic64_t
* @old: s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_cmpxchg_acquire(v, old, new);
}
/**
* atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic64_t
* @old: s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_cmpxchg_release(v, old, new);
}
/**
* atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic64_t
* @old: s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_cmpxchg_relaxed(v, old, new);
}
/**
* atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic64_t
* @old: pointer to s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg(v, old, new);
}
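/*
 * Illustrative sketch (not part of this header): a lock-free "record the
 * maximum value observed" update using atomic64_try_cmpxchg() in the usual
 * retry loop. The 'max_seen' counter is hypothetical.
 *
 *	static void update_max(atomic64_t *max_seen, s64 val)
 *	{
 *		s64 old = atomic64_read(max_seen);
 *
 *		while (old < val && !atomic64_try_cmpxchg(max_seen, &old, val))
 *			;
 *	}
 */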
/**
* atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic64_t
* @old: pointer to s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg_acquire(v, old, new);
}
/**
* atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic64_t
* @old: pointer to s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg_release(v, old, new);
}
/**
* atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic64_t
* @old: pointer to s64 value to compare with
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg_relaxed(v, old, new);
}
/**
* atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
* @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_sub_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_sub_and_test(i, v);
}
/**
* atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_dec_and_test(v);
}
/**
* atomic64_inc_and_test() - atomic increment and test if zero with full ordering
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_inc_and_test(v);
}
/**
* atomic64_add_negative() - atomic add and test if negative with full ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_negative() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_negative(i, v);
}
/**
* atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_negative_acquire() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_negative_acquire(i, v);
}
/**
* atomic64_add_negative_release() - atomic add and test if negative with release ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_negative_release() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic64_add_negative_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_negative_release(i, v);
}
/**
* atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
* @i: s64 value to add
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_negative_relaxed() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_negative_relaxed(i, v);
}
/**
* atomic64_fetch_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic64_t
* @a: s64 value to add
* @u: s64 value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there.
*
* Return: The original value of @v.
*/
static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_fetch_add_unless(v, a, u);
}
/**
* atomic64_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic64_t
* @a: s64 value to add
* @u: s64 value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_unless() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_add_unless(v, a, u);
}
/**
* atomic64_inc_not_zero() - atomic increment unless zero with full ordering
* @v: pointer to atomic64_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_inc_not_zero(v);
}
/**
* atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
* @v: pointer to atomic64_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_inc_unless_negative(v);
}
/**
* atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
* @v: pointer to atomic64_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_dec_unless_positive(v);
}
/**
* atomic64_dec_if_positive() - atomic decrement if positive with full ordering
* @v: pointer to atomic64_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there.
*
* Return: The old value of (@v - 1), regardless of whether @v was updated.
*/
static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic64_dec_if_positive(v);
}
/**
* atomic_long_read() - atomic load with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically loads the value of @v with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_read() there.
*
* Return: The value loaded from @v.
*/
static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return raw_atomic_long_read(v);
}
/**
* atomic_long_read_acquire() - atomic load with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically loads the value of @v with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_read_acquire() there.
*
* Return: The value loaded from @v.
*/
static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return raw_atomic_long_read_acquire(v);
}
/**
* atomic_long_set() - atomic set with relaxed ordering
* @v: pointer to atomic_long_t
* @i: long value to assign
*
* Atomically sets @v to @i with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_set() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
instrument_atomic_write(v, sizeof(*v));
raw_atomic_long_set(v, i);
}
/**
* atomic_long_set_release() - atomic set with release ordering
* @v: pointer to atomic_long_t
* @i: long value to assign
*
* Atomically sets @v to @i with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_set_release() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
kcsan_release();
instrument_atomic_write(v, sizeof(*v));
raw_atomic_long_set_release(v, i);
}
/**
* atomic_long_add() - atomic add with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_add(i, v);
}
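/*
 * Illustrative sketch (not part of this header): atomic_long_t matches the
 * machine word size (32-bit on 32-bit kernels, 64-bit on 64-bit kernels),
 * which makes it a good fit for object counts that scale with addressable
 * memory. The 'nr_objects' counter is hypothetical.
 *
 *	static atomic_long_t nr_objects = ATOMIC_LONG_INIT(0);
 *
 *	static void obj_created(void)
 *	{
 *		atomic_long_add(1, &nr_objects);
 *	}
 *
 *	static long obj_count(void)
 *	{
 *		return atomic_long_read(&nr_objects);
 *	}
 */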
/**
* atomic_long_add_return() - atomic add with full ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_return(i, v);
}
/**
* atomic_long_add_return_acquire() - atomic add with acquire ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_return_acquire(i, v);
}
/**
* atomic_long_add_return_release() - atomic add with release ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_return_release(i, v);
}
/**
* atomic_long_add_return_relaxed() - atomic add with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_return_relaxed(i, v);
}
/**
* atomic_long_fetch_add() - atomic add with full ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_add() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_add(i, v);
}
/**
* atomic_long_fetch_add_acquire() - atomic add with acquire ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_add_acquire(i, v);
}
/**
* atomic_long_fetch_add_release() - atomic add with release ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_add_release(i, v);
}
/**
* atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_add_relaxed(i, v);
}
/**
* atomic_long_sub() - atomic subtract with relaxed ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_sub() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_sub(i, v);
}
/**
* atomic_long_sub_return() - atomic subtract with full ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_sub_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_sub_return(i, v);
}
/**
* atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_sub_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_sub_return_acquire(i, v);
}
/**
* atomic_long_sub_return_release() - atomic subtract with release ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_sub_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_sub_return_release(i, v);
}
/**
* atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_sub_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_sub_return_relaxed(i, v);
}
/**
* atomic_long_fetch_sub() - atomic subtract with full ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_sub(i, v);
}
/**
* atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_sub_acquire(i, v);
}
/**
* atomic_long_fetch_sub_release() - atomic subtract with release ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_sub_release(i, v);
}
/**
* atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_sub_relaxed(i, v);
}
/**
* atomic_long_inc() - atomic increment with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_inc(v);
}
/**
* atomic_long_inc_return() - atomic increment with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_inc_return(v);
}
/**
* atomic_long_inc_return_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_inc_return_acquire(v);
}
/**
* atomic_long_inc_return_release() - atomic increment with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_inc_return_release(v);
}
/**
* atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_inc_return_relaxed(v);
}
/**
* atomic_long_fetch_inc() - atomic increment with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_inc(v);
}
/**
* atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_inc_acquire(v);
}
/**
* atomic_long_fetch_inc_release() - atomic increment with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_inc_release(v);
}
/**
* atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_inc_relaxed(v);
}
/**
* atomic_long_dec() - atomic decrement with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_dec(v);
}
/**
* atomic_long_dec_return() - atomic decrement with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_return() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_dec_return(v);
}
/**
* atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_return_acquire() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_dec_return_acquire(v);
}
/**
* atomic_long_dec_return_release() - atomic decrement with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_return_release() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_dec_return_release(v);
}
/**
* atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_return_relaxed() there.
*
* Return: The updated value of @v.
*/
static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_dec_return_relaxed(v);
}
/**
* atomic_long_fetch_dec() - atomic decrement with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_dec(v);
}
/**
* atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_dec_acquire(v);
}
/**
* atomic_long_fetch_dec_release() - atomic decrement with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_dec_release(v);
}
/**
* atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_dec_relaxed(v);
}
/**
* atomic_long_and() - atomic bitwise AND with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_and() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_and(i, v);
}
/**
* atomic_long_fetch_and() - atomic bitwise AND with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_and() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_and(i, v);
}
/**
* atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_and_acquire(i, v);
}
/**
* atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_and_release(i, v);
}
/**
* atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_and_relaxed(i, v);
}
/**
* atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_andnot() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_andnot(i, v);
}
/**
* atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_andnot(i, v);
}
/**
* atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_andnot_acquire(i, v);
}
/**
* atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_andnot_release(i, v);
}
/**
* atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_andnot_relaxed(i, v);
}
/**
* atomic_long_or() - atomic bitwise OR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_or() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_or(i, v);
}
/**
* atomic_long_fetch_or() - atomic bitwise OR with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_or() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_or(i, v);
}
/**
* atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_or_acquire(i, v);
}
/**
* atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_or_release(i, v);
}
/**
* atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_or_relaxed(i, v);
}
/**
* atomic_long_xor() - atomic bitwise XOR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_xor() there.
*
* Return: Nothing.
*/
static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
raw_atomic_long_xor(i, v);
}
/**
* atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_xor(i, v);
}
/**
* atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_xor_acquire(i, v);
}
/**
* atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_xor_release(i, v);
}
/**
* atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_xor_relaxed(i, v);
}
/**
* atomic_long_xchg() - atomic exchange with full ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_xchg() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_xchg(atomic_long_t *v, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_xchg(v, new);
}
/**
* atomic_long_xchg_acquire() - atomic exchange with acquire ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_xchg_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_xchg_acquire(v, new);
}
/**
* atomic_long_xchg_release() - atomic exchange with release ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_xchg_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_xchg_release(v, new);
}
/**
* atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_xchg_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_xchg_relaxed(v, new);
}
/**
* atomic_long_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_cmpxchg(v, old, new);
}
/**
* atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_cmpxchg_acquire(v, old, new);
}
/**
* atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_cmpxchg_release(v, old, new);
}
/**
* atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_cmpxchg_relaxed(v, old, new);
}
/**
* atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg(v, old, new);
}
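/*
* Illustrative usage sketch (not part of this generated header): the usual
* try_cmpxchg() retry loop. On failure @old is refreshed to the current
* value of the counter, so the loop needs no separate re-read. The name
* add_clamped below is hypothetical.
*
*	static long add_clamped(atomic_long_t *counter, long max)
*	{
*		long old = atomic_long_read(counter);
*
*		do {
*			if (old >= max)
*				break;
*		} while (!atomic_long_try_cmpxchg(counter, &old, old + 1));
*
*		return old;
*	}
*/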
/**
* atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg_acquire(v, old, new);
}
/**
* atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg_release(v, old, new);
}
/**
* atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg_relaxed(v, old, new);
}
/**
* atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_sub_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_sub_and_test(i, v);
}
/**
* atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_dec_and_test(v);
}
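/*
* Illustrative usage sketch (not part of this generated header): the classic
* reference-count release pattern. The full ordering of dec_and_test() makes
* the final decrement a safe point at which to free the object. The names
* below (struct obj, obj_put) are hypothetical.
*
*	struct obj {
*		atomic_long_t refs;
*	};
*
*	static void obj_put(struct obj *o)
*	{
*		if (atomic_long_dec_and_test(&o->refs))
*			kfree(o);
*	}
*/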
/**
* atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_and_test() there.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_inc_and_test(v);
}
/**
* atomic_long_add_negative() - atomic add and test if negative with full ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_negative() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_negative(i, v);
}
/**
* atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_negative_acquire() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_negative_acquire(i, v);
}
/**
* atomic_long_add_negative_release() - atomic add and test if negative with release ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_negative_release() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_long_add_negative_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_negative_release(i, v);
}
/**
* atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_negative_relaxed() there.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_negative_relaxed(i, v);
}
/**
* atomic_long_fetch_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic_long_t
* @a: long value to add
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there.
*
* Return: The original value of @v.
*/
static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_fetch_add_unless(v, a, u);
}
/**
* atomic_long_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic_long_t
* @a: long value to add
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_add_unless(v, a, u);
}
/**
* atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
* @v: pointer to atomic_long_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_inc_not_zero(v);
}
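/*
* Illustrative usage sketch (not part of this generated header): taking a
* reference only while an object is still live, e.g. after an RCU-protected
* lookup. inc_not_zero() never resurrects an object whose count has already
* dropped to zero. The names below (struct obj, obj_tryget) are hypothetical.
*
*	static bool obj_tryget(struct obj *o)
*	{
*		return atomic_long_inc_not_zero(&o->refs);
*	}
*/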
/**
* atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
* @v: pointer to atomic_long_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_inc_unless_negative(v);
}
/**
* atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
* @v: pointer to atomic_long_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_dec_unless_positive(v);
}
/**
* atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
* @v: pointer to atomic_long_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there.
*
* Return: The old value of (@v - 1), regardless of whether @v was updated.
*/
static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
return raw_atomic_long_dec_if_positive(v);
}
#define xchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_xchg(__ai_ptr, __VA_ARGS__); \
})
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#define xchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_xchg_release(__ai_ptr, __VA_ARGS__); \
})
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
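/*
* Illustrative usage sketch (not part of this generated header): unlike the
* atomic_*_t wrappers above, xchg()/cmpxchg() operate on plain scalar
* lvalues. cmpxchg() returns the value it found, so a claim succeeds only if
* that value matched the expected one. The names below (state, STATE_IDLE,
* STATE_BUSY, claim_state) are hypothetical.
*
*	static unsigned long state = STATE_IDLE;
*
*	static bool claim_state(void)
*	{
*		return cmpxchg(&state, STATE_IDLE, STATE_BUSY) == STATE_IDLE;
*	}
*/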
#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg128(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg128_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg128_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg128_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \
})
#define try_cmpxchg(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg_acquire(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg_release(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg_relaxed(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_acquire(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_release(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg128(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg128_acquire(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg128_release(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg128_relaxed(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define cmpxchg_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg128_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \
})
#define sync_cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define try_cmpxchg_local(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_local(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg128_local(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define sync_try_cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
// 8829b337928e9508259079d32581775ececd415b
/*
* mm/rmap.c - physical to virtual reverse mappings
*
* Copyright 2001, Rik van Riel <riel@conectiva.com.br>
* Released under the General Public License (GPL).
*
* Simple, low overhead reverse mapping scheme.
* Please try to keep this thing as modular as possible.
*
* Provides methods for unmapping each kind of mapped page:
* the anon methods track anonymous pages, and
* the file methods track pages belonging to an inode.
*
* Original design by Rik van Riel <riel@conectiva.com.br> 2001
* File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
* Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
* Contributions by Hugh Dickins 2003, 2004
*/
/*
* Lock ordering in mm:
*
* inode->i_rwsem (while writing or truncating, not reading or faulting)
* mm->mmap_lock
* mapping->invalidate_lock (in filemap_fault)
* folio_lock
* hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
* vma_start_write
* mapping->i_mmap_rwsem
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
* mapping->private_lock (in block_dirty_folio)
* i_pages lock (widely used)
* lruvec->lru_lock (in folio_lruvec_lock_irq)
* inode->i_lock (in set_page_dirty's __mark_inode_dirty)
* bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
* sb_lock (within inode_lock in fs/fs-writeback.c)
* i_pages lock (widely used, in set_page_dirty,
* in arch-dependent flush_dcache_mmap_lock,
* within bdi.wb->list_lock in __sync_single_inode)
*
* anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
* ->tasklist_lock
* pte map lock
*
* hugetlbfs PageHuge() pages take locks in this order:
* hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
* vma_lock (hugetlb specific lock for pmd_sharing)
* mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
* folio_lock
*/
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
#include <linux/oom.h>
#include <asm/tlbflush.h>
#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>
#include "internal.h"
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;
static inline struct anon_vma *anon_vma_alloc(void)
{
struct anon_vma *anon_vma;
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
anon_vma->num_children = 0;
anon_vma->num_active_vmas = 0;
anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
* from fork, the root will be reset to the parent's anon_vma.
*/
anon_vma->root = anon_vma;
}
return anon_vma;
}
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
VM_BUG_ON(atomic_read(&anon_vma->refcount));
/*
* Synchronize against folio_lock_anon_vma_read() such that
* we can safely hold the lock without the anon_vma getting
* freed.
*
* Relies on the full mb implied by the atomic_dec_and_test() from
* put_anon_vma() against the acquire barrier implied by
* down_read_trylock() from folio_lock_anon_vma_read(). This orders:
*
* folio_lock_anon_vma_read()   VS      put_anon_vma()
*   down_read_trylock()                  atomic_dec_and_test()
*   LOCK                                 MB
*   atomic_read()                        rwsem_is_locked()
*
* LOCK should suffice since the actual taking of the lock must
* happen _before_ what follows.
*/
might_sleep();
if (rwsem_is_locked(&anon_vma->root->rwsem)) {
anon_vma_lock_write(anon_vma);
anon_vma_unlock_write(anon_vma);
}
kmem_cache_free(anon_vma_cachep, anon_vma);
}
static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}
static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
static void anon_vma_chain_link(struct vm_area_struct *vma,
struct anon_vma_chain *avc,
struct anon_vma *anon_vma)
{
avc->vma = vma;
avc->anon_vma = anon_vma;
list_add(&avc->same_vma, &vma->anon_vma_chain);
anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
/**
* __anon_vma_prepare - attach an anon_vma to a memory region
* @vma: the memory region in question
*
* This makes sure the memory mapping described by 'vma' has
* an 'anon_vma' attached to it, so that we can associate the
* anonymous pages mapped into it with that anon_vma.
*
* The common case will be that we already have one, which
* is handled inline by anon_vma_prepare(). But if
* not, we either need to find an adjacent mapping that we
* can re-use the anon_vma from (very common when the only
* reason for splitting a vma has been mprotect()), or we
* allocate a new one.
*
* Anon-vma allocations are very subtle, because we may have
* optimistically looked up an anon_vma in folio_lock_anon_vma_read()
* and that may actually touch the rwsem even in the newly
* allocated vma (it depends on RCU to make sure that the
* anon_vma isn't actually destroyed).
*
* As a result, we need to do proper anon_vma locking even
* for the new allocation. At the same time, we do not want
* to do any locking for the common case of already having
* an anon_vma.
*/
int __anon_vma_prepare(struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
struct anon_vma *anon_vma, *allocated;
struct anon_vma_chain *avc;
mmap_assert_locked(mm);
might_sleep();
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_enomem;
anon_vma = find_mergeable_anon_vma(vma);
allocated = NULL;
if (!anon_vma) {
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
goto out_enomem_free_avc;
anon_vma->num_children++; /* self-parent link for new root */
allocated = anon_vma;
}
anon_vma_lock_write(anon_vma);
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
anon_vma->num_active_vmas++;
allocated = NULL;
avc = NULL;
}
spin_unlock(&mm->page_table_lock);
anon_vma_unlock_write(anon_vma);
if (unlikely(allocated))
put_anon_vma(allocated);
if (unlikely(avc))
anon_vma_chain_free(avc);
return 0;
out_enomem_free_avc:
anon_vma_chain_free(avc);
out_enomem:
return -ENOMEM;
}
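/*
* For reference, the common-case wrapper mentioned in the comment above
* checks for an existing anon_vma inline and only falls back to
* __anon_vma_prepare() on the slow path, roughly:
*
*	if (likely(vma->anon_vma))
*		return 0;
*	return __anon_vma_prepare(vma);
*
* so a fault-path caller pays no locking cost once the VMA already has an
* anon_vma attached.
*/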
/*
* This is a useful helper function for locking the anon_vma root as
* we traverse the vma->anon_vma_chain, looping over anon_vma's that
* have the same vma.
*
* Such anon_vma's should have the same root, so you'd expect to see
* just a single mutex_lock for the whole traversal.
*/
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
struct anon_vma *new_root = anon_vma->root;
if (new_root != root) {
if (WARN_ON_ONCE(root))
up_write(&root->rwsem);
root = new_root;
down_write(&root->rwsem);
}
return root;
}
static inline void unlock_anon_vma_root(struct anon_vma *root)
{
if (root)
up_write(&root->rwsem);
}
/*
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
*
* anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
* copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
* while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
* prevent endless growth of the anon_vma chain. Since dst->anon_vma is set to
* NULL before the call, we can identify this case by checking (!dst->anon_vma &&
* src->anon_vma).
*
* If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
* and reuse existing anon_vma which has no vmas and only one child anon_vma.
* This prevents the anon_vma hierarchy from degrading into an endless linear
* chain when a task forks repeatedly. On the other hand, an anon_vma with
* more than one child is never reused, even if it has no live vma, so the
* rmap walker has a good chance of avoiding a scan of the whole hierarchy
* when it searches for where a page is mapped.
*/
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma;
avc = anon_vma_chain_alloc(GFP_NOWAIT);
if (unlikely(!avc)) {
unlock_anon_vma_root(root);
root = NULL;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto enomem_failure;
}
anon_vma = pavc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_chain_link(dst, avc, anon_vma);
/*
* Reuse existing anon_vma if it has no vma and only one
* anon_vma child.
*
* Root anon_vma is never reused:
* it has self-parent reference and at least one child.
*/
if (!dst->anon_vma && src->anon_vma && anon_vma->num_children < 2 &&
anon_vma->num_active_vmas == 0)
dst->anon_vma = anon_vma;
}
if (dst->anon_vma)
dst->anon_vma->num_active_vmas++;
unlock_anon_vma_root(root);
return 0;
enomem_failure:
/*
* dst->anon_vma is dropped here otherwise its num_active_vmas can
* be incorrectly decremented in unlink_anon_vmas().
* We can safely do this because callers of anon_vma_clone() don't care
* about dst->anon_vma if anon_vma_clone() failed.
*/
dst->anon_vma = NULL;
unlink_anon_vmas(dst);
return -ENOMEM;
}
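/*
 * Hedged illustration of the reuse rule above (a scenario, not code from
 * this file): if a task forks repeatedly and the intermediate generations
 * exit or unmap without faulting new anonymous pages, their anon_vmas end
 * up with num_active_vmas == 0 and at most one child each. anon_vma_fork()
 * can then hand such an anon_vma to the new VMA via dst->anon_vma instead
 * of allocating yet another level, keeping the hierarchy shallow.
 */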
/*
* Attach vma to its own anon_vma, as well as to the anon_vmas that
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
int error;
/* Don't bother if the parent process has no anon_vma here. */
if (!pvma->anon_vma)
return 0;
/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
vma->anon_vma = NULL;
/*
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
*/
error = anon_vma_clone(vma, pvma);
if (error)
return error;
/* An existing anon_vma has been reused, all done then. */
if (vma->anon_vma)
return 0;
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
anon_vma->num_active_vmas++;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
/*
* The root anon_vma's rwsem is the lock actually used when we
* lock any of the anon_vmas in this anon_vma tree.
*/
anon_vma->root = pvma->anon_vma->root;
anon_vma->parent = pvma->anon_vma;
/*
* With refcounts, an anon_vma can stay around longer than the
* process it belongs to. The root anon_vma needs to be pinned until
* this anon_vma is freed, because the lock lives in the root.
*/
get_anon_vma(anon_vma->root);
/* Mark this anon_vma as the one where our new (COWed) pages go. */
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
anon_vma->parent->num_children++;
anon_vma_unlock_write(anon_vma);
return 0;
out_error_free_anon_vma:
put_anon_vma(anon_vma);
out_error:
unlink_anon_vmas(vma);
return -ENOMEM;
}
void unlink_anon_vmas(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc, *next;
struct anon_vma *root = NULL;
/*
* Unlink each anon_vma chained to the VMA. This list is ordered
* from newest to oldest, ensuring the root anon_vma gets freed last.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
/*
* Leave empty anon_vmas on the list - we'll need
* to free them outside the lock.
*/
if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
anon_vma->parent->num_children--;
continue;
}
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
if (vma->anon_vma) {
vma->anon_vma->num_active_vmas--;
/*
* The vma may still be needed after unlink; a fresh anon_vma will be
* prepared again when a fault is handled.
*/
vma->anon_vma = NULL;
}
unlock_anon_vma_root(root);
/*
* Iterate the list once more, it now only contains empty and unlinked
* anon_vmas, destroy them. Could not do before due to __put_anon_vma()
* needing to write-acquire the anon_vma->root->rwsem.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
VM_WARN_ON(anon_vma->num_children);
VM_WARN_ON(anon_vma->num_active_vmas);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
}
static void anon_vma_ctor(void *data)
{
struct anon_vma *anon_vma = data;
init_rwsem(&anon_vma->rwsem);
atomic_set(&anon_vma->refcount, 0);
anon_vma->rb_root = RB_ROOT_CACHED;
}
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
anon_vma_ctor);
anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
SLAB_PANIC|SLAB_ACCOUNT);
}
/*
* Getting a lock on a stable anon_vma from a page off the LRU is tricky!
*
* Since there is no serialization whatsoever against folio_remove_rmap_*(),
* the best this function can do is return a refcount-increased anon_vma
* that might have been relevant to this page.
*
* The page might have been remapped to a different anon_vma or the anon_vma
* returned may already be freed (and even reused).
*
* In case it was remapped to a different anon_vma, the new anon_vma will be a
* child of the old anon_vma, and the anon_vma lifetime rules will therefore
* ensure that any anon_vma obtained from the page will still be valid for as
* long as we observe page_mapped() [ hence all those page_mapped() tests ].
*
* All users of this function must be very careful when walking the anon_vma
* chain and verify that the page in question is indeed mapped in it
* [ something equivalent to page_mapped_in_vma() ].
*
* Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
* folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
* if there is a mapcount, we can dereference the anon_vma after observing
* those.
*
* NOTE: the caller should normally hold folio lock when calling this. If
* not, the caller needs to double check the anon_vma didn't change after
* taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it
* concurrently without folio lock protection). See folio_lock_anon_vma_read()
* which has already covered that, and comment above remap_pages().
*/
struct anon_vma *folio_get_anon_vma(const struct folio *folio)
{
struct anon_vma *anon_vma = NULL;
unsigned long anon_mapping;
rcu_read_lock();
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
goto out;
if (!folio_mapped(folio))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
/*
* If this folio is still mapped, then its anon_vma cannot have been
* freed. But if it has been unmapped, we have no security against the
* anon_vma structure being freed and reused (for another anon_vma:
* SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
* above cannot corrupt).
*/
if (!folio_mapped(folio)) {
rcu_read_unlock();
put_anon_vma(anon_vma);
return NULL;
}
out:
rcu_read_unlock();
return anon_vma;
}
/*
* Similar to folio_get_anon_vma() except it locks the anon_vma.
*
* It's a little more complex as it tries to keep the fast path to a single
* atomic op -- the trylock. If we fail the trylock, we fall back to getting a
* reference like with folio_get_anon_vma() and then block on the mutex
* in the !rwc->try_lock case.
*/
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma = NULL;
struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
retry:
rcu_read_lock();
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
goto out;
if (!folio_mapped(folio))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
root_anon_vma = READ_ONCE(anon_vma->root);
if (down_read_trylock(&root_anon_vma->rwsem)) {
/*
* folio_move_anon_rmap() might have changed the anon_vma as we
* might not hold the folio lock here.
*/
if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
anon_mapping)) {
up_read(&root_anon_vma->rwsem);
rcu_read_unlock();
goto retry;
}
/*
* If the folio is still mapped, then this anon_vma is still
* its anon_vma, and holding the mutex ensures that it will
* not go away, see anon_vma_free().
*/
if (!folio_mapped(folio)) {
up_read(&root_anon_vma->rwsem);
anon_vma = NULL;
}
goto out;
}
if (rwc && rwc->try_lock) {
anon_vma = NULL;
rwc->contended = true;
goto out;
}
/* trylock failed, we got to sleep */
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
if (!folio_mapped(folio)) {
rcu_read_unlock();
put_anon_vma(anon_vma);
return NULL;
}
/* we pinned the anon_vma, it's safe to sleep */
rcu_read_unlock();
anon_vma_lock_read(anon_vma);
/*
* folio_move_anon_rmap() might have changed the anon_vma as we might
* not hold the folio lock here.
*/
if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
anon_mapping)) {
anon_vma_unlock_read(anon_vma);
put_anon_vma(anon_vma);
anon_vma = NULL;
goto retry;
}
if (atomic_dec_and_test(&anon_vma->refcount)) {
/*
* Oops, we held the last refcount, release the lock
* and bail -- can't simply use put_anon_vma() because
* we'll deadlock on the anon_vma_lock_write() recursion.
*/
anon_vma_unlock_read(anon_vma);
__put_anon_vma(anon_vma);
anon_vma = NULL;
}
return anon_vma;
out:
rcu_read_unlock();
return anon_vma;
}
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
* important if a PTE was dirty when it was unmapped that it's flushed
* before any IO is initiated on the page to prevent lost writes. Similarly,
* it must be flushed before freeing to prevent data leakage.
*/
void try_to_unmap_flush(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
if (!tlb_ubc->flush_required)
return;
arch_tlbbatch_flush(&tlb_ubc->arch);
tlb_ubc->flush_required = false;
tlb_ubc->writable = false;
}
/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
if (tlb_ubc->writable)
try_to_unmap_flush();
}
/*
* Bits 0-14 of mm->tlb_flush_batched record pending generations.
* Bits 16-30 of mm->tlb_flush_batched record flushed generations.
*/
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16
#define TLB_FLUSH_BATCH_PENDING_MASK \
((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE \
(TLB_FLUSH_BATCH_PENDING_MASK / 2)
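/*
 * Worked example of the encoding above (illustration only): with
 * TLB_FLUSH_BATCH_FLUSHED_SHIFT == 16, TLB_FLUSH_BATCH_PENDING_MASK is
 * 0x7fff. After three calls to set_tlb_ubc_flush_pending() and no flush,
 * mm->tlb_flush_batched reads 0x00000003 (pending = 3, flushed = 0).
 * flush_tlb_batched_pending() then sees pending != flushed, flushes, and
 * stores 3 | (3 << 16) == 0x00030003, i.e. flushed catches up with pending.
 */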
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
unsigned long start, unsigned long end)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
int batch;
bool writable = pte_dirty(pteval);
if (!pte_accessible(mm, pteval))
return;
arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
tlb_ubc->flush_required = true;
/*
* Ensure compiler does not re-order the setting of tlb_flush_batched
* before the PTE is cleared.
*/
barrier();
batch = atomic_read(&mm->tlb_flush_batched);
retry:
if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
/*
* Prevent `pending' from catching up with `flushed' because of
* overflow. Reset `pending' and `flushed' to be 1 and 0 if
* `pending' becomes large.
*/
if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
goto retry;
} else {
atomic_inc(&mm->tlb_flush_batched);
}
/*
* If the PTE was dirty then it's best to assume it's writable. The
* caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
* before the page is queued for IO.
*/
if (writable)
tlb_ubc->writable = true;
}
/*
* Returns true if the TLB flush should be deferred to the end of a batch of
* unmap operations to reduce IPIs.
*/
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
if (!(flags & TTU_BATCH_FLUSH))
return false;
return arch_tlbbatch_should_defer(mm);
}
/*
* Reclaim unmaps pages under the PTL but do not flush the TLB prior to
* releasing the PTL if TLB flushes are batched. It's possible for a parallel
* operation such as mprotect or munmap to race between reclaim unmapping
* the page and flushing the page. If this race occurs, it potentially allows
* access to data via a stale TLB entry. Tracking all mm's that have TLB
* batching in flight would be expensive during reclaim so instead track
* whether TLB batching occurred in the past and if so then do a flush here
* if required. This will cost one additional flush per reclaim cycle paid
* by the first operation at risk such as mprotect and munmap.
*
* This must be called under the PTL so that an access to tlb_flush_batched
* that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
* via the PTL.
*/
void flush_tlb_batched_pending(struct mm_struct *mm)
{
int batch = atomic_read(&mm->tlb_flush_batched);
int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
if (pending != flushed) {
flush_tlb_mm(mm);
/*
* If a new TLB flush became pending while we were flushing, leave
* mm->tlb_flush_batched as is, to avoid losing that flush.
*/
atomic_cmpxchg(&mm->tlb_flush_batched, batch,
pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
unsigned long start, unsigned long end)
{
}
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
/**
* page_address_in_vma - The virtual address of a page in this VMA.
* @folio: The folio containing the page.
* @page: The page within the folio.
* @vma: The VMA we need to know the address in.
*
* Calculates the user virtual address of this page in the specified VMA.
* It is the caller's responsibility to check the page is actually
* within the VMA. There may not currently be a PTE pointing at this
* page, but if a page fault occurs at this address, this is the page
* which will be accessed.
*
* Context: Caller should hold a reference to the folio. Caller should
* hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
* VMA from being altered.
*
* Return: The virtual address corresponding to this page in the VMA.
*/
unsigned long page_address_in_vma(const struct folio *folio,
const struct page *page, const struct vm_area_struct *vma)
{
if (folio_test_anon(folio)) {
struct anon_vma *anon_vma = folio_anon_vma(folio);
/*
* Note: swapoff's unuse_vma() is more efficient with this
* check, and needs it to match anon_vma when KSM is active.
*/
if (!vma->anon_vma || !anon_vma ||
vma->anon_vma->root != anon_vma->root)
return -EFAULT;
} else if (!vma->vm_file) {
return -EFAULT;
} else if (vma->vm_file->f_mapping != folio->mapping) {
return -EFAULT;
}
/* KSM folios don't reach here because of the !anon_vma check */
return vma_address(vma, page_pgoff(folio, page), 1);
}
/*
* Returns the actual pmd_t* where we expect 'address' to be mapped from, or
* NULL if it doesn't exist. No guarantees / checks on what the pmd_t*
* represents.
*/
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
p4d = p4d_offset(pgd, address);
if (!p4d_present(*p4d))
goto out;
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
out:
return pmd;
}
struct folio_referenced_arg {
int mapcount;
int referenced;
vm_flags_t vm_flags;
struct mem_cgroup *memcg;
};
/*
* arg: folio_referenced_arg will be passed
*/
static bool folio_referenced_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *arg)
{
struct folio_referenced_arg *pra = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
int ptes = 0, referenced = 0;
while (page_vma_mapped_walk(&pvmw)) {
address = pvmw.address;
if (vma->vm_flags & VM_LOCKED) {
ptes++;
pra->mapcount--;
/* Only mlock fully mapped pages */
if (pvmw.pte && ptes != pvmw.nr_pages)
continue;
/*
* All PTEs must be protected by the page table lock in
* order to mlock the page.
*
* If a page table boundary has been crossed, the current
* ptl only protects part of the ptes.
*/
if (pvmw.flags & PVMW_PGTABLE_CROSSED)
continue;
/* Restore the mlock which got missed */
mlock_vma_folio(folio, vma);
page_vma_mapped_walk_done(&pvmw);
pra->vm_flags |= VM_LOCKED;
return false; /* To break the loop */
}
/*
* Skip the non-shared swapbacked folio mapped solely by
* the exiting or OOM-reaped process. This avoids redundant
* swap-out followed by an immediate unmap.
*/
if ((!atomic_read(&vma->vm_mm->mm_users) ||
check_stable_address_space(vma->vm_mm)) &&
folio_test_anon(folio) && folio_test_swapbacked(folio) &&
!folio_maybe_mapped_shared(folio)) {
pra->referenced = -1;
page_vma_mapped_walk_done(&pvmw);
return false;
}
if (lru_gen_enabled() && pvmw.pte) {
if (lru_gen_look_around(&pvmw))
referenced++;
} else if (pvmw.pte) {
if (ptep_clear_flush_young_notify(vma, address,
pvmw.pte))
referenced++;
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pmdp_clear_flush_young_notify(vma, address,
pvmw.pmd))
referenced++;
} else {
/* unexpected pmd-mapped folio? */
WARN_ON_ONCE(1);
}
pra->mapcount--;
}
if (referenced)
folio_clear_idle(folio);
if (folio_test_clear_young(folio))
referenced++;
if (referenced) {
pra->referenced++;
pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
}
if (!pra->mapcount)
return false; /* To break the loop */
return true;
}
static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
struct folio_referenced_arg *pra = arg;
struct mem_cgroup *memcg = pra->memcg;
/*
* Ignore references from this mapping if it has no recency. If the
* folio has been used in another mapping, we will catch it; if this
* other mapping is already gone, the unmap path will have set the
* referenced flag or activated the folio in zap_pte_range().
*/
if (!vma_has_recency(vma))
return true;
/*
* If we are reclaiming on behalf of a cgroup, skip counting on behalf
* of references from different cgroups.
*/
if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
return true;
return false;
}
/**
* folio_referenced() - Test if the folio was referenced.
* @folio: The folio to test.
* @is_locked: Caller holds lock on the folio.
* @memcg: target memory cgroup
* @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
*
* Quick test_and_clear_referenced for all mappings of a folio,
*
* Return: The number of mappings which referenced the folio. Return -1 if
* the function bailed out due to rmap lock contention.
*/
int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg, vm_flags_t *vm_flags)
{
bool we_locked = false;
struct folio_referenced_arg pra = {
.mapcount = folio_mapcount(folio),
.memcg = memcg,
};
struct rmap_walk_control rwc = {
.rmap_one = folio_referenced_one,
.arg = (void *)&pra,
.anon_lock = folio_lock_anon_vma_read,
.try_lock = true,
.invalid_vma = invalid_folio_referenced_vma,
};
*vm_flags = 0;
if (!pra.mapcount)
return 0;
if (!folio_raw_mapping(folio))
return 0;
if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
we_locked = folio_trylock(folio);
if (!we_locked)
return 1;
}
rmap_walk(folio, &rwc);
*vm_flags = pra.vm_flags;
if (we_locked)
folio_unlock(folio);
return rwc.contended ? -1 : pra.referenced;
}
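/*
 * Usage sketch (an assumption about the caller, shown for context only):
 * reclaim typically invokes this roughly as
 *
 *	referenced = folio_referenced(folio, is_locked,
 *				      sc->target_mem_cgroup, &vm_flags);
 *	if (referenced == -1)
 *		treat the folio as referenced (rmap lock was contended);
 *
 * where the -1 return comes from the try_lock/contended handling above.
 */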
static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
int cleaned = 0;
struct vm_area_struct *vma = pvmw->vma;
struct mmu_notifier_range range;
unsigned long address = pvmw->address;
/*
* We have to assume the worst case, i.e. pmd, for invalidation. Note that
* the folio cannot be freed from this function.
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
vma->vm_mm, address, vma_address_end(pvmw));
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(pvmw)) {
int ret = 0;
address = pvmw->address;
if (pvmw->pte) {
pte_t *pte = pvmw->pte;
pte_t entry = ptep_get(pte);
/*
* PFN swap PTEs, such as device-exclusive ones, that
* actually map pages are clean and not writable from a
* CPU perspective. The MMU notifier takes care of any
* device aspects.
*/
if (!pte_present(entry))
continue;
if (!pte_dirty(entry) && !pte_write(entry))
continue;
flush_cache_page(vma, address, pte_pfn(entry));
entry = ptep_clear_flush(vma, address, pte);
entry = pte_wrprotect(entry);
entry = pte_mkclean(entry);
set_pte_at(vma->vm_mm, address, pte, entry);
ret = 1;
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t *pmd = pvmw->pmd;
pmd_t entry;
if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
continue;
flush_cache_range(vma, address,
address + HPAGE_PMD_SIZE);
entry = pmdp_invalidate(vma, address, pmd);
entry = pmd_wrprotect(entry);
entry = pmd_mkclean(entry);
set_pmd_at(vma->vm_mm, address, pmd, entry);
ret = 1;
#else
/* unexpected pmd-mapped folio? */
WARN_ON_ONCE(1);
#endif
}
if (ret)
cleaned++;
}
mmu_notifier_invalidate_range_end(&range);
return cleaned;
}
static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
int *cleaned = arg;
*cleaned += page_vma_mkclean_one(&pvmw);
return true;
}
static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
if (vma->vm_flags & VM_SHARED)
return false;
return true;
}
int folio_mkclean(struct folio *folio)
{
int cleaned = 0;
struct address_space *mapping;
struct rmap_walk_control rwc = {
.arg = (void *)&cleaned,
.rmap_one = page_mkclean_one,
.invalid_vma = invalid_mkclean_vma,
};
BUG_ON(!folio_test_locked(folio));
if (!folio_mapped(folio))
return 0;
mapping = folio_mapping(folio);
if (!mapping)
return 0;
rmap_walk(folio, &rwc);
return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);
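/*
 * Hedged usage sketch (the precise caller lives outside this file): the
 * writeback path uses folio_mkclean() roughly as
 *
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 *
 * i.e. if any PTE had to be cleaned and write-protected, the folio is
 * re-marked dirty so the data still reaches the backing store.
 */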
struct wrprotect_file_state {
int cleaned;
pgoff_t pgoff;
unsigned long pfn;
unsigned long nr_pages;
};
static bool mapping_wrprotect_range_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *arg)
{
struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
struct page_vma_mapped_walk pvmw = {
.pfn = state->pfn,
.nr_pages = state->nr_pages,
.pgoff = state->pgoff,
.vma = vma,
.address = address,
.flags = PVMW_SYNC,
};
state->cleaned += page_vma_mkclean_one(&pvmw);
return true;
}
static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
pgoff_t pgoff_start, unsigned long nr_pages,
struct rmap_walk_control *rwc, bool locked);
/**
* mapping_wrprotect_range() - Write-protect all mappings in a specified range.
*
* @mapping: The mapping whose reverse mapping should be traversed.
* @pgoff: The page offset at which @pfn is mapped within @mapping.
* @pfn: The PFN of the page mapped in @mapping at @pgoff.
* @nr_pages: The number of physically contiguous base pages spanned.
*
* Traverses the reverse mapping, finding all VMAs which contain a shared
* mapping of the pages in the specified range in @mapping, and write-protects
* them (that is, updates the page tables to mark the mappings read-only such
* that a write protection fault arises when the mappings are written to).
*
* The @pfn value need not refer to a folio, but rather can reference a kernel
* allocation which is mapped into userland. We therefore do not require that
* the page maps to a folio with a valid mapping or index field, rather the
* caller specifies these in @mapping and @pgoff.
*
* Return: the number of write-protected PTEs, or an error.
*/
int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
unsigned long pfn, unsigned long nr_pages)
{
struct wrprotect_file_state state = {
.cleaned = 0,
.pgoff = pgoff,
.pfn = pfn,
.nr_pages = nr_pages,
};
struct rmap_walk_control rwc = {
.arg = (void *)&state,
.rmap_one = mapping_wrprotect_range_one,
.invalid_vma = invalid_mkclean_vma,
};
if (!mapping)
return 0;
__rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
/* locked = */false);
return state.cleaned;
}
EXPORT_SYMBOL_GPL(mapping_wrprotect_range);
/**
* pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
* [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
* within the @vma of shared mappings. And since clean PTEs
* should also be readonly, write protects them too.
* @pfn: start pfn.
* @nr_pages: number of physically contiguous pages starting with @pfn.
* @pgoff: page offset that @pfn is mapped at.
* @vma: vma that @pfn mapped within.
*
* Returns the number of cleaned PTEs (including PMDs).
*/
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma)
{
struct page_vma_mapped_walk pvmw = {
.pfn = pfn,
.nr_pages = nr_pages,
.pgoff = pgoff,
.vma = vma,
.flags = PVMW_SYNC,
};
if (invalid_mkclean_vma(vma, NULL))
return 0;
pvmw.address = vma_address(vma, pgoff, nr_pages);
VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
return page_vma_mkclean_one(&pvmw);
}
static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
{
int idx;
if (nr) {
idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
__lruvec_stat_mod_folio(folio, idx, nr);
}
if (nr_pmdmapped) {
if (folio_test_anon(folio)) {
idx = NR_ANON_THPS;
__lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
} else {
/* NR_*_PMDMAPPED are not maintained per-memcg */
idx = folio_test_swapbacked(folio) ?
NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
__mod_node_page_state(folio_pgdat(folio), idx,
nr_pmdmapped);
}
}
}
static __always_inline void __folio_add_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *vma,
enum pgtable_level level)
{
atomic_t *mapped = &folio->_nr_pages_mapped;
const int orig_nr_pages = nr_pages;
int first = 0, nr = 0, nr_pmdmapped = 0;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
case PGTABLE_LEVEL_PTE:
if (!folio_test_large(folio)) {
nr = atomic_inc_and_test(&folio->_mapcount);
break;
}
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
if (nr == orig_nr_pages)
/* Was completely unmapped. */
nr = folio_large_nr_pages(folio);
else
nr = 0;
break;
}
do {
first += atomic_inc_and_test(&page->_mapcount);
} while (page++, --nr_pages > 0);
if (first &&
atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
nr = first;
folio_add_large_mapcount(folio, orig_nr_pages, vma);
break;
case PGTABLE_LEVEL_PMD:
case PGTABLE_LEVEL_PUD:
first = atomic_inc_and_test(&folio->_entire_mapcount);
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
if (level == PGTABLE_LEVEL_PMD && first)
nr_pmdmapped = folio_large_nr_pages(folio);
nr = folio_inc_return_large_mapcount(folio, vma);
if (nr == 1)
/* Was completely unmapped. */
nr = folio_large_nr_pages(folio);
else
nr = 0;
break;
}
if (first) {
nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
nr_pages = folio_large_nr_pages(folio);
/*
* We only track PMD mappings of PMD-sized
* folios separately.
*/
if (level == PGTABLE_LEVEL_PMD)
nr_pmdmapped = nr_pages;
nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of a remove and another add? */
if (unlikely(nr < 0))
nr = 0;
} else {
/* Raced ahead of a remove of ENTIRELY_MAPPED */
nr = 0;
}
}
folio_inc_large_mapcount(folio, vma);
break;
default:
BUILD_BUG();
}
__folio_mod_stat(folio, nr, nr_pmdmapped);
}
/**
* folio_move_anon_rmap - move a folio to our anon_vma
* @folio: The folio to move to our anon_vma
* @vma: The vma the folio belongs to
*
* When a folio belongs exclusively to one process after a COW event,
* that folio can be moved into the anon_vma that belongs to just that
* process, so the rmap code will not search the parent or sibling processes.
*/
void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
{
void *anon_vma = vma->anon_vma;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_VMA(!anon_vma, vma);
anon_vma += FOLIO_MAPPING_ANON;
/*
* Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written
* simultaneously, so a concurrent reader (eg folio_referenced()'s
* folio_test_anon()) will not see one without the other.
*/
WRITE_ONCE(folio->mapping, anon_vma);
}
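/*
 * Hedged usage note: the COW fault path is the typical caller. When
 * do_wp_page() decides it can reuse a folio that is exclusively owned by
 * the faulting process, it may call
 *
 *	folio_move_anon_rmap(folio, vma);
 *
 * so later rmap walks start from this process's anon_vma rather than an
 * ancestor's. (The exact call site is an assumption here.)
 */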
/**
* __folio_set_anon - set up a new anonymous rmap for a folio
* @folio: The folio to set up the new anonymous rmap for.
* @vma: VM area to add the folio to.
* @address: User virtual address of the mapping
* @exclusive: Whether the folio is exclusive to the process.
*/
static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, bool exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
BUG_ON(!anon_vma);
/*
* If the folio isn't exclusive to this vma, we must use the _oldest_
* possible anon_vma for the folio mapping!
*/
if (!exclusive)
anon_vma = anon_vma->root;
/*
* page_idle does a lockless/optimistic rmap scan on folio->mapping.
* Make sure the compiler doesn't split the stores of anon_vma and
* the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code
* could mistake the mapping for a struct address_space and crash.
*/
anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON;
WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
folio->index = linear_page_index(vma, address);
}
/**
* __page_check_anon_rmap - sanity check anonymous rmap addition
* @folio: The folio containing @page.
* @page: the page to check the mapping of
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*/
static void __page_check_anon_rmap(const struct folio *folio,
const struct page *page, struct vm_area_struct *vma,
unsigned long address)
{
/*
* The page's anon-rmap details (mapping and index) are guaranteed to
* be set up correctly at this point.
*
* We have exclusion against folio_add_anon_rmap_*() because the caller
* always holds the page locked.
*
* We have exclusion against folio_add_new_anon_rmap because those pages
* are initially only visible via the pagetables, and the pte is locked
* over the call to folio_add_new_anon_rmap.
*/
VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
folio);
VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
page);
}
static __always_inline void __folio_add_anon_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *vma,
unsigned long address, rmap_t flags, enum pgtable_level level)
{
int i;
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
__folio_add_rmap(folio, page, nr_pages, vma, level);
if (likely(!folio_test_ksm(folio)))
__page_check_anon_rmap(folio, page, vma, address);
if (flags & RMAP_EXCLUSIVE) {
switch (level) {
case PGTABLE_LEVEL_PTE:
for (i = 0; i < nr_pages; i++)
SetPageAnonExclusive(page + i);
break;
case PGTABLE_LEVEL_PMD:
SetPageAnonExclusive(page);
break;
case PGTABLE_LEVEL_PUD:
/*
* Keep the compiler happy, we don't support anonymous
* PUD mappings.
*/
WARN_ON_ONCE(1);
break;
default:
BUILD_BUG();
}
}
VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) &&
atomic_read(&folio->_mapcount) > 0, folio);
for (i = 0; i < nr_pages; i++) {
struct page *cur_page = page + i;
VM_WARN_ON_FOLIO(folio_test_large(folio) &&
folio_entire_mapcount(folio) > 1 &&
PageAnonExclusive(cur_page), folio);
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
continue;
/*
* While PTE-mapping a THP we have a PMD and a PTE
* mapping.
*/
VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 &&
PageAnonExclusive(cur_page), folio);
}
/*
* Only mlock it if the folio is fully mapped to the VMA.
*
* Partially mapped folios can be split on reclaim and part outside
* of mlocked VMA can be evicted or freed.
*/
if (folio_nr_pages(folio) == nr_pages)
mlock_vma_folio(folio, vma);
}
/**
* folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
* @folio: The folio to add the mappings to
* @page: The first page to add
* @nr_pages: The number of pages which will be mapped
* @vma: The vm area in which the mappings are added
* @address: The user virtual address of the first page to map
* @flags: The rmap flags
*
* The page range of folio is defined by [first_page, first_page + nr_pages)
*
* The caller needs to hold the page table lock, and the page must be locked in
* the anon_vma case: to serialize mapping,index checking after setting,
* and to ensure that an anon folio is not being upgraded racily to a KSM folio
* (but KSM folios are never downgraded).
*/
void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
int nr_pages, struct vm_area_struct *vma, unsigned long address,
rmap_t flags)
{
__folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
PGTABLE_LEVEL_PTE);
}
/**
* folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
* @folio: The folio to add the mapping to
* @page: The first page to add
* @vma: The vm area in which the mapping is added
* @address: The user virtual address of the first page to map
* @flags: The rmap flags
*
* The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock, and the page must be locked in
* the anon_vma case: to serialize mapping,index checking after setting.
*/
void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
struct vm_area_struct *vma, unsigned long address, rmap_t flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
#endif
}
/**
* folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
* @folio: The folio to add the mapping to.
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
* @flags: The rmap flags
*
* Like folio_add_anon_rmap_*() but must only be called on *new* folios.
* This means the inc-and-test can be bypassed.
* The folio doesn't necessarily need to be locked while it's exclusive
* unless two threads map it concurrently. However, the folio must be
* locked if it's shared.
*
* If the folio is pmd-mappable, it is accounted as a THP.
*/
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, rmap_t flags)
{
const bool exclusive = flags & RMAP_EXCLUSIVE;
int nr = 1, nr_pmdmapped = 0;
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
/*
* VM_DROPPABLE mappings don't swap; instead they're just dropped when
* under memory pressure.
*/
if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
__folio_set_swapbacked(folio);
__folio_set_anon(folio, vma, address, exclusive);
if (likely(!folio_test_large(folio))) {
/* increment count (starts at -1) */
atomic_set(&folio->_mapcount, 0);
if (exclusive)
SetPageAnonExclusive(&folio->page);
} else if (!folio_test_pmd_mappable(folio)) {
int i;
nr = folio_large_nr_pages(folio);
for (i = 0; i < nr; i++) {
struct page *page = folio_page(folio, i);
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
/* increment count (starts at -1) */
atomic_set(&page->_mapcount, 0);
if (exclusive)
SetPageAnonExclusive(page);
}
folio_set_large_mapcount(folio, nr, vma);
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
atomic_set(&folio->_nr_pages_mapped, nr);
} else {
nr = folio_large_nr_pages(folio);
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
folio_set_large_mapcount(folio, 1, vma);
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
if (exclusive)
SetPageAnonExclusive(&folio->page);
nr_pmdmapped = nr;
}
VM_WARN_ON_ONCE(address < vma->vm_start ||
address + (nr << PAGE_SHIFT) > vma->vm_end);
__folio_mod_stat(folio, nr, nr_pmdmapped);
mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
}
static __always_inline void __folio_add_file_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *vma,
enum pgtable_level level)
{
VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
__folio_add_rmap(folio, page, nr_pages, vma, level);
/*
* Only mlock it if the folio is fully mapped to the VMA.
*
* Partially mapped folios can be split on reclaim and part outside
* of mlocked VMA can be evicted or freed.
*/
if (folio_nr_pages(folio) == nr_pages)
mlock_vma_folio(folio, vma);
}
/**
* folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
* @folio: The folio to add the mappings to
* @page: The first page to add
* @nr_pages: The number of pages that will be mapped using PTEs
* @vma: The vm area in which the mappings are added
*
* The page range of the folio is defined by [page, page + nr_pages)
*
* The caller needs to hold the page table lock.
*/
void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
int nr_pages, struct vm_area_struct *vma)
{
__folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
}
/**
* folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
* @folio: The folio to add the mapping to
* @page: The first page to add
* @vma: The vm area in which the mapping is added
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock.
*/
void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
#endif
}
/**
* folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio
* @folio: The folio to add the mapping to
* @page: The first page to add
* @vma: The vm area in which the mapping is added
*
* The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
*
* The caller needs to hold the page table lock.
*/
void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
struct vm_area_struct *vma)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
#else
WARN_ON_ONCE(true);
#endif
}
static __always_inline void __folio_remove_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *vma,
enum pgtable_level level)
{
atomic_t *mapped = &folio->_nr_pages_mapped;
int last = 0, nr = 0, nr_pmdmapped = 0;
bool partially_mapped = false;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
case PGTABLE_LEVEL_PTE:
if (!folio_test_large(folio)) {
nr = atomic_add_negative(-1, &folio->_mapcount);
break;
}
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
if (!nr) {
/* Now completely unmapped. */
nr = folio_large_nr_pages(folio);
} else {
partially_mapped = nr < folio_large_nr_pages(folio) &&
!folio_entire_mapcount(folio);
nr = 0;
}
break;
}
folio_sub_large_mapcount(folio, nr_pages, vma);
do {
last += atomic_add_negative(-1, &page->_mapcount);
} while (page++, --nr_pages > 0);
if (last &&
atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED)
nr = last;
partially_mapped = nr && atomic_read(mapped);
break;
case PGTABLE_LEVEL_PMD:
case PGTABLE_LEVEL_PUD:
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
last = atomic_add_negative(-1, &folio->_entire_mapcount);
if (level == PGTABLE_LEVEL_PMD && last)
nr_pmdmapped = folio_large_nr_pages(folio);
nr = folio_dec_return_large_mapcount(folio, vma);
if (!nr) {
/* Now completely unmapped. */
nr = folio_large_nr_pages(folio);
} else {
partially_mapped = last &&
nr < folio_large_nr_pages(folio);
nr = 0;
}
break;
}
folio_dec_large_mapcount(folio, vma);
last = atomic_add_negative(-1, &folio->_entire_mapcount);
if (last) {
nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
if (likely(nr < ENTIRELY_MAPPED)) {
nr_pages = folio_large_nr_pages(folio);
if (level == PGTABLE_LEVEL_PMD)
nr_pmdmapped = nr_pages;
nr = nr_pages - nr;
/* Raced ahead of another remove and an add? */
if (unlikely(nr < 0))
nr = 0;
} else {
/* An add of ENTIRELY_MAPPED raced ahead */
nr = 0;
}
}
partially_mapped = nr && nr < nr_pmdmapped;
break;
default:
BUILD_BUG();
}
/*
* Queue anon large folio for deferred split if at least one page of
* the folio is unmapped and at least one page is still mapped.
*
* Check partially_mapped first to ensure it is a large folio.
*/
if (partially_mapped && folio_test_anon(folio) &&
!folio_test_partially_mapped(folio))
deferred_split_folio(folio, true);
__folio_mod_stat(folio, -nr, -nr_pmdmapped);
/*
* It would be tidy to reset folio_test_anon mapping when fully
* unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
* which increments mapcount after us but sets mapping before us:
* so leave the reset to free_pages_prepare, and remember that
* it's only reliable while mapped.
*/
munlock_vma_folio(folio, vma);
}
/**
* folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
* @folio: The folio to remove the mappings from
* @page: The first page to remove
* @nr_pages: The number of pages that will be removed from the mapping
* @vma: The vm area from which the mappings are removed
*
* The page range of the folio is defined by [page, page + nr_pages)
*
* The caller needs to hold the page table lock.
*/
void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
int nr_pages, struct vm_area_struct *vma)
{
__folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
}
/**
* folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
* @folio: The folio to remove the mapping from
* @page: The first page to remove
* @vma: The vm area from which the mapping is removed
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock.
*/
void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
#endif
}
/**
* folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio
* @folio: The folio to remove the mapping from
* @page: The first page to remove
* @vma: The vm area from which the mapping is removed
*
* The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
*
* The caller needs to hold the page table lock.
*/
void folio_remove_rmap_pud(struct folio *folio, struct page *page,
struct vm_area_struct *vma)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
#else
WARN_ON_ONCE(true);
#endif
}
static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
struct page_vma_mapped_walk *pvmw,
enum ttu_flags flags, pte_t pte)
{
unsigned long end_addr, addr = pvmw->address;
struct vm_area_struct *vma = pvmw->vma;
unsigned int max_nr;
if (flags & TTU_HWPOISON)
return 1;
if (!folio_test_large(folio))
return 1;
/* We may only batch within a single VMA and a single page table. */
end_addr = pmd_addr_end(addr, vma->vm_end);
max_nr = (end_addr - addr) >> PAGE_SHIFT;
/* We only support lazyfree batching for now ... */
if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
return 1;
if (pte_unused(pte))
return 1;
return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
}
/*
* @arg: enum ttu_flags will be passed to this argument
*/
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
bool anon_exclusive, ret = true;
pte_t pteval;
struct page *subpage;
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
unsigned long nr_pages = 1, end_addr;
unsigned long pfn;
unsigned long hsz = 0;
int ptes = 0;
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
* try_to_unmap() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
if (flags & TTU_SYNC)
pvmw.flags = PVMW_SYNC;
/*
* For THP, we have to assume the worst case, i.e. pmd, for invalidation.
* For hugetlb, it could be much worse if we need to do pud
* invalidation in the case of pmd sharing.
*
* Note that the folio cannot be freed in this function, as the caller of
* try_to_unmap() must hold a reference on the folio.
*/
range.end = vma_address_end(&pvmw);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, range.end);
if (folio_test_hugetlb(folio)) {
/*
* If sharing is possible, start and end will be adjusted
* accordingly.
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
/* We need the huge page size for set_huge_pte_at() */
hsz = huge_page_size(hstate_vma(vma));
}
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
/*
* If the folio is in an mlock()d vma, we must not swap it out.
*/
if (!(flags & TTU_IGNORE_MLOCK) &&
(vma->vm_flags & VM_LOCKED)) {
ptes++;
/*
* Set 'ret' to indicate the page cannot be unmapped.
*
* Do not jump to walk_abort immediately, as additional
* iterations might be required to detect a fully mapped
* folio and mlock it.
*/
ret = false;
/* Only mlock fully mapped pages */
if (pvmw.pte && ptes != pvmw.nr_pages)
continue;
/*
* All PTEs must be protected by the page table lock in
* order to mlock the page.
*
* If a page table boundary has been crossed, the current
* ptl only protects part of the ptes.
*/
if (pvmw.flags & PVMW_PGTABLE_CROSSED)
goto walk_done;
/* Restore the mlock which got missed */
mlock_vma_folio(folio, vma);
goto walk_done;
}
if (!pvmw.pte) {
if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
goto walk_done;
/*
* unmap_huge_pmd_locked has either already marked
* the folio as swap-backed or decided to retain it
* due to GUP or speculative references.
*/
goto walk_abort;
}
if (flags & TTU_SPLIT_HUGE_PMD) {
/*
* We temporarily have to drop the PTL and
* restart so we can process the PTE-mapped THP.
*/
split_huge_pmd_locked(vma, pvmw.address,
pvmw.pmd, false);
flags &= ~TTU_SPLIT_HUGE_PMD;
page_vma_mapped_walk_restart(&pvmw);
continue;
}
}
/* Unexpected PMD-mapped THP? */
VM_BUG_ON_FOLIO(!pvmw.pte, folio);
/*
* Handle PFN swap PTEs, such as device-exclusive ones, that
* actually map pages.
*/
pteval = ptep_get(pvmw.pte);
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
subpage = folio_page(folio, pfn - folio_pfn(folio));
address = pvmw.address;
anon_exclusive = folio_test_anon(folio) &&
PageAnonExclusive(subpage);
if (folio_test_hugetlb(folio)) {
bool anon = folio_test_anon(folio);
/*
* The try_to_unmap() is only passed a hugetlb page
* in the case where the hugetlb page is poisoned.
*/
VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
/*
* huge_pmd_unshare may unmap an entire PMD page.
* There is no way of knowing exactly which PMDs may
* be cached for this mm, so we must flush them all.
* start/end were already adjusted above to cover this
* range.
*/
flush_cache_range(vma, range.start, range.end);
/*
* To call huge_pmd_unshare, i_mmap_rwsem must be
* held in write mode. Caller needs to explicitly
* do this outside rmap routines.
*
* We also must hold hugetlb vma_lock in write mode.
* Lock order dictates acquiring vma_lock BEFORE
* i_mmap_rwsem. We can only try lock here and fail
* if unsuccessful.
*/
if (!anon) {
VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
if (!hugetlb_vma_trylock_write(vma))
goto walk_abort;
if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
hugetlb_vma_unlock_write(vma);
flush_tlb_range(vma,
range.start, range.end);
/*
* The ref count of the PMD page was
* dropped which is part of the way map
* counting is done for shared PMDs.
* Return 'true' here. When there is
* no other sharing, huge_pmd_unshare
* returns false and we will unmap the
* actual page and drop map count
* to zero.
*/
goto walk_done;
}
hugetlb_vma_unlock_write(vma);
}
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
if (pte_dirty(pteval))
folio_mark_dirty(folio);
} else if (likely(pte_present(pteval))) {
nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
end_addr = address + nr_pages * PAGE_SIZE;
flush_cache_range(vma, address, end_addr);
/* Nuke the page table entry. */
pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages);
/*
* We clear the PTE but do not flush so potentially
* a remote CPU could still be writing to the folio.
* If the entry was previously clean then the
* architecture must guarantee that a clear->dirty
* transition on a cached TLB entry is written through
* and traps if the PTE is unmapped.
*/
if (should_defer_flush(mm, flags))
set_tlb_ubc_flush_pending(mm, pteval, address, end_addr);
else
flush_tlb_range(vma, address, end_addr);
if (pte_dirty(pteval))
folio_mark_dirty(folio);
} else {
pte_clear(mm, address, pvmw.pte);
}
/*
* Now the pte is cleared. If this pte was uffd-wp armed,
* we may want to replace a none pte with a marker pte if
* it's file-backed, so we don't lose the tracking info.
*/
pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
set_huge_pte_at(mm, address, pvmw.pte, pteval,
hsz);
} else {
dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}
} else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
!userfaultfd_armed(vma)) {
/*
* The guest indicated that the page content is of no
* interest anymore. Simply discard the pte, vmscan
* will take care of the rest.
* A future reference will then fault in a new zero
* page. When userfaultfd is active, we must not drop
* this page though, as its main user (postcopy
* migration) will not expect userfaults on already
* copied pages.
*/
dec_mm_counter(mm, mm_counter(folio));
} else if (folio_test_anon(folio)) {
swp_entry_t entry = page_swap_entry(subpage);
pte_t swp_pte;
/*
* Store the swap location in the pte.
* See handle_pte_fault() ...
*/
if (unlikely(folio_test_swapbacked(folio) !=
folio_test_swapcache(folio))) {
WARN_ON_ONCE(1);
goto walk_abort;
}
/* MADV_FREE page check */
if (!folio_test_swapbacked(folio)) {
int ref_count, map_count;
/*
* Synchronize with gup_pte_range():
* - clear PTE; barrier; read refcount
* - inc refcount; barrier; read PTE
*/
smp_mb();
ref_count = folio_ref_count(folio);
map_count = folio_mapcount(folio);
/*
* Order reads for page refcount and dirty flag
* (see comments in __remove_mapping()).
*/
smp_rmb();
if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
/*
* redirtied either using the page table or a previously
* obtained GUP reference.
*/
set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
folio_set_swapbacked(folio);
goto walk_abort;
} else if (ref_count != 1 + map_count) {
/*
* Additional reference. Could be a GUP reference or any
* speculative reference. GUP users must mark the folio
* dirty if there was a modification. This folio cannot be
* reclaimed right now either way, so act just like nothing
* happened.
* We'll come back here later and detect if the folio was
* dirtied when the additional reference is gone.
*/
set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
goto walk_abort;
}
add_mm_counter(mm, MM_ANONPAGES, -nr_pages);
goto discard;
}
if (swap_duplicate(entry) < 0) {
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
}
/*
* arch_unmap_one() is expected to be a NOP on
* architectures where we could have PFN swap PTEs,
* so we'll not check/care.
*/
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
swap_free(entry);
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
}
/* See folio_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive &&
folio_try_share_anon_rmap_pte(folio, subpage)) {
swap_free(entry);
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
}
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
list_add(&mm->mmlist, &init_mm.mmlist);
spin_unlock(&mmlist_lock);
}
dec_mm_counter(mm, MM_ANONPAGES);
inc_mm_counter(mm, MM_SWAPENTS);
swp_pte = swp_entry_to_pte(entry);
if (anon_exclusive)
swp_pte = pte_swp_mkexclusive(swp_pte);
if (likely(pte_present(pteval))) {
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
if (pte_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
} else {
if (pte_swp_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
if (pte_swp_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
}
set_pte_at(mm, address, pvmw.pte, swp_pte);
} else {
/*
* This is a locked file-backed folio,
* so it cannot be removed from the page
* cache and replaced by a new folio before
* mmu_notifier_invalidate_range_end, so no
* concurrent thread might update its page table
* to point at a new folio while a device is
* still using this folio.
*
* See Documentation/mm/mmu_notifier.rst
*/
dec_mm_counter(mm, mm_counter_file(folio));
}
discard:
if (unlikely(folio_test_hugetlb(folio))) {
hugetlb_remove_rmap(folio);
} else {
folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
}
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put_refs(folio, nr_pages);
/*
* If we are sure that we batched the entire folio and cleared
* all PTEs, we can just optimize and stop right here.
*/
if (nr_pages == folio_nr_pages(folio))
goto walk_done;
continue;
walk_abort:
ret = false;
walk_done:
page_vma_mapped_walk_done(&pvmw);
break;
}
mmu_notifier_invalidate_range_end(&range);
return ret;
}
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
return vma_is_temporary_stack(vma);
}
static int folio_not_mapped(struct folio *folio)
{
return !folio_mapped(folio);
}
/**
* try_to_unmap - Try to remove all page table mappings to a folio.
* @folio: The folio to unmap.
* @flags: action and flags
*
* Tries to remove all the page table entries which are mapping this
* folio. It is the caller's responsibility to check if the folio is
* still mapped if needed (use TTU_SYNC to prevent accounting races).
*
* Context: Caller must hold the folio lock.
*/
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
.done = folio_not_mapped,
.anon_lock = folio_lock_anon_vma_read,
};
if (flags & TTU_RMAP_LOCKED)
rmap_walk_locked(folio, &rwc);
else
rmap_walk(folio, &rwc);
}
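/*
 * Usage sketch (an assumption, shown for context only): page reclaim
 * roughly does
 *
 *	try_to_unmap(folio, TTU_BATCH_FLUSH);
 *	if (folio_mapped(folio))
 *		keep the folio (some mapping could not be removed);
 *
 * i.e. success is judged by re-checking folio_mapped() afterwards, which
 * is why the comment above points at TTU_SYNC for accounting races.
 */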
/*
* @arg: enum ttu_flags will be passed to this argument.
*
* If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
* containing migration entries.
*/
static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
bool anon_exclusive, writable, ret = true;
pte_t pteval;
struct page *subpage;
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
unsigned long pfn;
unsigned long hsz = 0;
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
* try_to_migrate() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
if (flags & TTU_SYNC)
pvmw.flags = PVMW_SYNC;
/*
* For THP, we have to assume the worst case, i.e. pmd, for invalidation.
* For hugetlb, it could be much worse if we need to do pud
* invalidation in the case of pmd sharing.
*
* Note that the page cannot be freed in this function, as the caller of
* try_to_unmap() must hold a reference on the page.
*/
range.end = vma_address_end(&pvmw);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, range.end);
if (folio_test_hugetlb(folio)) {
/*
* If sharing is possible, start and end will be adjusted
* accordingly.
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
/* We need the huge page size for set_huge_pte_at() */
hsz = huge_page_size(hstate_vma(vma));
}
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
/* PMD-mapped THP migration entry */
if (!pvmw.pte) {
if (flags & TTU_SPLIT_HUGE_PMD) {
split_huge_pmd_locked(vma, pvmw.address,
pvmw.pmd, true);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
subpage = folio_page(folio,
pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
!folio_test_pmd_mappable(folio), folio);
if (set_pmd_migration_entry(&pvmw, subpage)) {
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
}
continue;
#endif
}
/* Unexpected PMD-mapped THP? */
VM_BUG_ON_FOLIO(!pvmw.pte, folio);
/*
* Handle PFN swap PTEs, such as device-exclusive ones, that
* actually map pages.
*/
pteval = ptep_get(pvmw.pte);
if (likely(pte_present(pteval))) {
pfn = pte_pfn(pteval);
} else {
pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
subpage = folio_page(folio, pfn - folio_pfn(folio));
address = pvmw.address;
anon_exclusive = folio_test_anon(folio) &&
PageAnonExclusive(subpage);
if (folio_test_hugetlb(folio)) {
bool anon = folio_test_anon(folio);
/*
* huge_pmd_unshare may unmap an entire PMD page.
* There is no way of knowing exactly which PMDs may
* be cached for this mm, so we must flush them all.
* start/end were already adjusted above to cover this
* range.
*/
flush_cache_range(vma, range.start, range.end);
/*
* To call huge_pmd_unshare, i_mmap_rwsem must be
* held in write mode. Caller needs to explicitly
* do this outside rmap routines.
*
* We also must hold hugetlb vma_lock in write mode.
* Lock order dictates acquiring vma_lock BEFORE
* i_mmap_rwsem. We can only try lock here and
* fail if unsuccessful.
*/
if (!anon) {
VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
if (!hugetlb_vma_trylock_write(vma)) {
page_vma_mapped_walk_done(&pvmw);
ret = false;
break;
}
if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
hugetlb_vma_unlock_write(vma);
flush_tlb_range(vma,
range.start, range.end);
/*
* The ref count of the PMD page was
* dropped which is part of the way map
* counting is done for shared PMDs.
* Return 'true' here. When there is
* no other sharing, huge_pmd_unshare
* returns false and we will unmap the
* actual page and drop map count
* to zero.
*/
page_vma_mapped_walk_done(&pvmw);
break;
}
hugetlb_vma_unlock_write(vma);
}
/* Nuke the hugetlb page table entry */
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
if (pte_dirty(pteval))
folio_mark_dirty(folio);
writable = pte_write(pteval);
} else if (likely(pte_present(pteval))) {
flush_cache_page(vma, address, pfn);
/* Nuke the page table entry. */
if (should_defer_flush(mm, flags)) {
/*
* We clear the PTE but do not flush so potentially
* a remote CPU could still be writing to the folio.
* If the entry was previously clean then the
* architecture must guarantee that a clear->dirty
* transition on a cached TLB entry is written through
* and traps if the PTE is unmapped.
*/
pteval = ptep_get_and_clear(mm, address, pvmw.pte);
set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
} else {
pteval = ptep_clear_flush(vma, address, pvmw.pte);
}
if (pte_dirty(pteval))
folio_mark_dirty(folio);
writable = pte_write(pteval);
} else {
pte_clear(mm, address, pvmw.pte);
writable = is_writable_device_private_entry(pte_to_swp_entry(pteval));
}
VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
!anon_exclusive, folio);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
if (PageHWPoison(subpage)) {
VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio);
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
set_huge_pte_at(mm, address, pvmw.pte, pteval,
hsz);
} else {
dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}
} else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
!userfaultfd_armed(vma)) {
/*
* The guest indicated that the page content is of no
* interest anymore. Simply discard the pte, vmscan
* will take care of the rest.
* A future reference will then fault in a new zero
* page. When userfaultfd is active, we must not drop
* this page though, as its main user (postcopy
* migration) will not expect userfaults on already
* copied pages.
*/
dec_mm_counter(mm, mm_counter(folio));
} else {
swp_entry_t entry;
pte_t swp_pte;
/*
* arch_unmap_one() is expected to be a NOP on
* architectures where we could have PFN swap PTEs,
* so we'll not check/care.
*/
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
if (folio_test_hugetlb(folio))
set_huge_pte_at(mm, address, pvmw.pte,
pteval, hsz);
else
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
}
/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
if (folio_test_hugetlb(folio)) {
if (anon_exclusive &&
hugetlb_try_share_anon_rmap(folio)) {
set_huge_pte_at(mm, address, pvmw.pte,
pteval, hsz);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
}
} else if (anon_exclusive &&
folio_try_share_anon_rmap_pte(folio, subpage)) {
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
}
/*
* Store the pfn of the page in a special migration
* pte. do_swap_page() will wait until the migration
* pte is removed and then restart fault handling.
*/
if (writable)
entry = make_writable_migration_entry(
page_to_pfn(subpage));
else if (anon_exclusive)
entry = make_readable_exclusive_migration_entry(
page_to_pfn(subpage));
else
entry = make_readable_migration_entry(
page_to_pfn(subpage));
if (likely(pte_present(pteval))) {
if (pte_young(pteval))
entry = make_migration_entry_young(entry);
if (pte_dirty(pteval))
entry = make_migration_entry_dirty(entry);
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
if (pte_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
} else {
swp_pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
if (pte_swp_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
}
if (folio_test_hugetlb(folio))
set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
hsz);
else
set_pte_at(mm, address, pvmw.pte, swp_pte);
trace_set_migration_pte(address, pte_val(swp_pte),
folio_order(folio));
/*
* No need to invalidate here: it will synchronize
* against the special swap migration pte.
*/
}
if (unlikely(folio_test_hugetlb(folio)))
hugetlb_remove_rmap(folio);
else
folio_remove_rmap_pte(folio, subpage, vma);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
}
mmu_notifier_invalidate_range_end(&range);
return ret;
}
/**
* try_to_migrate - try to replace all page table mappings with swap entries
* @folio: the folio to replace page table entries for
* @flags: action and flags
*
* Tries to remove all the page table entries which are mapping this folio and
* replace them with special swap entries. Caller must hold the folio lock.
*/
void try_to_migrate(struct folio *folio, enum ttu_flags flags)
{
struct rmap_walk_control rwc = {
.rmap_one = try_to_migrate_one,
.arg = (void *)flags,
.done = folio_not_mapped,
.anon_lock = folio_lock_anon_vma_read,
};
/*
* Migration always ignores mlock and only supports TTU_RMAP_LOCKED and
* TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
*/
if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
TTU_SYNC | TTU_BATCH_FLUSH)))
return;
if (folio_is_zone_device(folio) &&
(!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
return;
/*
* During exec, a temporary VMA is setup and later moved.
* The VMA is moved under the anon_vma lock but not the
* page tables leading to a race where migration cannot
* find the migration ptes. Rather than increasing the
* locking requirements of exec(), migration skips
* temporary VMAs until after exec() completes.
*/
if (!folio_test_ksm(folio) && folio_test_anon(folio))
rwc.invalid_vma = invalid_migration_vma;
if (flags & TTU_RMAP_LOCKED)
rmap_walk_locked(folio, &rwc);
else
rmap_walk(folio, &rwc);
}
#ifdef CONFIG_DEVICE_PRIVATE
/**
* make_device_exclusive() - Mark a page for exclusive use by a device
* @mm: mm_struct of associated target process
* @addr: the virtual address to mark for exclusive device access
* @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
* @foliop: folio pointer will be stored here on success.
*
* This function looks up the page mapped at the given address, grabs a
* folio reference, locks the folio and replaces the PTE with a special
* device-exclusive PFN swap entry, preventing access through the process
* page tables. The function will return with the folio locked and referenced.
*
* On fault, the device-exclusive entries are replaced with the original PTE
* under folio lock, after calling MMU notifiers.
*
* Only anonymous non-hugetlb folios are supported and the VMA must have
* write permissions such that we can fault in the anonymous page writable
* in order to mark it exclusive. The caller must hold the mmap_lock in read
* mode.
*
* A driver using this to program access from a device must use a mmu notifier
* critical section to hold a device specific lock during programming. Once
* programming is complete it should drop the folio lock and reference after
* which point CPU access to the page will revoke the exclusive access.
*
* Notes:
* #. This function always operates on individual PTEs mapping individual
* pages. PMD-sized THPs are first remapped to be mapped by PTEs before
* the conversion happens on a single PTE corresponding to @addr.
* #. While concurrent access through the process page tables is prevented,
* concurrent access through other page references (e.g., earlier GUP
* invocation) is not handled and not supported.
* #. device-exclusive entries are considered "clean" and "old" by core-mm.
* Device drivers must update the folio state when informed by MMU
* notifiers.
*
* Returns: pointer to mapped page on success, otherwise a negative error.
*/
struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
void *owner, struct folio **foliop)
{
struct mmu_notifier_range range;
struct folio *folio, *fw_folio;
struct vm_area_struct *vma;
struct folio_walk fw;
struct page *page;
swp_entry_t entry;
pte_t swp_pte;
int ret;
mmap_assert_locked(mm);
addr = PAGE_ALIGN_DOWN(addr);
/*
* Fault in the page writable and try to lock it; note that if the
* address would already be marked for exclusive use by a device,
* the GUP call would undo that first by triggering a fault.
*
* If any other device would already map this page exclusively, the
* fault will trigger a conversion to an ordinary
* (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE.
*/
retry:
page = get_user_page_vma_remote(mm, addr,
FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
&vma);
if (IS_ERR(page))
return page;
folio = page_folio(page);
if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) {
folio_put(folio);
return ERR_PTR(-EOPNOTSUPP);
}
ret = folio_lock_killable(folio);
if (ret) {
folio_put(folio);
return ERR_PTR(ret);
}
/*
* Inform secondary MMUs that we are going to convert this PTE to
* device-exclusive, such that they unmap it now. Note that the
* caller must filter this event out to prevent livelocks.
*/
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
mm, addr, addr + PAGE_SIZE, owner);
mmu_notifier_invalidate_range_start(&range);
/*
* Let's do a second walk and make sure we still find the same page
* mapped writable. Note that any page of an anonymous folio can
* only be mapped writable using exactly one PTE ("exclusive"), so
* there cannot be other mappings.
*/
fw_folio = folio_walk_start(&fw, vma, addr, 0);
if (fw_folio != folio || fw.page != page ||
fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) {
if (fw_folio)
folio_walk_end(&fw, vma);
mmu_notifier_invalidate_range_end(&range);
folio_unlock(folio);
folio_put(folio);
goto retry;
}
/* Nuke the page table entry so we get the uptodate dirty bit. */
flush_cache_page(vma, addr, page_to_pfn(page));
fw.pte = ptep_clear_flush(vma, addr, fw.ptep);
/* Set the dirty flag on the folio now the PTE is gone. */
if (pte_dirty(fw.pte))
folio_mark_dirty(folio);
/*
* Store the pfn of the page in a special device-exclusive PFN swap PTE.
* do_swap_page() will trigger the conversion back while holding the
* folio lock.
*/
entry = make_device_exclusive_entry(page_to_pfn(page));
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(fw.pte))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
/* The pte is writable, uffd-wp does not apply. */
set_pte_at(mm, addr, fw.ptep, swp_pte);
folio_walk_end(&fw, vma);
mmu_notifier_invalidate_range_end(&range);
*foliop = folio;
return page;
}
EXPORT_SYMBOL_GPL(make_device_exclusive);
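/*
 * Hedged driver-side sketch (illustrative only; the helper name and the
 * "program device" step are hypothetical): the caller takes mmap_lock in
 * read mode as required above, programs its own page tables while holding
 * a driver lock inside an MMU notifier critical section, and then drops
 * the folio lock and reference so that CPU access can revoke exclusivity.
 */
static __maybe_unused struct page *
example_grant_device_exclusive(struct mm_struct *mm, unsigned long addr,
			       void *owner)
{
	struct folio *folio;
	struct page *page;

	mmap_read_lock(mm);
	page = make_device_exclusive(mm, addr, owner, &folio);
	mmap_read_unlock(mm);
	if (IS_ERR(page))
		return page;

	/* ... program the device page tables under the driver lock ... */

	folio_unlock(folio);
	folio_put(folio);
	return page;
}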
#endif
void __put_anon_vma(struct anon_vma *anon_vma)
{
struct anon_vma *root = anon_vma->root;
anon_vma_free(anon_vma);
if (root != anon_vma && atomic_dec_and_test(&root->refcount))
anon_vma_free(root);
}
static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
if (rwc->anon_lock)
return rwc->anon_lock(folio, rwc);
/*
* Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
* because that depends on page_mapped(); but not all its usages
* are holding mmap_lock. Users without mmap_lock are required to
* take a reference count to prevent the anon_vma from disappearing.
*/
anon_vma = folio_anon_vma(folio);
if (!anon_vma)
return NULL;
if (anon_vma_trylock_read(anon_vma))
goto out;
if (rwc->try_lock) {
anon_vma = NULL;
rwc->contended = true;
goto out;
}
anon_vma_lock_read(anon_vma);
out:
return anon_vma;
}
/*
* rmap_walk_anon - do something to anonymous page using the object-based
* rmap method
* @folio: the folio to be handled
* @rwc: control variable according to each walk type
* @locked: caller holds relevant rmap lock
*
* Find all the mappings of a folio using the mapping pointer and the vma
* chains contained in the anon_vma struct it points to.
*/
static void rmap_walk_anon(struct folio *folio,
struct rmap_walk_control *rwc, bool locked)
{
struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc;
if (locked) {
anon_vma = folio_anon_vma(folio);
/* anon_vma disappear under us? */
VM_BUG_ON_FOLIO(!anon_vma, folio);
} else {
anon_vma = rmap_walk_anon_lock(folio, rwc);
}
if (!anon_vma)
return;
pgoff_start = folio_pgoff(folio);
pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(vma, pgoff_start,
folio_nr_pages(folio));
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
if (!rwc->rmap_one(folio, vma, address, rwc->arg))
break;
if (rwc->done && rwc->done(folio))
break;
}
if (!locked)
anon_vma_unlock_read(anon_vma);
}
/**
* __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping
* of a page mapped within a specified page cache object at a specified offset.
*
* @folio: Either the folio whose mappings to traverse, or if NULL,
* the callbacks specified in @rwc will be configured such
* as to be able to look up mappings correctly.
* @mapping: The page cache object whose mapping VMAs we intend to
* traverse. If @folio is non-NULL, this should be equal to
* folio_mapping(folio).
* @pgoff_start: The offset within @mapping of the page which we are
* looking up. If @folio is non-NULL, this should be equal
* to folio_pgoff(folio).
* @nr_pages: The number of pages mapped by the mapping. If @folio is
* non-NULL, this should be equal to folio_nr_pages(folio).
* @rwc: The reverse mapping walk control object describing how
* the traversal should proceed.
* @locked: Is the @mapping already locked? If not, we acquire the
* lock.
*/
static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
pgoff_t pgoff_start, unsigned long nr_pages,
struct rmap_walk_control *rwc, bool locked)
{
pgoff_t pgoff_end = pgoff_start + nr_pages - 1;
struct vm_area_struct *vma;
VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);
VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio);
VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio);
if (!locked) {
if (i_mmap_trylock_read(mapping))
goto lookup;
if (rwc->try_lock) {
rwc->contended = true;
return;
}
i_mmap_lock_read(mapping);
}
lookup:
vma_interval_tree_foreach(vma, &mapping->i_mmap,
pgoff_start, pgoff_end) {
unsigned long address = vma_address(vma, pgoff_start, nr_pages);
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
if (!rwc->rmap_one(folio, vma, address, rwc->arg))
goto done;
if (rwc->done && rwc->done(folio))
goto done;
}
done:
if (!locked)
i_mmap_unlock_read(mapping);
}
/*
* rmap_walk_file - do something to file page using the object-based rmap method
* @folio: the folio to be handled
* @rwc: control variable according to each walk type
* @locked: caller holds relevant rmap lock
*
* Find all the mappings of a folio using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*/
static void rmap_walk_file(struct folio *folio,
struct rmap_walk_control *rwc, bool locked)
{
/*
* The folio lock not only makes sure that folio->mapping cannot
* suddenly be NULLified by truncation, it makes sure that the structure
* at mapping cannot be freed and reused yet, so we can safely take
* mapping->i_mmap_rwsem.
*/
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
if (!folio->mapping)
return;
__rmap_walk_file(folio, folio->mapping, folio->index,
folio_nr_pages(folio), rwc, locked);
}
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
if (unlikely(folio_test_ksm(folio)))
rmap_walk_ksm(folio, rwc);
else if (folio_test_anon(folio))
rmap_walk_anon(folio, rwc, false);
else
rmap_walk_file(folio, rwc, false);
}
/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
{
/* no ksm support for now */
VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
if (folio_test_anon(folio))
rmap_walk_anon(folio, rwc, true);
else
rmap_walk_file(folio, rwc, true);
}
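/*
 * Minimal sketch of driving the rmap walk with a custom control structure
 * (illustrative only; example_count_one()/example_count_mappings() are
 * hypothetical and not used anywhere in the kernel): count the VMAs that
 * still map a locked folio.
 */
static bool example_count_one(struct folio *folio, struct vm_area_struct *vma,
			      unsigned long address, void *arg)
{
	unsigned int *nr = arg;

	(*nr)++;
	return true;	/* keep walking the remaining VMAs */
}

static unsigned int __maybe_unused example_count_mappings(struct folio *folio)
{
	unsigned int nr = 0;
	struct rmap_walk_control rwc = {
		.rmap_one	= example_count_one,
		.arg		= &nr,
		.anon_lock	= folio_lock_anon_vma_read,
	};

	/* Caller holds the folio lock, as for the rmap_walk() users above. */
	rmap_walk(folio, &rwc);
	return nr;
}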
#ifdef CONFIG_HUGETLB_PAGE
/*
* The following two functions are for anonymous (private mapped) hugepages.
* Unlike common anonymous pages, anonymous hugepages have no accounting code
* and no lru code, because we handle hugepages differently from common pages.
*/
void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, rmap_t flags)
{
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
atomic_inc(&folio->_entire_mapcount);
atomic_inc(&folio->_large_mapcount);
if (flags & RMAP_EXCLUSIVE)
SetPageAnonExclusive(&folio->page);
VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
PageAnonExclusive(&folio->page), folio);
}
void hugetlb_add_new_anon_rmap(struct folio *folio,
struct vm_area_struct *vma, unsigned long address)
{
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
atomic_set(&folio->_large_mapcount, 0);
folio_clear_hugetlb_restore_reserve(folio);
__folio_set_anon(folio, vma, address, true);
SetPageAnonExclusive(&folio->page);
}
#endif /* CONFIG_HUGETLB_PAGE */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SIMD_H
#define _ASM_SIMD_H
#include <asm/fpu/api.h>
#include <linux/compiler_attributes.h>
#include <linux/types.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
* instructions or access the SIMD register file
*/
static __must_check inline bool may_use_simd(void)
{
return irq_fpu_usable();
}
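/*
 * Usage sketch (illustrative only; example_zero_words() is a hypothetical
 * helper, not part of the kernel API): SIMD code must be bracketed by
 * kernel_fpu_begin()/kernel_fpu_end(), and callers that may run where the
 * FPU is unusable need a scalar fallback.
 */
static inline void example_zero_words(u64 *dst, size_t n)
{
	size_t i;

	if (!may_use_simd()) {
		for (i = 0; i < n; i++)	/* scalar fallback */
			dst[i] = 0;
		return;
	}

	kernel_fpu_begin();
	for (i = 0; i < n; i++)		/* stand-in for real SIMD code */
		dst[i] = 0;
	kernel_fpu_end();
}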
#endif /* _ASM_SIMD_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/file_table.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include "internal.h"
/* sysctl tunables... */
static struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __ro_after_init;
static struct kmem_cache *bfilp_cachep __ro_after_init;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
/* Container for backing file with optional user path */
struct backing_file {
struct file file;
union {
struct path user_path;
freeptr_t bf_freeptr;
};
};
#define backing_file(f) container_of(f, struct backing_file, file)
const struct path *backing_file_user_path(const struct file *f)
{
return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);
void backing_file_set_user_path(struct file *f, const struct path *path)
{
backing_file(f)->user_path = *path;
}
EXPORT_SYMBOL_GPL(backing_file_set_user_path);
static inline void file_free(struct file *f)
{
security_file_free(f);
if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
percpu_counter_dec(&nr_files);
put_cred(f->f_cred);
if (unlikely(f->f_mode & FMODE_BACKING)) {
path_put(backing_file_user_path(f));
kmem_cache_free(bfilp_cachep, backing_file(f));
} else {
kmem_cache_free(filp_cachep, f);
}
}
/*
* Return the total number of open files in the system
*/
static long get_nr_files(void)
{
return percpu_counter_read_positive(&nr_files);
}
/*
* Return the maximum number of open files in the system
*/
unsigned long get_max_files(void)
{
return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
/*
* Handle nr_files sysctl
*/
static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = percpu_counter_sum_positive(&nr_files);
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static const struct ctl_table fs_stat_sysctls[] = {
{
.procname = "file-nr",
.data = &files_stat,
.maxlen = sizeof(files_stat),
.mode = 0444,
.proc_handler = proc_nr_files,
},
{
.procname = "file-max",
.data = &files_stat.max_files,
.maxlen = sizeof(files_stat.max_files),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = SYSCTL_LONG_ZERO,
.extra2 = SYSCTL_LONG_MAX,
},
{
.procname = "nr_open",
.data = &sysctl_nr_open,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = &sysctl_nr_open_min,
.extra2 = &sysctl_nr_open_max,
},
};
static int __init init_fs_stat_sysctls(void)
{
register_sysctl_init("fs", fs_stat_sysctls);
if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
struct ctl_table_header *hdr;
hdr = register_sysctl_mount_point("fs/binfmt_misc");
kmemleak_not_leak(hdr);
}
return 0;
}
fs_initcall(init_fs_stat_sysctls);
#endif
static int init_file(struct file *f, int flags, const struct cred *cred)
{
int error;
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
put_cred(f->f_cred);
return error;
}
spin_lock_init(&f->f_lock);
/*
* Note that f_pos_lock is only used for files raising
* FMODE_ATOMIC_POS and directories. Other files such as pipes
* don't need it and since f_pos_lock is in a union may reuse
* the space for other purposes. They are expected to initialize
* the respective member when opening the file.
*/
mutex_init(&f->f_pos_lock);
memset(&f->__f_path, 0, sizeof(f->f_path));
memset(&f->f_ra, 0, sizeof(f->f_ra));
f->f_flags = flags;
f->f_mode = OPEN_FMODE(flags);
f->f_op = NULL;
f->f_mapping = NULL;
f->private_data = NULL;
f->f_inode = NULL;
f->f_owner = NULL;
#ifdef CONFIG_EPOLL
f->f_ep = NULL;
#endif
f->f_iocb_flags = 0;
f->f_pos = 0;
f->f_wb_err = 0;
f->f_sb_err = 0;
/*
* We're SLAB_TYPESAFE_BY_RCU so initialize f_ref last. While
* fget-rcu pattern users need to be able to handle spurious
* refcount bumps we should reinitialize the reused file first.
*/
file_ref_init(&f->f_ref, 1);
/*
* Disable permission and pre-content events for all files by default.
* They may be enabled later by fsnotify_open_perm_and_set_mode().
*/
file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM);
return 0;
}
/* Find an unused file structure and return a pointer to it.
* Returns an error pointer if some error happened, e.g. we are over the
* file structures limit, ran out of memory or the operation is not permitted.
*
* Be very careful using this. You are responsible for
* getting write access to any mount that you might assign
* to this filp, if it is opened for write. If this is not
* done, you will imbalance the mount's writer count
* and get a warning at __fput() time.
*/
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
static long old_max;
struct file *f;
int error;
/*
* Privileged users can go above max_files
*/
if (unlikely(get_nr_files() >= files_stat.max_files) &&
!capable(CAP_SYS_ADMIN)) {
/*
* percpu_counters are inaccurate. Do an expensive check before
* we go and fail.
*/
if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
goto over;
}
f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
if (unlikely(!f))
return ERR_PTR(-ENOMEM);
error = init_file(f, flags, cred);
if (unlikely(error)) {
kmem_cache_free(filp_cachep, f);
return ERR_PTR(error);
}
percpu_counter_inc(&nr_files);
return f;
over:
/* Ran out of filps - report that */
if (get_nr_files() > old_max) {
pr_info("VFS: file-max limit %lu reached\n", get_max_files());
old_max = get_nr_files();
}
return ERR_PTR(-ENFILE);
}
/*
* Variant of alloc_empty_file() that doesn't check and modify nr_files.
*
* This is only for kernel internal use, and the allocated file must not be
* installed into file tables or such.
*/
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
struct file *f;
int error;
f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
if (unlikely(!f))
return ERR_PTR(-ENOMEM);
error = init_file(f, flags, cred);
if (unlikely(error)) {
kmem_cache_free(filp_cachep, f);
return ERR_PTR(error);
}
f->f_mode |= FMODE_NOACCOUNT;
return f;
}
/*
* Variant of alloc_empty_file() that allocates a backing_file container
* and doesn't check and modify nr_files.
*
* This is only for kernel internal use, and the allocated file must not be
* installed into file tables or such.
*/
struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
struct backing_file *ff;
int error;
ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL);
if (unlikely(!ff))
return ERR_PTR(-ENOMEM);
error = init_file(&ff->file, flags, cred);
if (unlikely(error)) {
kmem_cache_free(bfilp_cachep, ff);
return ERR_PTR(error);
}
ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
return &ff->file;
}
/**
* file_init_path - initialize a 'struct file' based on path
*
* @file: the file to set up
* @path: the (dentry, vfsmount) pair for the new file
* @fop: the 'struct file_operations' for the new file
*/
static void file_init_path(struct file *file, const struct path *path,
const struct file_operations *fop)
{
file->__f_path = *path;
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
file->f_sb_err = file_sample_sb_err(file);
if (fop->llseek)
file->f_mode |= FMODE_LSEEK;
if ((file->f_mode & FMODE_READ) &&
likely(fop->read || fop->read_iter))
file->f_mode |= FMODE_CAN_READ;
if ((file->f_mode & FMODE_WRITE) &&
likely(fop->write || fop->write_iter))
file->f_mode |= FMODE_CAN_WRITE;
file->f_iocb_flags = iocb_flags(file);
file->f_mode |= FMODE_OPENED;
file->f_op = fop;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
}
/**
* alloc_file - allocate and initialize a 'struct file'
*
* @path: the (dentry, vfsmount) pair for the new file
* @flags: O_... flags with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*/
static struct file *alloc_file(const struct path *path, int flags,
const struct file_operations *fop)
{
struct file *file;
file = alloc_empty_file(flags, current_cred());
if (!IS_ERR(file))
file_init_path(file, path, fop);
return file;
}
static inline int alloc_path_pseudo(const char *name, struct inode *inode,
struct vfsmount *mnt, struct path *path)
{
path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
if (!path->dentry)
return -ENOMEM;
path->mnt = mntget(mnt);
d_instantiate(path->dentry, inode);
return 0;
}
struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
const char *name, int flags,
const struct file_operations *fops)
{
int ret;
struct path path;
struct file *file;
ret = alloc_path_pseudo(name, inode, mnt, &path);
if (ret)
return ERR_PTR(ret);
file = alloc_file(&path, flags, fops);
if (IS_ERR(file)) {
ihold(inode);
path_put(&path);
return file;
}
/*
* Disable all fsnotify events for pseudo files by default.
* They may be enabled by caller with file_set_fsnotify_mode().
*/
file_set_fsnotify_mode(file, FMODE_NONOTIFY);
return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
struct file *alloc_file_pseudo_noaccount(struct inode *inode,
struct vfsmount *mnt, const char *name,
int flags,
const struct file_operations *fops)
{
int ret;
struct path path;
struct file *file;
ret = alloc_path_pseudo(name, inode, mnt, &path);
if (ret)
return ERR_PTR(ret);
file = alloc_empty_file_noaccount(flags, current_cred());
if (IS_ERR(file)) {
ihold(inode);
path_put(&path);
return file;
}
file_init_path(file, &path, fops);
/*
* Disable all fsnotify events for pseudo files by default.
* They may be enabled by caller with file_set_fsnotify_mode().
*/
file_set_fsnotify_mode(file, FMODE_NONOTIFY);
return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);
struct file *alloc_file_clone(struct file *base, int flags,
const struct file_operations *fops)
{
struct file *f;
f = alloc_file(&base->f_path, flags, fops);
if (!IS_ERR(f)) {
path_get(&f->f_path);
f->f_mapping = base->f_mapping;
}
return f;
}
/* the real guts of fput() - releasing the last reference to file
*/
static void __fput(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct vfsmount *mnt = file->f_path.mnt;
struct inode *inode = file->f_inode;
fmode_t mode = file->f_mode;
if (unlikely(!(file->f_mode & FMODE_OPENED)))
goto out;
might_sleep();
fsnotify_close(file);
/*
* The function eventpoll_release() should be the first called
* in the file cleanup chain.
*/
eventpoll_release(file);
locks_remove_file(file);
security_file_release(file);
if (unlikely(file->f_flags & FASYNC)) {
if (file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
}
if (file->f_op->release)
file->f_op->release(inode, file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
!(mode & FMODE_PATH))) {
cdev_put(inode->i_cdev);
}
fops_put(file->f_op);
file_f_owner_release(file);
put_file_access(file);
dput(dentry);
if (unlikely(mode & FMODE_NEED_UNMOUNT))
dissolve_on_fput(mnt);
mntput(mnt);
out:
file_free(file);
}
static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
struct llist_node *node = llist_del_all(&delayed_fput_list);
struct file *f, *t;
llist_for_each_entry_safe(f, t, node, f_llist)
__fput(f);
}
static void ____fput(struct callback_head *work)
{
__fput(container_of(work, struct file, f_task_work));
}
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
/*
* If a kernel thread really needs to have the final fput() it has done
* to complete, call this. The only user right now is the boot - we
* *do* need to make sure our writes to binaries on initramfs have
* not left us with opened struct file waiting for __fput() - execve()
* won't work without that. Please, don't add more callers without
* very good reasons; in particular, never call that with locks
* held and never call that from a thread that might need to do
* some work on any kind of umount.
*/
void flush_delayed_fput(void)
{
delayed_fput(NULL);
flush_delayed_work(&delayed_fput_work);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);
static void __fput_deferred(struct file *file)
{
struct task_struct *task = current;
if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
file_free(file);
return;
}
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
init_task_work(&file->f_task_work, ____fput);
if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
return;
/*
* After this task has run exit_task_work(),
* task_work_add() will fail. Fall through to delayed
* fput to avoid leaking *file.
*/
}
if (llist_add(&file->f_llist, &delayed_fput_list))
schedule_delayed_work(&delayed_fput_work, 1);
}
void fput(struct file *file)
{
if (unlikely(file_ref_put(&file->f_ref)))
__fput_deferred(file);
}
EXPORT_SYMBOL(fput);
/*
* synchronous analog of fput(); for kernel threads that might be needed
* in some umount() (and thus can't use flush_delayed_fput() without
* risking deadlocks), need to wait for completion of __fput() and know
* for this specific struct file it won't involve anything that would
* need them. Use only if you really need it - at the very least,
* don't blindly convert fput() by kernel thread to that.
*/
void __fput_sync(struct file *file)
{
if (file_ref_put(&file->f_ref))
__fput(file);
}
EXPORT_SYMBOL(__fput_sync);
/*
* Equivalent to __fput_sync(), but optimized for being called with the last
* reference.
*
* See file_ref_put_close() for details.
*/
void fput_close_sync(struct file *file)
{
if (likely(file_ref_put_close(&file->f_ref)))
__fput(file);
}
/*
* Equivalent to fput(), but optimized for being called with the last
* reference.
*
* See file_ref_put_close() for details.
*/
void fput_close(struct file *file)
{
if (file_ref_put_close(&file->f_ref))
__fput_deferred(file);
}
void __init files_init(void)
{
struct kmem_cache_args args = {
.use_freeptr_offset = true,
.freeptr_offset = offsetof(struct file, f_freeptr),
};
filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
SLAB_HWCACHE_ALIGN | SLAB_PANIC |
SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
args.freeptr_offset = offsetof(struct backing_file, bf_freeptr);
bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file),
&args, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
/*
* One file with associated inode and dcache is very roughly 1K. Per default
* do not use more than 10% of our memory for files.
*/
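/*
 * Worked example for the sizing below (approximate, assuming 4 KiB pages,
 * 16 GiB of RAM and roughly 1 GiB already in use at boot): nr_pages = 4M,
 * memreserve is about 384K pages, so max_files ~= ((4M - 384K) * 4) / 10,
 * i.e. about 1.5 million files.
 */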
void __init files_maxfiles_init(void)
{
unsigned long n;
unsigned long nr_pages = totalram_pages();
unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;
memreserve = min(memreserve, nr_pages - 1);
n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_STRINGHASH_H
#define __LINUX_STRINGHASH_H
#include <linux/compiler.h> /* For __pure */
#include <linux/types.h> /* For u32, u64 */
#include <linux/hash.h>
/*
* Routines for hashing strings of bytes to a 32-bit hash value.
*
* These hash functions are NOT GUARANTEED STABLE between kernel
* versions, architectures, or even repeated boots of the same kernel.
* (E.g. they may depend on boot-time hardware detection or be
* deliberately randomized.)
*
* They are also not intended to be secure against collisions caused by
* malicious inputs; much slower hash functions are required for that.
*
* They are optimized for pathname components, meaning short strings.
* Even if a majority of files have longer names, the dynamic profile of
* pathname components skews short due to short directory names.
* (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
*/
/*
* Version 1: one byte at a time. Example of use:
*
* unsigned long hash = init_name_hash;
* while (*p)
* hash = partial_name_hash(tolower(*p++), hash);
* hash = end_name_hash(hash);
*
* Although this is designed for bytes, fs/hfsplus/unicode.c
* abuses it to hash 16-bit values.
*/
/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
#define init_name_hash(salt) (unsigned long)(salt)
/* partial hash update function. Assume roughly 4 bits per character */
static inline unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
return (prevhash + (c << 4) + (c >> 4)) * 11;
}
/*
* Finally: cut down the number of bits to an int value (and try to avoid
* losing bits). This also has the property (wanted by the dcache)
* that the msbits make a good hash table index.
*/
static inline unsigned int end_name_hash(unsigned long hash)
{
return hash_long(hash, 32);
}
/*
* Version 2: One word (32 or 64 bits) at a time.
* If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
* exists, which describes major Linux platforms like x86 and ARM), then
* this computes a different hash function much faster.
*
* If not set, this falls back to a wrapper around the preceding.
*/
extern unsigned int __pure full_name_hash(const void *salt, const char *, unsigned int);
/*
* A hash_len is a u64 with the hash of a string in the low
* half and the length in the high half.
*/
#define hashlen_hash(hashlen) ((u32)(hashlen))
#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
/* Return the "hash_len" (hash and length) of a null-terminated string */
extern u64 __pure hashlen_string(const void *salt, const char *name);
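/*
 * Usage sketch (illustrative only; example_name_matches() is a hypothetical
 * helper): quickly rule out a mismatch against a previously computed
 * hash_len (a matching pair is not proof of equality). The salt is
 * typically the parent dentry, so the same component name hashes
 * differently under different directories.
 */
static inline bool example_name_matches(const void *salt, const char *name,
					u64 expected)
{
	u64 hl = hashlen_string(salt, name);

	return hashlen_hash(hl) == hashlen_hash(expected) &&
	       hashlen_len(hl) == hashlen_len(expected);
}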
#endif /* __LINUX_STRINGHASH_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
* User space memory access functions
*/
#include <linux/compiler.h>
#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/mm_types.h>
#include <linux/string.h>
#include <linux/mmap_lock.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif
#include <asm-generic/access_ok.h>
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);
#define __uaccess_begin() stac()
#define __uaccess_end() clac()
#define __uaccess_begin_nospec() \
({ \
stac(); \
barrier_nospec(); \
})
/*
* This is the smallest unsigned integer type that can fit a value
* (up to 'long long')
*/
#define __inttype(x) __typeof__( \
__typefits(x,char, \
__typefits(x,short, \
__typefits(x,int, \
__typefits(x,long,0ULL)))))
#define __typefits(x,type,not) \
__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
/*
* This is used for both get_user() and __get_user() to expand to
* the proper special function call that has odd calling conventions
* due to returning both a value and an error, and that depends on
* the size of the pointer passed in.
*
* Careful: we have to cast the result to the type of the pointer
* for sign reasons.
*
* The use of _ASM_DX as the register specifier is a bit of a
* simplification, as gcc only cares about it as the starting point
* and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
* (%ecx being the next register in gcc's x86 register sequence), and
* %rdx on 64 bits.
*
* Clang/LLVM cares about the size of the register, but still wants
* the base register for something that ends up being a pair.
*/
#define do_get_user_call(fn,x,ptr) \
({ \
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
asm volatile("call __" #fn "_%c[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
})
/**
* get_user - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Return: zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
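/*
 * Usage sketch (illustrative only; uptr is a placeholder):
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;		(sleeps on a page fault if needed)
 *
 * get_user() checks the pointer with access_ok() internally; __get_user()
 * below skips that check and must only be used after an explicit
 * access_ok() by the caller.
 */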
/**
* __get_user - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Return: zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label) \
asm goto("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
_ASM_EXTABLE_UA(1b, %l2) \
_ASM_EXTABLE_UA(2b, %l2) \
: : "A" (x), "r" (addr) \
: : label)
#else
#define __put_user_goto_u64(x, ptr, label) \
__put_user_goto(x, ptr, "q", "er", label)
#endif
extern void __put_user_bad(void);
/*
* Strange magic calling convention: pointer in %ecx,
* value in %eax(:%edx), return value in %ecx. clobbers %rbx
*/
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);
/*
* ptr must be evaluated and assigned to the temporary __ptr_pu before
* the assignment of x to __val_pu, to avoid any function calls
* involved in the ptr expression (possibly implicitly generated due
* to KASAN) from clobbering %ax.
*/
#define do_put_user_call(fn,x,ptr) \
({ \
int __ret_pu; \
void __user *__ptr_pu; \
register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX); \
__typeof__(*(ptr)) __x = (x); /* eval x once */ \
__typeof__(ptr) __ptr = (ptr); /* eval ptr once */ \
__chk_user_ptr(__ptr); \
__ptr_pu = __ptr; \
__val_pu = __x; \
asm volatile("call __" #fn "_%c[size]" \
: "=c" (__ret_pu), \
ASM_CALL_CONSTRAINT \
: "0" (__ptr_pu), \
"r" (__val_pu), \
[size] "i" (sizeof(*(ptr))) \
:"ebx"); \
instrument_put_user(__x, __ptr, sizeof(*(ptr))); \
__builtin_expect(__ret_pu, 0); \
})
/**
* put_user - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Return: zero on success, or -EFAULT on error.
*/
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
/**
* __put_user - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Return: zero on success, or -EFAULT on error.
*/
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
#define __put_user_size(x, ptr, size, label) \
do { \
__typeof__(*(ptr)) __x = (x); /* eval x once */ \
__typeof__(ptr) __ptr = (ptr); /* eval ptr once */ \
__chk_user_ptr(__ptr); \
switch (size) { \
case 1: \
__put_user_goto(__x, __ptr, "b", "iq", label); \
break; \
case 2: \
__put_user_goto(__x, __ptr, "w", "ir", label); \
break; \
case 4: \
__put_user_goto(__x, __ptr, "l", "ir", label); \
break; \
case 8: \
__put_user_goto_u64(__x, __ptr, label); \
break; \
default: \
__put_user_bad(); \
} \
instrument_put_user(__x, __ptr, size); \
} while (0)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do { \
unsigned int __gu_low, __gu_high; \
const unsigned int __user *__gu_ptr; \
__gu_ptr = (const void __user *)(ptr); \
__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label) \
__get_user_asm(x, ptr, "q", "=r", label)
#endif
#define __get_user_size(x, ptr, size, label) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: { \
unsigned char x_u8__; \
__get_user_asm(x_u8__, ptr, "b", "=q", label); \
(x) = x_u8__; \
break; \
} \
case 2: \
__get_user_asm(x, ptr, "w", "=r", label); \
break; \
case 4: \
__get_user_asm(x, ptr, "l", "=r", label); \
break; \
case 8: \
__get_user_asm_u64(x, ptr, label); \
break; \
default: \
(x) = __get_user_bad(); \
} \
instrument_get_user(x); \
} while (0)
#define __get_user_asm(x, addr, itype, ltype, label) \
asm_goto_output("\n" \
"1: mov"itype" %[umem],%[output]\n" \
_ASM_EXTABLE_UA(1b, %l2) \
: [output] ltype(x) \
: [umem] "m" (__m(addr)) \
: : label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval) \
({ \
__typeof__(ptr) __ptr = (ptr); \
asm volatile("\n" \
"1: movl %[lowbits],%%eax\n" \
"2: movl %[highbits],%%edx\n" \
"3:\n" \
_ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG | \
EX_FLAG_CLEAR_AX_DX, \
%[errout]) \
_ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG | \
EX_FLAG_CLEAR_AX_DX, \
%[errout]) \
: [errout] "=r" (retval), \
[output] "=&A"(x) \
: [lowbits] "m" (__m(__ptr)), \
[highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
"0" (retval)); \
})
#else
#define __get_user_asm_u64(x, ptr, retval) \
__get_user_asm(x, ptr, retval, "q")
#endif
#define __get_user_size(x, ptr, size, retval) \
do { \
unsigned char x_u8__; \
\
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm(x_u8__, ptr, retval, "b"); \
(x) = x_u8__; \
break; \
case 2: \
__get_user_asm(x, ptr, retval, "w"); \
break; \
case 4: \
__get_user_asm(x, ptr, retval, "l"); \
break; \
case 8: \
__get_user_asm_u64(x, ptr, retval); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, itype) \
asm volatile("\n" \
"1: mov"itype" %[umem],%[output]\n" \
"2:\n" \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
EX_FLAG_CLEAR_AX, \
%[errout]) \
: [errout] "=r" (err), \
[output] "=a" (x) \
: [umem] "m" (__m(addr)), \
"0" (err))
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
bool success; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm_goto_output("\n" \
"1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
_ASM_EXTABLE_UA(1b, %l[label]) \
: "=@ccz" (success), \
[ptr] "+m" (*_ptr), \
[old] "+a" (__old) \
: [new] ltype (__new) \
: "memory" \
: label); \
if (unlikely(!success)) \
*_old = __old; \
likely(success); })
#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
bool success; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm_goto_output("\n" \
"1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
_ASM_EXTABLE_UA(1b, %l[label]) \
: "=@ccz" (success), \
"+A" (__old), \
[ptr] "+m" (*_ptr) \
: "b" ((u32)__new), \
"c" ((u32)((u64)__new >> 32)) \
: "memory" \
: label); \
if (unlikely(!success)) \
*_old = __old; \
likely(success); })
#endif // CONFIG_X86_32
#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
int __err = 0; \
bool success; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm volatile("\n" \
"1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
"2:\n" \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
%[errout]) \
: "=@ccz" (success), \
[errout] "+r" (__err), \
[ptr] "+m" (*_ptr), \
[old] "+a" (__old) \
: [new] ltype (__new) \
: "memory"); \
if (unlikely(__err)) \
goto label; \
if (unlikely(!success)) \
*_old = __old; \
likely(success); })
#ifdef CONFIG_X86_32
/*
* Unlike the normal CMPXCHG, use output GPR for both success/fail and error.
* There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
* hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
* both ESI and EDI for the memory operand, compilation will fail if the error
* is an input+output as there will be no register available for input.
*/
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
int __result; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm volatile("\n" \
"1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
"mov $0, %[result]\n\t" \
"setz %b[result]\n" \
"2:\n" \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
%[result]) \
: [result] "=q" (__result), \
"+A" (__old), \
[ptr] "+m" (*_ptr) \
: "b" ((u32)__new), \
"c" ((u32)((u64)__new >> 32)) \
: "memory", "cc"); \
if (unlikely(__result < 0)) \
goto label; \
if (unlikely(!__result)) \
*_old = __old; \
likely(__result); })
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
* Tell gcc we read from memory instead of writing: this is because
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
#define __put_user_goto(x, addr, itype, ltype, label) \
asm goto("\n" \
"1: mov"itype" %0,%1\n" \
_ASM_EXTABLE_UA(1b, %l2) \
: : ltype(x), "m" (__m(addr)) \
: : label)
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel
unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned len);
#endif
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif
#define ARCH_HAS_NOCACHE_UACCESS 1
/*
* The "unsafe" user accesses aren't really "unsafe", but the naming
* is a big fat warning: you have to not only do the access_ok()
* checking before using them, but you have to surround them with the
* user_access_begin/end() pair.
*/
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr,len)))
return 0;
__uaccess_begin_nospec();
return 1;
}
#define user_access_begin(a,b) user_access_begin(a,b)
#define user_access_end() __uaccess_end()
#define user_access_save() smap_save()
#define user_access_restore(x) smap_restore(x)
#define unsafe_put_user(x, ptr, label) \
__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label) \
do { \
__inttype(*(ptr)) __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label) \
do { \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
if (unlikely(__gu_err)) goto err_label; \
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
extern void __try_cmpxchg_user_wrong_size(void);
#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label) \
__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif
/*
* Force the pointer to u<size> to match the size expected by the asm helper.
* clang/LLVM compiles all cases and only discards the unused paths after
* processing errors, which breaks i386 if the pointer is an 8-byte value.
*/
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
bool __ret; \
__chk_user_ptr(_ptr); \
switch (sizeof(*(_ptr))) { \
case 1: __ret = __try_cmpxchg_user_asm("b", "q", \
(__force u8 *)(_ptr), (_oldp), \
(_nval), _label); \
break; \
case 2: __ret = __try_cmpxchg_user_asm("w", "r", \
(__force u16 *)(_ptr), (_oldp), \
(_nval), _label); \
break; \
case 4: __ret = __try_cmpxchg_user_asm("l", "r", \
(__force u32 *)(_ptr), (_oldp), \
(_nval), _label); \
break; \
case 8: __ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
(_nval), _label); \
break; \
default: __try_cmpxchg_user_wrong_size(); \
} \
__ret; })
/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
int __ret = -EFAULT; \
__uaccess_begin_nospec(); \
__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label); \
_label: \
__uaccess_end(); \
__ret; \
})
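/*
 * Usage sketch (illustrative only; uptr, expected and desired are
 * placeholders): the caller supplies a unique label name which the macro
 * defines internally for the fault path.
 *
 *	u32 old = expected, new = desired;
 *	int ret;
 *
 *	ret = __try_cmpxchg_user(uptr, &old, new, example_fault);
 *	if (ret < 0)
 *		return -EFAULT;		(the access faulted)
 *	if (ret)
 *		retry: 'old' now holds the current user-space value
 */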
/*
* We want the unsafe accessors to always be inlined and use
* the error labels - thus the macro games.
*/
#define unsafe_copy_loop(dst, src, len, type, label) \
while (len >= sizeof(type)) { \
unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
dst += sizeof(type); \
src += sizeof(type); \
len -= sizeof(type); \
}
#define unsafe_copy_to_user(_dst,_src,_len,label) \
do { \
char __user *__ucu_dst = (_dst); \
const char *__ucu_src = (_src); \
size_t __ucu_len = (_len); \
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
} while (0)
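/*
 * Usage sketch for the unsafe accessors (illustrative only): every batch of
 * unsafe_*_user() calls must be wrapped in user_access_begin()/
 * user_access_end(), with a local error label for the fault path.
 *
 *	if (!user_access_begin(udst, len))
 *		return -EFAULT;
 *	unsafe_copy_to_user(udst, ksrc, len, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */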
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label) \
__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
int __kr_err; \
\
__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
sizeof(type), __kr_err); \
if (unlikely(__kr_err)) \
goto err_label; \
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __put_kernel_nofault(dst, src, type, err_label) \
__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
sizeof(type), err_label)
#endif /* _ASM_X86_UACCESS_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H
#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>
struct notifier_block;
struct bio;
struct pagevec;
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
#define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64
static inline int current_is_kswapd(void)
{
return current->flags & PF_KSWAPD;
}
/*
* MAX_SWAPFILES defines the maximum number of swaptypes: things which can
* be swapped to. The swap type and the offset into that swap type are
* encoded into pte's and into pgoff_t's in the swapcache. Using five bits
* for the type means that the maximum number of swapcache pages is 27 bits
* on 32-bit-pgoff_t architectures. And that assumes that the architecture packs
* the type/offset into the pte as 5/27 as well.
*/
#define MAX_SWAPFILES_SHIFT 5
/*
* Use some of the swap files numbers for other purposes. This
* is a convenient way to hook into the VM to trigger special
* actions on faults.
*/
/*
* PTE markers are used to persist information onto PTEs that otherwise
* should be a none pte. As its name "PTE" hints, it should only be
* applied to the leaves of pgtables.
*/
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
/*
* Unaddressable device memory support. See include/linux/hmm.h and
* Documentation/mm/hmm.rst. Short description is we need struct pages for
* device memory that is unaddressable (inaccessible) by CPU, so that we can
* migrate part of a process memory to device memory.
*
* When a page is migrated from CPU to device, we set the CPU page table entry
* to a special SWP_DEVICE_{READ|WRITE} entry.
*
* When a page is mapped by the device for exclusive access we set the CPU page
* table entries to a special SWP_DEVICE_EXCLUSIVE entry.
*/
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
* Page migration support.
*
* SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
* indicates that the referenced (part of) an anonymous page is exclusive to
* a single process. For SWP_MIGRATION_WRITE, that information is implicit:
* (part of) an anonymous page that is mapped writable is exclusive to a
* single process.
*/
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
/*
* Handling of hardware poisoned pages with memory corruption.
*/
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif
#define MAX_SWAPFILES \
((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
SWP_PTE_MARKER_NUM)
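/*
 * Worked example (illustrative): with CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION
 * and CONFIG_MEMORY_FAILURE all enabled, the 5 type bits give 32 values, of
 * which 3 + 3 + 1 + 1 = 8 are reserved above, so MAX_SWAPFILES = 24 and the
 * special types land at:
 *
 *	24	SWP_HWPOISON
 *	25-27	SWP_MIGRATION_READ / _READ_EXCLUSIVE / _WRITE
 *	28-30	SWP_DEVICE_WRITE / _READ / _EXCLUSIVE
 *	31	SWP_PTE_MARKER
 */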
/*
* Magic header for a swap area. The first part of the union is
* what the swap magic looks like for the old (limited to 128MB)
* swap area format, the second part of the union adds - in the
* old reserved area - some extra information. Note that the first
* kilobyte is reserved for boot loader or disk label stuff...
*
* Having the magic at the end of the PAGE_SIZE makes detecting swap
* areas somewhat tricky on machines that support multiple page sizes.
* For 2.5 we'll probably want to move the magic to just beyond the
* bootbits...
*/
union swap_header {
struct {
char reserved[PAGE_SIZE - 10];
char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */
} magic;
struct {
char bootbits[1024]; /* Space for disklabel etc. */
__u32 version;
__u32 last_page;
__u32 nr_badpages;
unsigned char sws_uuid[16];
unsigned char sws_volume[16];
__u32 padding[117];
__u32 badpages[1];
} info;
};
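/*
 * Illustrative sketch (not part of this header): swapon-time validation
 * essentially checks the magic string at the end of the first page of the
 * swap area, roughly:
 *
 *	union swap_header *hdr = kmap_local_page(page);
 *	bool valid = !memcmp("SWAP-SPACE", hdr->magic.magic, 10) ||
 *		     !memcmp("SWAPSPACE2", hdr->magic.magic, 10);
 *	kunmap_local(hdr);
 */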
/*
* current->reclaim_state points to one of these when a task is running
* memory reclaim
*/
struct reclaim_state {
/* pages reclaimed outside of LRU-based reclaim */
unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
/* per-thread mm walk data */
struct lru_gen_mm_walk *mm_walk;
#endif
};
/*
* mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
* reclaim
* @pages: number of pages reclaimed
*
* If the current process is undergoing a reclaim operation, increment the
* number of reclaimed pages by @pages.
*/
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
if (current->reclaim_state)
current->reclaim_state->reclaimed += pages;
}
#ifdef __KERNEL__
struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;
/*
* A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
* disk blocks. An rbtree of swap extents maps the entire swapfile (where the
* term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
* from setup, they're handled identically.
*
* We always assume that blocks are of size PAGE_SIZE.
*/
struct swap_extent {
struct rb_node rb_node;
pgoff_t start_page;
pgoff_t nr_pages;
sector_t start_block;
};
/*
* Max bad pages in the new format..
*/
#define MAX_SWAP_BADPAGES \
((offsetof(union swap_header, magic.magic) - \
offsetof(union swap_header, info.badpages)) / sizeof(int))
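/*
 * Worked example (illustrative): info.badpages starts at offset
 * 1024 + 3 * 4 + 16 + 16 + 117 * 4 = 1536 and magic.magic starts at
 * PAGE_SIZE - 10, so with 4 KiB pages this evaluates to
 * (4086 - 1536) / 4 = 637 bad-page slots.
 */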
enum {
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
SWP_DISCARDABLE = (1 << 2), /* blkdev support discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
SWP_BLKDEV = (1 << 6), /* it's a block device */
SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */
SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
/* add others here before... */
};
#define SWAP_CLUSTER_MAX 32UL
#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/* Bit flag in swap_map */
#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */
/* Special value in first swap_map */
#define SWAP_MAP_MAX 0x3e /* Max count */
#define SWAP_MAP_BAD 0x3f /* Note page is bad */
#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */
/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX 0x7f /* Max count */
/*
* The first page in the swap file is the swap header, which is always marked
* bad to prevent it from being allocated as an entry. This also prevents the
* cluster to which it belongs from being marked free. Therefore 0 is safe to use as
* a sentinel to indicate an entry is not valid.
*/
#define SWAP_ENTRY_INVALID 0
#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS (PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS 1
#endif
/*
* We keep using the same cluster for a rotational device so IO will be sequential.
* The purpose is to optimize swap throughput on such devices.
*/
struct swap_sequential_cluster {
unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
struct percpu_ref users; /* indicate and keep swap device valid. */
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
struct list_head free_clusters; /* free clusters list */
struct list_head full_clusters; /* full clusters list */
struct list_head nonfull_clusters[SWAP_NR_ORDERS];
/* list of clusters that contain at least one free slot */
struct list_head frag_clusters[SWAP_NR_ORDERS];
/* list of clusters that are fragmented or contended */
unsigned int pages; /* total of usable pages of swap */
atomic_long_t inuse_pages; /* number of those currently in use */
struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
spinlock_t global_cluster_lock; /* Serialize usage of global cluster */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
struct completion comp; /* seldom referenced */
spinlock_t lock; /*
* protect map scan related fields like
* swap_map, inuse_pages and all cluster
* lists. other fields are only changed
* at swapon/swapoff, so are protected
* by swap_lock. Changing flags requires
* holding both this lock and swap_lock.
* If both locks need to be held, take
* swap_lock first.
*/
spinlock_t cont_lock; /*
* protect swap count continuation page
* list.
*/
struct work_struct discard_work; /* discard worker */
struct work_struct reclaim_work; /* reclaim worker */
struct list_head discard_clusters; /* discard clusters list */
struct plist_node avail_lists[]; /*
* entries in swap_avail_heads, one
* entry per node.
* Must be last as the number of the
* array is nr_node_ids, which is not
* a fixed value, so it has to be
* allocated dynamically.
* And it has to be an array so that
* plist_for_each_* can work.
*/
};
static inline swp_entry_t page_swap_entry(struct page *page)
{
struct folio *folio = page_folio(page);
swp_entry_t entry = folio->swap;
entry.val += folio_page_idx(folio, page);
return entry;
}
/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
unsigned int nr_io, unsigned int nr_rotated)
__releases(lruvec->lru_lock);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);
static inline bool folio_may_be_lru_cached(struct folio *folio)
{
/*
* Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
* Holding small numbers of low-order mTHP folios in per-CPU LRU cache
* would be sensible, but nobody has implemented and tested that yet.
*/
return !folio_test_large(folio);
}
extern atomic_t lru_disable_count;
static inline bool lru_cache_disabled(void)
{
return atomic_read(&lru_disable_count);
}
static inline void lru_cache_enable(void)
{
atomic_dec(&lru_disable_count);
}
extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200
/* Just reclaim from anon folios in proactive memory reclaim */
#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
unsigned int reclaim_options,
int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int reclaim_register_node(struct node *node);
extern void reclaim_unregister_node(struct node *node);
#else
static inline int reclaim_register_node(struct node *node)
{
return 0;
}
static inline void reclaim_unregister_node(struct node *node)
{
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
#ifdef CONFIG_NUMA
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#endif
void check_move_unevictable_folios(struct folio_batch *fbatch);
extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);
#ifdef CONFIG_SWAP
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
sector_t *);
static inline unsigned long total_swapcache_pages(void)
{
return global_node_page_state(NR_SWAPCACHE);
}
void free_swap_cache(struct folio *folio);
void free_folio_and_swap_cache(struct folio *folio);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
/* Swap 50% full? Release swapcache more aggressively... */
static inline bool vm_swap_full(void)
{
return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
static inline long get_nr_swap_pages(void)
{
return atomic_long_read(&nr_swap_pages);
}
extern void si_swapinfo(struct sysinfo *);
int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct backing_dev_info;
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
percpu_ref_put(&si->users);
}
#else /* CONFIG_SWAP */
static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
return NULL;
}
static inline void put_swap_device(struct swap_info_struct *si)
{
}
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
#define vm_swap_full() 0
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
#define free_folio_and_swap_cache(folio) \
folio_put(folio)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}
static inline void free_swap_cache(struct folio *folio)
{
}
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
return 0;
}
static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}
static inline int swap_duplicate(swp_entry_t swp)
{
return 0;
}
static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
return 0;
}
static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
}
static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}
static inline int __swap_count(swp_entry_t entry)
{
return 0;
}
static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
return false;
}
static inline int swp_swapcount(swp_entry_t entry)
{
return 0;
}
static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
{
return -EINVAL;
}
static inline bool folio_free_swap(struct folio *folio)
{
return false;
}
static inline int add_swap_extent(struct swap_info_struct *sis,
unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
{
return -EINVAL;
}
#endif /* CONFIG_SWAP */
static inline void free_swap_and_cache(swp_entry_t entry)
{
free_swap_and_cache_nr(entry, 1);
}
static inline void swap_free(swp_entry_t entry)
{
swap_free_nr(entry, 1);
}
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
/* Cgroup2 doesn't have per-cgroup swappiness */
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
return READ_ONCE(vm_swappiness);
/* root ? */
if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
return READ_ONCE(vm_swappiness);
return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
return READ_ONCE(vm_swappiness);
}
#endif
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
if (mem_cgroup_disabled())
return;
__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif
#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
if (mem_cgroup_disabled())
return 0;
return __mem_cgroup_try_charge_swap(folio, entry);
}
extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
if (mem_cgroup_disabled())
return;
__mem_cgroup_uncharge_swap(entry, nr_pages);
}
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
return 0;
}
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
unsigned int nr_pages)
{
}
static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
return get_nr_swap_pages();
}
static inline bool mem_cgroup_swap_full(struct folio *folio)
{
return vm_swap_full();
}
#endif
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
* Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
*
* This driver produces cryptographically secure pseudorandom data. It is divided
* into roughly six sections, each with a section header:
*
* - Initialization and readiness waiting.
* - Fast key erasure RNG, the "crng".
* - Entropy accumulation and extraction routines.
* - Entropy collection routines.
* - Userspace reader/writer interfaces.
* - Sysctl interface.
*
* The high level overview is that there is one input pool, into which
* various pieces of data are hashed. Prior to initialization, some of that
* data is then "credited" as having a certain number of bits of entropy.
* When enough bits of entropy are available, the hash is finalized and
* handed as a key to a stream cipher that expands it indefinitely for
* various consumers. This key is periodically refreshed as the various
* entropy collectors, described below, add data to the input pool.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/siphash.h>
#include <linux/sched/isolation.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#ifdef CONFIG_VDSO_GETRANDOM
#include <vdso/getrandom.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>
#endif
#include <asm/archrandom.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
/*********************************************************************
*
* Initialization and readiness waiting.
*
* Much of the RNG infrastructure is devoted to various dependencies
* being able to wait until the RNG has collected enough entropy and
* is ready for safe consumption.
*
*********************************************************************/
/*
* crng_init is protected by base_crng->lock, and only increases
* its value (from empty->early->ready).
*/
static enum {
CRNG_EMPTY = 0, /* Little to no entropy collected */
CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
* Returns whether or not the input pool has been seeded and thus guaranteed
* to supply cryptographically secure random numbers. This applies to: the
* /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
* u16,u32,u64,long} family of functions.
*
* Returns: true if the input pool has been seeded.
* false if the input pool has not been seeded.
*/
bool rng_is_initialized(void)
{
return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);
static void __cold crng_set_ready(struct work_struct *work)
{
static_branch_enable(&crng_is_ready);
}
/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);
/*
* Wait for the input pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
* device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
* long} family of functions. Using any of these functions without first
* calling this function forfeits the guarantee of security.
*
* Returns: 0 if the input pool has been seeded.
* -ERESTARTSYS if the function was interrupted by a signal.
*/
int wait_for_random_bytes(void)
{
while (!crng_ready()) {
int ret;
try_to_generate_entropy();
ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
}
return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
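/*
 * Illustrative sketch (not part of this file): a caller that must not use
 * unseeded output typically waits first and only then draws bytes, which is
 * essentially what the get_random_bytes_wait() helper in <linux/random.h>
 * wraps up:
 *
 *	int ret = wait_for_random_bytes();
 *	if (unlikely(ret))
 *		return ret;
 *	get_random_bytes(key, sizeof(key));
 */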
/*
* Add a callback function that will be invoked when the crng is initialised,
* or immediately if it already has been. Only use this if you are absolutely
* sure it is required. Most users should instead be able to test
* `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
*/
int __cold execute_with_initialized_rng(struct notifier_block *nb)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&random_ready_notifier.lock, flags);
if (crng_ready())
nb->notifier_call(nb, 0, NULL);
else
ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
return ret;
}
#define warn_unseeded_randomness() \
if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
__func__, (void *)_RET_IP_, crng_init)
/*********************************************************************
*
* Fast key erasure RNG, the "crng".
*
* These functions expand entropy from the entropy extractor into
* long streams for external consumption using the "fast key erasure"
* RNG described at <https://blog.cr.yp.to/20170723-random.html>.
*
* There are a few exported interfaces for use by other drivers:
*
* void get_random_bytes(void *buf, size_t len)
* u8 get_random_u8()
* u16 get_random_u16()
* u32 get_random_u32()
* u32 get_random_u32_below(u32 ceil)
* u32 get_random_u32_above(u32 floor)
* u32 get_random_u32_inclusive(u32 floor, u32 ceil)
* u64 get_random_u64()
* unsigned long get_random_long()
*
* These interfaces will return the requested number of random bytes
* into the given buffer or as a return value. This is equivalent to
* a read from /dev/urandom. The u8, u16, u32, u64, long family of
* functions may be higher performance for one-off random integers,
* because they do a bit of buffering and do not invoke reseeding
* until the buffer is emptied.
*
*********************************************************************/
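/*
 * Illustrative sketch (not part of this file): typical in-kernel use of the
 * interfaces listed above; "die" ends up uniform in [1, 6] and "suffix" is
 * filled with three random bytes (the names are only for illustration):
 *
 *	u32 die = get_random_u32_below(6) + 1;
 *	u8 suffix[3];
 *
 *	get_random_bytes(suffix, sizeof(suffix));
 */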
enum {
CRNG_RESEED_START_INTERVAL = HZ,
CRNG_RESEED_INTERVAL = 60 * HZ
};
static struct {
u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
unsigned long generation;
spinlock_t lock;
} base_crng = {
.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};
struct crng {
u8 key[CHACHA_KEY_SIZE];
unsigned long generation;
local_lock_t lock;
};
static DEFINE_PER_CPU(struct crng, crngs) = {
.generation = ULONG_MAX,
.lock = INIT_LOCAL_LOCK(crngs.lock),
};
/*
* Return the interval until the next reseeding, which is normally
* CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
* proportional to the uptime.
*/
static unsigned int crng_reseed_interval(void)
{
static bool early_boot = true;
if (unlikely(READ_ONCE(early_boot))) {
time64_t uptime = ktime_get_seconds();
if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
WRITE_ONCE(early_boot, false);
else
return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
(unsigned int)uptime / 2 * HZ);
}
return CRNG_RESEED_INTERVAL;
}
/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);
/* This extracts a new crng key from the input pool. */
static void crng_reseed(struct work_struct *work)
{
static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
unsigned long flags;
unsigned long next_gen;
u8 key[CHACHA_KEY_SIZE];
/* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
if (likely(system_unbound_wq))
queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
extract_entropy(key, sizeof(key));
/*
* We copy the new key into the base_crng, overwriting the old one,
* and update the generation counter. We avoid hitting ULONG_MAX,
* because the per-cpu crngs are initialized to ULONG_MAX, so this
* forces new CPUs that come online to always initialize.
*/
spin_lock_irqsave(&base_crng.lock, flags);
memcpy(base_crng.key, key, sizeof(base_crng.key));
next_gen = base_crng.generation + 1;
if (next_gen == ULONG_MAX)
++next_gen;
WRITE_ONCE(base_crng.generation, next_gen);
#ifdef CONFIG_VDSO_GETRANDOM
/* base_crng.generation's invalid value is ULONG_MAX, while
* vdso_k_rng_data->generation's invalid value is 0, so add one to the
* former to arrive at the latter. Use smp_store_release so that this
* is ordered with the write above to base_crng.generation. Pairs with
* the smp_rmb() before the syscall in the vDSO code.
*
* Cast to unsigned long for 32-bit architectures, since atomic 64-bit
* operations are not supported on those architectures. This is safe
* because base_crng.generation is a 32-bit value. On big-endian
* architectures it will be stored in the upper 32 bits, but that's okay
* because the vDSO side only checks whether the value changed, without
* actually using or interpreting the value.
*/
smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
spin_unlock_irqrestore(&base_crng.lock, flags);
memzero_explicit(key, sizeof(key));
}
/*
* This generates a ChaCha block using the provided key, and then
* immediately overwrites that key with half the block. It returns
* the resultant ChaCha state to the user, along with the second
* half of the block containing 32 bytes of random data that may
* be used; random_data_len may not be greater than 32.
*
* The returned ChaCha state contains within it a copy of the old
* key value, at index 4, so the state should always be zeroed out
* immediately after use in order to maintain forward secrecy.
* If the state cannot be erased in a timely manner, then it is
* safer to set the random_data parameter to &chacha_state->x[4]
* so that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
u8 first_block[CHACHA_BLOCK_SIZE];
BUG_ON(random_data_len > 32);
chacha_init_consts(chacha_state);
memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
chacha20_block(chacha_state, first_block);
memcpy(key, first_block, CHACHA_KEY_SIZE);
memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
memzero_explicit(first_block, sizeof(first_block));
}
/*
* This function returns a ChaCha state that you may use for generating
* random data. It also returns up to 32 bytes on its own of random data
* that may be used; random_data_len may not be greater than 32.
*/
static void crng_make_state(struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
unsigned long flags;
struct crng *crng;
BUG_ON(random_data_len > 32);
/*
* For the fast path, we check whether we're ready, unlocked first, and
* then re-check once locked later. In the case where we're really not
* ready, we do fast key erasure with the base_crng directly, extracting
* when crng_init is CRNG_EMPTY.
*/
if (!crng_ready()) {
bool ready;
spin_lock_irqsave(&base_crng.lock, flags);
ready = crng_ready();
if (!ready) {
if (crng_init == CRNG_EMPTY)
extract_entropy(base_crng.key, sizeof(base_crng.key));
crng_fast_key_erasure(base_crng.key, chacha_state,
random_data, random_data_len);
}
spin_unlock_irqrestore(&base_crng.lock, flags);
if (!ready)
return;
}
local_lock_irqsave(&crngs.lock, flags);
crng = raw_cpu_ptr(&crngs);
/*
* If our per-cpu crng is older than the base_crng, then it means
* somebody reseeded the base_crng. In that case, we do fast key
* erasure on the base_crng, and use its output as the new key
* for our per-cpu crng. This brings us up to date with base_crng.
*/
if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
spin_lock(&base_crng.lock);
crng_fast_key_erasure(base_crng.key, chacha_state,
crng->key, sizeof(crng->key));
crng->generation = base_crng.generation;
spin_unlock(&base_crng.lock);
}
/*
* Finally, when we've made it this far, our per-cpu crng has an up
* to date key, and we can do fast key erasure with it to produce
* some random data and a ChaCha state for the caller. All other
* branches of this function are "unlikely", so most of the time we
* should wind up here immediately.
*/
crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
local_unlock_irqrestore(&crngs.lock, flags);
}
static void _get_random_bytes(void *buf, size_t len)
{
struct chacha_state chacha_state;
u8 tmp[CHACHA_BLOCK_SIZE];
size_t first_block_len;
if (!len)
return;
first_block_len = min_t(size_t, 32, len);
crng_make_state(&chacha_state, buf, first_block_len);
len -= first_block_len;
buf += first_block_len;
while (len) {
if (len < CHACHA_BLOCK_SIZE) {
chacha20_block(&chacha_state, tmp);
memcpy(buf, tmp, len);
memzero_explicit(tmp, sizeof(tmp));
break;
}
chacha20_block(&chacha_state, buf);
if (unlikely(chacha_state.x[12] == 0))
++chacha_state.x[13];
len -= CHACHA_BLOCK_SIZE;
buf += CHACHA_BLOCK_SIZE;
}
chacha_zeroize_state(&chacha_state);
}
/*
* This returns random bytes in arbitrary quantities. The quality of the
* random bytes is as good as /dev/urandom. In order to ensure that the
* randomness provided by this function is okay, the function
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
*/
void get_random_bytes(void *buf, size_t len)
{
warn_unseeded_randomness();
_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
struct chacha_state chacha_state;
u8 block[CHACHA_BLOCK_SIZE];
size_t ret = 0, copied;
if (unlikely(!iov_iter_count(iter)))
return 0;
/*
* Immediately overwrite the ChaCha key at index 4 with random
* bytes, in case userspace causes copy_to_iter() below to sleep
* forever, so that we still retain forward secrecy in that case.
*/
crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
CHACHA_KEY_SIZE);
/*
* However, if we're doing a read of len <= 32, we don't need to
* use chacha_state after, so we can simply return those bytes to
* the user directly.
*/
if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
goto out_zero_chacha;
}
for (;;) {
chacha20_block(&chacha_state, block);
if (unlikely(chacha_state.x[12] == 0))
++chacha_state.x[13];
copied = copy_to_iter(block, sizeof(block), iter);
ret += copied;
if (!iov_iter_count(iter) || copied != sizeof(block))
break;
BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
if (ret % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
cond_resched();
}
}
memzero_explicit(block, sizeof(block));
out_zero_chacha:
chacha_zeroize_state(&chacha_state);
return ret ? ret : -EFAULT;
}
/*
* Batched entropy returns random integers. The quality of the random
* numbers is as good as /dev/urandom. In order to ensure that the randomness
* provided by this function is okay, the function wait_for_random_bytes()
* should be called and return 0 at least once at any point prior.
*/
#define DEFINE_BATCHED_ENTROPY(type) \
struct batch_ ##type { \
/* \
* We make this 1.5x a ChaCha block, so that we get the \
* remaining 32 bytes from fast key erasure, plus one full \
* block from the detached ChaCha state. We can increase \
* the size of this later if needed so long as we keep the \
* formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
*/ \
type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
local_lock_t lock; \
unsigned long generation; \
unsigned int position; \
}; \
\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
.position = UINT_MAX \
}; \
\
type get_random_ ##type(void) \
{ \
type ret; \
unsigned long flags; \
struct batch_ ##type *batch; \
unsigned long next_gen; \
\
warn_unseeded_randomness(); \
\
if (!crng_ready()) { \
_get_random_bytes(&ret, sizeof(ret)); \
return ret; \
} \
\
local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
batch = raw_cpu_ptr(&batched_entropy_##type); \
\
next_gen = READ_ONCE(base_crng.generation); \
if (batch->position >= ARRAY_SIZE(batch->entropy) || \
next_gen != batch->generation) { \
_get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
batch->position = 0; \
batch->generation = next_gen; \
} \
\
ret = batch->entropy[batch->position]; \
batch->entropy[batch->position] = 0; \
++batch->position; \
local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
return ret; \
} \
EXPORT_SYMBOL(get_random_ ##type);
DEFINE_BATCHED_ENTROPY(u8)
DEFINE_BATCHED_ENTROPY(u16)
DEFINE_BATCHED_ENTROPY(u32)
DEFINE_BATCHED_ENTROPY(u64)
u32 __get_random_u32_below(u32 ceil)
{
/*
* This is the slow path for variable ceil. It is still fast, most of
* the time, by doing traditional reciprocal multiplication and
* opportunistically comparing the lower half to ceil itself, before
* falling back to computing a larger bound, and then rejecting samples
* whose lower half would indicate a range indivisible by ceil. The use
* of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
* in 32-bits.
*/
u32 rand = get_random_u32();
u64 mult;
/*
* This function is technically undefined for ceil == 0, and in fact
* for the non-underscored constant version in the header, we build bug
* on that. But for the non-constant case, it's convenient to have that
* evaluate to being a straight call to get_random_u32(), so that
* get_random_u32_inclusive() can work over its whole range without
* undefined behavior.
*/
if (unlikely(!ceil))
return rand;
mult = (u64)ceil * rand;
if (unlikely((u32)mult < ceil)) {
u32 bound = -ceil % ceil;
while (unlikely((u32)mult < bound))
mult = (u64)ceil * get_random_u32();
}
return mult >> 32;
}
EXPORT_SYMBOL(__get_random_u32_below);
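/*
 * Worked example (illustrative): for ceil == 6, -ceil % ceil evaluates to
 * 2^32 mod 6 == 4, so samples whose lower half falls in 0..3 are rejected
 * and redrawn; every surviving sample maps via mult >> 32 to a value that
 * is uniform over 0..5.
 */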
#ifdef CONFIG_SMP
/*
* This function is called when the CPU is coming up, with entry
* CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
*/
int __cold random_prepare_cpu(unsigned int cpu)
{
/*
* When the cpu comes back online, immediately invalidate both
* the per-cpu crng and all batches, so that we serve fresh
* randomness.
*/
per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
return 0;
}
#endif
/**********************************************************************
*
* Entropy accumulation and extraction routines.
*
* Callers may add entropy via:
*
* static void mix_pool_bytes(const void *buf, size_t len)
*
* After which, if added entropy should be credited:
*
* static void credit_init_bits(size_t bits)
*
* Finally, extract entropy via:
*
* static void extract_entropy(void *buf, size_t len)
*
**********************************************************************/
enum {
POOL_BITS = BLAKE2S_HASH_SIZE * 8,
POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};
static struct {
struct blake2s_state hash;
spinlock_t lock;
unsigned int init_bits;
} input_pool = {
.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
.hash.outlen = BLAKE2S_HASH_SIZE,
.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
static void _mix_pool_bytes(const void *buf, size_t len)
{
blake2s_update(&input_pool.hash, buf, len);
}
/*
* This function adds bytes into the input pool. It does not
* update the initialization bit counter; the caller should call
* credit_init_bits if this is appropriate.
*/
static void mix_pool_bytes(const void *buf, size_t len)
{
unsigned long flags;
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
}
/*
* This is an HKDF-like construction for using the hashed collected entropy
* as a PRF key, that's then expanded block-by-block.
*/
static void extract_entropy(void *buf, size_t len)
{
unsigned long flags;
u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
struct {
unsigned long rdseed[32 / sizeof(long)];
size_t counter;
} block;
size_t i, longs;
for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
if (longs) {
i += longs;
continue;
}
longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
if (longs) {
i += longs;
continue;
}
block.rdseed[i++] = random_get_entropy();
}
spin_lock_irqsave(&input_pool.lock, flags);
/* seed = HASHPRF(last_key, entropy_input) */
blake2s_final(&input_pool.hash, seed);
/* next_key = HASHPRF(seed, RDSEED || 0) */
block.counter = 0;
blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
spin_unlock_irqrestore(&input_pool.lock, flags);
memzero_explicit(next_key, sizeof(next_key));
while (len) {
i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
/* output = HASHPRF(seed, RDSEED || ++counter) */
++block.counter;
blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
len -= i;
buf += i;
}
memzero_explicit(seed, sizeof(seed));
memzero_explicit(&block, sizeof(block));
}
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
static void __cold _credit_init_bits(size_t bits)
{
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
int m;
if (!bits)
return;
add = min_t(size_t, bits, POOL_BITS);
orig = READ_ONCE(input_pool.init_bits);
do {
new = min_t(unsigned int, POOL_BITS, orig + add);
} while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
if (static_key_initialized && system_unbound_wq)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
WRITE_ONCE(vdso_k_rng_data->is_ready, true);
#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
m = ratelimit_state_get_miss(&urandom_warning);
if (m)
pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
spin_lock_irqsave(&base_crng.lock, flags);
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
if (crng_init == CRNG_EMPTY) {
extract_entropy(base_crng.key, sizeof(base_crng.key));
crng_init = CRNG_EARLY;
}
spin_unlock_irqrestore(&base_crng.lock, flags);
}
}
/**********************************************************************
*
* Entropy collection routines.
*
* The following exported functions are used for pushing entropy into
* the above entropy accumulation routines:
*
* void add_device_randomness(const void *buf, size_t len);
* void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
* void add_bootloader_randomness(const void *buf, size_t len);
* void add_vmfork_randomness(const void *unique_vm_id, size_t len);
* void add_interrupt_randomness(int irq);
* void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
* void add_disk_randomness(struct gendisk *disk);
*
* add_device_randomness() adds data to the input pool that
* is likely to differ between two devices (or possibly even per boot).
* This would be things like MAC addresses or serial numbers, or the
* read-out of the RTC. This does *not* credit any actual entropy to
* the pool, but it initializes the pool to different values for devices
* that might otherwise be identical and have very little entropy
* available to them (particularly common in the embedded world).
*
* add_hwgenerator_randomness() is for true hardware RNGs, and will credit
* entropy as specified by the caller. If the entropy pool is full it will
* block until more entropy is needed.
*
* add_bootloader_randomness() is called by bootloader drivers, such as EFI
* and device tree, and credits its input depending on whether or not the
* command line option 'random.trust_bootloader' is set.
*
* add_vmfork_randomness() adds a unique (but not necessarily secret) ID
* representing the current instance of a VM to the pool, without crediting,
* and then force-reseeds the crng so that it takes effect immediately.
*
* add_interrupt_randomness() uses the interrupt timing as random
* inputs to the entropy pool. Using the cycle counters and the irq source
* as inputs, it feeds the input pool roughly once a second or after 64
* interrupts, crediting 1 bit of entropy for whichever comes first.
*
* add_input_randomness() uses the input layer interrupt timing, as well
* as the event type information from the hardware.
*
* add_disk_randomness() uses what amounts to the seek time of block
* layer request events, on a per-disk_devt basis, as input to the
* entropy pool. Note that high-speed solid state drives with very low
* seek times do not make for good sources of entropy, as their seek
* times are usually fairly consistent.
*
* The last two routines try to estimate how many bits of entropy
* to credit. They do this by keeping track of the first and second
* order deltas of the event timings.
*
**********************************************************************/
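/*
 * Illustrative sketch (not part of this file): a driver feeding in identity
 * data that is unique per device but not secret might do, for example (the
 * field names are only for illustration):
 *
 *	add_device_randomness(dev->dev_addr, ETH_ALEN);
 *	add_device_randomness(&dev->serial, sizeof(dev->serial));
 *
 * None of this is credited as entropy; it only perturbs the pool's starting
 * state.
 */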
static bool trust_cpu __initdata = true;
static bool trust_bootloader __initdata = true;
static int __init parse_trust_cpu(char *arg)
{
return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);
static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
unsigned long flags, entropy = random_get_entropy();
/*
* Encode a representation of how long the system has been suspended,
* in a way that is distinct from prior system suspends.
*/
ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&action, sizeof(action));
_mix_pool_bytes(stamps, sizeof(stamps));
_mix_pool_bytes(&entropy, sizeof(entropy));
spin_unlock_irqrestore(&input_pool.lock, flags);
if (crng_ready() && (action == PM_RESTORE_PREPARE ||
(action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
!IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
crng_reseed(NULL);
pr_notice("crng reseeded on system resumption\n");
}
return 0;
}
static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
/*
* This is called extremely early, before time keeping functionality is
* available, but arch randomness is. Interrupts are not yet enabled.
*/
void __init random_init_early(const char *command_line)
{
unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
size_t i, longs, arch_bits;
#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
continue;
}
longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
continue;
}
arch_bits -= sizeof(*entropy) * 8;
++i;
}
_mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
_mix_pool_bytes(command_line, strlen(command_line));
/* Reseed if already seeded by earlier phases. */
if (crng_ready())
crng_reseed(NULL);
else if (trust_cpu)
_credit_init_bits(arch_bits);
}
/*
* This is called a little bit after the prior function, and now there is
* access to timestamps counters. Interrupts are not yet enabled.
*/
void __init random_init(void)
{
unsigned long entropy = random_get_entropy();
ktime_t now = ktime_get_real();
_mix_pool_bytes(&now, sizeof(now));
_mix_pool_bytes(&entropy, sizeof(entropy));
add_latent_entropy();
/*
* If we were initialized by the cpu or bootloader before jump labels
* or workqueues are initialized, then we should enable the static
* branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
/* Reseed if already seeded by earlier phases. */
if (crng_ready())
crng_reseed(NULL);
WARN_ON(register_pm_notifier(&pm_notifier));
WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
"entropy collection will consequently suffer.");
}
/*
* Add device- or boot-specific data to the input pool to help
* initialize it.
*
* None of this adds any entropy; it is meant to avoid the problem of
* the entropy pool having similar initial state across largely
* identical devices.
*/
void add_device_randomness(const void *buf, size_t len)
{
unsigned long entropy = random_get_entropy();
unsigned long flags;
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&entropy, sizeof(entropy));
_mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
/*
* Interface for in-kernel drivers of true hardware RNGs. Those devices
* may produce endless random bits, so this function will sleep for
* some amount of time afterwards if the sleep_after parameter is true.
*/
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
{
mix_pool_bytes(buf, len);
credit_init_bits(entropy);
/*
* Throttle writing to once every reseed interval, unless we're not yet
* initialized or no entropy is credited.
*/
if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
/*
* Handle random seed passed by bootloader, and credit it depending
* on the command line option 'random.trust_bootloader'.
*/
void __init add_bootloader_randomness(const void *buf, size_t len)
{
mix_pool_bytes(buf, len);
if (trust_bootloader)
credit_init_bits(len * 8);
}
#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
/*
* Handle a new VM ID, which is unique but not secret, so we
* don't credit it, but we do immediately force a reseed after so
* that it's used by the crng posthaste.
*/
void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
{
add_device_randomness(unique_vm_id, len);
if (crng_ready()) {
crng_reseed(NULL);
pr_notice("crng reseeded due to virtual machine fork\n");
}
blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif
int __cold register_random_vmfork_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
struct fast_pool {
unsigned long pool[4];
unsigned long last;
unsigned int count;
struct timer_list mix;
};
static void mix_interrupt_randomness(struct timer_list *work);
static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};
/*
* This is [Half]SipHash-1-x, starting from an empty key. Because
* the key is fixed, it assumes that its inputs are non-malicious,
* and therefore this has no security on its own. s represents the
* four-word SipHash state, while v represents a two-word input.
*/
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
s[3] ^= v1;
FASTMIX_PERM(s[0], s[1], s[2], s[3]);
s[0] ^= v1;
s[3] ^= v2;
FASTMIX_PERM(s[0], s[1], s[2], s[3]);
s[0] ^= v2;
}
#ifdef CONFIG_SMP
/*
* This function is called when the CPU has just come online, with
* entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
*/
int __cold random_online_cpu(unsigned int cpu)
{
/*
* During CPU shutdown and before CPU onlining, add_interrupt_
* randomness() may schedule mix_interrupt_randomness(), and
* set the MIX_INFLIGHT flag. However, because the worker can
* be scheduled on a different CPU during this period, that
* flag will never be cleared. For that reason, we zero out
* the flag here, which runs just after workqueues are onlined
* for the CPU again. This also has the effect of setting the
* irq randomness count to zero so that new accumulated irqs
* are fresh.
*/
per_cpu_ptr(&irq_randomness, cpu)->count = 0;
return 0;
}
#endif
static void mix_interrupt_randomness(struct timer_list *work)
{
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
/*
* The size of the copied stack pool is explicitly 2 longs so that we
* only ever ingest half of the siphash output each time, retaining
* the other half as the next "key" that carries over. The entropy is
* supposed to be sufficiently dispersed between bits so on average
* we don't wind up "losing" some.
*/
unsigned long pool[2];
unsigned int count;
/* Check to see if we're running on the wrong CPU due to hotplug. */
local_irq_disable();
if (fast_pool != this_cpu_ptr(&irq_randomness)) {
local_irq_enable();
return;
}
/*
* Copy the pool to the stack so that the mixer always has a
* consistent view, before we reenable irqs again.
*/
memcpy(pool, fast_pool->pool, sizeof(pool));
count = fast_pool->count;
fast_pool->count = 0;
fast_pool->last = jiffies;
local_irq_enable();
mix_pool_bytes(pool, sizeof(pool));
credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
memzero_explicit(pool, sizeof(pool));
}
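/*
 * Worked example (illustrative): on a 64-bit machine pool[] above is 16
 * bytes, so the clamp allows at most 128 bits per flush. A flush triggered
 * by the one-second timeout with, say, 70 interrupts accumulated credits
 * 70 / 64 = 1 bit, while a flush triggered by reaching 1024 interrupts
 * credits 1024 / 64 = 16 bits.
 */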
void add_interrupt_randomness(int irq)
{
enum { MIX_INFLIGHT = 1U << 31 };
unsigned long entropy = random_get_entropy();
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned int new_count;
fast_mix(fast_pool->pool, entropy,
(regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
new_count = ++fast_pool->count;
if (new_count & MIX_INFLIGHT)
return;
if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
return;
fast_pool->count |= MIX_INFLIGHT;
if (!timer_pending(&fast_pool->mix)) {
fast_pool->mix.expires = jiffies;
add_timer_on(&fast_pool->mix, raw_smp_processor_id());
}
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
/* There is one of these per entropy source */
struct timer_rand_state {
unsigned long last_time;
long last_delta, last_delta2;
};
/*
* This function adds entropy to the entropy "pool" by using timing
* delays. It uses the timer_rand_state structure to make an estimate
* of how many bits of entropy this call has added to the pool. The
* value "num" is also added to the pool; it should somehow describe
* the type of event that just happened.
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
unsigned long entropy = random_get_entropy(), now = jiffies, flags;
long delta, delta2, delta3;
unsigned int bits;
/*
* If we're in a hard IRQ, add_interrupt_randomness() will be called
* sometime after, so mix into the fast pool.
*/
if (in_hardirq()) {
fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
} else {
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&entropy, sizeof(entropy));
_mix_pool_bytes(&num, sizeof(num));
spin_unlock_irqrestore(&input_pool.lock, flags);
}
if (crng_ready())
return;
/*
* Calculate number of bits of randomness we probably added.
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
delta = now - READ_ONCE(state->last_time);
WRITE_ONCE(state->last_time, now);
delta2 = delta - READ_ONCE(state->last_delta);
WRITE_ONCE(state->last_delta, delta);
delta3 = delta2 - READ_ONCE(state->last_delta2);
WRITE_ONCE(state->last_delta2, delta2);
if (delta < 0)
delta = -delta;
if (delta2 < 0)
delta2 = -delta2;
if (delta3 < 0)
delta3 = -delta3;
if (delta > delta2)
delta = delta2;
if (delta > delta3)
delta = delta3;
/*
* delta is now minimum absolute delta. Round down by 1 bit
* on general principles, and limit entropy estimate to 11 bits.
*/
bits = min(fls(delta >> 1), 11);
/*
* As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
* will run after this, which uses a different crediting scheme of 1 bit
* per every 64 interrupts. In order to let that function do accounting
* close to the one in this function, we credit a full 64/64 bit per bit,
* and then subtract one to account for the extra one added.
*/
if (in_hardirq())
this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
else
_credit_init_bits(bits);
}
void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
static unsigned char last_value;
static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
/* Ignore autorepeat and the like. */
if (value == last_value)
return;
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);
#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
if (!disk || !disk->random)
return;
/* First major is 1, so we get >= 0x200 here. */
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
void __cold rand_initialize_disk(struct gendisk *disk)
{
struct timer_rand_state *state;
/*
* If kzalloc returns null, we just won't use that entropy
* source.
*/
state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
if (state) {
state->last_time = INITIAL_JIFFIES;
disk->random = state;
}
}
#endif
struct entropy_timer_state {
unsigned long entropy;
struct timer_list timer;
atomic_t samples;
unsigned int samples_per_bit;
};
/*
* Each time the timer fires, we expect that we got an unpredictable jump in
* the cycle counter. Even if the timer is running on another CPU, the timer
* activity will be touching the stack of the CPU that is generating entropy.
*
* Note that we don't re-arm the timer in the timer itself - we are happy to be
* scheduled away, since that just makes the load more complex, but we do not
* want the timer to keep ticking unless the entropy loop is running.
*
* So the re-arming always happens in the entropy loop itself.
*/
static void __cold entropy_timer(struct timer_list *timer)
{
struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
unsigned long entropy = random_get_entropy();
mix_pool_bytes(&entropy, sizeof(entropy));
if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
credit_init_bits(1);
}
/*
* If we have an actual cycle counter, see if we can generate enough entropy
* with timing noise.
*/
static void __cold try_to_generate_entropy(void)
{
enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
int cpu = -1;
for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
stack->entropy = random_get_entropy();
if (stack->entropy != last)
++num_different;
last = stack->entropy;
}
stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
return;
atomic_set(&stack->samples, 0);
timer_setup_on_stack(&stack->timer, entropy_timer, 0);
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
* executing by checking timer_delete_sync_try(), before queueing the next one.
*/
if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
struct cpumask timer_cpus;
unsigned int num_cpus;
/*
* Preemption must be disabled here, both to read the current CPU number
* and to avoid scheduling a timer on a dead CPU.
*/
preempt_disable();
/* Only schedule callbacks on timer CPUs that are online. */
cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
num_cpus = cpumask_weight(&timer_cpus);
/* In the very bizarre case of misconfiguration, fall back to all online CPUs. */
if (unlikely(num_cpus == 0)) {
timer_cpus = *cpu_online_mask;
num_cpus = cpumask_weight(&timer_cpus);
}
/* Basic CPU round-robin, which avoids the current CPU. */
do {
cpu = cpumask_next(cpu, &timer_cpus);
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(&timer_cpus);
} while (cpu == smp_processor_id() && num_cpus > 1);
/* Expiring the timer at `jiffies` means it's the next tick. */
stack->timer.expires = jiffies;
add_timer_on(&stack->timer, cpu);
preempt_enable();
}
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
schedule();
stack->entropy = random_get_entropy();
}
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
timer_delete_sync(&stack->timer);
timer_destroy_on_stack(&stack->timer);
}
/**********************************************************************
*
* Userspace reader/writer interfaces.
*
* getrandom(2) is the primary modern interface into the RNG and should
* be used in preference to anything else.
*
* Reading from /dev/random has the same functionality as calling
* getrandom(2) with flags=0. In earlier versions, however, it had
* vastly different semantics and should therefore be avoided, to
* prevent backwards compatibility issues.
*
* Reading from /dev/urandom has the same functionality as calling
* getrandom(2) with flags=GRND_INSECURE. Because it does not block
* waiting for the RNG to be ready, it should not be used.
*
* Writing to either /dev/random or /dev/urandom adds entropy to
* the input pool but does not credit it.
*
* Polling on /dev/random indicates when the RNG is initialized, on
* the read side, and when it wants new entropy, on the write side.
*
* Both /dev/random and /dev/urandom have the same set of ioctls for
* adding entropy, getting the entropy count, zeroing the count, and
* reseeding the crng.
*
**********************************************************************/
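/*
 * Illustrative sketch (not part of this file): a minimal userspace caller of
 * getrandom(2), assuming glibc's <sys/random.h> wrapper is available. It
 * retries on EINTR; once the RNG is ready, requests of at most 256 bytes are
 * returned in full.
 *
 *	#include <sys/random.h>
 *	#include <errno.h>
 *
 *	static int get_seed(void *buf, size_t len)
 *	{
 *		ssize_t ret;
 *
 *		do {
 *			ret = getrandom(buf, len, 0);
 *		} while (ret < 0 && errno == EINTR);
 *		return ret == (ssize_t)len ? 0 : -1;
 *	}
 */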
SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
struct iov_iter iter;
int ret;
if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
return -EINVAL;
/*
* Requesting insecure and blocking randomness at the same time makes
* no sense.
*/
if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
if (!crng_ready() && !(flags & GRND_INSECURE)) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
if (unlikely(ret))
return ret;
}
ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
if (unlikely(ret))
return ret;
return get_random_bytes_user(&iter);
}
static __poll_t random_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &crng_init_wait, wait);
return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
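/*
 * Mix user-supplied bytes into the input pool in BLAKE2S_BLOCK_SIZE chunks.
 * No entropy is credited here; writers that want credit must use the
 * RNDADDENTROPY ioctl instead.
 */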
static ssize_t write_pool_user(struct iov_iter *iter)
{
u8 block[BLAKE2S_BLOCK_SIZE];
ssize_t ret = 0;
size_t copied;
if (unlikely(!iov_iter_count(iter)))
return 0;
for (;;) {
copied = copy_from_iter(block, sizeof(block), iter);
ret += copied;
mix_pool_bytes(block, copied);
if (!iov_iter_count(iter) || copied != sizeof(block))
break;
BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
if (ret % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
cond_resched();
}
}
memzero_explicit(block, sizeof(block));
return ret ? ret : -EFAULT;
}
static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
return write_pool_user(iter);
}
static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
static int maxwarn = 10;
/*
* Opportunistically attempt to initialize the RNG on platforms that
* have fast cycle counters, but don't (for now) require it to succeed.
*/
if (!crng_ready())
try_to_generate_entropy();
if (!crng_ready()) {
if (!ratelimit_disable && maxwarn <= 0)
ratelimit_state_inc_miss(&urandom_warning);
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
current->comm, iov_iter_count(iter));
}
}
return get_random_bytes_user(iter);
}
static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
if (!crng_ready() &&
((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
(kiocb->ki_filp->f_flags & O_NONBLOCK)))
return -EAGAIN;
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
return get_random_bytes_user(iter);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int __user *p = (int __user *)arg;
int ent_count;
switch (cmd) {
case RNDGETENTCNT:
/* Inherently racy, no point locking. */
if (put_user(input_pool.init_bits, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
if (ent_count < 0)
return -EINVAL;
credit_init_bits(ent_count);
return 0;
case RNDADDENTROPY: {
struct iov_iter iter;
ssize_t ret;
int len;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ent_count, p++))
return -EFAULT;
if (ent_count < 0)
return -EINVAL;
if (get_user(len, p++))
return -EFAULT;
ret = import_ubuf(ITER_SOURCE, p, len, &iter);
if (unlikely(ret))
return ret;
ret = write_pool_user(&iter);
if (unlikely(ret < 0))
return ret;
/* Since we're crediting, enforce that it was all written into the pool. */
if (unlikely(ret != len))
return -EFAULT;
credit_init_bits(ent_count);
return 0;
}
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/* No longer has any effect. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!crng_ready())
return -ENODATA;
crng_reseed(NULL);
return 0;
default:
return -EINVAL;
}
}
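/*
 * Illustrative sketch (not part of this file): feeding credited entropy from
 * a privileged userspace daemon via RNDADDENTROPY, assuming the uapi
 * struct rand_pool_info layout of { entropy_count, buf_size, buf[] }.
 *
 *	#include <linux/random.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int add_seed(int fd, const void *seed, int len, int entropy_bits)
 *	{
 *		union {
 *			struct rand_pool_info info;
 *			unsigned char raw[sizeof(struct rand_pool_info) + 512];
 *		} req;
 *
 *		if (len < 0 || len > 512)
 *			return -1;
 *		req.info.entropy_count = entropy_bits;
 *		req.info.buf_size = len;
 *		memcpy(req.info.buf, seed, len);
 *		return ioctl(fd, RNDADDENTROPY, &req.info);
 *	}
 */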
static int random_fasync(int fd, struct file *filp, int on)
{
return fasync_helper(fd, filp, on, &fasync);
}
const struct file_operations random_fops = {
.read_iter = random_read_iter,
.write_iter = random_write_iter,
.poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
};
const struct file_operations urandom_fops = {
.read_iter = urandom_read_iter,
.write_iter = random_write_iter,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
};
/********************************************************************
*
* Sysctl interface.
*
* These are partly unused legacy knobs with dummy values to not break
* userspace and partly still useful things. They are usually accessible
* in /proc/sys/kernel/random/ and are as follows:
*
* - boot_id - a UUID representing the current boot.
*
* - uuid - a random UUID, different each time the file is read.
*
* - poolsize - the number of bits of entropy that the input pool can
* hold, tied to the POOL_BITS constant.
*
* - entropy_avail - the number of bits of entropy currently in the
* input pool. Always <= poolsize.
*
* - write_wakeup_threshold - the amount of entropy in the input pool
* below which write polls to /dev/random will unblock, requesting
* more entropy, tied to the POOL_READY_BITS constant. It is writable
* to avoid breaking old userspaces, but writing to it does not
* change any behavior of the RNG.
*
* - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
* It is writable to avoid breaking old userspaces, but writing
* to it does not change any behavior of the RNG.
*
********************************************************************/
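/*
 * Illustrative sketch (not part of this file): the "uuid" file below can be
 * read as plain text from userspace; each read returns a fresh random UUID.
 *
 *	#include <stdio.h>
 *
 *	static int read_random_uuid(char *out, int len)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/random/uuid", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (!fgets(out, len, f)) {
 *			fclose(f);
 *			return -1;
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */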
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];
/*
* This function is used to return both the bootid UUID and a random
* UUID. The difference is whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
*/
static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
size_t *lenp, loff_t *ppos)
{
u8 tmp_uuid[UUID_SIZE], *uuid;
char uuid_string[UUID_STRING_LEN + 1];
struct ctl_table fake_table = {
.data = uuid_string,
.maxlen = UUID_STRING_LEN
};
if (write)
return -EPERM;
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
generate_random_uuid(uuid);
} else {
static DEFINE_SPINLOCK(bootid_spinlock);
spin_lock(&bootid_spinlock);
if (!uuid[8])
generate_random_uuid(uuid);
spin_unlock(&bootid_spinlock);
}
snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}
/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
size_t *lenp, loff_t *ppos)
{
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
static const struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "entropy_avail",
.data = &input_pool.init_bits,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "write_wakeup_threshold",
.data = &sysctl_random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_rointvec,
},
{
.procname = "urandom_min_reseed_secs",
.data = &sysctl_random_min_urandom_seed,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_rointvec,
},
{
.procname = "boot_id",
.data = &sysctl_bootid,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
{
.procname = "uuid",
.mode = 0444,
.proc_handler = proc_do_uuid,
},
};
/*
* random_init() is called before sysctl_init(),
* so we cannot call register_sysctl_init() in random_init()
*/
static int __init random_sysctls_init(void)
{
register_sysctl_init("kernel/random", random_table);
return 0;
}
device_initcall(random_sysctls_init);
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file contains the interface functions for the various time related
* system calls: time, stime, gettimeofday, settimeofday, adjtime
*
* Modification history:
*
* 1993-09-02 Philip Gladstone
* Created file with time related functions from sched/core.c and adjtimex()
* 1993-10-08 Torsten Duwe
* adjtime interface update and CMOS clock write code
* 1995-08-13 Torsten Duwe
* kernel PLL updated to 1994-12-13 specs (rfc-1589)
* 1999-01-16 Ulrich Windl
* Introduced error checking for many cases in adjtimex().
* Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
* Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
* (Even though the technical memorandum forbids it)
* 2004-07-14 Christoph Lameter
* Added getnstimeofday to allow the posix timer functions to return
* with nanosecond accuracy
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>
#include <generated/timeconst.h>
#include "timekeeping.h"
/*
* The timezone where the local system is located. Used as a default by some
* programs who obtain this value by using gettimeofday.
*/
struct timezone sys_tz;
EXPORT_SYMBOL(sys_tz);
#ifdef __ARCH_WANT_SYS_TIME
/*
* sys_time() can be implemented in user-level using
* sys_gettimeofday(). Is this for backwards compatibility? If so,
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
SYSCALL_DEFINE1(time, __kernel_old_time_t __user *, tloc)
{
__kernel_old_time_t i = (__kernel_old_time_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
return -EFAULT;
}
force_successful_syscall_return();
return i;
}
/*
* sys_stime() can be implemented in user-level using
* sys_settimeofday(). Is this for backwards compatibility? If so,
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
SYSCALL_DEFINE1(stime, __kernel_old_time_t __user *, tptr)
{
struct timespec64 tv;
int err;
if (get_user(tv.tv_sec, tptr))
return -EFAULT;
tv.tv_nsec = 0;
err = security_settime64(&tv, NULL);
if (err)
return err;
do_settimeofday64(&tv);
return 0;
}
#endif /* __ARCH_WANT_SYS_TIME */
#ifdef CONFIG_COMPAT_32BIT_TIME
#ifdef __ARCH_WANT_SYS_TIME32
/* old_time32_t is a 32 bit "long" and needs to get converted. */
SYSCALL_DEFINE1(time32, old_time32_t __user *, tloc)
{
old_time32_t i;
i = (old_time32_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
return -EFAULT;
}
force_successful_syscall_return();
return i;
}
SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr)
{
struct timespec64 tv;
int err;
if (get_user(tv.tv_sec, tptr))
return -EFAULT;
tv.tv_nsec = 0;
err = security_settime64(&tv, NULL);
if (err)
return err;
do_settimeofday64(&tv);
return 0;
}
#endif /* __ARCH_WANT_SYS_TIME32 */
#endif
SYSCALL_DEFINE2(gettimeofday, struct __kernel_old_timeval __user *, tv,
struct timezone __user *, tz)
{
if (likely(tv != NULL)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
if (put_user(ts.tv_sec, &tv->tv_sec) ||
put_user(ts.tv_nsec / 1000, &tv->tv_usec))
return -EFAULT;
}
if (unlikely(tz != NULL)) {
if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
return -EFAULT;
}
return 0;
}
/*
* In case for some reason the CMOS clock has not already been running
* in UTC, but in some local time: The first time we set the timezone,
* we will warp the clock so that it is ticking UTC time instead of
* local time. Presumably, if someone is setting the timezone then we
* are running in an environment where the programs understand about
* timezones. This should be done at boot time in the /etc/rc script,
* as soon as possible, so that the clock can be set right. Otherwise,
* various programs will get confused when the clock gets warped.
*/
int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
static int firsttime = 1;
int error = 0;
if (tv && !timespec64_valid_settod(tv))
return -EINVAL;
error = security_settime64(tv, tz);
if (error)
return error;
if (tz) {
/* Verify we're within the +-15 hrs range */
if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
return -EINVAL;
sys_tz = *tz;
update_vsyscall_tz();
if (firsttime) {
firsttime = 0;
if (!tv)
timekeeping_warp_clock();
}
}
if (tv)
return do_settimeofday64(tv);
return 0;
}
SYSCALL_DEFINE2(settimeofday, struct __kernel_old_timeval __user *, tv,
struct timezone __user *, tz)
{
struct timespec64 new_ts;
struct timezone new_tz;
if (tv) {
if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
get_user(new_ts.tv_nsec, &tv->tv_usec))
return -EFAULT;
if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
return -EINVAL;
new_ts.tv_nsec *= NSEC_PER_USEC;
}
if (tz) {
if (copy_from_user(&new_tz, tz, sizeof(*tz)))
return -EFAULT;
}
return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv,
struct timezone __user *, tz)
{
if (tv) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
if (put_user(ts.tv_sec, &tv->tv_sec) ||
put_user(ts.tv_nsec / 1000, &tv->tv_usec))
return -EFAULT;
}
if (tz) {
if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
return -EFAULT;
}
return 0;
}
COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
struct timezone __user *, tz)
{
struct timespec64 new_ts;
struct timezone new_tz;
if (tv) {
if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
get_user(new_ts.tv_nsec, &tv->tv_usec))
return -EFAULT;
if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
return -EINVAL;
new_ts.tv_nsec *= NSEC_PER_USEC;
}
if (tz) {
if (copy_from_user(&new_tz, tz, sizeof(*tz)))
return -EFAULT;
}
return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif
#ifdef CONFIG_64BIT
SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p)
{
struct __kernel_timex txc; /* Local copy of parameter */
int ret;
/* Copy the user data space into the kernel copy
* structure, but bear in mind that the structures
* may change.
*/
if (copy_from_user(&txc, txc_p, sizeof(struct __kernel_timex)))
return -EFAULT;
ret = do_adjtimex(&txc);
return copy_to_user(txc_p, &txc, sizeof(struct __kernel_timex)) ? -EFAULT : ret;
}
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
int get_old_timex32(struct __kernel_timex *txc, const struct old_timex32 __user *utp)
{
struct old_timex32 tx32;
memset(txc, 0, sizeof(struct __kernel_timex));
if (copy_from_user(&tx32, utp, sizeof(struct old_timex32)))
return -EFAULT;
txc->modes = tx32.modes;
txc->offset = tx32.offset;
txc->freq = tx32.freq;
txc->maxerror = tx32.maxerror;
txc->esterror = tx32.esterror;
txc->status = tx32.status;
txc->constant = tx32.constant;
txc->precision = tx32.precision;
txc->tolerance = tx32.tolerance;
txc->time.tv_sec = tx32.time.tv_sec;
txc->time.tv_usec = tx32.time.tv_usec;
txc->tick = tx32.tick;
txc->ppsfreq = tx32.ppsfreq;
txc->jitter = tx32.jitter;
txc->shift = tx32.shift;
txc->stabil = tx32.stabil;
txc->jitcnt = tx32.jitcnt;
txc->calcnt = tx32.calcnt;
txc->errcnt = tx32.errcnt;
txc->stbcnt = tx32.stbcnt;
return 0;
}
int put_old_timex32(struct old_timex32 __user *utp, const struct __kernel_timex *txc)
{
struct old_timex32 tx32;
memset(&tx32, 0, sizeof(struct old_timex32));
tx32.modes = txc->modes;
tx32.offset = txc->offset;
tx32.freq = txc->freq;
tx32.maxerror = txc->maxerror;
tx32.esterror = txc->esterror;
tx32.status = txc->status;
tx32.constant = txc->constant;
tx32.precision = txc->precision;
tx32.tolerance = txc->tolerance;
tx32.time.tv_sec = txc->time.tv_sec;
tx32.time.tv_usec = txc->time.tv_usec;
tx32.tick = txc->tick;
tx32.ppsfreq = txc->ppsfreq;
tx32.jitter = txc->jitter;
tx32.shift = txc->shift;
tx32.stabil = txc->stabil;
tx32.jitcnt = txc->jitcnt;
tx32.calcnt = txc->calcnt;
tx32.errcnt = txc->errcnt;
tx32.stbcnt = txc->stbcnt;
tx32.tai = txc->tai;
if (copy_to_user(utp, &tx32, sizeof(struct old_timex32)))
return -EFAULT;
return 0;
}
SYSCALL_DEFINE1(adjtimex_time32, struct old_timex32 __user *, utp)
{
struct __kernel_timex txc;
int err, ret;
err = get_old_timex32(&txc, utp);
if (err)
return err;
ret = do_adjtimex(&txc);
err = put_old_timex32(utp, &txc);
if (err)
return err;
return ret;
}
#endif
/**
* jiffies_to_msecs - Convert jiffies to milliseconds
* @j: jiffies value
*
* Avoid unnecessary multiplications/divisions in the
* two most common HZ cases.
*
* Return: milliseconds value
*/
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
HZ_TO_MSEC_SHR32;
# else
return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
/**
* jiffies_to_usecs - Convert jiffies to microseconds
* @j: jiffies value
*
* Return: microseconds value
*/
unsigned int jiffies_to_usecs(const unsigned long j)
{
/*
* HZ usually doesn't go much beyond MSEC_PER_SEC.
* jiffies_to_usecs() and usecs_to_jiffies() depend on that.
*/
BUILD_BUG_ON(HZ > USEC_PER_SEC);
#if !(USEC_PER_SEC % HZ)
return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);
/**
* mktime64 - Converts date to seconds.
* @year0: year to convert
* @mon0: month to convert
* @day: day to convert
* @hour: hour to convert
* @min: minute to convert
* @sec: second to convert
*
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
* => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
*
* [For the Julian calendar (which was used in Russia before 1917,
* Britain & colonies before 1752, anywhere else before 1582,
* and is still in use by some communities) leave out the
* -year/100+year/400 terms, and add 10.]
*
* This algorithm was first published by Gauss (I think).
*
* A leap second can be indicated by calling this function with sec as
* 60 (allowable under ISO 8601). The leap second is treated the same
* as the following second since they don't exist in UNIX time.
*
* An encoding of midnight at the end of the day as 24:00:00 - i.e. midnight
* tomorrow - (allowable under ISO 8601) is supported.
*
* Return: seconds since the epoch time for the given input date
*/
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec)
{
unsigned int mon = mon0, year = year0;
/* 1..12 -> 11,12,1..10 */
if (0 >= (int) (mon -= 2)) {
mon += 12; /* Puts Feb last since it has leap day */
year -= 1;
}
return ((((time64_t)
(year/4 - year/100 + year/400 + 367*mon/12 + day) +
year*365 - 719499
)*24 + hour /* now have hours - midnight tomorrow handled here */
)*60 + min /* now have minutes */
)*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);
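/*
 * Worked example (illustrative, not part of the original file): for
 * 2000-01-01 00:00:00 UTC the algorithm above shifts the date to mon=11,
 * year=1999, yielding 10957 days since the epoch, so:
 *
 *	time64_t t = mktime64(2000, 1, 1, 0, 0, 0);	// t == 946684800
 */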
struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec)
{
struct timespec64 ts = ns_to_timespec64(nsec);
struct __kernel_old_timeval tv;
tv.tv_sec = ts.tv_sec;
tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;
return tv;
}
EXPORT_SYMBOL(ns_to_kernel_old_timeval);
/**
* set_normalized_timespec64 - set timespec sec and nsec parts and normalize
*
* @ts: pointer to timespec variable to be set
* @sec: seconds to set
* @nsec: nanoseconds to set
*
* Set seconds and nanoseconds field of a timespec variable and
* normalize to the timespec storage format
*
* Note: The tv_nsec part is always in the range of 0 <= tv_nsec < NSEC_PER_SEC.
* For negative values only the tv_sec field is negative !
*/
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
while (nsec >= NSEC_PER_SEC) {
/*
* The following asm() prevents the compiler from
* optimising this loop into a modulo operation. See
* also __iter_div_u64_rem() in include/linux/time.h
*/
asm("" : "+rm"(nsec));
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
asm("" : "+rm"(nsec));
nsec += NSEC_PER_SEC;
--sec;
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);
/**
* ns_to_timespec64 - Convert nanoseconds to timespec64
* @nsec: the nanoseconds value to be converted
*
* Return: the timespec64 representation of the nsec parameter.
*/
struct timespec64 ns_to_timespec64(s64 nsec)
{
struct timespec64 ts = { 0, 0 };
s32 rem;
if (likely(nsec > 0)) {
ts.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
ts.tv_nsec = rem;
} else if (nsec < 0) {
/*
* With negative times, tv_sec points to the earlier
* second, and tv_nsec counts the nanoseconds since
* then, so tv_nsec is always a positive number.
*/
ts.tv_sec = -div_u64_rem(-nsec - 1, NSEC_PER_SEC, &rem) - 1;
ts.tv_nsec = NSEC_PER_SEC - rem - 1;
}
return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
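/*
 * Worked example (illustrative, not part of the original file): negative
 * inputs land in the earlier second with a non-negative tv_nsec, e.g.:
 *
 *	struct timespec64 ts = ns_to_timespec64(-1500000000);
 *	// ts.tv_sec == -2, ts.tv_nsec == 500000000
 */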
/**
* __msecs_to_jiffies: - convert milliseconds to jiffies
* @m: time in milliseconds
*
* conversion is done as follows:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
* for the details see _msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
* code, __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* The _msecs_to_jiffies helpers are the HZ dependent conversion
* routines found in include/linux/jiffies.h
*
* Return: jiffies value
*/
unsigned long __msecs_to_jiffies(const unsigned int m)
{
/*
* Negative value, means infinite timeout:
*/
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);
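/*
 * Worked example (illustrative, assuming HZ == 250 so MSEC_PER_SEC is a
 * multiple of HZ): the _msecs_to_jiffies() helper rounds up to the next tick
 * rather than truncating:
 *
 *	msecs_to_jiffies(100);	// 25 jiffies
 *	msecs_to_jiffies(101);	// 26 jiffies, not truncated to 25
 */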
/**
* __usecs_to_jiffies: - convert microseconds to jiffies
* @u: time in microseconds
*
* Return: jiffies value
*/
unsigned long __usecs_to_jiffies(const unsigned int u)
{
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);
/**
* timespec64_to_jiffies - convert a timespec64 value to jiffies
* @value: pointer to &struct timespec64
*
* The TICK_NSEC - 1 rounds up the value to the next resolution. Note
* that a remainder subtract here would not do the right thing as the
* resolution values don't fall on second boundaries. I.e. the line:
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
* Note that due to the small error in the multiplier here, this
* rounding is incorrect for sufficiently large values of tv_nsec, but
* well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
* OK.
*
* Rather, we just shift the bits off the right.
*
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
* value to a scaled second value.
*
* Return: jiffies value
*/
unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
u64 sec = value->tv_sec;
long nsec = value->tv_nsec + TICK_NSEC - 1;
if (sec >= MAX_SEC_IN_JIFFIES) {
sec = MAX_SEC_IN_JIFFIES;
nsec = 0;
}
return ((sec * SEC_CONVERSION) +
(((u64)nsec * NSEC_CONVERSION) >>
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timespec64_to_jiffies);
/**
* jiffies_to_timespec64 - convert jiffies value to &struct timespec64
* @jiffies: jiffies value
* @value: pointer to &struct timespec64
*/
void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u32 rem;
value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
NSEC_PER_SEC, &rem);
value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);
/*
* Convert jiffies/jiffies_64 to clock_t and back.
*/
/**
* jiffies_to_clock_t - Convert jiffies to clock_t
* @x: jiffies value
*
* Return: jiffies converted to clock_t (CLOCKS_PER_SEC)
*/
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
return x * (USER_HZ / HZ);
# else
return x / (HZ / USER_HZ);
# endif
#else
return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
/**
* clock_t_to_jiffies - Convert clock_t to jiffies
* @x: clock_t value
*
* Return: clock_t value converted to jiffies
*/
unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
if (x >= ~0UL / (HZ / USER_HZ))
return ~0UL;
return x * (HZ / USER_HZ);
#else
/* Don't worry about loss of precision here .. */
if (x >= ~0UL / HZ * USER_HZ)
return ~0UL;
/* .. but do try to contain it here */
return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);
/**
* jiffies_64_to_clock_t - Convert jiffies_64 to clock_t
* @x: jiffies_64 value
*
* Return: jiffies_64 value converted to 64-bit "clock_t" (CLOCKS_PER_SEC)
*/
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
x = div_u64(x, HZ / USER_HZ);
# else
/* Nothing to do */
# endif
#else
/*
* There are better ways that don't overflow early,
* but even this doesn't overflow in hundreds of years
* in 64 bits, so..
*/
x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);
/**
* nsec_to_clock_t - Convert nsec value to clock_t
* @x: nsec value
*
* Return: nsec value converted to 64-bit "clock_t" (CLOCKS_PER_SEC)
*/
u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
* overflow after 64.99 years.
* exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
*/
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
/**
* jiffies64_to_nsecs - Convert jiffies64 to nanoseconds
* @j: jiffies64 value
*
* Return: nanoseconds value
*/
u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
return (NSEC_PER_SEC / HZ) * j;
#else
return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);
/**
* jiffies64_to_msecs - Convert jiffies64 to milliseconds
* @j: jiffies64 value
*
* Return: milliseconds value
*/
u64 jiffies64_to_msecs(const u64 j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
return (MSEC_PER_SEC / HZ) * j;
#else
return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_msecs);
/**
* nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
*
* @n: nsecs in u64
*
* Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
* And this doesn't return MAX_JIFFY_OFFSET since this function is designed
* for scheduler, not for use in device drivers to calculate timeout value.
*
* note:
* NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
* ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
*
* Return: nsecs converted to jiffies64 value
*/
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
/* overflow after 292 years if HZ = 1024 */
return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* Generic case - optimized for cases where HZ is a multiple of 3.
* overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
*/
return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);
/**
* nsecs_to_jiffies - Convert nsecs in u64 to jiffies
*
* @n: nsecs in u64
*
* Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
* And this doesn't return MAX_JIFFY_OFFSET since this function is designed
* for scheduler, not for use in device drivers to calculate timeout value.
*
* note:
* NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
* ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
*
* Return: nsecs converted to jiffies value
*/
unsigned long nsecs_to_jiffies(u64 n)
{
return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
/**
* timespec64_add_safe - Add two timespec64 values and do a safety check
* for overflow.
* @lhs: first (left) timespec64 to add
* @rhs: second (right) timespec64 to add
*
* It's assumed that both values are valid (>= 0) and that each
* timespec64 is in normalized form.
*
* Return: sum of @lhs + @rhs
*/
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
const struct timespec64 rhs)
{
struct timespec64 res;
set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
res.tv_sec = TIME64_MAX;
res.tv_nsec = 0;
}
return res;
}
EXPORT_SYMBOL_GPL(timespec64_add_safe);
/**
* get_timespec64 - get user's time value into kernel space
* @ts: destination &struct timespec64
* @uts: user's time value as &struct __kernel_timespec
*
* Handles compat or 32-bit modes.
*
* Return: 0 on success or negative errno on error
*/
int get_timespec64(struct timespec64 *ts,
const struct __kernel_timespec __user *uts)
{
struct __kernel_timespec kts;
int ret;
ret = copy_from_user(&kts, uts, sizeof(kts));
if (ret)
return -EFAULT;
ts->tv_sec = kts.tv_sec;
/* Zero out the padding in compat mode */
if (in_compat_syscall())
kts.tv_nsec &= 0xFFFFFFFFUL;
/* In 32-bit mode, this drops the padding */
ts->tv_nsec = kts.tv_nsec;
return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);
/**
* put_timespec64 - convert timespec64 value to __kernel_timespec format and
* copy the latter to userspace
* @ts: input &struct timespec64
* @uts: user's &struct __kernel_timespec
*
* Return: 0 on success or negative errno on error
*/
int put_timespec64(const struct timespec64 *ts,
struct __kernel_timespec __user *uts)
{
struct __kernel_timespec kts = {
.tv_sec = ts->tv_sec,
.tv_nsec = ts->tv_nsec
};
return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);
static int __get_old_timespec32(struct timespec64 *ts64,
const struct old_timespec32 __user *cts)
{
struct old_timespec32 ts;
int ret;
ret = copy_from_user(&ts, cts, sizeof(ts));
if (ret)
return -EFAULT;
ts64->tv_sec = ts.tv_sec;
ts64->tv_nsec = ts.tv_nsec;
return 0;
}
static int __put_old_timespec32(const struct timespec64 *ts64,
struct old_timespec32 __user *cts)
{
struct old_timespec32 ts = {
.tv_sec = ts64->tv_sec,
.tv_nsec = ts64->tv_nsec
};
return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}
/**
* get_old_timespec32 - get user's old-format time value into kernel space
* @ts: destination &struct timespec64
* @uts: user's old-format time value (&struct old_timespec32)
*
* Handles X86_X32_ABI compatibility conversion.
*
* Return: 0 on success or negative errno on error
*/
int get_old_timespec32(struct timespec64 *ts, const void __user *uts)
{
if (COMPAT_USE_64BIT_TIME)
return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
else
return __get_old_timespec32(ts, uts);
}
EXPORT_SYMBOL_GPL(get_old_timespec32);
/**
* put_old_timespec32 - convert timespec64 value to &struct old_timespec32 and
* copy the latter to userspace
* @ts: input &struct timespec64
* @uts: user's &struct old_timespec32
*
* Handles X86_X32_ABI compatibility conversion.
*
* Return: 0 on success or negative errno on error
*/
int put_old_timespec32(const struct timespec64 *ts, void __user *uts)
{
if (COMPAT_USE_64BIT_TIME)
return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
else
return __put_old_timespec32(ts, uts);
}
EXPORT_SYMBOL_GPL(put_old_timespec32);
/**
* get_itimerspec64 - get user's &struct __kernel_itimerspec into kernel space
* @it: destination &struct itimerspec64
* @uit: user's &struct __kernel_itimerspec
*
* Return: 0 on success or negative errno on error
*/
int get_itimerspec64(struct itimerspec64 *it,
const struct __kernel_itimerspec __user *uit)
{
int ret;
ret = get_timespec64(&it->it_interval, &uit->it_interval);
if (ret)
return ret;
ret = get_timespec64(&it->it_value, &uit->it_value);
return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);
/**
* put_itimerspec64 - convert &struct itimerspec64 to __kernel_itimerspec format
* and copy the latter to userspace
* @it: input &struct itimerspec64
* @uit: user's &struct __kernel_itimerspec
*
* Return: 0 on success or negative errno on error
*/
int put_itimerspec64(const struct itimerspec64 *it,
struct __kernel_itimerspec __user *uit)
{
int ret;
ret = put_timespec64(&it->it_interval, &uit->it_interval);
if (ret)
return ret;
ret = put_timespec64(&it->it_value, &uit->it_value);
return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);
/**
* get_old_itimerspec32 - get user's &struct old_itimerspec32 into kernel space
* @its: destination &struct itimerspec64
* @uits: user's &struct old_itimerspec32
*
* Return: 0 on success or negative errno on error
*/
int get_old_itimerspec32(struct itimerspec64 *its,
const struct old_itimerspec32 __user *uits)
{
if (__get_old_timespec32(&its->it_interval, &uits->it_interval) ||
__get_old_timespec32(&its->it_value, &uits->it_value))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(get_old_itimerspec32);
/**
* put_old_itimerspec32 - convert &struct itimerspec64 to &struct
* old_itimerspec32 and copy the latter to userspace
* @its: input &struct itimerspec64
* @uits: user's &struct old_itimerspec32
*
* Return: 0 on success or negative errno on error
*/
int put_old_itimerspec32(const struct itimerspec64 *its,
struct old_itimerspec32 __user *uits)
{
if (__put_old_timespec32(&its->it_interval, &uits->it_interval) ||
__put_old_timespec32(&its->it_value, &uits->it_value))
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(put_old_itimerspec32);
// SPDX-License-Identifier: GPL-2.0
/*
* Lockless hierarchical page accounting & limiting
*
* Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
*/
#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>
static bool track_protection(struct page_counter *c)
{
return c->protection_support;
}
static void propagate_protected_usage(struct page_counter *c,
unsigned long usage)
{
unsigned long protected, old_protected;
long delta;
if (!c->parent)
return;
protected = min(usage, READ_ONCE(c->min));
old_protected = atomic_long_read(&c->min_usage);
if (protected != old_protected) {
old_protected = atomic_long_xchg(&c->min_usage, protected);
delta = protected - old_protected;
if (delta)
atomic_long_add(delta, &c->parent->children_min_usage);
}
protected = min(usage, READ_ONCE(c->low));
old_protected = atomic_long_read(&c->low_usage);
if (protected != old_protected) {
old_protected = atomic_long_xchg(&c->low_usage, protected);
delta = protected - old_protected;
if (delta)
atomic_long_add(delta, &c->parent->children_low_usage);
}
}
/**
* page_counter_cancel - take pages out of the local counter
* @counter: counter
* @nr_pages: number of pages to cancel
*/
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
long new;
new = atomic_long_sub_return(nr_pages, &counter->usage);
/* More uncharges than charges? */
if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
new, nr_pages)) {
new = 0;
atomic_long_set(&counter->usage, new);
}
if (track_protection(counter))
propagate_protected_usage(counter, new);
}
/**
* page_counter_charge - hierarchically charge pages
* @counter: counter
* @nr_pages: number of pages to charge
*
* NOTE: This does not consider any configured counter limits.
*/
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
bool protection = track_protection(counter);
for (c = counter; c; c = c->parent) {
long new;
new = atomic_long_add_return(nr_pages, &c->usage);
if (protection)
propagate_protected_usage(c, new);
/*
* This is indeed racy, but we can live with some
* inaccuracy in the watermark.
*
* Notably, we have two watermarks to allow for both a globally
* visible peak and one that can be reset at a smaller scope.
*
* Since we reset both watermarks when the global reset occurs,
* we can guarantee that watermark >= local_watermark, so we
* don't need to do both comparisons every time.
*
* On systems with branch predictors, the inner condition should
* be almost free.
*/
if (new > READ_ONCE(c->local_watermark)) {
WRITE_ONCE(c->local_watermark, new);
if (new > READ_ONCE(c->watermark))
WRITE_ONCE(c->watermark, new);
}
}
}
/**
* page_counter_try_charge - try to hierarchically charge pages
* @counter: counter
* @nr_pages: number of pages to charge
* @fail: points first counter to hit its limit, if any
*
* Returns %true on success, or %false and @fail if the counter or one
* of its ancestors has hit its configured limit.
*/
bool page_counter_try_charge(struct page_counter *counter,
unsigned long nr_pages,
struct page_counter **fail)
{
struct page_counter *c;
bool protection = track_protection(counter);
bool track_failcnt = counter->track_failcnt;
for (c = counter; c; c = c->parent) {
long new;
/*
* Charge speculatively to avoid an expensive CAS. If
* a bigger charge fails, it might falsely lock out a
* racing smaller charge and send it into reclaim
* early, but the error is limited to the difference
* between the two sizes, which is less than 2M/4M in
* case of a THP locking out a regular page charge.
*
* The atomic_long_add_return() implies a full memory
* barrier between incrementing the count and reading
* the limit. When racing with page_counter_set_max(),
* we either see the new limit or the setter sees the
* counter has changed and retries.
*/
new = atomic_long_add_return(nr_pages, &c->usage);
if (new > c->max) {
atomic_long_sub(nr_pages, &c->usage);
/*
* This is racy, but we can live with some
* inaccuracy in the failcnt which is only used
* to report stats.
*/
if (track_failcnt)
data_race(c->failcnt++);
*fail = c;
goto failed;
}
if (protection)
propagate_protected_usage(c, new);
/* see comment on page_counter_charge */
if (new > READ_ONCE(c->local_watermark)) {
WRITE_ONCE(c->local_watermark, new);
if (new > READ_ONCE(c->watermark))
WRITE_ONCE(c->watermark, new);
}
}
return true;
failed:
for (c = counter; c != *fail; c = c->parent)
page_counter_cancel(c, nr_pages);
return false;
}
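/*
 * Illustrative usage sketch (assumed caller, not part of this file): the
 * usual pattern is to try-charge, back off against the counter reported in
 * @fail on failure, and uncharge the same amount once the pages are released.
 *
 *	struct page_counter *fail;
 *
 *	if (!page_counter_try_charge(counter, nr_pages, &fail)) {
 *		// "fail" is the ancestor that hit its limit; reclaim or bail.
 *		return -ENOMEM;
 *	}
 *	...
 *	page_counter_uncharge(counter, nr_pages);
 */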
/**
* page_counter_uncharge - hierarchically uncharge pages
* @counter: counter
* @nr_pages: number of pages to uncharge
*/
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
for (c = counter; c; c = c->parent)
page_counter_cancel(c, nr_pages);
}
/**
* page_counter_set_max - set the maximum number of pages allowed
* @counter: counter
* @nr_pages: limit to set
*
* Returns 0 on success, -EBUSY if the current number of pages on the
* counter already exceeds the specified limit.
*
* The caller must serialize invocations on the same counter.
*/
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
for (;;) {
unsigned long old;
long usage;
/*
* Update the limit while making sure that it's not
* below the concurrently-changing counter value.
*
* The xchg implies two full memory barriers before
* and after, so the read-swap-read is ordered and
* ensures coherency with page_counter_try_charge():
* that function modifies the count before checking
* the limit, so if it sees the old limit, we see the
* modified counter and retry.
*/
usage = page_counter_read(counter);
if (usage > nr_pages)
return -EBUSY;
old = xchg(&counter->max, nr_pages);
if (page_counter_read(counter) <= usage || nr_pages >= old)
return 0;
counter->max = old;
cond_resched();
}
}
/**
* page_counter_set_min - set the amount of protected memory
* @counter: counter
* @nr_pages: value to set
*
* The caller must serialize invocations on the same counter.
*/
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
WRITE_ONCE(counter->min, nr_pages);
for (c = counter; c; c = c->parent)
propagate_protected_usage(c, atomic_long_read(&c->usage));
}
/**
* page_counter_set_low - set the amount of protected memory
* @counter: counter
* @nr_pages: value to set
*
* The caller must serialize invocations on the same counter.
*/
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
WRITE_ONCE(counter->low, nr_pages);
for (c = counter; c; c = c->parent)
propagate_protected_usage(c, atomic_long_read(&c->usage));
}
/**
* page_counter_memparse - memparse() for page counter limits
* @buf: string to parse
* @max: string meaning maximum possible value
* @nr_pages: returns the result in number of pages
*
* Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
* limited to %PAGE_COUNTER_MAX.
*/
int page_counter_memparse(const char *buf, const char *max,
unsigned long *nr_pages)
{
char *end;
u64 bytes;
if (!strcmp(buf, max)) {
*nr_pages = PAGE_COUNTER_MAX;
return 0;
}
bytes = memparse(buf, &end);
if (*end != '\0')
return -EINVAL;
*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);
return 0;
}
#if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM)
/*
* This function calculates an individual page counter's effective
* protection which is derived from its own memory.min/low, its
* parent's and siblings' settings, as well as the actual memory
* distribution in the tree.
*
* The following rules apply to the effective protection values:
*
* 1. At the first level of reclaim, effective protection is equal to
* the declared protection in memory.min and memory.low.
*
* 2. To enable safe delegation of the protection configuration, at
* subsequent levels the effective protection is capped to the
* parent's effective protection.
*
* 3. To make complex and dynamic subtrees easier to configure, the
* user is allowed to overcommit the declared protection at a given
* level. If that is the case, the parent's effective protection is
* distributed to the children in proportion to how much protection
* they have declared and how much of it they are utilizing.
*
* This makes distribution proportional, but also work-conserving:
* if one counter claims much more protection than it uses memory,
* the unused remainder is available to its siblings.
*
* 4. Conversely, when the declared protection is undercommitted at a
* given level, the distribution of the larger parental protection
* budget is NOT proportional. A counter's protection from a sibling
* is capped to its own memory.min/low setting.
*
* 5. However, to allow protecting recursive subtrees from each other
* without having to declare each individual counter's fixed share
* of the ancestor's claim to protection, any unutilized -
* "floating" - protection from up the tree is distributed in
* proportion to each counter's *usage*. This makes the protection
* neutral wrt sibling cgroups and lets them compete freely over
* the shared parental protection budget, but it protects the
* subtree as a whole from neighboring subtrees.
*
* Note that 4. and 5. are not in conflict: 4. is about protecting
* against immediate siblings whereas 5. is about protecting against
* neighboring subtrees.
*/
static unsigned long effective_protection(unsigned long usage,
unsigned long parent_usage,
unsigned long setting,
unsigned long parent_effective,
unsigned long siblings_protected,
bool recursive_protection)
{
unsigned long protected;
unsigned long ep;
protected = min(usage, setting);
/*
* If all cgroups at this level combined claim and use more
* protection than what the parent affords them, distribute
* shares in proportion to utilization.
*
* We are using actual utilization rather than the statically
* claimed protection in order to be work-conserving: claimed
* but unused protection is available to siblings that would
* otherwise get a smaller chunk than what they claimed.
*/
if (siblings_protected > parent_effective)
return protected * parent_effective / siblings_protected;
/*
* Ok, utilized protection of all children is within what the
* parent affords them, so we know whatever this child claims
* and utilizes is effectively protected.
*
* If there is unprotected usage beyond this value, reclaim
* will apply pressure in proportion to that amount.
*
* If there is unutilized protection, the cgroup will be fully
* shielded from reclaim, but we do return a smaller value for
* protection than what the group could enjoy in theory. This
* is okay. With the overcommit distribution above, effective
* protection is always dependent on how memory is actually
* consumed among the siblings anyway.
*/
ep = protected;
/*
* If the children aren't claiming (all of) the protection
* afforded to them by the parent, distribute the remainder in
* proportion to the (unprotected) memory of each cgroup. That
* way, cgroups that aren't explicitly prioritized wrt each
* other compete freely over the allowance, but they are
* collectively protected from neighboring trees.
*
* We're using unprotected memory for the weight so that if
* some cgroups DO claim explicit protection, we don't protect
* the same bytes twice.
*
* Check both usage and parent_usage against the respective
* protected values. One should imply the other, but they
* aren't read atomically - make sure the division is sane.
*/
if (!recursive_protection)
return ep;
if (parent_effective > siblings_protected &&
parent_usage > siblings_protected &&
usage > protected) {
unsigned long unclaimed;
unclaimed = parent_effective - siblings_protected;
unclaimed *= usage - protected;
unclaimed /= parent_usage - siblings_protected;
ep += unclaimed;
}
return ep;
}
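/*
 * Worked example (illustrative, not part of the original file) for the
 * overcommitted case above: if the parent's effective protection is 100
 * pages and two children each declare and use 80 pages, then
 * siblings_protected == 160 > parent_effective, so each child ends up with
 * 80 * 100 / 160 == 50 effectively protected pages.
 */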
/**
* page_counter_calculate_protection - check if memory consumption is in the normal range
* @root: the top ancestor of the sub-tree being checked
* @counter: the page_counter to update
* @recursive_protection: Whether to use memory_recursiveprot behavior.
*
* Calculates elow/emin thresholds for given page_counter.
*
* WARNING: This function is not stateless! It can only be used as part
* of a top-down tree iteration, not for isolated queries.
*/
void page_counter_calculate_protection(struct page_counter *root,
struct page_counter *counter,
bool recursive_protection)
{
unsigned long usage, parent_usage;
struct page_counter *parent = counter->parent;
/*
* Effective values of the reclaim targets are ignored so they
* can be stale. Have a look at mem_cgroup_protection for more
* details.
* TODO: calculation should be more robust so that we do not need
* that special casing.
*/
if (root == counter)
return;
usage = page_counter_read(counter);
if (!usage)
return;
if (parent == root) {
counter->emin = READ_ONCE(counter->min);
counter->elow = READ_ONCE(counter->low);
return;
}
parent_usage = page_counter_read(parent);
WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
READ_ONCE(counter->min),
READ_ONCE(parent->emin),
atomic_long_read(&parent->children_min_usage),
recursive_protection));
WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
READ_ONCE(counter->low),
READ_ONCE(parent->elow),
atomic_long_read(&parent->children_low_usage),
recursive_protection));
}
#endif /* CONFIG_MEMCG || CONFIG_CGROUP_DMEM */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* NOTE:
*
* This header has accumulated a lot of mutually unrelated stuff.
* Its contents are being split out gradually while keeping
* backward compatibility. That's why it's highly recommended NOT to
* include this header inside another header file, especially under
* generic or architectural include/ directory.
*/
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
#include <linux/stdarg.h>
#include <linux/align.h>
#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/bitops.h>
#include <linux/hex.h>
#include <linux/kstrtox.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/typecheck.h>
#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
#include <linux/sprintf.h>
#include <linux/static_call_types.h>
#include <linux/instruction_pointer.h>
#include <linux/util_macros.h>
#include <linux/wordpart.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
#define STACK_MAGIC 0xdeadbeef
struct completion;
struct user;
#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
extern int __cond_resched(void);
# define might_resched() __cond_resched()
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
extern int __cond_resched(void);
DECLARE_STATIC_CALL(might_resched, __cond_resched);
static __always_inline void might_resched(void)
{
static_call_mod(might_resched)();
}
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
extern int dynamic_might_resched(void);
# define might_resched() dynamic_might_resched()
#else
# define might_resched() do { } while (0)
#endif /* CONFIG_PREEMPT_* */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void __might_resched(const char *file, int line, unsigned int offsets);
extern void __might_sleep(const char *file, int line);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
extern void __cant_migrate(const char *file, int line);
/**
* might_sleep - annotation for functions that can sleep
*
* this macro will print a stack trace if it is executed in an atomic
* context (spinlock, irq-handler, ...). Additional sections where blocking is
* not allowed can be annotated with non_block_start() and non_block_end()
* pairs.
*
* This is a useful debugging help to be able to catch problems early and not
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
/**
* cant_sleep - annotation for functions that cannot sleep
*
* this macro will print a stack trace if it is executed with preemption enabled
*/
# define cant_sleep() \
do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
/**
* cant_migrate - annotation for functions that cannot migrate
*
* Will print a stack trace if executed in code which is migratable
*/
# define cant_migrate() \
do { \
if (IS_ENABLED(CONFIG_SMP)) \
__cant_migrate(__FILE__, __LINE__); \
} while (0)
/**
* non_block_start - annotate the start of section where sleeping is prohibited
*
* This is on behalf of the oom reaper, specifically when it is calling the mmu
* notifiers. The problem is that if the notifier were to block on, for example,
* mutex_lock() and if the process which holds that mutex were to perform a
* sleeping memory allocation, the oom reaper is now blocked on completion of
* that memory allocation. Other blocking calls like wait_event() pose similar
* issues.
*/
# define non_block_start() (current->non_block_count++)
/**
* non_block_end - annotate the end of section where sleeping is prohibited
*
* Closes a section opened by non_block_start().
*/
# define non_block_end() WARN_ON(current->non_block_count-- == 0)
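/*
 * Pairing sketch (the callback name is illustrative only): wrap the
 * region in which the invoked code must not block.
 *
 *	non_block_start();
 *	ret = invalidate_callback(data);	/* must not sleep */
 *	non_block_end();
 */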
#else
static inline void __might_resched(const char *file, int line,
unsigned int offsets) { }
static inline void __might_sleep(const char *file, int line) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
# define cant_migrate() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
# define non_block_start() do { } while (0)
# define non_block_end() do { } while (0)
#endif
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
void do_exit(long error_code) __noreturn;
extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
extern void bust_spinlocks(int yes);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
/**
* enum system_states - Values used for system_state.
*
* @SYSTEM_BOOTING: %0, no init needed
* @SYSTEM_SCHEDULING: system is ready for scheduling; OK to use RCU
* @SYSTEM_FREEING_INITMEM: system is freeing all of initmem; almost running
* @SYSTEM_RUNNING: system is up and running
* @SYSTEM_HALT: system entered clean system halt state
* @SYSTEM_POWER_OFF: system entered shutdown/clean power off state
* @SYSTEM_RESTART: system entered emergency power off or normal restart
* @SYSTEM_SUSPEND: system entered suspend or hibernate state
*
* Note:
* Ordering of the states must not be changed
* as code checks for <, <=, >, >= STATE.
*/
enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
SYSTEM_SUSPEND,
};
extern enum system_states system_state;
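/*
 * Because the states are ordered, callers can use relational checks.
 * A minimal sketch of an early-boot fallback (hypothetical caller):
 *
 *	if (system_state < SYSTEM_RUNNING)
 *		return -EBUSY;	/* defer the work until the system is up */
 */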
/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
* Use tracing_on/tracing_off when you want to quickly turn on or off
* tracing. It simply enables or disables the recording of the trace events.
* This also corresponds to the user space /sys/kernel/tracing/tracing_on
* file, which gives a means for the kernel and userspace to interact.
* Place a tracing_off() in the kernel where you want tracing to end.
* From user space, examine the trace, and then echo 1 > tracing_on
* to continue tracing.
*
* tracing_stop/tracing_start has slightly more overhead. It is used
* by things like suspend to ram where disabling the recording of the
* trace is not enough, but tracing must actually stop because things
* like calling smp_processor_id() may crash the system.
*
* Most likely, you want to use tracing_on/tracing_off.
*/
enum ftrace_dump_mode {
DUMP_NONE,
DUMP_ALL,
DUMP_ORIG,
DUMP_PARAM,
};
#ifdef CONFIG_TRACING
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);
extern void tracing_start(void);
extern void tracing_stop(void);
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \
do { \
if (0) \
____trace_printk_check_format(fmt, ##args); \
} while (0)
/**
* trace_printk - printf formatting in the ftrace buffer
* @fmt: the printf format for printing
*
* Note: __trace_printk is an internal function for trace_printk() and
* the @ip is passed in via the trace_printk() macro.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering printk-like
* tracing calls throughout the code, a developer can quickly see
* where problems are occurring.
*
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in
* your code. (Extra memory is used for special buffers that are
* allocated when trace_printk() is used.)
*
* A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
* By stringifying the args and checking the size we can tell
* whether or not there are args. __stringify((__VA_ARGS__)) will
* turn into "()\0" with a size of 3 when there are no args, anything
* else will be bigger. All we need to do is define a string to this,
* and then take its size and compare to 3. If it's bigger, use
* do_trace_printk() otherwise, optimize it to trace_puts(). Then just
* let gcc optimize the rest.
*/
#define trace_printk(fmt, ...) \
do { \
char _______STR[] = __stringify((__VA_ARGS__)); \
if (sizeof(_______STR) > 3) \
do_trace_printk(fmt, ##__VA_ARGS__); \
else \
trace_puts(fmt); \
} while (0)
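/*
 * Usage sketch (temporary debug aid, to be removed before submitting):
 * the single-argument form below compiles down to trace_puts(), while
 * the formatted one goes through do_trace_printk().
 *
 *	trace_printk("entering fast path\n");
 *	trace_printk("req=%p len=%u\n", req, len);	/* req/len are illustrative */
 */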
#define do_trace_printk(fmt, args...) \
do { \
static const char *trace_printk_fmt __used \
__section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_printk_check_format(fmt, ##args); \
\
if (__builtin_constant_p(fmt)) \
__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
else \
__trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)
extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);
extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);
/**
* trace_puts - write a string into the ftrace buffer
* @str: the string to record
*
* Note: __trace_bputs is an internal function for trace_puts and
* the @ip is passed in via the trace_puts macro.
*
* This is similar to trace_printk(), but is made for those really fast
* paths where a developer wants the least amount of "Heisenbug" effect
* and even the processing of the print format is too much overhead.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering printk-like
* tracing calls throughout the code, a developer can quickly see
* where problems are occurring.
*
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_puts scattered around in
* your code. (Extra memory is used for special buffers that are
* allocated when trace_puts() is used.)
*
* Returns: 0 if nothing was written, positive # if string was.
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
*/
#define trace_puts(str) ({ \
static const char *trace_printk_fmt __used \
__section("__trace_printk_fmt") = \
__builtin_constant_p(str) ? str : NULL; \
\
if (__builtin_constant_p(str)) \
__trace_bputs(_THIS_IP_, trace_printk_fmt); \
else \
__trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
extern void trace_dump_stack(int skip);
/*
* The double __builtin_constant_p is because gcc will give us an error
* if we try to allocate the static variable to fmt if it is not a
* constant. Even with the outer if statement.
*/
#define ftrace_vprintk(fmt, vargs) \
do { \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt __used \
__section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
} else \
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
return 0;
}
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_DYNAMIC_FTRACE
# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
(BUILD_BUG_ON_ZERO((perms) < 0) + \
BUILD_BUG_ON_ZERO((perms) > 0777) + \
/* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
/* USER_WRITABLE >= GROUP_WRITABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
/* OTHER_WRITABLE? Generally considered a bad idea. */ \
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
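/*
 * Build-time check sketch: VERIFY_OCTAL_PERMISSIONS(0644) simply
 * evaluates to 0644, while VERIFY_OCTAL_PERMISSIONS(0666) fails to
 * build because the other-writable bit trips
 * BUILD_BUG_ON_ZERO((perms) & 2).
 */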
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H
/*
* include/linux/preempt.h - macros for accessing and manipulating
* preempt_count (used for kernel preemption, interrupt count, etc.)
*/
#include <linux/linkage.h>
#include <linux/cleanup.h>
#include <linux/types.h>
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count could in theory be the same as the number of
* interrupts in the system, but we run all interrupt handlers with
* interrupts disabled, so we cannot have nesting interrupts. Though
* there are a few palaeontologic drivers which reenable interrupts in
* the handler, so we need more than one bit here.
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
* NMI_MASK: 0x00f00000
* PREEMPT_NEED_RESCHED: 0x80000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
#define NMI_BITS 4
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
#define __IRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
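/*
 * Worked decode, assuming the layout above: a preempt_count() value of
 * 0x00010102 means a hardirq nesting of 1 (HARDIRQ_MASK), one softirq
 * level (SOFTIRQ_MASK) and a preemption disable depth of 2 (PREEMPT_MASK).
 */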
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
* Disable preemption until the scheduler is running -- use an unconditional
* value so that it also works on !PREEMPT_COUNT kernels.
*
* Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
*/
#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
/*
* Initial preempt_count value; reflects the preempt_count schedule invariant
* which states that during context switches:
*
* preempt_count() == 2*PREEMPT_DISABLE_OFFSET
*
* Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
* Note: See finish_task_switch().
*/
#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
/**
* interrupt_context_level - return interrupt context level
*
* Returns the current interrupt context level.
* 0 - normal context
* 1 - softirq context
* 2 - hardirq context
* 3 - NMI context
*/
static __always_inline unsigned char interrupt_context_level(void)
{
unsigned long pc = preempt_count();
unsigned char level = 0;
level += !!(pc & (NMI_MASK));
level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
return level;
}
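/*
 * Usage sketch (hypothetical statistics, illustrative only): the result
 * is always in the range 0-3 and can be used directly as an array index.
 *
 *	struct ctx_stats { unsigned long hits[4]; };
 *
 *	static void account_hit(struct ctx_stats *stats)
 *	{
 *		stats->hits[interrupt_context_level()]++;
 *	}
 */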
/*
* These macro definitions avoid redundant invocations of preempt_count()
* because such invocations would result in redundant loads given that
* preempt_count() is commonly implemented with READ_ONCE().
*/
#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
#else
# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
#endif
/*
* Macros to retrieve the current execution context:
*
* in_nmi() - We're in NMI context
* in_hardirq() - We're in hard IRQ context
* in_serving_softirq() - We're in softirq context
* in_task() - We're in task context
*/
#define in_nmi() (nmi_count())
#define in_hardirq() (hardirq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
#ifdef CONFIG_PREEMPT_RT
# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
#else
# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
#endif
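/*
 * Context-check sketch (hypothetical allocation helper): pick blocking
 * vs. atomic behaviour based on the current context. Where possible,
 * prefer passing the gfp mask down from the caller; this is only a sketch.
 *
 *	gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;
 *	buf = kmalloc(len, gfp);
 */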
/*
* The following macros are deprecated and should not be used in new code:
* in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
/*
* The preempt_count offset after preempt_disable();
*/
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif
/*
* The preempt_count offset after spin_lock()
*/
#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
#else
/* Locks on RT do not disable preemption */
#define PREEMPT_LOCK_OFFSET 0
#endif
/*
* The preempt_count offset needed for things like:
*
* spin_lock_bh()
*
* Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
* softirqs, such that unlock sequences of:
*
* spin_unlock();
* local_bh_enable();
*
* Work as expected.
*/
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
#define in_atomic() (preempt_count() != 0)
/*
* Check whether we were atomic before we did preempt_disable():
* (used by the scheduler)
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
do { \
preempt_count_inc(); \
barrier(); \
} while (0)
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
barrier(); \
if (unlikely(preempt_count_dec_and_test())) \
__preempt_schedule(); \
} while (0)
#define preempt_enable_notrace() \
do { \
barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \
__preempt_schedule_notrace(); \
} while (0)
#define preempt_check_resched() \
do { \
if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
#define preempt_enable_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPTION */
#define preempt_disable_notrace() \
do { \
__preempt_count_inc(); \
barrier(); \
} while (0)
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
#else /* !CONFIG_PREEMPT_COUNT */
/*
* Even if we don't have any preemption, we need preempt disable/enable
* to be barriers, so that we don't have things like get_user/put_user
* that can cause faults and scheduling migrate into our preempt-protected
* region.
*/
#define preempt_disable() barrier()
#define sched_preempt_enable_no_resched() barrier()
#define preempt_enable_no_resched() barrier()
#define preempt_enable() barrier()
#define preempt_check_resched() do { } while (0)
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
#define preemptible() 0
#endif /* CONFIG_PREEMPT_COUNT */
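/*
 * Classic pairing sketch (hypothetical per-CPU counter): disable
 * preemption so that the CPU cannot change between the read-modify-write
 * steps of the per-CPU access.
 *
 *	preempt_disable();
 *	__this_cpu_inc(example_counter);	/* example_counter is illustrative */
 *	preempt_enable();
 */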
#ifdef MODULE
/*
* Modules have no business playing preemption tricks.
*/
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
#define preempt_set_need_resched() \
do { \
set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
if (tif_need_resched()) \
set_preempt_need_resched(); \
} while (0)
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
struct task_struct;
/**
* preempt_ops - notifiers called when a task is preempted and rescheduled
* @sched_in: we're about to be rescheduled:
* notifier: struct preempt_notifier for the task being scheduled
* cpu: cpu we're scheduled on
* @sched_out: we've just been preempted
* notifier: struct preempt_notifier for the task being preempted
* next: the task that's kicking us out
*
* Please note that sched_in and out are called under different
* contexts. sched_out is called with rq lock held and irq disabled
* while sched_in is called without rq lock and irq enabled. This
* difference is intentional and depended upon by its users.
*/
struct preempt_ops {
void (*sched_in)(struct preempt_notifier *notifier, int cpu);
void (*sched_out)(struct preempt_notifier *notifier,
struct task_struct *next);
};
/**
* preempt_notifier - key for installing preemption notifiers
* @link: internal use
* @ops: defines the notifier functions to be called
*
* Usually used in conjunction with container_of().
*/
struct preempt_notifier {
struct hlist_node link;
struct preempt_ops *ops;
};
void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
struct preempt_ops *ops)
{
/* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
notifier->link.next = NULL;
notifier->link.pprev = NULL;
notifier->ops = ops;
}
#endif
/*
* Migrate-Disable and why it is undesired.
*
* When a preempted task becomes eligible to run under the ideal model (IOW it
* becomes one of the M highest priority tasks), it might still have to wait
* for the preemptee's migrate_disable() section to complete. Thereby suffering
* a reduction in bandwidth in the exact duration of the migrate_disable()
* section.
*
* Per this argument, the change from preempt_disable() to migrate_disable()
* gets us:
*
* - a higher priority task gains reduced wake-up latency; with preempt_disable()
* it would have had to wait for the lower priority task.
*
* - a lower priority task, which under preempt_disable() could've instantly
* migrated away when another CPU becomes available, is now constrained
* by the ability to push the higher priority task away, which might itself be
* in a migrate_disable() section, reducing its available bandwidth.
*
* IOW it trades latency / moves the interference term, but it stays in the
* system, and as long as it remains unbounded, the system is not fully
* deterministic.
*
*
* The reason we have it anyway.
*
* PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
* number of primitives into becoming preemptible, they would also allow
* migration. This turns out to break a bunch of per-cpu usage. To this end,
* all these primitives employ migrate_disable() to restore this implicit
* assumption.
*
* This is a 'temporary' work-around at best. The correct solution is getting
* rid of the above assumptions and reworking the code to employ explicit
* per-cpu locking or short preempt-disable regions.
*
* The end goal must be to get rid of migrate_disable(), alternatively we need
* a schedulability theory that does not depend on arbitrary migration.
*
*
* Notes on the implementation.
*
* The implementation is particularly tricky since existing code patterns
* dictate neither migrate_disable() nor migrate_enable() is allowed to block.
* This means that it cannot use cpus_read_lock() to serialize against hotplug,
* nor can it easily migrate itself into a pending affinity mask change on
* migrate_enable().
*
*
* Note: even non-work-conserving schedulers like semi-partitioned ones depend
* on migration, so migrate_disable() is not only a problem for
* work-conserving schedulers.
*
*/
/**
* preempt_disable_nested - Disable preemption inside a normally preempt disabled section
*
* Use for code which requires preemption protection inside a critical
* section which has preemption disabled implicitly on non-PREEMPT_RT
* enabled kernels, by e.g.:
* - holding a spinlock/rwlock
* - soft interrupt context
* - regular interrupt handlers
*
* On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
* interrupt context and regular interrupt handlers are preemptible and
* only prevent migration. preempt_disable_nested() ensures that preemption
* is disabled for cases which require CPU local serialization even on
* PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
*
* The use cases are code sequences which are not serialized by a
* particular lock instance, e.g.:
* - seqcount write side critical sections where the seqcount is not
* associated to a particular lock and therefore the automatic
* protection mechanism does not work. This prevents a live lock
* against a preempting high priority reader.
* - RMW per CPU variable updates like vmstat.
*/
/* Macro to avoid header recursion hell vs. lockdep */
#define preempt_disable_nested() \
do { \
if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
preempt_disable(); \
else \
lockdep_assert_preemption_disabled(); \
} while (0)
/**
* preempt_enable_nested - Undo the effect of preempt_disable_nested()
*/
static __always_inline void preempt_enable_nested(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
}
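/*
 * Usage sketch (hypothetical per-CPU update, in the spirit of the vmstat
 * example above): the caller already runs in a context that is
 * non-preemptible on !PREEMPT_RT and only needs the extra disable on RT.
 *
 *	preempt_disable_nested();
 *	raw_cpu_inc(example_stat);	/* example_stat is illustrative */
 *	preempt_enable_nested();
 */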
DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
#ifdef CONFIG_PREEMPT_DYNAMIC
extern bool preempt_model_none(void);
extern bool preempt_model_voluntary(void);
extern bool preempt_model_full(void);
extern bool preempt_model_lazy(void);
#else
static inline bool preempt_model_none(void)
{
return IS_ENABLED(CONFIG_PREEMPT_NONE);
}
static inline bool preempt_model_voluntary(void)
{
return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
}
static inline bool preempt_model_full(void)
{
return IS_ENABLED(CONFIG_PREEMPT);
}
static inline bool preempt_model_lazy(void)
{
return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}
#endif
static inline bool preempt_model_rt(void)
{
return IS_ENABLED(CONFIG_PREEMPT_RT);
}
extern const char *preempt_model_str(void);
/*
* Does the preemption model allow non-cooperative preemption?
*
* For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
* CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
* kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
* PREEMPT_NONE model.
*/
static inline bool preempt_model_preemptible(void)
{
return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
}
#endif /* __LINUX_PREEMPT_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* fs/dcache.c
*
* Complete reimplementation
* (C) 1997 Thomas Schoebel-Theuer,
* with heavy changes by Linus Torvalds
*/
/*
* Notes on the allocation strategy:
*
* The dcache is a master of the icache - whenever a dcache entry
* exists, the inode will always exist. "iput()" is done either when
* the dcache entry is deleted or garbage collected.
*/
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"
#include <asm/runtime-const.h>
/*
* Usage:
* dcache->d_inode->i_lock protects:
* - i_dentry, d_u.d_alias, d_inode of aliases
* dcache_hash_bucket lock protects:
* - the dcache hash table
* s_roots bl list spinlock protects:
* - the s_roots list (see __d_drop)
* dentry->d_sb->s_dentry_lru_lock protects:
* - the dcache lru lists and counters
* d_lock protects:
* - d_flags
* - d_name
* - d_lru
* - d_count
* - d_unhashed()
* - d_parent and d_children
* - children's d_sib and d_parent
* - d_u.d_alias, d_inode
*
* Ordering:
* dentry->d_inode->i_lock
* dentry->d_lock
* dentry->d_sb->s_dentry_lru_lock
* dcache_hash_bucket lock
* s_roots lock
*
* If there is an ancestor relationship:
* dentry->d_parent->...->d_parent->d_lock
* ...
* dentry->d_parent->d_lock
* dentry->d_lock
*
* If no ancestor relationship:
* arbitrary, since it's serialized on rename_lock
*/
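/*
 * Ordering sketch for the common parent/child case (mirrors the pattern
 * used further down in this file):
 *
 *	spin_lock(&parent->d_lock);
 *	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 *	...
 *	spin_unlock(&dentry->d_lock);
 *	spin_unlock(&parent->d_lock);
 */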
static int sysctl_vfs_cache_pressure __read_mostly = 100;
static int sysctl_vfs_cache_pressure_denom __read_mostly = 100;
unsigned long vfs_pressure_ratio(unsigned long val)
{
return mult_frac(val, sysctl_vfs_cache_pressure, sysctl_vfs_cache_pressure_denom);
}
EXPORT_SYMBOL_GPL(vfs_pressure_ratio);
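/*
 * Worked example: with the defaults (pressure 100, denom 100) the ratio
 * is 1:1 and vfs_pressure_ratio(val) == val; setting vfs_cache_pressure
 * to 50 makes it return roughly val/2, halving reclaim pressure.
 */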
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
EXPORT_SYMBOL(rename_lock);
static struct kmem_cache *dentry_cache __ro_after_init;
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
const struct qstr dotdot_name = QSTR_INIT("..", 2);
EXPORT_SYMBOL(dotdot_name);
/*
* This is the single most critical data structure when it comes
* to the dcache: the hashtable for lookups. Somebody should try
* to make this good - I've just made it work.
*
* This hash-function tries to avoid losing too many bits of hash
* information, yet avoid using a prime hash-size or similar.
*
* Marking the variables "used" ensures that the compiler doesn't
* optimize them away completely on architectures with runtime
* constant infrastructure; this allows debuggers to see their
* values. But updating these values has no effect on those arches.
*/
static unsigned int d_hash_shift __ro_after_init __used;
static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;
static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
{
return runtime_const_ptr(dentry_hashtable) +
runtime_const_shift_right_32(hashlen, d_hash_shift);
}
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
unsigned int hash)
{
hash += (unsigned long) parent / L1_CACHE_BYTES;
return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
struct dentry_stat_t {
long nr_dentry;
long nr_unused;
long age_limit; /* age in seconds */
long want_pages; /* pages requested by system */
long nr_negative; /* # of unused negative dentries */
long dummy; /* Reserved for future use */
};
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
static int dentry_negative_policy;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
/* Statistics gathering. */
static struct dentry_stat_t dentry_stat = {
.age_limit = 45,
};
/*
* Here we resort to our own counters instead of using generic per-cpu counters
* for consistency with what the vfs inode code does. We are expected to harvest
* better code and performance by having our own specialized counters.
*
* Please note that the loop is done over all possible CPUs, not over all online
* CPUs. The reason for this is that we don't want to play games with CPUs going
* on and off. If one of them goes off, we will just keep their counters.
*
* glommer: See cffbc8a for details, and if you ever intend to change this,
* please update all vfs counters to match.
*/
static long get_nr_dentry(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_dentry, i);
return sum < 0 ? 0 : sum;
}
static long get_nr_dentry_unused(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_dentry_unused, i);
return sum < 0 ? 0 : sum;
}
static long get_nr_dentry_negative(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_dentry_negative, i);
return sum < 0 ? 0 : sum;
}
static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
dentry_stat.nr_dentry = get_nr_dentry();
dentry_stat.nr_unused = get_nr_dentry_unused();
dentry_stat.nr_negative = get_nr_dentry_negative();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static const struct ctl_table fs_dcache_sysctls[] = {
{
.procname = "dentry-state",
.data = &dentry_stat,
.maxlen = 6*sizeof(long),
.mode = 0444,
.proc_handler = proc_nr_dentry,
},
{
.procname = "dentry-negative",
.data = &dentry_negative_policy,
.maxlen = sizeof(dentry_negative_policy),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
};
static const struct ctl_table vm_dcache_sysctls[] = {
{
.procname = "vfs_cache_pressure",
.data = &sysctl_vfs_cache_pressure,
.maxlen = sizeof(sysctl_vfs_cache_pressure),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "vfs_cache_pressure_denom",
.data = &sysctl_vfs_cache_pressure_denom,
.maxlen = sizeof(sysctl_vfs_cache_pressure_denom),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE_HUNDRED,
},
};
static int __init init_fs_dcache_sysctls(void)
{
register_sysctl_init("vm", vm_dcache_sysctls);
register_sysctl_init("fs", fs_dcache_sysctls);
return 0;
}
fs_initcall(init_fs_dcache_sysctls);
#endif
/*
* Compare 2 name strings, return 0 if they match, otherwise non-zero.
* The strings are both count bytes long, and count is non-zero.
*/
#ifdef CONFIG_DCACHE_WORD_ACCESS
#include <asm/word-at-a-time.h>
/*
* NOTE! 'cs' comes from a dentry, so it has an
* aligned allocation for this particular component. We don't
* strictly need the load_unaligned_zeropad() safety, but it
* doesn't hurt either.
*
* In contrast, 'ct' and 'tcount' can be from a pathname, and do
* need the careful unaligned handling.
*/
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
unsigned long a,b,mask;
for (;;) {
a = read_word_at_a_time(cs);
b = load_unaligned_zeropad(ct);
if (tcount < sizeof(unsigned long))
break;
if (unlikely(a != b))
return 1;
cs += sizeof(unsigned long);
ct += sizeof(unsigned long);
tcount -= sizeof(unsigned long);
if (!tcount)
return 0;
}
mask = bytemask_from_count(tcount);
return unlikely(!!((a ^ b) & mask));
}
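/*
 * Worked example, assuming a 64-bit little-endian build: for a final
 * tcount of 3, bytemask_from_count(3) is 0x0000000000ffffff, so only
 * the three remaining name bytes take part in the (a ^ b) comparison
 * above.
 */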
#else
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
do {
if (*cs != *ct)
return 1;
cs++;
ct++;
tcount--;
} while (tcount);
return 0;
}
#endif
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
/*
* Be careful about RCU walk racing with rename:
* use 'READ_ONCE' to fetch the name pointer.
*
* NOTE! Even if a rename will mean that the length
* was not loaded atomically, we don't care. The
* RCU walk will check the sequence count eventually,
* and catch it. And we won't overrun the buffer,
* because we're reading the name pointer atomically,
* and a dentry name is guaranteed to be properly
* terminated with a NUL byte.
*
* End result: even if 'len' is wrong, we'll exit
* early because the data cannot match (there can
* be no NUL in the ct/tcount data)
*/
const unsigned char *cs = READ_ONCE(dentry->d_name.name);
return dentry_string_cmp(cs, ct, tcount);
}
/*
* long names are allocated separately from dentry and never modified.
* Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot()
* for the reason why ->count and ->head can't be combined into a union.
* dentry_string_cmp() relies upon ->name[] being word-aligned.
*/
struct external_name {
atomic_t count;
struct rcu_head head;
unsigned char name[] __aligned(sizeof(unsigned long));
};
static inline struct external_name *external_name(struct dentry *dentry)
{
return container_of(dentry->d_name.name, struct external_name, name[0]);
}
static void __d_free(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
kmem_cache_free(dentry_cache, dentry);
}
static void __d_free_external(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
kfree(external_name(dentry));
kmem_cache_free(dentry_cache, dentry);
}
static inline int dname_external(const struct dentry *dentry)
{
return dentry->d_name.name != dentry->d_shortname.string;
}
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
unsigned seq;
const unsigned char *s;
rcu_read_lock();
retry:
seq = read_seqcount_begin(&dentry->d_seq);
s = READ_ONCE(dentry->d_name.name);
name->name.hash_len = dentry->d_name.hash_len;
name->name.name = name->inline_name.string;
if (likely(s == dentry->d_shortname.string)) {
name->inline_name = dentry->d_shortname;
} else {
struct external_name *p;
p = container_of(s, struct external_name, name[0]);
// get a valid reference
if (unlikely(!atomic_inc_not_zero(&p->count)))
goto retry;
name->name.name = s;
}
if (read_seqcount_retry(&dentry->d_seq, seq)) {
release_dentry_name_snapshot(name);
goto retry;
}
rcu_read_unlock();
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
void release_dentry_name_snapshot(struct name_snapshot *name)
{
if (unlikely(name->name.name != name->inline_name.string)) {
struct external_name *p;
p = container_of(name->name.name, struct external_name, name[0]);
if (unlikely(atomic_dec_and_test(&p->count)))
kfree_rcu(p, head);
}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
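/*
 * Usage sketch (hypothetical consumer): take a stable copy of the name
 * that survives a concurrent rename, and release it when done.
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	log_event(snap.name.name);	/* log_event() is illustrative */
 *	release_dentry_name_snapshot(&snap);
 */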
static inline void __d_set_inode_and_type(struct dentry *dentry,
struct inode *inode,
unsigned type_flags)
{
unsigned flags;
dentry->d_inode = inode;
flags = READ_ONCE(dentry->d_flags);
flags &= ~DCACHE_ENTRY_TYPE;
flags |= type_flags;
smp_store_release(&dentry->d_flags, flags);
}
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
unsigned flags = READ_ONCE(dentry->d_flags);
flags &= ~DCACHE_ENTRY_TYPE;
WRITE_ONCE(dentry->d_flags, flags);
dentry->d_inode = NULL;
/*
* The negative counter only tracks dentries on the LRU. Don't inc if
* d_lru is on another list.
*/
if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_inc(nr_dentry_negative);
}
static void dentry_free(struct dentry *dentry)
{
WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
if (unlikely(dname_external(dentry))) {
struct external_name *p = external_name(dentry);
if (likely(atomic_dec_and_test(&p->count))) {
call_rcu(&dentry->d_u.d_rcu, __d_free_external);
return;
}
}
/* if dentry was never visible to RCU, immediate free is OK */
if (dentry->d_flags & DCACHE_NORCU)
__d_free(&dentry->d_u.d_rcu);
else
call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/*
* Release the dentry's inode, using the filesystem
* d_iput() operation if defined.
*/
static void dentry_unlink_inode(struct dentry * dentry)
__releases(dentry->d_lock)
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
raw_write_seqcount_begin(&dentry->d_seq);
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
fsnotify_inoderemove(inode);
if (dentry->d_op && dentry->d_op->d_iput)
dentry->d_op->d_iput(dentry, inode);
else
iput(inode);
}
/*
* The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
* is in use - which includes both the "real" per-superblock
* LRU list _and_ the DCACHE_SHRINK_LIST use.
*
* The DCACHE_SHRINK_LIST bit is set whenever the dentry is
* on the shrink list (ie not on the superblock LRU list).
*
* The per-cpu "nr_dentry_unused" counters are updated with
* the DCACHE_LRU_LIST bit.
*
* The per-cpu "nr_dentry_negative" counters are only updated
* when deleted from or added to the per-superblock LRU list, not
* from/to the shrink list. That is to avoid an unneeded dec/inc
* pair when moving from LRU to shrink list in select_collect().
*
* These helper functions make sure we always follow the
* rules. d_lock must be held by the caller.
*/
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
D_FLAG_VERIFY(dentry, 0);
dentry->d_flags |= DCACHE_LRU_LIST;
this_cpu_inc(nr_dentry_unused);
if (d_is_negative(dentry))
this_cpu_inc(nr_dentry_negative);
WARN_ON_ONCE(!list_lru_add_obj(
&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
static void d_lru_del(struct dentry *dentry)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
if (d_is_negative(dentry))
this_cpu_dec(nr_dentry_negative);
WARN_ON_ONCE(!list_lru_del_obj(
&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
static void d_shrink_del(struct dentry *dentry)
{
D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
list_del_init(&dentry->d_lru);
dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
this_cpu_dec(nr_dentry_unused);
}
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
D_FLAG_VERIFY(dentry, 0);
list_add(&dentry->d_lru, list);
dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
this_cpu_inc(nr_dentry_unused);
}
/*
* These can only be called under the global LRU lock, ie during the
* callback for freeing the LRU list. "isolate" removes it from the
* LRU lists entirely, while shrink_move moves it to the indicated
* private list.
*/
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
if (d_is_negative(dentry))
this_cpu_dec(nr_dentry_negative);
list_lru_isolate(lru, &dentry->d_lru);
}
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
struct list_head *list)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags |= DCACHE_SHRINK_LIST;
if (d_is_negative(dentry))
this_cpu_dec(nr_dentry_negative);
list_lru_isolate_move(lru, &dentry->d_lru, list);
}
static void ___d_drop(struct dentry *dentry)
{
struct hlist_bl_head *b;
/*
* Hashed dentries are normally on the dentry hashtable,
* with the exception of those newly allocated by
* d_obtain_root, which are always IS_ROOT:
*/
if (unlikely(IS_ROOT(dentry)))
b = &dentry->d_sb->s_roots;
else
b = d_hash(dentry->d_name.hash);
hlist_bl_lock(b);
__hlist_bl_del(&dentry->d_hash);
hlist_bl_unlock(b);
}
void __d_drop(struct dentry *dentry)
{
if (!d_unhashed(dentry)) {
___d_drop(dentry);
dentry->d_hash.pprev = NULL;
write_seqcount_invalidate(&dentry->d_seq);
}
}
EXPORT_SYMBOL(__d_drop);
/**
* d_drop - drop a dentry
* @dentry: dentry to drop
*
* d_drop() unhashes the entry from the parent dentry hashes, so that it won't
* be found through a VFS lookup any more. Note that this is different from
* deleting the dentry - d_delete will try to mark the dentry negative if
* possible, giving a successful _negative_ lookup, while d_drop will
* just make the cache lookup fail.
*
* d_drop() is used mainly for stuff that wants to invalidate a dentry for some
* reason (NFS timeouts or autofs deletes).
*
* __d_drop requires dentry->d_lock
*
* ___d_drop doesn't mark dentry as "unhashed"
* (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
*/
void d_drop(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
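/*
 * Usage sketch (hypothetical revalidation path): once the cached entry
 * is known to be stale, drop it so subsequent lookups go back to the
 * filesystem instead of hitting the bogus dcache entry.
 *
 *	if (entry_is_stale(dentry))	/* entry_is_stale() is illustrative */
 *		d_drop(dentry);
 */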
static inline void dentry_unlist(struct dentry *dentry)
{
struct dentry *next;
/*
* Inform d_walk() and shrink_dentry_list() that we are no longer
* attached to the dentry tree
*/
dentry->d_flags |= DCACHE_DENTRY_KILLED;
if (unlikely(hlist_unhashed(&dentry->d_sib)))
return;
__hlist_del(&dentry->d_sib);
/*
* Cursors can move around the list of children. While we'd been
* a normal list member, it didn't matter - ->d_sib.next would've
* been updated. However, from now on it won't be, and things
* like d_walk() might end up with a nasty surprise.
* Normally d_walk() doesn't care about cursors moving around -
* ->d_lock on parent prevents that and since a cursor has no children
* of its own, we get through it without ever unlocking the parent.
* There is one exception, though - if we ascend from a child that
* gets killed as soon as we unlock it, the next sibling is found
* using the value left in its ->d_sib.next. And if _that_
* pointed to a cursor, and cursor got moved (e.g. by lseek())
* before d_walk() regains parent->d_lock, we'll end up skipping
* everything the cursor had been moved past.
*
* Solution: make sure that the pointer left behind in ->d_sib.next
* points to something that won't be moving around. I.e. skip the
* cursors.
*/
while (dentry->d_sib.next) {
next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
break;
dentry->d_sib.next = next->d_sib.next;
}
}
static struct dentry *__dentry_kill(struct dentry *dentry)
{
struct dentry *parent = NULL;
bool can_free = true;
/*
* The dentry is now unrecoverably dead to the world.
*/
lockref_mark_dead(&dentry->d_lockref);
/*
* inform the fs via d_prune that this dentry is about to be
* unhashed and destroyed.
*/
if (dentry->d_flags & DCACHE_OP_PRUNE)
dentry->d_op->d_prune(dentry);
if (dentry->d_flags & DCACHE_LRU_LIST) {
if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
d_lru_del(dentry);
}
/* if it was on the hash then remove it */
__d_drop(dentry);
if (dentry->d_inode)
dentry_unlink_inode(dentry);
else
spin_unlock(&dentry->d_lock);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
cond_resched();
/* now that it's negative, ->d_parent is stable */
if (!IS_ROOT(dentry)) {
parent = dentry->d_parent;
spin_lock(&parent->d_lock);
}
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
dentry_unlist(dentry);
if (dentry->d_flags & DCACHE_SHRINK_LIST)
can_free = false;
spin_unlock(&dentry->d_lock);
if (likely(can_free))
dentry_free(dentry);
if (parent && --parent->d_lockref.count) {
spin_unlock(&parent->d_lock);
return NULL;
}
return parent;
}
/*
* Lock a dentry for feeding it to __dentry_kill().
* Called under rcu_read_lock() and dentry->d_lock; the former
* guarantees that nothing we access will be freed under us.
* Note that dentry is *not* protected from concurrent dentry_kill(),
* d_delete(), etc.
*
* Return false if dentry is busy. Otherwise, return true and have
* that dentry's inode locked.
*/
static bool lock_for_kill(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
if (unlikely(dentry->d_lockref.count))
return false;
if (!inode || likely(spin_trylock(&inode->i_lock)))
return true;
do {
spin_unlock(&dentry->d_lock);
spin_lock(&inode->i_lock);
spin_lock(&dentry->d_lock);
if (likely(inode == dentry->d_inode))
break;
spin_unlock(&inode->i_lock);
inode = dentry->d_inode;
} while (inode);
if (likely(!dentry->d_lockref.count))
return true;
if (inode)
spin_unlock(&inode->i_lock);
return false;
}
/*
* Decide if dentry is worth retaining. Usually this is called with dentry
* locked; if not locked, we are more limited and might not be able to tell
* without a lock. False in this case means "punt to locked path and recheck".
*
* In case we aren't locked, these predicates are not "stable". However, it is
* sufficient that at some point after we dropped the reference the dentry was
* hashed and the flags had the proper value. Other dentry users may have
* re-gotten a reference to the dentry and changed that, but our work is done -
* we can leave the dentry around with a zero refcount.
*/
static inline bool retain_dentry(struct dentry *dentry, bool locked)
{
unsigned int d_flags;
smp_rmb();
d_flags = READ_ONCE(dentry->d_flags);
// Unreachable? Nobody would be able to look it up, no point retaining
if (unlikely(d_unhashed(dentry)))
return false;
// Same if it's disconnected
if (unlikely(d_flags & DCACHE_DISCONNECTED))
return false;
// ->d_delete() might tell us not to bother, but that requires
// ->d_lock; can't decide without it
if (unlikely(d_flags & DCACHE_OP_DELETE)) {
if (!locked || dentry->d_op->d_delete(dentry))
return false;
}
// Explicitly told not to bother
if (unlikely(d_flags & DCACHE_DONTCACHE))
return false;
// At this point it looks like we ought to keep it. We also might
// need to do something - put it on LRU if it wasn't there already
// and mark it referenced if it was on LRU, but not marked yet.
// Unfortunately, both actions require ->d_lock, so in lockless
// case we'd have to punt rather than doing those.
if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
if (!locked)
return false;
d_lru_add(dentry);
} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
if (!locked)
return false;
dentry->d_flags |= DCACHE_REFERENCED;
}
return true;
}
void d_mark_dontcache(struct inode *inode)
{
struct dentry *de;
spin_lock(&inode->i_lock);
hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
spin_lock(&de->d_lock);
de->d_flags |= DCACHE_DONTCACHE;
spin_unlock(&de->d_lock);
}
inode->i_state |= I_DONTCACHE;
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);
/*
* Try to do a lockless dput(), and return whether that was successful.
*
* If unsuccessful, we return false, having already taken the dentry lock.
* In that case refcount is guaranteed to be zero and we have already
* decided that it's not worth keeping around.
*
* The caller needs to hold the RCU read lock, so that the dentry is
* guaranteed to stay around even if the refcount goes down to zero!
*/
static inline bool fast_dput(struct dentry *dentry)
{
int ret;
/*
* try to decrement the lockref optimistically.
*/
ret = lockref_put_return(&dentry->d_lockref);
/*
* If the lockref_put_return() failed due to the lock being held
* by somebody else, the fast path has failed. We will need to
* get the lock, and then check the count again.
*/
if (unlikely(ret < 0)) {
spin_lock(&dentry->d_lock);
if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
spin_unlock(&dentry->d_lock);
return true;
}
dentry->d_lockref.count--;
goto locked;
}
/*
* If we weren't the last ref, we're done.
*/
if (ret)
return true;
/*
* Can we decide that decrement of refcount is all we needed without
* taking the lock? There's a very common case when it's all we need -
* dentry looks like it ought to be retained and there's nothing else
* to do.
*/
if (retain_dentry(dentry, false))
return true;
/*
* Either not worth retaining or we can't tell without the lock.
* Get the lock, then. We've already decremented the refcount to 0,
* but we'll need to re-check the situation after getting the lock.
*/
spin_lock(&dentry->d_lock);
/*
* Did somebody else grab a reference to it in the meantime, and
* we're no longer the last user after all? Alternatively, somebody
* else could have killed it and marked it dead. Either way, we
* don't need to do anything else.
*/
locked:
if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
spin_unlock(&dentry->d_lock);
return true;
}
return false;
}
/*
* This is dput
*
* This is complicated by the fact that we do not want to put
* dentries that are no longer on any hash chain on the unused
* list: we'd much rather just get rid of them immediately.
*
* However, that implies that we have to traverse the dentry
* tree upwards to the parents which might _also_ now be
* scheduled for deletion (it may have been only waiting for
* its last child to go away).
*
* This tail recursion is done by hand as we don't want to depend
* on the compiler to always get this right (gcc generally doesn't).
* Real recursion would eat up our stack space.
*/
/*
* dput - release a dentry
* @dentry: dentry to release
*
* Release a dentry. This will drop the usage count and if appropriate
* call the dentry unlink method as well as removing it from the queues and
* releasing its resources. If the parent dentries were scheduled for release
* they too may now get deleted.
*/
void dput(struct dentry *dentry)
{
if (!dentry)
return;
might_sleep();
rcu_read_lock();
if (likely(fast_dput(dentry))) {
rcu_read_unlock();
return;
}
while (lock_for_kill(dentry)) {
rcu_read_unlock();
dentry = __dentry_kill(dentry);
if (!dentry)
return;
if (retain_dentry(dentry, true)) {
spin_unlock(&dentry->d_lock);
return;
}
rcu_read_lock();
}
rcu_read_unlock();
spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dput);
static void to_shrink_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
if (dentry->d_flags & DCACHE_LRU_LIST)
d_lru_del(dentry);
d_shrink_add(dentry, list);
}
}
void dput_to_list(struct dentry *dentry, struct list_head *list)
{
rcu_read_lock();
if (likely(fast_dput(dentry))) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
to_shrink_list(dentry, list);
spin_unlock(&dentry->d_lock);
}
struct dentry *dget_parent(struct dentry *dentry)
{
int gotref;
struct dentry *ret;
unsigned seq;
/*
* Do optimistic parent lookup without any
* locking.
*/
rcu_read_lock();
seq = raw_seqcount_begin(&dentry->d_seq);
ret = READ_ONCE(dentry->d_parent);
gotref = lockref_get_not_zero(&ret->d_lockref);
rcu_read_unlock();
if (likely(gotref)) {
if (!read_seqcount_retry(&dentry->d_seq, seq))
return ret;
dput(ret);
}
repeat:
/*
* Don't need rcu_dereference because we re-check it was correct under
* the lock.
*/
rcu_read_lock();
ret = dentry->d_parent;
spin_lock(&ret->d_lock);
if (unlikely(ret != dentry->d_parent)) {
spin_unlock(&ret->d_lock);
rcu_read_unlock();
goto repeat;
}
rcu_read_unlock();
BUG_ON(!ret->d_lockref.count);
ret->d_lockref.count++;
spin_unlock(&ret->d_lock);
return ret;
}
EXPORT_SYMBOL(dget_parent);
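/*
 * Usage sketch: dget_parent() returns a referenced parent, so the
 * caller must balance it with dput() when done.
 *
 *	struct dentry *parent = dget_parent(dentry);
 *
 *	... safely use parent (e.g. parent->d_inode) ...
 *	dput(parent);
 */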
static struct dentry * __d_find_any_alias(struct inode *inode)
{
struct dentry *alias;
if (hlist_empty(&inode->i_dentry))
return NULL;
alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
lockref_get(&alias->d_lockref);
return alias;
}
/**
* d_find_any_alias - find any alias for a given inode
* @inode: inode to find an alias for
*
* If any aliases exist for the given inode, take and return a
* reference for one of them. If no aliases exist, return %NULL.
*/
struct dentry *d_find_any_alias(struct inode *inode)
{
struct dentry *de;
spin_lock(&inode->i_lock);
de = __d_find_any_alias(inode);
spin_unlock(&inode->i_lock);
return de;
}
EXPORT_SYMBOL(d_find_any_alias);
static struct dentry *__d_find_alias(struct inode *inode)
{
struct dentry *alias;
if (S_ISDIR(inode->i_mode))
return __d_find_any_alias(inode);
hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
spin_lock(&alias->d_lock);
if (!d_unhashed(alias)) {
dget_dlock(alias);
spin_unlock(&alias->d_lock);
return alias;
}
spin_unlock(&alias->d_lock);
}
return NULL;
}
/**
* d_find_alias - grab a hashed alias of inode
* @inode: inode in question
*
* If inode has a hashed alias, or is a directory and has any alias,
* acquire the reference to alias and return it. Otherwise return NULL.
* Notice that if inode is a directory there can be only one alias and
* it can be unhashed only if it has no children, or if it is the root
* of a filesystem, or if the directory was renamed and d_revalidate
* was the first vfs operation to notice.
*
* If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
* any other hashed alias over that one.
*/
struct dentry *d_find_alias(struct inode *inode)
{
struct dentry *de = NULL;
if (!hlist_empty(&inode->i_dentry)) {
spin_lock(&inode->i_lock);
de = __d_find_alias(inode);
spin_unlock(&inode->i_lock);
}
return de;
}
EXPORT_SYMBOL(d_find_alias);
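/*
 * Usage sketch: the returned alias (if any) carries a reference that
 * the caller must drop.
 *
 *	struct dentry *alias = d_find_alias(inode);
 *
 *	if (alias) {
 *		pr_debug("inode has alias %pd\n", alias);
 *		dput(alias);
 *	}
 */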
/*
* Caller MUST be holding rcu_read_lock() and be guaranteed
* that inode won't get freed until rcu_read_unlock().
*/
struct dentry *d_find_alias_rcu(struct inode *inode)
{
struct hlist_head *l = &inode->i_dentry;
struct dentry *de = NULL;
spin_lock(&inode->i_lock);
// ->i_dentry and ->i_rcu are colocated, but the latter won't be
// used without having I_FREEING set, which means no aliases left
if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
if (S_ISDIR(inode->i_mode)) {
de = hlist_entry(l->first, struct dentry, d_u.d_alias);
} else {
hlist_for_each_entry(de, l, d_u.d_alias)
if (!d_unhashed(de))
break;
}
}
spin_unlock(&inode->i_lock);
return de;
}
/*
* Try to kill dentries associated with this inode.
* WARNING: you must own a reference to inode.
*/
void d_prune_aliases(struct inode *inode)
{
LIST_HEAD(dispose);
struct dentry *dentry;
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
if (!dentry->d_lockref.count)
to_shrink_list(dentry, &dispose);
spin_unlock(&dentry->d_lock);
}
spin_unlock(&inode->i_lock);
shrink_dentry_list(&dispose);
}
EXPORT_SYMBOL(d_prune_aliases);
static inline void shrink_kill(struct dentry *victim)
{
do {
rcu_read_unlock();
victim = __dentry_kill(victim);
rcu_read_lock();
} while (victim && lock_for_kill(victim));
rcu_read_unlock();
if (victim)
spin_unlock(&victim->d_lock);
}
void shrink_dentry_list(struct list_head *list)
{
while (!list_empty(list)) {
struct dentry *dentry;
dentry = list_entry(list->prev, struct dentry, d_lru);
spin_lock(&dentry->d_lock);
rcu_read_lock();
if (!lock_for_kill(dentry)) {
bool can_free;
rcu_read_unlock();
d_shrink_del(dentry);
can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
spin_unlock(&dentry->d_lock);
if (can_free)
dentry_free(dentry);
continue;
}
d_shrink_del(dentry);
shrink_kill(dentry);
}
}
static enum lru_status dentry_lru_isolate(struct list_head *item,
struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
/*
* we are inverting the lru lock/dentry->d_lock here,
* so use a trylock. If we fail to get the lock, just skip
* it
*/
if (!spin_trylock(&dentry->d_lock))
return LRU_SKIP;
/*
* Referenced dentries are still in use. If they have active
* counts, just remove them from the LRU. Otherwise give them
* another pass through the LRU.
*/
if (dentry->d_lockref.count) {
d_lru_isolate(lru, dentry);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
}
if (dentry->d_flags & DCACHE_REFERENCED) {
dentry->d_flags &= ~DCACHE_REFERENCED;
spin_unlock(&dentry->d_lock);
/*
* The list move itself will be made by the common LRU code. At
* this point, we've dropped the dentry->d_lock but keep the
* lru lock. This is safe to do, since every list movement is
* protected by the lru lock even if both locks are held.
*
* This is guaranteed by the fact that all LRU management
* functions are intermediated by the LRU API calls like
* list_lru_add_obj and list_lru_del_obj. List movement in this file
* only ever occurs through these functions or through callbacks
* like this one, which are called from the LRU API.
*
* The only exceptions to this are functions like
* shrink_dentry_list, and code that first checks for the
* DCACHE_SHRINK_LIST flag. Those are guaranteed to be
* operating only with stack provided lists after they are
* properly isolated from the main list. It is thus always a
* local access.
*/
return LRU_ROTATE;
}
d_lru_shrink_move(lru, dentry, freeable);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
}
/**
* prune_dcache_sb - shrink the dcache
* @sb: superblock
* @sc: shrink control, passed to list_lru_shrink_walk()
*
* Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
* is done when we need more memory and called from the superblock shrinker
* function.
*
* This function may fail to free any resources if all the dentries are in
* use.
*/
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
LIST_HEAD(dispose);
long freed;
freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
dentry_lru_isolate, &dispose);
shrink_dentry_list(&dispose);
return freed;
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
/*
* we are inverting the lru lock/dentry->d_lock here,
* so use a trylock. If we fail to get the lock, just skip
* it
*/
if (!spin_trylock(&dentry->d_lock))
return LRU_SKIP;
d_lru_shrink_move(lru, dentry, freeable);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
}
/**
* shrink_dcache_sb - shrink dcache for a superblock
* @sb: superblock
*
* Shrink the dcache for the specified super block. This is used to free
* the dcache before unmounting a file system.
*/
void shrink_dcache_sb(struct super_block *sb)
{
do {
LIST_HEAD(dispose);
list_lru_walk(&sb->s_dentry_lru,
dentry_lru_isolate_shrink, &dispose, 1024);
shrink_dentry_list(&dispose);
} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE: continue walk
* @D_WALK_QUIT: quit walk
* @D_WALK_NORETRY: quit when retry is needed
* @D_WALK_SKIP: skip this dentry and its children
*/
enum d_walk_ret {
D_WALK_CONTINUE,
D_WALK_QUIT,
D_WALK_NORETRY,
D_WALK_SKIP,
};
/**
* d_walk - walk the dentry tree
* @parent: start of walk
 * @data: data passed to @enter()
* @enter: callback when first entering the dentry
*
* The @enter() callbacks are called with d_lock held.
*/
static void d_walk(struct dentry *parent, void *data,
enum d_walk_ret (*enter)(void *, struct dentry *))
{
struct dentry *this_parent, *dentry;
unsigned seq = 0;
enum d_walk_ret ret;
bool retry = true;
again:
read_seqbegin_or_lock(&rename_lock, &seq);
this_parent = parent;
spin_lock(&this_parent->d_lock);
ret = enter(data, this_parent);
switch (ret) {
case D_WALK_CONTINUE:
break;
case D_WALK_QUIT:
case D_WALK_SKIP:
goto out_unlock;
case D_WALK_NORETRY:
retry = false;
break;
}
repeat:
dentry = d_first_child(this_parent);
resume:
hlist_for_each_entry_from(dentry, d_sib) {
if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
continue;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
ret = enter(data, dentry);
switch (ret) {
case D_WALK_CONTINUE:
break;
case D_WALK_QUIT:
spin_unlock(&dentry->d_lock);
goto out_unlock;
case D_WALK_NORETRY:
retry = false;
break;
case D_WALK_SKIP:
spin_unlock(&dentry->d_lock);
continue;
}
if (!hlist_empty(&dentry->d_children)) {
spin_unlock(&this_parent->d_lock);
spin_release(&dentry->d_lock.dep_map, _RET_IP_);
this_parent = dentry;
spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
}
spin_unlock(&dentry->d_lock);
}
/*
* All done at this level ... ascend and resume the search.
*/
rcu_read_lock();
ascend:
if (this_parent != parent) {
dentry = this_parent;
this_parent = dentry->d_parent;
spin_unlock(&dentry->d_lock);
spin_lock(&this_parent->d_lock);
/* might go back up the wrong parent if we have had a rename. */
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
/* go into the first sibling still alive */
hlist_for_each_entry_continue(dentry, d_sib) {
if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
rcu_read_unlock();
goto resume;
}
}
goto ascend;
}
if (need_seqretry(&rename_lock, seq))
goto rename_retry;
rcu_read_unlock();
out_unlock:
spin_unlock(&this_parent->d_lock);
done_seqretry(&rename_lock, seq);
return;
rename_retry:
spin_unlock(&this_parent->d_lock);
rcu_read_unlock();
BUG_ON(seq & 1);
if (!retry)
return;
seq = 1;
goto again;
}
struct check_mount {
struct vfsmount *mnt;
unsigned int mounted;
};
/* locks: mount_locked_reader && dentry->d_lock */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
struct check_mount *info = data;
struct path path = { .mnt = info->mnt, .dentry = dentry };
if (likely(!d_mountpoint(dentry)))
return D_WALK_CONTINUE;
if (__path_is_mountpoint(&path)) {
info->mounted = 1;
return D_WALK_QUIT;
}
return D_WALK_CONTINUE;
}
/**
* path_has_submounts - check for mounts over a dentry in the
* current namespace.
* @parent: path to check.
*
* Return true if the parent or its subdirectories contain
* a mount point in the current namespace.
*/
int path_has_submounts(const struct path *parent)
{
struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
guard(mount_locked_reader)();
d_walk(parent->dentry, &data, path_check_mount);
return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
/*
* Called by mount code to set a mountpoint and check if the mountpoint is
* reachable (e.g. NFS can unhash a directory dentry and then the complete
* subtree can become unreachable).
*
* Only one of d_invalidate() and d_set_mounted() must succeed. For
* this reason take rename_lock and d_lock on dentry and ancestors.
*/
int d_set_mounted(struct dentry *dentry)
{
struct dentry *p;
int ret = -ENOENT;
read_seqlock_excl(&rename_lock);
for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
/* Need exclusion wrt. d_invalidate() */
spin_lock(&p->d_lock);
if (unlikely(d_unhashed(p))) {
spin_unlock(&p->d_lock);
goto out;
}
spin_unlock(&p->d_lock);
}
spin_lock(&dentry->d_lock);
if (!d_unlinked(dentry)) {
ret = -EBUSY;
if (!d_mountpoint(dentry)) {
dentry->d_flags |= DCACHE_MOUNTED;
ret = 0;
}
}
spin_unlock(&dentry->d_lock);
out:
read_sequnlock_excl(&rename_lock);
return ret;
}
/*
* Search the dentry child list of the specified parent,
* and move any unused dentries to the end of the unused
* list for prune_dcache(). We descend to the next level
* whenever the d_children list is non-empty and continue
* searching.
*
* It returns zero iff there are no unused children,
* otherwise it returns the number of children moved to
* the end of the unused list. This may not be the total
* number of unused children, because select_parent can
* drop the lock and return early due to latency
* constraints.
*/
struct select_data {
struct dentry *start;
union {
long found;
struct dentry *victim;
};
struct list_head dispose;
};
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
struct select_data *data = _data;
enum d_walk_ret ret = D_WALK_CONTINUE;
if (data->start == dentry)
goto out;
if (dentry->d_flags & DCACHE_SHRINK_LIST) {
data->found++;
} else if (!dentry->d_lockref.count) {
to_shrink_list(dentry, &data->dispose);
data->found++;
} else if (dentry->d_lockref.count < 0) {
data->found++;
}
/*
* We can return to the caller if we have found some (this
* ensures forward progress). We'll be coming back to find
* the rest.
*/
if (!list_empty(&data->dispose))
ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
return ret;
}
static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
struct select_data *data = _data;
enum d_walk_ret ret = D_WALK_CONTINUE;
if (data->start == dentry)
goto out;
if (!dentry->d_lockref.count) {
if (dentry->d_flags & DCACHE_SHRINK_LIST) {
rcu_read_lock();
data->victim = dentry;
return D_WALK_QUIT;
}
to_shrink_list(dentry, &data->dispose);
}
/*
* We can return to the caller if we have found some (this
* ensures forward progress). We'll be coming back to find
* the rest.
*/
if (!list_empty(&data->dispose))
ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
return ret;
}
/**
* shrink_dcache_parent - prune dcache
* @parent: parent of entries to prune
*
* Prune the dcache to remove unused children of the parent dentry.
*/
void shrink_dcache_parent(struct dentry *parent)
{
for (;;) {
struct select_data data = {.start = parent};
INIT_LIST_HEAD(&data.dispose);
d_walk(parent, &data, select_collect);
if (!list_empty(&data.dispose)) {
shrink_dentry_list(&data.dispose);
continue;
}
cond_resched();
if (!data.found)
break;
data.victim = NULL;
d_walk(parent, &data, select_collect2);
if (data.victim) {
spin_lock(&data.victim->d_lock);
if (!lock_for_kill(data.victim)) {
spin_unlock(&data.victim->d_lock);
rcu_read_unlock();
} else {
shrink_kill(data.victim);
}
}
if (!list_empty(&data.dispose))
shrink_dentry_list(&data.dispose);
}
}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
if (!hlist_empty(&dentry->d_children))
return D_WALK_CONTINUE;
/* root with refcount 1 is fine */
if (dentry == _data && dentry->d_lockref.count == 1)
return D_WALK_CONTINUE;
WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
" still in use (%d) [unmount of %s %s]\n",
dentry,
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry,
dentry->d_lockref.count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
return D_WALK_CONTINUE;
}
static void do_one_tree(struct dentry *dentry)
{
shrink_dcache_parent(dentry);
d_walk(dentry, dentry, umount_check);
d_drop(dentry);
dput(dentry);
}
/*
* destroy the dentries attached to a superblock on unmounting
*/
void shrink_dcache_for_umount(struct super_block *sb)
{
struct dentry *dentry;
rwsem_assert_held_write(&sb->s_umount);
dentry = sb->s_root;
sb->s_root = NULL;
do_one_tree(dentry);
while (!hlist_bl_empty(&sb->s_roots)) {
dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
do_one_tree(dentry);
}
}
static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
struct dentry **victim = _data;
if (d_mountpoint(dentry)) {
*victim = dget_dlock(dentry);
return D_WALK_QUIT;
}
return D_WALK_CONTINUE;
}
/**
* d_invalidate - detach submounts, prune dcache, and drop
* @dentry: dentry to invalidate (aka detach, prune and drop)
*/
void d_invalidate(struct dentry *dentry)
{
bool had_submounts = false;
spin_lock(&dentry->d_lock);
if (d_unhashed(dentry)) {
spin_unlock(&dentry->d_lock);
return;
}
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
/* Negative dentries can be dropped without further checks */
if (!dentry->d_inode)
return;
shrink_dcache_parent(dentry);
for (;;) {
struct dentry *victim = NULL;
d_walk(dentry, &victim, find_submount);
if (!victim) {
if (had_submounts)
shrink_dcache_parent(dentry);
return;
}
had_submounts = true;
detach_mounts(victim);
dput(victim);
}
}
EXPORT_SYMBOL(d_invalidate);
/**
* __d_alloc - allocate a dcache entry
* @sb: filesystem it will belong to
* @name: qstr of the name
*
* Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
* copied and the copy passed in may be reused after this call.
*/
static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
struct dentry *dentry;
char *dname;
int err;
dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
GFP_KERNEL);
if (!dentry)
return NULL;
/*
* We guarantee that the inline name is always NUL-terminated.
* This way the memcpy() done by the name switching in rename
* will still always have a NUL at the end, even if we might
* be overwriting an internal NUL character
*/
dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0;
if (unlikely(!name)) {
name = &slash_name;
dname = dentry->d_shortname.string;
} else if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
struct external_name *p = kmalloc(size + name->len,
GFP_KERNEL_ACCOUNT |
__GFP_RECLAIMABLE);
if (!p) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
atomic_set(&p->count, 1);
dname = p->name;
} else {
dname = dentry->d_shortname.string;
}
dentry->__d_name.len = name->len;
dentry->__d_name.hash = name->hash;
memcpy(dname, name->name, name->len);
dname[name->len] = 0;
/* Make sure we always see the terminating NUL character */
smp_store_release(&dentry->__d_name.name, dname); /* ^^^ */
dentry->d_flags = 0;
lockref_init(&dentry->d_lockref);
seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
dentry->d_inode = NULL;
dentry->d_parent = dentry;
dentry->d_sb = sb;
dentry->d_op = sb->__s_d_op;
dentry->d_flags = sb->s_d_flags;
dentry->d_fsdata = NULL;
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_HLIST_HEAD(&dentry->d_children);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
INIT_HLIST_NODE(&dentry->d_sib);
if (dentry->d_op && dentry->d_op->d_init) {
err = dentry->d_op->d_init(dentry);
if (err) {
if (dname_external(dentry))
kfree(external_name(dentry));
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
}
	this_cpu_inc(nr_dentry);
	return dentry;
}
/**
* d_alloc - allocate a dcache entry
* @parent: parent of entry to allocate
* @name: qstr of the name
*
* Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
* copied and the copy passed in may be reused after this call.
*/
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
struct dentry *dentry = __d_alloc(parent->d_sb, name);
if (!dentry)
return NULL;
spin_lock(&parent->d_lock);
/*
* don't need child lock because it is not subject
* to concurrency here
*/
dentry->d_parent = dget_dlock(parent);
hlist_add_head(&dentry->d_sib, &parent->d_children);
spin_unlock(&parent->d_lock);
return dentry;
}
EXPORT_SYMBOL(d_alloc);
struct dentry *d_alloc_anon(struct super_block *sb)
{
return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);
struct dentry *d_alloc_cursor(struct dentry * parent)
{
struct dentry *dentry = d_alloc_anon(parent->d_sb);
if (dentry) {
dentry->d_flags |= DCACHE_DENTRY_CURSOR;
dentry->d_parent = dget(parent);
}
return dentry;
}
/**
* d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
* @sb: the superblock
* @name: qstr of the name
*
* For a filesystem that just pins its dentries in memory and never
* performs lookups at all, return an unhashed IS_ROOT dentry.
 * This is used for pipes, sockets et al. - the stuff that should
* never be anyone's children or parents. Unlike all other
* dentries, these will not have RCU delay between dropping the
* last reference and freeing them.
*
* The only user is alloc_file_pseudo() and that's what should
* be considered a public interface. Don't use directly.
*/
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
static const struct dentry_operations anon_ops = {
.d_dname = simple_dname
};
struct dentry *dentry = __d_alloc(sb, name);
if (likely(dentry)) {
dentry->d_flags |= DCACHE_NORCU;
/* d_op_flags(&anon_ops) is 0 */
if (!dentry->d_op)
dentry->d_op = &anon_ops;
}
	return dentry;
}
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
struct qstr q;
q.name = name;
q.hash_len = hashlen_string(parent, name);
return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
#define DCACHE_OP_FLAGS \
(DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | \
DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_DELETE | DCACHE_OP_PRUNE | \
DCACHE_OP_REAL)
static unsigned int d_op_flags(const struct dentry_operations *op)
{
unsigned int flags = 0;
if (op) {
if (op->d_hash)
flags |= DCACHE_OP_HASH;
if (op->d_compare)
flags |= DCACHE_OP_COMPARE;
if (op->d_revalidate)
flags |= DCACHE_OP_REVALIDATE;
if (op->d_weak_revalidate)
flags |= DCACHE_OP_WEAK_REVALIDATE;
if (op->d_delete)
flags |= DCACHE_OP_DELETE;
if (op->d_prune)
flags |= DCACHE_OP_PRUNE;
if (op->d_real)
flags |= DCACHE_OP_REAL;
}
return flags;
}
static void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
unsigned int flags = d_op_flags(op);
WARN_ON_ONCE(dentry->d_op);
WARN_ON_ONCE(dentry->d_flags & DCACHE_OP_FLAGS);
dentry->d_op = op;
if (flags)
dentry->d_flags |= flags;
}
void set_default_d_op(struct super_block *s, const struct dentry_operations *ops)
{
unsigned int flags = d_op_flags(ops);
s->__s_d_op = ops;
s->s_d_flags = (s->s_d_flags & ~DCACHE_OP_FLAGS) | flags;
}
EXPORT_SYMBOL(set_default_d_op);
static unsigned d_flags_for_inode(struct inode *inode)
{
unsigned add_flags = DCACHE_REGULAR_TYPE;
if (!inode)
return DCACHE_MISS_TYPE;
	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
add_flags = DCACHE_AUTODIR_TYPE;
else
inode->i_opflags |= IOP_LOOKUP;
}
goto type_determined;
}
	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
add_flags = DCACHE_SYMLINK_TYPE;
goto type_determined;
}
inode->i_opflags |= IOP_NOFOLLOW;
}
if (unlikely(!S_ISREG(inode->i_mode)))
add_flags = DCACHE_SPECIAL_TYPE;
type_determined:
if (unlikely(IS_AUTOMOUNT(inode)))
add_flags |= DCACHE_NEED_AUTOMOUNT;
return add_flags;
}
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
unsigned add_flags = d_flags_for_inode(inode);
WARN_ON(d_in_lookup(dentry));
spin_lock(&dentry->d_lock);
/*
* The negative counter only tracks dentries on the LRU. Don't dec if
* d_lru is on another list.
*/
if ((dentry->d_flags &
(DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
this_cpu_dec(nr_dentry_negative);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
fsnotify_update_flags(dentry);
spin_unlock(&dentry->d_lock);
}
/**
* d_instantiate - fill in inode information for a dentry
* @entry: dentry to complete
* @inode: inode to attach to this dentry
*
* Fill in inode information in the entry.
*
* This turns negative dentries into productive full members
* of society.
*
* NOTE! This assumes that the inode count has been incremented
* (or otherwise set) by the caller to indicate that it is now
* in use by the dcache.
*/
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
spin_unlock(&inode->i_lock);
}
}
EXPORT_SYMBOL(d_instantiate);
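/*
 * Illustrative usage sketch, not part of the original source: a typical
 * create-path caller allocates the inode (so its initial reference is held)
 * and then hands that reference to the dcache via d_instantiate().  The
 * examplefs_* names below are hypothetical.
 *
 *	static int examplefs_create(struct mnt_idmap *idmap, struct inode *dir,
 *				    struct dentry *dentry, umode_t mode, bool excl)
 *	{
 *		struct inode *inode = examplefs_new_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);	// inode reference now owned by the dcache
 *		return 0;
 *	}
 */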
/*
* This should be equivalent to d_instantiate() + unlock_new_inode(),
* with lockdep-related part of unlock_new_inode() done before
* anything else. Use that instead of open-coding d_instantiate()/
* unlock_new_inode() combinations.
*/
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
BUG_ON(!inode);
lockdep_annotate_inode_mutex_key(inode);
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW & ~I_CREATING;
/*
* Pairs with the barrier in prepare_to_wait_event() to make sure
* ___wait_var_event() either sees the bit cleared or
* waitqueue_active() check in wake_up_var() sees the waiter.
*/
smp_mb();
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
struct dentry *d_make_root(struct inode *root_inode)
{
struct dentry *res = NULL;
if (root_inode) {
res = d_alloc_anon(root_inode->i_sb);
if (res)
d_instantiate(res, root_inode);
else
iput(root_inode);
}
	return res;
}
EXPORT_SYMBOL(d_make_root);
static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
struct super_block *sb;
struct dentry *new, *res;
if (!inode)
return ERR_PTR(-ESTALE);
if (IS_ERR(inode))
return ERR_CAST(inode);
sb = inode->i_sb;
res = d_find_any_alias(inode); /* existing alias? */
if (res)
goto out;
new = d_alloc_anon(sb);
if (!new) {
res = ERR_PTR(-ENOMEM);
goto out;
}
security_d_instantiate(new, inode);
spin_lock(&inode->i_lock);
res = __d_find_any_alias(inode); /* recheck under lock */
if (likely(!res)) { /* still no alias, attach a disconnected dentry */
unsigned add_flags = d_flags_for_inode(inode);
if (disconnected)
add_flags |= DCACHE_DISCONNECTED;
spin_lock(&new->d_lock);
__d_set_inode_and_type(new, inode, add_flags);
hlist_add_head(&new->d_u.d_alias, &inode->i_dentry);
if (!disconnected) {
hlist_bl_lock(&sb->s_roots);
hlist_bl_add_head(&new->d_hash, &sb->s_roots);
hlist_bl_unlock(&sb->s_roots);
}
spin_unlock(&new->d_lock);
spin_unlock(&inode->i_lock);
inode = NULL; /* consumed by new->d_inode */
res = new;
} else {
spin_unlock(&inode->i_lock);
dput(new);
}
out:
iput(inode);
return res;
}
/**
* d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
* @inode: inode to allocate the dentry for
*
* Obtain a dentry for an inode resulting from NFS filehandle conversion or
* similar open by handle operations. The returned dentry may be anonymous,
* or may have a full name (if the inode was already in the cache).
*
* When called on a directory inode, we must ensure that the inode only ever
* has one dentry. If a dentry is found, that is returned instead of
* allocating a new one.
*
* On successful return, the reference to the inode has been transferred
* to the dentry. In case of an error the reference on the inode is released.
* To make it easier to use in export operations a %NULL or IS_ERR inode may
* be passed in and the error will be propagated to the return value,
* with a %NULL @inode replaced by ERR_PTR(-ESTALE).
*/
struct dentry *d_obtain_alias(struct inode *inode)
{
return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);
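/*
 * Illustrative usage sketch, not part of the original source: an
 * export_operations ->fh_to_dentry() typically resolves the filehandle to an
 * inode and passes the reference straight to d_obtain_alias(), which also
 * copes with the %NULL / IS_ERR cases described above.  The
 * examplefs_iget_from_fh() helper is hypothetical.
 *
 *	static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		struct inode *inode = examplefs_iget_from_fh(sb, fid, fh_len, fh_type);
 *
 *		return d_obtain_alias(inode);
 *	}
 */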
/**
* d_obtain_root - find or allocate a dentry for a given inode
* @inode: inode to allocate the dentry for
*
* Obtain an IS_ROOT dentry for the root of a filesystem.
*
* We must ensure that directory inodes only ever have one dentry. If a
* dentry is found, that is returned instead of allocating a new one.
*
* On successful return, the reference to the inode has been transferred
* to the dentry. In case of an error the reference on the inode is
 * released. A %NULL or IS_ERR inode may be passed in and the error
 * will be propagated to the return value, with a %NULL @inode
 * replaced by ERR_PTR(-ESTALE).
*/
struct dentry *d_obtain_root(struct inode *inode)
{
return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);
/**
* d_add_ci - lookup or allocate new dentry with case-exact name
* @dentry: the negative dentry that was passed to the parent's lookup func
* @inode: the inode case-insensitive lookup has found
* @name: the case-exact name to be associated with the returned dentry
*
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode; only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
*
* For a case-insensitive lookup match and if the case-exact dentry
* already exists in the dcache, use it and return it.
*
* If no entry exists with the exact case name, allocate new dentry with
* the exact case, and return the spliced entry.
*/
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
struct qstr *name)
{
struct dentry *found, *res;
/*
* First check if a dentry matching the name already exists,
* if not go ahead and create it now.
*/
found = d_hash_and_lookup(dentry->d_parent, name);
if (found) {
iput(inode);
return found;
}
if (d_in_lookup(dentry)) {
found = d_alloc_parallel(dentry->d_parent, name,
dentry->d_wait);
if (IS_ERR(found) || !d_in_lookup(found)) {
iput(inode);
return found;
}
} else {
found = d_alloc(dentry->d_parent, name);
if (!found) {
iput(inode);
return ERR_PTR(-ENOMEM);
}
}
res = d_splice_alias(inode, found);
if (res) {
d_lookup_done(found);
dput(found);
return res;
}
return found;
}
EXPORT_SYMBOL(d_add_ci);
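/*
 * Illustrative usage sketch, not part of the original source: a
 * case-insensitive filesystem's ->lookup() that located @inode under some
 * on-disk spelling would typically build a qstr with that exact spelling and
 * finish with d_add_ci(), so only the canonical name ends up hashed.  The
 * disk_name/disk_name_len variables are hypothetical.
 *
 *	struct qstr exact_name = QSTR_INIT(disk_name, disk_name_len);
 *
 *	return d_add_ci(dentry, inode, &exact_name);
 */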
/**
* d_same_name - compare dentry name with case-exact name
* @dentry: the negative dentry that was passed to the parent's lookup func
* @parent: parent dentry
* @name: the case-exact name to be associated with the returned dentry
*
 * Return: true if the names are the same, false otherwise
*/
bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
const struct qstr *name)
{
if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
if (dentry->d_name.len != name->len)
return false;
return dentry_cmp(dentry, name->name, name->len) == 0;
}
return parent->d_op->d_compare(dentry,
dentry->d_name.len, dentry->d_name.name,
name) == 0;
}
EXPORT_SYMBOL_GPL(d_same_name);
/*
* This is __d_lookup_rcu() when the parent dentry has
* DCACHE_OP_COMPARE, which makes things much nastier.
*/
static noinline struct dentry *__d_lookup_rcu_op_compare(
const struct dentry *parent,
const struct qstr *name,
unsigned *seqp)
{
u64 hashlen = name->hash_len;
struct hlist_bl_head *b = d_hash(hashlen);
struct hlist_bl_node *node;
struct dentry *dentry;
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
int tlen;
const char *tname;
unsigned seq;
seqretry:
seq = raw_seqcount_begin(&dentry->d_seq);
if (dentry->d_parent != parent)
continue;
if (d_unhashed(dentry))
continue;
if (dentry->d_name.hash != hashlen_hash(hashlen))
continue;
tlen = dentry->d_name.len;
tname = dentry->d_name.name;
/* we want a consistent (name,len) pair */
if (read_seqcount_retry(&dentry->d_seq, seq)) {
cpu_relax();
goto seqretry;
}
if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
continue;
*seqp = seq;
return dentry;
}
return NULL;
}
/**
* __d_lookup_rcu - search for a dentry (racy, store-free)
* @parent: parent dentry
* @name: qstr of name we wish to find
* @seqp: returns d_seq value at the point where the dentry was found
* Returns: dentry, or NULL
*
* __d_lookup_rcu is the dcache lookup function for rcu-walk name
* resolution (store-free path walking) design described in
* Documentation/filesystems/path-lookup.txt.
*
* This is not to be used outside core vfs.
*
* __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
* held, and rcu_read_lock held. The returned dentry must not be stored into
* without taking d_lock and checking d_seq sequence count against @seq
* returned here.
*
* Alternatively, __d_lookup_rcu may be called again to look up the child of
* the returned dentry, so long as its parent's seqlock is checked after the
* child is looked up. Thus, an interlocking stepping of sequence lock checks
* is formed, giving integrity down the path walk.
*
* NOTE! The caller *has* to check the resulting dentry against the sequence
* number we've returned before using any of the resulting dentry state!
*/
struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name,
unsigned *seqp)
{
u64 hashlen = name->hash_len;
const unsigned char *str = name->name;
struct hlist_bl_head *b = d_hash(hashlen);
struct hlist_bl_node *node;
struct dentry *dentry;
/*
 * Note: There is significant duplication with __d_lookup which is
 * required to prevent single threaded performance regressions
 * especially on architectures where smp_rmb (in seqcounts) is costly.
* Keep the two functions in sync.
*/
if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
return __d_lookup_rcu_op_compare(parent, name, seqp);
/*
* The hash list is protected using RCU.
*
* Carefully use d_seq when comparing a candidate dentry, to avoid
* races with d_move().
*
* It is possible that concurrent renames can mess up our list
* walk here and result in missing our dentry, resulting in the
* false-negative result. d_lookup() protects against concurrent
* renames using rename_lock seqlock.
*
* See Documentation/filesystems/path-lookup.txt for more details.
*/
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
unsigned seq;
/*
* The dentry sequence count protects us from concurrent
* renames, and thus protects parent and name fields.
*
* The caller must perform a seqcount check in order
* to do anything useful with the returned dentry.
*
* NOTE! We do a "raw" seqcount_begin here. That means that
* we don't wait for the sequence count to stabilize if it
* is in the middle of a sequence change. If we do the slow
* dentry compare, we will do seqretries until it is stable,
* and if we end up with a successful lookup, we actually
* want to exit RCU lookup anyway.
*
* Note that raw_seqcount_begin still *does* smp_rmb(), so
* we are still guaranteed NUL-termination of ->d_name.name.
*/
seq = raw_seqcount_begin(&dentry->d_seq);
if (dentry->d_parent != parent)
continue;
if (d_unhashed(dentry))
continue;
if (dentry->d_name.hash_len != hashlen)
continue;
if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
continue;
*seqp = seq;
return dentry;
}
return NULL;
}
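/*
 * Illustrative sketch, not part of the original source, of the caller-side
 * contract described above: before the result of __d_lookup_rcu() may be used
 * outside rcu-walk, the caller has to take a reference and re-validate the
 * returned @seq, e.g.
 *
 *	dentry = __d_lookup_rcu(parent, name, &seq);
 *	if (dentry) {
 *		if (!lockref_get_not_dead(&dentry->d_lockref))
 *			goto slow_path;
 *		if (read_seqcount_retry(&dentry->d_seq, seq)) {
 *			dput(dentry);
 *			goto slow_path;
 *		}
 *	}
 *
 * which is also the pattern d_alloc_parallel() below uses when it finds an
 * existing dentry.
 */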
/**
* d_lookup - search for a dentry
* @parent: parent dentry
* @name: qstr of name we wish to find
* Returns: dentry, or NULL
*
* d_lookup searches the children of the parent dentry for the name in
* question. If the dentry is found its reference count is incremented and the
* dentry is returned. The caller must use dput to free the entry when it has
* finished using it. %NULL is returned if the dentry does not exist.
*/
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
struct dentry *dentry;
unsigned seq;
do {
seq = read_seqbegin(&rename_lock);
dentry = __d_lookup(parent, name);
if (dentry)
break;
} while (read_seqretry(&rename_lock, seq));
return dentry;
}
EXPORT_SYMBOL(d_lookup);
/**
* __d_lookup - search for a dentry (racy)
* @parent: parent dentry
* @name: qstr of name we wish to find
* Returns: dentry, or NULL
*
* __d_lookup is like d_lookup, however it may (rarely) return a
* false-negative result due to unrelated rename activity.
*
* __d_lookup is slightly faster by avoiding rename_lock read seqlock,
* however it must be used carefully, eg. with a following d_lookup in
* the case of failure.
*
* __d_lookup callers must be commented.
*/
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = d_hash(hash);
struct hlist_bl_node *node;
struct dentry *found = NULL;
struct dentry *dentry;
/*
* Note: There is significant duplication with __d_lookup_rcu which is
* required to prevent single threaded performance regressions
 * especially on architectures where smp_rmb (in seqcounts) is costly.
* Keep the two functions in sync.
*/
/*
* The hash list is protected using RCU.
*
* Take d_lock when comparing a candidate dentry, to avoid races
* with d_move().
*
* It is possible that concurrent renames can mess up our list
* walk here and result in missing our dentry, resulting in the
* false-negative result. d_lookup() protects against concurrent
* renames using rename_lock seqlock.
*
* See Documentation/filesystems/path-lookup.txt for more details.
*/
rcu_read_lock();
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
if (dentry->d_name.hash != hash)
continue;
spin_lock(&dentry->d_lock);
if (dentry->d_parent != parent)
goto next;
if (d_unhashed(dentry))
goto next;
if (!d_same_name(dentry, parent, name))
goto next;
dentry->d_lockref.count++;
found = dentry;
spin_unlock(&dentry->d_lock);
break;
next:
spin_unlock(&dentry->d_lock);
}
rcu_read_unlock();
return found;
}
/**
* d_hash_and_lookup - hash the qstr then search for a dentry
* @dir: Directory to search in
* @name: qstr of name we wish to find
*
* On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
*/
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
/*
* Check for a fs-specific hash function. Note that we must
* calculate the standard hash first, as the d_op->d_hash()
* routine may choose to leave the hash value unchanged.
*/
name->hash = full_name_hash(dir, name->name, name->len);
if (dir->d_flags & DCACHE_OP_HASH) {
int err = dir->d_op->d_hash(dir, name);
if (unlikely(err < 0))
return ERR_PTR(err);
}
return d_lookup(dir, name);
}
/*
* When a file is deleted, we have two options:
* - turn this dentry into a negative dentry
* - unhash this dentry and free it.
*
* Usually, we want to just turn this into
* a negative dentry, but if anybody else is
* currently using the dentry or the inode
* we can't do that and we fall back on removing
* it from the hash queues and waiting for
* it to be deleted later when it has no users
*/
/**
* d_delete - delete a dentry
* @dentry: The dentry to delete
*
* Turn the dentry into a negative dentry if possible, otherwise
* remove it from the hash queues so it can be deleted later
*/
void d_delete(struct dentry * dentry)
{
struct inode *inode = dentry->d_inode;
spin_lock(&inode->i_lock);
spin_lock(&dentry->d_lock);
/*
* Are we the only user?
*/
if (dentry->d_lockref.count == 1) {
if (dentry_negative_policy)
__d_drop(dentry);
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_unlink_inode(dentry);
} else {
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
}
}
EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry *entry)
{
struct hlist_bl_head *b = d_hash(entry->d_name.hash);
hlist_bl_lock(b);
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
/**
* d_rehash - add an entry back to the hash
* @entry: dentry to add to the hash
*
* Adds a dentry to the hash according to its name.
*/
void d_rehash(struct dentry * entry)
{
spin_lock(&entry->d_lock);
__d_rehash(entry);
spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
preempt_disable_nested();
for (;;) {
unsigned n = READ_ONCE(dir->i_dir_seq);
if (!(n & 1) && try_cmpxchg(&dir->i_dir_seq, &n, n + 1))
return n;
cpu_relax();
}
}
static inline void end_dir_add(struct inode *dir, unsigned int n,
wait_queue_head_t *d_wait)
{
smp_store_release(&dir->i_dir_seq, n + 2);
preempt_enable_nested();
if (wq_has_sleeper(d_wait))
wake_up_all(d_wait);
}
static void d_wait_lookup(struct dentry *dentry)
{
if (d_in_lookup(dentry)) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(dentry->d_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&dentry->d_lock);
schedule();
spin_lock(&dentry->d_lock);
} while (d_in_lookup(dentry));
}
}
struct dentry *d_alloc_parallel(struct dentry *parent,
const struct qstr *name,
wait_queue_head_t *wq)
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *new = __d_alloc(parent->d_sb, name);
struct dentry *dentry;
unsigned seq, r_seq, d_seq;
if (unlikely(!new))
return ERR_PTR(-ENOMEM);
new->d_flags |= DCACHE_PAR_LOOKUP;
spin_lock(&parent->d_lock);
new->d_parent = dget_dlock(parent);
hlist_add_head(&new->d_sib, &parent->d_children);
if (parent->d_flags & DCACHE_DISCONNECTED)
new->d_flags |= DCACHE_DISCONNECTED;
spin_unlock(&parent->d_lock);
retry:
rcu_read_lock();
seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
r_seq = read_seqbegin(&rename_lock);
dentry = __d_lookup_rcu(parent, name, &d_seq);
if (unlikely(dentry)) {
if (!lockref_get_not_dead(&dentry->d_lockref)) {
rcu_read_unlock();
goto retry;
}
if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
rcu_read_unlock();
dput(dentry);
goto retry;
}
rcu_read_unlock();
dput(new);
return dentry;
}
if (unlikely(read_seqretry(&rename_lock, r_seq))) {
rcu_read_unlock();
goto retry;
}
if (unlikely(seq & 1)) {
rcu_read_unlock();
goto retry;
}
hlist_bl_lock(b);
if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
hlist_bl_unlock(b);
rcu_read_unlock();
goto retry;
}
/*
* No changes for the parent since the beginning of d_lookup().
* Since all removals from the chain happen with hlist_bl_lock(),
* any potential in-lookup matches are going to stay here until
* we unlock the chain. All fields are stable in everything
* we encounter.
*/
hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
if (dentry->d_name.hash != hash)
continue;
if (dentry->d_parent != parent)
continue;
if (!d_same_name(dentry, parent, name))
continue;
hlist_bl_unlock(b);
/* now we can try to grab a reference */
if (!lockref_get_not_dead(&dentry->d_lockref)) {
rcu_read_unlock();
goto retry;
}
rcu_read_unlock();
/*
* somebody is likely to be still doing lookup for it;
* wait for them to finish
*/
spin_lock(&dentry->d_lock);
d_wait_lookup(dentry);
/*
* it's not in-lookup anymore; in principle we should repeat
* everything from dcache lookup, but it's likely to be what
* d_lookup() would've found anyway. If it is, just return it;
* otherwise we really have to repeat the whole thing.
*/
if (unlikely(dentry->d_name.hash != hash))
goto mismatch;
if (unlikely(dentry->d_parent != parent))
goto mismatch;
if (unlikely(d_unhashed(dentry)))
goto mismatch;
if (unlikely(!d_same_name(dentry, parent, name)))
goto mismatch;
/* OK, it *is* a hashed match; return it */
spin_unlock(&dentry->d_lock);
dput(new);
return dentry;
}
rcu_read_unlock();
new->d_wait = wq;
hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
hlist_bl_unlock(b);
return new;
mismatch:
spin_unlock(&dentry->d_lock);
dput(dentry);
goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
/*
* - Unhash the dentry
* - Retrieve and clear the waitqueue head in dentry
* - Return the waitqueue head
*/
static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
{
wait_queue_head_t *d_wait;
struct hlist_bl_head *b;
lockdep_assert_held(&dentry->d_lock);
b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
d_wait = dentry->d_wait;
dentry->d_wait = NULL;
hlist_bl_unlock(b);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
INIT_LIST_HEAD(&dentry->d_lru);
return d_wait;
}
void __d_lookup_unhash_wake(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
wake_up_all(__d_lookup_unhash(dentry));
spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(__d_lookup_unhash_wake);
/* inode->i_lock held if inode is non-NULL */
static inline void __d_add(struct dentry *dentry, struct inode *inode,
const struct dentry_operations *ops)
{
wait_queue_head_t *d_wait;
struct inode *dir = NULL;
unsigned n;
spin_lock(&dentry->d_lock);
if (unlikely(d_in_lookup(dentry))) {
dir = dentry->d_parent->d_inode;
n = start_dir_add(dir);
d_wait = __d_lookup_unhash(dentry);
}
if (unlikely(ops))
d_set_d_op(dentry, ops);
if (inode) {
unsigned add_flags = d_flags_for_inode(inode);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
fsnotify_update_flags(dentry);
}
__d_rehash(dentry);
if (dir)
end_dir_add(dir, n, d_wait);
spin_unlock(&dentry->d_lock);
if (inode)
spin_unlock(&inode->i_lock);
}
/**
* d_add - add dentry to hash queues
* @entry: dentry to add
* @inode: The inode to attach to this dentry
*
* This adds the entry to the hash queues and initializes @inode.
* The entry was actually filled in earlier during d_alloc().
*/
void d_add(struct dentry *entry, struct inode *inode)
{
if (inode) {
security_d_instantiate(entry, inode);
spin_lock(&inode->i_lock);
}
__d_add(entry, inode, NULL);
}
EXPORT_SYMBOL(d_add);
static void swap_names(struct dentry *dentry, struct dentry *target)
{
if (unlikely(dname_external(target))) {
if (unlikely(dname_external(dentry))) {
/*
* Both external: swap the pointers
*/
swap(target->__d_name.name, dentry->__d_name.name);
} else {
/*
* dentry:internal, target:external. Steal target's
* storage and make target internal.
*/
dentry->__d_name.name = target->__d_name.name;
target->d_shortname = dentry->d_shortname;
target->__d_name.name = target->d_shortname.string;
}
} else {
if (unlikely(dname_external(dentry))) {
/*
* dentry:external, target:internal. Give dentry's
* storage to target and make dentry internal
*/
target->__d_name.name = dentry->__d_name.name;
dentry->d_shortname = target->d_shortname;
dentry->__d_name.name = dentry->d_shortname.string;
} else {
/*
* Both are internal.
*/
for (int i = 0; i < DNAME_INLINE_WORDS; i++)
swap(dentry->d_shortname.words[i],
target->d_shortname.words[i]);
}
}
swap(dentry->__d_name.hash_len, target->__d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
{
struct external_name *old_name = NULL;
if (unlikely(dname_external(dentry)))
old_name = external_name(dentry);
if (unlikely(dname_external(target))) {
atomic_inc(&external_name(target)->count);
dentry->__d_name = target->__d_name;
} else {
dentry->d_shortname = target->d_shortname;
dentry->__d_name.name = dentry->d_shortname.string;
dentry->__d_name.hash_len = target->__d_name.hash_len;
}
if (old_name && likely(atomic_dec_and_test(&old_name->count)))
kfree_rcu(old_name, head);
}
/*
* __d_move - move a dentry
* @dentry: entry to move
* @target: new dentry
* @exchange: exchange the two dentries
*
* Update the dcache to reflect the move of a file name. Negative dcache
* entries should not be moved in this way. Caller must hold rename_lock, the
* i_rwsem of the source and target directories (exclusively), and the sb->
* s_vfs_rename_mutex if they differ. See lock_rename().
*/
static void __d_move(struct dentry *dentry, struct dentry *target,
bool exchange)
{
struct dentry *old_parent, *p;
wait_queue_head_t *d_wait;
struct inode *dir = NULL;
unsigned n;
WARN_ON(!dentry->d_inode);
if (WARN_ON(dentry == target))
return;
BUG_ON(d_ancestor(target, dentry));
old_parent = dentry->d_parent;
p = d_ancestor(old_parent, target);
if (IS_ROOT(dentry)) {
BUG_ON(p);
spin_lock(&target->d_parent->d_lock);
} else if (!p) {
/* target is not a descendent of dentry->d_parent */
spin_lock(&target->d_parent->d_lock);
spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
} else {
BUG_ON(p == dentry);
spin_lock(&old_parent->d_lock);
if (p != target)
spin_lock_nested(&target->d_parent->d_lock,
DENTRY_D_LOCK_NESTED);
}
spin_lock_nested(&dentry->d_lock, 2);
spin_lock_nested(&target->d_lock, 3);
if (unlikely(d_in_lookup(target))) {
dir = target->d_parent->d_inode;
n = start_dir_add(dir);
d_wait = __d_lookup_unhash(target);
}
write_seqcount_begin(&dentry->d_seq);
write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
/* unhash both */
if (!d_unhashed(dentry))
___d_drop(dentry);
if (!d_unhashed(target))
___d_drop(target);
/* ... and switch them in the tree */
dentry->d_parent = target->d_parent;
if (!exchange) {
copy_name(dentry, target);
target->d_hash.pprev = NULL;
dentry->d_parent->d_lockref.count++;
if (dentry != old_parent) /* wasn't IS_ROOT */
WARN_ON(!--old_parent->d_lockref.count);
} else {
target->d_parent = old_parent;
swap_names(dentry, target);
if (!hlist_unhashed(&target->d_sib))
__hlist_del(&target->d_sib);
hlist_add_head(&target->d_sib, &target->d_parent->d_children);
__d_rehash(target);
fsnotify_update_flags(target);
}
if (!hlist_unhashed(&dentry->d_sib))
__hlist_del(&dentry->d_sib);
hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
__d_rehash(dentry);
fsnotify_update_flags(dentry);
fscrypt_handle_d_move(dentry);
write_seqcount_end(&target->d_seq);
write_seqcount_end(&dentry->d_seq);
if (dir)
end_dir_add(dir, n, d_wait);
if (dentry->d_parent != old_parent)
spin_unlock(&dentry->d_parent->d_lock);
if (dentry != old_parent)
spin_unlock(&old_parent->d_lock);
spin_unlock(&target->d_lock);
spin_unlock(&dentry->d_lock);
}
/*
* d_move - move a dentry
* @dentry: entry to move
* @target: new dentry
*
* Update the dcache to reflect the move of a file name. Negative
* dcache entries should not be moved in this way. See the locking
* requirements for __d_move.
*/
void d_move(struct dentry *dentry, struct dentry *target)
{
write_seqlock(&rename_lock);
__d_move(dentry, target, false);
write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
/*
* d_exchange - exchange two dentries
* @dentry1: first dentry
* @dentry2: second dentry
*/
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
write_seqlock(&rename_lock);
WARN_ON(!dentry1->d_inode);
WARN_ON(!dentry2->d_inode);
WARN_ON(IS_ROOT(dentry1));
WARN_ON(IS_ROOT(dentry2));
__d_move(dentry1, dentry2, true);
write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_exchange);
/**
* d_ancestor - search for an ancestor
* @p1: ancestor dentry
* @p2: child dentry
*
* Returns the ancestor dentry of p2 which is a child of p1, if p1 is
* an ancestor of p2, else NULL.
*/
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
struct dentry *p;
for (p = p2; !IS_ROOT(p); p = p->d_parent) {
if (p->d_parent == p1)
return p;
}
return NULL;
}
/*
* This helper attempts to cope with remotely renamed directories
*
* It assumes that the caller is already holding
* dentry->d_parent->d_inode->i_rwsem, and rename_lock
*
* Note: If ever the locking in lock_rename() changes, then please
* remember to update this too...
*/
static int __d_unalias(struct dentry *dentry, struct dentry *alias)
{
struct mutex *m1 = NULL;
struct rw_semaphore *m2 = NULL;
int ret = -ESTALE;
/* If alias and dentry share a parent, then no extra locks required */
if (alias->d_parent == dentry->d_parent)
goto out_unalias;
/* See lock_rename() */
if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
goto out_err;
m1 = &dentry->d_sb->s_vfs_rename_mutex;
if (!inode_trylock_shared(alias->d_parent->d_inode))
goto out_err;
m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
if (alias->d_op && alias->d_op->d_unalias_trylock &&
!alias->d_op->d_unalias_trylock(alias))
goto out_err;
__d_move(alias, dentry, false);
if (alias->d_op && alias->d_op->d_unalias_unlock)
alias->d_op->d_unalias_unlock(alias);
ret = 0;
out_err:
if (m2)
up_read(m2);
if (m1)
mutex_unlock(m1);
return ret;
}
struct dentry *d_splice_alias_ops(struct inode *inode, struct dentry *dentry,
const struct dentry_operations *ops)
{
if (IS_ERR(inode))
return ERR_CAST(inode);
BUG_ON(!d_unhashed(dentry));
if (!inode)
goto out;
security_d_instantiate(dentry, inode);
spin_lock(&inode->i_lock);
if (S_ISDIR(inode->i_mode)) {
struct dentry *new = __d_find_any_alias(inode);
if (unlikely(new)) {
/* The reference to new ensures it remains an alias */
spin_unlock(&inode->i_lock);
write_seqlock(&rename_lock);
if (unlikely(d_ancestor(new, dentry))) {
write_sequnlock(&rename_lock);
dput(new);
new = ERR_PTR(-ELOOP);
pr_warn_ratelimited(
"VFS: Lookup of '%s' in %s %s"
" would have caused loop\n",
dentry->d_name.name,
inode->i_sb->s_type->name,
inode->i_sb->s_id);
} else if (!IS_ROOT(new)) {
struct dentry *old_parent = dget(new->d_parent);
int err = __d_unalias(dentry, new);
write_sequnlock(&rename_lock);
if (err) {
dput(new);
new = ERR_PTR(err);
}
dput(old_parent);
} else {
__d_move(new, dentry, false);
write_sequnlock(&rename_lock);
}
iput(inode);
return new;
}
}
out:
__d_add(dentry, inode, ops);
return NULL;
}
/**
* d_splice_alias - splice a disconnected dentry into the tree if one exists
* @inode: the inode which may have a disconnected dentry
* @dentry: a negative dentry which we want to point to the inode.
*
* If inode is a directory and has an IS_ROOT alias, then d_move that in
* place of the given dentry and return it, else simply d_add the inode
* to the dentry and return NULL.
*
* If a non-IS_ROOT directory is found, the filesystem is corrupt, and
* we should error out: directories can't have multiple aliases.
*
* This is needed in the lookup routine of any filesystem that is exportable
* (via knfsd) so that we can build dcache paths to directories effectively.
*
* If a dentry was found and moved, then it is returned. Otherwise NULL
* is returned. This matches the expected return value of ->lookup.
*
* Cluster filesystems may call this function with a negative, hashed dentry.
* In that case, we know that the inode will be a regular file, and also this
* will only occur during atomic_open. So we need to check for the dentry
* being already hashed only in the final case.
*/
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
return d_splice_alias_ops(inode, dentry, NULL);
}
EXPORT_SYMBOL(d_splice_alias);
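/*
 * Illustrative usage sketch, not part of the original source: the common
 * ->lookup() pattern in exportable filesystems resolves the name to an inode
 * (%NULL for a negative entry, ERR_PTR() on failure) and lets
 * d_splice_alias() handle aliasing and the return-value convention.  The
 * examplefs_lookup_inode() helper is hypothetical.
 *
 *	static struct dentry *examplefs_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = examplefs_lookup_inode(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 */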
/*
* Test whether new_dentry is a subdirectory of old_dentry.
*
* Trivially implemented using the dcache structure
*/
/**
* is_subdir - is new dentry a subdirectory of old_dentry
* @new_dentry: new dentry
* @old_dentry: old dentry
*
* Returns true if new_dentry is a subdirectory of the parent (at any depth).
* Returns false otherwise.
* Caller must ensure that "new_dentry" is pinned before calling is_subdir()
*/
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
bool subdir;
unsigned seq;
	if (new_dentry == old_dentry)
		return true;
	/* Access d_parent under rcu as d_move() may change it. */
	rcu_read_lock();
	seq = read_seqbegin(&rename_lock);
	subdir = d_ancestor(old_dentry, new_dentry);
	/* Try lockless once... */
	if (read_seqretry(&rename_lock, seq)) {
		/* ...else acquire lock for progress even on deep chains. */
		read_seqlock_excl(&rename_lock);
		subdir = d_ancestor(old_dentry, new_dentry);
		read_sequnlock_excl(&rename_lock);
	}
	rcu_read_unlock();
	return subdir;
}
EXPORT_SYMBOL(is_subdir);
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
struct dentry *root = data;
if (dentry != root) {
if (d_unhashed(dentry) || !dentry->d_inode)
return D_WALK_SKIP;
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
dentry->d_lockref.count--;
}
}
return D_WALK_CONTINUE;
}
void d_genocide(struct dentry *parent)
{
d_walk(parent, parent, d_genocide_kill);
}
void d_mark_tmpfile(struct file *file, struct inode *inode)
{
struct dentry *dentry = file->f_path.dentry;
BUG_ON(dname_external(dentry) ||
!hlist_unhashed(&dentry->d_u.d_alias) ||
!d_unlinked(dentry));
spin_lock(&dentry->d_parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
dentry->__d_name.len = sprintf(dentry->d_shortname.string, "#%llu",
(unsigned long long)inode->i_ino);
spin_unlock(&dentry->d_lock);
spin_unlock(&dentry->d_parent->d_lock);
}
EXPORT_SYMBOL(d_mark_tmpfile);
void d_tmpfile(struct file *file, struct inode *inode)
{
struct dentry *dentry = file->f_path.dentry;
inode_dec_link_count(inode);
d_mark_tmpfile(file, inode);
d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
/*
* Obtain inode number of the parent dentry.
*/
ino_t d_parent_ino(struct dentry *dentry)
{
struct dentry *parent;
struct inode *iparent;
unsigned seq;
ino_t ret;
scoped_guard(rcu) {
seq = raw_seqcount_begin(&dentry->d_seq);
parent = READ_ONCE(dentry->d_parent);
iparent = d_inode_rcu(parent);
if (likely(iparent)) {
ret = iparent->i_ino;
if (!read_seqcount_retry(&dentry->d_seq, seq))
return ret;
}
}
spin_lock(&dentry->d_lock);
ret = dentry->d_parent->d_inode->i_ino;
spin_unlock(&dentry->d_lock);
return ret;
}
EXPORT_SYMBOL(d_parent_ino);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
if (!str)
return 0;
dhash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init_early(void)
{
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
if (hashdist)
return;
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
sizeof(struct hlist_bl_head),
dhash_entries,
13,
HASH_EARLY | HASH_ZERO,
&d_hash_shift,
NULL,
0,
0);
d_hash_shift = 32 - d_hash_shift;
runtime_const_init(shift, d_hash_shift);
runtime_const_init(ptr, dentry_hashtable);
}
static void __init dcache_init(void)
{
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
* of the dcache.
*/
dentry_cache = KMEM_CACHE_USERCOPY(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
d_shortname.string);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
return;
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
sizeof(struct hlist_bl_head),
dhash_entries,
13,
HASH_ZERO,
&d_hash_shift,
NULL,
0,
0);
d_hash_shift = 32 - d_hash_shift;
runtime_const_init(shift, d_hash_shift);
runtime_const_init(ptr, dentry_hashtable);
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __ro_after_init;
EXPORT_SYMBOL(names_cachep);
void __init vfs_caches_init_early(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
dcache_init_early();
inode_init_early();
}
void __init vfs_caches_init(void)
{
names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
dcache_init();
inode_init();
files_init();
files_maxfiles_init();
mnt_init();
bdev_cache_init();
chrdev_init();
}
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
(C) 2002 Nadia Yvette Chambers, IBM */
#include <asm/types.h>
#include <linux/compiler.h>
/*
* The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and
* fs/inode.c. It's not actually prime any more (the previous primes
* were actively bad for hashing), but the name remains.
*/
#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#else
#error Wordsize not 32 or 64
#endif
/*
* This hash multiplies the input by a large odd number and takes the
* high bits. Since multiplication propagates changes to the most
* significant end only, it is essential that the high bits of the
* product be used for the hash value.
*
* Chuck Lever verified the effectiveness of this technique:
* http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
*
* Although a random odd number will do, it turns out that the golden
* ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
* properties. (See Knuth vol 3, section 6.4, exercise 9.)
*
* These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
* which is very slightly easier to multiply by and makes no
* difference to the hash distribution.
*/
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull
#ifdef CONFIG_HAVE_ARCH_HASH
/* This header may use the GOLDEN_RATIO_xx constants */
#include <asm/hash.h>
#endif
/*
* The _generic versions exist only so lib/test_hash.c can compare
* the arch-optimized versions with the generic.
*
* Note that if you change these, any <asm/hash.h> that aren't updated
* to match need to have their HAVE_ARCH_* define values updated so the
* self-test will not false-positive.
*/
#ifndef HAVE_ARCH__HASH_32
#define __hash_32 __hash_32_generic
#endif
static inline u32 __hash_32_generic(u32 val)
{
return val * GOLDEN_RATIO_32;
}
static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
}
#ifndef HAVE_ARCH_HASH_64
#define hash_64 hash_64_generic
#endif
static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
{
#if BITS_PER_LONG == 64
/* 64x64-bit multiply is efficient on all 64-bit processors */
return val * GOLDEN_RATIO_64 >> (64 - bits);
#else
/* Hash 64 bits using only 32x32-bit multiply. */
return hash_32((u32)val ^ __hash_32(val >> 32), bits);
#endif
}
static inline u32 hash_ptr(const void *ptr, unsigned int bits)
{
return hash_long((unsigned long)ptr, bits);
}
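/*
 * Illustrative usage sketch, not part of the original header: the usual
 * pattern is to size a table as a power of two and index it with the top
 * @bits of the multiplicative hash.  The example_* names are hypothetical.
 *
 *	#define EXAMPLE_HASH_BITS	7
 *	static struct hlist_head example_table[1 << EXAMPLE_HASH_BITS];
 *
 *	static inline struct hlist_head *example_bucket(const void *key)
 *	{
 *		return &example_table[hash_ptr(key, EXAMPLE_HASH_BITS)];
 *	}
 */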
/* This really should be called fold32_ptr; it does no hashing to speak of. */
static inline u32 hash32_ptr(const void *ptr)
{
unsigned long val = (unsigned long)ptr;
#if BITS_PER_LONG == 64
val ^= (val >> 32);
#endif
return (u32)val;
}
#endif /* _LINUX_HASH_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_GETORDER_H
#define __ASM_GENERIC_GETORDER_H
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/log2.h>
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
*
* Determine the allocation order of a particular sized block of memory. This
* is on a logarithmic scale, where:
*
* 0 -> 2^0 * PAGE_SIZE and below
* 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
* 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
* 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
* 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
* ...
*
* The order returned is used to find the smallest allocation granule required
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
*/
static __always_inline __attribute_const__ int get_order(unsigned long size)
{
if (__builtin_constant_p(size)) {
if (!size)
return BITS_PER_LONG - PAGE_SHIFT;
if (size < (1UL << PAGE_SHIFT))
return 0;
return ilog2((size) - 1) - PAGE_SHIFT + 1;
}
size--;
size >>= PAGE_SHIFT;
#if BITS_PER_LONG == 32
return fls(size);
#else
return fls64(size);
#endif
}
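/*
 * Worked example, not part of the original header, assuming a 4 KiB
 * PAGE_SIZE: get_order(4096) == 0, get_order(4097) == 1, get_order(8192) == 1
 * and get_order(32768) == 3, i.e. the order is the smallest n such that
 * 2^n pages cover @size.  A typical caller:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, get_order(size));
 */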
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_GETORDER_H */
// SPDX-License-Identifier: GPL-2.0
/*
* linux/ipc/sem.c
* Copyright (C) 1992 Krishna Balasubramanian
* Copyright (C) 1995 Eric Schenk, Bruno Haible
*
* /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
*
* SMP-threaded, sysctl's added
* (c) 1999 Manfred Spraul <manfred@colorfullife.com>
* Enforced range limit on SEM_UNDO
* (c) 2001 Red Hat Inc
* Lockless wakeup
* (c) 2003 Manfred Spraul <manfred@colorfullife.com>
* (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
* Further wakeup optimizations, documentation
* (c) 2010 Manfred Spraul <manfred@colorfullife.com>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*
* Implementation notes: (May 2010)
* This file implements System V semaphores.
*
* User space visible behavior:
* - FIFO ordering for semop() operations (just FIFO, not starvation
* protection)
* - multiple semaphore operations that alter the same semaphore in
* one semop() are handled.
* - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
* SETALL calls.
* - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
* - undo adjustments at process exit are limited to 0..SEMVMX.
* - namespaces are supported.
* - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
* to /proc/sys/kernel/sem.
* - statistics about the usage are reported in /proc/sysvipc/sem.
*
* Internals:
* - scalability:
* - all global variables are read-mostly.
* - semop() calls and semctl(RMID) are synchronized by RCU.
* - most operations do write operations (actually: spin_lock calls) to
* the per-semaphore array structure.
* Thus: Perfect SMP scaling between independent semaphore arrays.
* If multiple semaphores in one array are used, then cache line
* thrashing on the semaphore array spinlock will limit the scaling.
* - semncnt and semzcnt are calculated on demand in count_semcnt()
* - the task that performs a successful semop() scans the list of all
* sleeping tasks and completes any pending operations that can be fulfilled.
* Semaphores are actively given to waiting tasks (necessary for FIFO).
* (see update_queue())
* - To improve the scalability, the actual wake-up calls are performed after
* dropping all locks. (see wake_up_sem_queue_prepare())
* - All work is done by the waker, the woken up task does not have to do
* anything - not even acquiring a lock or dropping a refcount.
* - A woken up task may not even touch the semaphore array anymore, it may
* have been destroyed already by a semctl(RMID).
* - UNDO values are stored in an array (one per process and per
* semaphore array, lazily allocated). For backwards compatibility, multiple
* modes for the UNDO variables are supported (per process, per thread)
* (see copy_semundo, CLONE_SYSVSEM)
* - There are two lists of the pending operations: a per-array list
* and per-semaphore list (stored in the array). This allows to achieve FIFO
* ordering without always scanning all pending operations.
* The worst-case behavior is nevertheless O(N^2) for N wakeups.
*/
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
#include <linux/rhashtable.h>
#include <linux/uaccess.h>
#include "util.h"
/* One semaphore structure for each semaphore in the system. */
struct sem {
int semval; /* current value */
/*
* PID of the process that last modified the semaphore. For
* Linux, specifically these are:
* - semop
* - semctl, via SETVAL and SETALL.
* - at task exit when performing undo adjustments (see exit_sem).
*/
struct pid *sempid;
spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head pending_alter; /* pending single-sop operations */
/* that alter the semaphore */
struct list_head pending_const; /* pending single-sop operations */
/* that do not alter the semaphore*/
time64_t sem_otime; /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
time64_t sem_ctime; /* create/last semctl() time */
struct list_head pending_alter; /* pending operations */
/* that alter the array */
struct list_head pending_const; /* pending complex operations */
/* that do not alter semvals */
struct list_head list_id; /* undo requests on this array */
int sem_nsems; /* no. of semaphores in array */
int complex_count; /* pending complex operations */
unsigned int use_global_lock;/* >0: global lock required */
struct sem sems[];
} __randomize_layout;
/* One queue for each sleeping process in the system. */
struct sem_queue {
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
struct pid *pid; /* process id of requesting process */
int status; /* completion status of operation */
struct sembuf *sops; /* array of pending operations */
struct sembuf *blocking; /* the operation that blocked */
int nsops; /* number of operations */
bool alter; /* does *sops alter the array? */
bool dupsop; /* sops on more than one sem_num */
};
/* Each task has a list of undo requests. They are executed automatically
* when the process exits.
*/
struct sem_undo {
struct list_head list_proc; /* per-process list: *
* all undos from one process
* rcu protected */
struct rcu_head rcu; /* rcu struct for sem_undo */
struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
struct list_head list_id; /* per semaphore array list:
* all undos for one array */
int semid; /* semaphore set identifier */
short semadj[]; /* array of adjustments */
/* one per semaphore */
};
/* sem_undo_list controls shared access to the list of sem_undo structures
* that may be shared among all tasks in a CLONE_SYSVSEM task group.
*/
struct sem_undo_list {
refcount_t refcnt;
spinlock_t lock;
struct list_head list_proc;
};
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif
#define SEMMSL_FAST 256 /* 512 bytes on stack */
#define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
/*
* Switching from the mode suitable for simple ops
* to the mode for complex ops is costly. Therefore:
* use some hysteresis
*/
#define USE_GLOBAL_LOCK_HYSTERESIS 10
/*
* Locking:
* a) global sem_lock() for read/write
* sem_undo.id_next,
* sem_array.complex_count,
* sem_array.pending{_alter,_const},
* sem_array.sem_undo
*
* b) global or semaphore sem_lock() for read/write:
* sem_array.sems[i].pending_{const,alter}:
*
* c) special:
* sem_undo_list.list_proc:
* * undo_list->lock for write
* * rcu for read
* use_global_lock:
* * global sem_lock() for write
* * either local or global sem_lock() for read.
*
* Memory ordering:
* Most ordering is enforced by using spin_lock() and spin_unlock().
*
* Exceptions:
* 1) use_global_lock: (SEM_BARRIER_1)
* Setting it from non-zero to 0 is a RELEASE, this is ensured by
* using smp_store_release(): Immediately after setting it to 0,
* a simple op can start.
* Testing if it is non-zero is an ACQUIRE, this is ensured by using
* smp_load_acquire().
* Setting it from 0 to non-zero must be ordered with regards to
* this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
* is inside a spin_lock() and after a write from 0 to non-zero a
* spin_lock()+spin_unlock() is done.
* To prevent the compiler/cpu temporarily writing 0 to use_global_lock,
* READ_ONCE()/WRITE_ONCE() is used.
*
* 2) queue.status: (SEM_BARRIER_2)
* Initialization is done while holding sem_lock(), so no further barrier is
* required.
* Setting it to a result code is a RELEASE, this is ensured by both a
* smp_store_release() (for case a) and while holding sem_lock()
* (for case b).
* The ACQUIRE when reading the result code without holding sem_lock() is
* achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
* (case a above).
* Reading the result code while holding sem_lock() needs no further barriers,
* the locks inside sem_lock() enforce ordering (case b above)
*
* 3) current->state:
* current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
* The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
* happen immediately after calling wake_q_add. As wake_q_add_safe() is called
* when holding sem_lock(), no further barriers are required.
*
* See also ipc/mqueue.c for more details on the covered races.
*/
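/*
 * Illustrative sketch of the SEM_BARRIER_1 pairing described above, reduced
 * to its two halves (hypothetical helpers; the real code is in
 * complexmode_tryleave() and sem_lock() below):
 */
#if 0
static void example_leave_global_mode(struct sem_array *sma)
{
	/*
	 * RELEASE: everything done under the global lock is visible before
	 * a simple op can observe use_global_lock == 0.
	 */
	smp_store_release(&sma->use_global_lock, 0);
}

static bool example_simple_op_allowed(struct sem_array *sma)
{
	/* ACQUIRE: pairs with the release above */
	return smp_load_acquire(&sma->use_global_lock) == 0;
}
#endif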
#define sc_semmsl sem_ctls[0]
#define sc_semmns sem_ctls[1]
#define sc_semopm sem_ctls[2]
#define sc_semmni sem_ctls[3]
void sem_init_ns(struct ipc_namespace *ns)
{
ns->sc_semmsl = SEMMSL;
ns->sc_semmns = SEMMNS;
ns->sc_semopm = SEMOPM;
ns->sc_semmni = SEMMNI;
ns->used_sems = 0;
ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif
void __init sem_init(void)
{
sem_init_ns(&init_ipc_ns);
ipc_init_proc_interface("sysvipc/sem",
" key semid perms nsems uid gid cuid cgid otime ctime\n",
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
/**
* unmerge_queues - unmerge queues, if possible.
* @sma: semaphore array
*
* The function unmerges the wait queues if complex_count is 0.
* It must be called prior to dropping the global semaphore array lock.
*/
static void unmerge_queues(struct sem_array *sma)
{
struct sem_queue *q, *tq;
/* complex operations still around? */
if (sma->complex_count)
return;
/*
* We will switch back to simple mode.
* Move all pending operation back into the per-semaphore
* queues.
*/
list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
struct sem *curr;
curr = &sma->sems[q->sops[0].sem_num];
list_add_tail(&q->list, &curr->pending_alter);
}
INIT_LIST_HEAD(&sma->pending_alter);
}
/**
* merge_queues - merge single semop queues into global queue
* @sma: semaphore array
*
* This function merges all per-semaphore queues into the global queue.
* It is necessary to achieve FIFO ordering for the pending single-sop
* operations when a multi-semop operation must sleep.
* Only the alter operations must be moved, the const operations can stay.
*/
static void merge_queues(struct sem_array *sma)
{
int i;
for (i = 0; i < sma->sem_nsems; i++) {
struct sem *sem = &sma->sems[i];
list_splice_init(&sem->pending_alter, &sma->pending_alter);
}
}
static void sem_rcu_free(struct rcu_head *head)
{
struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
security_sem_free(&sma->sem_perm);
kvfree(sma);
}
/*
* Enter the mode suitable for non-simple operations:
* Caller must own sem_perm.lock.
*/
static void complexmode_enter(struct sem_array *sma)
{
int i;
struct sem *sem;
if (sma->use_global_lock > 0) {
/*
* We are already in global lock mode.
* Nothing to do, just reset the
* counter until we return to simple mode.
*/
WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
return;
}
WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
for (i = 0; i < sma->sem_nsems; i++) {
sem = &sma->sems[i];
spin_lock(&sem->lock);
spin_unlock(&sem->lock);
}
}
/*
* Try to leave the mode that disallows simple operations:
* Caller must own sem_perm.lock.
*/
static void complexmode_tryleave(struct sem_array *sma)
{
if (sma->complex_count) {
/* Complex ops are sleeping.
* We must stay in complex mode
*/
return;
}
if (sma->use_global_lock == 1) {
/* See SEM_BARRIER_1 for purpose/pairing */
smp_store_release(&sma->use_global_lock, 0);
} else {
WRITE_ONCE(sma->use_global_lock,
sma->use_global_lock-1);
}
}
#define SEM_GLOBAL_LOCK (-1)
/*
* If the request contains only one semaphore operation, and there are
* no complex transactions pending, lock only the semaphore involved.
* Otherwise, lock the entire semaphore array, since we either have
* multiple semaphores in our own semops, or we need to look at
* semaphores from other pending complex operations.
*/
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
int nsops)
{
struct sem *sem;
int idx;
if (nsops != 1) {
/* Complex operation - acquire a full lock */
ipc_lock_object(&sma->sem_perm);
/* Prevent parallel simple ops */
complexmode_enter(sma);
return SEM_GLOBAL_LOCK;
}
/*
* Only one semaphore affected - try to optimize locking.
* Optimized locking is possible if no complex operation
* is either enqueued or processed right now.
*
* Both facts are tracked by use_global_lock.
*/
idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
sem = &sma->sems[idx];
/*
* Initial check for use_global_lock. Just an optimization,
* no locking, no memory barrier.
*/
if (!READ_ONCE(sma->use_global_lock)) {
/*
* It appears that no complex operation is around.
* Acquire the per-semaphore lock.
*/
spin_lock(&sem->lock);
/* see SEM_BARRIER_1 for purpose/pairing */
if (!smp_load_acquire(&sma->use_global_lock)) {
/* fast path successful! */
return sops->sem_num;
}
spin_unlock(&sem->lock);
}
/* slow path: acquire the full lock */
ipc_lock_object(&sma->sem_perm);
if (sma->use_global_lock == 0) {
/*
* The use_global_lock mode ended while we waited for
* sma->sem_perm.lock. Thus we must switch to locking
* with sem->lock.
* Unlike in the fast path, there is no need to recheck
* sma->use_global_lock after we have acquired sem->lock:
* We own sma->sem_perm.lock, thus use_global_lock cannot
* change.
*/
spin_lock(&sem->lock);
ipc_unlock_object(&sma->sem_perm);
return sops->sem_num;
} else {
/*
* Not a false alarm, thus continue to use the global lock
* mode. No need for complexmode_enter(), this was done by
* the caller that has set use_global_lock to non-zero.
*/
return SEM_GLOBAL_LOCK;
}
}
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
if (locknum == SEM_GLOBAL_LOCK) {
unmerge_queues(sma);
complexmode_tryleave(sma);
ipc_unlock_object(&sma->sem_perm);
} else {
struct sem *sem = &sma->sems[locknum];
spin_unlock(&sem->lock);
}
}
/*
* sem_lock_(check_) routines are called in the paths where the rwsem
* is not held.
*
* The caller holds the RCU read lock.
*/
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
static struct sem_array *sem_alloc(size_t nsems)
{
struct sem_array *sma;
if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
return NULL;
sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
if (unlikely(!sma))
return NULL;
return sma;
}
/**
* newary - Create a new semaphore set
* @ns: namespace
* @params: ptr to the structure that contains key, semflg and nsems
*
* Called with sem_ids.rwsem held (as a writer)
*/
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
int retval;
struct sem_array *sma;
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
int i;
if (!nsems)
return -EINVAL;
if (ns->used_sems + nsems > ns->sc_semmns)
return -ENOSPC;
sma = sem_alloc(nsems);
if (!sma)
return -ENOMEM;
sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
retval = security_sem_alloc(&sma->sem_perm);
if (retval) {
kvfree(sma);
return retval;
}
for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sems[i].pending_alter);
INIT_LIST_HEAD(&sma->sems[i].pending_const);
spin_lock_init(&sma->sems[i].lock);
}
sma->complex_count = 0;
sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
INIT_LIST_HEAD(&sma->pending_alter);
INIT_LIST_HEAD(&sma->pending_const);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = ktime_get_real_seconds();
/* ipc_addid() locks sma upon success. */
retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
if (retval < 0) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return retval;
}
ns->used_sems += nsems;
sem_unlock(sma, -1);
rcu_read_unlock();
return sma->sem_perm.id;
}
/*
* Called with sem_ids.rwsem and ipcp locked.
*/
static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
struct sem_array *sma;
sma = container_of(ipcp, struct sem_array, sem_perm);
if (params->u.nsems > sma->sem_nsems)
return -EINVAL;
return 0;
}
long ksys_semget(key_t key, int nsems, int semflg)
{
struct ipc_namespace *ns;
static const struct ipc_ops sem_ops = {
.getnew = newary,
.associate = security_sem_associate,
.more_checks = sem_more_checks,
};
struct ipc_params sem_params;
ns = current->nsproxy->ipc_ns;
if (nsems < 0 || nsems > ns->sc_semmsl)
return -EINVAL;
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
return ksys_semget(key, nsems, semflg);
}
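/*
 * Illustrative userspace sketch (not part of this file): creating a set of
 * three semaphores with semget(). The key and permission bits are
 * hypothetical; nsems is validated against sc_semmsl in ksys_semget() above.
 */
#if 0
#include <sys/ipc.h>
#include <sys/sem.h>

static int example_create_set(void)
{
	/* create (or look up) a set of 3 semaphores, read/write for the owner */
	return semget((key_t)0x1234, 3, IPC_CREAT | 0600);
}
#endif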
/**
* perform_atomic_semop[_slow] - Attempt to perform semaphore
* operations on a given array.
* @sma: semaphore array
* @q: struct sem_queue that describes the operation
*
* Whether the caller blocks depends on the value of the semaphore
* operation (sem_op):
*
* (1) >0 never blocks.
* (2)  0 (wait-for-zero operation): blocks while semval is non-zero.
* (3) <0 blocks if it would decrement semval to a value smaller than zero.
*
* Returns 0 if the operation was possible.
* Returns 1 if the operation is impossible, the caller must sleep.
* Returns <0 for error codes.
*/
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
int result, sem_op, nsops;
struct pid *pid;
struct sembuf *sop;
struct sem *curr;
struct sembuf *sops;
struct sem_undo *un;
sops = q->sops;
nsops = q->nsops;
un = q->undo;
for (sop = sops; sop < sops + nsops; sop++) {
int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
curr = &sma->sems[idx];
sem_op = sop->sem_op;
result = curr->semval;
if (!sem_op && result)
goto would_block;
result += sem_op;
if (result < 0)
goto would_block;
if (result > SEMVMX)
goto out_of_range;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
/* Exceeding the undo range is an error. */
if (undo < (-SEMAEM - 1) || undo > SEMAEM)
goto out_of_range;
un->semadj[sop->sem_num] = undo;
}
curr->semval = result;
}
sop--;
pid = q->pid;
while (sop >= sops) {
ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
sop--;
}
return 0;
out_of_range:
result = -ERANGE;
goto undo;
would_block:
q->blocking = sop;
if (sop->sem_flg & IPC_NOWAIT)
result = -EAGAIN;
else
result = 1;
undo:
sop--;
while (sop >= sops) {
sem_op = sop->sem_op;
sma->sems[sop->sem_num].semval -= sem_op;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] += sem_op;
sop--;
}
return result;
}
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
int result, sem_op, nsops;
struct sembuf *sop;
struct sem *curr;
struct sembuf *sops;
struct sem_undo *un;
sops = q->sops;
nsops = q->nsops;
un = q->undo;
if (unlikely(q->dupsop))
return perform_atomic_semop_slow(sma, q);
/*
* We scan the semaphore set twice: first to ensure that the entire
* operation can succeed, thereby avoiding pointless writes to shared
* memory that would have to be undone again if we end up having to
* block until the operations can go through.
*/
for (sop = sops; sop < sops + nsops; sop++) {
int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
curr = &sma->sems[idx];
sem_op = sop->sem_op;
result = curr->semval;
if (!sem_op && result)
goto would_block; /* wait-for-zero */
result += sem_op;
if (result < 0)
goto would_block;
if (result > SEMVMX)
return -ERANGE;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
/* Exceeding the undo range is an error. */
if (undo < (-SEMAEM - 1) || undo > SEMAEM)
return -ERANGE;
}
}
for (sop = sops; sop < sops + nsops; sop++) {
curr = &sma->sems[sop->sem_num];
sem_op = sop->sem_op;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
un->semadj[sop->sem_num] = undo;
}
curr->semval += sem_op;
ipc_update_pid(&curr->sempid, q->pid);
}
return 0;
would_block:
q->blocking = sop;
return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
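/*
 * Illustrative userspace sketch (not part of this file) of the three sem_op
 * cases documented in the comment above perform_atomic_semop_slow(). The
 * semaphore id and numbers are hypothetical.
 */
#if 0
#include <sys/sem.h>

static int example_semop(int semid)
{
	struct sembuf ops[] = {
		/* (1) increment: never blocks */
		{ .sem_num = 0, .sem_op = 1, .sem_flg = 0 },
		/* (2) wait-for-zero: blocks while semval != 0 */
		{ .sem_num = 1, .sem_op = 0, .sem_flg = 0 },
		/* (3) decrement: blocks if it would take semval below 0 */
		{ .sem_num = 2, .sem_op = -1, .sem_flg = SEM_UNDO },
	};

	/* all three operations are applied atomically, or none is */
	return semop(semid, ops, 3);
}
#endif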
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
struct wake_q_head *wake_q)
{
struct task_struct *sleeper;
sleeper = get_task_struct(q->sleeper);
/* see SEM_BARRIER_2 for purpose/pairing */
smp_store_release(&q->status, error);
wake_q_add_safe(wake_q, sleeper);
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
list_del(&q->list);
if (q->nsops > 1)
sma->complex_count--;
}
/** check_restart(sma, q)
* @sma: semaphore array
* @q: the operation that just completed
*
* update_queue is O(N^2) when it restarts scanning the whole queue of
* waiting operations. Therefore this function checks if the restart is
* really necessary. It is called after a previously waiting operation
* modified the array.
* Note that wait-for-zero operations are handled without restart.
*/
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
/* pending complex alter operations are too difficult to analyse */
if (!list_empty(&sma->pending_alter))
return 1;
/* we were a sleeping complex operation. Too difficult */
if (q->nsops > 1)
return 1;
/* It is impossible that someone waits for the new value:
* - complex operations always restart.
* - wait-for-zero are handled separately.
* - q is a previously sleeping simple operation that
* altered the array. It must be a decrement, because
* simple increments never sleep.
* - If there are older (higher priority) decrements
* in the queue, then they have observed the original
* semval value and couldn't proceed. The operation
* decremented the value - thus they won't proceed either.
*/
return 0;
}
/**
* wake_const_ops - wake up non-alter tasks
* @sma: semaphore array.
* @semnum: semaphore that was modified.
* @wake_q: lockless wake-queue head.
*
* wake_const_ops must be called after a semaphore in a semaphore array
* was set to 0. If complex const operations are pending, wake_const_ops must
* be called with semnum = -1, as well as with the number of each modified
* semaphore.
* The tasks that must be woken up are added to @wake_q. The return code
* is stored in q->status.
* The function returns 1 if at least one operation was completed successfully.
*/
static int wake_const_ops(struct sem_array *sma, int semnum,
struct wake_q_head *wake_q)
{
struct sem_queue *q, *tmp;
struct list_head *pending_list;
int semop_completed = 0;
if (semnum == -1)
pending_list = &sma->pending_const;
else
pending_list = &sma->sems[semnum].pending_const;
list_for_each_entry_safe(q, tmp, pending_list, list) {
int error = perform_atomic_semop(sma, q);
if (error > 0)
continue;
/* operation completed, remove from queue & wakeup */
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, error, wake_q);
if (error == 0)
semop_completed = 1;
}
return semop_completed;
}
/**
* do_smart_wakeup_zero - wake up all wait-for-zero tasks
* @sma: semaphore array
* @sops: operations that were performed
* @nsops: number of operations
* @wake_q: lockless wake-queue head
*
* Checks all required queues for wait-for-zero operations, based
* on the actual changes that were performed on the semaphore array.
* The function returns 1 if at least one operation was completed successfully.
*/
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
int nsops, struct wake_q_head *wake_q)
{
int i;
int semop_completed = 0;
int got_zero = 0;
/* first: the per-semaphore queues, if known */
if (sops) {
for (i = 0; i < nsops; i++) {
int num = sops[i].sem_num;
if (sma->sems[num].semval == 0) {
got_zero = 1;
semop_completed |= wake_const_ops(sma, num, wake_q);
}
}
} else {
/*
* No sops means modified semaphores not known.
* Assume all were changed.
*/
for (i = 0; i < sma->sem_nsems; i++) {
if (sma->sems[i].semval == 0) {
got_zero = 1;
semop_completed |= wake_const_ops(sma, i, wake_q);
}
}
}
/*
* If one of the modified semaphores got 0,
* then check the global queue, too.
*/
if (got_zero)
semop_completed |= wake_const_ops(sma, -1, wake_q);
return semop_completed;
}
/**
* update_queue - look for tasks that can be completed.
* @sma: semaphore array.
* @semnum: semaphore that was modified.
* @wake_q: lockless wake-queue head.
*
* update_queue must be called after a semaphore in a semaphore array
* was modified. If multiple semaphores were modified, update_queue must
* be called with semnum = -1, as well as with the number of each modified
* semaphore.
* The tasks that must be woken up are added to @wake_q. The return code
* is stored in q->status.
* The function internally checks if const operations can now succeed.
*
* The function returns 1 if at least one semop was completed successfully.
*/
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
struct sem_queue *q, *tmp;
struct list_head *pending_list;
int semop_completed = 0;
if (semnum == -1)
pending_list = &sma->pending_alter;
else
pending_list = &sma->sems[semnum].pending_alter;
again:
list_for_each_entry_safe(q, tmp, pending_list, list) {
int error, restart;
/* If we are scanning the single sop, per-semaphore list of
* one semaphore and that semaphore is 0, then it is not
* necessary to scan further: simple increments
* that affect only one entry succeed immediately and cannot
* be in the per semaphore pending queue, and decrements
* cannot be successful if the value is already 0.
*/
if (semnum != -1 && sma->sems[semnum].semval == 0)
break;
error = perform_atomic_semop(sma, q);
/* Does q->sleeper still need to sleep? */
if (error > 0)
continue;
unlink_queue(sma, q);
if (error) {
restart = 0;
} else {
semop_completed = 1;
do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
restart = check_restart(sma, q);
}
wake_up_sem_queue_prepare(q, error, wake_q);
if (restart)
goto again;
}
return semop_completed;
}
/**
* set_semotime - set sem_otime
* @sma: semaphore array
* @sops: operations that modified the array, may be NULL
*
* sem_otime is replicated to avoid cache line thrashing.
* This function sets one instance to the current time.
*/
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
if (sops == NULL) {
sma->sems[0].sem_otime = ktime_get_real_seconds();
} else {
sma->sems[sops[0].sem_num].sem_otime =
ktime_get_real_seconds();
}
}
/**
* do_smart_update - optimized update_queue
* @sma: semaphore array
* @sops: operations that were performed
* @nsops: number of operations
* @otime: force setting otime
* @wake_q: lockless wake-queue head
*
* do_smart_update() does the required calls to update_queue and wakeup_zero,
* based on the actual changes that were performed on the semaphore array.
* Note that the function does not do the actual wake-up: the caller is
* responsible for calling wake_up_q().
* It is safe to perform this call after dropping all locks.
*/
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
int otime, struct wake_q_head *wake_q)
{
int i;
otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
if (!list_empty(&sma->pending_alter)) {
/* semaphore array uses the global queue - just process it. */
otime |= update_queue(sma, -1, wake_q);
} else {
if (!sops) {
/*
* No sops, thus the modified semaphores are not
* known. Check all.
*/
for (i = 0; i < sma->sem_nsems; i++)
otime |= update_queue(sma, i, wake_q);
} else {
/*
* Check the semaphores that were increased:
* - No complex ops, thus all sleeping ops are
* decrements.
* - if we decreased the value, then any sleeping
* semaphore ops won't be able to run: If the
* previous value was too small, then the new
* value will be too small, too.
*/
for (i = 0; i < nsops; i++) {
if (sops[i].sem_op > 0) {
otime |= update_queue(sma,
sops[i].sem_num, wake_q);
}
}
}
}
if (otime)
set_semotime(sma, sops);
}
/*
* check_qop: Test if a queued operation sleeps on the semaphore semnum
*/
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
bool count_zero)
{
struct sembuf *sop = q->blocking;
/*
* Linux always (since 0.99.10) reported a task as sleeping on all
* semaphores. This violates SUS, therefore it was changed to the
* standard compliant behavior.
* Give the administrators a chance to notice that an application
* might misbehave because it relies on the Linux behavior.
*/
pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
"The task %s (%d) triggered the difference, watch for misbehavior.\n",
current->comm, task_pid_nr(current));
if (sop->sem_num != semnum)
return 0;
if (count_zero && sop->sem_op == 0)
return 1;
if (!count_zero && sop->sem_op < 0)
return 1;
return 0;
}
/* The following counts are associated to each semaphore:
* semncnt number of tasks waiting on semval being nonzero
* semzcnt number of tasks waiting on semval being zero
*
* Per definition, a task waits only on the semaphore of the first semop
* that cannot proceed, even if additional operations would block, too.
*/
static int count_semcnt(struct sem_array *sma, ushort semnum,
bool count_zero)
{
struct list_head *l;
struct sem_queue *q;
int semcnt;
semcnt = 0;
/* First: check the simple operations. They are easy to evaluate */
if (count_zero)
l = &sma->sems[semnum].pending_const;
else
l = &sma->sems[semnum].pending_alter;
list_for_each_entry(q, l, list) {
/* all tasks on a per-semaphore list sleep on exactly
* that semaphore
*/
semcnt++;
}
/* Then: check the complex operations. */
list_for_each_entry(q, &sma->pending_alter, list) {
semcnt += check_qop(sma, semnum, q, count_zero);
}
if (count_zero) {
list_for_each_entry(q, &sma->pending_const, list) {
semcnt += check_qop(sma, semnum, q, count_zero);
}
}
return semcnt;
}
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
* as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
* remains locked on exit.
*/
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct sem_undo *un, *tu;
struct sem_queue *q, *tq;
struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
int i;
DEFINE_WAKE_Q(wake_q);
/* Free the existing undo structures for this semaphore set. */
ipc_assert_locked_object(&sma->sem_perm);
list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
list_del(&un->list_id);
spin_lock(&un->ulp->lock);
un->semid = -1;
list_del_rcu(&un->list_proc);
spin_unlock(&un->ulp->lock);
kvfree_rcu(un, rcu);
}
/* Wake up all pending processes and let them fail with EIDRM. */
list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
}
list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
}
for (i = 0; i < sma->sem_nsems; i++) {
struct sem *sem = &sma->sems[i];
list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
}
list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
}
ipc_update_pid(&sem->sempid, NULL);
}
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
sem_unlock(sma, -1);
rcu_read_unlock();
wake_up_q(&wake_q);
ns->used_sems -= sma->sem_nsems;
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
switch (version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct semid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
out.sem_otime = in->sem_otime;
out.sem_ctime = in->sem_ctime;
out.sem_nsems = in->sem_nsems;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
static time64_t get_semotime(struct sem_array *sma)
{
int i;
time64_t res;
res = sma->sems[0].sem_otime;
for (i = 1; i < sma->sem_nsems; i++) {
time64_t to = sma->sems[i].sem_otime;
if (to > res)
res = to;
}
return res;
}
static int semctl_stat(struct ipc_namespace *ns, int semid,
int cmd, struct semid64_ds *semid64)
{
struct sem_array *sma;
time64_t semotime;
int err;
memset(semid64, 0, sizeof(*semid64));
rcu_read_lock();
if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
sma = sem_obtain_object(ns, semid);
if (IS_ERR(sma)) {
err = PTR_ERR(sma);
goto out_unlock;
}
} else { /* IPC_STAT */
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
err = PTR_ERR(sma);
goto out_unlock;
}
}
/* see comment for SHM_STAT_ANY */
if (cmd == SEM_STAT_ANY)
audit_ipc_obj(&sma->sem_perm);
else {
err = -EACCES;
if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
goto out_unlock;
}
err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_unlock;
ipc_lock_object(&sma->sem_perm);
if (!ipc_valid_object(&sma->sem_perm)) {
ipc_unlock_object(&sma->sem_perm);
err = -EIDRM;
goto out_unlock;
}
kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
semotime = get_semotime(sma);
semid64->sem_otime = semotime;
semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
semid64->sem_otime_high = semotime >> 32;
semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
semid64->sem_nsems = sma->sem_nsems;
if (cmd == IPC_STAT) {
/*
* As defined in SUS:
* Return 0 on success
*/
err = 0;
} else {
/*
* SEM_STAT and SEM_STAT_ANY (both Linux specific)
* Return the full id, including the sequence number
*/
err = sma->sem_perm.id;
}
ipc_unlock_object(&sma->sem_perm);
out_unlock:
rcu_read_unlock();
return err;
}
static int semctl_info(struct ipc_namespace *ns, int semid,
int cmd, void __user *p)
{
struct seminfo seminfo;
int max_idx;
int err;
err = security_sem_semctl(NULL, cmd);
if (err)
return err;
memset(&seminfo, 0, sizeof(seminfo));
seminfo.semmni = ns->sc_semmni;
seminfo.semmns = ns->sc_semmns;
seminfo.semmsl = ns->sc_semmsl;
seminfo.semopm = ns->sc_semopm;
seminfo.semvmx = SEMVMX;
seminfo.semmnu = SEMMNU;
seminfo.semmap = SEMMAP;
seminfo.semume = SEMUME;
down_read(&sem_ids(ns).rwsem);
if (cmd == SEM_INFO) {
seminfo.semusz = sem_ids(ns).in_use;
seminfo.semaem = ns->used_sems;
} else {
seminfo.semusz = SEMUSZ;
seminfo.semaem = SEMAEM;
}
max_idx = ipc_get_maxidx(&sem_ids(ns));
up_read(&sem_ids(ns).rwsem);
if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
return -EFAULT;
return (max_idx < 0) ? 0 : max_idx;
}
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
int val)
{
struct sem_undo *un;
struct sem_array *sma;
struct sem *curr;
int err;
DEFINE_WAKE_Q(wake_q);
if (val > SEMVMX || val < 0)
return -ERANGE;
rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
rcu_read_unlock();
return PTR_ERR(sma);
}
if (semnum < 0 || semnum >= sma->sem_nsems) {
rcu_read_unlock();
return -EINVAL;
}
if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
rcu_read_unlock();
return -EACCES;
}
err = security_sem_semctl(&sma->sem_perm, SETVAL);
if (err) {
rcu_read_unlock();
return -EACCES;
}
sem_lock(sma, NULL, -1);
if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1);
rcu_read_unlock();
return -EIDRM;
}
semnum = array_index_nospec(semnum, sma->sem_nsems);
curr = &sma->sems[semnum];
ipc_assert_locked_object(&sma->sem_perm);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
curr->semval = val;
ipc_update_pid(&curr->sempid, task_tgid(current));
sma->sem_ctime = ktime_get_real_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &wake_q);
sem_unlock(sma, -1);
rcu_read_unlock();
wake_up_q(&wake_q);
return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int cmd, void __user *p)
{
struct sem_array *sma;
struct sem *curr;
int err, nsems;
ushort fast_sem_io[SEMMSL_FAST];
ushort *sem_io = fast_sem_io;
DEFINE_WAKE_Q(wake_q);
rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
rcu_read_unlock();
return PTR_ERR(sma);
}
nsems = sma->sem_nsems;
err = -EACCES;
if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
goto out_rcu_wakeup;
err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_rcu_wakeup;
switch (cmd) {
case GETALL:
{
ushort __user *array = p;
int i;
sem_lock(sma, NULL, -1);
if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM;
goto out_unlock;
}
if (nsems > SEMMSL_FAST) {
if (!ipc_rcu_getref(&sma->sem_perm)) {
err = -EIDRM;
goto out_unlock;
}
sem_unlock(sma, -1);
rcu_read_unlock();
sem_io = kvmalloc_array(nsems, sizeof(ushort),
GFP_KERNEL);
if (sem_io == NULL) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return -ENOMEM;
}
rcu_read_lock();
sem_lock_and_putref(sma);
if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM;
goto out_unlock;
}
}
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sems[i].semval;
sem_unlock(sma, -1);
rcu_read_unlock();
err = 0;
if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
goto out_free;
}
case SETALL:
{
int i;
struct sem_undo *un;
if (!ipc_rcu_getref(&sma->sem_perm)) {
err = -EIDRM;
goto out_rcu_wakeup;
}
rcu_read_unlock();
if (nsems > SEMMSL_FAST) {
sem_io = kvmalloc_array(nsems, sizeof(ushort),
GFP_KERNEL);
if (sem_io == NULL) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return -ENOMEM;
}
}
if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
err = -EFAULT;
goto out_free;
}
for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
err = -ERANGE;
goto out_free;
}
}
rcu_read_lock();
sem_lock_and_putref(sma);
if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM;
goto out_unlock;
}
for (i = 0; i < nsems; i++) {
sma->sems[i].semval = sem_io[i];
ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
}
ipc_assert_locked_object(&sma->sem_perm);
list_for_each_entry(un, &sma->list_id, list_id) {
for (i = 0; i < nsems; i++)
un->semadj[i] = 0;
}
sma->sem_ctime = ktime_get_real_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &wake_q);
err = 0;
goto out_unlock;
}
/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
}
err = -EINVAL;
if (semnum < 0 || semnum >= nsems)
goto out_rcu_wakeup;
sem_lock(sma, NULL, -1);
if (!ipc_valid_object(&sma->sem_perm)) {
err = -EIDRM;
goto out_unlock;
}
semnum = array_index_nospec(semnum, nsems);
curr = &sma->sems[semnum];
switch (cmd) {
case GETVAL:
err = curr->semval;
goto out_unlock;
case GETPID:
err = pid_vnr(curr->sempid);
goto out_unlock;
case GETNCNT:
err = count_semcnt(sma, semnum, 0);
goto out_unlock;
case GETZCNT:
err = count_semcnt(sma, semnum, 1);
goto out_unlock;
}
out_unlock:
sem_unlock(sma, -1);
out_rcu_wakeup:
rcu_read_unlock();
wake_up_q(&wake_q);
out_free:
if (sem_io != fast_sem_io)
kvfree(sem_io);
return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
switch (version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct semid_ds tbuf_old;
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->sem_perm.uid = tbuf_old.sem_perm.uid;
out->sem_perm.gid = tbuf_old.sem_perm.gid;
out->sem_perm.mode = tbuf_old.sem_perm.mode;
return 0;
}
default:
return -EINVAL;
}
}
/*
* This function handles some semctl commands which require the rwsem
* to be held in write mode.
* NOTE: no locks must be held, the rwsem is taken inside this function.
*/
static int semctl_down(struct ipc_namespace *ns, int semid,
int cmd, struct semid64_ds *semid64)
{
struct sem_array *sma;
int err;
struct kern_ipc_perm *ipcp;
down_write(&sem_ids(ns).rwsem);
rcu_read_lock();
ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
&semid64->sem_perm, 0);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto out_unlock1;
}
sma = container_of(ipcp, struct sem_array, sem_perm);
err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_unlock1;
switch (cmd) {
case IPC_RMID:
sem_lock(sma, NULL, -1);
/* freeary unlocks the ipc object and rcu */
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64->sem_perm, ipcp);
if (err)
goto out_unlock0;
sma->sem_ctime = ktime_get_real_seconds();
break;
default:
err = -EINVAL;
goto out_unlock1;
}
out_unlock0:
sem_unlock(sma, -1);
out_unlock1:
rcu_read_unlock();
out_up:
up_write(&sem_ids(ns).rwsem);
return err;
}
static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
struct ipc_namespace *ns;
void __user *p = (void __user *)arg;
struct semid64_ds semid64;
int err;
if (semid < 0)
return -EINVAL;
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case SEM_INFO:
return semctl_info(ns, semid, cmd, p);
case IPC_STAT:
case SEM_STAT:
case SEM_STAT_ANY:
err = semctl_stat(ns, semid, cmd, &semid64);
if (err < 0)
return err;
if (copy_semid_to_user(p, &semid64, version))
err = -EFAULT;
return err;
case GETALL:
case GETVAL:
case GETPID:
case GETNCNT:
case GETZCNT:
case SETALL:
return semctl_main(ns, semid, semnum, cmd, p);
case SETVAL: {
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
/* big-endian 64bit */
val = arg >> 32;
#else
/* 32bit or little-endian 64bit */
val = arg;
#endif
return semctl_setval(ns, semid, semnum, val);
}
case IPC_SET:
if (copy_semid_from_user(&semid64, p, version))
return -EFAULT;
fallthrough;
case IPC_RMID:
return semctl_down(ns, semid, cmd, &semid64);
default:
return -EINVAL;
}
}
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
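/*
 * Illustrative userspace sketch (not part of this file): SETVAL and GETVAL
 * through semctl(). Per SUS the caller defines union semun itself; the id
 * and the value below are hypothetical.
 */
#if 0
#include <sys/sem.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

static int example_setval_getval(int semid)
{
	union semun arg = { .val = 1 };

	if (semctl(semid, 0, SETVAL, arg) < 0)	/* set semaphore 0 to 1 */
		return -1;

	return semctl(semid, 0, GETVAL);	/* read semaphore 0 back */
}
#endif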
#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
int version = ipc_parse_version(&cmd);
return ksys_semctl(semid, semnum, cmd, arg, version);
}
SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
return ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif
#ifdef CONFIG_COMPAT
struct compat_semid_ds {
struct compat_ipc_perm sem_perm;
old_time32_t sem_otime;
old_time32_t sem_ctime;
compat_uptr_t sem_base;
compat_uptr_t sem_pending;
compat_uptr_t sem_pending_last;
compat_uptr_t undo;
unsigned short sem_nsems;
};
static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
int version)
{
memset(out, 0, sizeof(*out));
if (version == IPC_64) {
struct compat_semid64_ds __user *p = buf;
return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
} else {
struct compat_semid_ds __user *p = buf;
return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
}
}
static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
int version)
{
if (version == IPC_64) {
struct compat_semid64_ds v;
memset(&v, 0, sizeof(v));
to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
v.sem_otime = lower_32_bits(in->sem_otime);
v.sem_otime_high = upper_32_bits(in->sem_otime);
v.sem_ctime = lower_32_bits(in->sem_ctime);
v.sem_ctime_high = upper_32_bits(in->sem_ctime);
v.sem_nsems = in->sem_nsems;
return copy_to_user(buf, &v, sizeof(v));
} else {
struct compat_semid_ds v;
memset(&v, 0, sizeof(v));
to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
v.sem_otime = in->sem_otime;
v.sem_ctime = in->sem_ctime;
v.sem_nsems = in->sem_nsems;
return copy_to_user(buf, &v, sizeof(v));
}
}
static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
{
void __user *p = compat_ptr(arg);
struct ipc_namespace *ns;
struct semid64_ds semid64;
int err;
ns = current->nsproxy->ipc_ns;
if (semid < 0)
return -EINVAL;
switch (cmd & (~IPC_64)) {
case IPC_INFO:
case SEM_INFO:
return semctl_info(ns, semid, cmd, p);
case IPC_STAT:
case SEM_STAT:
case SEM_STAT_ANY:
err = semctl_stat(ns, semid, cmd, &semid64);
if (err < 0)
return err;
if (copy_compat_semid_to_user(p, &semid64, version))
err = -EFAULT;
return err;
case GETVAL:
case GETPID:
case GETNCNT:
case GETZCNT:
case GETALL:
case SETALL:
return semctl_main(ns, semid, semnum, cmd, p);
case SETVAL:
return semctl_setval(ns, semid, semnum, arg);
case IPC_SET:
if (copy_compat_semid_from_user(&semid64, p, version))
return -EFAULT;
fallthrough;
case IPC_RMID:
return semctl_down(ns, semid, cmd, &semid64);
default:
return -EINVAL;
}
}
COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
{
int version = compat_ipc_parse_version(&cmd);
return compat_ksys_semctl(semid, semnum, cmd, arg, version);
}
COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
{
return compat_ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif
#endif
/* If the task doesn't already have an undo_list, then allocate one
* here. We guarantee there is only one thread using this undo list,
* and current is THE ONE
*
* If this allocation and assignment succeeds, but later
* portions of this code fail, there is no need to free the sem_undo_list.
* Just let it stay associated with the task, and it'll be freed later
* at exit time.
*
* This can block, so callers must hold no locks.
*/
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
if (!undo_list) {
undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
if (undo_list == NULL)
return -ENOMEM;
spin_lock_init(&undo_list->lock);
refcount_set(&undo_list->refcnt, 1);
INIT_LIST_HEAD(&undo_list->list_proc);
current->sysvsem.undo_list = undo_list;
}
*undo_listp = undo_list;
return 0;
}
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
spin_is_locked(&ulp->lock)) {
if (un->semid == semid)
return un;
}
return NULL;
}
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
assert_spin_locked(&ulp->lock);
un = __lookup_undo(ulp, semid);
if (un) {
list_del_rcu(&un->list_proc);
list_add_rcu(&un->list_proc, &ulp->list_proc);
}
return un;
}
/**
* find_alloc_undo - lookup (and if not present create) undo array
* @ns: namespace
* @semid: semaphore array id
*
* The function looks up (and if not present creates) the undo structure.
* The size of the undo structure depends on the size of the semaphore
* array, thus the alloc path is not that straightforward.
* Lifetime-rules: sem_undo is rcu-protected, on success, the function
* performs a rcu_read_lock().
*/
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
int nsems, error;
error = get_undo_list(&ulp);
if (error)
return ERR_PTR(error);
rcu_read_lock();
spin_lock(&ulp->lock);
un = lookup_undo(ulp, semid);
spin_unlock(&ulp->lock);
if (likely(un != NULL))
goto out;
/* no undo structure around - allocate one. */
/* step 1: figure out the size of the semaphore array */
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
rcu_read_unlock();
return ERR_CAST(sma);
}
nsems = sma->sem_nsems;
if (!ipc_rcu_getref(&sma->sem_perm)) {
rcu_read_unlock();
un = ERR_PTR(-EIDRM);
goto out;
}
rcu_read_unlock();
/* step 2: allocate new undo structure */
new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT);
if (!new) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return ERR_PTR(-ENOMEM);
}
/* step 3: Acquire the lock on semaphore array */
rcu_read_lock();
sem_lock_and_putref(sma);
if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1);
rcu_read_unlock();
kvfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
spin_lock(&ulp->lock);
/*
* step 4: check for races: did someone else allocate the undo struct?
*/
un = lookup_undo(ulp, semid);
if (un) {
spin_unlock(&ulp->lock);
kvfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
new->ulp = ulp;
new->semid = semid;
assert_spin_locked(&ulp->lock);
list_add_rcu(&new->list_proc, &ulp->list_proc);
ipc_assert_locked_object(&sma->sem_perm);
list_add(&new->list_id, &sma->list_id);
un = new;
spin_unlock(&ulp->lock);
success:
sem_unlock(sma, -1);
out:
return un;
}
long __do_semtimedop(int semid, struct sembuf *sops,
unsigned nsops, const struct timespec64 *timeout,
struct ipc_namespace *ns)
{
int error = -EINVAL;
struct sem_array *sma;
struct sembuf *sop;
struct sem_undo *un;
int max, locknum;
bool undos = false, alter = false, dupsop = false;
struct sem_queue queue;
unsigned long dup = 0;
ktime_t expires, *exp = NULL;
bool timed_out = false;
if (nsops < 1 || semid < 0)
return -EINVAL;
if (nsops > ns->sc_semopm)
return -E2BIG;
if (timeout) {
if (!timespec64_valid(timeout))
return -EINVAL;
expires = ktime_add_safe(ktime_get(),
timespec64_to_ktime(*timeout));
exp = &expires;
}
max = 0;
for (sop = sops; sop < sops + nsops; sop++) {
unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
if (sop->sem_num >= max)
max = sop->sem_num;
if (sop->sem_flg & SEM_UNDO)
undos = true;
if (dup & mask) {
/*
* There was a previous alter access that appears
* to have accessed the same semaphore, thus use
* the dupsop logic. "appears", because the detection
* can only check % BITS_PER_LONG.
*/
dupsop = true;
}
if (sop->sem_op != 0) {
alter = true;
dup |= mask;
}
}
if (undos) {
/* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out;
}
} else {
un = NULL;
rcu_read_lock();
}
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
rcu_read_unlock();
error = PTR_ERR(sma);
goto out;
}
error = -EFBIG;
if (max >= sma->sem_nsems) {
rcu_read_unlock();
goto out;
}
error = -EACCES;
if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
rcu_read_unlock();
goto out;
}
error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
if (error) {
rcu_read_unlock();
goto out;
}
error = -EIDRM;
locknum = sem_lock(sma, sops, nsops);
/*
* We eventually might perform the following check in a lockless
* fashion, considering ipc_valid_object() locking constraints.
* If nsops == 1 and there is no contention for sem_perm.lock, then
* only a per-semaphore lock is held and it's OK to proceed with the
* check below. More details on the fine grained locking scheme
* entangled here and why it's RMID race safe on comments at sem_lock()
*/
if (!ipc_valid_object(&sma->sem_perm))
goto out_unlock;
/*
* semid identifiers are not unique - find_alloc_undo may have
* allocated an undo structure, it was invalidated by an RMID
* and a new array has since received the same id. Check and fail.
* This case can be detected by checking un->semid. The existence of
* "un" itself is guaranteed by rcu.
*/
if (un && un->semid == -1)
goto out_unlock;
queue.sops = sops;
queue.nsops = nsops;
queue.undo = un;
queue.pid = task_tgid(current);
queue.alter = alter;
queue.dupsop = dupsop;
error = perform_atomic_semop(sma, &queue);
if (error == 0) { /* non-blocking successful path */
DEFINE_WAKE_Q(wake_q);
/*
* If the operation was successful, then do
* the required updates.
*/
if (alter)
do_smart_update(sma, sops, nsops, 1, &wake_q);
else
set_semotime(sma, sops);
sem_unlock(sma, locknum);
rcu_read_unlock();
wake_up_q(&wake_q);
goto out;
}
if (error < 0) /* non-blocking error path */
goto out_unlock;
/*
* We need to sleep on this operation, so we put the current
* task into the pending queue and go to sleep.
*/
if (nsops == 1) {
struct sem *curr;
int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
curr = &sma->sems[idx];
if (alter) {
if (sma->complex_count) {
list_add_tail(&queue.list,
&sma->pending_alter);
} else {
list_add_tail(&queue.list,
&curr->pending_alter);
}
} else {
list_add_tail(&queue.list, &curr->pending_const);
}
} else {
if (!sma->complex_count)
merge_queues(sma);
if (alter)
list_add_tail(&queue.list, &sma->pending_alter);
else
list_add_tail(&queue.list, &sma->pending_const);
sma->complex_count++;
}
do {
/* memory ordering ensured by the lock in sem_lock() */
WRITE_ONCE(queue.status, -EINTR);
queue.sleeper = current;
/* memory ordering is ensured by the lock in sem_lock() */
__set_current_state(TASK_INTERRUPTIBLE);
sem_unlock(sma, locknum);
rcu_read_unlock();
timed_out = !schedule_hrtimeout_range(exp,
current->timer_slack_ns, HRTIMER_MODE_ABS);
/*
* fastpath: the semop has completed, either successfully or
* not; which of the two is, from the syscall pov, quite
* irrelevant to us at this point - we're done.
*
* We _do_ care, nonetheless, about being awoken by a signal or
* spuriously. The queue.status is checked again in the
* slowpath (aka after taking sem_lock), such that we can detect
* scenarios where we were awakened externally, during the
* window between wake_q_add() and wake_up_q().
*/
rcu_read_lock();
error = READ_ONCE(queue.status);
if (error != -EINTR) {
/* see SEM_BARRIER_2 for purpose/pairing */
smp_acquire__after_ctrl_dep();
rcu_read_unlock();
goto out;
}
locknum = sem_lock(sma, sops, nsops);
if (!ipc_valid_object(&sma->sem_perm))
goto out_unlock;
/*
* No need for any barrier: we are protected by sem_lock()
*/
error = READ_ONCE(queue.status);
/*
* If queue.status != -EINTR we are woken up by another process.
* Leave without unlink_queue(), but with sem_unlock().
*/
if (error != -EINTR)
goto out_unlock;
/*
* If an interrupt occurred we have to clean up the queue.
*/
if (timed_out)
error = -EAGAIN;
} while (error == -EINTR && !signal_pending(current)); /* spurious */
unlink_queue(sma, &queue);
out_unlock:
sem_unlock(sma, locknum);
rcu_read_unlock();
out:
return error;
}
static long do_semtimedop(int semid, struct sembuf __user *tsops,
unsigned nsops, const struct timespec64 *timeout)
{
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf *sops = fast_sops;
struct ipc_namespace *ns;
int ret;
ns = current->nsproxy->ipc_ns;
if (nsops > ns->sc_semopm)
return -E2BIG;
if (nsops < 1)
return -EINVAL;
if (nsops > SEMOPM_FAST) {
sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
if (sops == NULL)
return -ENOMEM;
}
if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
ret = -EFAULT;
goto out_free;
}
ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
out_free:
if (sops != fast_sops)
kvfree(sops);
return ret;
}
long ksys_semtimedop(int semid, struct sembuf __user *tsops,
unsigned int nsops, const struct __kernel_timespec __user *timeout)
{
if (timeout) {
struct timespec64 ts;
if (get_timespec64(&ts, timeout))
return -EFAULT;
return do_semtimedop(semid, tsops, nsops, &ts);
}
return do_semtimedop(semid, tsops, nsops, NULL);
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
{
return ksys_semtimedop(semid, tsops, nsops, timeout);
}
#ifdef CONFIG_COMPAT_32BIT_TIME
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
const struct old_timespec32 __user *timeout)
{
if (timeout) {
struct timespec64 ts;
if (get_old_timespec32(&ts, timeout))
return -EFAULT;
return do_semtimedop(semid, tsems, nsops, &ts);
}
return do_semtimedop(semid, tsems, nsops, NULL);
}
SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
unsigned int, nsops,
const struct old_timespec32 __user *, timeout)
{
return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
#endif
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
unsigned, nsops)
{
return do_semtimedop(semid, tsops, nsops, NULL);
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
* parent and child tasks.
*/
int copy_semundo(u64 clone_flags, struct task_struct *tsk)
{
struct sem_undo_list *undo_list;
int error;
if (clone_flags & CLONE_SYSVSEM) {
error = get_undo_list(&undo_list);
if (error)
return error;
refcount_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
} else
tsk->sysvsem.undo_list = NULL;
return 0;
}
/*
* add semadj values to semaphores, free undo structures.
* undo structures are not freed when semaphore arrays are destroyed
* so some of them may be out of date.
* IMPLEMENTATION NOTE: There is some confusion over whether the
* set of adjustments that needs to be done should be done in an atomic
* manner or not. That is, if we are attempting to decrement the semval
* should we queue up and wait until we can do so legally?
* The original implementation attempted to do this (queue and wait).
* The current implementation does not do so. The POSIX standard
* and SVID should be consulted to determine what behavior is mandated.
*/
void exit_sem(struct task_struct *tsk)
{
struct sem_undo_list *ulp;
ulp = tsk->sysvsem.undo_list;
if (!ulp)
return;
tsk->sysvsem.undo_list = NULL;
if (!refcount_dec_and_test(&ulp->refcnt))
return;
for (;;) {
struct sem_array *sma;
struct sem_undo *un;
int semid, i;
DEFINE_WAKE_Q(wake_q);
cond_resched();
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
if (&un->list_proc == &ulp->list_proc) {
/*
* We must wait for freeary() before freeing this ulp,
* in case we raced with last sem_undo. There is a small
* possibility where we exit while freeary() didn't
* finish unlocking sem_undo_list.
*/
spin_lock(&ulp->lock);
spin_unlock(&ulp->lock);
rcu_read_unlock();
break;
}
spin_lock(&ulp->lock);
semid = un->semid;
spin_unlock(&ulp->lock);
/* exit_sem raced with IPC_RMID, nothing to do */
if (semid == -1) {
rcu_read_unlock();
continue;
}
sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma)) {
rcu_read_unlock();
continue;
}
sem_lock(sma, NULL, -1);
/* exit_sem raced with IPC_RMID, nothing to do */
if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1);
rcu_read_unlock();
continue;
}
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
sem_unlock(sma, -1);
rcu_read_unlock();
continue;
}
/* remove un from the linked lists */
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
spin_lock(&ulp->lock);
list_del_rcu(&un->list_proc);
spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
struct sem *semaphore = &sma->sems[i];
if (un->semadj[i]) {
semaphore->semval += un->semadj[i];
/*
* Range checks of the new semaphore value,
* not defined by SUS (the Single UNIX Specification):
* - Some unices ignore the undo entirely
* (e.g. HP UX 11i 11.22, Tru64 V5.1)
* - some cap the value (e.g. FreeBSD caps
* at 0, but doesn't enforce SEMVMX)
*
* Linux caps the semaphore value, both at 0
* and at SEMVMX.
*
* Manfred <manfred@colorfullife.com>
*/
if (semaphore->semval < 0)
semaphore->semval = 0;
if (semaphore->semval > SEMVMX)
semaphore->semval = SEMVMX;
ipc_update_pid(&semaphore->sempid, task_tgid(current));
}
}
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 1, &wake_q);
sem_unlock(sma, -1);
rcu_read_unlock();
wake_up_q(&wake_q);
kvfree_rcu(un, rcu);
}
kfree(ulp);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
struct user_namespace *user_ns = seq_user_ns(s);
struct kern_ipc_perm *ipcp = it;
struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
time64_t sem_otime;
/*
* The proc interface isn't aware of sem_lock(), it calls
* ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
* (in sysvipc_find_ipc)
* In order to stay compatible with sem_lock(), we must
* enter / leave complex_mode.
*/
complexmode_enter(sma);
sem_otime = get_semotime(sma);
seq_printf(s,
"%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
sma->sem_perm.key,
sma->sem_perm.id,
sma->sem_perm.mode,
sma->sem_nsems,
from_kuid_munged(user_ns, sma->sem_perm.uid),
from_kgid_munged(user_ns, sma->sem_perm.gid),
from_kuid_munged(user_ns, sma->sem_perm.cuid),
from_kgid_munged(user_ns, sma->sem_perm.cgid),
sem_otime,
sma->sem_ctime);
complexmode_tryleave(sma);
return 0;
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
* Memory merging support.
*
* This code enables dynamic sharing of identical pages found in different
* memory areas, even if they are not shared by fork().
*/
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, vm_flags_t *vm_flags);
vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
vm_flags_t vm_flags);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
* To identify zeropages that were mapped by KSM, we reuse the dirty bit
* in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
* deduplicating memory.
*/
#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
extern atomic_long_t ksm_zero_pages;
static inline void ksm_map_zero_page(struct mm_struct *mm)
{
atomic_long_inc(&ksm_zero_pages);
atomic_long_inc(&mm->ksm_zero_pages);
}
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
if (is_ksm_zero_pte(pte)) {
atomic_long_dec(&ksm_zero_pages);
atomic_long_dec(&mm->ksm_zero_pages);
}
}
static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
return atomic_long_read(&mm->ksm_zero_pages);
}
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
/* Adding mm to ksm is best effort on fork. */
if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) {
long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
mm->ksm_merging_pages = 0;
mm->ksm_rmap_items = 0;
atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
__ksm_enter(mm);
}
}
static inline int ksm_execve(struct mm_struct *mm)
{
if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
return __ksm_enter(mm);
return 0;
}
static inline void ksm_exit(struct mm_struct *mm)
{
if (mm_flags_test(MMF_VM_MERGEABLE, mm))
__ksm_exit(mm);
}
/*
* When do_swap_page() first faults in from swap what used to be a KSM page,
* no problem, it will be assigned to this vma's anon_vma; but thereafter,
* it might be faulted into a different anon_vma (or perhaps to a different
* offset in the same anon_vma). do_swap_page() cannot do all the locking
* needed to reconstitute a cross-anon_vma KSM page: for now it has to make
* a copy, and leave remerging the pages to a later pass of ksmd.
*
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
struct folio *ksm_might_need_to_copy(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
const struct file *file, vm_flags_t vm_flags)
{
return vm_flags;
}
static inline int ksm_disable(struct mm_struct *mm)
{
return 0;
}
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
static inline int ksm_execve(struct mm_struct *mm)
{
return 0;
}
static inline void ksm_exit(struct mm_struct *mm)
{
}
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}
static inline void collect_procs_ksm(const struct folio *folio,
const struct page *page, struct list_head *to_kill,
int force_early)
{
}
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, vm_flags_t *vm_flags)
{
return 0;
}
static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr)
{
return folio;
}
static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
#endif /* __LINUX_KSM_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H
#include <asm/segment.h>
#include <asm/page_types.h>
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLER__
#ifdef __i386__
struct pt_regs {
/*
* NB: 32-bit x86 CPUs are inconsistent as to what happens in the
* following cases (where %seg represents a segment register):
*
* - pushl %seg: some do a 16-bit write and leave the high
* bits alone
* - movl %seg, [mem]: some do a 16-bit write despite the movl
* - IDT entry: some (e.g. 486) will leave the high bits of CS
* and (if applicable) SS undefined.
*
* Fortunately, x86-32 doesn't read the high bits on POP or IRET,
* so we can just treat all of the segment registers as 16-bit
* values.
*/
unsigned long bx;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long bp;
unsigned long ax;
unsigned short ds;
unsigned short __dsh;
unsigned short es;
unsigned short __esh;
unsigned short fs;
unsigned short __fsh;
/*
* On interrupt, gs and __gsh store the vector number. They never
* store gs any more.
*/
unsigned short gs;
unsigned short __gsh;
/* On interrupt, this is the error code. */
unsigned long orig_ax;
unsigned long ip;
unsigned short cs;
unsigned short __csh;
unsigned long flags;
unsigned long sp;
unsigned short ss;
unsigned short __ssh;
};
#else /* __i386__ */
struct fred_cs {
/* CS selector */
u64 cs : 16,
/* Stack level at event time */
sl : 2,
/* IBT in WAIT_FOR_ENDBRANCH state */
wfe : 1,
: 45;
};
struct fred_ss {
/* SS selector */
u64 ss : 16,
/* STI state */
sti : 1,
/* Set if syscall, sysenter or INT n */
swevent : 1,
/* Event is NMI type */
nmi : 1,
: 13,
/* Event vector */
vector : 8,
: 8,
/* Event type */
type : 4,
: 4,
/* Event was incident to enclave execution */
enclave : 1,
/* CPU was in long mode */
lm : 1,
/*
* Nested exception during FRED delivery, not set
* for #DF.
*/
nested : 1,
: 1,
/*
* The length of the instruction causing the event.
* Only set for INTO, INT1, INT3, INT n, SYSCALL
* and SYSENTER. 0 otherwise.
*/
insnlen : 4;
};
struct pt_regs {
/*
* C ABI says these regs are callee-preserved. They aren't saved on
* kernel entry unless syscall needs a complete, fully filled
* "struct pt_regs".
*/
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long bp;
unsigned long bx;
/* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long ax;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
/*
* orig_ax is used on entry for:
* - the syscall number (syscall, sysenter, int80)
* - error_code stored by the CPU on traps and exceptions
* - the interrupt number for device interrupts
*
* A FRED stack frame starts here:
* 1) It _always_ includes an error code;
*
* 2) The return frame for ERET[US] starts here, but
* the content of orig_ax is ignored.
*/
unsigned long orig_ax;
/* The IRETQ return frame starts here */
unsigned long ip;
union {
/* CS selector */
u16 cs;
/* The extended 64-bit data slot containing CS */
u64 csx;
/* The FRED CS extension */
struct fred_cs fred_cs;
};
unsigned long flags;
unsigned long sp;
union {
/* SS selector */
u16 ss;
/* The extended 64-bit data slot containing SS */
u64 ssx;
/* The FRED SS extension */
struct fred_ss fred_ss;
};
/*
* Top of stack on IDT systems, while FRED systems have extra fields
* defined above for storing exception related information, e.g. CR2 or
* DR6.
*/
};
#endif /* !__i386__ */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif
#include <asm/proto.h>
struct cpuinfo_x86;
struct task_struct;
extern unsigned long profile_pc(struct pt_regs *regs);
extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct pt_regs *regs, int error_code, int si_code);
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->ax = rc;
}
/*
* user_mode(regs) determines whether a register set came from user
* mode. On x86_32, this is true if V8086 mode was enabled OR if the
* register set was from protected mode with RPL-3 CS value. This
* tricky test checks that with one comparison.
*
* On x86_64, vm86 mode is mercifully nonexistent, and we don't need
* the extra check.
*/
static __always_inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
#else
return !!(regs->cs & 3);
#endif
}
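/*
 * Worked example of the x86-32 test above (editorial sketch): for a
 * protected-mode user frame, CS has RPL = 3 and the VM flag is clear, so
 * (3 | 0) >= USER_RPL (3) holds. For a v8086 frame, CS may carry RPL = 0,
 * but X86_VM_MASK (EFLAGS bit 17) pushes the OR-ed value well above 3, so
 * the single comparison catches both user cases.
 */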
static __always_inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (regs->flags & X86_VM_MASK);
#else
return 0; /* No V86 mode support in long mode */
#endif
}
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
#ifndef CONFIG_PARAVIRT_XXL
/*
* On non-paravirt systems, this is the only long mode CPL 3
* selector. We do not allow long mode selectors in the LDT.
*/
return regs->cs == __USER_CS;
#else
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
#else /* !CONFIG_X86_64 */
return false;
#endif
}
/*
* Determine whether the register set came from any context that is running in
* 64-bit mode.
*/
static inline bool any_64bit_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
return !user_mode(regs) || user_64bit_mode(regs);
#else
return false;
#endif
}
#ifdef CONFIG_X86_64
#define current_user_stack_pointer() current_pt_regs()->sp
#define compat_user_stack_pointer() current_pt_regs()->sp
static __always_inline bool ip_within_syscall_gap(struct pt_regs *regs)
{
bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack);
ret = ret || (regs->ip >= (unsigned long)entry_SYSRETQ_unsafe_stack &&
regs->ip < (unsigned long)entry_SYSRETQ_end);
#ifdef CONFIG_IA32_EMULATION
ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
regs->ip < (unsigned long)entry_SYSCALL_compat_safe_stack);
ret = ret || (regs->ip >= (unsigned long)entry_SYSRETL_compat_unsafe_stack &&
regs->ip < (unsigned long)entry_SYSRETL_compat_end);
#endif
return ret;
}
#endif
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
return regs->ip;
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->ip = val;
}
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
return regs->bp;
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->sp = val;
}
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return !(regs->flags & X86_EFLAGS_IF);
}
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten.
* @offset: offset number of the register.
*
* regs_get_register returns the value of a register. The @offset is the
* offset of that register within the struct pt_regs pointed to by @regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
#ifdef CONFIG_X86_32
/* The selector fields are 16-bit. */
if (offset == offsetof(struct pt_regs, cs) ||
offset == offsetof(struct pt_regs, ss) ||
offset == offsetof(struct pt_regs, ds) ||
offset == offsetof(struct pt_regs, es) ||
offset == offsetof(struct pt_regs, fs) ||
offset == offsetof(struct pt_regs, gs)) {
return *(u16 *)((unsigned long)regs + offset);
}
#endif
return *(unsigned long *)((unsigned long)regs + offset);
}
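/*
 * Example (editorial sketch, not part of the original header): resolving a
 * register by name and reading it from a pt_regs, e.g. in a tracing context.
 * example_reg_by_name() is a hypothetical helper.
 *
 *	static unsigned long example_reg_by_name(struct pt_regs *regs,
 *						 const char *name)
 *	{
 *		int offset = regs_query_register_offset(name); /* e.g. "ax" */
 *
 *		if (offset < 0)
 *			return 0;
 *		return regs_get_register(regs, offset);
 *	}
 */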
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
static inline int regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) == (regs->sp & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns the address of the @n th entry of the
* kernel stack which is specified by @regs. If the @n th entry is NOT in
* the kernel stack, this returns NULL.
*/
static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)regs->sp;
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return addr;
else
return NULL;
}
/* To avoid include hell, we can't include uaccess.h */
extern long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
unsigned long *addr;
unsigned long val;
long ret;
addr = regs_get_kernel_stack_nth_addr(regs, n);
if (addr) {
ret = copy_from_kernel_nofault(&val, addr, sizeof(val));
if (!ret)
return val;
}
return 0;
}
/**
* regs_get_kernel_argument() - get Nth function argument in kernel
* @regs: pt_regs of that context
* @n: function argument number (start from 0)
*
* regs_get_kernel_argument() returns the @n th argument of the function call.
* Note that this chooses the most likely register/stack assignment; in some
* cases it can be incorrect.
* This is expected to be called from kprobes or ftrace with regs
* where the top of stack is the return address.
*/
static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
unsigned int n)
{
static const unsigned int argument_offs[] = {
#ifdef __i386__
offsetof(struct pt_regs, ax),
offsetof(struct pt_regs, dx),
offsetof(struct pt_regs, cx),
#define NR_REG_ARGUMENTS 3
#else
offsetof(struct pt_regs, di),
offsetof(struct pt_regs, si),
offsetof(struct pt_regs, dx),
offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, r8),
offsetof(struct pt_regs, r9),
#define NR_REG_ARGUMENTS 6
#endif
};
if (n >= NR_REG_ARGUMENTS) {
n -= NR_REG_ARGUMENTS - 1;
return regs_get_kernel_stack_nth(regs, n);
} else
return regs_get_register(regs, argument_offs[n]);
}
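/*
 * Example (editorial sketch): a kprobes pre-handler reading the first two
 * arguments of the probed function via the helper above. Assumes
 * <linux/kprobes.h>; example_pre_handler() is hypothetical.
 *
 *	static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		unsigned long arg0 = regs_get_kernel_argument(regs, 0);
 *		unsigned long arg1 = regs_get_kernel_argument(regs, 1);
 *
 *		pr_debug("%s: arg0=%lx arg1=%lx\n", p->symbol_name, arg0, arg1);
 *		return 0;
 *	}
 */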
#define arch_has_single_step() (1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step() (1)
#else
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
#ifdef CONFIG_X86_64
# define do_set_thread_area_64(p, s, t) do_arch_prctl_64(p, s, t)
#else
# define do_set_thread_area_64(p, s, t) (0)
#endif
#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_PTRACE_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
* Copyright (C) 2006 Nick Piggin
* Copyright (C) 2012 Konstantin Khlebnikov
*/
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
#include <linux/gfp_types.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/local_lock.h>
/* Keep unconverted code working */
#define radix_tree_root xarray
#define radix_tree_node xa_node
struct radix_tree_preload {
local_lock_t lock;
unsigned nr;
/* nodes->parent points to next preallocated node */
struct radix_tree_node *nodes;
};
DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
/*
* The bottom two bits of the slot determine how the remaining bits in the
* slot are interpreted:
*
* 00 - data pointer
* 10 - internal entry
* x1 - value entry
*
* The internal entry may be a pointer to the next level in the tree, a
* sibling entry, or an indicator that the entry in this slot has been moved
* to another location in the tree and the lookup should be restarted. While
* NULL fits the 'data pointer' pattern, it means that there is no entry in
* the tree for this index (no matter what level of the tree it is found at).
* This means that storing a NULL entry in the tree is the same as deleting
* the entry from the tree.
*/
#define RADIX_TREE_ENTRY_MASK 3UL
#define RADIX_TREE_INTERNAL_NODE 2UL
static inline bool radix_tree_is_internal_node(void *ptr)
{
return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
RADIX_TREE_INTERNAL_NODE;
}
/*** radix-tree API starts here ***/
#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS
#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR ((__force gfp_t)4)
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask)
#define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)
static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
return root->xa_head == NULL;
}
/**
* struct radix_tree_iter - radix tree iterator state
*
* @index: index of current slot
* @next_index: one beyond the last index for this chunk
* @tags: bit-mask for tag-iterating
* @node: node that contains current slot
*
* This radix tree iterator works in terms of "chunks" of slots. A chunk is a
* subinterval of slots contained within one radix tree leaf node. It is
* described by a pointer to its first slot and a struct radix_tree_iter
* which holds the chunk's position in the tree and its size. For tagged
* iteration radix_tree_iter also holds the slots' bit-mask for one chosen
* radix tree tag.
*/
struct radix_tree_iter {
unsigned long index;
unsigned long next_index;
unsigned long tags;
struct radix_tree_node *node;
};
/**
* Radix-tree synchronization
*
* The radix-tree API requires that users provide all synchronisation (with
* specific exceptions, noted below).
*
* Synchronization of access to the data items being stored in the tree, and
* management of their lifetimes must be completely managed by API users.
*
* For API usage, in general,
* - any function _modifying_ the tree or tags (inserting or deleting
* items, setting or clearing tags) must exclude other modifications, and
* exclude any functions reading the tree.
* - any function _reading_ the tree or tags (looking up items or tags,
* gang lookups) must exclude modifications to the tree, but may occur
* concurrently with other readers.
*
* The notable exceptions to this rule are the following functions:
* __radix_tree_lookup
* radix_tree_lookup
* radix_tree_lookup_slot
* radix_tree_tag_get
* radix_tree_gang_lookup
* radix_tree_gang_lookup_tag
* radix_tree_gang_lookup_tag_slot
* radix_tree_tagged
*
* The first 7 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently.
*
* It is still required that the caller manage the synchronization and lifetimes
* of the items. So if RCU lock-free lookups are used, typically this would mean
* that the items have their own locks, or are amenable to lock-free access; and
* that the items are freed by RCU (or only freed after having been deleted from
* the radix tree *and* a synchronize_rcu() grace period).
*
* (Note, rcu_assign_pointer and rcu_dereference are not needed to control
* access to data items when inserting into or looking up from the radix tree)
*
* Note that the value returned by radix_tree_tag_get() may not be relied upon
* if only the RCU read lock is held. Functions to set/clear tags and to
* delete nodes running concurrently with it may affect its result such that
* two consecutive reads in the same locked section may return different
* values. If reliability is required, modification functions must also be
* excluded from concurrency.
*
* radix_tree_tagged is able to be called without locking or RCU.
*/
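/*
 * Example of the reader-side rules above (editorial sketch): a lockless
 * lookup under RCU while insertions and deletions are serialised elsewhere
 * (e.g. by a spinlock). "my_tree" is a hypothetical radix_tree_root; the
 * returned item must itself be RCU-safe, e.g. freed via RCU after removal.
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(my_tree, index);
 *	rcu_read_unlock();
 */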
/**
* radix_tree_deref_slot - dereference a slot
* @slot: slot pointer, returned by radix_tree_lookup_slot
*
* For use with radix_tree_lookup_slot(). Caller must hold tree at least read
* locked across slot lookup and dereference. Not required if write lock is
* held (ie. items cannot be concurrently inserted).
*
* radix_tree_deref_retry must be used to confirm validity of the pointer if
* only the read lock is held.
*
* Return: entry stored in that slot.
*/
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
return rcu_dereference(*slot);
}
/**
* radix_tree_deref_slot_protected - dereference a slot with tree lock held
* @slot: slot pointer, returned by radix_tree_lookup_slot
*
* Similar to radix_tree_deref_slot. The caller does not hold the RCU read
* lock but it must hold the tree lock to prevent parallel updates.
*
* Return: entry stored in that slot.
*/
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
spinlock_t *treelock)
{
return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}
/**
* radix_tree_deref_retry - check radix_tree_deref_slot
* @arg: pointer returned by radix_tree_deref_slot
* Returns: 0 if retry is not required, otherwise retry is required
*
* radix_tree_deref_retry must be used with radix_tree_deref_slot.
*/
static inline int radix_tree_deref_retry(void *arg)
{
return unlikely(radix_tree_is_internal_node(arg));
}
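/*
 * Example (editorial sketch): dereferencing a single slot under RCU and
 * retrying if a concurrent modification moved the entry. "my_tree" and
 * "index" are hypothetical.
 *
 *	void __rcu **slot;
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		slot = radix_tree_lookup_slot(my_tree, index);
 *		entry = slot ? radix_tree_deref_slot(slot) : NULL;
 *	} while (radix_tree_deref_retry(entry));
 *	rcu_read_unlock();
 */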
/**
* radix_tree_exception - did radix_tree_deref_slot() return an exceptional entry?
* @arg: value returned by radix_tree_deref_slot
* Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
*/
static inline int radix_tree_exception(void *arg)
{
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
int radix_tree_insert(struct radix_tree_root *, unsigned long index,
void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
unsigned long index);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
void __rcu **slot, void *entry);
void radix_tree_iter_delete(struct radix_tree_root *,
struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
void **results, unsigned long first_index,
unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
void **results, unsigned long first_index,
unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
void __rcu ***results, unsigned long first_index,
unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
static inline void radix_tree_preload_end(void)
{
local_unlock(&radix_tree_preloads.lock);
}
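/*
 * Example (editorial sketch): the usual preload pattern for inserting while
 * holding a spinlock. radix_tree_preload() may sleep and, on success,
 * returns with preemption disabled; radix_tree_preload_end() re-enables it.
 * "my_lock", "my_tree", "index" and "item" are hypothetical.
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */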
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max);
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */
RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */
};
/**
* radix_tree_iter_init - initialize radix tree iterator
*
* @iter: pointer to iterator state
* @start: iteration starting index
* Returns: NULL
*/
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
/*
* Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
* in the case of a successful tagged chunk lookup. If the lookup was
* unsuccessful or non-tagged then nobody cares about ->tags.
*
* Set index to zero to bypass next_index overflow protection.
* See the comment in radix_tree_next_chunk() for details.
*/
iter->index = 0;
iter->next_index = start;
return NULL;
}
/**
* radix_tree_next_chunk - find next chunk of slots for iteration
*
* @root: radix tree root
* @iter: iterator state
* @flags: RADIX_TREE_ITER_* flags and tag index
* Returns: pointer to the chunk's first slot, or NULL if there are no more left
*
* This function looks up the next chunk in the radix tree starting from
* @iter->next_index. It returns a pointer to the chunk's first slot.
* Also it fills @iter with data about chunk: position in the tree (index),
* its end (next_index), and constructs a bit mask for tagged iterating (tags).
*/
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
struct radix_tree_iter *iter, unsigned flags);
/**
* radix_tree_iter_lookup - look up an index in the radix tree
* @root: radix tree root
* @iter: iterator state
* @index: key to look up
*
* If @index is present in the radix tree, this function returns the slot
* containing it and updates @iter to describe the entry. If @index is not
* present, it returns NULL.
*/
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned long index)
{
radix_tree_iter_init(iter, index);
return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}
/**
* radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state
*
* If we iterate over a tree protected only by the RCU lock, a race
* against deletion or creation may result in seeing a slot for which
* radix_tree_deref_retry() returns true. If so, call this function
* and continue the iteration.
*/
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
iter->tags = 0;
return NULL;
}
static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
return iter->index + slots;
}
/**
* radix_tree_iter_resume - resume iterating when the chunk may be invalid
* @slot: pointer to current slot
* @iter: iterator state
* Returns: New slot pointer
*
* If the iterator needs to release then reacquire a lock, the chunk may
* have been invalidated by an insertion or deletion. Call this function
* before releasing the lock to continue the iteration from the next index.
*/
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter);
/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
}
/**
* radix_tree_next_slot - find next slot in chunk
*
* @slot: pointer to current slot
* @iter: pointer to iterator state
* @flags: RADIX_TREE_ITER_*, should be constant
* Returns: pointer to next slot, or NULL if there are no more left
*
* This function updates @iter->index in the case of a successful lookup.
* For tagged lookup it also eats @iter->tags.
*
* There are several cases where 'slot' can be passed in as NULL to this
* function. These cases result from the use of radix_tree_iter_resume() or
* radix_tree_iter_retry(). In these cases we don't end up dereferencing
* 'slot' because either:
* a) we are doing tagged iteration and iter->tags has been set to 0, or
* b) we are doing non-tagged iteration, and iter->index and iter->next_index
* have been set up so that radix_tree_chunk_size() returns 1 or 0.
*/
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
struct radix_tree_iter *iter, unsigned flags)
{
if (flags & RADIX_TREE_ITER_TAGGED) {
iter->tags >>= 1;
if (unlikely(!iter->tags))
return NULL;
if (likely(iter->tags & 1ul)) {
iter->index = __radix_tree_iter_add(iter, 1);
slot++;
goto found;
}
if (!(flags & RADIX_TREE_ITER_CONTIG)) {
unsigned offset = __ffs(iter->tags);
iter->tags >>= offset++;
iter->index = __radix_tree_iter_add(iter, offset);
slot += offset;
goto found;
}
} else {
long count = radix_tree_chunk_size(iter);
while (--count > 0) {
slot++;
iter->index = __radix_tree_iter_add(iter, 1);
if (likely(*slot))
goto found;
if (flags & RADIX_TREE_ITER_CONTIG) {
/* forbid switching to the next chunk */
iter->next_index = 0;
break;
}
}
}
return NULL;
found:
return slot;
}
/**
* radix_tree_for_each_slot - iterate over non-empty slots
*
* @slot: the void** variable for pointer to slot
* @root: the struct radix_tree_root pointer
* @iter: the struct radix_tree_iter pointer
* @start: iteration starting index
*
* @slot points to radix tree slot, @iter->index contains its index.
*/
#define radix_tree_for_each_slot(slot, root, iter, start) \
for (slot = radix_tree_iter_init(iter, start) ; \
slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \
slot = radix_tree_next_slot(slot, iter, 0))
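/*
 * Example (editorial sketch): RCU-protected walk over all present entries,
 * restarting a chunk when radix_tree_deref_retry() reports a moved slot.
 * "my_tree" is a hypothetical radix_tree_root.
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, my_tree, &iter, 0) {
 *		void *entry = radix_tree_deref_slot(slot);
 *
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		pr_info("index %lu -> %p\n", iter.index, entry);
 *	}
 *	rcu_read_unlock();
 */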
/**
* radix_tree_for_each_tagged - iterate over tagged slots
*
* @slot: the void** variable for pointer to slot
* @root: the struct radix_tree_root pointer
* @iter: the struct radix_tree_iter pointer
* @start: iteration starting index
* @tag: tag index
*
* @slot points to radix tree slot, @iter->index contains its index.
*/
#define radix_tree_for_each_tagged(slot, root, iter, start, tag) \
for (slot = radix_tree_iter_init(iter, start) ; \
slot || (slot = radix_tree_next_chunk(root, iter, \
RADIX_TREE_ITER_TAGGED | tag)) ; \
slot = radix_tree_next_slot(slot, iter, \
RADIX_TREE_ITER_TAGGED | tag))
#endif /* _LINUX_RADIX_TREE_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/exit.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/unwind_deferred.h>
#include <linux/uaccess.h>
#include <linux/pidfs.h>
#include <uapi/linux/wait.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>
#include "exit.h"
/*
* The default value should be high enough to not crash a system that randomly
* crashes its kernel from time to time, but low enough to at least not permit
* overflowing 32-bit refcounts or the ldsem writer count.
*/
static unsigned int oops_limit = 10000;
#ifdef CONFIG_SYSCTL
static const struct ctl_table kern_exit_table[] = {
{
.procname = "oops_limit",
.data = &oops_limit,
.maxlen = sizeof(oops_limit),
.mode = 0644,
.proc_handler = proc_douintvec,
},
};
static __init int kernel_exit_sysctls_init(void)
{
register_sysctl_init("kernel", kern_exit_table);
return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif
static atomic_t oops_count = ATOMIC_INIT(0);
#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
char *page)
{
return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}
static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
static __init int kernel_exit_sysfs_init(void)
{
sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif
/*
* For things release_task() would like to do *after* tasklist_lock is released.
*/
struct release_task_post {
struct pid *pids[PIDTYPE_MAX];
};
static void __unhash_process(struct release_task_post *post, struct task_struct *p,
bool group_dead)
{
struct pid *pid = task_pid(p);
nr_threads--;
detach_pid(post->pids, p, PIDTYPE_PID);
wake_up_all(&pid->wait_pidfd);
if (group_dead) {
detach_pid(post->pids, p, PIDTYPE_TGID);
detach_pid(post->pids, p, PIDTYPE_PGID);
detach_pid(post->pids, p, PIDTYPE_SID);
list_del_rcu(&p->tasks);
list_del_init(&p->sibling);
__this_cpu_dec(process_counts);
}
list_del_rcu(&p->thread_node);
}
/*
* This function expects the tasklist_lock write-locked.
*/
static void __exit_signal(struct release_task_post *post, struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
bool group_dead = thread_group_leader(tsk);
struct sighand_struct *sighand;
struct tty_struct *tty;
u64 utime, stime;
sighand = rcu_dereference_check(tsk->sighand,
lockdep_tasklist_lock_is_held());
spin_lock(&sighand->siglock);
#ifdef CONFIG_POSIX_TIMERS
posix_cpu_timers_exit(tsk);
if (group_dead)
posix_cpu_timers_exit_group(tsk);
#endif
if (group_dead) {
tty = sig->tty;
sig->tty = NULL;
} else {
/*
* If there is any task waiting for the group exit
* then notify it:
*/
if (sig->notify_count > 0 && !--sig->notify_count)
wake_up_process(sig->group_exec_task);
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
}
/*
* Accumulate here the counters for all threads as they die. We could
* skip the group leader because it is the last user of signal_struct,
* but we want to avoid the race with thread_group_cputime() which can
* see the empty ->thread_head list.
*/
task_cputime(tsk, &utime, &stime);
write_seqlock(&sig->stats_lock);
sig->utime += utime;
sig->stime += stime;
sig->gtime += task_gtime(tsk);
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
sig->nr_threads--;
__unhash_process(post, tsk, group_dead);
write_sequnlock(&sig->stats_lock);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
__cleanup_sighand(sighand);
if (group_dead)
tty_kref_put(tty);
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
kprobe_flush_task(tsk);
rethook_flush_task(tsk);
perf_event_delayed_put(tsk);
trace_sched_process_free(tsk);
put_task_struct(tsk);
}
void put_task_struct_rcu_user(struct task_struct *task)
{
if (refcount_dec_and_test(&task->rcu_users))
call_rcu(&task->rcu, delayed_put_task_struct);
}
void __weak release_thread(struct task_struct *dead_task)
{
}
void release_task(struct task_struct *p)
{
struct release_task_post post;
struct task_struct *leader;
struct pid *thread_pid;
int zap_leader;
repeat:
memset(&post, 0, sizeof(post));
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
rcu_read_unlock();
pidfs_exit(p);
cgroup_release(p);
/* Retrieve @thread_pid before __unhash_process() may set it to NULL. */
thread_pid = task_pid(p);
write_lock_irq(&tasklist_lock);
ptrace_release_task(p);
__exit_signal(&post, p);
/*
* If we are the last non-leader member of the thread
* group, and the leader is zombie, then notify the
* group leader's parent process. (if it wants notification.)
*/
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader)
&& leader->exit_state == EXIT_ZOMBIE) {
/* for pidfs_exit() and do_notify_parent() */
if (leader->signal->flags & SIGNAL_GROUP_EXIT)
leader->exit_code = leader->signal->group_exit_code;
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
* then we are the one who should release the leader.
*/
zap_leader = do_notify_parent(leader, leader->exit_signal);
if (zap_leader)
leader->exit_state = EXIT_DEAD;
}
write_unlock_irq(&tasklist_lock);
/* @thread_pid can't go away until free_pids() below */
proc_flush_pid(thread_pid);
add_device_randomness(&p->se.sum_exec_runtime,
sizeof(p->se.sum_exec_runtime));
free_pids(post.pids);
release_thread(p);
/*
* This task was already removed from the process/thread/pid lists
* and lock_task_sighand(p) can't succeed. Nobody else can touch
* ->pending or, if group dead, signal->shared_pending. We can call
* flush_sigqueue() lockless.
*/
flush_sigqueue(&p->pending);
if (thread_group_leader(p))
flush_sigqueue(&p->signal->shared_pending);
put_task_struct_rcu_user(p);
p = leader;
if (unlikely(zap_leader))
goto repeat;
}
int rcuwait_wake_up(struct rcuwait *w)
{
int ret = 0;
struct task_struct *task;
rcu_read_lock();
/*
* Order condition vs @task, such that everything prior to the load
* of @task is visible. This is the condition as to why the user called
* rcuwait_wake() in the first place. Pairs with set_current_state()
* barrier (A) in rcuwait_wait_event().
*
* WAIT WAKE
* [S] tsk = current [S] cond = true
* MB (A) MB (B)
* [L] cond [L] tsk
*/
smp_mb(); /* (B) */
task = rcu_dereference(w->task);
if (task)
ret = wake_up_process(task);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);
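/*
 * Example (editorial sketch): the WAIT/WAKE pairing described above, using
 * the rcuwait API from <linux/rcuwait.h>. The waker must make the condition
 * visible before calling rcuwait_wake_up(). "my_wait" and "my_done" are
 * hypothetical.
 *
 * waiter:
 *	rcuwait_init(&my_wait);
 *	rcuwait_wait_event(&my_wait, READ_ONCE(my_done), TASK_UNINTERRUPTIBLE);
 * waker:
 *	WRITE_ONCE(my_done, true);
 *	rcuwait_wake_up(&my_wait);
 */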
/*
* Determine if a process group is "orphaned", according to the POSIX
* definition in 2.2.2.52. Orphaned process groups are not to be affected
* by terminal-generated stop signals. Newly orphaned process groups are
* to receive a SIGHUP and a SIGCONT.
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
static int will_become_orphaned_pgrp(struct pid *pgrp,
struct task_struct *ignored_task)
{
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
if ((p == ignored_task) ||
(p->exit_state && thread_group_empty(p)) ||
is_global_init(p->real_parent))
continue;
if (task_pgrp(p->real_parent) != pgrp &&
task_session(p->real_parent) == task_session(p))
return 0;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return 1;
}
int is_current_pgrp_orphaned(void)
{
int retval;
read_lock(&tasklist_lock);
retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
read_unlock(&tasklist_lock);
return retval;
}
static bool has_stopped_jobs(struct pid *pgrp)
{
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
return true;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return false;
}
/*
* Check to see if any process groups have become orphaned as
* a result of our exiting, and if they have any stopped jobs,
* send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
struct pid *pgrp = task_pgrp(tsk);
struct task_struct *ignored_task = tsk;
if (!parent)
/* exit: our father is in a different pgrp than
* we are and we were the only connection outside.
*/
parent = tsk->real_parent;
else
/* reparent: our child is in a different pgrp than
* we are, and it was the only connection outside.
*/
ignored_task = NULL;
if (task_pgrp(parent) != pgrp &&
task_session(parent) == task_session(tsk) &&
will_become_orphaned_pgrp(pgrp, ignored_task) &&
has_stopped_jobs(pgrp)) {
__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
}
}
static void coredump_task_exit(struct task_struct *tsk,
struct core_state *core_state)
{
struct core_thread self;
self.task = tsk;
if (self.task->flags & PF_SIGNALED)
self.next = xchg(&core_state->dumper.next, &self);
else
self.task = NULL;
/*
* Implies mb(), the result of xchg() must be visible
* to core_state->dumper.
*/
if (atomic_dec_and_test(&core_state->nr_threads))
complete(&core_state->startup);
for (;;) {
set_current_state(TASK_IDLE|TASK_FREEZABLE);
if (!self.task) /* see coredump_finish() */
break;
schedule();
}
__set_current_state(TASK_RUNNING);
}
#ifdef CONFIG_MEMCG
/* drops tasklist_lock if succeeds */
static bool __try_to_set_owner(struct task_struct *tsk, struct mm_struct *mm)
{
bool ret = false;
task_lock(tsk);
if (likely(tsk->mm == mm)) {
/* tsk can't pass exit_mm/exec_mmap and exit */
read_unlock(&tasklist_lock);
WRITE_ONCE(mm->owner, tsk);
lru_gen_migrate_mm(mm);
ret = true;
}
task_unlock(tsk);
return ret;
}
static bool try_to_set_owner(struct task_struct *g, struct mm_struct *mm)
{
struct task_struct *t;
for_each_thread(g, t) {
struct mm_struct *t_mm = READ_ONCE(t->mm);
if (t_mm == mm) {
if (__try_to_set_owner(t, mm))
return true;
} else if (t_mm)
break;
}
return false;
}
/*
* A task is exiting. If it owned this mm, find a new owner for the mm.
*/
void mm_update_next_owner(struct mm_struct *mm)
{
struct task_struct *g, *p = current;
/*
* If the exiting or execing task is not the owner, it's
* someone else's problem.
*/
if (mm->owner != p)
return;
/*
* The current owner is exiting/execing and there are no other
* candidates. Do not leave the mm pointing to a possibly
* freed task structure.
*/
if (atomic_read(&mm->mm_users) <= 1) {
WRITE_ONCE(mm->owner, NULL);
return;
}
read_lock(&tasklist_lock);
/*
* Search in the children
*/
list_for_each_entry(g, &p->children, sibling) {
if (try_to_set_owner(g, mm))
goto ret;
}
/*
* Search in the siblings
*/
list_for_each_entry(g, &p->real_parent->children, sibling) {
if (try_to_set_owner(g, mm))
goto ret;
}
/*
* Search through everything else, we should not get here often.
*/
for_each_process(g) {
if (atomic_read(&mm->mm_users) <= 1)
break;
if (g->flags & PF_KTHREAD)
continue;
if (try_to_set_owner(g, mm))
goto ret;
}
read_unlock(&tasklist_lock);
/*
* We found no owner yet mm_users > 1: this implies that we are
* most likely racing with swapoff (try_to_unuse()) or /proc or
* ptrace or page migration (get_task_mm()). Mark owner as NULL.
*/
WRITE_ONCE(mm->owner, NULL);
ret:
return;
}
#endif /* CONFIG_MEMCG */
/*
* Turn us into a lazy TLB process if we
* aren't already..
*/
static void exit_mm(void)
{
struct mm_struct *mm = current->mm;
exit_mm_release(current, mm);
if (!mm)
return;
mmap_read_lock(mm);
mmgrab_lazy_tlb(mm);
BUG_ON(mm != current->active_mm);
/* more a memory barrier than a real lock */
task_lock(current);
/*
* When a thread stops operating on an address space, the loop
* in membarrier_private_expedited() may not observe that
* tsk->mm, and the loop in membarrier_global_expedited() may
* not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
* rq->membarrier_state, so those would not issue an IPI.
* Membarrier requires a memory barrier after accessing
* user-space memory, before clearing tsk->mm or the
* rq->membarrier_state.
*/
smp_mb__after_spinlock();
local_irq_disable();
current->mm = NULL;
membarrier_update_current_mm(NULL);
enter_lazy_tlb(mm, current);
local_irq_enable();
task_unlock(current);
mmap_read_unlock(mm);
mm_update_next_owner(mm);
mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
}
static struct task_struct *find_alive_thread(struct task_struct *p)
{
struct task_struct *t;
for_each_thread(p, t) {
if (!(t->flags & PF_EXITING))
return t;
}
return NULL;
}
static struct task_struct *find_child_reaper(struct task_struct *father,
struct list_head *dead)
__releases(&tasklist_lock)
__acquires(&tasklist_lock)
{
struct pid_namespace *pid_ns = task_active_pid_ns(father);
struct task_struct *reaper = pid_ns->child_reaper;
struct task_struct *p, *n;
if (likely(reaper != father))
return reaper;
reaper = find_alive_thread(father);
if (reaper) {
pid_ns->child_reaper = reaper;
return reaper;
}
write_unlock_irq(&tasklist_lock);
list_for_each_entry_safe(p, n, dead, ptrace_entry) {
list_del_init(&p->ptrace_entry);
release_task(p);
}
zap_pid_ns_processes(pid_ns);
write_lock_irq(&tasklist_lock);
return father;
}
/*
* When we die, we re-parent all our children, and try to:
* 1. give them to another thread in our thread group, if such a member exists
* 2. give it to the first ancestor process which prctl'd itself as a
* child_subreaper for its children (like a service manager)
* 3. give it to the init process (PID 1) in our pid namespace
*/
static struct task_struct *find_new_reaper(struct task_struct *father,
struct task_struct *child_reaper)
{
struct task_struct *thread, *reaper;
thread = find_alive_thread(father);
if (thread)
return thread;
if (father->signal->has_child_subreaper) {
unsigned int ns_level = task_pid(father)->level;
/*
* Find the first ->is_child_subreaper ancestor in our pid_ns.
* We can't check reaper != child_reaper to ensure we do not
* cross the namespaces, the exiting parent could be injected
* by setns() + fork().
* We check pid->level, this is slightly more efficient than
* task_active_pid_ns(reaper) != task_active_pid_ns(father).
*/
for (reaper = father->real_parent;
task_pid(reaper)->level == ns_level;
reaper = reaper->real_parent) {
if (reaper == &init_task)
break;
if (!reaper->signal->is_child_subreaper)
continue;
thread = find_alive_thread(reaper);
if (thread)
return thread;
}
}
return child_reaper;
}
/*
* Any that need to be release_task'd are put on the @dead list.
*/
static void reparent_leader(struct task_struct *father, struct task_struct *p,
struct list_head *dead)
{
if (unlikely(p->exit_state == EXIT_DEAD))
return;
/* We don't want people slaying init. */
p->exit_signal = SIGCHLD;
/* If it has exited notify the new parent about this child's death. */
if (!p->ptrace && p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
if (do_notify_parent(p, p->exit_signal)) {
p->exit_state = EXIT_DEAD;
list_add(&p->ptrace_entry, dead);
}
}
kill_orphaned_pgrp(p, father);
}
/*
* Make init inherit all the child processes
*/
static void forget_original_parent(struct task_struct *father,
struct list_head *dead)
{
struct task_struct *p, *t, *reaper;
if (unlikely(!list_empty(&father->ptraced)))
exit_ptrace(father, dead);
/* Can drop and reacquire tasklist_lock */
reaper = find_child_reaper(father, dead);
if (list_empty(&father->children))
return;
reaper = find_new_reaper(father, reaper);
list_for_each_entry(p, &father->children, sibling) {
for_each_thread(p, t) {
RCU_INIT_POINTER(t->real_parent, reaper);
BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
if (likely(!t->ptrace))
t->parent = t->real_parent;
if (t->pdeath_signal)
group_send_sig_info(t->pdeath_signal,
SEND_SIG_NOINFO, t,
PIDTYPE_TGID);
}
/*
* If this is a threaded reparent there is no need to
* notify anyone anything has happened.
*/
if (!same_thread_group(reaper, father))
reparent_leader(father, p, dead);
}
list_splice_tail_init(&father->children, &reaper->children);
}
/*
* Send signals to all our closest relatives so that they know
* to properly mourn us..
*/
static void exit_notify(struct task_struct *tsk, int group_dead)
{
bool autoreap;
struct task_struct *p, *n;
LIST_HEAD(dead);
write_lock_irq(&tasklist_lock);
forget_original_parent(tsk, &dead);
if (group_dead)
kill_orphaned_pgrp(tsk->group_leader, NULL);
tsk->exit_state = EXIT_ZOMBIE;
if (unlikely(tsk->ptrace)) {
int sig = thread_group_leader(tsk) &&
thread_group_empty(tsk) &&
!ptrace_reparented(tsk) ?
tsk->exit_signal : SIGCHLD;
autoreap = do_notify_parent(tsk, sig);
} else if (thread_group_leader(tsk)) {
autoreap = thread_group_empty(tsk) &&
do_notify_parent(tsk, tsk->exit_signal);
} else {
autoreap = true;
/* untraced sub-thread */
do_notify_pidfd(tsk);
}
if (autoreap) {
tsk->exit_state = EXIT_DEAD;
list_add(&tsk->ptrace_entry, &dead);
}
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
wake_up_process(tsk->signal->group_exec_task);
write_unlock_irq(&tasklist_lock);
list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
list_del_init(&p->ptrace_entry);
release_task(p);
}
}
#ifdef CONFIG_DEBUG_STACK_USAGE
#ifdef CONFIG_STACK_GROWSUP
unsigned long stack_not_used(struct task_struct *p)
{
unsigned long *n = end_of_stack(p);
do { /* Skip over canary */
n--;
} while (!*n);
return (unsigned long)end_of_stack(p) - (unsigned long)n;
}
#else /* !CONFIG_STACK_GROWSUP */
unsigned long stack_not_used(struct task_struct *p)
{
unsigned long *n = end_of_stack(p);
do { /* Skip over canary */
n++;
} while (!*n);
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif /* CONFIG_STACK_GROWSUP */
/* Count the maximum pages reached in kernel stacks */
static inline void kstack_histogram(unsigned long used_stack)
{
#ifdef CONFIG_VM_EVENT_COUNTERS
if (used_stack <= 1024)
count_vm_event(KSTACK_1K);
#if THREAD_SIZE > 1024
else if (used_stack <= 2048)
count_vm_event(KSTACK_2K);
#endif
#if THREAD_SIZE > 2048
else if (used_stack <= 4096)
count_vm_event(KSTACK_4K);
#endif
#if THREAD_SIZE > 4096
else if (used_stack <= 8192)
count_vm_event(KSTACK_8K);
#endif
#if THREAD_SIZE > 8192
else if (used_stack <= 16384)
count_vm_event(KSTACK_16K);
#endif
#if THREAD_SIZE > 16384
else if (used_stack <= 32768)
count_vm_event(KSTACK_32K);
#endif
#if THREAD_SIZE > 32768
else if (used_stack <= 65536)
count_vm_event(KSTACK_64K);
#endif
#if THREAD_SIZE > 65536
else
count_vm_event(KSTACK_REST);
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
}
static void check_stack_usage(void)
{
static DEFINE_SPINLOCK(low_water_lock);
static int lowest_to_date = THREAD_SIZE;
unsigned long free;
free = stack_not_used(current);
kstack_histogram(THREAD_SIZE - free);
if (free >= lowest_to_date)
return;
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
current->comm, task_pid_nr(current), free);
lowest_to_date = free;
}
spin_unlock(&low_water_lock);
}
#else /* !CONFIG_DEBUG_STACK_USAGE */
static inline void check_stack_usage(void) {}
#endif /* CONFIG_DEBUG_STACK_USAGE */
static void synchronize_group_exit(struct task_struct *tsk, long code)
{
struct sighand_struct *sighand = tsk->sighand;
struct signal_struct *signal = tsk->signal;
struct core_state *core_state;
spin_lock_irq(&sighand->siglock);
signal->quick_threads--;
if ((signal->quick_threads == 0) &&
!(signal->flags & SIGNAL_GROUP_EXIT)) {
signal->flags = SIGNAL_GROUP_EXIT;
signal->group_exit_code = code;
signal->group_stop_count = 0;
}
/*
* Serialize with any possible pending coredump.
* We must hold siglock around checking core_state
* and setting PF_POSTCOREDUMP. The core-inducing thread
* will increment ->nr_threads for each thread in the
* group without PF_POSTCOREDUMP set.
*/
tsk->flags |= PF_POSTCOREDUMP;
core_state = signal->core_state;
spin_unlock_irq(&sighand->siglock);
if (unlikely(core_state))
coredump_task_exit(tsk, core_state);
}
void __noreturn do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
WARN_ON(irqs_disabled());
WARN_ON(tsk->plug);
kcov_task_exit(tsk);
kmsan_task_exit(tsk);
synchronize_group_exit(tsk, code);
ptrace_event(PTRACE_EVENT_EXIT, code);
user_events_exit(tsk);
io_uring_files_cancel();
exit_signals(tsk); /* sets PF_EXITING */
seccomp_filter_release(tsk);
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
/*
* If the last thread of global init has exited, panic
* immediately to get a useable coredump.
*/
if (unlikely(is_global_init(tsk)))
panic("Attempted to kill init! exitcode=0x%08x\n",
tsk->signal->group_exit_code ?: (int)code);
#ifdef CONFIG_POSIX_TIMERS
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk);
#endif
if (tsk->mm)
setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
}
acct_collect(code, group_dead);
if (group_dead)
tty_audit_exit();
audit_free(tsk);
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
unwind_deferred_task_exit(tsk);
trace_sched_process_exit(tsk, group_dead);
/*
* Since sampling can touch ->mm, make sure to stop everything before we
* tear it down.
*
* Also flushes inherited counters to the parent - before the parent
* gets woken up by child-exit notifications.
*/
perf_event_exit_task(tsk);
exit_mm();
if (group_dead)
acct_process();
exit_sem(tsk);
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
if (group_dead)
disassociate_ctty(1);
exit_task_namespaces(tsk);
exit_task_work(tsk);
exit_thread(tsk);
sched_autogroup_exit_task(tsk);
cgroup_exit(tsk);
/*
* FIXME: do that only when needed, using sched_exit tracepoint
*/
flush_ptrace_hw_breakpoint(tsk);
exit_tasks_rcu_start();
exit_notify(tsk, group_dead);
proc_exit_connector(tsk);
mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
#endif
/*
* Make sure we are holding no locks:
*/
debug_check_no_locks_held();
if (tsk->io_context)
exit_io_context(tsk);
if (tsk->splice_pipe)
free_pipe_info(tsk->splice_pipe);
if (tsk->task_frag.page)
put_page(tsk->task_frag.page);
exit_task_stack_account(tsk);
check_stack_usage();
preempt_disable();
if (tsk->nr_dirtied)
__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
exit_rcu();
exit_tasks_rcu_finish();
lockdep_free_task(tsk);
do_task_dead();
}
void __noreturn make_task_dead(int signr)
{
/*
* Take the task off the cpu after something catastrophic has
* happened.
*
* We can get here from a kernel oops, sometimes with preemption off.
* Start by checking for critical errors.
* Then fix up important state like USER_DS and preemption.
* Then do everything else.
*/
struct task_struct *tsk = current;
unsigned int limit;
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
if (unlikely(irqs_disabled())) {
pr_info("note: %s[%d] exited with irqs disabled\n",
current->comm, task_pid_nr(current));
local_irq_enable();
}
if (unlikely(in_atomic())) {
pr_info("note: %s[%d] exited with preempt_count %d\n",
current->comm, task_pid_nr(current),
preempt_count());
preempt_count_set(PREEMPT_ENABLED);
}
/*
* Every time the system oopses, if the oops happens while a reference
* to an object was held, the reference leaks.
* If the oops doesn't also leak memory, repeated oopsing can cause
* reference counters to wrap around (if they're not using refcount_t).
* This means that repeated oopsing can make unexploitable-looking bugs
* exploitable through repeated oopsing.
* To make sure this can't happen, place an upper bound on how often the
* kernel may oops without panic().
*/
limit = READ_ONCE(oops_limit);
if (atomic_inc_return(&oops_count) >= limit && limit)
panic("Oopsed too often (kernel.oops_limit is %d)", limit);
/*
* We're taking recursive faults here in make_task_dead. Safest is to just
* leave this task alone and wait for reboot.
*/
if (unlikely(tsk->flags & PF_EXITING)) {
pr_alert("Fixing recursive fault but reboot is needed!\n");
futex_exit_recursive(tsk);
tsk->exit_state = EXIT_DEAD;
refcount_inc(&tsk->rcu_users);
do_task_dead();
}
do_exit(signr);
}
SYSCALL_DEFINE1(exit, int, error_code)
{
do_exit((error_code&0xff)<<8);
}
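/*
 * Example: a minimal userspace sketch (not kernel code) showing how the
 * (error_code & 0xff) << 8 encoding above is consumed through the usual
 * wait(2) status macros:
 *
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int status;
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		_exit(3);			// kernel stores (3 & 0xff) << 8 = 0x0300
 *	waitpid(pid, &status, 0);
 *	if (WIFEXITED(status))			// low 7 bits are 0 => normal exit
 *		(void)WEXITSTATUS(status);	// yields 3, i.e. status >> 8
 */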
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
*/
void __noreturn
do_group_exit(int exit_code)
{
struct signal_struct *sig = current->signal;
if (sig->flags & SIGNAL_GROUP_EXIT)
exit_code = sig->group_exit_code;
else if (sig->group_exec_task)
exit_code = 0;
else {
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
if (sig->flags & SIGNAL_GROUP_EXIT)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else if (sig->group_exec_task)
exit_code = 0;
else {
sig->group_exit_code = exit_code;
sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
}
do_exit(exit_code);
/* NOTREACHED */
}
/*
* this kills every thread in the thread group. Note that any externally
* wait4()-ing process will get the correct exit code - even if this
* thread is not the thread group leader.
*/
SYSCALL_DEFINE1(exit_group, int, error_code)
{
do_group_exit((error_code & 0xff) << 8);
/* NOTREACHED */
return 0;
}
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
return wo->wo_type == PIDTYPE_MAX ||
task_pid_type(p, wo->wo_type) == wo->wo_pid;
}
static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
if (!eligible_pid(wo, p))
return 0;
/*
* Wait for all children (clone and not) if __WALL is set or
* if it is traced by us.
*/
if (ptrace || (wo->wo_flags & __WALL))
return 1;
/*
* Otherwise, wait for clone children *only* if __WCLONE is set;
* otherwise, wait for non-clone children *only*.
*
* Note: a "clone" child here is one that reports to its parent
* using a signal other than SIGCHLD, or a non-leader thread which
* we can only see if it is traced by us.
*/
if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
return 0;
return 1;
}
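/*
 * Illustration of the __WCLONE check above, assuming __WALL is not set and
 * the child is not traced by us:
 *
 *	child's exit_signal	__WCLONE set	eligible?
 *	SIGCHLD			no		yes
 *	SIGCHLD			yes		no
 *	other (or none)		no		no
 *	other (or none)		yes		yes
 */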
/*
* Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
int state, status;
pid_t pid = task_pid_vnr(p);
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
struct waitid_info *infop;
if (!likely(wo->wo_flags & WEXITED))
return 0;
if (unlikely(wo->wo_flags & WNOWAIT)) {
status = (p->signal->flags & SIGNAL_GROUP_EXIT)
? p->signal->group_exit_code : p->exit_code;
get_task_struct(p);
read_unlock(&tasklist_lock);
sched_annotate_sleep();
if (wo->wo_rusage)
getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
put_task_struct(p);
goto out_info;
}
/*
* Move the task's state to DEAD/TRACE, only one thread can do this.
*/
state = (ptrace_reparented(p) && thread_group_leader(p)) ?
EXIT_TRACE : EXIT_DEAD;
if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
return 0;
/*
* We own this thread, nobody else can reap it.
*/
read_unlock(&tasklist_lock);
sched_annotate_sleep();
/*
* Check thread_group_leader() to exclude the traced sub-threads.
*/
if (state == EXIT_DEAD && thread_group_leader(p)) {
struct signal_struct *sig = p->signal;
struct signal_struct *psig = current->signal;
unsigned long maxrss;
u64 tgutime, tgstime;
/*
* The resource counters for the group leader are in its
* own task_struct. Those for dead threads in the group
* are in its signal_struct, as are those for the child
* processes it has previously reaped. All these
* accumulate in the parent's signal_struct c* fields.
*
* We don't bother to take a lock here to protect these
* p->signal fields because the whole thread group is dead
* and nobody can change them.
*
* psig->stats_lock also protects us from our sub-threads
* which can reap other children at the same time.
*
* We use thread_group_cputime_adjusted() to get times for
* the thread group, which consolidates times for all threads
* in the group including the group leader.
*/
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
write_seqlock_irq(&psig->stats_lock);
psig->cutime += tgutime + sig->cutime;
psig->cstime += tgstime + sig->cstime;
psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt +=
p->maj_flt + sig->maj_flt + sig->cmaj_flt;
psig->cnvcsw +=
p->nvcsw + sig->nvcsw + sig->cnvcsw;
psig->cnivcsw +=
p->nivcsw + sig->nivcsw + sig->cnivcsw;
psig->cinblock +=
task_io_get_inblock(p) +
sig->inblock + sig->cinblock;
psig->coublock +=
task_io_get_oublock(p) +
sig->oublock + sig->coublock;
maxrss = max(sig->maxrss, sig->cmaxrss);
if (psig->cmaxrss < maxrss)
psig->cmaxrss = maxrss;
task_io_accounting_add(&psig->ioac, &p->ioac);
task_io_accounting_add(&psig->ioac, &sig->ioac);
write_sequnlock_irq(&psig->stats_lock);
}
if (wo->wo_rusage)
getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
status = (p->signal->flags & SIGNAL_GROUP_EXIT)
? p->signal->group_exit_code : p->exit_code;
wo->wo_stat = status;
if (state == EXIT_TRACE) {
write_lock_irq(&tasklist_lock);
/* We dropped tasklist, ptracer could die and untrace */
ptrace_unlink(p);
/* If parent wants a zombie, don't release it now */
state = EXIT_ZOMBIE;
if (do_notify_parent(p, p->exit_signal))
state = EXIT_DEAD;
p->exit_state = state;
write_unlock_irq(&tasklist_lock);
}
if (state == EXIT_DEAD)
release_task(p);
out_info:
infop = wo->wo_info;
if (infop) {
if ((status & 0x7f) == 0) {
infop->cause = CLD_EXITED;
infop->status = status >> 8;
} else {
infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
infop->status = status & 0x7f;
}
infop->pid = pid;
infop->uid = uid;
}
return pid;
}
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
if (ptrace) {
if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
return &p->exit_code;
} else {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
return &p->signal->group_exit_code;
}
return NULL;
}
/**
* wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
* @wo: wait options
* @ptrace: is the wait for ptrace
* @p: task to wait for
*
* Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
*
* CONTEXT:
* read_lock(&tasklist_lock), which is released if return value is
* non-zero. Also, grabs and releases @p->sighand->siglock.
*
* RETURNS:
* 0 if wait condition didn't exist and search for other wait conditions
* should continue. Non-zero return, -errno on failure and @p's pid on
* success, implies that tasklist_lock is released and wait condition
* search should terminate.
*/
static int wait_task_stopped(struct wait_opts *wo,
int ptrace, struct task_struct *p)
{
struct waitid_info *infop;
int exit_code, *p_code, why;
uid_t uid = 0; /* unneeded, required by compiler */
pid_t pid;
/*
* Traditionally we see ptrace'd stopped tasks regardless of options.
*/
if (!ptrace && !(wo->wo_flags & WUNTRACED))
return 0;
if (!task_stopped_code(p, ptrace))
return 0;
exit_code = 0;
spin_lock_irq(&p->sighand->siglock);
p_code = task_stopped_code(p, ptrace);
if (unlikely(!p_code))
goto unlock_sig;
exit_code = *p_code;
if (!exit_code)
goto unlock_sig;
if (!unlikely(wo->wo_flags & WNOWAIT))
*p_code = 0;
uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
return 0;
/*
* Now we are pretty sure this task is interesting.
* Make sure it doesn't get reaped out from under us while we
* give up the lock and then examine it below. We don't want to
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
get_task_struct(p);
pid = task_pid_vnr(p);
why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
read_unlock(&tasklist_lock);
sched_annotate_sleep();
if (wo->wo_rusage)
getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
put_task_struct(p);
if (likely(!(wo->wo_flags & WNOWAIT)))
wo->wo_stat = (exit_code << 8) | 0x7f;
infop = wo->wo_info;
if (infop) {
infop->cause = why;
infop->status = exit_code;
infop->pid = pid;
infop->uid = uid;
}
return pid;
}
/*
* Handle do_wait work for one task in a live, non-stopped state.
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
struct waitid_info *infop;
pid_t pid;
uid_t uid;
if (!unlikely(wo->wo_flags & WCONTINUED))
return 0;
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
spin_lock_irq(&p->sighand->siglock);
/* Re-check with the lock held. */
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
spin_unlock_irq(&p->sighand->siglock);
return 0;
}
if (!unlikely(wo->wo_flags & WNOWAIT))
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
uid = from_kuid_munged(current_user_ns(), task_uid(p));
spin_unlock_irq(&p->sighand->siglock);
pid = task_pid_vnr(p);
get_task_struct(p);
read_unlock(&tasklist_lock);
sched_annotate_sleep();
if (wo->wo_rusage)
getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
put_task_struct(p);
infop = wo->wo_info;
if (!infop) {
wo->wo_stat = 0xffff;
} else {
infop->cause = CLD_CONTINUED;
infop->pid = pid;
infop->uid = uid;
infop->status = SIGCONT;
}
return pid;
}
/*
* Consider @p for a wait by @parent.
*
* -ECHILD should be in ->notask_error before the first call.
* Returns nonzero for a final return, when we have unlocked tasklist_lock.
* Returns zero if the search for a child should continue;
* then ->notask_error is 0 if @p is an eligible child,
* or still -ECHILD.
*/
static int wait_consider_task(struct wait_opts *wo, int ptrace,
struct task_struct *p)
{
/*
* We can race with wait_task_zombie() from another thread.
* Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
* can't confuse the checks below.
*/
int exit_state = READ_ONCE(p->exit_state);
int ret;
if (unlikely(exit_state == EXIT_DEAD))
return 0;
ret = eligible_child(wo, ptrace, p);
if (!ret)
return ret;
if (unlikely(exit_state == EXIT_TRACE)) {
/*
* ptrace == 0 means we are the natural parent. In this case
* we should clear notask_error, debugger will notify us.
*/
if (likely(!ptrace))
wo->notask_error = 0;
return 0;
}
if (likely(!ptrace) && unlikely(p->ptrace)) {
/*
* If it is traced by its real parent's group, just pretend
* the caller is ptrace_do_wait() and reap this child if it
* is zombie.
*
* This also hides group stop state from real parent; otherwise
* a single stop can be reported twice as group and ptrace stop.
* If a ptracer wants to distinguish these two events for its
* own children it should create a separate process which takes
* the role of real parent.
*/
if (!ptrace_reparented(p))
ptrace = 1;
}
/* slay zombie? */
if (exit_state == EXIT_ZOMBIE) {
/* we don't reap group leaders with subthreads */
if (!delay_group_leader(p)) {
/*
* A zombie ptracee is only visible to its ptracer.
* Notification and reaping will be cascaded to the
* real parent when the ptracer detaches.
*/
if (unlikely(ptrace) || likely(!p->ptrace))
return wait_task_zombie(wo, p);
}
/*
* Allow access to stopped/continued state via zombie by
* falling through. Clearing of notask_error is complex.
*
* When !@ptrace:
*
* If WEXITED is set, notask_error should naturally be
* cleared. If not, subset of WSTOPPED|WCONTINUED is set,
* so, if there are live subthreads, there are events to
* wait for. If all subthreads are dead, it's still safe
* to clear - this function will be called again in a finite
* amount of time once all the subthreads are released and
* will then return without clearing.
*
* When @ptrace:
*
* Stopped state is per-task and thus can't change once the
* target task dies. Only continued and exited can happen.
* Clear notask_error if WCONTINUED | WEXITED.
*/
if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
wo->notask_error = 0;
} else {
/*
* @p is alive and it's gonna stop, continue or exit, so
* there always is something to wait for.
*/
wo->notask_error = 0;
}
/*
* Wait for stopped. Depending on @ptrace, different stopped state
* is used and the two don't interact with each other.
*/
ret = wait_task_stopped(wo, ptrace, p);
if (ret)
return ret;
/*
* Wait for continued. There's only one continued state and the
* ptracer can consume it which can confuse the real parent. Don't
* use WCONTINUED from ptracer. You don't need or want it.
*/
return wait_task_continued(wo, p);
}
/*
* Do the work of do_wait() for one thread in the group, @tsk.
*
* -ECHILD should be in ->notask_error before the first call.
* Returns nonzero for a final return, when we have unlocked tasklist_lock.
* Returns zero if the search for a child should continue; then
* ->notask_error is 0 if there were any eligible children,
* or still -ECHILD.
*/
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
struct task_struct *p;
list_for_each_entry(p, &tsk->children, sibling) {
int ret = wait_consider_task(wo, 0, p);
if (ret)
return ret;
}
return 0;
}
static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
struct task_struct *p;
list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
int ret = wait_consider_task(wo, 1, p);
if (ret)
return ret;
}
return 0;
}
bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
{
if (!eligible_pid(wo, p))
return false;
if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
return false;
return true;
}
static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
int sync, void *key)
{
struct wait_opts *wo = container_of(wait, struct wait_opts,
child_wait);
struct task_struct *p = key;
if (pid_child_should_wake(wo, p))
return default_wake_function(wait, mode, sync, key);
return 0;
}
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
__wake_up_sync_key(&parent->signal->wait_chldexit,
TASK_INTERRUPTIBLE, p);
}
static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
struct task_struct *target)
{
struct task_struct *parent =
!ptrace ? target->real_parent : target->parent;
return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
same_thread_group(current, parent));
}
/*
* Optimization for waiting on PIDTYPE_PID. No need to iterate through child
* and tracee lists to find the target task.
*/
static int do_wait_pid(struct wait_opts *wo)
{
bool ptrace;
struct task_struct *target;
int retval;
ptrace = false;
target = pid_task(wo->wo_pid, PIDTYPE_TGID);
if (target && is_effectively_child(wo, ptrace, target)) {
retval = wait_consider_task(wo, ptrace, target);
if (retval)
return retval;
}
ptrace = true;
target = pid_task(wo->wo_pid, PIDTYPE_PID);
if (target && target->ptrace &&
is_effectively_child(wo, ptrace, target)) {
retval = wait_consider_task(wo, ptrace, target);
if (retval)
return retval;
}
return 0;
}
long __do_wait(struct wait_opts *wo)
{
long retval;
/*
* If there is nothing that can match our criteria, just get out.
* We will clear ->notask_error to zero if we see any child that
* might later match our criteria, even if we are not able to reap
* it yet.
*/
wo->notask_error = -ECHILD;
if ((wo->wo_type < PIDTYPE_MAX) &&
(!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
goto notask;
read_lock(&tasklist_lock);
if (wo->wo_type == PIDTYPE_PID) {
retval = do_wait_pid(wo);
if (retval)
return retval;
} else {
struct task_struct *tsk = current;
do {
retval = do_wait_thread(wo, tsk);
if (retval)
return retval;
retval = ptrace_do_wait(wo, tsk);
if (retval)
return retval;
if (wo->wo_flags & __WNOTHREAD)
break;
} while_each_thread(current, tsk);
}
read_unlock(&tasklist_lock);
notask:
retval = wo->notask_error;
if (!retval && !(wo->wo_flags & WNOHANG))
return -ERESTARTSYS;
return retval;
}
static long do_wait(struct wait_opts *wo)
{
int retval;
trace_sched_process_wait(wo->wo_pid);
init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
wo->child_wait.private = current;
add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
do {
set_current_state(TASK_INTERRUPTIBLE);
retval = __do_wait(wo);
if (retval != -ERESTARTSYS)
break;
if (signal_pending(current))
break;
schedule();
} while (1);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
return retval;
}
int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
struct waitid_info *infop, int options,
struct rusage *ru)
{
unsigned int f_flags = 0;
struct pid *pid = NULL;
enum pid_type type;
if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
return -EINVAL;
switch (which) {
case P_ALL:
type = PIDTYPE_MAX;
break;
case P_PID:
type = PIDTYPE_PID;
if (upid <= 0)
return -EINVAL;
pid = find_get_pid(upid);
break;
case P_PGID:
type = PIDTYPE_PGID;
if (upid < 0)
return -EINVAL;
if (upid)
pid = find_get_pid(upid);
else
pid = get_task_pid(current, PIDTYPE_PGID);
break;
case P_PIDFD:
type = PIDTYPE_PID;
if (upid < 0)
return -EINVAL;
pid = pidfd_get_pid(upid, &f_flags);
if (IS_ERR(pid))
return PTR_ERR(pid);
break;
default:
return -EINVAL;
}
wo->wo_type = type;
wo->wo_pid = pid;
wo->wo_flags = options;
wo->wo_info = infop;
wo->wo_rusage = ru;
if (f_flags & O_NONBLOCK)
wo->wo_flags |= WNOHANG;
return 0;
}
static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
int options, struct rusage *ru)
{
struct wait_opts wo;
long ret;
ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
if (ret)
return ret;
ret = do_wait(&wo);
if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
ret = -EAGAIN;
put_pid(wo.wo_pid);
return ret;
}
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
infop, int, options, struct rusage __user *, ru)
{
struct rusage r;
struct waitid_info info = {.status = 0};
long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
int signo = 0;
if (err > 0) {
signo = SIGCHLD;
err = 0;
if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
return -EFAULT;
}
if (!infop)
return err;
if (!user_write_access_begin(infop, sizeof(*infop)))
return -EFAULT;
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
unsafe_put_user(info.cause, &infop->si_code, Efault);
unsafe_put_user(info.pid, &infop->si_pid, Efault);
unsafe_put_user(info.uid, &infop->si_uid, Efault);
unsafe_put_user(info.status, &infop->si_status, Efault);
user_write_access_end();
return err;
Efault:
user_write_access_end();
return -EFAULT;
}
long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
struct rusage *ru)
{
struct wait_opts wo;
struct pid *pid = NULL;
enum pid_type type;
long ret;
if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
/* -INT_MIN is not defined */
if (upid == INT_MIN)
return -ESRCH;
if (upid == -1)
type = PIDTYPE_MAX;
else if (upid < 0) {
type = PIDTYPE_PGID;
pid = find_get_pid(-upid);
} else if (upid == 0) {
type = PIDTYPE_PGID;
pid = get_task_pid(current, PIDTYPE_PGID);
} else /* upid > 0 */ {
type = PIDTYPE_PID;
pid = find_get_pid(upid);
}
wo.wo_type = type;
wo.wo_pid = pid;
wo.wo_flags = options | WEXITED;
wo.wo_info = NULL;
wo.wo_stat = 0;
wo.wo_rusage = ru;
ret = do_wait(&wo);
put_pid(pid);
if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
ret = -EFAULT;
return ret;
}
int kernel_wait(pid_t pid, int *stat)
{
struct wait_opts wo = {
.wo_type = PIDTYPE_PID,
.wo_pid = find_get_pid(pid),
.wo_flags = WEXITED,
};
int ret;
ret = do_wait(&wo);
if (ret > 0 && wo.wo_stat)
*stat = wo.wo_stat;
put_pid(wo.wo_pid);
return ret;
}
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
int, options, struct rusage __user *, ru)
{
struct rusage r;
long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
if (err > 0) {
if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
return -EFAULT;
}
return err;
}
#ifdef __ARCH_WANT_SYS_WAITPID
/*
* sys_waitpid() remains for compatibility. waitpid() should be
* implemented by calling sys_wait4() from libc.a.
*/
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
return kernel_wait4(pid, stat_addr, options, NULL);
}
#endif
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
compat_pid_t, pid,
compat_uint_t __user *, stat_addr,
int, options,
struct compat_rusage __user *, ru)
{
struct rusage r;
long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
if (err > 0) {
if (ru && put_compat_rusage(&r, ru))
return -EFAULT;
}
return err;
}
COMPAT_SYSCALL_DEFINE5(waitid,
int, which, compat_pid_t, pid,
struct compat_siginfo __user *, infop, int, options,
struct compat_rusage __user *, uru)
{
struct rusage ru;
struct waitid_info info = {.status = 0};
long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
int signo = 0;
if (err > 0) {
signo = SIGCHLD;
err = 0;
if (uru) {
/* kernel_waitid() overwrites everything in ru */
if (COMPAT_USE_64BIT_TIME)
err = copy_to_user(uru, &ru, sizeof(ru));
else
err = put_compat_rusage(&ru, uru);
if (err)
return -EFAULT;
}
}
if (!infop)
return err;
if (!user_write_access_begin(infop, sizeof(*infop)))
return -EFAULT;
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
unsafe_put_user(info.cause, &infop->si_code, Efault);
unsafe_put_user(info.pid, &infop->si_pid, Efault);
unsafe_put_user(info.uid, &infop->si_uid, Efault);
unsafe_put_user(info.status, &infop->si_status, Efault);
user_write_access_end();
return err;
Efault:
user_write_access_end();
return -EFAULT;
}
#endif
/*
* This needs to be __function_aligned as GCC implicitly makes any
* implementation of abort() cold and drops alignment specified by
* -falign-functions=N.
*
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
*/
__weak __function_aligned void abort(void)
{
BUG();
/* if that doesn't kill us, halt */
panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/sched/coredump.h>
/*
* Routines for handling mm_structs
*/
extern struct mm_struct *mm_alloc(void);
/**
* mmgrab() - Pin a &struct mm_struct.
* @mm: The &struct mm_struct to pin.
*
* Make sure that @mm will not get freed even after the owning task
* exits. This doesn't guarantee that the associated address space
* will still exist later on and mmget_not_zero() has to be used before
* accessing it.
*
* This is a preferred way to pin @mm for a longer/unbounded amount
* of time.
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
* See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
{
atomic_inc(&mm->mm_count);
}
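/*
 * Example: a minimal sketch of the usual mmgrab()/mmdrop() pairing, for a
 * hypothetical deferred worker that must not have the mm_struct freed
 * underneath it (the address space itself may still be torn down):
 *
 *	struct mm_struct *mm = current->mm;
 *
 *	mmgrab(mm);
 *	// ... hand mm off to asynchronous work; the owning task may exit ...
 *	mmdrop(mm);	// release the pin once the work is done
 */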
static inline void smp_mb__after_mmgrab(void)
{
smp_mb__after_atomic();
}
extern void __mmdrop(struct mm_struct *mm);
static inline void mmdrop(struct mm_struct *mm)
{
/*
* The implicit full barrier implied by atomic_dec_and_test() is
* required by the membarrier system call before returning to
* user-space, after storing to rq->curr.
*/
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
#ifdef CONFIG_PREEMPT_RT
/*
* RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
* by far the least expensive way to do that.
*/
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
__mmdrop(mm);
}
/*
* Invoked from finish_task_switch(). Delegates the heavy lifting on RT
* kernels via RCU.
*/
static inline void mmdrop_sched(struct mm_struct *mm)
{
/* Provides a full memory barrier. See mmdrop() */
if (atomic_dec_and_test(&mm->mm_count))
call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
mmdrop(mm);
}
#endif
/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
mmgrab(mm);
}
static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
mmdrop(mm);
} else {
/*
* mmdrop_lazy_tlb must provide a full memory barrier, see the
* membarrier comment finish_task_switch which relies on this.
*/
smp_mb();
}
}
static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
mmdrop_sched(mm);
else
smp_mb(); /* see mmdrop_lazy_tlb() above */
}
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
*
* Make sure that the address space of the given &struct mm_struct doesn't
* go away. This does not protect against parts of the address space being
* modified or freed, however.
*
* Never use this function to pin this address space for an
* unbounded/indefinite amount of time.
*
* Use mmput() to release the reference acquired by mmget().
*
* See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmget(struct mm_struct *mm)
{
atomic_inc(&mm->mm_users);
}
static inline bool mmget_not_zero(struct mm_struct *mm)
{
return atomic_inc_not_zero(&mm->mm_users);
}
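/*
 * Example: a minimal sketch of safely accessing another task's address
 * space; mmget_not_zero() fails once all users are gone, so mapping data
 * is only touched while a mm_users reference is held:
 *
 *	if (mmget_not_zero(mm)) {
 *		// the address space is guaranteed to stay around here
 *		mmput(mm);
 *	}
 */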
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
void mmput_async(struct mm_struct *);
#endif
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
* Grab a reference to a task's mm, if it is not already going away
* and ptrace_may_access with the mode parameter passed to it
* succeeds.
*/
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags) (TASK_SIZE)
#endif
#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif
extern void arch_pick_mmap_layout(struct mm_struct *mm,
const struct rlimit *rlim_stack);
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags);
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t);
unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags,
vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
const struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
{
bool ret;
/*
* need RCU to access ->real_parent if CLONE_VM was used along with
* CLONE_PARENT.
*
* We check real_parent->mm == tsk->mm because CLONE_VFORK does not
* imply CLONE_VM
*
* CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
* ->real_parent is not necessarily the task doing vfork(), so in
* theory we can't rely on task_lock() if we want to dereference it.
*
* And in this case we can't trust the real_parent->mm == tsk->mm
* check, it can be false negative. But we do not care, if init or
* another oom-unkillable task does this it should blame itself.
*/
rcu_read_lock();
ret = tsk->vfork_done &&
rcu_dereference(tsk->real_parent)->mm == tsk->mm;
rcu_read_unlock();
return ret;
}
/*
* Applies per-task gfp context to the given allocation flags.
* PF_MEMALLOC_NOIO implies GFP_NOIO
* PF_MEMALLOC_NOFS implies GFP_NOFS
* PF_MEMALLOC_PIN implies !GFP_MOVABLE
*/
static inline gfp_t current_gfp_context(gfp_t flags)
{
unsigned int pflags = READ_ONCE(current->flags);
if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
/*
* NOIO implies both NOIO and NOFS and it is a weaker context
* so always make sure it takes precedence
*/
if (pflags & PF_MEMALLOC_NOIO)
flags &= ~(__GFP_IO | __GFP_FS);
else if (pflags & PF_MEMALLOC_NOFS)
flags &= ~__GFP_FS;
if (pflags & PF_MEMALLOC_PIN)
flags &= ~__GFP_MOVABLE;
}
return flags;
}
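/*
 * Example: how the masking above plays out under a scoped restriction
 * (an illustrative sketch, not an exhaustive flag list):
 *
 *	unsigned int noio = memalloc_noio_save();
 *	gfp_t gfp = current_gfp_context(GFP_KERNEL);
 *	// gfp now lacks __GFP_IO and __GFP_FS, i.e. behaves like GFP_NOIO
 *	memalloc_noio_restore(noio);
 */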
#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
/* Any memory-allocation retry loop should use
* memalloc_retry_wait(), and pass the flags for the most
* constrained allocation attempt that might have failed.
* This provides useful documentation of where loops are,
* and a central place to fine tune the waiting as the MM
* implementation changes.
*/
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
/* We use io_schedule_timeout because waiting for memory
* typically includes waiting for dirty pages to be
* written out, which requires IO.
*/
__set_current_state(TASK_UNINTERRUPTIBLE);
gfp_flags = current_gfp_context(gfp_flags);
if (gfpflags_allow_blocking(gfp_flags) &&
!(gfp_flags & __GFP_NORETRY))
/* Probably waited already, no need for much more */
io_schedule_timeout(1);
else
/* Probably didn't wait, and has now released a lock,
* so now is a good time to wait
*/
io_schedule_timeout(HZ/50);
}
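/*
 * Example: a minimal sketch of the retry-loop shape this helper is meant
 * for (my_alloc() is a hypothetical allocation attempt):
 *
 *	void *p;
 *
 *	do {
 *		p = my_alloc(GFP_NOFS);
 *		if (p)
 *			break;
 *		memalloc_retry_wait(GFP_NOFS);
 *	} while (1);
 */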
/**
* might_alloc - Mark possible allocation sites
* @gfp_mask: gfp_t flags that would be used to allocate
*
* Similar to might_sleep() and other annotations, this can be used in functions
* that might allocate, but often don't. Compiles to nothing without
* CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.
*/
static inline void might_alloc(gfp_t gfp_mask)
{
fs_reclaim_acquire(gfp_mask);
fs_reclaim_release(gfp_mask);
might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
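/*
 * Example: a minimal sketch of annotating a function whose slow path may
 * allocate, so lockdep sees the potential reclaim dependency even when the
 * fast path never allocates (my_lookup()/my_alloc_entry() are hypothetical):
 *
 *	void *my_get_entry(gfp_t gfp)
 *	{
 *		might_alloc(gfp);
 *		return my_lookup() ?: my_alloc_entry(gfp);
 *	}
 */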
/**
* memalloc_flags_save - Add a PF_* flag to current->flags, save old value
*
* This allows PF_* flags to be conveniently added, irrespective of current
* value, and then the old version restored with memalloc_flags_restore().
*/
static inline unsigned memalloc_flags_save(unsigned flags)
{
unsigned oldflags = ~current->flags & flags;
current->flags |= flags;
return oldflags;
}
static inline void memalloc_flags_restore(unsigned flags)
{
current->flags &= ~flags;
}
/**
* memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
*
* This function marks the beginning of the GFP_NOIO allocation scope.
* All further allocations will implicitly drop __GFP_IO flag and so
* they are safe for the IO critical section from the allocation recursion
* point of view. Use memalloc_noio_restore to end the scope with flags
* returned by this function.
*
* Context: This function is safe to be used from any context.
* Return: The saved flags to be passed to memalloc_noio_restore.
*/
static inline unsigned int memalloc_noio_save(void)
{
return memalloc_flags_save(PF_MEMALLOC_NOIO);
}
/**
* memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
* @flags: Flags to restore.
*
* Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
* Always make sure that the given flags is the return value from the
* pairing memalloc_noio_save call.
*/
static inline void memalloc_noio_restore(unsigned int flags)
{
memalloc_flags_restore(flags);
}
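/*
 * Example: a minimal sketch of a GFP_NOIO scope; every allocation between
 * save and restore implicitly drops __GFP_IO, including allocations made
 * deep inside called code that asks for GFP_KERNEL:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	// e.g. allocations on a block device error-handling path
 *	buf = kmalloc(size, GFP_KERNEL);	// effectively GFP_NOIO here
 *
 *	memalloc_noio_restore(noio_flags);
 */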
/**
* memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
*
* This function marks the beginning of the GFP_NOFS allocation scope.
* All further allocations will implicitly drop __GFP_FS flag and so
* they are safe for the FS critical section from the allocation recursion
* point of view. Use memalloc_nofs_restore to end the scope with flags
* returned by this function.
*
* Context: This function is safe to be used from any context.
* Return: The saved flags to be passed to memalloc_nofs_restore.
*/
static inline unsigned int memalloc_nofs_save(void)
{
return memalloc_flags_save(PF_MEMALLOC_NOFS);
}
/**
* memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
* @flags: Flags to restore.
*
* Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
* Always make sure that the given flags is the return value from the
* pairing memalloc_nofs_save call.
*/
static inline void memalloc_nofs_restore(unsigned int flags)
{
memalloc_flags_restore(flags);
}
/**
* memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
*
* This function marks the beginning of the __GFP_MEMALLOC allocation scope.
* All further allocations will implicitly add the __GFP_MEMALLOC flag, which
* prevents entering reclaim and allows access to all memory reserves. This
* should only be used when the caller guarantees the allocation will allow more
* memory to be freed very shortly, i.e. it needs to allocate some memory in
* the process of freeing memory, and cannot reclaim due to potential recursion.
*
* Users of this scope have to be extremely careful to not deplete the reserves
* completely and implement a throttling mechanism which controls the
* consumption of the reserve based on the amount of freed memory. Usage of a
* pre-allocated pool (e.g. mempool) should be always considered before using
* this scope.
*
* Individual allocations under the scope can opt out using __GFP_NOMEMALLOC
*
* Context: This function should not be used in an interrupt context as that one
* does not give PF_MEMALLOC access to reserves.
* See __gfp_pfmemalloc_flags().
* Return: The saved flags to be passed to memalloc_noreclaim_restore.
*/
static inline unsigned int memalloc_noreclaim_save(void)
{
return memalloc_flags_save(PF_MEMALLOC);
}
/**
* memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
* @flags: Flags to restore.
*
* Ends the implicit __GFP_MEMALLOC scope started by memalloc_noreclaim_save
* function. Always make sure that the given flags is the return value from the
* pairing memalloc_noreclaim_save call.
*/
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
memalloc_flags_restore(flags);
}
/**
* memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
*
* This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
* All further allocations will implicitly remove the __GFP_MOVABLE flag, which
* will constrain the allocations to zones that allow long-term pinning, i.e.
* not ZONE_MOVABLE zones.
*
* Return: The saved flags to be passed to memalloc_pin_restore.
*/
static inline unsigned int memalloc_pin_save(void)
{
return memalloc_flags_save(PF_MEMALLOC_PIN);
}
/**
* memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
* @flags: Flags to restore.
*
* Ends the implicit ~__GFP_MOVABLE scope started by memalloc_pin_save function.
* Always make sure that the given flags is the return value from the pairing
* memalloc_pin_save call.
*/
static inline void memalloc_pin_restore(unsigned int flags)
{
memalloc_flags_restore(flags);
}
#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
* set_active_memcg - Starts the remote memcg charging scope.
* @memcg: memcg to charge.
*
* This function marks the beginning of the remote memcg charging scope. All the
* __GFP_ACCOUNT allocations till the end of the scope will be charged to the
* given memcg.
*
* Please, make sure that caller has a reference to the passed memcg structure,
* so its lifetime is guaranteed to exceed the scope between two
* set_active_memcg() calls.
*
* NOTE: This function can nest. Users must save the return value and
* reset the previous value after their own charging scope is over.
*/
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
struct mem_cgroup *old;
if (!in_task()) {
old = this_cpu_read(int_active_memcg);
this_cpu_write(int_active_memcg, memcg);
} else {
old = current->active_memcg;
current->active_memcg = memcg;
}
return old;
}
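/*
 * Example: a minimal sketch of the nested save/restore pattern described
 * above (memcg is assumed to be a struct mem_cgroup * the caller holds a
 * reference on):
 *
 *	struct mem_cgroup *old_memcg;
 *
 *	old_memcg = set_active_memcg(memcg);
 *	// __GFP_ACCOUNT allocations here are charged to memcg
 *	set_active_memcg(old_memcg);
 */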
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
return NULL;
}
#endif
#ifdef CONFIG_MEMBARRIER
enum {
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};
enum {
MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
MEMBARRIER_FLAG_RSEQ = (1U << 1),
};
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
/*
* The atomic_read() below prevents CSE. The following should
* help the compiler generate more efficient code on architectures
* where sync_core_before_usermode() is a no-op.
*/
if (!IS_ENABLED(CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE))
return;
if (current->mm != mm)
return;
if (likely(!(atomic_read(&mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
return;
sync_core_before_usermode();
}
extern void membarrier_exec_mmap(struct mm_struct *mm);
extern void membarrier_update_current_mm(struct mm_struct *next_mm);
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif
#endif /* _LINUX_SCHED_MM_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;
enum oom_constraint {
CONSTRAINT_NONE,
CONSTRAINT_CPUSET,
CONSTRAINT_MEMORY_POLICY,
CONSTRAINT_MEMCG,
};
/*
* Details of the page allocation that triggered the oom killer that are used to
* determine what should be killed.
*/
struct oom_control {
/* Used to determine cpuset */
struct zonelist *zonelist;
/* Used to determine mempolicy */
nodemask_t *nodemask;
/* Memory cgroup in which oom is invoked, or NULL for global oom */
struct mem_cgroup *memcg;
/* Used to determine cpuset and node locality requirement */
const gfp_t gfp_mask;
/*
* order == -1 means the oom kill is required by sysrq, otherwise only
* for display purposes.
*/
const int order;
/* Used by oom implementation, do not set */
unsigned long totalpages;
struct task_struct *chosen;
long chosen_points;
/* Used to print the constraint info. */
enum oom_constraint constraint;
};
extern struct mutex oom_lock;
extern struct mutex oom_adj_mutex;
static inline void set_current_oom_origin(void)
{
current->signal->oom_flag_origin = true;
}
static inline void clear_current_oom_origin(void)
{
current->signal->oom_flag_origin = false;
}
static inline bool oom_task_origin(const struct task_struct *p)
{
return p->signal->oom_flag_origin;
}
static inline bool tsk_is_oom_victim(struct task_struct * tsk)
{
return tsk->signal->oom_mm;
}
/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the
* address space which is reflected by MMF_UNSTABLE flag set in
* the mm. At that moment any !shared mapping would lose the content
* and could cause a memory corruption (zero pages instead of the
* original content).
*
* User should call this before establishing a page table entry for
* a !shared mapping and under the proper page table lock.
*
* Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
*/
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
if (unlikely(mm_flags_test(MMF_UNSTABLE, mm)))
return VM_FAULT_SIGBUS;
return 0;
}
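/*
 * Example: a minimal sketch of the intended call site in a fault handler,
 * just before installing a PTE for a private mapping and while holding the
 * page table lock:
 *
 *	vm_fault_t ret = check_stable_address_space(vma->vm_mm);
 *	if (ret)
 *		return ret;	// VM_FAULT_SIGBUS, the mm is being oom-reaped
 *	// safe to establish the page table entry
 */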
long oom_badness(struct task_struct *p,
unsigned long totalpages);
extern bool out_of_memory(struct oom_control *oc);
extern void exit_oom_victim(void);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
#endif /* _INCLUDE_LINUX_OOM_H */
// SPDX-License-Identifier: GPL-2.0+
/*
* Base port operations for 8250/16550-type serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
* Split from 8250_core.c, Copyright (C) 2001 Russell King.
*
* A note about mapbase / membase
*
* mapbase is the physical address of the IO port.
* membase is an 'ioremapped' cookie.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/console.h>
#include <linux/gpio/consumer.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/nmi.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/ktime.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "8250.h"
/*
* Here we define the default xmit fifo size used for each type of UART.
*/
static const struct serial8250_config uart_config[] = {
[PORT_UNKNOWN] = {
.name = "unknown",
.fifo_size = 1,
.tx_loadsz = 1,
},
[PORT_8250] = {
.name = "8250",
.fifo_size = 1,
.tx_loadsz = 1,
},
[PORT_16450] = {
.name = "16450",
.fifo_size = 1,
.tx_loadsz = 1,
},
[PORT_16550] = {
.name = "16550",
.fifo_size = 1,
.tx_loadsz = 1,
},
[PORT_16550A] = {
.name = "16550A",
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
[PORT_CIRRUS] = {
.name = "Cirrus",
.fifo_size = 1,
.tx_loadsz = 1,
},
[PORT_16650] = {
.name = "ST16650",
.fifo_size = 1,
.tx_loadsz = 1,
.flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
},
[PORT_16650V2] = {
.name = "ST16650V2",
.fifo_size = 32,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
UART_FCR_T_TRIG_00,
.rxtrig_bytes = {8, 16, 24, 28},
.flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
},
[PORT_16750] = {
.name = "TI16750",
.fifo_size = 64,
.tx_loadsz = 64,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
UART_FCR7_64BYTE,
.rxtrig_bytes = {1, 16, 32, 56},
.flags = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
},
[PORT_STARTECH] = {
.name = "Startech",
.fifo_size = 1,
.tx_loadsz = 1,
},
[PORT_16C950] = {
.name = "16C950/954",
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
.rxtrig_bytes = {16, 32, 112, 120},
/* UART_CAP_EFR breaks billionon CF bluetooth card. */
.flags = UART_CAP_FIFO | UART_CAP_SLEEP,
},
[PORT_16654] = {
.name = "ST16654",
.fifo_size = 64,
.tx_loadsz = 32,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
UART_FCR_T_TRIG_10,
.rxtrig_bytes = {8, 16, 56, 60},
.flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
},
[PORT_16850] = {
.name = "XR16850",
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
},
[PORT_RSA] = {
.name = "RSA",
.fifo_size = 2048,
.tx_loadsz = 2048,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
.flags = UART_CAP_FIFO,
},
[PORT_NS16550A] = {
.name = "NS16550A",
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_NATSEMI,
},
[PORT_XSCALE] = {
.name = "XScale",
.fifo_size = 32,
.tx_loadsz = 32,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
},
[PORT_OCTEON] = {
.name = "OCTEON",
.fifo_size = 64,
.tx_loadsz = 64,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO,
},
[PORT_U6_16550A] = {
.name = "U6_16550A",
.fifo_size = 64,
.tx_loadsz = 64,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
[PORT_TEGRA] = {
.name = "Tegra",
.fifo_size = 32,
.tx_loadsz = 8,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
UART_FCR_T_TRIG_01,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO | UART_CAP_RTOIE,
},
[PORT_XR17D15X] = {
.name = "XR17D15X",
.fifo_size = 64,
.tx_loadsz = 64,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
UART_CAP_SLEEP,
},
[PORT_XR17V35X] = {
.name = "XR17V35X",
.fifo_size = 256,
.tx_loadsz = 256,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 |
UART_FCR_T_TRIG_11,
.flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
UART_CAP_SLEEP,
},
[PORT_LPC3220] = {
.name = "LPC3220",
.fifo_size = 64,
.tx_loadsz = 32,
.fcr = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
.flags = UART_CAP_FIFO,
},
[PORT_BRCM_TRUMANAGE] = {
.name = "TruManage",
.fifo_size = 1,
.tx_loadsz = 1024,
.flags = UART_CAP_HFIFO,
},
[PORT_8250_CIR] = {
.name = "CIR port"
},
[PORT_ALTR_16550_F32] = {
.name = "Altera 16550 FIFO32",
.fifo_size = 32,
.tx_loadsz = 32,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 8, 16, 30},
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
[PORT_ALTR_16550_F64] = {
.name = "Altera 16550 FIFO64",
.fifo_size = 64,
.tx_loadsz = 64,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 16, 32, 62},
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
[PORT_ALTR_16550_F128] = {
.name = "Altera 16550 FIFO128",
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 32, 64, 126},
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
/*
* tx_loadsz is set to 63 bytes instead of 64 bytes to implement a
* workaround for erratum A-008006, which states that tx_loadsz should
* be configured to less than the maximum supported FIFO size.
*/
[PORT_16550A_FSL64] = {
.name = "16550A_FSL64",
.fifo_size = 64,
.tx_loadsz = 63,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
UART_FCR7_64BYTE,
.flags = UART_CAP_FIFO | UART_CAP_NOTEMT,
},
[PORT_RT2880] = {
.name = "Palmchip BK-3103",
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
[PORT_DA830] = {
.name = "TI DA8xx/66AK2x",
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
[PORT_MTK_BTIF] = {
.name = "MediaTek BTIF",
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
.flags = UART_CAP_FIFO,
},
[PORT_NPCM] = {
.name = "Nuvoton 16550",
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
[PORT_SUNIX] = {
.name = "Sunix",
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 32, 64, 112},
.flags = UART_CAP_FIFO | UART_CAP_SLEEP,
},
[PORT_ASPEED_VUART] = {
.name = "ASPEED VUART",
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
[PORT_MCHP16550A] = {
.name = "MCHP16550A",
.fifo_size = 256,
.tx_loadsz = 256,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
.rxtrig_bytes = {2, 66, 130, 194},
.flags = UART_CAP_FIFO,
},
[PORT_BCM7271] = {
.name = "Broadcom BCM7271 UART",
.fifo_size = 32,
.tx_loadsz = 32,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
.rxtrig_bytes = {1, 8, 16, 30},
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
};
/* Uart divisor latch read */
static u32 default_serial_dl_read(struct uart_8250_port *up)
{
/* Assign these in pieces to truncate any bits above 7. */
unsigned char dll = serial_in(up, UART_DLL);
unsigned char dlm = serial_in(up, UART_DLM);
return dll | dlm << 8;
}
/* Uart divisor latch write */
static void default_serial_dl_write(struct uart_8250_port *up, u32 value)
{
serial_out(up, UART_DLL, value & 0xff);
serial_out(up, UART_DLM, value >> 8 & 0xff);
}
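/*
 * The divisor latch value read/written above relates to the line speed by
 * the standard 16550 formula (illustrative):
 *
 *	divisor = uartclk / (16 * baud)
 *
 * e.g. a 1.8432 MHz clock with a divisor of 12 gives 1843200 / (16 * 12)
 * = 9600 baud.
 */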
#ifdef CONFIG_HAS_IOPORT
static u32 hub6_serial_in(struct uart_port *p, unsigned int offset)
{
offset = offset << p->regshift;
outb(p->hub6 - 1 + offset, p->iobase);
return inb(p->iobase + 1);
}
static void hub6_serial_out(struct uart_port *p, unsigned int offset, u32 value)
{
offset = offset << p->regshift;
outb(p->hub6 - 1 + offset, p->iobase);
outb(value, p->iobase + 1);
}
#endif /* CONFIG_HAS_IOPORT */
static u32 mem_serial_in(struct uart_port *p, unsigned int offset)
{
offset = offset << p->regshift;
return readb(p->membase + offset);
}
static void mem_serial_out(struct uart_port *p, unsigned int offset, u32 value)
{
offset = offset << p->regshift;
writeb(value, p->membase + offset);
}
static void mem16_serial_out(struct uart_port *p, unsigned int offset, u32 value)
{
offset = offset << p->regshift;
writew(value, p->membase + offset);
}
static u32 mem16_serial_in(struct uart_port *p, unsigned int offset)
{
offset = offset << p->regshift;
return readw(p->membase + offset);
}
static void mem32_serial_out(struct uart_port *p, unsigned int offset, u32 value)
{
offset = offset << p->regshift;
writel(value, p->membase + offset);
}
static u32 mem32_serial_in(struct uart_port *p, unsigned int offset)
{
offset = offset << p->regshift;
return readl(p->membase + offset);
}
static void mem32be_serial_out(struct uart_port *p, unsigned int offset, u32 value)
{
offset = offset << p->regshift;
iowrite32be(value, p->membase + offset);
}
static u32 mem32be_serial_in(struct uart_port *p, unsigned int offset)
{
offset = offset << p->regshift;
return ioread32be(p->membase + offset);
}
#ifdef CONFIG_HAS_IOPORT
static u32 io_serial_in(struct uart_port *p, unsigned int offset)
{
offset = offset << p->regshift;
return inb(p->iobase + offset);
}
static void io_serial_out(struct uart_port *p, unsigned int offset, u32 value)
{
offset = offset << p->regshift;
outb(value, p->iobase + offset);
}
#endif
static u32 no_serial_in(struct uart_port *p, unsigned int offset)
{
return ~0U;
}
static void no_serial_out(struct uart_port *p, unsigned int offset, u32 value)
{
}
static int serial8250_default_handle_irq(struct uart_port *port);
static void set_io_from_upio(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
up->dl_read = default_serial_dl_read;
up->dl_write = default_serial_dl_write;
switch (p->iotype) {
#ifdef CONFIG_HAS_IOPORT
case UPIO_HUB6:
p->serial_in = hub6_serial_in;
p->serial_out = hub6_serial_out;
break;
#endif
case UPIO_MEM:
p->serial_in = mem_serial_in;
p->serial_out = mem_serial_out;
break;
case UPIO_MEM16:
p->serial_in = mem16_serial_in;
p->serial_out = mem16_serial_out;
break;
case UPIO_MEM32:
p->serial_in = mem32_serial_in;
p->serial_out = mem32_serial_out;
break;
case UPIO_MEM32BE:
p->serial_in = mem32be_serial_in;
p->serial_out = mem32be_serial_out;
break;
#ifdef CONFIG_HAS_IOPORT
case UPIO_PORT:
p->serial_in = io_serial_in;
p->serial_out = io_serial_out;
break;
#endif
default:
WARN(p->iotype != UPIO_PORT || p->iobase,
"Unsupported UART type %x\n", p->iotype);
p->serial_in = no_serial_in;
p->serial_out = no_serial_out;
}
/* Remember loaded iotype */
up->cur_iotype = p->iotype;
p->handle_irq = serial8250_default_handle_irq;
}
static void
serial_port_out_sync(struct uart_port *p, int offset, int value)
{
switch (p->iotype) {
case UPIO_MEM:
case UPIO_MEM16:
case UPIO_MEM32:
case UPIO_MEM32BE:
case UPIO_AU:
p->serial_out(p, offset, value);
p->serial_in(p, UART_LCR); /* safe, no side-effects */
break;
default:
p->serial_out(p, offset, value);
}
}
/*
* FIFO support.
*/
static void serial8250_clear_fifos(struct uart_8250_port *p)
{
if (p->capabilities & UART_CAP_FIFO) {
serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(p, UART_FCR, 0);
}
}
static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t);
static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t);
void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
{
serial8250_clear_fifos(p);
serial_out(p, UART_FCR, p->fcr);
}
EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
void serial8250_rpm_get(struct uart_8250_port *p)
{
if (!(p->capabilities & UART_CAP_RPM))
return;
pm_runtime_get_sync(p->port.dev);
}
EXPORT_SYMBOL_GPL(serial8250_rpm_get);
void serial8250_rpm_put(struct uart_8250_port *p)
{
if (!(p->capabilities & UART_CAP_RPM))
return;
pm_runtime_mark_last_busy(p->port.dev);
pm_runtime_put_autosuspend(p->port.dev);
}
EXPORT_SYMBOL_GPL(serial8250_rpm_put);
/**
* serial8250_em485_init() - put uart_8250_port into rs485 emulation
* @p: uart_8250_port port instance
*
* The function is used to start rs485 software emulation on the
* &struct uart_8250_port* @p. Namely, RTS is switched before/after
* transmission. The function is idempotent, so it is safe to call it
* multiple times.
*
* The caller MUST enable interrupt on empty shift register before
* calling serial8250_em485_init(). This interrupt is not part of the
* 8250 standard, but is implementation defined.
*
* The function is supposed to be called from .rs485_config callback
* or from any other callback protected with p->port.lock spinlock.
*
* See also serial8250_em485_destroy()
*
* Return: 0 on success, -errno otherwise
*/
static int serial8250_em485_init(struct uart_8250_port *p)
{
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&p->port.lock);
if (p->em485)
goto deassert_rts;
p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC);
if (!p->em485)
return -ENOMEM;
hrtimer_setup(&p->em485->stop_tx_timer, &serial8250_em485_handle_stop_tx, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
hrtimer_setup(&p->em485->start_tx_timer, &serial8250_em485_handle_start_tx, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
p->em485->port = p;
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
deassert_rts:
if (p->em485->tx_stopped)
p->rs485_stop_tx(p, true);
return 0;
}
/**
* serial8250_em485_destroy() - put uart_8250_port into normal state
* @p: uart_8250_port port instance
*
* The function is used to stop rs485 software emulation on the
* &struct uart_8250_port* @p. The function is idempotent, so it is safe to
* call it multiple times.
*
* The function is supposed to be called from .rs485_config callback
* or from any other callback protected with p->port.lock spinlock.
*
* See also serial8250_em485_init()
*/
void serial8250_em485_destroy(struct uart_8250_port *p)
{
if (!p->em485)
return;
hrtimer_cancel(&p->em485->start_tx_timer);
hrtimer_cancel(&p->em485->stop_tx_timer);
kfree(p->em485);
p->em485 = NULL;
}
EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
struct serial_rs485 serial8250_em485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
SER_RS485_TERMINATE_BUS | SER_RS485_RX_DURING_TX,
.delay_rts_before_send = 1,
.delay_rts_after_send = 1,
};
EXPORT_SYMBOL_GPL(serial8250_em485_supported);
/**
* serial8250_em485_config() - generic ->rs485_config() callback
* @port: uart port
* @termios: termios structure
* @rs485: rs485 settings
*
* Generic callback usable by 8250 uart drivers to activate rs485 settings
* if the uart is incapable of driving RTS as a Transmit Enable signal in
* hardware, relying on software emulation instead.
*/
int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
/*
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
*/
if (rs485->flags & SER_RS485_ENABLED)
return serial8250_em485_init(up);
serial8250_em485_destroy(up);
return 0;
}
EXPORT_SYMBOL_GPL(serial8250_em485_config);
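/*
 * A minimal wiring sketch for a driver that wants this software RTS
 * emulation (illustrative only; consult the individual 8250 drivers for
 * the authoritative usage):
 *
 *	up->port.rs485_config = serial8250_em485_config;
 *	up->port.rs485_supported = serial8250_em485_supported;
 *	up->rs485_start_tx = serial8250_em485_start_tx;
 *	up->rs485_stop_tx = serial8250_em485_stop_tx;
 *
 * serial8250_em485_init() is then invoked from the rs485_config callback
 * once SER_RS485_ENABLED is requested.
 */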
/*
* These two wrappers ensure that serial8250_rpm_get_tx() can be called more than
* once and serial8250_rpm_put_tx() will still disable RPM because the fifo is
* empty and the HW can idle again.
*/
static void serial8250_rpm_get_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
if (!(p->capabilities & UART_CAP_RPM))
return;
rpm_active = xchg(&p->rpm_tx_active, 1);
if (rpm_active)
return;
pm_runtime_get_sync(p->port.dev);
}
static void serial8250_rpm_put_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
if (!(p->capabilities & UART_CAP_RPM))
return;
rpm_active = xchg(&p->rpm_tx_active, 0);
if (!rpm_active)
return;
pm_runtime_mark_last_busy(p->port.dev);
pm_runtime_put_autosuspend(p->port.dev);
}
/*
* IER sleep support. UARTs which have EFRs need the "extended
* capability" bit enabled. Note that on XR16C850s, we need to
* reset LCR to write to IER.
*/
static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
{
unsigned char lcr = 0, efr = 0;
guard(serial8250_rpm)(p);
if (!(p->capabilities & UART_CAP_SLEEP))
return;
/* Synchronize UART_IER access against the console. */
guard(uart_port_lock_irq)(&p->port);
if (p->capabilities & UART_CAP_EFR) {
lcr = serial_in(p, UART_LCR);
efr = serial_in(p, UART_EFR);
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(p, UART_EFR, UART_EFR_ECB);
serial_out(p, UART_LCR, 0);
}
serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
if (p->capabilities & UART_CAP_EFR) {
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(p, UART_EFR, efr);
serial_out(p, UART_LCR, lcr);
}
}
/* Clear the interrupt registers. */
static void serial8250_clear_interrupts(struct uart_port *port)
{
serial_port_in(port, UART_LSR);
serial_port_in(port, UART_RX);
serial_port_in(port, UART_IIR);
serial_port_in(port, UART_MSR);
}
static void serial8250_clear_IER(struct uart_8250_port *up)
{
if (up->capabilities & UART_CAP_UUE)
serial_out(up, UART_IER, UART_IER_UUE);
else
serial_out(up, UART_IER, 0);
}
/*
* This is a quickie test to see how big the FIFO is.
* It doesn't work all the time, more's the pity.
*/
static int size_fifo(struct uart_8250_port *up)
{
unsigned char old_fcr, old_mcr, old_lcr;
u32 old_dl;
int count;
old_lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, 0);
old_fcr = serial_in(up, UART_FCR);
old_mcr = serial8250_in_MCR(up);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial8250_out_MCR(up, UART_MCR_LOOP);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(up);
serial_dl_write(up, 0x0001);
serial_out(up, UART_LCR, UART_LCR_WLEN8);
for (count = 0; count < 256; count++)
serial_out(up, UART_TX, count);
mdelay(20);/* FIXME - schedule_timeout */
for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) &&
(count < 256); count++)
serial_in(up, UART_RX);
serial_out(up, UART_FCR, old_fcr);
serial8250_out_MCR(up, old_mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_dl_write(up, old_dl);
serial_out(up, UART_LCR, old_lcr);
return count;
}
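/*
 * Callers below only care whether the returned count is 16 or 64: for
 * instance, autoconfig_has_efr() uses 64 to tell a 16654 from a 16650V2,
 * and broken_efr() expects 16 from the misdetecting Exar ST16C2550.
 */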
/*
* Read UART ID using the divisor method - set DLL and DLM to zero
* and the revision will be in DLL and device type in DLM. We
* preserve the device state across this.
*/
static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
{
unsigned char old_lcr;
unsigned int id, old_dl;
old_lcr = serial_in(p, UART_LCR);
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(p);
serial_dl_write(p, 0);
id = serial_dl_read(p);
serial_dl_write(p, old_dl);
serial_out(p, UART_LCR, old_lcr);
return id;
}
/*
* This is a helper routine to autodetect StarTech/Exar/Oxsemi UARTs.
* When this function is called we know it is at least a StarTech
* 16650 V2, but it might be one of several StarTech UARTs, or one of
* its clones. (We treat the broken original StarTech 16650 V1 as a
* 16550, and why not? Startech doesn't seem to even acknowledge its
* existence.)
*
* What evil have men's minds wrought...
*/
static void autoconfig_has_efr(struct uart_8250_port *up)
{
unsigned int id1, id2, id3, rev;
/*
* Everything with an EFR has SLEEP
*/
up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
/*
* First we check to see if it's an Oxford Semiconductor UART.
*
* We have to do this here because some non-National
* Semiconductor clone chips lock up if you try writing to the
* LSR register (which serial_icr_read does).
*/
/*
* Check for Oxford Semiconductor 16C950.
*
* EFR [4] must be set else this test fails.
*
* This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
* claims that it's needed for 952 dual UARTs (which are not
* recommended for new designs).
*/
up->acr = 0;
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, 0x00);
id1 = serial_icr_read(up, UART_ID1);
id2 = serial_icr_read(up, UART_ID2);
id3 = serial_icr_read(up, UART_ID3);
rev = serial_icr_read(up, UART_REV);
if (id1 == 0x16 && id2 == 0xC9 &&
(id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
up->port.type = PORT_16C950;
/*
* Enable work around for the Oxford Semiconductor 952 rev B
* chip which causes it to seriously miscalculate baud rates
* when DLL is 0.
*/
if (id3 == 0x52 && rev == 0x01)
up->bugs |= UART_BUG_QUOT;
return;
}
/*
* We check for a XR16C850 by setting DLL and DLM to 0, and then
* reading back DLL and DLM. The chip type depends on the DLM
* value read back:
* 0x10 - XR16C850 and the DLL contains the chip revision.
* 0x12 - XR16C2850.
* 0x14 - XR16C854.
*/
id1 = autoconfig_read_divisor_id(up);
id2 = id1 >> 8;
if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
up->port.type = PORT_16850;
return;
}
/*
* It wasn't an XR16C850.
*
* We distinguish between the '654 and the '650 by counting
* how many bytes are in the FIFO. I'm using this for now,
* since that's the technique that was sent to me in the
* serial driver update, but I'm not convinced this works.
* I've had problems doing this in the past. -TYT
*/
if (size_fifo(up) == 64)
up->port.type = PORT_16654;
else
up->port.type = PORT_16650V2;
}
/*
* We detected a chip without a FIFO. Only two fall into
* this category - the original 8250 and the 16450. The
* 16450 has a scratch register (accessible with LCR=0), which is
* used below to tell the two apart.
*/
static void autoconfig_8250(struct uart_8250_port *up)
{
unsigned char scratch, status1, status2;
up->port.type = PORT_8250;
scratch = serial_in(up, UART_SCR);
serial_out(up, UART_SCR, 0xa5);
status1 = serial_in(up, UART_SCR);
serial_out(up, UART_SCR, 0x5a);
status2 = serial_in(up, UART_SCR);
serial_out(up, UART_SCR, scratch);
if (status1 == 0xa5 && status2 == 0x5a)
up->port.type = PORT_16450;
}
static int broken_efr(struct uart_8250_port *up)
{
/*
* Exar ST16C2550 "A2" devices incorrectly detect as
* having an EFR, and report an ID of 0x0201. See
* http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
*/
if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
return 1;
return 0;
}
/*
* We know that the chip has FIFOs. Does it have an EFR? The
* EFR is located in the same register position as the IIR and
* we know the top two bits of the IIR are currently set. The
* EFR should contain zero. Try to read the EFR.
*/
static void autoconfig_16550a(struct uart_8250_port *up)
{
unsigned char status1, status2;
unsigned int iersave;
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&up->port.lock);
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS) &&
!(up->port.flags & UPF_FULL_PROBE))
return;
/*
* Check for presence of the EFR when DLAB is set.
* Only ST16C650V1 UARTs pass this test.
*/
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
if (serial_in(up, UART_EFR) == 0) {
serial_out(up, UART_EFR, 0xA8);
if (serial_in(up, UART_EFR) != 0) {
up->port.type = PORT_16650;
up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
} else {
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR7_64BYTE);
status1 = serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED_16750;
serial_out(up, UART_FCR, 0);
serial_out(up, UART_LCR, 0);
if (status1 == UART_IIR_FIFO_ENABLED_16750)
up->port.type = PORT_16550A_FSL64;
}
serial_out(up, UART_EFR, 0);
return;
}
/*
* Maybe it requires 0xbf to be written to the LCR.
* (other ST16C650V2 UARTs, TI16C752A, etc)
*/
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
autoconfig_has_efr(up);
return;
}
/*
* Check for a National Semiconductor SuperIO chip.
* Attempt to switch to bank 2, read the value of the LOOP bit
* from EXCR1. Switch back to bank 0, change it in MCR. Then
* switch back to bank 2, read it from EXCR1 again and check
* it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
*/
serial_out(up, UART_LCR, 0);
status1 = serial8250_in_MCR(up);
serial_out(up, UART_LCR, 0xE0);
status2 = serial_in(up, 0x02); /* EXCR1 */
if (!((status2 ^ status1) & UART_MCR_LOOP)) {
serial_out(up, UART_LCR, 0);
serial8250_out_MCR(up, status1 ^ UART_MCR_LOOP);
serial_out(up, UART_LCR, 0xE0);
status2 = serial_in(up, 0x02); /* EXCR1 */
serial_out(up, UART_LCR, 0);
serial8250_out_MCR(up, status1);
if ((status2 ^ status1) & UART_MCR_LOOP) {
unsigned short quot;
serial_out(up, UART_LCR, 0xE0);
quot = serial_dl_read(up);
quot <<= 3;
if (ns16550a_goto_highspeed(up))
serial_dl_write(up, quot);
serial_out(up, UART_LCR, 0);
up->port.uartclk = 921600*16;
up->port.type = PORT_NS16550A;
up->capabilities |= UART_NATSEMI;
return;
}
}
/*
* No EFR. Try to detect a TI16750, which only sets bit 5 of
* the IIR when 64 byte FIFO mode is enabled when DLAB is set.
* Try setting it with and without DLAB set. Cheap clones
* set bit 5 without DLAB set.
*/
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
status1 = serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED_16750;
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
status2 = serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED_16750;
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_LCR, 0);
if (status1 == UART_IIR_FIFO_ENABLED_16550A &&
status2 == UART_IIR_FIFO_ENABLED_16750) {
up->port.type = PORT_16750;
up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
return;
}
/*
* Try writing and reading the UART_IER_UUE bit (b6).
* If it works, this is probably one of the Xscale platform's
* internal UARTs.
* We're going to explicitly set the UUE bit to 0 before
* trying to write and read a 1 just to make sure it's not
* already a 1 and maybe locked there before we even start.
*/
iersave = serial_in(up, UART_IER);
serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
/*
* OK it's in a known zero state, try writing and reading
* without disturbing the current state of the other bits.
*/
serial_out(up, UART_IER, iersave | UART_IER_UUE);
if (serial_in(up, UART_IER) & UART_IER_UUE) {
/*
* It's an Xscale.
* We'll leave the UART_IER_UUE bit set to 1 (enabled).
*/
up->port.type = PORT_XSCALE;
up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
return;
}
}
serial_out(up, UART_IER, iersave);
/*
* We distinguish between 16550A and U6 16550A by counting
* how many bytes are in the FIFO.
*/
if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
up->port.type = PORT_U6_16550A;
up->capabilities |= UART_CAP_AFE;
}
}
/*
* This routine is called by rs_init() to initialize a specific serial
* port. It determines what type of UART chip this serial port is
* using: 8250, 16450, 16550, 16550A. The important question is
* whether or not this UART is a 16550A or not, since this will
* determine whether or not we can use its FIFO features or not.
*/
static void autoconfig(struct uart_8250_port *up)
{
unsigned char status1, scratch, scratch2, scratch3;
unsigned char save_lcr, save_mcr;
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int old_capabilities;
if (!port->iobase && !port->mapbase && !port->membase)
return;
/*
* We really do need global IRQs disabled here - we're going to
* be frobbing the chips IRQ enable register to see if it exists.
*
* Synchronize UART_IER access against the console.
*/
uart_port_lock_irqsave(port, &flags);
up->capabilities = 0;
up->bugs = 0;
if (!(port->flags & UPF_BUGGY_UART)) {
/*
* Do a simple existence test first; if we fail this,
* there's no point trying anything else.
*
* 0x80 is used as a nonsense port to protect against
* false positives due to ISA bus float. The
* assumption is that 0x80 is a non-existent port;
* which should be safe since include/asm/io.h also
* makes this assumption.
*
* Note: this is safe as long as MCR bit 4 is clear
* and the device is in "PC" mode.
*/
scratch = serial_in(up, UART_IER);
serial_out(up, UART_IER, 0);
#if defined(__i386__) && defined(CONFIG_HAS_IOPORT)
outb(0xff, 0x080);
#endif
/*
* Mask out IER[7:4] bits for test as some UARTs (e.g. TL
* 16C754B) only allow them to be modified if an EFR bit is set.
*/
scratch2 = serial_in(up, UART_IER) & UART_IER_ALL_INTR;
serial_out(up, UART_IER, UART_IER_ALL_INTR);
#if defined(__i386__) && defined(CONFIG_HAS_IOPORT)
outb(0, 0x080);
#endif
scratch3 = serial_in(up, UART_IER) & UART_IER_ALL_INTR;
serial_out(up, UART_IER, scratch);
if (scratch2 != 0 || scratch3 != UART_IER_ALL_INTR) {
/*
* We failed; there's nothing here
*/
uart_port_unlock_irqrestore(port, flags);
return;
}
}
save_mcr = serial8250_in_MCR(up);
save_lcr = serial_in(up, UART_LCR);
/*
* Check to see if a UART is really there. Certain broken
* internal modems based on the Rockwell chipset fail this
* test, because they apparently don't implement the loopback
* test mode. So this test is skipped on the COM 1 through
* COM 4 ports. This *should* be safe, since no board
* manufacturer would be stupid enough to design a board
* that conflicts with COM 1-4 --- we hope!
*/
if (!(port->flags & UPF_SKIP_TEST)) {
serial8250_out_MCR(up, UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_RTS);
status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
serial8250_out_MCR(up, save_mcr);
if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
uart_port_unlock_irqrestore(port, flags);
return;
}
}
/*
* We're pretty sure there's a port here. Lets find out what
* type of port it is. The IIR top two bits allows us to find
* out if it's 8250 or 16450, 16550, 16550A or later. This
* determines what we test for next.
*
* We also initialise the EFR (if any) to zero for later. The
* EFR occupies the same register location as the FCR and IIR.
*/
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, 0);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
switch (serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED) {
case UART_IIR_FIFO_ENABLED_8250:
autoconfig_8250(up);
break;
case UART_IIR_FIFO_ENABLED_16550:
port->type = PORT_16550;
break;
case UART_IIR_FIFO_ENABLED_16550A:
autoconfig_16550a(up);
break;
default:
port->type = PORT_UNKNOWN;
break;
}
rsa_autoconfig(up);
serial_out(up, UART_LCR, save_lcr);
port->fifosize = uart_config[up->port.type].fifo_size;
old_capabilities = up->capabilities;
up->capabilities = uart_config[port->type].flags;
up->tx_loadsz = uart_config[port->type].tx_loadsz;
if (port->type != PORT_UNKNOWN) {
/*
* Reset the UART.
*/
rsa_reset(up);
serial8250_out_MCR(up, save_mcr);
serial8250_clear_fifos(up);
serial_in(up, UART_RX);
serial8250_clear_IER(up);
}
uart_port_unlock_irqrestore(port, flags);
/*
* Check if the device is a Fintek F81216A
*/
if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
fintek_8250_probe(up);
if (up->capabilities != old_capabilities) {
dev_warn(port->dev, "detected caps %08x should be %08x\n",
old_capabilities, up->capabilities);
}
}
static void autoconfig_irq(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
unsigned char save_mcr, save_ier;
unsigned char save_ICP = 0;
unsigned int ICP = 0;
unsigned long irqs;
int irq;
if (port->flags & UPF_FOURPORT) {
ICP = (port->iobase & 0xfe0) | 0x1f;
save_ICP = inb_p(ICP);
outb_p(0x80, ICP);
inb_p(ICP);
}
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
save_mcr = serial8250_in_MCR(up);
/* Synchronize UART_IER access against the console. */
scoped_guard(uart_port_lock_irq, port)
save_ier = serial_in(up, UART_IER);
serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
irqs = probe_irq_on();
serial8250_out_MCR(up, 0);
udelay(10);
if (port->flags & UPF_FOURPORT) {
serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
} else {
serial8250_out_MCR(up,
UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
}
/* Synchronize UART_IER access against the console. */
scoped_guard(uart_port_lock_irq, port)
serial_out(up, UART_IER, UART_IER_ALL_INTR);
serial8250_clear_interrupts(port);
serial_out(up, UART_TX, 0xFF);
udelay(20);
irq = probe_irq_off(irqs);
serial8250_out_MCR(up, save_mcr);
/* Synchronize UART_IER access against the console. */
scoped_guard(uart_port_lock_irq, port)
serial_out(up, UART_IER, save_ier);
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
port->irq = (irq > 0) ? irq : 0;
}
static void serial8250_stop_rx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&port->lock);
guard(serial8250_rpm)(up);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
serial_port_out(port, UART_IER, up->ier);
}
/**
* serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback
* @p: uart 8250 port
* @toggle_ier: true to allow enabling receive interrupts
*
* Generic callback usable by 8250 uart drivers to stop rs485 transmission.
*/
void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier)
{
unsigned char mcr = serial8250_in_MCR(p);
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&p->port.lock);
if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
mcr |= UART_MCR_RTS;
else
mcr &= ~UART_MCR_RTS;
serial8250_out_MCR(p, mcr);
/*
* Empty the RX FIFO, we are not interested in anything
* received during the half-duplex transmission.
* Enable previously disabled RX interrupts.
*/
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
serial8250_clear_and_reinit_fifos(p);
if (toggle_ier) {
p->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_port_out(&p->port, UART_IER, p->ier);
}
}
}
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
{
struct uart_8250_em485 *em485 = container_of(t, struct uart_8250_em485,
stop_tx_timer);
struct uart_8250_port *p = em485->port;
guard(serial8250_rpm)(p);
guard(uart_port_lock_irqsave)(&p->port);
if (em485->active_timer == &em485->stop_tx_timer) {
p->rs485_stop_tx(p, true);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
return HRTIMER_NORESTART;
}
static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
{
hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
}
static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
{
struct uart_8250_em485 *em485 = p->em485;
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&p->port.lock);
stop_delay += (u64)p->port.rs485.delay_rts_after_send * NSEC_PER_MSEC;
/*
* rs485_stop_tx() is going to set RTS according to config
* AND flush RX FIFO if required.
*/
if (stop_delay > 0) {
em485->active_timer = &em485->stop_tx_timer;
hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL);
} else {
p->rs485_stop_tx(p, true);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
}
static inline void __stop_tx(struct uart_8250_port *p)
{
struct uart_8250_em485 *em485 = p->em485;
if (em485) {
u16 lsr = serial_lsr_in(p);
u64 stop_delay = 0;
if (!(lsr & UART_LSR_THRE))
return;
/*
* To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
* shift register are empty. The device driver should either
* enable interrupt on TEMT or set UART_CAP_NOTEMT that will
* enlarge stop_tx_timer by the tx time of one frame to cover
* for emptying of the shift register.
*/
if (!(lsr & UART_LSR_TEMT)) {
if (!(p->capabilities & UART_CAP_NOTEMT))
return;
/*
* RTS might get deasserted too early with the normal
* frame timing formula. It seems to suggest THRE might
* get asserted already during tx of the stop bit
* rather than after it is fully sent.
* Roughly estimate 1 extra bit here with / 7.
*/
stop_delay = p->port.frame_time + DIV_ROUND_UP(p->port.frame_time, 7);
}
__stop_tx_rs485(p, stop_delay);
}
if (serial8250_clear_THRI(p))
serial8250_rpm_put_tx(p);
}
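/*
 * Worked example of the extra delay above (numbers are illustrative):
 * at 9600 baud 8N1 a frame is 10 bits, so frame_time is roughly
 * 1041667 ns; stop_delay becomes 1041667 + DIV_ROUND_UP(1041667, 7)
 * ~= 1.19 ms, before __stop_tx_rs485() adds delay_rts_after_send on top.
 */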
static void serial8250_stop_tx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
guard(serial8250_rpm)(up);
__stop_tx(up);
/*
* We really want to stop the transmitter from sending.
*/
if (port->type == PORT_16C950) {
up->acr |= UART_ACR_TXDIS;
serial_icr_write(up, UART_ACR, up->acr);
}
}
static inline void __start_tx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
if (up->dma && !up->dma->tx_dma(up))
return;
if (serial8250_set_THRI(up)) {
if (up->bugs & UART_BUG_TXEN) {
u16 lsr = serial_lsr_in(up);
if (lsr & UART_LSR_THRE)
serial8250_tx_chars(up);
}
}
/*
* Re-enable the transmitter if we disabled it.
*/
if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
up->acr &= ~UART_ACR_TXDIS;
serial_icr_write(up, UART_ACR, up->acr);
}
}
/**
* serial8250_em485_start_tx() - generic ->rs485_start_tx() callback
* @up: uart 8250 port
* @toggle_ier: true to allow disabling receive interrupts
*
* Generic callback usable by 8250 uart drivers to start rs485 transmission.
* Assumes that setting the RTS bit in the MCR register means RTS is high.
* (Some chips use inverse semantics.) Further assumes that reception is
* stoppable by disabling the UART_IER_RDI interrupt. (Some chips set the
* UART_LSR_DR bit even when UART_IER_RDI is disabled, foiling this approach.)
*/
void serial8250_em485_start_tx(struct uart_8250_port *up, bool toggle_ier)
{
unsigned char mcr = serial8250_in_MCR(up);
if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX) && toggle_ier)
serial8250_stop_rx(&up->port);
if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
mcr |= UART_MCR_RTS;
else
mcr &= ~UART_MCR_RTS;
serial8250_out_MCR(up, mcr);
}
EXPORT_SYMBOL_GPL(serial8250_em485_start_tx);
/* Returns false, if start_tx_timer was setup to defer TX start */
static bool start_tx_rs485(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
/*
* While serial8250_em485_handle_stop_tx() is a noop if
* em485->active_timer != &em485->stop_tx_timer, it might happen that
* the timer is still armed and triggers only after the current bunch of
* chars is send and em485->active_timer == &em485->stop_tx_timer again.
* So cancel the timer. There is still a theoretical race condition if
* the timer is already running and only comes around to check for
* em485->active_timer when &em485->stop_tx_timer is armed again.
*/
if (em485->active_timer == &em485->stop_tx_timer)
hrtimer_try_to_cancel(&em485->stop_tx_timer);
em485->active_timer = NULL;
if (em485->tx_stopped) {
em485->tx_stopped = false;
up->rs485_start_tx(up, true);
if (up->port.rs485.delay_rts_before_send > 0) {
em485->active_timer = &em485->start_tx_timer;
start_hrtimer_ms(&em485->start_tx_timer,
up->port.rs485.delay_rts_before_send);
return false;
}
}
return true;
}
static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
{
struct uart_8250_em485 *em485 = container_of(t, struct uart_8250_em485,
start_tx_timer);
struct uart_8250_port *p = em485->port;
guard(uart_port_lock_irqsave)(&p->port);
if (em485->active_timer == &em485->start_tx_timer) {
__start_tx(&p->port);
em485->active_timer = NULL;
}
return HRTIMER_NORESTART;
}
static void serial8250_start_tx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&port->lock);
if (!port->x_char && kfifo_is_empty(&port->state->port.xmit_fifo))
return;
serial8250_rpm_get_tx(up);
if (em485) {
if ((em485->active_timer == &em485->start_tx_timer) ||
!start_tx_rs485(port))
return;
}
__start_tx(port);
}
static void serial8250_throttle(struct uart_port *port)
{
port->throttle(port);
}
static void serial8250_unthrottle(struct uart_port *port)
{
port->unthrottle(port);
}
static void serial8250_disable_ms(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&port->lock);
/* no MSR capabilities */
if (up->bugs & UART_BUG_NOMSR)
return;
mctrl_gpio_disable_ms_no_sync(up->gpios);
up->ier &= ~UART_IER_MSI;
serial_port_out(port, UART_IER, up->ier);
}
static void serial8250_enable_ms(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
/* Port locked to synchronize UART_IER access against the console. */
lockdep_assert_held_once(&port->lock);
/* no MSR capabilities */
if (up->bugs & UART_BUG_NOMSR)
return;
mctrl_gpio_enable_ms(up->gpios);
up->ier |= UART_IER_MSI;
guard(serial8250_rpm)(up);
serial_port_out(port, UART_IER, up->ier);
}
void serial8250_read_char(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
u8 ch, flag = TTY_NORMAL;
if (likely(lsr & UART_LSR_DR))
ch = serial_in(up, UART_RX);
else
/*
* Intel 82571 has a Serial Over Lan device that will
* set UART_LSR_BI without setting UART_LSR_DR when
* it receives a break. To avoid reading from the
* receive buffer without UART_LSR_DR bit set, we
* just force the read character to be 0
*/
ch = 0;
port->icount.rx++;
lsr |= up->lsr_saved_flags;
up->lsr_saved_flags = 0;
if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
if (lsr & UART_LSR_BI) {
lsr &= ~(UART_LSR_FE | UART_LSR_PE);
port->icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(port))
return;
} else if (lsr & UART_LSR_PE)
port->icount.parity++;
else if (lsr & UART_LSR_FE)
port->icount.frame++;
if (lsr & UART_LSR_OE)
port->icount.overrun++;
/*
* Mask off conditions which should be ignored.
*/
lsr &= port->read_status_mask;
if (lsr & UART_LSR_BI) {
dev_dbg(port->dev, "handling break\n");
flag = TTY_BREAK;
} else if (lsr & UART_LSR_PE)
flag = TTY_PARITY;
else if (lsr & UART_LSR_FE)
flag = TTY_FRAME;
}
if (uart_prepare_sysrq_char(port, ch))
return;
uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
}
EXPORT_SYMBOL_GPL(serial8250_read_char);
/*
* serial8250_rx_chars - Read characters. The first LSR value must be passed in.
*
* Returns LSR bits. The caller should rely only on non-Rx related LSR bits
* (such as THRE) because the LSR value might come from an already consumed
* character.
*/
u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
int max_count = 256;
do {
serial8250_read_char(up, lsr);
if (--max_count == 0)
break;
lsr = serial_in(up, UART_LSR);
} while (lsr & (UART_LSR_DR | UART_LSR_BI));
tty_flip_buffer_push(&port->state->port);
return lsr;
}
EXPORT_SYMBOL_GPL(serial8250_rx_chars);
void serial8250_tx_chars(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
struct tty_port *tport = &port->state->port;
int count;
if (port->x_char) {
uart_xchar_out(port, UART_TX);
return;
}
if (uart_tx_stopped(port)) {
serial8250_stop_tx(port);
return;
}
if (kfifo_is_empty(&tport->xmit_fifo)) {
__stop_tx(up);
return;
}
count = up->tx_loadsz;
do {
unsigned char c;
if (!uart_fifo_get(port, &c))
break;
serial_out(up, UART_TX, c);
if (up->bugs & UART_BUG_TXRACE) {
/*
* The Aspeed BMC virtual UARTs have a bug where data
* may get stuck in the BMC's Tx FIFO from bursts of
* writes on the APB interface.
*
* Delay back-to-back writes by a read cycle to avoid
* stalling the VUART. Read a register that won't have
* side-effects and discard the result.
*/
serial_in(up, UART_SCR);
}
if ((up->capabilities & UART_CAP_HFIFO) &&
!uart_lsr_tx_empty(serial_in(up, UART_LSR)))
break;
/* The BCM2835 MINI UART THRE bit is really a not-full bit. */
if ((up->capabilities & UART_CAP_MINI) &&
!(serial_in(up, UART_LSR) & UART_LSR_THRE))
break;
} while (--count > 0);
if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(port);
/*
* With RPM enabled, we have to wait until the FIFO is empty before the
* HW can go idle. So we get here once again with empty FIFO and disable
* the interrupt and RPM in __stop_tx()
*/
if (kfifo_is_empty(&tport->xmit_fifo) &&
!(up->capabilities & UART_CAP_RPM))
__stop_tx(up);
}
EXPORT_SYMBOL_GPL(serial8250_tx_chars);
/* Caller holds uart port lock */
unsigned int serial8250_modem_status(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
unsigned int status = serial_in(up, UART_MSR);
status |= up->msr_saved_flags;
up->msr_saved_flags = 0;
if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
port->state != NULL) {
if (status & UART_MSR_TERI)
port->icount.rng++;
if (status & UART_MSR_DDSR)
port->icount.dsr++;
if (status & UART_MSR_DDCD)
uart_handle_dcd_change(port, status & UART_MSR_DCD);
if (status & UART_MSR_DCTS)
uart_handle_cts_change(port, status & UART_MSR_CTS);
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
return status;
}
EXPORT_SYMBOL_GPL(serial8250_modem_status);
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
{
switch (iir & 0x3f) {
case UART_IIR_THRI:
/*
* Postpone DMA or not decision to IIR_RDI or IIR_RX_TIMEOUT
* because it's impossible to make an informed decision about
* that with IIR_THRI.
*
* This also fixes one known DMA Rx corruption issue where
* DR is asserted but DMA Rx only gets a corrupted zero byte
* (too early DR?).
*/
return false;
case UART_IIR_RDI:
if (!up->dma->rx_running)
break;
fallthrough;
case UART_IIR_RLSI:
case UART_IIR_RX_TIMEOUT:
serial8250_rx_dma_flush(up);
return true;
}
return up->dma->rx_dma(up);
}
/*
* This handles the interrupt from one port.
*/
int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct tty_port *tport = &port->state->port;
bool skip_rx = false;
unsigned long flags;
u16 status;
if (iir & UART_IIR_NO_INT)
return 0;
uart_port_lock_irqsave(port, &flags);
status = serial_lsr_in(up);
/*
* If port is stopped and there are no error conditions in the
* FIFO, then don't drain the FIFO, as this may lead to TTY buffer
* overflow. Not servicing the RX FIFO would trigger auto HW flow
* control when FIFO occupancy reaches the preset threshold, thus
* halting RX. This only works when auto HW flow control is
* available.
*/
if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
(port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
!(up->ier & (UART_IER_RLSI | UART_IER_RDI)))
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
struct irq_data *d;
d = irq_get_irq_data(port->irq);
if (d && irqd_is_wakeup_set(d))
pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) {
if (!up->dma || up->dma->tx_err)
serial8250_tx_chars(up);
else if (!up->dma->tx_running)
__stop_tx(up);
}
uart_unlock_and_check_sysrq_irqrestore(port, flags);
return 1;
}
EXPORT_SYMBOL_GPL(serial8250_handle_irq);
static int serial8250_default_handle_irq(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int iir;
guard(serial8250_rpm)(up);
iir = serial_port_in(port, UART_IIR);
return serial8250_handle_irq(port, iir);
}
/*
* Newer 16550 compatible parts such as the SC16C650 & Altera 16550 Soft IP
* have a programmable TX threshold that triggers the THRE interrupt in
* the IIR register. In this case, the THRE interrupt indicates the FIFO
* has space available. Load it up with tx_loadsz bytes.
*/
static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
{
unsigned int iir = serial_port_in(port, UART_IIR);
/* TX Threshold IRQ triggered so load up FIFO */
if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
struct uart_8250_port *up = up_to_u8250p(port);
guard(uart_port_lock_irqsave)(port);
serial8250_tx_chars(up);
}
iir = serial_port_in(port, UART_IIR);
return serial8250_handle_irq(port, iir);
}
static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
guard(serial8250_rpm)(up);
guard(uart_port_lock_irqsave)(port);
if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
return TIOCSER_TEMT;
return 0;
}
unsigned int serial8250_do_get_mctrl(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int status;
unsigned int val;
scoped_guard(serial8250_rpm, up)
status = serial8250_modem_status(up);
val = serial8250_MSR_to_TIOCM(status);
if (up->gpios)
return mctrl_gpio_get(up->gpios, &val);
return val;
}
EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
static unsigned int serial8250_get_mctrl(struct uart_port *port)
{
if (port->get_mctrl)
return port->get_mctrl(port);
return serial8250_do_get_mctrl(port);
}
void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned char mcr;
mcr = serial8250_TIOCM_to_MCR(mctrl);
mcr |= up->mcr;
serial8250_out_MCR(up, mcr);
}
EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
if (port->rs485.flags & SER_RS485_ENABLED)
return;
if (port->set_mctrl)
port->set_mctrl(port, mctrl);
else
serial8250_do_set_mctrl(port, mctrl);
}
static void serial8250_break_ctl(struct uart_port *port, int break_state)
{
struct uart_8250_port *up = up_to_u8250p(port);
guard(serial8250_rpm)(up);
guard(uart_port_lock_irqsave)(port);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_port_out(port, UART_LCR, up->lcr);
}
/* Returns true if @bits were set, false on timeout */
static bool wait_for_lsr(struct uart_8250_port *up, int bits)
{
unsigned int status, tmout;
/*
* Wait for a character to be sent. Fall back to a safe default
* timeout value if @frame_time is not available.
*/
if (up->port.frame_time)
tmout = up->port.frame_time * 2 / NSEC_PER_USEC;
else
tmout = 10000;
for (;;) {
status = serial_lsr_in(up);
if ((status & bits) == bits)
break;
if (--tmout == 0)
break;
udelay(1);
touch_nmi_watchdog();
}
return (tmout != 0);
}
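/*
 * Example of the timeout above (illustrative): at 115200 baud 8N1 a
 * frame is 10 bits, so frame_time is about 86806 ns and tmout becomes
 * 86806 * 2 / 1000 ~= 173 polls of 1 us each, i.e. roughly two
 * character times.
 */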
/* Wait for transmitter and holding register to empty with timeout */
static void wait_for_xmitr(struct uart_8250_port *up, int bits)
{
unsigned int tmout;
wait_for_lsr(up, bits);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
if (msr & UART_MSR_CTS)
break;
udelay(1);
touch_nmi_watchdog();
}
}
}
#ifdef CONFIG_CONSOLE_POLL
/*
* Console polling routines for writing and reading from the uart while
* in an interrupt or debug context.
*/
static int serial8250_get_poll_char(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
u16 lsr;
guard(serial8250_rpm)(up);
lsr = serial_port_in(port, UART_LSR);
if (!(lsr & UART_LSR_DR))
return NO_POLL_CHAR;
return serial_port_in(port, UART_RX);
}
static void serial8250_put_poll_char(struct uart_port *port,
unsigned char c)
{
unsigned int ier;
struct uart_8250_port *up = up_to_u8250p(port);
/*
* Normally the port is locked to synchronize UART_IER access
* against the console. However, this function is only used by
* KDB/KGDB, where it may not be possible to acquire the port
* lock because all other CPUs are quiesced. The quiescence
* should allow safe lockless usage here.
*/
guard(serial8250_rpm)(up);
/*
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
serial8250_clear_IER(up);
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
* Send the character out.
*/
serial_port_out(port, UART_TX, c);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
serial_port_out(port, UART_IER, ier);
}
#endif /* CONFIG_CONSOLE_POLL */
static void serial8250_startup_special(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
switch (port->type) {
case PORT_16C950: {
/*
* Wake up and initialize UART
*
* Synchronize UART_IER access against the console.
*/
guard(uart_port_lock_irqsave)(port);
up->acr = 0;
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
serial_port_out(port, UART_IER, 0);
serial_port_out(port, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
serial_port_out(port, UART_LCR, 0);
break;
}
case PORT_DA830:
/*
* Reset the port
*
* Synchronize UART_IER access against the console.
*/
scoped_guard(uart_port_lock_irqsave, port) {
serial_port_out(port, UART_IER, 0);
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
}
mdelay(10);
/* Enable Tx, Rx and free run mode */
serial_port_out(port, UART_DA830_PWREMU_MGMT,
UART_DA830_PWREMU_MGMT_UTRST |
UART_DA830_PWREMU_MGMT_URRST |
UART_DA830_PWREMU_MGMT_FREE);
break;
case PORT_RSA:
rsa_enable(up);
break;
}
}
static void serial8250_set_TRG_levels(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
switch (port->type) {
/* For a XR16C850, we need to set the trigger levels */
case PORT_16850: {
u8 fctr;
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
fctr |= UART_FCTR_TRGD;
serial_port_out(port, UART_FCTR, fctr | UART_FCTR_RX);
serial_port_out(port, UART_TRG, UART_TRG_96);
serial_port_out(port, UART_FCTR, fctr | UART_FCTR_TX);
serial_port_out(port, UART_TRG, UART_TRG_96);
serial_port_out(port, UART_LCR, 0);
break;
}
/* For the Altera 16550 variants, set TX threshold trigger level. */
case PORT_ALTR_16550_F32:
case PORT_ALTR_16550_F64:
case PORT_ALTR_16550_F128:
if (port->fifosize <= 1)
return;
/* Bounds checking of TX threshold (valid 0 to fifosize-2) */
if (up->tx_loadsz < 2 || up->tx_loadsz > port->fifosize) {
dev_err(port->dev, "TX FIFO Threshold errors, skipping\n");
return;
}
serial_port_out(port, UART_ALTR_AFR, UART_ALTR_EN_TXFIFO_LW);
serial_port_out(port, UART_ALTR_TX_LOW, port->fifosize - up->tx_loadsz);
port->handle_irq = serial8250_tx_threshold_handle_irq;
break;
}
}
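/*
 * Example for the Altera case above (illustrative numbers): with a
 * 32-byte FIFO and tx_loadsz = 16, UART_ALTR_TX_LOW is programmed to
 * 32 - 16 = 16, so the THRE interrupt fires once the TX FIFO drains to
 * that level and serial8250_tx_chars() can top it up with tx_loadsz
 * bytes.
 */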
static void serial8250_THRE_test(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
bool iir_noint1, iir_noint2;
if (!port->irq)
return;
if (up->port.flags & UPF_NO_THRE_TEST)
return;
if (port->irqflags & IRQF_SHARED)
disable_irq_nosync(port->irq);
/*
* Test for UARTs that do not reassert THRE when the transmitter is idle and the interrupt
* has already been cleared. Real 16550s should always reassert this interrupt whenever the
* transmitter is idle and the interrupt is enabled. Delays are necessary to allow register
* changes to become visible.
*
* Synchronize UART_IER access against the console.
*/
scoped_guard(uart_port_lock_irqsave, port) {
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
udelay(1); /* allow THRE to set */
iir_noint1 = serial_port_in(port, UART_IIR) & UART_IIR_NO_INT;
serial_port_out(port, UART_IER, 0);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
udelay(1); /* allow a working UART time to re-assert THRE */
iir_noint2 = serial_port_in(port, UART_IIR) & UART_IIR_NO_INT;
serial_port_out(port, UART_IER, 0);
}
if (port->irqflags & IRQF_SHARED)
enable_irq(port->irq);
/*
* If the interrupt is not reasserted, or we otherwise don't trust the iir, setup a timer to
* kick the UART on a regular basis.
*/
if ((!iir_noint1 && iir_noint2) || up->port.flags & UPF_BUG_THRE)
up->bugs |= UART_BUG_THRE;
}
static void serial8250_init_mctrl(struct uart_port *port)
{
if (port->flags & UPF_FOURPORT) {
if (!port->irq)
port->mctrl |= TIOCM_OUT1;
} else {
/* Most PC uarts need OUT2 raised to enable interrupts. */
if (port->irq)
port->mctrl |= TIOCM_OUT2;
}
serial8250_set_mctrl(port, port->mctrl);
}
static void serial8250_iir_txen_test(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
bool lsr_temt, iir_noint;
if (port->quirks & UPQ_NO_TXEN_TEST)
return;
/* Do a quick test to see if we receive an interrupt when we enable the TX irq. */
serial_port_out(port, UART_IER, UART_IER_THRI);
lsr_temt = serial_port_in(port, UART_LSR) & UART_LSR_TEMT;
iir_noint = serial_port_in(port, UART_IIR) & UART_IIR_NO_INT;
serial_port_out(port, UART_IER, 0);
/*
* Serial over Lan (SoL) hack:
* Intel 8257x Gigabit ethernet chips have a 16550 emulation, to be used for Serial Over
* Lan. Those chips take longer than a normal serial device to signal that transmission
* data has been queued, so the above test generally fails. One solution would be to delay
* the reading of IIR, but that is not reliable since the timeout is variable. So, in case
* of UPQ_NO_TXEN_TEST, let's just not test whether we receive a TX irq.
* This way, we'll never enable UART_BUG_TXEN.
*/
if (lsr_temt && iir_noint) {
if (!(up->bugs & UART_BUG_TXEN)) {
up->bugs |= UART_BUG_TXEN;
dev_dbg(port->dev, "enabling bad tx status workarounds\n");
}
return;
}
/* FIXME: why is this needed? */
up->bugs &= ~UART_BUG_TXEN;
}
static void serial8250_initialize(struct uart_port *port)
{
guard(uart_port_lock_irqsave)(port);
serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
serial8250_init_mctrl(port);
serial8250_iir_txen_test(port);
}
int serial8250_do_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
int retval;
if (!port->fifosize)
port->fifosize = uart_config[port->type].fifo_size;
if (!up->tx_loadsz)
up->tx_loadsz = uart_config[port->type].tx_loadsz;
if (!up->capabilities)
up->capabilities = uart_config[port->type].flags;
up->mcr = 0;
if (port->iotype != up->cur_iotype)
set_io_from_upio(port);
guard(serial8250_rpm)(up);
serial8250_startup_special(port);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial8250_clear_fifos(up);
serial8250_clear_interrupts(port);
/*
* At this point, there's no way the LSR could still be 0xff;
* if it is, then bail out, because there's likely no UART
* here.
*/
if (!(port->flags & UPF_BUGGY_UART) &&
(serial_port_in(port, UART_LSR) == 0xff)) {
dev_info_ratelimited(port->dev, "LSR safety check engaged!\n");
return -ENODEV;
}
serial8250_set_TRG_levels(port);
/* Check if we need to have shared IRQs */
if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
up->port.irqflags |= IRQF_SHARED;
retval = up->ops->setup_irq(up);
if (retval)
return retval;
serial8250_THRE_test(port);
up->ops->setup_timer(up);
serial8250_initialize(port);
/*
* Clear the interrupt registers again for luck, and clear the
* saved flags to avoid getting false values from polling
* routines or the previous session.
*/
serial8250_clear_interrupts(port);
up->lsr_saved_flags = 0;
up->msr_saved_flags = 0;
/*
* Request DMA channels for both RX and TX.
*/
if (up->dma) {
const char *msg = NULL;
if (uart_console(port))
msg = "forbid DMA for kernel console";
else if (serial8250_request_dma(up))
msg = "failed to request DMA";
if (msg) {
dev_warn_ratelimited(port->dev, "%s\n", msg);
up->dma = NULL;
}
}
/*
* Set the IER shadow for rx interrupts but defer actual interrupt
* enable until after the FIFOs are enabled; otherwise, an already-
* active sender can swamp the interrupt handler with "too much work".
*/
up->ier = UART_IER_RLSI | UART_IER_RDI;
if (port->flags & UPF_FOURPORT) {
unsigned int icp;
/*
* Enable interrupts on the AST Fourport board
*/
icp = (port->iobase & 0xfe0) | 0x01f;
outb_p(0x80, icp);
inb_p(icp);
}
return 0;
}
EXPORT_SYMBOL_GPL(serial8250_do_startup);
static int serial8250_startup(struct uart_port *port)
{
if (port->startup)
return port->startup(port);
return serial8250_do_startup(port);
}
void serial8250_do_shutdown(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_rpm_get(up);
/*
* Disable interrupts from this port
*
* Synchronize UART_IER access against the console.
*/
scoped_guard(uart_port_lock_irqsave, port) {
up->ier = 0;
serial_port_out(port, UART_IER, 0);
}
synchronize_irq(port->irq);
if (up->dma)
serial8250_release_dma(up);
scoped_guard(uart_port_lock_irqsave, port) {
if (port->flags & UPF_FOURPORT) {
/* reset interrupts on the AST Fourport board */
inb((port->iobase & 0xfe0) | 0x1f);
port->mctrl |= TIOCM_OUT1;
} else
port->mctrl &= ~TIOCM_OUT2;
serial8250_set_mctrl(port, port->mctrl);
}
/*
* Disable break condition and FIFOs
*/
serial_port_out(port, UART_LCR,
serial_port_in(port, UART_LCR) & ~UART_LCR_SBC);
serial8250_clear_fifos(up);
rsa_disable(up);
/*
* Read data port to reset things, and then unlink from
* the IRQ chain.
*/
serial_port_in(port, UART_RX);
serial8250_rpm_put(up);
up->ops->release_irq(up);
}
EXPORT_SYMBOL_GPL(serial8250_do_shutdown);
static void serial8250_shutdown(struct uart_port *port)
{
if (port->shutdown)
port->shutdown(port);
else
serial8250_do_shutdown(port);
}
static void serial8250_flush_buffer(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
if (up->dma)
serial8250_tx_dma_flush(up);
}
static unsigned int serial8250_do_get_divisor(struct uart_port *port, unsigned int baud)
{
upf_t magic_multiplier = port->flags & UPF_MAGIC_MULTIPLIER;
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int quot;
/*
* Handle magic divisors for baud rates above baud_base on SMSC
* Super I/O chips. We clamp custom rates from clk/6 and clk/12
* up to clk/4 (0x8001) and clk/8 (0x8002) respectively. These
* magic divisors actually reprogram the baud rate generator's
* reference clock derived from the chip's 14.318MHz clock input.
*
* Documentation claims that with these magic divisors the base
* frequencies of 7.3728MHz and 3.6864MHz are used respectively
* for the extra baud rates of 460800bps and 230400bps rather
* than the usual base frequency of 1.8462MHz. However empirical
* evidence contradicts that.
*
* Instead bit 7 of the DLM register (bit 15 of the divisor) is
* effectively used as a clock prescaler selection bit for the
* base frequency of 7.3728MHz, always used. If set to 0, then
* the base frequency is divided by 4 for use by the Baud Rate
* Generator, for the usual arrangement where the value of 1 of
* the divisor produces the baud rate of 115200bps. Conversely,
* if set to 1 and high-speed operation has been enabled with the
* Serial Port Mode Register in the Device Configuration Space,
* then the base frequency is supplied directly to the Baud Rate
* Generator, so for the divisor values of 0x8001, 0x8002, 0x8003,
* 0x8004, etc. the respective baud rates produced are 460800bps,
* 230400bps, 153600bps, 115200bps, etc.
*
* In all cases only low 15 bits of the divisor are used to divide
* the baud base and therefore 32767 is the maximum divisor value
* possible, even though documentation says that the programmable
* Baud Rate Generator is capable of dividing the internal PLL
* clock by any divisor from 1 to 65535.
*/
if (magic_multiplier && baud >= port->uartclk / 6)
quot = 0x8001;
else if (magic_multiplier && baud >= port->uartclk / 12)
quot = 0x8002;
else
quot = uart_get_divisor(port, baud);
/*
* Oxford Semi 952 rev B workaround
*/
if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
quot++;
return quot;
}
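/*
 * Illustrative numbers for the logic above: with the usual 1.8432 MHz
 * clock, uart_get_divisor() gives 1843200 / (16 * 9600) = 12 for 9600
 * baud. On an SMSC port flagged UPF_MAGIC_MULTIPLIER, asking for at
 * least uartclk / 6 = 307200 baud returns the magic divisor 0x8001
 * (clk/4, i.e. 460800), and at least uartclk / 12 = 153600 returns
 * 0x8002 (clk/8, i.e. 230400).
 */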
static unsigned int serial8250_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
{
if (port->get_divisor)
return port->get_divisor(port, baud, frac);
return serial8250_do_get_divisor(port, baud);
}
static unsigned char serial8250_compute_lcr(struct uart_8250_port *up, tcflag_t c_cflag)
{
u8 lcr = UART_LCR_WLEN(tty_get_char_size(c_cflag));
if (c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
if (c_cflag & PARENB)
lcr |= UART_LCR_PARITY;
if (!(c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
if (c_cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
return lcr;
}
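/*
 * Example (illustrative): a c_cflag of CS8 | PARENB, i.e. 8 data bits
 * with even parity and one stop bit, yields UART_LCR_WLEN8 |
 * UART_LCR_PARITY | UART_LCR_EPAR = 0x1b. Note that UART_LCR_EPAR is
 * also set for no-parity modes, but the hardware ignores it while
 * UART_LCR_PARITY is clear.
 */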
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
unsigned int quot)
{
struct uart_8250_port *up = up_to_u8250p(port);
/* Workaround to enable 115200 baud on OMAP1510 internal ports */
if (is_omap1510_8250(up)) {
if (baud == 115200) {
quot = 1;
serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
} else
serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
}
/*
* For NatSemi, switch to bank 2 not bank 1, to avoid resetting EXCR2,
* otherwise just set DLAB
*/
if (up->capabilities & UART_NATSEMI)
serial_port_out(port, UART_LCR, 0xe0);
else
serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
serial_dl_write(up, quot);
}
EXPORT_SYMBOL_GPL(serial8250_do_set_divisor);
static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
unsigned int quot, unsigned int quot_frac)
{
if (port->set_divisor)
port->set_divisor(port, baud, quot, quot_frac);
else
serial8250_do_set_divisor(port, baud, quot);
}
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned int tolerance = port->uartclk / 100;
unsigned int min;
unsigned int max;
/*
* Handle magic divisors for baud rates above baud_base on SMSC
* Super I/O chips. Enable custom rates of clk/4 and clk/8, but
* disable divisor values beyond 32767, which are unavailable.
*/
if (port->flags & UPF_MAGIC_MULTIPLIER) {
min = port->uartclk / 16 / UART_DIV_MAX >> 1;
max = (port->uartclk + tolerance) / 4;
} else {
min = port->uartclk / 16 / UART_DIV_MAX;
max = (port->uartclk + tolerance) / 16;
}
/*
* Ask the core to calculate the divisor for us.
* Allow 1% tolerance at the upper limit so uart clks marginally
* slower than nominal still match standard baud rates without
* causing transmission errors.
*/
return uart_get_baud_rate(port, termios, old, min, max);
}
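/*
 * Illustrative range for the limits above: with a 1.8432 MHz clock and
 * no magic multiplier, min is 1843200 / 16 / 0xffff = 1 and max is
 * (1843200 + 18432) / 16 = 116352. With UPF_MAGIC_MULTIPLIER the upper
 * limit grows to (1843200 + 18432) / 4 = 465408 so that the 460800
 * magic rate is accepted.
 */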
/*
* Note: to avoid a tty port mutex deadlock, don't use this method from within the
* uart port callbacks. It is primarily intended to handle a sudden reference clock
* rate change.
*/
void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
{
struct tty_port *tport = &port->state->port;
scoped_guard(tty_port_tty, tport) {
struct tty_struct *tty = scoped_tty();
guard(rwsem_write)(&tty->termios_rwsem);
guard(mutex)(&tport->mutex);
if (port->uartclk == uartclk)
return;
port->uartclk = uartclk;
if (!tty_port_initialized(tport))
return;
serial8250_do_set_termios(port, &tty->termios, NULL);
return;
}
guard(mutex)(&tport->mutex);
port->uartclk = uartclk;
}
EXPORT_SYMBOL_GPL(serial8250_update_uartclk);
static void serial8250_set_mini(struct uart_port *port, struct ktermios *termios)
{
struct uart_8250_port *up = up_to_u8250p(port);
if (!(up->capabilities & UART_CAP_MINI))
return;
termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CMSPAR);
tcflag_t csize = termios->c_cflag & CSIZE;
if (csize == CS5 || csize == CS6) {
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS7;
}
}
static void serial8250_set_trigger_for_slow_speed(struct uart_port *port, struct ktermios *termios,
unsigned int baud)
{
struct uart_8250_port *up = up_to_u8250p(port);
if (!(up->capabilities & UART_CAP_FIFO))
return;
if (port->fifosize <= 1)
return;
if (baud >= 2400)
return;
if (up->dma)
return;
up->fcr &= ~UART_FCR_TRIGGER_MASK;
up->fcr |= UART_FCR_TRIGGER_1;
}
/*
* MCR-based auto flow control. When AFE is enabled, RTS will be deasserted when the receive FIFO
* contains more characters than the trigger, or the MCR RTS bit is cleared.
*/
static void serial8250_set_afe(struct uart_port *port, struct ktermios *termios)
{
struct uart_8250_port *up = up_to_u8250p(port);
if (!(up->capabilities & UART_CAP_AFE))
return;
up->mcr &= ~UART_MCR_AFE;
if (termios->c_cflag & CRTSCTS)
up->mcr |= UART_MCR_AFE;
}
static void serial8250_set_errors_and_ignores(struct uart_port *port, struct ktermios *termios)
{
/*
* Specify which conditions may be considered for error handling and the ignoring of
* characters. The actual ignoring of characters only occurs if the bit is set in
* @ignore_status_mask as well.
*/
port->read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART_LSR_BI;
/* Characters to ignore */
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators, ignore overruns too (for real raw
* support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART_LSR_OE;
}
/* ignore all characters if CREAD is not set */
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= UART_LSR_DR;
}
static void serial8250_set_ier(struct uart_port *port, struct ktermios *termios)
{
struct uart_8250_port *up = up_to_u8250p(port);
/* CTS flow control flag and modem status interrupts */
up->ier &= ~UART_IER_MSI;
if (!(up->bugs & UART_BUG_NOMSR) && UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
if (up->capabilities & UART_CAP_UUE)
up->ier |= UART_IER_UUE;
if (up->capabilities & UART_CAP_RTOIE)
up->ier |= UART_IER_RTOIE;
serial_port_out(port, UART_IER, up->ier);
}
static void serial8250_set_efr(struct uart_port *port, struct ktermios *termios)
{
struct uart_8250_port *up = up_to_u8250p(port);
u8 efr_reg = UART_EFR;
u8 efr = 0;
if (!(up->capabilities & UART_CAP_EFR))
return;
/*
* TI16C752/Startech hardware flow control. FIXME:
* - TI16C752 requires control thresholds to be set.
* - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
*/
if (termios->c_cflag & CRTSCTS)
efr |= UART_EFR_CTS;
if (port->flags & UPF_EXAR_EFR)
efr_reg = UART_XR_EFR;
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, efr_reg, efr);
}
static void serial8250_set_fcr(struct uart_port *port, struct ktermios *termios)
{
struct uart_8250_port *up = up_to_u8250p(port);
bool is_16750 = port->type == PORT_16750;
if (is_16750)
serial_port_out(port, UART_FCR, up->fcr);
/*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR is written without DLAB
* set, this mode will be disabled.
*/
serial_port_out(port, UART_LCR, up->lcr);
if (is_16750)
return;
/* emulated UARTs (Lucent Venus 167x) need two steps */
if (up->fcr & UART_FCR_ENABLE_FIFO)
serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_port_out(port, UART_FCR, up->fcr);
}
void
serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int baud, quot, frac = 0;
u8 lcr;
serial8250_set_mini(port, termios);
lcr = serial8250_compute_lcr(up, termios->c_cflag);
baud = serial8250_get_baud_rate(port, termios, old);
quot = serial8250_get_divisor(port, baud, &frac);
/*
* Ok, we're now changing the port state. Do it with interrupts disabled.
*
* Synchronize UART_IER access against the console.
*/
scoped_guard(serial8250_rpm, up) {
guard(uart_port_lock_irqsave)(port);
up->lcr = lcr;
serial8250_set_trigger_for_slow_speed(port, termios, baud);
serial8250_set_afe(port, termios);
uart_update_timeout(port, termios->c_cflag, baud);
serial8250_set_errors_and_ignores(port, termios);
serial8250_set_ier(port, termios);
serial8250_set_efr(port, termios);
serial8250_set_divisor(port, baud, quot, frac);
serial8250_set_fcr(port, termios);
serial8250_set_mctrl(port, port->mctrl);
}
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
}
EXPORT_SYMBOL(serial8250_do_set_termios);
static void
serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
if (port->set_termios)
port->set_termios(port, termios, old);
else
serial8250_do_set_termios(port, termios, old);
}
void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
guard(uart_port_lock_irq)(port);
serial8250_enable_ms(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
guard(uart_port_lock_irq)(port);
serial8250_disable_ms(port);
}
}
}
EXPORT_SYMBOL_GPL(serial8250_do_set_ldisc);
static void
serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (port->set_ldisc)
port->set_ldisc(port, termios);
else
serial8250_do_set_ldisc(port, termios);
}
void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct uart_8250_port *p = up_to_u8250p(port);
serial8250_set_sleep(p, state != 0);
}
EXPORT_SYMBOL(serial8250_do_pm);
static void
serial8250_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
if (port->pm)
port->pm(port, state, oldstate);
else
serial8250_do_pm(port, state, oldstate);
}
static unsigned int serial8250_port_size(struct uart_8250_port *pt)
{
if (pt->port.mapsize)
return pt->port.mapsize;
if (is_omap1_8250(pt))
return 0x16 << pt->port.regshift;
return 8 << pt->port.regshift;
}
/*
* Resource handling.
*/
static int serial8250_request_std_resource(struct uart_8250_port *up)
{
unsigned int size = serial8250_port_size(up);
struct uart_port *port = &up->port;
switch (port->iotype) {
case UPIO_AU:
case UPIO_TSI:
case UPIO_MEM32:
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
if (!port->mapbase)
return -EINVAL;
if (!request_mem_region(port->mapbase, size, "serial"))
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
release_mem_region(port->mapbase, size);
return -ENOMEM;
}
}
return 0;
case UPIO_HUB6:
case UPIO_PORT:
if (!request_region(port->iobase, size, "serial"))
return -EBUSY;
return 0;
case UPIO_UNKNOWN:
break;
}
return 0;
}
static void serial8250_release_std_resource(struct uart_8250_port *up)
{
unsigned int size = serial8250_port_size(up);
struct uart_port *port = &up->port;
switch (port->iotype) {
case UPIO_AU:
case UPIO_TSI:
case UPIO_MEM32:
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
if (!port->mapbase)
break;
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
port->membase = NULL;
}
release_mem_region(port->mapbase, size);
break;
case UPIO_HUB6:
case UPIO_PORT:
release_region(port->iobase, size);
break;
case UPIO_UNKNOWN:
break;
}
}
static void serial8250_release_port(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_release_std_resource(up);
}
static int serial8250_request_port(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
return serial8250_request_std_resource(up);
}
static int fcr_get_rxtrig_bytes(struct uart_8250_port *up)
{
const struct serial8250_config *conf_type = &uart_config[up->port.type];
unsigned char bytes;
bytes = conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(up->fcr)];
return bytes ? bytes : -EOPNOTSUPP;
}
static int bytes_to_fcr_rxtrig(struct uart_8250_port *up, unsigned char bytes)
{
const struct serial8250_config *conf_type = &uart_config[up->port.type];
int i;
if (!conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(UART_FCR_R_TRIG_00)])
return -EOPNOTSUPP;
for (i = 1; i < UART_FCR_R_TRIG_MAX_STATE; i++) {
if (bytes < conf_type->rxtrig_bytes[i])
/* Use the nearest lower value */
return (--i) << UART_FCR_R_TRIG_SHIFT;
}
return UART_FCR_R_TRIG_11;
}
static int do_get_rxtrig(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
struct uart_8250_port *up = up_to_u8250p(uport);
if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
return -EINVAL;
return fcr_get_rxtrig_bytes(up);
}
static int do_serial8250_get_rxtrig(struct tty_port *port)
{
int rxtrig_bytes;
mutex_lock(&port->mutex);
rxtrig_bytes = do_get_rxtrig(port);
mutex_unlock(&port->mutex);
return rxtrig_bytes;
}
static ssize_t rx_trig_bytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tty_port *port = dev_get_drvdata(dev);
int rxtrig_bytes;
rxtrig_bytes = do_serial8250_get_rxtrig(port);
if (rxtrig_bytes < 0)
return rxtrig_bytes;
return sysfs_emit(buf, "%d\n", rxtrig_bytes);
}
static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
struct uart_8250_port *up = up_to_u8250p(uport);
int rxtrig;
if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
return -EINVAL;
rxtrig = bytes_to_fcr_rxtrig(up, bytes);
if (rxtrig < 0)
return rxtrig;
serial8250_clear_fifos(up);
up->fcr &= ~UART_FCR_TRIGGER_MASK;
up->fcr |= (unsigned char)rxtrig;
serial_out(up, UART_FCR, up->fcr);
return 0;
}
static int do_serial8250_set_rxtrig(struct tty_port *port, unsigned char bytes)
{
int ret;
mutex_lock(&port->mutex);
ret = do_set_rxtrig(port, bytes);
mutex_unlock(&port->mutex);
return ret;
}
static ssize_t rx_trig_bytes_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct tty_port *port = dev_get_drvdata(dev);
unsigned char bytes;
int ret;
if (!count)
return -EINVAL;
ret = kstrtou8(buf, 10, &bytes);
if (ret < 0)
return ret;
ret = do_serial8250_set_rxtrig(port, bytes);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(rx_trig_bytes);
static struct attribute *serial8250_dev_attrs[] = {
&dev_attr_rx_trig_bytes.attr,
NULL
};
static struct attribute_group serial8250_dev_attr_group = {
.attrs = serial8250_dev_attrs,
};
static void register_dev_spec_attr_grp(struct uart_8250_port *up)
{
const struct serial8250_config *conf_type = &uart_config[up->port.type];
if (conf_type->rxtrig_bytes[0])
up->port.attr_group = &serial8250_dev_attr_group;
}
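/*
* When this attribute group is registered, the RX FIFO trigger level is
* exposed to userspace as rx_trig_bytes under the port's tty device
* (e.g. /sys/class/tty/ttyS0/rx_trig_bytes). Reading it returns the current
* trigger in bytes; writing a value such as 8 is mapped to the nearest lower
* supported level by bytes_to_fcr_rxtrig() and reprograms the FCR.
*/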
static void serial8250_config_port(struct uart_port *port, int flags)
{
struct uart_8250_port *up = up_to_u8250p(port);
int ret;
/*
* Find the region that we can probe for. This in turn
* tells us whether we can probe for the type of port.
*/
ret = serial8250_request_std_resource(up);
if (ret < 0)
return;
if (port->iotype != up->cur_iotype)
set_io_from_upio(port);
if (flags & UART_CONFIG_TYPE)
autoconfig(up);
/* HW bugs may trigger IRQ while IIR == NO_INT */
if (port->type == PORT_TEGRA)
up->bugs |= UART_BUG_NOMSR;
if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
autoconfig_irq(up);
if (port->type == PORT_UNKNOWN)
serial8250_release_std_resource(up);
register_dev_spec_attr_grp(up);
up->fcr = uart_config[up->port.type].fcr;
}
static int
serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if (ser->irq >= irq_get_nr_irqs() || ser->irq < 0 ||
ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
ser->type == PORT_STARTECH)
return -EINVAL;
return 0;
}
static const char *serial8250_type(struct uart_port *port)
{
int type = port->type;
if (type >= ARRAY_SIZE(uart_config))
type = 0;
return uart_config[type].name;
}
static const struct uart_ops serial8250_pops = {
.tx_empty = serial8250_tx_empty,
.set_mctrl = serial8250_set_mctrl,
.get_mctrl = serial8250_get_mctrl,
.stop_tx = serial8250_stop_tx,
.start_tx = serial8250_start_tx,
.throttle = serial8250_throttle,
.unthrottle = serial8250_unthrottle,
.stop_rx = serial8250_stop_rx,
.enable_ms = serial8250_enable_ms,
.break_ctl = serial8250_break_ctl,
.startup = serial8250_startup,
.shutdown = serial8250_shutdown,
.flush_buffer = serial8250_flush_buffer,
.set_termios = serial8250_set_termios,
.set_ldisc = serial8250_set_ldisc,
.pm = serial8250_pm,
.type = serial8250_type,
.release_port = serial8250_release_port,
.request_port = serial8250_request_port,
.config_port = serial8250_config_port,
.verify_port = serial8250_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = serial8250_get_poll_char,
.poll_put_char = serial8250_put_poll_char,
#endif
};
void serial8250_init_port(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
spin_lock_init(&port->lock);
port->ctrl_id = 0;
port->pm = NULL;
port->ops = &serial8250_pops;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
up->cur_iotype = UPIO_UNKNOWN;
}
EXPORT_SYMBOL_GPL(serial8250_init_port);
void serial8250_set_defaults(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
if (up->port.flags & UPF_FIXED_TYPE) {
unsigned int type = up->port.type;
if (!up->port.fifosize)
up->port.fifosize = uart_config[type].fifo_size;
if (!up->tx_loadsz)
up->tx_loadsz = uart_config[type].tx_loadsz;
if (!up->capabilities)
up->capabilities = uart_config[type].flags;
}
set_io_from_upio(port);
/* default dma handlers */
if (up->dma) {
if (!up->dma->tx_dma)
up->dma->tx_dma = serial8250_tx_dma;
if (!up->dma->rx_dma)
up->dma->rx_dma = serial8250_rx_dma;
}
}
EXPORT_SYMBOL_GPL(serial8250_set_defaults);
#ifdef CONFIG_SERIAL_8250_CONSOLE
static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
{
serial_port_out(port, UART_TX, ch);
}
static void serial8250_console_wait_putchar(struct uart_port *port, unsigned char ch)
{
struct uart_8250_port *up = up_to_u8250p(port);
wait_for_xmitr(up, UART_LSR_THRE);
serial8250_console_putchar(port, ch);
}
/*
* Restore serial console when h/w power-off detected
*/
static void serial8250_console_restore(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
struct ktermios termios;
unsigned int baud, quot, frac = 0;
termios.c_cflag = port->cons->cflag;
termios.c_ispeed = port->cons->ispeed;
termios.c_ospeed = port->cons->ospeed;
if (port->state->port.tty && termios.c_cflag == 0) {
termios.c_cflag = port->state->port.tty->termios.c_cflag;
termios.c_ispeed = port->state->port.tty->termios.c_ispeed;
termios.c_ospeed = port->state->port.tty->termios.c_ospeed;
}
baud = serial8250_get_baud_rate(port, &termios, NULL);
quot = serial8250_get_divisor(port, baud, &frac);
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++) {
if (wait_for_lsr(up, UART_LSR_THRE))
return;
}
}
/*
* Print a string to the serial port using the device FIFO
*
* It sends fifosize bytes and then waits for the fifo
* to get empty.
*/
static void serial8250_console_fifo_write(struct uart_8250_port *up,
const char *s, unsigned int count)
{
const char *end = s + count;
unsigned int fifosize = up->tx_loadsz;
struct uart_port *port = &up->port;
unsigned int tx_count = 0;
bool cr_sent = false;
unsigned int i;
while (s != end) {
/* Allow timeout for each byte of a possibly full FIFO */
fifo_wait_for_lsr(up, fifosize);
for (i = 0; i < fifosize && s != end; ++i) {
if (*s == '\n' && !cr_sent) {
serial8250_console_putchar(port, '\r');
cr_sent = true;
} else {
serial8250_console_putchar(port, *s++);
cr_sent = false;
}
}
tx_count = i;
}
/*
* Allow timeout for each byte written since the caller will only wait
* for UART_LSR_BOTH_EMPTY using the timeout of a single character
*/
fifo_wait_for_lsr(up, tx_count);
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*
* Doing runtime PM is really a bad idea for the kernel console.
* Thus, we assume the function is called when device is powered up.
*/
void serial8250_console_write(struct uart_8250_port *up, const char *s,
unsigned int count)
{
struct uart_8250_em485 *em485 = up->em485;
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier, use_fifo;
int locked = 1;
touch_nmi_watchdog();
if (oops_in_progress)
locked = uart_port_trylock_irqsave(port, &flags);
else
uart_port_lock_irqsave(port, &flags);
/*
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
serial8250_clear_IER(up);
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
serial8250_console_restore(up);
up->canary = 0;
}
if (em485) {
if (em485->tx_stopped)
up->rs485_start_tx(up, false);
mdelay(port->rs485.delay_rts_before_send);
}
use_fifo = (up->capabilities & UART_CAP_FIFO) &&
/*
* BCM283x requires checking the fifo
* after each byte.
*/
!(up->capabilities & UART_CAP_MINI) &&
/*
* tx_loadsz contains the transmit fifo size
*/
up->tx_loadsz > 1 &&
(up->fcr & UART_FCR_ENABLE_FIFO) &&
port->state && test_bit(TTY_PORT_INITIALIZED, &port->state->port.iflags) &&
/*
* After we put data in the fifo, the controller will send
* it regardless of the CTS state. Therefore, only use the fifo
* if we don't use flow control.
*/
!(up->port.flags & UPF_CONS_FLOW);
if (likely(use_fifo))
serial8250_console_fifo_write(up, s, count);
else
uart_console_write(port, s, count, serial8250_console_wait_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
if (em485) {
mdelay(port->rs485.delay_rts_after_send);
if (em485->tx_stopped)
up->rs485_stop_tx(up, false);
}
serial_port_out(port, UART_IER, ier);
/*
* The receive handling will happen properly because the
* receive ready bit will still be set; it is not cleared
* on read. However, modem status handling will not happen
* automatically, so we must run it ourselves if we have saved
* anything in the saved flags while processing with interrupts off.
*/
if (up->msr_saved_flags)
serial8250_modem_status(up);
if (locked)
uart_port_unlock_irqrestore(port, flags);
}
static unsigned int probe_baud(struct uart_port *port)
{
unsigned char lcr, dll, dlm;
unsigned int quot;
lcr = serial_port_in(port, UART_LCR);
serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
dll = serial_port_in(port, UART_DLL);
dlm = serial_port_in(port, UART_DLM);
serial_port_out(port, UART_LCR, lcr);
quot = (dlm << 8) | dll;
return (port->uartclk / 16) / quot;
}
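/*
* Worked example (illustrative): with a 1.8432 MHz uartclk and a divisor
* latch reading of DLM=0x00, DLL=0x0c (quot = 12), probe_baud() returns
* 1843200 / 16 / 12 = 9600 baud.
*/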
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
int ret;
if (!port->iobase && !port->membase)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else if (probe)
baud = probe_baud(port);
ret = uart_set_options(port, port->cons, baud, parity, bits, flow);
if (ret)
return ret;
if (port->dev)
pm_runtime_get_sync(port->dev);
return 0;
}
int serial8250_console_exit(struct uart_port *port)
{
if (port->dev)
pm_runtime_put_sync(port->dev);
return 0;
}
#endif /* CONFIG_SERIAL_8250_CONSOLE */
MODULE_DESCRIPTION("Base port operations for 8250/16550-type serial ports");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/printk.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Modified to make sys_syslog() more flexible: added commands to
* return the last 4k of kernel messages, regardless of whether
* they've been read or not. Added option to suppress kernel printk's
* to the console. Added hook for sending the console messages
* elsewhere, in preparation for a serial line console (someday).
* Ted Ts'o, 2/11/93.
* Modified for sysctl support, 1/8/97, Chris Horn.
* Fixed SMP synchronization, 08/08/99, Manfred Spraul
* manfred@colorfullife.com
* Rewrote bits to get rid of console_lock
* 01Mar01 Andrew Morton
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/vmcore_info.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/panic.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"
int console_printk[4] = {
CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);
atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);
EXPORT_TRACEPOINT_SYMBOL_GPL(console);
/*
* Low level drivers may need that to know if they can schedule in
* their unblank() callback or not. So let's export it.
*/
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);
/*
* console_mutex protects console_list updates and console->flags updates.
* The flags are synchronized only for consoles that are registered, i.e.
* accessible via the console list.
*/
static DEFINE_MUTEX(console_mutex);
/*
* console_sem protects updates to console->seq
* and also provides serialization for console printing.
*/
static DEFINE_SEMAPHORE(console_sem, 1);
HLIST_HEAD(console_list);
EXPORT_SYMBOL_GPL(console_list);
DEFINE_STATIC_SRCU(console_srcu);
/*
* The system may need to suppress printk messages under certain
* circumstances, e.g. after a kernel panic has happened.
*/
int __read_mostly suppress_printk;
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
.name = "console_lock"
};
void lockdep_assert_console_list_lock_held(void)
{
lockdep_assert_held(&console_mutex);
}
EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
bool console_srcu_read_lock_is_held(void)
{
return srcu_read_lock_held(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock_is_held);
#endif
enum devkmsg_log_bits {
__DEVKMSG_LOG_BIT_ON = 0,
__DEVKMSG_LOG_BIT_OFF,
__DEVKMSG_LOG_BIT_LOCK,
};
enum devkmsg_log_masks {
DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
};
/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT 0
static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
static int __control_devkmsg(char *str)
{
size_t len;
if (!str)
return -EINVAL;
len = str_has_prefix(str, "on");
if (len) {
devkmsg_log = DEVKMSG_LOG_MASK_ON;
return len;
}
len = str_has_prefix(str, "off");
if (len) {
devkmsg_log = DEVKMSG_LOG_MASK_OFF;
return len;
}
len = str_has_prefix(str, "ratelimit");
if (len) {
devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
return len;
}
return -EINVAL;
}
static int __init control_devkmsg(char *str)
{
if (__control_devkmsg(str) < 0) {
pr_warn("printk.devkmsg: bad option string '%s'\n", str);
return 1;
}
/*
* Set sysctl string accordingly:
*/
if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
strscpy(devkmsg_log_str, "on");
else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
strscpy(devkmsg_log_str, "off");
/* else "ratelimit" which is set by default. */
/*
* Sysctl cannot change it anymore. The kernel command line setting of
* this parameter is to force the setting to be permanent throughout the
* runtime of the system. This is a precaution against userspace
* trying to be clever and changing the setting out from under us.
*/
devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
return 1;
}
__setup("printk.devkmsg=", control_devkmsg);
char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
char old_str[DEVKMSG_STR_MAX_SIZE];
unsigned int old;
int err;
if (write) {
if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
return -EINVAL;
old = devkmsg_log;
strscpy(old_str, devkmsg_log_str);
}
err = proc_dostring(table, write, buffer, lenp, ppos);
if (err)
return err;
if (write) {
err = __control_devkmsg(devkmsg_log_str);
/*
* Do not accept an unknown string OR a known string with
* trailing crap...
*/
if (err < 0 || (err + 1 != *lenp)) {
/* ... and restore old setting. */
devkmsg_log = old;
strscpy(devkmsg_log_str, old_str);
return -EINVAL;
}
}
return 0;
}
#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
/**
* console_list_lock - Lock the console list
*
* For console list or console->flags updates
*/
void console_list_lock(void)
{
/*
* In unregister_console() and console_force_preferred_locked(),
* synchronize_srcu() is called with the console_list_lock held.
* Therefore it is not allowed that the console_list_lock is taken
* with the srcu_lock held.
*
* Detecting if this context is really in the read-side critical
* section is only possible if the appropriate debug options are
* enabled.
*/
WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
srcu_read_lock_held(&console_srcu));
mutex_lock(&console_mutex);
}
EXPORT_SYMBOL(console_list_lock);
/**
* console_list_unlock - Unlock the console list
*
* Counterpart to console_list_lock()
*/
void console_list_unlock(void)
{
mutex_unlock(&console_mutex);
}
EXPORT_SYMBOL(console_list_unlock);
/**
* console_srcu_read_lock - Register a new reader for the
* SRCU-protected console list
*
* Use for_each_console_srcu() to iterate the console list
*
* Context: Any context.
* Return: A cookie to pass to console_srcu_read_unlock().
*/
int console_srcu_read_lock(void)
__acquires(&console_srcu)
{
return srcu_read_lock_nmisafe(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock);
/**
* console_srcu_read_unlock - Unregister an old reader from
* the SRCU-protected console list
* @cookie: cookie returned from console_srcu_read_lock()
*
* Counterpart to console_srcu_read_lock()
*/
void console_srcu_read_unlock(int cookie)
__releases(&console_srcu)
{
srcu_read_unlock_nmisafe(&console_srcu, cookie);
}
EXPORT_SYMBOL(console_srcu_read_unlock);
/*
* Helper macros to handle lockdep when locking/unlocking console_sem. We use
* macros instead of functions so that _RET_IP_ contains useful information.
*/
#define down_console_sem() do { \
down(&console_sem);\
mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)
static int __down_trylock_console_sem(unsigned long ip)
{
int lock_failed;
unsigned long flags;
/*
* Here and in __up_console_sem() we need to be in safe mode,
* because spindump/WARN/etc from under console ->lock will
* deadlock in printk()->down_trylock_console_sem() otherwise.
*/
printk_safe_enter_irqsave(flags);
lock_failed = down_trylock(&console_sem);
printk_safe_exit_irqrestore(flags);
if (lock_failed)
return 1;
mutex_acquire(&console_lock_dep_map, 0, 1, ip);
return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
static void __up_console_sem(unsigned long ip)
{
unsigned long flags;
mutex_release(&console_lock_dep_map, ip);
printk_safe_enter_irqsave(flags);
up(&console_sem);
printk_safe_exit_irqrestore(flags);
}
#define up_console_sem() __up_console_sem(_RET_IP_)
/*
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
* definitely not the perfect debug tool (we don't know if _WE_
* hold it and are racing, but it helps tracking those weird code
* paths in the console code where we end up in places I want
* locked without the console semaphore held).
*/
static int console_locked;
/*
* Array of consoles built from command line options (console=)
*/
#define MAX_CMDLINECONSOLES 8
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);
/* Flag: console code may call schedule() */
static int console_may_schedule;
enum con_msg_format_flags {
MSG_FORMAT_DEFAULT = 0,
MSG_FORMAT_SYSLOG = (1 << 0),
};
static int console_msg_format = MSG_FORMAT_DEFAULT;
/*
* The printk log buffer consists of a sequenced collection of records, each
* containing variable length message text. Every record also contains its
* own meta-data (@info).
*
* Every record meta-data carries the timestamp in microseconds, as well as
* the standard userspace syslog level and syslog facility. The usual kernel
* messages use LOG_KERN; userspace-injected messages always carry a matching
* syslog facility, by default LOG_USER. The origin of every message can be
* reliably determined that way.
*
* The human readable log message of a record is available in @text, the
* length of the message text in @text_len. The stored message is not
* terminated.
*
* Optionally, a record can carry a dictionary of properties (key/value
* pairs), to provide userspace with a machine-readable message context.
*
* Examples for well-defined, commonly used property names are:
* DEVICE=b12:8 device identifier
* b12:8 block dev_t
* c127:3 char dev_t
* n8 netdev ifindex
* +sound:card0 subsystem:devname
* SUBSYSTEM=pci driver-core subsystem name
*
* Valid characters in property names are [a-zA-Z0-9.-_]. Property names
* and values are terminated by a '\0' character.
*
* Example of record values:
* record.text_buf = "it's a line" (unterminated)
* record.info.seq = 56
* record.info.ts_nsec = 36863
* record.info.text_len = 11
* record.info.facility = 0 (LOG_KERN)
* record.info.flags = 0
* record.info.level = 3 (LOG_ERR)
* record.info.caller_id = 299 (task 299)
* record.info.dev_info.subsystem = "pci" (terminated)
* record.info.dev_info.device = "+pci:0000:00:01.0" (terminated)
*
* The 'struct printk_info' buffer must never be directly exported to
* userspace, it is a kernel-private implementation detail that might
* need to be changed in the future, when the requirements change.
*
* /dev/kmsg exports the structured data in the following line format:
* "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
*
* Users of the export format should ignore possible additional values
* separated by ',', and find the message after the ';' character.
*
* The optional key/value pairs are attached as continuation lines starting
* with a space character and terminated by a newline. All possible
* non-printable characters are escaped in the "\xff" notation.
*/
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);
/*
* Specifies if a legacy console is registered. If legacy consoles are
* present, it is necessary to perform the console lock/unlock dance
* whenever console flushing should occur.
*/
bool have_legacy_console;
/*
* Specifies if an nbcon console is registered. If nbcon consoles are present,
* synchronous printing of legacy consoles will not occur during panic until
* the backtrace has been stored to the ringbuffer.
*/
bool have_nbcon_console;
/*
* Specifies if a boot console is registered. If boot consoles are present,
* nbcon consoles cannot print simultaneously and must be synchronized by
* the console lock. This is because boot consoles and nbcon consoles may
* have mapped the same hardware.
*/
bool have_boot_console;
/* See printk_legacy_allow_panic_sync() for details. */
bool legacy_allow_panic_sync;
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;
/* True when _all_ printer threads are available for printing. */
bool printk_kthreads_running;
struct latched_seq {
seqcount_latch_t latch;
u64 val[2];
};
/*
* The next printk record to read after the last 'clear' command. There are
* two copies (updated with seqcount_latch) so that reads can locklessly
* access a valid value. Writers are synchronized by @syslog_lock.
*/
static struct latched_seq clear_seq = {
.latch = SEQCNT_LATCH_ZERO(clear_seq.latch),
.val[0] = 0,
.val[1] = 0,
};
#define LOG_LEVEL(v) ((v) & 0x07)
#define LOG_FACILITY(v) ((v) >> 3 & 0xff)
/* record buffer */
#define LOG_ALIGN __alignof__(unsigned long)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define LOG_BUF_LEN_MAX ((u32)1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
/*
* Define the average message size. This only affects the number of
* descriptors that will be available. Underestimating is better than
* overestimating (too many available descriptors is better than not enough).
*/
#define PRB_AVGBITS 5 /* 32 character average length */
#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
#error CONFIG_LOG_BUF_SHIFT value too small.
#endif
_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
PRB_AVGBITS, &__log_buf[0]);
static struct printk_ringbuffer printk_rb_dynamic;
struct printk_ringbuffer *prb = &printk_rb_static;
/*
* We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
* per_cpu_areas are initialised. This variable is set to true when
* it's safe to access per-CPU data.
*/
static bool __printk_percpu_data_ready __ro_after_init;
bool printk_percpu_data_ready(void)
{
return __printk_percpu_data_ready;
}
/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
write_seqcount_latch_begin(&ls->latch);
ls->val[0] = val;
write_seqcount_latch(&ls->latch);
ls->val[1] = val;
write_seqcount_latch_end(&ls->latch);
}
/* Can be called from any context. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
unsigned int seq;
unsigned int idx;
u64 val;
do {
seq = read_seqcount_latch(&ls->latch);
idx = seq & 0x1;
val = ls->val[idx];
} while (read_seqcount_latch_retry(&ls->latch, seq));
return val;
}
/* Return log buffer address */
char *log_buf_addr_get(void)
{
return log_buf;
}
/* Return log buffer size */
u32 log_buf_len_get(void)
{
return log_buf_len;
}
/*
* Define how much of the log buffer we could take at maximum. The value
* must be greater than two. Note that only half of the buffer is available
* when the index points to the middle.
*/
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";
static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
{
/*
* The message should not take the whole buffer. Otherwise, it might
* get removed too soon.
*/
u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
if (*text_len > max_text_len)
*text_len = max_text_len;
/* enable the warning message (if there is room) */
*trunc_msg_len = strlen(trunc_msg);
if (*text_len >= *trunc_msg_len)
*text_len -= *trunc_msg_len;
else
*trunc_msg_len = 0;
}
int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
static int syslog_action_restricted(int type)
{
if (dmesg_restrict)
return 1;
/*
* Unless restricted, we allow "read all" and "get buffer size"
* for everybody.
*/
return type != SYSLOG_ACTION_READ_ALL &&
type != SYSLOG_ACTION_SIZE_BUFFER;
}
static int check_syslog_permissions(int type, int source)
{
/*
* If this is from /proc/kmsg and we've already opened it, then we've
* already done the capabilities checks at open time.
*/
if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
goto ok;
if (syslog_action_restricted(type)) {
if (capable(CAP_SYSLOG))
goto ok;
return -EPERM;
}
ok:
return security_syslog(type);
}
static void append_char(char **pp, char *e, char c)
{
if (*pp < e)
*(*pp)++ = c;
}
static ssize_t info_print_ext_header(char *buf, size_t size,
struct printk_info *info)
{
u64 ts_usec = info->ts_nsec;
char caller[20];
#ifdef CONFIG_PRINTK_CALLER
u32 id = info->caller_id;
snprintf(caller, sizeof(caller), ",caller=%c%u",
id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
caller[0] = '\0';
#endif
do_div(ts_usec, 1000);
return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
(info->facility << 3) | info->level, info->seq,
ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
}
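/*
* Illustrative header output: for the example record shown further above
* (facility 0, level 3, seq 56, ts_nsec 36863, no LOG_CONT), this emits
* "3,56,36,-;" - i.e. (0 << 3) | 3, the sequence number, the timestamp
* truncated to microseconds, and '-' because the record is not a
* continuation (the caller field only appears with CONFIG_PRINTK_CALLER).
*/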
static ssize_t msg_add_ext_text(char *buf, size_t size,
const char *text, size_t text_len,
unsigned char endc)
{
char *p = buf, *e = buf + size;
size_t i;
/* escape non-printable characters */
for (i = 0; i < text_len; i++) {
unsigned char c = text[i];
if (c < ' ' || c >= 127 || c == '\\')
p += scnprintf(p, e - p, "\\x%02x", c);
else
append_char(&p, e, c);
}
append_char(&p, e, endc);
return p - buf;
}
static ssize_t msg_add_dict_text(char *buf, size_t size,
const char *key, const char *val)
{
size_t val_len = strlen(val);
ssize_t len;
if (!val_len)
return 0;
len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */
len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
return len;
}
static ssize_t msg_print_ext_body(char *buf, size_t size,
char *text, size_t text_len,
struct dev_printk_info *dev_info)
{
ssize_t len;
len = msg_add_ext_text(buf, size, text, text_len, '\n');
if (!dev_info)
goto out;
len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
dev_info->subsystem);
len += msg_add_dict_text(buf + len, size - len, "DEVICE",
dev_info->device);
out:
return len;
}
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
atomic64_t seq;
struct ratelimit_state rs;
struct mutex lock;
struct printk_buffers pbufs;
};
static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = vprintk_emit(facility, level, NULL, fmt, args);
va_end(args);
return r;
}
static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
char *buf, *line;
int level = default_message_loglevel;
int facility = 1; /* LOG_USER */
struct file *file = iocb->ki_filp;
struct devkmsg_user *user = file->private_data;
size_t len = iov_iter_count(from);
ssize_t ret = len;
if (len > PRINTKRB_RECORD_MAX)
return -EINVAL;
/* Ignore when user logging is disabled. */
if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
return len;
/* Ratelimit when not explicitly enabled. */
if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
if (!___ratelimit(&user->rs, current->comm))
return ret;
}
buf = kmalloc(len+1, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
buf[len] = '\0';
if (!copy_from_iter_full(buf, len, from)) {
kfree(buf);
return -EFAULT;
}
/*
* Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
* the decimal value represents a 32-bit quantity: the lower 3 bits are
* the log level, the remaining bits are the log facility.
*
* If no prefix or no userspace facility is specified, we
* enforce LOG_USER, to be able to reliably distinguish
* kernel-generated messages from userspace-injected ones.
*/
line = buf;
if (line[0] == '<') {
char *endp = NULL;
unsigned int u;
u = simple_strtoul(line + 1, &endp, 10);
if (endp && endp[0] == '>') {
level = LOG_LEVEL(u);
if (LOG_FACILITY(u) != 0)
facility = LOG_FACILITY(u);
endp++;
line = endp;
}
}
devkmsg_emit(facility, level, "%s", line);
kfree(buf);
return ret;
}
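/*
* Worked example of the prefix parsing above: a userspace write of
* "<13>hello" is parsed as u = 13, so level = 13 & 7 = 5 (notice) and
* facility = 13 >> 3 = 1 (LOG_USER); the record body becomes "hello".
* A write without a "<N>" prefix keeps the default message loglevel and
* is forced to the LOG_USER facility.
*/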
static ssize_t devkmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
char *outbuf = &user->pbufs.outbuf[0];
struct printk_message pmsg = {
.pbufs = &user->pbufs,
};
ssize_t ret;
ret = mutex_lock_interruptible(&user->lock);
if (ret)
return ret;
if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
goto out;
}
/*
* Guarantee this task is visible on the waitqueue before
* checking the wake condition.
*
* The full memory barrier within set_current_state() of
* prepare_to_wait_event() pairs with the full memory barrier
* within wq_has_sleeper().
*
* This pairs with __wake_up_klogd:A.
*/
ret = wait_event_interruptible(log_wait,
printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
false)); /* LMM(devkmsg_read:A) */
if (ret)
goto out;
}
if (pmsg.dropped) {
/* our last seen message is gone, return error and reset */
atomic64_set(&user->seq, pmsg.seq);
ret = -EPIPE;
goto out;
}
atomic64_set(&user->seq, pmsg.seq + 1);
if (pmsg.outbuf_len > count) {
ret = -EINVAL;
goto out;
}
if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
ret = -EFAULT;
goto out;
}
ret = pmsg.outbuf_len;
out:
mutex_unlock(&user->lock);
return ret;
}
/*
* Be careful when modifying this function!!!
*
* Only a few operations are supported because the device works only with
* entire variable-length messages (records). Non-standard values are
* returned in the other cases and it has been this way for quite some time.
* User space applications might depend on this behavior.
*/
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
struct devkmsg_user *user = file->private_data;
loff_t ret = 0;
if (offset)
return -ESPIPE;
switch (whence) {
case SEEK_SET:
/* the first record */
atomic64_set(&user->seq, prb_first_valid_seq(prb));
break;
case SEEK_DATA:
/*
* The first record after the last SYSLOG_ACTION_CLEAR,
* like issued by 'dmesg -c'. Reading /dev/kmsg itself
* changes no global state, and does not clear anything.
*/
atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
break;
case SEEK_END:
/* after the last record */
atomic64_set(&user->seq, prb_next_seq(prb));
break;
default:
ret = -EINVAL;
}
return ret;
}
static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
struct devkmsg_user *user = file->private_data;
struct printk_info info;
__poll_t ret = 0;
poll_wait(file, &log_wait, wait);
if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
/* return error when data has vanished underneath us */
if (info.seq != atomic64_read(&user->seq))
ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
else
ret = EPOLLIN|EPOLLRDNORM;
}
return ret;
}
static int devkmsg_open(struct inode *inode, struct file *file)
{
struct devkmsg_user *user;
int err;
if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
return -EPERM;
/* write-only does not need any file context */
if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
SYSLOG_FROM_READER);
if (err)
return err;
}
user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
if (!user)
return -ENOMEM;
ratelimit_default_init(&user->rs);
ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
mutex_init(&user->lock);
atomic64_set(&user->seq, prb_first_valid_seq(prb));
file->private_data = user;
return 0;
}
static int devkmsg_release(struct inode *inode, struct file *file)
{
struct devkmsg_user *user = file->private_data;
ratelimit_state_exit(&user->rs);
mutex_destroy(&user->lock);
kvfree(user);
return 0;
}
const struct file_operations kmsg_fops = {
.open = devkmsg_open,
.read = devkmsg_read,
.write_iter = devkmsg_write,
.llseek = devkmsg_llseek,
.poll = devkmsg_poll,
.release = devkmsg_release,
};
#ifdef CONFIG_VMCORE_INFO
/*
* This appends the listed symbols to /proc/vmcore
*
* /proc/vmcore is used by various utilities, like crash and makedumpfile to
* obtain access to symbols that are otherwise very difficult to locate. These
* symbols are specifically used so that utilities can access and extract the
* dmesg log from a vmcore file after a crash.
*/
void log_buf_vmcoreinfo_setup(void)
{
struct dev_printk_info *dev_info = NULL;
VMCOREINFO_SYMBOL(prb);
VMCOREINFO_SYMBOL(printk_rb_static);
VMCOREINFO_SYMBOL(clear_seq);
/*
* Export struct size and field offsets. User space tools can
* parse it and detect any changes to structure down the line.
*/
VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
VMCOREINFO_OFFSET(printk_ringbuffer, fail);
VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
VMCOREINFO_OFFSET(prb_desc_ring, descs);
VMCOREINFO_OFFSET(prb_desc_ring, infos);
VMCOREINFO_OFFSET(prb_desc_ring, head_id);
VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
VMCOREINFO_STRUCT_SIZE(prb_desc);
VMCOREINFO_OFFSET(prb_desc, state_var);
VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
VMCOREINFO_STRUCT_SIZE(printk_info);
VMCOREINFO_OFFSET(printk_info, seq);
VMCOREINFO_OFFSET(printk_info, ts_nsec);
VMCOREINFO_OFFSET(printk_info, text_len);
VMCOREINFO_OFFSET(printk_info, caller_id);
VMCOREINFO_OFFSET(printk_info, dev_info);
VMCOREINFO_STRUCT_SIZE(dev_printk_info);
VMCOREINFO_OFFSET(dev_printk_info, subsystem);
VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
VMCOREINFO_OFFSET(dev_printk_info, device);
VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
VMCOREINFO_STRUCT_SIZE(prb_data_ring);
VMCOREINFO_OFFSET(prb_data_ring, size_bits);
VMCOREINFO_OFFSET(prb_data_ring, data);
VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
VMCOREINFO_SIZE(atomic_long_t);
VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
VMCOREINFO_STRUCT_SIZE(latched_seq);
VMCOREINFO_OFFSET(latched_seq, val);
}
#endif
/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;
/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(u64 size)
{
if (size > (u64)LOG_BUF_LEN_MAX) {
size = (u64)LOG_BUF_LEN_MAX;
pr_err("log_buf over 2G is not supported.\n");
}
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len)
new_log_buf_len = (unsigned long)size;
}
/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
u64 size;
if (!str)
return -EINVAL;
size = memparse(str, &str);
log_buf_len_update(size);
return 0;
}
early_param("log_buf_len", log_buf_len_setup);
#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
static void __init log_buf_add_cpu(void)
{
unsigned int cpu_extra;
/*
* archs should set up cpu_possible_bits properly with
* set_cpu_possible() after setup_arch() but just in
* case let's ensure this is valid.
*/
if (num_possible_cpus() == 1)
return;
cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
/* by default this will only continue through for large > 64 CPUs */
if (cpu_extra <= __LOG_BUF_LEN / 2)
return;
pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
__LOG_CPU_MAX_BUF_LEN);
pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
cpu_extra);
pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
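/*
* Worked example for the scaling above, assuming the common defaults
* CONFIG_LOG_BUF_SHIFT=17 (128 KiB) and CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
* (4 KiB per CPU): a machine with 128 possible CPUs contributes
* cpu_extra = 127 * 4096 = 520192 bytes, well above half of the 131072 byte
* static buffer, so the requested size becomes 520192 + 131072 = 651264
* bytes and is rounded up to 1 MiB.
*/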
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
static void __init set_percpu_data_ready(void)
{
__printk_percpu_data_ready = true;
}
static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
struct printk_record *r)
{
struct prb_reserved_entry e;
struct printk_record dest_r;
prb_rec_init_wr(&dest_r, r->info->text_len);
if (!prb_reserve(&e, rb, &dest_r))
return 0;
memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
dest_r.info->text_len = r->info->text_len;
dest_r.info->facility = r->info->facility;
dest_r.info->level = r->info->level;
dest_r.info->flags = r->info->flags;
dest_r.info->ts_nsec = r->info->ts_nsec;
dest_r.info->caller_id = r->info->caller_id;
memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
prb_final_commit(&e);
return prb_record_text_space(&e);
}
static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
static void print_log_buf_usage_stats(void)
{
unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
size_t meta_data_size;
meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));
pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
log_buf_len, meta_data_size, log_buf_len + meta_data_size);
}
void __init setup_log_buf(int early)
{
struct printk_info *new_infos;
unsigned int new_descs_count;
struct prb_desc *new_descs;
struct printk_info info;
struct printk_record r;
unsigned int text_size;
size_t new_descs_size;
size_t new_infos_size;
unsigned long flags;
char *new_log_buf;
unsigned int free;
u64 seq;
/*
* Some archs call setup_log_buf() multiple times - first is very
* early, e.g. from setup_arch(), and second - when percpu_areas
* are initialised.
*/
if (!early)
set_percpu_data_ready();
if (log_buf != __log_buf)
return;
if (!early && !new_log_buf_len)
log_buf_add_cpu();
if (!new_log_buf_len) {
/* Show the memory stats only once. */
if (!early)
goto out;
return;
}
new_descs_count = new_log_buf_len >> PRB_AVGBITS;
if (new_descs_count == 0) {
pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
goto out;
}
new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
if (unlikely(!new_log_buf)) {
pr_err("log_buf_len: %lu text bytes not available\n",
new_log_buf_len);
goto out;
}
new_descs_size = new_descs_count * sizeof(struct prb_desc);
new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
if (unlikely(!new_descs)) {
pr_err("log_buf_len: %zu desc bytes not available\n",
new_descs_size);
goto err_free_log_buf;
}
new_infos_size = new_descs_count * sizeof(struct printk_info);
new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
if (unlikely(!new_infos)) {
pr_err("log_buf_len: %zu info bytes not available\n",
new_infos_size);
goto err_free_descs;
}
prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
prb_init(&printk_rb_dynamic,
new_log_buf, ilog2(new_log_buf_len),
new_descs, ilog2(new_descs_count),
new_infos);
local_irq_save(flags);
log_buf_len = new_log_buf_len;
log_buf = new_log_buf;
new_log_buf_len = 0;
free = __LOG_BUF_LEN;
prb_for_each_record(0, &printk_rb_static, seq, &r) {
text_size = add_to_rb(&printk_rb_dynamic, &r);
if (text_size > free)
free = 0;
else
free -= text_size;
}
prb = &printk_rb_dynamic;
local_irq_restore(flags);
/*
* Copy any remaining messages that might have appeared from
* NMI context after copying but before switching to the
* dynamic buffer.
*/
prb_for_each_record(seq, &printk_rb_static, seq, &r) {
text_size = add_to_rb(&printk_rb_dynamic, &r);
if (text_size > free)
free = 0;
else
free -= text_size;
}
if (seq != prb_next_seq(&printk_rb_static)) {
pr_err("dropped %llu messages\n",
prb_next_seq(&printk_rb_static) - seq);
}
print_log_buf_usage_stats();
pr_info("early log buf free: %u(%u%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
return;
err_free_descs:
memblock_free(new_descs, new_descs_size);
err_free_log_buf:
memblock_free(new_log_buf, new_log_buf_len);
out:
print_log_buf_usage_stats();
}
static bool __read_mostly ignore_loglevel;
static int __init ignore_loglevel_setup(char *str)
{
ignore_loglevel = true;
pr_info("debug: ignoring loglevel setting.\n");
return 0;
}
early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
"ignore loglevel setting (prints all kernel messages to the console)");
static bool suppress_message_printing(int level)
{
return (level >= console_loglevel && !ignore_loglevel);
}
#ifdef CONFIG_BOOT_PRINTK_DELAY
static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec; /* based on boot_delay */
static int __init boot_delay_setup(char *str)
{
unsigned long lpj;
lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
get_option(&str, &boot_delay);
if (boot_delay > 10 * 1000)
boot_delay = 0;
pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
"HZ: %d, loops_per_msec: %llu\n",
boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
return 0;
}
early_param("boot_delay", boot_delay_setup);
static void boot_delay_msec(int level)
{
unsigned long long k;
unsigned long timeout;
bool suppress = !is_printk_force_console() &&
suppress_message_printing(level);
if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress)
return;
k = (unsigned long long)loops_per_msec * boot_delay;
timeout = jiffies + msecs_to_jiffies(boot_delay);
while (k) {
k--;
cpu_relax();
/*
* use (volatile) jiffies to prevent
* compiler reduction; loop termination via jiffies
* is secondary and may or may not happen.
*/
if (time_after(jiffies, timeout))
break;
touch_nmi_watchdog();
}
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
static size_t print_syslog(unsigned int level, char *buf)
{
return sprintf(buf, "<%u>", level);
}
static size_t print_time(u64 ts, char *buf)
{
unsigned long rem_nsec = do_div(ts, 1000000000);
return sprintf(buf, "[%5lu.%06lu]",
(unsigned long)ts, rem_nsec / 1000);
}
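/*
* Example output of print_time(): ts = 12345678901 ns gives 12 seconds with
* rem_nsec = 345678901 after do_div(), printed as "[   12.345678]"
* (microsecond resolution, remaining nanoseconds truncated).
*/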
#ifdef CONFIG_PRINTK_CALLER
static size_t print_caller(u32 id, char *buf)
{
char caller[12];
snprintf(caller, sizeof(caller), "%c%u",
id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
return sprintf(buf, "[%6s]", caller);
}
#else
#define print_caller(id, buf) 0
#endif
static size_t info_print_prefix(const struct printk_info *info, bool syslog,
bool time, char *buf)
{
size_t len = 0;
if (syslog)
len = print_syslog((info->facility << 3) | info->level, buf);
if (time)
len += print_time(info->ts_nsec, buf + len);
len += print_caller(info->caller_id, buf + len);
if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
buf[len++] = ' ';
buf[len] = '\0';
}
return len;
}
/*
* Prepare the record for printing. The text is shifted within the given
* buffer to avoid a need for another one. The following operations are
* done:
*
* - Add prefix for each line.
* - Drop truncated lines that no longer fit into the buffer.
* - Add the trailing newline that has been removed in vprintk_store().
* - Add a string terminator.
*
* Since the produced string is always terminated, the maximum possible
* return value is @r->text_buf_size - 1;
*
* Return: The length of the updated/prepared text, including the added
* prefixes and the newline. The terminator is not counted. The dropped
* line(s) are not counted.
*/
static size_t record_print_text(struct printk_record *r, bool syslog,
bool time)
{
size_t text_len = r->info->text_len;
size_t buf_size = r->text_buf_size;
char *text = r->text_buf;
char prefix[PRINTK_PREFIX_MAX];
bool truncated = false;
size_t prefix_len;
size_t line_len;
size_t len = 0;
char *next;
/*
* If the message was truncated because the buffer was not large
* enough, treat the available text as if it were the full text.
*/
if (text_len > buf_size)
text_len = buf_size;
prefix_len = info_print_prefix(r->info, syslog, time, prefix);
/*
* @text_len: bytes of unprocessed text
* @line_len: bytes of current line _without_ newline
* @text: pointer to beginning of current line
* @len: number of bytes prepared in r->text_buf
*/
for (;;) {
next = memchr(text, '\n', text_len);
if (next) {
line_len = next - text;
} else {
/* Drop truncated line(s). */
if (truncated)
break;
line_len = text_len;
}
/*
* Truncate the text if there is not enough space to add the
* prefix and a trailing newline and a terminator.
*/
if (len + prefix_len + text_len + 1 + 1 > buf_size) {
/* Drop even the current line if no space. */
if (len + prefix_len + line_len + 1 + 1 > buf_size)
break;
text_len = buf_size - len - prefix_len - 1 - 1;
truncated = true;
}
memmove(text + prefix_len, text, text_len);
memcpy(text, prefix, prefix_len);
/*
* Increment the prepared length to include the text and
* prefix that were just moved+copied. Also increment for the
* newline at the end of this line. If this is the last line,
* there is no newline, but it will be added immediately below.
*/
len += prefix_len + line_len + 1;
if (text_len == line_len) {
/*
* This is the last line. Add the trailing newline
* removed in vprintk_store().
*/
text[prefix_len + line_len] = '\n';
break;
}
/*
* Advance beyond the added prefix and the related line with
* its newline.
*/
text += prefix_len + line_len + 1;
/*
* The remaining text has only decreased by the line with its
* newline.
*
* Note that @text_len can become zero. It happens when @text
* ended with a newline (either due to truncation or the
* original string ending with "\n\n"). The loop is correctly
* repeated and (if not truncated) an empty line with a prefix
* will be prepared.
*/
text_len -= line_len + 1;
}
/*
* If a buffer was provided, it will be terminated. Space for the
* string terminator is guaranteed to be available. The terminator is
* not counted in the return value.
*/
if (buf_size > 0)
r->text_buf[len] = 0;
return len;
}
static size_t get_record_print_text_size(struct printk_info *info,
unsigned int line_count,
bool syslog, bool time)
{
char prefix[PRINTK_PREFIX_MAX];
size_t prefix_len;
prefix_len = info_print_prefix(info, syslog, time, prefix);
/*
* Each line will be preceded with a prefix. The intermediate
* newlines are already within the text, but a final trailing
* newline will be added.
*/
return ((prefix_len * line_count) + info->text_len + 1);
}
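/*
* Example (illustrative): a two-line record "foo\nbar" has text_len = 7 and
* line_count = 2; with a 16 byte prefix the estimate above is
* 2 * 16 + 7 + 1 = 40 bytes - one prefix per line, the embedded newline
* already counted in text_len, plus the trailing newline added by
* record_print_text().
*/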
/*
* Beginning with @start_seq, find the first record where it and all following
* records up to (but not including) @max_seq fit into @size.
*
* @max_seq is simply an upper bound and does not need to exist. If the caller
* does not require an upper bound, -1 can be used for @max_seq.
*/
static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
bool syslog, bool time)
{
struct printk_info info;
unsigned int line_count;
size_t len = 0;
u64 seq;
/* Determine the size of the records up to @max_seq. */
prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
if (info.seq >= max_seq)
break;
len += get_record_print_text_size(&info, line_count, syslog, time);
}
/*
* Adjust the upper bound for the next loop to avoid subtracting
* lengths that were never added.
*/
if (seq < max_seq)
max_seq = seq;
/*
* Move first record forward until length fits into the buffer. Ignore
* newest messages that were not counted in the above cycle. Messages
* might appear and get lost in the meantime. This is a best effort
* that prevents an infinite loop that could occur with a retry.
*/
prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
if (len <= size || info.seq >= max_seq)
break;
len -= get_record_print_text_size(&info, line_count, syslog, time);
}
return seq;
}
/* The caller is responsible for making sure @size is greater than 0. */
static int syslog_print(char __user *buf, int size)
{
struct printk_info info;
struct printk_record r;
char *text;
int len = 0;
u64 seq;
text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
if (!text)
return -ENOMEM;
prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
mutex_lock(&syslog_lock);
/*
* Wait for the @syslog_seq record to be available. @syslog_seq may
* change while waiting.
*/
do {
seq = syslog_seq;
mutex_unlock(&syslog_lock);
/*
* Guarantee this task is visible on the waitqueue before
* checking the wake condition.
*
* The full memory barrier within set_current_state() of
* prepare_to_wait_event() pairs with the full memory barrier
* within wq_has_sleeper().
*
* This pairs with __wake_up_klogd:A.
*/
len = wait_event_interruptible(log_wait,
prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
mutex_lock(&syslog_lock);
if (len)
goto out;
} while (syslog_seq != seq);
/*
* Copy records that fit into the buffer. The above cycle makes sure
* that the first record is always available.
*/
do {
size_t n;
size_t skip;
int err;
if (!prb_read_valid(prb, syslog_seq, &r))
break;
if (r.info->seq != syslog_seq) {
/* message is gone, move to next valid one */
syslog_seq = r.info->seq;
syslog_partial = 0;
}
/*
* To keep reading/counting partial line consistent,
* use printk_time value as of the beginning of a line.
*/
if (!syslog_partial)
syslog_time = printk_time;
skip = syslog_partial;
n = record_print_text(&r, true, syslog_time);
if (n - syslog_partial <= size) {
/* message fits into buffer, move forward */
syslog_seq = r.info->seq + 1;
n -= syslog_partial;
syslog_partial = 0;
} else if (!len) {
/* partial read(), remember position */
n = size;
syslog_partial += n;
} else
n = 0;
if (!n)
break;
mutex_unlock(&syslog_lock);
err = copy_to_user(buf, text + skip, n);
mutex_lock(&syslog_lock);
if (err) {
if (!len)
len = -EFAULT;
break;
}
len += n;
size -= n;
buf += n;
} while (size);
out:
mutex_unlock(&syslog_lock);
kfree(text);
return len;
}
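/*
* Dump the newest records that fit into the @size byte user buffer @buf,
* starting with the first record at or after @clear_seq that still lets all
* newer records fit. When @clear is set, advance @clear_seq past the dumped
* records.
*/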
static int syslog_print_all(char __user *buf, int size, bool clear)
{
struct printk_info info;
struct printk_record r;
char *text;
int len = 0;
u64 seq;
bool time;
text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
if (!text)
return -ENOMEM;
time = printk_time;
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
*/
seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
size, true, time);
prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
prb_for_each_record(seq, prb, seq, &r) {
int textlen;
textlen = record_print_text(&r, true, time);
if (len + textlen > size) {
seq--;
break;
}
if (copy_to_user(buf + len, text, textlen))
len = -EFAULT;
else
len += textlen;
if (len < 0)
break;
}
if (clear) {
mutex_lock(&syslog_lock);
latched_seq_write(&clear_seq, seq);
mutex_unlock(&syslog_lock);
}
kfree(text);
return len;
}
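/* SYSLOG_ACTION_CLEAR: mark all records currently in the ring buffer as cleared. */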
static void syslog_clear(void)
{
mutex_lock(&syslog_lock);
latched_seq_write(&clear_seq, prb_next_seq(prb));
mutex_unlock(&syslog_lock);
}
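/*
* Handle all syslog(2) actions. @source identifies the caller (syscall or
* /proc/kmsg) so that permission checks and the SYSLOG_ACTION_SIZE_UNREAD
* semantics can differ accordingly.
*/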
int do_syslog(int type, char __user *buf, int len, int source)
{
struct printk_info info;
bool clear = false;
static int saved_console_loglevel = LOGLEVEL_DEFAULT;
int error;
error = check_syslog_permissions(type, source);
if (error)
return error;
switch (type) {
case SYSLOG_ACTION_CLOSE: /* Close log */
break;
case SYSLOG_ACTION_OPEN: /* Open log */
break;
case SYSLOG_ACTION_READ: /* Read from log */
if (!buf || len < 0)
return -EINVAL;
if (!len)
return 0;
if (!access_ok(buf, len))
return -EFAULT;
error = syslog_print(buf, len);
break;
/* Read/clear last kernel messages */
case SYSLOG_ACTION_READ_CLEAR:
clear = true;
fallthrough;
/* Read last kernel messages */
case SYSLOG_ACTION_READ_ALL:
if (!buf || len < 0)
return -EINVAL;
if (!len)
return 0;
if (!access_ok(buf, len))
return -EFAULT;
error = syslog_print_all(buf, len, clear);
break;
/* Clear ring buffer */
case SYSLOG_ACTION_CLEAR:
syslog_clear();
break;
/* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_OFF:
if (saved_console_loglevel == LOGLEVEL_DEFAULT)
saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
/* Enable logging to console */
case SYSLOG_ACTION_CONSOLE_ON:
if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
console_loglevel = saved_console_loglevel;
saved_console_loglevel = LOGLEVEL_DEFAULT;
}
break;
/* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_LEVEL:
if (len < 1 || len > 8)
return -EINVAL;
if (len < minimum_console_loglevel)
len = minimum_console_loglevel;
console_loglevel = len;
/* Implicitly re-enable logging to console */
saved_console_loglevel = LOGLEVEL_DEFAULT;
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
mutex_lock(&syslog_lock);
if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
/* No unread messages. */
mutex_unlock(&syslog_lock);
return 0;
}
if (info.seq != syslog_seq) {
/* messages are gone, move to first one */
syslog_seq = info.seq;
syslog_partial = 0;
}
if (source == SYSLOG_FROM_PROC) {
/*
* Short-cut for poll() on /proc/kmsg, which simply checks
* for pending data, not the size; return the count of
* records, not the length.
*/
error = prb_next_seq(prb) - syslog_seq;
} else {
bool time = syslog_partial ? syslog_time : printk_time;
unsigned int line_count;
u64 seq;
prb_for_each_info(syslog_seq, prb, seq, &info,
&line_count) {
error += get_record_print_text_size(&info, line_count,
true, time);
time = printk_time;
}
error -= syslog_partial;
}
mutex_unlock(&syslog_lock);
break;
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
error = log_buf_len;
break;
default:
error = -EINVAL;
break;
}
return error;
}
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
/*
* Special console_lock variants that help to reduce the risk of soft-lockups.
* They allow passing console_lock to another printk() call using a busy wait.
*/
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_owner_dep_map = {
.name = "console_owner"
};
#endif
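/*
* @console_owner is the task currently printing under console_lock and
* willing to hand the lock over. @console_waiter is set when another
* context is busy waiting for that hand over.
*/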
static DEFINE_RAW_SPINLOCK(console_owner_lock);
static struct task_struct *console_owner;
static bool console_waiter;
/**
* console_lock_spinning_enable - mark beginning of code where another
* thread might safely busy wait
*
* This basically converts console_lock into a spinlock. This marks
* the section where the console_lock owner can not sleep, because
* there may be a waiter spinning (like a spinlock). Also it must be
* ready to hand over the lock at the end of the section.
*/
void console_lock_spinning_enable(void)
{
/*
* Do not use spinning in panic(). The panic CPU wants to keep the lock.
* Non-panic CPUs abandon the flush anyway.
*
* Just keep the lockdep annotation. The panic-CPU should avoid
* taking console_owner_lock because it might cause a deadlock.
* This looks like the easiest way to prevent false lockdep
* reports without handling the races in a lockless way.
*/
if (panic_in_progress())
goto lockdep;
raw_spin_lock(&console_owner_lock);
console_owner = current;
raw_spin_unlock(&console_owner_lock);
lockdep:
/* The waiter may spin on us after setting console_owner */
spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
}
/**
* console_lock_spinning_disable_and_check - mark end of code where another
* thread was able to busy wait and check if there is a waiter
* @cookie: cookie returned from console_srcu_read_lock()
*
* This is called at the end of the section where spinning is allowed.
* It has two functions. First, it is a signal that it is no longer
* safe to start busy waiting for the lock. Second, it checks if
* there is a busy waiter and passes the lock rights to it.
*
* Important: Callers lose both the console_lock and the SRCU read lock if
* there was a busy waiter. They must not touch items synchronized by
* console_lock or SRCU read lock in this case.
*
* Return: 1 if the lock rights were passed, 0 otherwise.
*/
int console_lock_spinning_disable_and_check(int cookie)
{
int waiter;
/*
* Ignore spinning waiters during panic() because they might get stopped
* or blocked at any time.
*
* It is safe because nobody is allowed to start spinning during panic
* in the first place. If there has been a waiter then non-panic CPUs
* might stay spinning. They would get stopped anyway. The panic context
* will never start spinning and an interrupted spin on panic CPU will
* never continue.
*/
if (panic_in_progress()) {
/* Keep lockdep happy. */
spin_release(&console_owner_dep_map, _THIS_IP_);
return 0;
}
raw_spin_lock(&console_owner_lock);
waiter = READ_ONCE(console_waiter);
console_owner = NULL;
raw_spin_unlock(&console_owner_lock);
if (!waiter) {
spin_release(&console_owner_dep_map, _THIS_IP_);
return 0;
}
/* The waiter is now free to continue */
WRITE_ONCE(console_waiter, false);
spin_release(&console_owner_dep_map, _THIS_IP_);
/*
* Preserve lockdep lock ordering. Release the SRCU read lock before
* releasing the console_lock.
*/
console_srcu_read_unlock(cookie);
/*
* Hand off console_lock to waiter. The waiter will perform
* the up(). After this, the waiter is the console_lock owner.
*/
mutex_release(&console_lock_dep_map, _THIS_IP_);
return 1;
}
/**
* console_trylock_spinning - try to get console_lock by busy waiting
*
* This allows busy waiting for the console_lock when the current
* owner is running in specially marked sections. It means that
* the current owner is running and cannot reschedule until it
* is ready to lose the lock.
*
* Return: 1 if we got the lock, 0 otherwise
*/
static int console_trylock_spinning(void)
{
struct task_struct *owner = NULL;
bool waiter;
bool spin = false;
unsigned long flags;
if (console_trylock())
return 1;
/*
* It's unsafe to spin once a panic has begun. If we are the
* panic CPU, we may have already halted the owner of the
* console_sem. If we are not the panic CPU, then we should
* avoid taking console_sem, so the panic CPU has a better
* chance of cleanly acquiring it later.
*/
if (panic_in_progress())
return 0;
printk_safe_enter_irqsave(flags);
raw_spin_lock(&console_owner_lock);
owner = READ_ONCE(console_owner);
waiter = READ_ONCE(console_waiter);
if (!waiter && owner && owner != current) {
WRITE_ONCE(console_waiter, true);
spin = true;
}
raw_spin_unlock(&console_owner_lock);
/*
* If there is an active printk() writing to the
* consoles, instead of having it write our data too,
* see if we can offload that load from the active
* printer, and do some printing ourselves.
* Go into a spin only if there isn't already a waiter
* spinning, and there is an active printer, and
* that active printer isn't us (recursive printk?).
*/
if (!spin) {
printk_safe_exit_irqrestore(flags);
return 0;
}
/* We spin waiting for the owner to release us */
spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
/* Owner will clear console_waiter on hand off */
while (READ_ONCE(console_waiter))
cpu_relax();
spin_release(&console_owner_dep_map, _THIS_IP_);
printk_safe_exit_irqrestore(flags);
/*
* The owner passed the console lock to us.
* Since we did not spin on console lock, annotate
* this as a trylock. Otherwise lockdep will
* complain.
*/
mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
/*
* Update @console_may_schedule for trylock because the previous
* owner may have been schedulable.
*/
console_may_schedule = 0;
return 1;
}
/*
* Recursion is tracked separately on each CPU. If NMIs are supported, an
* additional NMI context per CPU is also separately tracked. Until per-CPU
* data is available, a separate "early tracking" is performed.
*/
static DEFINE_PER_CPU(u8, printk_count);
static u8 printk_count_early;
#ifdef CONFIG_HAVE_NMI
static DEFINE_PER_CPU(u8, printk_count_nmi);
static u8 printk_count_nmi_early;
#endif
/*
* Recursion is limited to keep the output sane. printk() should not require
* more than 1 level of recursion (allowing, for example, printk() to trigger
* a WARN), but a higher value is used in case some printk-internal errors
* exist, such as the ringbuffer validation checks failing.
*/
#define PRINTK_MAX_RECURSION 3
/*
* Return a pointer to the dedicated counter for the CPU+context of the
* caller.
*/
static u8 *__printk_recursion_counter(void)
{
#ifdef CONFIG_HAVE_NMI
if (in_nmi()) {
if (printk_percpu_data_ready())
return this_cpu_ptr(&printk_count_nmi);
return &printk_count_nmi_early;
}
#endif
if (printk_percpu_data_ready())
return this_cpu_ptr(&printk_count);
return &printk_count_early;
}
/*
* Enter recursion tracking. Interrupts are disabled to simplify tracking.
* The caller must check the boolean return value to see if the recursion is
* allowed. On failure, interrupts are not disabled.
*
* @recursion_ptr must be a variable of type (u8 *) and is the same variable
* that is passed to printk_exit_irqrestore().
*/
#define printk_enter_irqsave(recursion_ptr, flags) \
({ \
bool success = true; \
\
typecheck(u8 *, recursion_ptr); \
local_irq_save(flags); \
(recursion_ptr) = __printk_recursion_counter(); \
if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
local_irq_restore(flags); \
success = false; \
} else { \
(*(recursion_ptr))++; \
} \
success; \
})
/* Exit recursion tracking, restoring interrupts. */
#define printk_exit_irqrestore(recursion_ptr, flags) \
do { \
typecheck(u8 *, recursion_ptr); \
(*(recursion_ptr))--; \
local_irq_restore(flags); \
} while (0)
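/*
* Optional delay, in milliseconds, applied by printk_delay() on every
* printk() call to make output readable on slow consoles (typically set
* via the printk_delay sysctl).
*/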
int printk_delay_msec __read_mostly;
static inline void printk_delay(int level)
{
boot_delay_msec(level);
if (unlikely(printk_delay_msec)) {
int m = printk_delay_msec;
while (m--) {
mdelay(1);
touch_nmi_watchdog();
}
}
}
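/*
* Identify the printk() caller: the thread PID in task context, otherwise
* 0x80000000 plus the CPU number for interrupt/NMI context.
*/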
static inline u32 printk_caller_id(void)
{
return in_task() ? task_pid_nr(current) : 0x80000000 + smp_processor_id();
}
/**
* printk_parse_prefix - Parse level and control flags.
*
* @text: The terminated text message.
* @level: A pointer to the current level value, will be updated.
* @flags: A pointer to the current printk_info flags, will be updated.
*
* @level may be NULL if the caller is not interested in the parsed value.
* Otherwise the variable pointed to by @level must be set to
* LOGLEVEL_DEFAULT in order to be updated with the parsed value.
*
* @flags may be NULL if the caller is not interested in the parsed value.
* Otherwise the variable pointed to by @flags will be OR'd with the parsed
* value.
*
* Return: The length of the parsed level and control flags.
*/
u16 printk_parse_prefix(const char *text, int *level,
enum printk_info_flags *flags)
{
u16 prefix_len = 0;
int kern_level;
while (*text) {
kern_level = printk_get_level(text);
if (!kern_level)
break;
switch (kern_level) {
case '0' ... '7':
if (level && *level == LOGLEVEL_DEFAULT)
*level = kern_level - '0';
break;
case 'c': /* KERN_CONT */
if (flags)
*flags |= LOG_CONT;
}
prefix_len += 2;
text += 2;
}
return prefix_len;
}
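/*
* Format the message into @text (at most @size bytes). A trailing newline is
* stripped and recorded as LOG_NEWLINE in @flags. For facility 0 (kernel
* internal) messages, a leading log level/control flag prefix is stripped
* as well.
*/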
__printf(5, 0)
static u16 printk_sprint(char *text, u16 size, int facility,
enum printk_info_flags *flags, const char *fmt,
va_list args)
{
u16 text_len;
text_len = vscnprintf(text, size, fmt, args);
/* Mark and strip a trailing newline. */
if (text_len && text[text_len - 1] == '\n') {
text_len--;
*flags |= LOG_NEWLINE;
}
/* Strip log level and control flags. */
if (facility == 0) {
u16 prefix_len;
prefix_len = printk_parse_prefix(text, NULL, NULL);
if (prefix_len) {
text_len -= prefix_len;
memmove(text, text + prefix_len, text_len);
}
}
trace_console(text, text_len);
return text_len;
}
__printf(4, 0)
int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
struct prb_reserved_entry e;
enum printk_info_flags flags = 0;
struct printk_record r;
unsigned long irqflags;
u16 trunc_msg_len = 0;
char prefix_buf[8];
u8 *recursion_ptr;
u16 reserve_size;
va_list args2;
u32 caller_id;
u16 text_len;
int ret = 0;
u64 ts_nsec;
if (!printk_enter_irqsave(recursion_ptr, irqflags))
return 0;
/*
* Since the duration of printk() can vary depending on the message
* and state of the ringbuffer, grab the timestamp now so that it is
* close to the call of printk(). This provides a more deterministic
* timestamp with respect to the caller.
*/
ts_nsec = local_clock();
caller_id = printk_caller_id();
/*
* The sprintf needs to come first since the syslog prefix might be
* passed in as a parameter. An extra byte must be reserved so that
* later the vscnprintf() into the reserved buffer has room for the
* terminating '\0', which is not counted by vsnprintf().
*/
va_copy(args2, args);
reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
va_end(args2);
if (reserve_size > PRINTKRB_RECORD_MAX)
reserve_size = PRINTKRB_RECORD_MAX;
/* Extract log level or control flags. */
if (facility == 0)
printk_parse_prefix(&prefix_buf[0], &level, &flags);
if (level == LOGLEVEL_DEFAULT)
level = default_message_loglevel;
if (dev_info)
flags |= LOG_NEWLINE;
if (is_printk_force_console())
flags |= LOG_FORCE_CON;
if (flags & LOG_CONT) {
prb_rec_init_wr(&r, reserve_size);
if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
facility, &flags, fmt, args);
r.info->text_len += text_len;
if (flags & LOG_FORCE_CON)
r.info->flags |= LOG_FORCE_CON;
if (flags & LOG_NEWLINE) {
r.info->flags |= LOG_NEWLINE;
prb_final_commit(&e);
} else {
prb_commit(&e);
}
ret = text_len;
goto out;
}
}
/*
* Explicitly initialize the record before every prb_reserve() call.
* prb_reserve_in_last() and prb_reserve() purposely invalidate the
* structure when they fail.
*/
prb_rec_init_wr(&r, reserve_size);
if (!prb_reserve(&e, prb, &r)) {
/* truncate the message if it is too long for empty buffer */
truncate_msg(&reserve_size, &trunc_msg_len);
prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
if (!prb_reserve(&e, prb, &r))
goto out;
}
/* fill message */
text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
if (trunc_msg_len)
memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
r.info->text_len = text_len + trunc_msg_len;
r.info->facility = facility;
r.info->level = level & 7;
r.info->flags = flags & 0x1f;
r.info->ts_nsec = ts_nsec;
r.info->caller_id = caller_id;
if (dev_info)
memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
/* A message without a trailing newline can be continued. */
if (!(flags & LOG_NEWLINE))
prb_commit(&e);
else
prb_final_commit(&e);
ret = text_len + trunc_msg_len;
out:
printk_exit_irqrestore(recursion_ptr, irqflags);
return ret;
}
/*
* This acts as a one-way switch to allow legacy consoles to print from
* the printk() caller context on a panic CPU. It also attempts to flush
* the legacy consoles in this context.
*/
void printk_legacy_allow_panic_sync(void)
{
struct console_flush_type ft;
legacy_allow_panic_sync = true;
printk_get_console_flush_type(&ft);
if (ft.legacy_direct) {
if (console_trylock())
console_unlock();
}
}
bool __read_mostly debug_non_panic_cpus;
#ifdef CONFIG_PRINTK_CALLER
static int __init debug_non_panic_cpus_setup(char *str)
{
debug_non_panic_cpus = true;
pr_info("allow messages from non-panic CPUs in panic()\n");
return 0;
}
early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup);
module_param(debug_non_panic_cpus, bool, 0644);
MODULE_PARM_DESC(debug_non_panic_cpus,
"allow messages from non-panic CPUs in panic()");
#endif
asmlinkage int vprintk_emit(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
struct console_flush_type ft;
int printed_len;
/* Suppress unimportant messages after panic happens */
if (unlikely(suppress_printk))
return 0;
/*
* The messages on the panic CPU are the most important. If
* non-panic CPUs are generating any messages, they will be
* silently dropped.
*/
if (panic_on_other_cpu() && !debug_non_panic_cpus && !panic_triggering_all_cpu_backtrace)
return 0;
printk_get_console_flush_type(&ft);
/* If called from the scheduler, we can not call up(). */
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
ft.legacy_offload |= ft.legacy_direct;
ft.legacy_direct = false;
}
printk_delay(level);
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_direct) {
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during
* printing of all remaining records to all consoles so that
* this context can return as soon as possible. Hopefully
* another printk() caller will take over the printing.
*/
preempt_disable();
/*
* Try to acquire and then immediately release the console
* semaphore. The release will print out buffers. With the
* spinning variant, this context tries to take over the
* printing from another printing context.
*/
if (console_trylock_spinning())
console_unlock();
preempt_enable();
}
if (ft.legacy_offload)
defer_console_output();
else
wake_up_klogd();
return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);
int vprintk_default(const char *fmt, va_list args)
{
return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
}
EXPORT_SYMBOL_GPL(vprintk_default);
asmlinkage __visible int _printk(const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = vprintk(fmt, args);
va_end(args);
return r;
}
EXPORT_SYMBOL(_printk);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
#else /* CONFIG_PRINTK */
#define printk_time false
#define prb_read_valid(rb, seq, r) false
#define prb_first_valid_seq(rb) 0
#define prb_next_seq(rb) 0
static u64 syslog_seq;
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
#endif /* CONFIG_PRINTK */
#ifdef CONFIG_EARLY_PRINTK
struct console *early_console;
asmlinkage __visible void early_printk(const char *fmt, ...)
{
va_list ap;
char buf[512];
int n;
if (!early_console)
return;
va_start(ap, fmt);
n = vscnprintf(buf, sizeof(buf), fmt, ap);
va_end(ap);
early_console->write(early_console, buf, n);
}
#endif
static void set_user_specified(struct console_cmdline *c, bool user_specified)
{
if (!user_specified)
return;
/*
* @c console was defined by the user on the command line.
* Do not clear when added twice also by SPCR or the device tree.
*/
c->user_specified = true;
/* At least one console defined by the user on the command line. */
console_set_on_cmdline = 1;
}
static int __add_preferred_console(const char *name, const short idx,
const char *devname, char *options,
char *brl_options, bool user_specified)
{
struct console_cmdline *c;
int i;
if (!name && !devname)
return -EINVAL;
/*
* We use a signed short index for struct console for device drivers to
* indicate a not yet assigned index or port. However, a negative index
* value is not valid when the console name and index are defined on
* the command line.
*/
if (name && idx < 0)
return -EINVAL;
/*
* See if this tty is not yet registered, and
* if we have a slot free.
*/
for (i = 0, c = console_cmdline;
i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
i++, c++) {
if ((name && strcmp(c->name, name) == 0 && c->index == idx) ||
(devname && strcmp(c->devname, devname) == 0)) {
if (!brl_options)
preferred_console = i;
set_user_specified(c, user_specified);
return 0;
}
}
if (i == MAX_CMDLINECONSOLES)
return -E2BIG;
if (!brl_options)
preferred_console = i;
if (name)
strscpy(c->name, name);
if (devname)
strscpy(c->devname, devname);
c->options = options;
set_user_specified(c, user_specified);
braille_set_options(c, brl_options);
c->index = idx;
return 0;
}
static int __init console_msg_format_setup(char *str)
{
if (!strcmp(str, "syslog"))
console_msg_format = MSG_FORMAT_SYSLOG;
if (!strcmp(str, "default"))
console_msg_format = MSG_FORMAT_DEFAULT;
return 1;
}
__setup("console_msg_format=", console_msg_format_setup);
/*
* Set up a console. Called via do_early_param() in init/main.c
* for each "console=" parameter in the boot command line.
*/
static int __init console_setup(char *str)
{
static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4);
char buf[sizeof(console_cmdline[0].devname)];
char *brl_options = NULL;
char *ttyname = NULL;
char *devname = NULL;
char *options;
char *s;
int idx;
/*
* console="" or console=null have been suggested as a way to
* disable console output. Use ttynull that has been created
* for exactly this purpose.
*/
if (str[0] == 0 || strcmp(str, "null") == 0) {
__add_preferred_console("ttynull", 0, NULL, NULL, NULL, true);
return 1;
}
if (_braille_console_setup(&str, &brl_options))
return 1;
/* For a DEVNAME:0.0 style console the character device is unknown early */
if (strchr(str, ':'))
devname = buf;
else
ttyname = buf;
/*
* Decode str into name, index, options.
*/
if (ttyname && isdigit(str[0]))
scnprintf(buf, sizeof(buf), "ttyS%s", str);
else
strscpy(buf, str);
options = strchr(str, ',');
if (options)
*(options++) = 0;
#ifdef __sparc__
if (!strcmp(str, "ttya"))
strscpy(buf, "ttyS0");
if (!strcmp(str, "ttyb"))
strscpy(buf, "ttyS1");
#endif
for (s = buf; *s; s++)
if ((ttyname && isdigit(*s)) || *s == ',')
break;
/* @idx will get defined when devname matches. */
if (devname)
idx = -1;
else
idx = simple_strtoul(s, NULL, 10);
*s = 0;
__add_preferred_console(ttyname, idx, devname, options, brl_options, true);
return 1;
}
__setup("console=", console_setup);
/**
* add_preferred_console - add a device to the list of preferred consoles.
* @name: device name
* @idx: device index
* @options: options for this console
*
* The last preferred console added will be used for kernel messages
* and stdin/out/err for init. Normally this is used by console_setup
* above to handle user-supplied console arguments; however it can also
* be used by arch-specific code either to override the user or more
* commonly to provide a default console (ie from PROM variables) when
* the user has not supplied one.
*/
int add_preferred_console(const char *name, const short idx, char *options)
{
return __add_preferred_console(name, idx, NULL, options, NULL, false);
}
/**
* match_devname_and_update_preferred_console - Update a preferred console
* when matching devname is found.
* @devname: DEVNAME:0.0 style device name
* @name: Name of the corresponding console driver, e.g. "ttyS"
* @idx: Console index, e.g. port number.
*
* The function checks whether a device with the given @devname is
* preferred via the console=DEVNAME:0.0 command line option.
* It fills the missing console driver name and console index
* so that a later register_console() call could find (match)
* and enable this device.
*
* It might be used when a driver subsystem initializes particular
* devices with already known DEVNAME:0.0 style names. And it
* could predict which console driver name and index this device
* would later get associated with.
*
* Return: 0 on success, negative error code on failure.
*/
int match_devname_and_update_preferred_console(const char *devname,
const char *name,
const short idx)
{
struct console_cmdline *c = console_cmdline;
int i;
if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0)
return -EINVAL;
for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
i++, c++) {
if (!strcmp(devname, c->devname)) {
pr_info("associate the preferred console \"%s\" with \"%s%d\"\n",
devname, name, idx);
strscpy(c->name, name);
c->index = idx;
return 0;
}
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);
static int __init console_suspend_disable(char *str)
{
console_suspend_enabled = false;
return 1;
}
__setup("no_console_suspend", console_suspend_disable);
module_param_named(console_suspend, console_suspend_enabled,
bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
" and hibernate operations");
static bool printk_console_no_auto_verbose;
void console_verbose(void)
{
if (console_loglevel && !printk_console_no_auto_verbose)
console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
}
EXPORT_SYMBOL_GPL(console_verbose);
module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
/**
* console_suspend_all - suspend the console subsystem
*
* This disables printk() while we go into suspend states
*/
void console_suspend_all(void)
{
struct console *con;
if (!console_suspend_enabled)
return;
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
pr_flush(1000, true);
console_list_lock();
for_each_console(con)
console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
console_list_unlock();
/*
* Ensure that all SRCU list walks have completed. All printing
* contexts must be able to see that they are suspended so that it
* is guaranteed that all printing has stopped when this function
* completes.
*/
synchronize_srcu(&console_srcu);
}
void console_resume_all(void)
{
struct console_flush_type ft;
struct console *con;
if (!console_suspend_enabled)
return;
console_list_lock();
for_each_console(con)
console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
console_list_unlock();
/*
* Ensure that all SRCU list walks have completed. All printing
* contexts must be able to see they are no longer suspended so
* that they are guaranteed to wake up and resume printing.
*/
synchronize_srcu(&console_srcu);
printk_get_console_flush_type(&ft);
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_offload)
defer_console_output();
pr_flush(1000, true);
}
/**
* console_cpu_notify - print deferred console messages after CPU hotplug
* @cpu: unused
*
* If printk() is called from a CPU that is not online yet, the messages
* will be printed on the console only if there are CON_ANYTIME consoles.
* This function is called when a new CPU comes online (or fails to come
* up) or goes offline.
*/
static int console_cpu_notify(unsigned int cpu)
{
struct console_flush_type ft;
if (!cpuhp_tasks_frozen) {
printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.legacy_direct) {
if (console_trylock())
console_unlock();
}
}
return 0;
}
/**
* console_lock - block the console subsystem from printing
*
* Acquires a lock which guarantees that no consoles will
* be in or enter their write() callback.
*
* Can sleep, returns nothing.
*/
void console_lock(void)
{
might_sleep();
/* On panic, the console_lock must be left to the panic cpu. */
while (panic_on_other_cpu())
msleep(1000);
down_console_sem();
console_locked = 1;
console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);
/**
* console_trylock - try to block the console subsystem from printing
*
* Try to acquire a lock which guarantees that no consoles will
* be in or enter their write() callback.
*
* returns 1 on success, and 0 on failure to acquire the lock.
*/
int console_trylock(void)
{
/* On panic, the console_lock must be left to the panic cpu. */
if (panic_on_other_cpu())
return 0;
if (down_trylock_console_sem())
return 0;
console_locked = 1;
console_may_schedule = 0;
return 1;
}
EXPORT_SYMBOL(console_trylock);
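/* Return whether console_lock is currently held. */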
int is_console_locked(void)
{
return console_locked;
}
EXPORT_SYMBOL(is_console_locked);
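/* Release console_sem and clear the locked state without flushing pending records. */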
static void __console_unlock(void)
{
console_locked = 0;
up_console_sem();
}
#ifdef CONFIG_PRINTK
/*
* Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting
* the existing message over and inserting the scratchbuf message.
*
* @pmsg is the original printk message.
* @fmt is the printf format of the message which will prepend the existing one.
*
* If there is not enough space in @pmsg->pbufs->outbuf, the existing
* message text will be sufficiently truncated.
*
* If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
*/
__printf(2, 3)
static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...)
{
struct printk_buffers *pbufs = pmsg->pbufs;
const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
const size_t outbuf_sz = sizeof(pbufs->outbuf);
char *scratchbuf = &pbufs->scratchbuf[0];
char *outbuf = &pbufs->outbuf[0];
va_list args;
size_t len;
va_start(args, fmt);
len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args);
va_end(args);
/*
* Make sure outbuf is sufficiently large before prepending.
* Keep at least the prefix when the message must be truncated.
* It is a rather theoretical problem when someone tries to
* use a minimalist buffer.
*/
if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
return;
if (pmsg->outbuf_len + len >= outbuf_sz) {
/* Truncate the message, but keep it terminated. */
pmsg->outbuf_len = outbuf_sz - (len + 1);
outbuf[pmsg->outbuf_len] = 0;
}
memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
memcpy(outbuf, scratchbuf, len);
pmsg->outbuf_len += len;
}
/*
* Prepend the message in @pmsg->pbufs->outbuf with a "dropped message".
* @pmsg->outbuf_len is updated appropriately.
*
* @pmsg is the printk message to prepend.
*
* @dropped is the dropped count to report in the dropped message.
*/
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
{
console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped);
}
/*
* Prepend the message in @pmsg->pbufs->outbuf with a "replay message".
* @pmsg->outbuf_len is updated appropriately.
*
* @pmsg is the printk message to prepend.
*/
void console_prepend_replay(struct printk_message *pmsg)
{
console_prepend_message(pmsg, "** replaying previous printk message **\n");
}
/*
* Read and format the specified record (or a later record if the specified
* record is not available).
*
* @pmsg will contain the formatted result. @pmsg->pbufs must point to a
* struct printk_buffers.
*
* @seq is the record to read and format. If it is not available, the next
* valid record is read.
*
* @is_extended specifies if the message should be formatted for extended
* console output.
*
* @may_suppress specifies if records may be skipped based on loglevel.
*
* Returns false if no record is available. Otherwise true and all fields
* of @pmsg are valid. (See the documentation of struct printk_message
* for information about the @pmsg fields.)
*/
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
bool is_extended, bool may_suppress)
{
struct printk_buffers *pbufs = pmsg->pbufs;
const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
const size_t outbuf_sz = sizeof(pbufs->outbuf);
char *scratchbuf = &pbufs->scratchbuf[0];
char *outbuf = &pbufs->outbuf[0];
struct printk_info info;
struct printk_record r;
size_t len = 0;
bool force_con;
/*
* Formatting extended messages requires a separate buffer, so use the
* scratch buffer to read in the ringbuffer text.
*
* Formatting normal messages is done in-place, so read the ringbuffer
* text directly into the output buffer.
*/
if (is_extended)
prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
else
prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);
if (!prb_read_valid(prb, seq, &r))
return false;
pmsg->seq = r.info->seq;
pmsg->dropped = r.info->seq - seq;
force_con = r.info->flags & LOG_FORCE_CON;
/*
* Skip records that are not forced to be printed on consoles and that
* have a level above the console loglevel.
*/
if (!force_con && may_suppress && suppress_message_printing(r.info->level))
goto out;
if (is_extended) {
len = info_print_ext_header(outbuf, outbuf_sz, r.info);
len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
&r.text_buf[0], r.info->text_len, &r.info->dev_info);
} else {
len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
}
out:
pmsg->outbuf_len = len;
return true;
}
/*
* Legacy console printing from printk() caller context does not respect
* raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
* false positive. For PREEMPT_RT the false positive condition does not
* occur.
*
* This map is used to temporarily establish LD_WAIT_SLEEP context for the
* console write() callback when legacy printing to avoid false positive
* lockdep complaints, thus allowing lockdep to continue to function for
* real issues.
*/
#ifdef CONFIG_PREEMPT_RT
static inline void printk_legacy_allow_spinlock_enter(void) { }
static inline void printk_legacy_allow_spinlock_exit(void) { }
#else
static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
static inline void printk_legacy_allow_spinlock_enter(void)
{
lock_map_acquire_try(&printk_legacy_map);
}
static inline void printk_legacy_allow_spinlock_exit(void)
{
lock_map_release(&printk_legacy_map);
}
#endif /* CONFIG_PREEMPT_RT */
/*
* Used as the printk buffers for non-panic, serialized console printing.
* This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
* Its usage requires the console_lock held.
*/
struct printk_buffers printk_shared_pbufs;
/*
* Print one record for the given console. The record printed is whatever
* record is the next available record for the given console.
*
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding both the
* console_lock and the SRCU read lock. Otherwise it is set to false.
*
* @cookie is the cookie from the SRCU read lock.
*
* Returns false if the given console has no next record to print, otherwise
* true.
*
* Requires the console_lock and the SRCU read lock.
*/
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
{
bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
char *outbuf = &printk_shared_pbufs.outbuf[0];
struct printk_message pmsg = {
.pbufs = &printk_shared_pbufs,
};
unsigned long flags;
*handover = false;
if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
return false;
con->dropped += pmsg.dropped;
/* Skip messages of formatted length 0. */
if (pmsg.outbuf_len == 0) {
con->seq = pmsg.seq + 1;
goto skip;
}
if (con->dropped && !is_extended) {
console_prepend_dropped(&pmsg, con->dropped);
con->dropped = 0;
}
/* Write everything out to the hardware. */
if (force_legacy_kthread() && !panic_in_progress()) {
/*
* With forced threading this function is in a task context
* (either legacy kthread or get_init_console_seq()). There
* is no need for concern about printk reentrance, handovers,
* or lockdep complaints.
*/
con->write(con, outbuf, pmsg.outbuf_len);
con->seq = pmsg.seq + 1;
} else {
/*
* While actively printing out messages, if another printk()
* were to occur on another CPU, it may wait for this one to
* finish. This task can not be preempted if there is a
* waiter waiting to take over.
*
* Interrupts are disabled because the hand over to a waiter
* must not be interrupted until the hand over is completed
* (@console_waiter is cleared).
*/
printk_safe_enter_irqsave(flags);
console_lock_spinning_enable();
/* Do not trace print latency. */
stop_critical_timings();
printk_legacy_allow_spinlock_enter();
con->write(con, outbuf, pmsg.outbuf_len);
printk_legacy_allow_spinlock_exit();
start_critical_timings();
con->seq = pmsg.seq + 1;
*handover = console_lock_spinning_disable_and_check(cookie);
printk_safe_exit_irqrestore(flags);
}
skip:
return true;
}
#else
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
{
*handover = false;
return false;
}
static inline void printk_kthreads_check_locked(void) { }
#endif /* CONFIG_PRINTK */
/*
* Print out all remaining records to all consoles.
*
* @do_cond_resched is set by the caller. It can be true only in schedulable
* context.
*
* @next_seq is set to the sequence number after the last available record.
* The value is valid only when this function returns true. It means that all
* usable consoles are completely flushed.
*
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding the
* console_lock. Otherwise it is set to false.
*
* Returns true when there was at least one usable console and all messages
* were flushed to all usable consoles. A returned false informs the caller
* that everything was not flushed (either there were no usable consoles or
* another context has taken over printing or it is a panic situation and this
* is not the panic CPU). Regardless of the reason, the caller should assume it
* is not useful to immediately try again.
*
* Requires the console_lock.
*/
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
{
struct console_flush_type ft;
bool any_usable = false;
struct console *con;
bool any_progress;
int cookie;
*next_seq = 0;
*handover = false;
do {
any_progress = false;
printk_get_console_flush_type(&ft);
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
u64 printk_seq;
bool progress;
/*
* console_flush_all() is only responsible for nbcon
* consoles when the nbcon consoles cannot print via
* their atomic or threaded flushing.
*/
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
continue;
if (!console_is_usable(con, flags, !do_cond_resched))
continue;
any_usable = true;
if (flags & CON_NBCON) {
progress = nbcon_legacy_emit_next_record(con, handover, cookie,
!do_cond_resched);
printk_seq = nbcon_seq_read(con);
} else {
progress = console_emit_next_record(con, handover, cookie);
printk_seq = con->seq;
}
/*
* If a handover has occurred, the SRCU read lock
* is already released.
*/
if (*handover)
return false;
/* Track the next of the highest seq flushed. */
if (printk_seq > *next_seq)
*next_seq = printk_seq;
if (!progress)
continue;
any_progress = true;
/* Allow panic_cpu to take over the consoles safely. */
if (panic_on_other_cpu())
goto abandon;
if (do_cond_resched)
cond_resched();
}
console_srcu_read_unlock(cookie);
} while (any_progress);
return any_usable;
abandon:
console_srcu_read_unlock(cookie);
return false;
}
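/*
* Flush all pending records to the legacy consoles and release the
* console_lock. Repeat if new records were added while unlocking and this
* context can re-take the lock via console_trylock().
*/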
static void __console_flush_and_unlock(void)
{
bool do_cond_resched;
bool handover;
bool flushed;
u64 next_seq;
/*
* Console drivers are called with interrupts disabled, so
* @console_may_schedule should be cleared before; however, we may
* end up dumping a lot of lines, for example, if called from
* console registration path, and should invoke cond_resched()
* between lines if allowable. Not doing so can cause a very long
* scheduling stall on a slow console leading to RCU stall and
* softlockup warnings which exacerbate the issue with more
* messages practically incapacitating the system. Therefore, create
* a local to use for the printing loop.
*/
do_cond_resched = console_may_schedule;
do {
console_may_schedule = 0;
flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
if (!handover)
__console_unlock();
/*
* Abort if there was a failure to flush all messages to all
* usable consoles. Either it is not possible to flush (in
* which case it would be an infinite loop of retrying) or
* another context has taken over printing.
*/
if (!flushed)
break;
/*
* Some context may have added new records after
* console_flush_all() but before unlocking the console.
* Re-check if there is a new record to flush. If the trylock
* fails, another context is already handling the printing.
*/
} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}
/**
* console_unlock - unblock the legacy console subsystem from printing
*
* Releases the console_lock which the caller holds to block printing of
* the legacy console subsystem.
*
* While the console_lock was held, console output may have been buffered
* by printk(). If this is the case, console_unlock() emits the output on
* legacy consoles prior to releasing the lock.
*
* console_unlock() may be called from any context.
*/
void console_unlock(void)
{
struct console_flush_type ft;
printk_get_console_flush_type(&ft);
if (ft.legacy_direct)
__console_flush_and_unlock();
else
__console_unlock();
}
EXPORT_SYMBOL(console_unlock);
/**
* console_conditional_schedule - yield the CPU if required
*
* If the console code is currently allowed to sleep, and
* if this CPU should yield the CPU to another task, do
* so here.
*
* Must be called while holding the console_lock.
*/
void __sched console_conditional_schedule(void)
{
if (console_may_schedule)
cond_resched();
}
EXPORT_SYMBOL(console_conditional_schedule);
void console_unblank(void)
{
bool found_unblank = false;
struct console *c;
int cookie;
/*
* First check if there are any consoles implementing the unblank()
* callback. If not, there is no reason to continue and take the
* console lock, which in particular can be dangerous if
* @oops_in_progress is set.
*/
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
short flags = console_srcu_read_flags(c);
if (flags & CON_SUSPENDED)
continue;
if ((flags & CON_ENABLED) && c->unblank) {
found_unblank = true;
break;
}
}
console_srcu_read_unlock(cookie);
if (!found_unblank)
return;
/*
* Stop console printing because the unblank() callback may
* assume the console is not within its write() callback.
*
* If @oops_in_progress is set, this may be an atomic context.
* In that case, attempt a trylock as best-effort.
*/
if (oops_in_progress) {
/* Semaphores are not NMI-safe. */
if (in_nmi())
return;
/*
* Attempting to trylock the console lock can deadlock
* if another CPU was stopped while modifying the
* semaphore. "Hope and pray" that this is not the
* current situation.
*/
if (down_trylock_console_sem() != 0)
return;
} else
console_lock();
console_locked = 1;
console_may_schedule = 0;
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
short flags = console_srcu_read_flags(c);
if (flags & CON_SUSPENDED)
continue;
if ((flags & CON_ENABLED) && c->unblank)
c->unblank();
}
console_srcu_read_unlock(cookie);
console_unlock();
if (!oops_in_progress)
pr_flush(1000, true);
}
/*
* Rewind all consoles to the oldest available record.
*
* IMPORTANT: The function is safe only when called under
* console_lock(). It is not enforced because
* it is used as a best effort in panic().
*/
static void __console_rewind_all(void)
{
struct console *c;
short flags;
int cookie;
u64 seq;
seq = prb_first_valid_seq(prb);
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
flags = console_srcu_read_flags(c);
if (flags & CON_NBCON) {
nbcon_seq_force(c, seq);
} else {
/*
* This assignment is safe only when called under
* console_lock(). On panic, legacy consoles are
* only best effort.
*/
c->seq = seq;
}
}
console_srcu_read_unlock(cookie);
}
/**
* console_flush_on_panic - flush console content on panic
* @mode: flush all messages in buffer or just the pending ones
*
* Immediately output all pending messages no matter what.
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
struct console_flush_type ft;
bool handover;
u64 next_seq;
/*
* Ignore the console lock and flush out the messages. Attempting a
* trylock would not be useful because:
*
* - if it is contended, it must be ignored anyway
* - console_lock() and console_trylock() block and fail
* respectively in panic for non-panic CPUs
* - semaphores are not NMI-safe
*/
/*
* If another context is holding the console lock,
* @console_may_schedule might be set. Clear it so that
* this context does not call cond_resched() while flushing.
*/
console_may_schedule = 0;
if (mode == CONSOLE_REPLAY_ALL)
__console_rewind_all();
printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
/* Flush legacy consoles once allowed, even when dangerous. */
if (legacy_allow_panic_sync)
console_flush_all(false, &next_seq, &handover);
}
/*
* Return the console tty driver structure and its associated index
*/
struct tty_driver *console_device(int *index)
{
struct console *c;
struct tty_driver *driver = NULL;
int cookie;
/*
* Take console_lock to serialize device() callback with
* other console operations. For example, fg_console is
* modified under console_lock when switching vt.
*/
console_lock();
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
if (!c->device)
continue;
driver = c->device(c, index);
if (driver)
break;
}
console_srcu_read_unlock(cookie);
console_unlock();
return driver;
}
/*
* Prevent further output on the passed console device so that (for example)
* serial drivers can suspend console output before suspending a port, and can
* re-enable output afterwards.
*/
void console_suspend(struct console *console)
{
__pr_flush(console, 1000, true);
console_list_lock();
console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
console_list_unlock();
/*
* Ensure that all SRCU list walks have completed. All contexts must
* be able to see that this console is disabled so that (for example)
* the caller can suspend the port without risk of another context
* using the port.
*/
synchronize_srcu(&console_srcu);
}
EXPORT_SYMBOL(console_suspend);
void console_resume(struct console *console)
{
struct console_flush_type ft;
bool is_nbcon;
console_list_lock();
console_srcu_write_flags(console, console->flags | CON_ENABLED);
is_nbcon = console->flags & CON_NBCON;
console_list_unlock();
/*
* Ensure that all SRCU list walks have completed. The related
* printing context must be able to see it is enabled so that
* it is guaranteed to wake up and resume printing.
*/
synchronize_srcu(&console_srcu);
printk_get_console_flush_type(&ft);
if (is_nbcon && ft.nbcon_offload)
nbcon_kthread_wake(console);
else if (ft.legacy_offload)
defer_console_output();
__pr_flush(console, 1000, true);
}
EXPORT_SYMBOL(console_resume);
#ifdef CONFIG_PRINTK
static int unregister_console_locked(struct console *console);
/* True when system boot is far enough to create printer threads. */
bool printk_kthreads_ready __ro_after_init;
static struct task_struct *printk_legacy_kthread;
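/*
* Wake condition for the legacy printer thread: a stop was requested, or at
* least one usable console that this thread is responsible for still has
* unprinted records.
*/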
static bool legacy_kthread_should_wakeup(void)
{
struct console_flush_type ft;
struct console *con;
bool ret = false;
int cookie;
if (kthread_should_stop())
return true;
printk_get_console_flush_type(&ft);
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
u64 printk_seq;
/*
* The legacy printer thread is only responsible for nbcon
* consoles when the nbcon consoles cannot print via their
* atomic or threaded flushing.
*/
if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
continue;
if (!console_is_usable(con, flags, false))
continue;
if (flags & CON_NBCON) {
printk_seq = nbcon_seq_read(con);
} else {
/*
* It is safe to read @seq because only this
* thread context updates @seq.
*/
printk_seq = con->seq;
}
if (prb_read_valid(prb, printk_seq, NULL)) {
ret = true;
break;
}
}
console_srcu_read_unlock(cookie);
return ret;
}
static int legacy_kthread_func(void *unused)
{
for (;;) {
wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
if (kthread_should_stop())
break;
console_lock();
__console_flush_and_unlock();
}
return 0;
}
static bool legacy_kthread_create(void)
{
struct task_struct *kt;
lockdep_assert_console_list_lock_held();
kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy");
if (WARN_ON(IS_ERR(kt))) {
pr_err("failed to start legacy printing thread\n");
return false;
}
printk_legacy_kthread = kt;
/*
* It is important that console printing threads are scheduled
* shortly after a printk call and with generous runtime budgets.
*/
sched_set_normal(printk_legacy_kthread, -20);
return true;
}
/**
* printk_kthreads_shutdown - shutdown all threaded printers
*
* On system shutdown all threaded printers are stopped. This allows printk
* to transition back to atomic printing, thus providing a robust mechanism
* for the final shutdown/reboot messages to be output.
*/
static void printk_kthreads_shutdown(void)
{
struct console *con;
console_list_lock();
if (printk_kthreads_running) {
printk_kthreads_running = false;
for_each_console(con) {
if (con->flags & CON_NBCON)
nbcon_kthread_stop(con);
}
/*
* The threads may have been stopped while printing a
* backlog. Flush any records left over.
*/
nbcon_atomic_flush_pending();
}
console_list_unlock();
}
static struct syscore_ops printk_syscore_ops = {
.shutdown = printk_kthreads_shutdown,
};
/*
* If appropriate, start nbcon kthreads and set @printk_kthreads_running.
* If any kthreads fail to start, those consoles are unregistered.
*
* Must be called under console_list_lock().
*/
static void printk_kthreads_check_locked(void)
{
struct hlist_node *tmp;
struct console *con;
lockdep_assert_console_list_lock_held();
if (!printk_kthreads_ready)
return;
/* Start or stop the legacy kthread when needed. */
if (have_legacy_console || have_boot_console) {
if (!printk_legacy_kthread &&
force_legacy_kthread() &&
!legacy_kthread_create()) {
/*
* All legacy consoles must be unregistered. If there
* are any nbcon consoles, they will set up their own
* kthread.
*/
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
if (con->flags & CON_NBCON)
continue;
unregister_console_locked(con);
}
}
} else if (printk_legacy_kthread) {
kthread_stop(printk_legacy_kthread);
printk_legacy_kthread = NULL;
}
/*
* Printer threads cannot be started as long as any boot console is
* registered because there is no way to synchronize the hardware
* registers between boot console code and regular console code.
* It can only be known that there will be no new boot consoles when
* an nbcon console is registered.
*/
if (have_boot_console || !have_nbcon_console) {
/* Clear flag in case all nbcon consoles unregistered. */
printk_kthreads_running = false;
return;
}
if (printk_kthreads_running)
return;
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
if (!(con->flags & CON_NBCON))
continue;
if (!nbcon_kthread_create(con))
unregister_console_locked(con);
}
printk_kthreads_running = true;
}
static int __init printk_set_kthreads_ready(void)
{
register_syscore_ops(&printk_syscore_ops);
console_list_lock();
printk_kthreads_ready = true;
printk_kthreads_check_locked();
console_list_unlock();
return 0;
}
early_initcall(printk_set_kthreads_ready);
#endif /* CONFIG_PRINTK */
static int __read_mostly keep_bootcon;
static int __init keep_bootcon_setup(char *str)
{
keep_bootcon = 1;
pr_info("debug: skip boot console de-registration.\n");
return 0;
}
early_param("keep_bootcon", keep_bootcon_setup);
static int console_call_setup(struct console *newcon, char *options)
{
int err;
if (!newcon->setup)
return 0;
/* Synchronize with possible boot console. */
console_lock();
err = newcon->setup(newcon, options);
console_unlock();
return err;
}
/*
* This is called by register_console() to try to match
* the newly registered console with any of the ones selected
* by either the command line or add_preferred_console() and
* setup/enable it.
*
* Care needs to be taken with consoles that are statically
* enabled, such as netconsole.
*/
static int try_enable_preferred_console(struct console *newcon,
bool user_specified)
{
struct console_cmdline *c;
int i, err;
for (i = 0, c = console_cmdline;
i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
i++, c++) {
/* Console not yet initialized? */
if (!c->name[0])
continue;
if (c->user_specified != user_specified)
continue;
if (!newcon->match ||
newcon->match(newcon, c->name, c->index, c->options) != 0) {
/* default matching */
BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
if (strcmp(c->name, newcon->name) != 0)
continue;
if (newcon->index >= 0 &&
newcon->index != c->index)
continue;
if (newcon->index < 0)
newcon->index = c->index;
if (_braille_register_console(newcon, c))
return 0;
err = console_call_setup(newcon, c->options);
if (err)
return err;
}
newcon->flags |= CON_ENABLED;
if (i == preferred_console)
newcon->flags |= CON_CONSDEV;
return 0;
}
/*
* Some consoles, such as pstore and netconsole, can be enabled even
* without matching. Accept the pre-enabled consoles only when match()
* and setup() had a chance to be called.
*/
if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
return 0;
return -ENOENT;
}
/* Try to enable the console unconditionally */
static void try_enable_default_console(struct console *newcon)
{
if (newcon->index < 0)
newcon->index = 0;
if (console_call_setup(newcon, NULL) != 0)
return;
newcon->flags |= CON_ENABLED;
if (newcon->device)
newcon->flags |= CON_CONSDEV;
}
/* Return the starting sequence number for a newly registered console. */
static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
{
struct console *con;
bool handover;
u64 init_seq;
if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
/* Get a consistent copy of @syslog_seq. */
mutex_lock(&syslog_lock);
init_seq = syslog_seq;
mutex_unlock(&syslog_lock);
} else {
/* Begin with next message added to ringbuffer. */
init_seq = prb_next_seq(prb);
/*
* If any enabled boot consoles are due to be unregistered
* shortly, some may not be caught up and may be the same
* device as @newcon. Since it is not known which boot console
* is the same device, flush all consoles and, if necessary,
* start with the message of the enabled boot console that is
* the furthest behind.
*/
if (bootcon_registered && !keep_bootcon) {
/*
* Hold the console_lock to stop console printing and
* guarantee safe access to console->seq.
*/
console_lock();
/*
* Flush all consoles and set the console to start at
* the next unprinted sequence number.
*/
if (!console_flush_all(true, &init_seq, &handover)) {
/*
* Flushing failed. Just choose the lowest
* sequence of the enabled boot consoles.
*/
/*
* If there was a handover, this context no
* longer holds the console_lock.
*/
if (handover)
console_lock();
init_seq = prb_next_seq(prb);
for_each_console(con) {
u64 seq;
if (!(con->flags & CON_BOOT) ||
!(con->flags & CON_ENABLED)) {
continue;
}
if (con->flags & CON_NBCON)
seq = nbcon_seq_read(con);
else
seq = con->seq;
if (seq < init_seq)
init_seq = seq;
}
}
console_unlock();
}
}
return init_seq;
}
#define console_first() \
hlist_entry(console_list.first, struct console, node)
static int unregister_console_locked(struct console *console);
/*
* The console driver calls this routine during kernel initialization
* to register the console printing procedure with printk() and to
* print any messages that were printed by the kernel before the
* console driver was initialized.
*
* This can happen pretty early during the boot process (because of
* early_printk) - sometimes before setup_arch() completes - be careful
* of what kernel features are used - they may not be initialised yet.
*
* There are two types of consoles - bootconsoles (early_printk) and
* "real" consoles (everything which is not a bootconsole) which are
* handled differently.
* - Any number of bootconsoles can be registered at any time.
* - As soon as a "real" console is registered, all bootconsoles
* will be unregistered automatically.
* - Once a "real" console is registered, any attempt to register a
* bootconsoles will be rejected
*/
void register_console(struct console *newcon)
{
bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
bool bootcon_registered = false;
bool realcon_registered = false;
struct console *con;
unsigned long flags;
u64 init_seq;
int err;
console_list_lock();
for_each_console(con) {
if (WARN(con == newcon, "console '%s%d' already registered\n",
con->name, con->index)) {
goto unlock;
}
if (con->flags & CON_BOOT)
bootcon_registered = true;
else
realcon_registered = true;
}
/* Do not register boot consoles when there already is a real one. */
if ((newcon->flags & CON_BOOT) && realcon_registered) {
pr_info("Too late to register bootconsole %s%d\n",
newcon->name, newcon->index);
goto unlock;
}
if (newcon->flags & CON_NBCON) {
/*
* Ensure the nbcon console buffers can be allocated
* before modifying any global data.
*/
if (!nbcon_alloc(newcon))
goto unlock;
}
/*
* See if we want to enable this console driver by default.
*
* Not when a console is already preferred by the command line, device
* tree, or SPCR.
*
* The first real console with tty binding (driver) wins. More
* consoles might get enabled before the right one is found.
*
* Note that a console with tty binding will have CON_CONSDEV
* flag set and will be first in the list.
*/
if (preferred_console < 0) {
if (hlist_empty(&console_list) || !console_first()->device ||
console_first()->flags & CON_BOOT) {
try_enable_default_console(newcon);
}
}
/* See if this console matches one we selected on the command line */
err = try_enable_preferred_console(newcon, true);
/* If not, try to match against the platform default(s) */
if (err == -ENOENT)
err = try_enable_preferred_console(newcon, false);
/* printk() messages are not printed to the Braille console. */
if (err || newcon->flags & CON_BRL) {
if (newcon->flags & CON_NBCON)
nbcon_free(newcon);
goto unlock;
}
/*
* If we have a bootconsole and are switching to a real console,
* don't print everything out again, since when the boot console and
* the real console are the same physical device, it's annoying to
* see the beginning boot messages twice.
*/
if (bootcon_registered &&
((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
newcon->flags &= ~CON_PRINTBUFFER;
}
newcon->dropped = 0;
init_seq = get_init_console_seq(newcon, bootcon_registered);
if (newcon->flags & CON_NBCON) {
have_nbcon_console = true;
nbcon_seq_force(newcon, init_seq);
} else {
have_legacy_console = true;
newcon->seq = init_seq;
}
if (newcon->flags & CON_BOOT)
have_boot_console = true;
/*
* If another context is actively using the hardware of this new
* console, it will not be aware of the nbcon synchronization. There
* is a risk that two contexts could access the hardware
* simultaneously if this new console is used for atomic printing
* and the other context is still using the hardware.
*
* Use the driver synchronization to ensure that the hardware is not
* in use while this new console transitions to being registered.
*/
if (use_device_lock)
newcon->device_lock(newcon, &flags);
/*
* Put this console in the list - keep the
* preferred driver at the head of the list.
*/
if (hlist_empty(&console_list)) {
/* Ensure CON_CONSDEV is always set for the head. */
newcon->flags |= CON_CONSDEV;
hlist_add_head_rcu(&newcon->node, &console_list);
} else if (newcon->flags & CON_CONSDEV) {
/* Only the new head can have CON_CONSDEV set. */
console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
hlist_add_head_rcu(&newcon->node, &console_list);
} else {
hlist_add_behind_rcu(&newcon->node, console_list.first);
}
/*
* No need to synchronize SRCU here! The caller does not rely
* on all contexts being able to see the new console before
* register_console() completes.
*/
/* This new console is now registered. */
if (use_device_lock)
newcon->device_unlock(newcon, flags);
console_sysfs_notify();
/*
* By unregistering the bootconsoles after we enable the real console
* we get the "console xxx enabled" message on all the consoles -
* boot consoles, real consoles, etc - this is to ensure that end
* users know there might be something in the kernel's log buffer that
* went to the bootconsole (that they do not see on the real console)
*/
con_printk(KERN_INFO, newcon, "enabled\n");
if (bootcon_registered &&
((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
!keep_bootcon) {
struct hlist_node *tmp;
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
if (con->flags & CON_BOOT)
unregister_console_locked(con);
}
}
/* Changed console list, may require printer threads to start/stop. */
printk_kthreads_check_locked();
unlock:
console_list_unlock();
}
EXPORT_SYMBOL(register_console);
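/*
 * Example (illustrative only, not part of this file): a minimal legacy
 * console driver typically provides a write() callback and calls
 * register_console() from its init path. The foo_* names below are
 * hypothetical.
 *
 *	static void foo_console_write(struct console *con, const char *s,
 *				      unsigned int count)
 *	{
 *		foo_hw_emit(s, count);		// hypothetical hardware helper
 *	}
 *
 *	static struct console foo_console = {
 *		.name	= "foo",
 *		.write	= foo_console_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&foo_console);
 */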
/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
bool found_legacy_con = false;
bool found_nbcon_con = false;
bool found_boot_con = false;
unsigned long flags;
struct console *c;
int res;
lockdep_assert_console_list_lock_held();
con_printk(KERN_INFO, console, "disabled\n");
res = _braille_unregister_console(console);
if (res < 0)
return res;
if (res > 0)
return 0;
if (!console_is_registered_locked(console))
res = -ENODEV;
else if (console_is_usable(console, console->flags, true))
__pr_flush(console, 1000, true);
/* Disable it unconditionally */
console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
if (res < 0)
return res;
/*
* Use the driver synchronization to ensure that the hardware is not
* in use while this console transitions to being unregistered.
*/
if (use_device_lock)
console->device_lock(console, &flags);
hlist_del_init_rcu(&console->node);
if (use_device_lock)
console->device_unlock(console, flags);
/*
* <HISTORICAL>
* If this isn't the last console and it has CON_CONSDEV set, we
* need to set it on the next preferred console.
* </HISTORICAL>
*
* The above makes no sense as there is no guarantee that the next
* console has any device attached. Oh well....
*/
if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);
/*
* Ensure that all SRCU list walks have completed. All contexts
* must not be able to see this console in the list so that any
* exit/cleanup routines can be performed safely.
*/
synchronize_srcu(&console_srcu);
/*
* With this console gone, the global flags tracking registered
* console types may have changed. Update them.
*/
for_each_console(c) {
if (c->flags & CON_BOOT)
found_boot_con = true;
if (c->flags & CON_NBCON)
found_nbcon_con = true;
else
found_legacy_con = true;
}
if (!found_boot_con)
have_boot_console = found_boot_con;
if (!found_legacy_con)
have_legacy_console = found_legacy_con;
if (!found_nbcon_con)
have_nbcon_console = found_nbcon_con;
/* @have_nbcon_console must be updated before calling nbcon_free(). */
if (console->flags & CON_NBCON)
nbcon_free(console);
console_sysfs_notify();
if (console->exit)
res = console->exit(console);
/* Changed console list, may require printer threads to start/stop. */
printk_kthreads_check_locked();
return res;
}
int unregister_console(struct console *console)
{
int res;
console_list_lock();
res = unregister_console_locked(console);
console_list_unlock();
return res;
}
EXPORT_SYMBOL(unregister_console);
/**
* console_force_preferred_locked - force a registered console preferred
* @con: The registered console to force preferred.
*
* Must be called under console_list_lock().
*/
void console_force_preferred_locked(struct console *con)
{
struct console *cur_pref_con;
if (!console_is_registered_locked(con))
return;
cur_pref_con = console_first();
/* Already preferred? */
if (cur_pref_con == con)
return;
/*
* Delete, but do not re-initialize the entry. This allows the console
* to continue to appear registered (via any hlist_unhashed_lockless()
* checks), even though it was briefly removed from the console list.
*/
hlist_del_rcu(&con->node);
/*
* Ensure that all SRCU list walks have completed so that the console
* can be added to the beginning of the console list and its forward
* list pointer can be re-initialized.
*/
synchronize_srcu(&console_srcu);
con->flags |= CON_CONSDEV;
WARN_ON(!con->device);
/* Only the new head can have CON_CONSDEV set. */
console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
hlist_add_head_rcu(&con->node, &console_list);
}
EXPORT_SYMBOL(console_force_preferred_locked);
/*
* Initialize the console device. This is called *early*, so
* we can't necessarily depend on lots of kernel help here.
* Just do some early initializations, and do the complex setup
* later.
*/
void __init console_init(void)
{
int ret;
initcall_t call;
initcall_entry_t *ce;
#ifdef CONFIG_NULL_TTY_DEFAULT_CONSOLE
if (!console_set_on_cmdline)
add_preferred_console("ttynull", 0, NULL);
#endif
/* Setup the default TTY line discipline. */
n_tty_init();
/*
* set up the console device so that later boot sequences can
* inform about problems etc..
*/
ce = __con_initcall_start;
trace_initcall_level("console");
while (ce < __con_initcall_end) {
call = initcall_from_entry(ce);
trace_initcall_start(call);
ret = call();
trace_initcall_finish(call, ret);
ce++;
}
}
/*
* Some boot consoles access data that is in the init section and which will
* be discarded after the initcalls have been run. To make sure that no code
* will access this data, unregister the boot consoles in a late initcall.
*
* If for some reason, such as deferred probe or the driver being a loadable
* module, the real console hasn't registered yet at this point, there will
* be a brief interval in which no messages are logged to the console, which
* makes it difficult to diagnose problems that occur during this time.
*
* To mitigate this problem somewhat, only unregister consoles whose memory
* intersects with the init section. Note that all other boot consoles will
* get unregistered when the real preferred console is registered.
*/
static int __init printk_late_init(void)
{
struct hlist_node *tmp;
struct console *con;
int ret;
console_list_lock();
hlist_for_each_entry_safe(con, tmp, &console_list, node) {
if (!(con->flags & CON_BOOT))
continue;
/* Check addresses that might be used for enabled consoles. */
if (init_section_intersects(con, sizeof(*con)) ||
init_section_contains(con->write, 0) ||
init_section_contains(con->read, 0) ||
init_section_contains(con->device, 0) ||
init_section_contains(con->unblank, 0) ||
init_section_contains(con->data, 0)) {
/*
* Please, consider moving the reported consoles out
* of the init section.
*/
pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
con->name, con->index);
unregister_console_locked(con);
}
}
console_list_unlock();
ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
console_cpu_notify);
WARN_ON(ret < 0);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
console_cpu_notify, NULL);
WARN_ON(ret < 0);
printk_sysctl_init();
return 0;
}
late_initcall(printk_late_init);
#if defined CONFIG_PRINTK
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
{
unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
unsigned long remaining_jiffies = timeout_jiffies;
struct console_flush_type ft;
struct console *c;
u64 last_diff = 0;
u64 printk_seq;
short flags;
int cookie;
u64 diff;
u64 seq;
/* Sorry, pr_flush() will not work this early. */
if (system_state < SYSTEM_SCHEDULING)
return false;
might_sleep();
seq = prb_next_reserve_seq(prb);
/* Flush the consoles so that records up to @seq are printed. */
printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.legacy_direct) {
console_lock();
console_unlock();
}
for (;;) {
unsigned long begin_jiffies;
unsigned long slept_jiffies;
diff = 0;
/*
* Hold the console_lock to guarantee safe access to
* console->seq. Releasing console_lock flushes more
* records in case @seq is still not printed on all
* usable consoles.
*
* Holding the console_lock is not necessary if there
* are no legacy or boot consoles. However, such a
* console could register at any time. Always hold the
* console_lock as a precaution rather than
* synchronizing against register_console().
*/
console_lock();
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
if (con && con != c)
continue;
flags = console_srcu_read_flags(c);
/*
* If consoles are not usable, it cannot be expected
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
if (!console_is_usable(c, flags, true) &&
!console_is_usable(c, flags, false)) {
continue;
}
if (flags & CON_NBCON) {
printk_seq = nbcon_seq_read(c);
} else {
printk_seq = c->seq;
}
if (printk_seq < seq)
diff += seq - printk_seq;
}
console_srcu_read_unlock(cookie);
if (diff != last_diff && reset_on_progress)
remaining_jiffies = timeout_jiffies;
console_unlock();
/* Note: @diff is 0 if there are no usable consoles. */
if (diff == 0 || remaining_jiffies == 0)
break;
/* msleep(1) might sleep much longer. Check time by jiffies. */
begin_jiffies = jiffies;
msleep(1);
slept_jiffies = jiffies - begin_jiffies;
remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
last_diff = diff;
}
return (diff == 0);
}
/**
* pr_flush() - Wait for printing threads to catch up.
*
* @timeout_ms: The maximum time (in ms) to wait.
* @reset_on_progress: Reset the timeout if forward progress is seen.
*
* A value of 0 for @timeout_ms means no waiting will occur. A value of -1
* represents infinite waiting.
*
* If @reset_on_progress is true, the timeout will be reset whenever any
* printer has been seen to make some forward progress.
*
* Context: Process context. May sleep while acquiring console lock.
* Return: true if all usable printers are caught up.
*/
bool pr_flush(int timeout_ms, bool reset_on_progress)
{
return __pr_flush(NULL, timeout_ms, reset_on_progress);
}
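/*
 * Example (illustrative only): code about to reset or power off the
 * machine may use pr_flush() to give usable consoles a bounded chance
 * to catch up before the hardware goes away:
 *
 *	pr_emerg("restarting now\n");
 *	pr_flush(1000, true);
 */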
/*
* Delayed printk version, for scheduler-internal messages:
*/
#define PRINTK_PENDING_WAKEUP 0x01
#define PRINTK_PENDING_OUTPUT 0x02
static DEFINE_PER_CPU(int, printk_pending);
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_OUTPUT) {
if (force_legacy_kthread()) {
if (printk_legacy_kthread)
wake_up_interruptible(&legacy_wait);
} else {
if (console_trylock())
console_unlock();
}
}
if (pending & PRINTK_PENDING_WAKEUP)
wake_up_interruptible(&log_wait);
}
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
static void __wake_up_klogd(int val)
{
if (!printk_percpu_data_ready())
return;
preempt_disable();
/*
* Guarantee any new records can be seen by tasks preparing to wait
* before this context checks if the wait queue is empty.
*
* The full memory barrier within wq_has_sleeper() pairs with the full
* memory barrier within set_current_state() of
* prepare_to_wait_event(), which is called after ___wait_event() adds
* the waiter but before it has checked the wait condition.
*
* This pairs with devkmsg_read:A and syslog_print:A.
*/
if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
(val & PRINTK_PENDING_OUTPUT)) {
this_cpu_or(printk_pending, val);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
}
preempt_enable();
}
/**
* wake_up_klogd - Wake kernel logging daemon
*
* Use this function when new records have been added to the ringbuffer
* and the console printing of those records has already occurred or is
* known to be handled by some other context. This function will only
* wake the logging daemon.
*
* Context: Any context.
*/
void wake_up_klogd(void)
{
__wake_up_klogd(PRINTK_PENDING_WAKEUP);
}
/**
* defer_console_output - Wake kernel logging daemon and trigger
* console printing in a deferred context
*
* Use this function when new records have been added to the ringbuffer,
* this context is responsible for console printing those records, but
* the current context is not allowed to perform the console printing.
* Trigger an irq_work context to perform the console printing. This
* function also wakes the logging daemon.
*
* Context: Any context.
*/
void defer_console_output(void)
{
/*
* New messages may have been added directly to the ringbuffer
* using vprintk_store(), so wake any waiters as well.
*/
__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}
void printk_trigger_flush(void)
{
defer_console_output();
}
int vprintk_deferred(const char *fmt, va_list args)
{
return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
}
int _printk_deferred(const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = vprintk_deferred(fmt, args);
va_end(args);
return r;
}
/*
* printk rate limiting, lifted from the networking subsystem.
*
* This enforces a rate limit: not more than 10 kernel messages
* every 5s to make a denial-of-service attack impossible.
*/
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
int __printk_ratelimit(const char *func)
{
return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
/**
* printk_timed_ratelimit - caller-controlled printk ratelimiting
* @caller_jiffies: pointer to caller's state
* @interval_msecs: minimum interval between prints
*
* printk_timed_ratelimit() returns true if more than @interval_msecs
* milliseconds have elapsed since the last time printk_timed_ratelimit()
* returned true.
*/
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msecs)
{
unsigned long elapsed = jiffies - *caller_jiffies;
if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
return false;
*caller_jiffies = jiffies;
return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
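/*
 * Example (illustrative only): a caller keeps its own timestamp so the
 * rate limit applies to this call site alone:
 *
 *	static unsigned long last_msg;
 *
 *	if (printk_timed_ratelimit(&last_msg, 5000))
 *		pr_warn("device is still busy\n");
 */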
static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);
/**
* kmsg_dump_register - register a kernel log dumper.
* @dumper: pointer to the kmsg_dumper structure
*
* Adds a kernel log dumper to the system. The dump callback in the
* structure will be called when the kernel oopses or panics and must be
* set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
*/
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
unsigned long flags;
int err = -EBUSY;
/* The dump callback needs to be set */
if (!dumper->dump)
return -EINVAL;
spin_lock_irqsave(&dump_list_lock, flags);
/* Don't allow registering multiple times */
if (!dumper->registered) {
dumper->registered = 1;
list_add_tail_rcu(&dumper->list, &dump_list);
err = 0;
}
spin_unlock_irqrestore(&dump_list_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);
/**
* kmsg_dump_unregister - unregister a kmsg dumper.
* @dumper: pointer to the kmsg_dumper structure
*
* Removes a dump device from the system. Returns zero on success and
* %-EINVAL otherwise.
*/
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
unsigned long flags;
int err = -EINVAL;
spin_lock_irqsave(&dump_list_lock, flags);
if (dumper->registered) {
dumper->registered = 0;
list_del_rcu(&dumper->list);
err = 0;
}
spin_unlock_irqrestore(&dump_list_lock, flags);
synchronize_rcu();
return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
{
switch (reason) {
case KMSG_DUMP_PANIC:
return "Panic";
case KMSG_DUMP_OOPS:
return "Oops";
case KMSG_DUMP_EMERG:
return "Emergency";
case KMSG_DUMP_SHUTDOWN:
return "Shutdown";
default:
return "Unknown";
}
}
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
/**
* kmsg_dump_desc - dump kernel log to kernel message dumpers.
* @reason: the reason (oops, panic etc) for dumping
* @desc: a short string to describe what caused the panic or oops. Can be NULL
* if no additional description is available.
*
* Call each of the registered dumper's dump() callback, which can
* retrieve the kmsg records with kmsg_dump_get_line() or
* kmsg_dump_get_buffer().
*/
void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
struct kmsg_dumper *dumper;
struct kmsg_dump_detail detail = {
.reason = reason,
.description = desc};
rcu_read_lock();
list_for_each_entry_rcu(dumper, &dump_list, list) {
enum kmsg_dump_reason max_reason = dumper->max_reason;
/*
* If client has not provided a specific max_reason, default
* to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
*/
if (max_reason == KMSG_DUMP_UNDEF) {
max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
KMSG_DUMP_OOPS;
}
if (reason > max_reason)
continue;
/* invoke dumper which will iterate over records */
dumper->dump(dumper, &detail);
}
rcu_read_unlock();
}
/**
* kmsg_dump_get_line - retrieve one kmsg log line
* @iter: kmsg dump iterator
* @syslog: include the "<4>" prefixes
* @line: buffer to copy the line to
* @size: maximum size of the buffer
* @len: length of line placed into buffer
*
* Start at the beginning of the kmsg buffer, with the oldest kmsg
* record, and copy one record into the provided buffer.
*
* Consecutive calls will return the next available record moving
* towards the end of the buffer with the youngest messages.
*
* A return value of FALSE indicates that there are no more records to
* read.
*/
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len)
{
u64 min_seq = latched_seq_read_nolock(&clear_seq);
struct printk_info info;
unsigned int line_count;
struct printk_record r;
size_t l = 0;
bool ret = false;
if (iter->cur_seq < min_seq)
iter->cur_seq = min_seq;
prb_rec_init_rd(&r, &info, line, size);
/* Read text or count text lines? */
if (line) {
if (!prb_read_valid(prb, iter->cur_seq, &r))
goto out;
l = record_print_text(&r, syslog, printk_time);
} else {
if (!prb_read_valid_info(prb, iter->cur_seq,
&info, &line_count)) {
goto out;
}
l = get_record_print_text_size(&info, line_count, syslog,
printk_time);
}
iter->cur_seq = r.info->seq + 1;
ret = true;
out:
if (len)
*len = l;
return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
/**
* kmsg_dump_get_buffer - copy kmsg log lines
* @iter: kmsg dump iterator
* @syslog: include the "<4>" prefixes
* @buf: buffer to copy the line to
* @size: maximum size of the buffer
* @len_out: length of line placed into buffer
*
* Start at the end of the kmsg buffer and fill the provided buffer
* with as many of the *youngest* kmsg records that fit into it.
* If the buffer is large enough, all available kmsg records will be
* copied with a single call.
*
* Consecutive calls will fill the buffer with the next block of
* available older records, not including the earlier retrieved ones.
*
* A return value of FALSE indicates that there are no more records to
* read.
*/
bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
char *buf, size_t size, size_t *len_out)
{
u64 min_seq = latched_seq_read_nolock(&clear_seq);
struct printk_info info;
struct printk_record r;
u64 seq;
u64 next_seq;
size_t len = 0;
bool ret = false;
bool time = printk_time;
if (!buf || !size)
goto out;
if (iter->cur_seq < min_seq)
iter->cur_seq = min_seq;
if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
if (info.seq != iter->cur_seq) {
/* messages are gone, move to first available one */
iter->cur_seq = info.seq;
}
}
/* last entry */
if (iter->cur_seq >= iter->next_seq)
goto out;
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump. Pass in size-1
* because this function (by way of record_print_text()) will
* not write more than size-1 bytes of text into @buf.
*/
seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
size - 1, syslog, time);
/*
* Next kmsg_dump_get_buffer() invocation will dump block of
* older records stored right before this one.
*/
next_seq = seq;
prb_rec_init_rd(&r, &info, buf, size);
prb_for_each_record(seq, prb, seq, &r) {
if (r.info->seq >= iter->next_seq)
break;
len += record_print_text(&r, syslog, time);
/* Adjust record to store to remaining buffer space. */
prb_rec_init_rd(&r, &info, buf + len, size - len);
}
iter->next_seq = next_seq;
ret = true;
out:
if (len_out)
*len_out = len;
return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
/**
* kmsg_dump_rewind - reset the iterator
* @iter: kmsg dump iterator
*
* Reset the dumper's iterator so that kmsg_dump_get_line() and
* kmsg_dump_get_buffer() can be called again and used multiple
* times within the same dumper.dump() callback.
*/
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
iter->cur_seq = latched_seq_read_nolock(&clear_seq);
iter->next_seq = prb_next_seq(prb);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
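/*
 * Example (illustrative only): a hypothetical dumper walks all records
 * line by line from its dump() callback. The foo_* names are made up.
 *
 *	static void foo_dump(struct kmsg_dumper *dumper,
 *			     struct kmsg_dump_detail *detail)
 *	{
 *		struct kmsg_dump_iter iter;
 *		static char line[1024];
 *		size_t len;
 *
 *		kmsg_dump_rewind(&iter);
 *		while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
 *			foo_store(line, len);	// hypothetical persistence helper
 *	}
 *
 *	static struct kmsg_dumper foo_dumper = {
 *		.dump = foo_dump,
 *	};
 *
 *	kmsg_dump_register(&foo_dumper);
 */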
/**
* console_try_replay_all - try to replay kernel log on consoles
*
* Try to obtain lock on console subsystem and replay all
* available records in printk buffer on the consoles.
* Does nothing if lock is not obtained.
*
* Context: Any, except for NMI.
*/
void console_try_replay_all(void)
{
struct console_flush_type ft;
printk_get_console_flush_type(&ft);
if (console_trylock()) {
__console_rewind_all();
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_offload)
defer_console_output();
/* Consoles are flushed as part of console_unlock(). */
console_unlock();
}
}
#endif
#ifdef CONFIG_SMP
static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
bool is_printk_cpu_sync_owner(void)
{
return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
}
/**
* __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
* spinning lock is not owned by any CPU.
*
* Context: Any context.
*/
void __printk_cpu_sync_wait(void)
{
do {
cpu_relax();
} while (atomic_read(&printk_cpu_sync_owner) != -1);
}
EXPORT_SYMBOL(__printk_cpu_sync_wait);
/**
* __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
* spinning lock.
*
* If no processor has the lock, the calling processor takes the lock and
* becomes the owner. If the calling processor is already the owner of the
* lock, this function succeeds immediately.
*
* Context: Any context. Expects interrupts to be disabled.
* Return: 1 on success, otherwise 0.
*/
int __printk_cpu_sync_try_get(void)
{
int cpu;
int old;
cpu = smp_processor_id();
/*
* Guarantee loads and stores from this CPU when it is the lock owner
* are _not_ visible to the previous lock owner. This pairs with
* __printk_cpu_sync_put:B.
*
* Memory barrier involvement:
*
* If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
* then __printk_cpu_sync_put:A can never read from
* __printk_cpu_sync_try_get:B.
*
* Relies on:
*
* RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
* of the previous CPU
* matching
* ACQUIRE from __printk_cpu_sync_try_get:A to
* __printk_cpu_sync_try_get:B of this CPU
*/
old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
cpu); /* LMM(__printk_cpu_sync_try_get:A) */
if (old == -1) {
/*
* This CPU is now the owner and begins loading/storing
* data: LMM(__printk_cpu_sync_try_get:B)
*/
return 1;
} else if (old == cpu) {
/* This CPU is already the owner. */
atomic_inc(&printk_cpu_sync_nested);
return 1;
}
return 0;
}
EXPORT_SYMBOL(__printk_cpu_sync_try_get);
/**
* __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
*
* The calling processor must be the owner of the lock.
*
* Context: Any context. Expects interrupts to be disabled.
*/
void __printk_cpu_sync_put(void)
{
if (atomic_read(&printk_cpu_sync_nested)) {
atomic_dec(&printk_cpu_sync_nested);
return;
}
/*
* This CPU is finished loading/storing data:
* LMM(__printk_cpu_sync_put:A)
*/
/*
* Guarantee loads and stores from this CPU when it was the
* lock owner are visible to the next lock owner. This pairs
* with __printk_cpu_sync_try_get:A.
*
* Memory barrier involvement:
*
* If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
* then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
*
* Relies on:
*
* RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
* of this CPU
* matching
* ACQUIRE from __printk_cpu_sync_try_get:A to
* __printk_cpu_sync_try_get:B of the next CPU
*/
atomic_set_release(&printk_cpu_sync_owner,
-1); /* LMM(__printk_cpu_sync_put:B) */
}
EXPORT_SYMBOL(__printk_cpu_sync_put);
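/*
 * Example (illustrative only): these primitives are normally reached
 * through the printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore()
 * wrappers (defined in the printk header) to keep multi-line output from
 * different CPUs from interleaving, e.g. when dumping a backtrace:
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	// emit a block of related printk() lines
 *	printk_cpu_sync_put_irqrestore(flags);
 */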
#endif /* CONFIG_SMP */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/notifier.h>
/*
* Notifier chain core routines. The exported routines below
* are layered on top of these, with appropriate locking added.
*/
static int notifier_chain_register(struct notifier_block **nl,
struct notifier_block *n,
bool unique_priority)
{
while ((*nl) != NULL) {
if (unlikely((*nl) == n)) {
WARN(1, "notifier callback %ps already registered",
n->notifier_call);
return -EEXIST;
}
if (n->priority > (*nl)->priority)
break;
if (n->priority == (*nl)->priority && unique_priority)
return -EBUSY;
nl = &((*nl)->next);
}
n->next = *nl;
rcu_assign_pointer(*nl, n);
trace_notifier_register((void *)n->notifier_call);
return 0;
}
static int notifier_chain_unregister(struct notifier_block **nl,
struct notifier_block *n)
{
while ((*nl) != NULL) {
if ((*nl) == n) {
rcu_assign_pointer(*nl, n->next);
trace_notifier_unregister((void *)n->notifier_call);
return 0;
}
nl = &((*nl)->next);
}
return -ENOENT;
}
/**
* notifier_call_chain - Informs the registered notifiers about an event.
* @nl: Pointer to head of the blocking notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
* @nr_to_call: Number of notifier functions to be called. Pass -1 to
* call all of them (don't care).
* @nr_calls: Records the number of notifications sent. Pass NULL if
* the count is not needed (don't care).
* Return: notifier_call_chain returns the value returned by the
* last notifier function called.
*/
static int notifier_call_chain(struct notifier_block **nl,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
{
int ret = NOTIFY_DONE;
struct notifier_block *nb, *next_nb;
nb = rcu_dereference_raw(*nl);
while (nb && nr_to_call) {
next_nb = rcu_dereference_raw(nb->next);
#ifdef CONFIG_DEBUG_NOTIFIERS
if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
WARN(1, "Invalid notifier called!");
nb = next_nb;
continue;
}
#endif
trace_notifier_run((void *)nb->notifier_call);
ret = nb->notifier_call(nb, val, v);
if (nr_calls)
(*nr_calls)++;
if (ret & NOTIFY_STOP_MASK)
break;
nb = next_nb;
nr_to_call--;
}
return ret;
}
NOKPROBE_SYMBOL(notifier_call_chain);
/**
* notifier_call_chain_robust - Inform the registered notifiers about an event
* and rollback on error.
* @nl: Pointer to head of the blocking notifier chain
* @val_up: Value passed unmodified to the notifier function
* @val_down: Value passed unmodified to the notifier function when recovering
* from an error on @val_up
* @v: Pointer passed unmodified to the notifier function
*
* NOTE: It is important the @nl chain doesn't change between the two
* invocations of notifier_call_chain() such that we visit the
* exact same notifier callbacks; this rules out any RCU usage.
*
* Return: the return value of the @val_up call.
*/
static int notifier_call_chain_robust(struct notifier_block **nl,
unsigned long val_up, unsigned long val_down,
void *v)
{
int ret, nr = 0;
ret = notifier_call_chain(nl, val_up, v, -1, &nr);
if (ret & NOTIFY_STOP_MASK)
notifier_call_chain(nl, val_down, v, nr-1, NULL);
return ret;
}
/*
* Atomic notifier chain routines. Registration and unregistration
* use a spinlock, and call_chain is synchronized by RCU (no locks).
*/
/**
* atomic_notifier_chain_register - Add notifier to an atomic notifier chain
* @nh: Pointer to head of the atomic notifier chain
* @n: New entry in notifier chain
*
* Adds a notifier to an atomic notifier chain.
*
* Returns 0 on success, %-EEXIST on error.
*/
int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
struct notifier_block *n)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&nh->lock, flags);
ret = notifier_chain_register(&nh->head, n, false);
spin_unlock_irqrestore(&nh->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
/**
* atomic_notifier_chain_register_unique_prio - Add notifier to an atomic notifier chain
* @nh: Pointer to head of the atomic notifier chain
* @n: New entry in notifier chain
*
* Adds a notifier to an atomic notifier chain if there is no other
* notifier registered using the same priority.
*
* Returns 0 on success, %-EEXIST or %-EBUSY on error.
*/
int atomic_notifier_chain_register_unique_prio(struct atomic_notifier_head *nh,
struct notifier_block *n)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&nh->lock, flags);
ret = notifier_chain_register(&nh->head, n, true);
spin_unlock_irqrestore(&nh->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_register_unique_prio);
/**
* atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
* @nh: Pointer to head of the atomic notifier chain
* @n: Entry to remove from notifier chain
*
* Removes a notifier from an atomic notifier chain.
*
* Returns zero on success or %-ENOENT on failure.
*/
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *n)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&nh->lock, flags);
ret = notifier_chain_unregister(&nh->head, n);
spin_unlock_irqrestore(&nh->lock, flags);
synchronize_rcu();
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
/**
* atomic_notifier_call_chain - Call functions in an atomic notifier chain
* @nh: Pointer to head of the atomic notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
* Calls each function in a notifier chain in turn. The functions
* run in an atomic context, so they must not block.
* This routine uses RCU to synchronize with changes to the chain.
*
* If the return value of the notifier can be and'ed
* with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
* will return immediately, with the return value of
* the notifier function which halted execution.
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v)
{
int ret;
rcu_read_lock();
ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
NOKPROBE_SYMBOL(atomic_notifier_call_chain);
/**
* atomic_notifier_call_chain_is_empty - Check whether notifier chain is empty
* @nh: Pointer to head of the atomic notifier chain
*
* Checks whether notifier chain is empty.
*
* Returns true if the notifier chain is empty, false otherwise.
*/
bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh)
{
return !rcu_access_pointer(nh->head);
}
/*
* Blocking notifier chain routines. All access to the chain is
* synchronized by an rwsem.
*/
static int __blocking_notifier_chain_register(struct blocking_notifier_head *nh,
struct notifier_block *n,
bool unique_priority)
{
int ret;
/*
* This code gets used during boot-up, when task switching is
* not yet working and interrupts must remain disabled. At
* such times we must not call down_write().
*/
if (unlikely(system_state == SYSTEM_BOOTING))
return notifier_chain_register(&nh->head, n, unique_priority);
down_write(&nh->rwsem);
ret = notifier_chain_register(&nh->head, n, unique_priority);
up_write(&nh->rwsem);
return ret;
}
/**
* blocking_notifier_chain_register - Add notifier to a blocking notifier chain
* @nh: Pointer to head of the blocking notifier chain
* @n: New entry in notifier chain
*
* Adds a notifier to a blocking notifier chain.
* Must be called in process context.
*
* Returns 0 on success, %-EEXIST on error.
*/
int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
struct notifier_block *n)
{
return __blocking_notifier_chain_register(nh, n, false);
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
* blocking_notifier_chain_register_unique_prio - Add notifier to a blocking notifier chain
* @nh: Pointer to head of the blocking notifier chain
* @n: New entry in notifier chain
*
* Adds a notifier to a blocking notifier chain if there is no other
* notifier registered using the same priority.
*
* Returns 0 on success, %-EEXIST or %-EBUSY on error.
*/
int blocking_notifier_chain_register_unique_prio(struct blocking_notifier_head *nh,
struct notifier_block *n)
{
return __blocking_notifier_chain_register(nh, n, true);
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_register_unique_prio);
/**
* blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
* @nh: Pointer to head of the blocking notifier chain
* @n: Entry to remove from notifier chain
*
* Removes a notifier from a blocking notifier chain.
* Must be called from process context.
*
* Returns zero on success or %-ENOENT on failure.
*/
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
struct notifier_block *n)
{
int ret;
/*
* This code gets used during boot-up, when task switching is
* not yet working and interrupts must remain disabled. At
* such times we must not call down_write().
*/
if (unlikely(system_state == SYSTEM_BOOTING))
return notifier_chain_unregister(&nh->head, n);
down_write(&nh->rwsem);
ret = notifier_chain_unregister(&nh->head, n);
up_write(&nh->rwsem);
return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
int ret = NOTIFY_DONE;
/*
* We check the head outside the lock, but if this access is
* racy then it does not matter what the result of the test
* is, we re-check the list after having taken the lock anyway:
*/
if (rcu_access_pointer(nh->head)) {
down_read(&nh->rwsem);
ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
up_read(&nh->rwsem);
}
return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust);
/**
* blocking_notifier_call_chain - Call functions in a blocking notifier chain
* @nh: Pointer to head of the blocking notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
* Calls each function in a notifier chain in turn. The functions
* run in a process context, so they are allowed to block.
*
* If the return value of the notifier can be and'ed
* with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
* will return immediately, with the return value of
* the notifier function which halted execution.
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v)
{
int ret = NOTIFY_DONE;
/*
* We check the head outside the lock, but if this access is
* racy then it does not matter what the result of the test
* is, we re-check the list after having taken the lock anyway:
*/
if (rcu_access_pointer(nh->head)) {
down_read(&nh->rwsem);
ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
up_read(&nh->rwsem);
}
return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
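/*
 * Example (illustrative only): a hypothetical subsystem exports a
 * blocking notifier chain, clients subscribe with a notifier_block,
 * and the subsystem publishes events through the chain. The foo_*
 * names and SOME_ACTION event value are made up.
 *
 *	static BLOCKING_NOTIFIER_HEAD(foo_chain);
 *
 *	static int foo_event(struct notifier_block *nb, unsigned long action,
 *			     void *data)
 *	{
 *		// react to the event; action and data are subsystem defined
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_event,
 *	};
 *
 *	blocking_notifier_chain_register(&foo_chain, &foo_nb);
 *	blocking_notifier_call_chain(&foo_chain, SOME_ACTION, NULL);
 */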
/*
* Raw notifier chain routines. There is no protection;
* the caller must provide it. Use at your own risk!
*/
/**
* raw_notifier_chain_register - Add notifier to a raw notifier chain
* @nh: Pointer to head of the raw notifier chain
* @n: New entry in notifier chain
*
* Adds a notifier to a raw notifier chain.
* All locking must be provided by the caller.
*
* Returns 0 on success, %-EEXIST on error.
*/
int raw_notifier_chain_register(struct raw_notifier_head *nh,
struct notifier_block *n)
{
return notifier_chain_register(&nh->head, n, false);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
/**
* raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
* @nh: Pointer to head of the raw notifier chain
* @n: Entry to remove from notifier chain
*
* Removes a notifier from a raw notifier chain.
* All locking must be provided by the caller.
*
* Returns zero on success or %-ENOENT on failure.
*/
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
struct notifier_block *n)
{
return notifier_chain_unregister(&nh->head, n);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
return notifier_call_chain_robust(&nh->head, val_up, val_down, v);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust);
/**
* raw_notifier_call_chain - Call functions in a raw notifier chain
* @nh: Pointer to head of the raw notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
* Calls each function in a notifier chain in turn. The functions
* run in an undefined context.
* All locking must be provided by the caller.
*
* If the return value of the notifier can be and'ed
* with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
* will return immediately, with the return value of
* the notifier function which halted execution.
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v)
{
return notifier_call_chain(&nh->head, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/*
* SRCU notifier chain routines. Registration and unregistration
* use a mutex, and call_chain is synchronized by SRCU (no locks).
*/
/**
* srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
* @nh: Pointer to head of the SRCU notifier chain
* @n: New entry in notifier chain
*
* Adds a notifier to an SRCU notifier chain.
* Must be called in process context.
*
* Returns 0 on success, %-EEXIST on error.
*/
int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *n)
{
int ret;
/*
* This code gets used during boot-up, when task switching is
* not yet working and interrupts must remain disabled. At
* such times we must not call mutex_lock().
*/
if (unlikely(system_state == SYSTEM_BOOTING))
return notifier_chain_register(&nh->head, n, false);
mutex_lock(&nh->mutex);
ret = notifier_chain_register(&nh->head, n, false);
mutex_unlock(&nh->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
/**
* srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
* @nh: Pointer to head of the SRCU notifier chain
* @n: Entry to remove from notifier chain
*
* Removes a notifier from an SRCU notifier chain.
* Must be called from process context.
*
* Returns zero on success or %-ENOENT on failure.
*/
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
struct notifier_block *n)
{
int ret;
/*
* This code gets used during boot-up, when task switching is
* not yet working and interrupts must remain disabled. At
* such times we must not call mutex_lock().
*/
if (unlikely(system_state == SYSTEM_BOOTING))
return notifier_chain_unregister(&nh->head, n);
mutex_lock(&nh->mutex);
ret = notifier_chain_unregister(&nh->head, n);
mutex_unlock(&nh->mutex);
synchronize_srcu(&nh->srcu);
return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
/**
* srcu_notifier_call_chain - Call functions in an SRCU notifier chain
* @nh: Pointer to head of the SRCU notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
* Calls each function in a notifier chain in turn. The functions
* run in a process context, so they are allowed to block.
*
* If the return value of the notifier can be and'ed
* with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
* will return immediately, with the return value of
* the notifier function which halted execution.
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v)
{
int ret;
int idx;
idx = srcu_read_lock(&nh->srcu);
ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
srcu_read_unlock(&nh->srcu, idx);
return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
/**
* srcu_init_notifier_head - Initialize an SRCU notifier head
* @nh: Pointer to head of the srcu notifier chain
*
* Unlike other sorts of notifier heads, SRCU notifier heads require
* dynamic initialization. Be sure to call this routine before
* calling any of the other SRCU notifier routines for this head.
*
* If an SRCU notifier head is deallocated, it must first be cleaned
* up by calling srcu_cleanup_notifier_head(). Otherwise the head's
* per-cpu data (used by the SRCU mechanism) will leak.
*/
void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
mutex_init(&nh->mutex);
if (init_srcu_struct(&nh->srcu) < 0)
BUG();
nh->head = NULL;
}
EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
static ATOMIC_NOTIFIER_HEAD(die_chain);
int notrace notify_die(enum die_val val, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
.regs = regs,
.str = str,
.err = err,
.trapnr = trap,
.signr = sig,
};
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"notify_die called but RCU thinks we're quiescent");
return atomic_notifier_call_chain(&die_chain, val, &args);
}
NOKPROBE_SYMBOL(notify_die);
int register_die_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);
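/*
 * Example (illustrative only): a debugging facility can hook fatal
 * traps through the die chain. The foo_* names are hypothetical, and
 * DIE_OOPS is only available on architectures that define it.
 *
 *	static int foo_die_handler(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *
 *		if (val == DIE_OOPS)
 *			foo_record_oops(args->regs);	// hypothetical
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_die_nb = {
 *		.notifier_call = foo_die_handler,
 *	};
 *
 *	register_die_notifier(&foo_die_nb);
 */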
int unregister_die_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_die_notifier);
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the Interfaces handler.
*
* Version: @(#)dev.h 1.0.10 08/12/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Bjorn Ekwall. <bj0rn@blox.se>
* Pekka Riikonen <priikone@poseidon.pspt.fi>
*
* Moved to /usr/include/linux for NET3
*/
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
#include <net/neighbour_tables.h>
struct netpoll_info;
struct device;
struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_config;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
struct phy_link_topology;
struct hwtstamp_provider;
typedef u32 xdp_features_t;
void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
#define MAX_NEST_DEV 8
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
*
* - qdisc return codes
* - driver transmit return codes
* - errno values
*
* Drivers are allowed to return any one of those in their hard_start_xmit()
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
* higher layers. Virtual network devices transmit synchronously; in this case
* the driver transmit return codes are consumed by dev_queue_xmit(), and all
* others are propagated to higher layers.
*/
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP 0x01 /* skb dropped */
#define NET_XMIT_CN 0x02 /* congestion notification */
#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
* indicates that the device will soon be dropping packets, or already drops
* some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
/* Driver transmit return codes */
#define NETDEV_TX_MASK 0xf0
enum netdev_tx {
__NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
NETDEV_TX_OK = 0x00, /* driver took care of packet */
NETDEV_TX_BUSY = 0x10, /* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;
/*
* Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
* hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
*/
static inline bool dev_xmit_complete(int rc)
{
/*
* Positive cases with an skb consumed by a driver:
* - successful transmission (rc == NETDEV_TX_OK)
* - error while transmitting (rc < 0)
* - error while queueing to a different device (rc & NET_XMIT_MASK)
*/
if (likely(rc < NET_XMIT_MASK))
return true;
return false;
}
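/*
 * Example (illustrative only): a driver's ndo_start_xmit() returns one
 * of the driver transmit codes; NETDEV_TX_BUSY means the skb was not
 * consumed and the core will retry it later. The foo_* helpers are
 * hypothetical.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		if (foo_tx_ring_full(dev))	// hypothetical ring check
 *			return NETDEV_TX_BUSY;	// skb not consumed, retried later
 *
 *		foo_queue_for_dma(dev, skb);	// hypothetical
 *		return NETDEV_TX_OK;		// skb consumed by the driver
 *	}
 */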
/*
* Compute the worst-case header length according to the protocols
* used.
*/
#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif
#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
!IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
* Old network device statistics. Fields are native words
* (unsigned long) so they can be read and written atomically.
*/
#define NET_DEV_STAT(FIELD) \
union { \
unsigned long FIELD; \
atomic_long_t __##FIELD; \
}
struct net_device_stats {
NET_DEV_STAT(rx_packets);
NET_DEV_STAT(tx_packets);
NET_DEV_STAT(rx_bytes);
NET_DEV_STAT(tx_bytes);
NET_DEV_STAT(rx_errors);
NET_DEV_STAT(tx_errors);
NET_DEV_STAT(rx_dropped);
NET_DEV_STAT(tx_dropped);
NET_DEV_STAT(multicast);
NET_DEV_STAT(collisions);
NET_DEV_STAT(rx_length_errors);
NET_DEV_STAT(rx_over_errors);
NET_DEV_STAT(rx_crc_errors);
NET_DEV_STAT(rx_frame_errors);
NET_DEV_STAT(rx_fifo_errors);
NET_DEV_STAT(rx_missed_errors);
NET_DEV_STAT(tx_aborted_errors);
NET_DEV_STAT(tx_carrier_errors);
NET_DEV_STAT(tx_fifo_errors);
NET_DEV_STAT(tx_heartbeat_errors);
NET_DEV_STAT(tx_window_errors);
NET_DEV_STAT(rx_compressed);
NET_DEV_STAT(tx_compressed);
};
#undef NET_DEV_STAT
/* per-cpu stats, allocated on demand.
* Try to fit them in a single cache line, for dev_get_stats() sake.
*/
struct net_device_core_stats {
unsigned long rx_dropped;
unsigned long tx_dropped;
unsigned long rx_nohandler;
unsigned long rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));
#include <linux/cache.h>
#include <linux/skbuff.h>
struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
struct rb_node node;
unsigned char addr[MAX_ADDR_LEN];
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
#define NETDEV_HW_ADDR_T_SAN 2
#define NETDEV_HW_ADDR_T_UNICAST 3
#define NETDEV_HW_ADDR_T_MULTICAST 4
bool global_use;
int sync_cnt;
int refcount;
int synced;
struct rcu_head rcu_head;
};
struct netdev_hw_addr_list {
struct list_head list;
int count;
/* Auxiliary tree for faster lookup on addition and deletion */
struct rb_root tree;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
list_for_each_entry(ha, &(l)->list, list)
#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_for_each_synced_uc_addr(_ha, _dev) \
netdev_for_each_uc_addr((_ha), (_dev)) \
if ((_ha)->sync_cnt)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
#define netdev_for_each_synced_mc_addr(_ha, _dev) \
netdev_for_each_mc_addr((_ha), (_dev)) \
if ((_ha)->sync_cnt)
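/*
 * Example (illustrative only): drivers typically walk the multicast
 * address list from their ndo_set_rx_mode() callback to program
 * hardware filters. foo_hw_add_mc_filter() is a hypothetical helper.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		foo_hw_add_mc_filter(dev, ha->addr);
 */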
struct hh_cache {
unsigned int hh_len;
seqlock_t hh_lock;
/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD 16
#define HH_DATA_OFF(__len) \
(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
* Alternative is:
* dev->hard_header_len ? (dev->hard_header_len +
* (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
*
* We could use other alignment values, but we must maintain the
* relationship HH alignment <= LL alignment.
*/
#define LL_RESERVED_SPACE(dev) \
((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
& ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
& ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
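/*
 * Illustrative sketch (not part of the original header): protocol code that
 * builds an outgoing packet typically reserves LL_RESERVED_SPACE() bytes of
 * headroom so the hard header can be pushed later without reallocation;
 * payload_len is a hypothetical length.
 *
 *	skb = alloc_skb(payload_len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */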
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
bool (*validate)(const char *ll_header, unsigned int len);
__be16 (*parse_protocol)(const struct sk_buff *skb);
};
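/*
 * Illustrative note (not part of the original header): most Ethernet drivers
 * do not fill struct header_ops themselves; ether_setup() points
 * dev->header_ops at the generic Ethernet implementation. A virtual device
 * with a custom L2 header would supply at least ->create and ->parse,
 * e.g. (the mydev_* names are hypothetical):
 *
 *	static const struct header_ops mydev_header_ops = {
 *		.create	= mydev_header_create,
 *		.parse	= mydev_header_parse,
 *	};
 *
 *	dev->header_ops = &mydev_header_ops;
 */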
/* These flag bits are private to the generic network queueing
* layer; they may not be explicitly referenced by any other
* code.
*/
enum netdev_state_t {
__LINK_STATE_START,
__LINK_STATE_PRESENT,
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
__LINK_STATE_TESTING,
};
struct gro_list {
struct list_head list;
int count;
};
/*
* size of gro hash buckets, must be <= the number of bits in
* gro_node::bitmask
*/
#define GRO_HASH_BUCKETS 8
/**
* struct gro_node - structure to support Generic Receive Offload
* @bitmask: bitmask to indicate used buckets in @hash
* @hash: hashtable of pending aggregated skbs, separated by flows
* @rx_list: list of pending ``GRO_NORMAL`` skbs
* @rx_count: cached current length of @rx_list
* @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone
*/
struct gro_node {
unsigned long bitmask;
struct gro_list hash[GRO_HASH_BUCKETS];
struct list_head rx_list;
u32 rx_count;
u32 cached_napi_id;
};
/*
* Structure for per-NAPI config
*/
struct napi_config {
u64 gro_flush_timeout;
u64 irq_suspend_timeout;
u32 defer_hard_irqs;
cpumask_t affinity_mask;
u8 threaded;
unsigned int napi_id;
};
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
* to the per-CPU poll_list, and whoever clears that bit
* can remove from the list right before clearing the bit.
*/
struct list_head poll_list;
unsigned long state;
int weight;
u32 defer_hard_irqs_count;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
/* CPU actively polling if netpoll is configured */
int poll_owner;
#endif
/* CPU on which NAPI has been scheduled for processing */
int list_owner;
struct net_device *dev;
struct sk_buff *skb;
struct gro_node gro;
struct hrtimer timer;
/* all fields past this point are write-protected by netdev_lock */
struct task_struct *thread;
unsigned long gro_flush_timeout;
unsigned long irq_suspend_timeout;
u32 defer_hard_irqs;
/* control-path-only fields follow */
u32 napi_id;
struct list_head dev_list;
struct hlist_node napi_hash_node;
int irq;
struct irq_affinity_notify notify;
int napi_rmap_idx;
int index;
struct napi_config *config;
};
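/*
 * Illustrative sketch (not part of the original header): a driver typically
 * embeds a napi_struct in its private data and registers it together with a
 * poll callback; the mydrv_* names are hypothetical, and the three-argument
 * netif_napi_add() form assumes a recent kernel where the weight parameter
 * has been dropped.
 *
 *	netif_napi_add(dev, &priv->napi, mydrv_poll);
 *	napi_enable(&priv->napi);
 */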
enum {
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_MISSED, /* reschedule a napi */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_LISTED, /* NAPI added to system lists */
NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */
};
enum {
NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER),
};
enum gro_result {
GRO_MERGED,
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
/*
* enum rx_handler_result - Possible return values for rx_handlers.
* @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
* further.
* @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
* case skb->dev was changed by rx_handler.
* @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
* @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
*
* rx_handlers are functions called from inside __netif_receive_skb(), to do
* special processing of the skb, prior to delivery to protocol handlers.
*
* Currently, a net_device can only have a single rx_handler registered. Trying
* to register a second rx_handler will return -EBUSY.
*
* To register a rx_handler on a net_device, use netdev_rx_handler_register().
* To unregister a rx_handler on a net_device, use
* netdev_rx_handler_unregister().
*
* Upon return, rx_handler is expected to tell __netif_receive_skb() what to
* do with the skb.
*
* If the rx_handler consumed the skb in some way, it should return
* RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
* the skb to be delivered in some other way.
*
* If the rx_handler changed skb->dev, to divert the skb to another
* net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
* new device will be called if it exists.
*
* If the rx_handler decides the skb should be ignored, it should return
* RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
* are registered on exact device (ptype->dev == skb->dev).
*
* If the rx_handler didn't change skb->dev, but wants the skb to be normally
* delivered, it should return RX_HANDLER_PASS.
*
* A device without a registered rx_handler will behave as if rx_handler
* returned RX_HANDLER_PASS.
*/
enum rx_handler_result {
RX_HANDLER_CONSUMED,
RX_HANDLER_ANOTHER,
RX_HANDLER_EXACT,
RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
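/*
 * Illustrative sketch (not part of the original header): a minimal rx_handler
 * that either steals the skb or lets normal delivery continue; the mydrv_*
 * helpers are hypothetical.
 *
 *	static rx_handler_result_t mydrv_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (!mydrv_wants(skb))
 *			return RX_HANDLER_PASS;
 *
 *		mydrv_queue_rx(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 * Registered (under rtnl) with:
 *	err = netdev_rx_handler_register(dev, mydrv_handle_frame, priv);
 */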
void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);
static inline bool napi_disable_pending(struct napi_struct *n)
{
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}
/**
* napi_is_scheduled - test if NAPI is scheduled
* @n: NAPI context
*
* This check is "best-effort". With no locking implemented,
* a NAPI can be scheduled or terminated right after this check
* and produce inexact results.
*
* NAPI_STATE_SCHED is an internal state, napi_is_scheduled
* should not be used normally and napi_schedule should be
* used instead.
*
* Use only if the driver really needs to check if a NAPI
* is scheduled for example in the context of delayed timer
* that can be skipped if a NAPI is already scheduled.
*
* Return: True if NAPI is scheduled, False otherwise.
*/
static inline bool napi_is_scheduled(struct napi_struct *n)
{
return test_bit(NAPI_STATE_SCHED, &n->state);
}
bool napi_schedule_prep(struct napi_struct *n);
/**
* napi_schedule - schedule NAPI poll
* @n: NAPI context
*
* Schedule NAPI poll routine to be called if it is not already
* running.
* Return: true if we schedule a NAPI or false if not.
* Refer to napi_schedule_prep() for additional reason on why
* a NAPI might not be scheduled.
*/
static inline bool napi_schedule(struct napi_struct *n)
{
if (napi_schedule_prep(n)) {
__napi_schedule(n);
return true;
}
return false;
}
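/*
 * Illustrative sketch (not part of the original header): the usual caller of
 * napi_schedule() is the device interrupt handler, which masks further
 * interrupts and defers the work to the poll routine; the mydrv_* names are
 * hypothetical.
 *
 *	static irqreturn_t mydrv_isr(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		mydrv_disable_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */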
/**
* napi_schedule_irqoff - schedule NAPI poll
* @n: NAPI context
*
* Variant of napi_schedule(), assuming hard irqs are masked.
*/
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
if (napi_schedule_prep(n))
__napi_schedule_irqoff(n);
}
/**
* napi_complete_done - NAPI processing complete
* @n: NAPI context
* @work_done: number of packets processed
*
* Mark NAPI processing as complete. Should only be called if poll budget
* has not been completely consumed.
* Prefer over napi_complete().
* Return: false if device should avoid rearming interrupts.
*/
bool napi_complete_done(struct napi_struct *n, int work_done);
static inline bool napi_complete(struct napi_struct *n)
{
return napi_complete_done(n, 0);
}
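/*
 * Illustrative sketch (not part of the original header): the counterpart of
 * the scheduling side is the poll routine, which calls napi_complete_done()
 * only when it used less than the full budget, and re-enables device
 * interrupts only if that call returns true; the mydrv_* names are
 * hypothetical.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = mydrv_clean_rx(napi, budget);
 *
 *		if (work_done < budget && napi_complete_done(napi, work_done))
 *			mydrv_enable_irq(napi);
 *		return work_done;
 *	}
 */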
void netif_threaded_enable(struct net_device *dev);
int dev_set_threaded(struct net_device *dev,
enum netdev_napi_threaded threaded);
void napi_disable(struct napi_struct *n);
void napi_disable_locked(struct napi_struct *n);
void napi_enable(struct napi_struct *n);
void napi_enable_locked(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
* @n: NAPI context
*
* Wait until NAPI is done being scheduled on this context.
* Waits till any outstanding processing completes but
* does not disable future activations.
*/
static inline void napi_synchronize(const struct napi_struct *n)
{
if (IS_ENABLED(CONFIG_SMP))
while (test_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
else
barrier();
}
/**
* napi_if_scheduled_mark_missed - if napi is running, set the
* NAPIF_STATE_MISSED
* @n: NAPI context
*
* If napi is running, set the NAPIF_STATE_MISSED, and return true if
* NAPI is scheduled.
**/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
unsigned long val, new;
val = READ_ONCE(n->state);
do {
if (val & NAPIF_STATE_DISABLE)
return true;
if (!(val & NAPIF_STATE_SCHED))
return false;
new = val | NAPIF_STATE_MISSED;
} while (!try_cmpxchg(&n->state, &val, new));
return true;
}
enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,
__QUEUE_STATE_STACK_XOFF,
__QUEUE_STATE_FROZEN,
};
#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
#define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
QUEUE_STATE_FROZEN)
/*
* __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
* netif_tx_* functions below are used to manipulate this flag. The
* __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
* queue independently. The netif_xmit_*stopped functions below are called
* to check if the queue has been stopped by the driver or stack (either
* of the XOFF bits is set in the state). Drivers should not need to call
* the netif_xmit*stopped functions; they should only be using netif_tx_*.
*/
struct netdev_queue {
/*
* read-mostly part
*/
struct net_device *dev;
netdevice_tracker dev_tracker;
struct Qdisc __rcu *qdisc;
struct Qdisc __rcu *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
const struct attribute_group **groups;
#endif
unsigned long tx_maxrate;
/*
* Number of TX timeouts for this queue
* (/sys/class/net/DEV/Q/trans_timeout)
*/
atomic_long_t trans_timeout;
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
#ifdef CONFIG_XDP_SOCKETS
/* "ops protected", see comment about net_device::lock */
struct xsk_buff_pool *pool;
#endif
/*
* write-mostly part
*/
#ifdef CONFIG_BQL
struct dql dql;
#endif
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
/*
* Time (in jiffies) of last Tx
*/
unsigned long trans_start;
unsigned long state;
/*
* slow- / control-path part
*/
/* NAPI instance for the queue
* "ops protected", see comment about net_device::lock
*/
struct napi_struct *napi;
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
int numa_node;
#endif
} ____cacheline_aligned_in_smp;
extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;
/*
* sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
* == 1 : For initns only
* == 2 : For none.
*/
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
return !fb_tunnels_only_for_init_net || (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
return true;
#endif
}
static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
return 0;
#endif
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
return q->numa_node;
#else
return NUMA_NO_NODE;
#endif
}
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
q->numa_node = node;
#endif
}
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
#endif
/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
XPS_CPUS = 0,
XPS_RXQS,
XPS_MAPS_MAX,
};
#ifdef CONFIG_XPS
/*
* This structure holds an XPS map which can be of variable length. The
* map is an array of queues.
*/
struct xps_map {
unsigned int len;
unsigned int alloc_len;
struct rcu_head rcu;
u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
- sizeof(struct xps_map)) / sizeof(u16))
/*
* This structure holds all XPS maps for device. Maps are indexed by CPU.
*
* We keep track of the number of cpus/rxqs used when the struct is allocated,
* in nr_ids. This helps us avoid accessing out-of-bounds memory.
*
* We keep track of the number of traffic classes used when the struct is
* allocated, in num_tc. This will be used to navigate the maps, to ensure we're
* not crossing its upper bound, as the original dev->num_tc can be updated in
* the meantime.
*/
struct xps_dev_maps {
struct rcu_head rcu;
unsigned int nr_ids;
s16 num_tc;
struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};
#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
(_rxqs * (_tcs) * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE 16
#define TC_BITMASK 15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
u16 count;
u16 offset;
};
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
* This structure is to hold information about the device
* configured to run FCoE protocol stack.
*/
struct netdev_fcoe_hbainfo {
char manufacturer[64];
char serial_number[64];
char hardware_version[64];
char driver_version[64];
char optionrom_version[64];
char firmware_version[64];
char model[256];
char model_description[256];
};
#endif
#define MAX_PHYS_ITEM_ID_LEN 32
/* This structure holds a unique identifier to identify some
* physical item (port for example) used by a netdevice.
*/
struct netdev_phys_item_id {
unsigned char id[MAX_PHYS_ITEM_ID_LEN];
unsigned char id_len;
};
static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
struct netdev_phys_item_id *b)
{
return a->id_len == b->id_len &&
memcmp(a->id, b->id, a->id_len) == 0;
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
enum net_device_path_type {
DEV_PATH_ETHERNET = 0,
DEV_PATH_VLAN,
DEV_PATH_BRIDGE,
DEV_PATH_PPPOE,
DEV_PATH_DSA,
DEV_PATH_MTK_WDMA,
};
struct net_device_path {
enum net_device_path_type type;
const struct net_device *dev;
union {
struct {
u16 id;
__be16 proto;
u8 h_dest[ETH_ALEN];
} encap;
struct {
enum {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
DEV_PATH_BR_VLAN_UNTAG,
DEV_PATH_BR_VLAN_UNTAG_HW,
} vlan_mode;
u16 vlan_id;
__be16 vlan_proto;
} bridge;
struct {
int port;
u16 proto;
} dsa;
struct {
u8 wdma_idx;
u8 queue;
u16 wcid;
u8 bss;
u8 amsdu;
} mtk_wdma;
};
};
#define NET_DEVICE_PATH_STACK_MAX 5
#define NET_DEVICE_PATH_VLAN_MAX 2
struct net_device_path_stack {
int num_paths;
struct net_device_path path[NET_DEVICE_PATH_STACK_MAX];
};
struct net_device_path_ctx {
const struct net_device *dev;
u8 daddr[ETH_ALEN];
int num_vlans;
struct {
u16 id;
__be16 proto;
} vlan[NET_DEVICE_PATH_VLAN_MAX];
};
enum tc_setup_type {
TC_QUERY_CAPS,
TC_SETUP_QDISC_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
TC_SETUP_CLSMATCHALL,
TC_SETUP_CLSBPF,
TC_SETUP_BLOCK,
TC_SETUP_QDISC_CBS,
TC_SETUP_QDISC_RED,
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
TC_SETUP_ROOT_QDISC,
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
TC_SETUP_FT,
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
TC_SETUP_QDISC_FIFO,
TC_SETUP_QDISC_HTB,
TC_SETUP_ACT,
};
/* These structures hold the attributes of bpf state that are being passed
* to the netdevice through the bpf op.
*/
enum bpf_netdev_command {
/* Set or clear a bpf program used in the earliest stages of packet
* rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
* is responsible for calling bpf_prog_put on any old progs that are
* stored. In case of error, the callee need not release the new prog
* reference, but on success it takes ownership and must bpf_prog_put
* when it is no longer used.
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
XDP_SETUP_XSK_POOL,
};
struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;
enum bpf_xdp_mode {
XDP_MODE_SKB = 0,
XDP_MODE_DRV = 1,
XDP_MODE_HW = 2,
__MAX_XDP_MODE
};
struct bpf_xdp_entity {
struct bpf_prog *prog;
struct bpf_xdp_link *link;
};
struct netdev_bpf {
enum bpf_netdev_command command;
union {
/* XDP_SETUP_PROG */
struct {
u32 flags;
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
};
/* XDP_SETUP_XSK_POOL */
struct {
struct xsk_buff_pool *pool;
u16 queue_id;
} xsk;
};
};
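/*
 * Illustrative sketch (not part of the original header): an ndo_bpf()
 * implementation dispatches on the command and uses the matching union
 * member; the mydrv_* names are hypothetical and only XDP_SETUP_PROG is
 * handled here.
 *
 *	static int mydrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return mydrv_xdp_setup(dev, bpf->prog, bpf->extack);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */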
/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
int (*xdo_dev_state_add)(struct net_device *dev,
struct xfrm_state *x,
struct netlink_ext_ack *extack);
void (*xdo_dev_state_delete)(struct net_device *dev,
struct xfrm_state *x);
void (*xdo_dev_state_free)(struct net_device *dev,
struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
#endif
struct dev_ifalias {
struct rcu_head rcuhead;
char ifalias[];
};
struct devlink;
struct tlsdev_ops;
struct netdev_net_notifier {
struct list_head list;
struct notifier_block *nb;
};
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
* int (*ndo_init)(struct net_device *dev);
* This function is called once when a network device is registered.
* The network device can use this for any late stage initialization
* or semantic validation. It can fail with an error code which will
* be propagated back to register_netdev.
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when device is unregistered or when registration
* fails. It is not called if init fails.
*
* int (*ndo_open)(struct net_device *dev);
* This function is called when a network device transitions to the up
* state.
*
* int (*ndo_stop)(struct net_device *dev);
* This function is called when a network device transitions to the down
* state.
*
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
* struct net_device *dev);
* Called when a packet needs to be transmitted.
* Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
* the queue before that can happen; it's for obsolete devices and weird
* corner cases, but the stack really does a non-trivial amount
* of useless work if you return NETDEV_TX_BUSY.
* Required; cannot be NULL.
*
* netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
* struct net_device *dev
* netdev_features_t features);
* Called by core transmit path to determine if device is capable of
* performing offload operations on a given packet. This is to give
* the device an opportunity to implement any restrictions that cannot
* be otherwise expressed by feature flags. The check is called with
* the set of features that the stack has calculated and it returns
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* struct net_device *sb_dev);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
* void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
* This function is called to allow the device receiver to make
* changes to its configuration when multicast or promiscuous mode is enabled.
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
* This function is called when the device's address list filtering changes.
* If driver handles unicast address filtering, it should set
* IFF_UNICAST_FLT in its priv_flags.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
* MAC address cannot be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Old-style ioctl entry point. This is used internally by the
* ieee802154 subsystem but is no longer called by the device
* ioctl handler.
*
* int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Used by the bonding driver for its device specific ioctls:
* SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
* SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
*
* int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
* SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set a network device's bus interface parameters. This interface
* is retained for legacy reasons; new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
* Called when a user wants to change the Maximum Transfer Unit
* of a device.
*
* void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* void (*ndo_get_stats64)(struct net_device *dev,
* struct rtnl_link_stats64 *storage);
* struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
* Called when a user wants to get the network device usage
* statistics. Drivers must do one of the following:
* 1. Define @ndo_get_stats64 to fill in a zero-initialised
* rtnl_link_stats64 structure passed by the caller.
* 2. Define @ndo_get_stats to update a net_device_stats structure
* (which should normally be dev->stats) and return a pointer to
* it. The structure may be changed asynchronously only if each
* field is written atomically.
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
* bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
* Return true if this device supports offload stats of this attr_id.
*
* int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
* void *attr_data)
* Get statistics for offload operations by attr_id. Write it into the
* attr_data pointer.
*
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device supports VLAN filtering this function is called when a
* VLAN id is registered.
*
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device supports VLAN filtering this function is called when a
* VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
* int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
* u8 qos, __be16 proto);
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
* int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_config)(struct net_device *dev,
* int vf, struct ifla_vf_info *ivf);
* int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
*
* Enable or disable the VF ability to query its RSS Redirection Table and
* Hash Key. This is needed since on some devices the VF shares this information
* with the PF, and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
* void *type_data);
* Called to setup any 'tc' scheduler, classifier or action on @dev.
* This is always called from the stack with the rtnl lock held and netif
* tx queues stopped. This allows the netdevice to perform queue
* management safely.
*
* Fiber Channel over Ethernet (FCoE) offload functions.
* int (*ndo_fcoe_enable)(struct net_device *dev);
* Called when the FCoE protocol stack wants to start using LLD for FCoE
* so the underlying device can perform whatever needed configuration or
* initialization to support acceleration of FCoE traffic.
*
* int (*ndo_fcoe_disable)(struct net_device *dev);
* Called when the FCoE protocol stack wants to stop using LLD for FCoE
* so the underlying device can perform whatever needed clean-ups to
* stop supporting acceleration of FCoE traffic.
*
* int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
* struct scatterlist *sgl, unsigned int sgc);
* Called when the FCoE Initiator wants to initialize an I/O that
* is a possible candidate for Direct Data Placement (DDP). The LLD can
* perform necessary setup and returns 1 to indicate the device is set up
* successfully to perform DDP on this I/O, otherwise this returns 0.
*
* int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
* Called when the FCoE Initiator/Target is done with the DDPed I/O as
* indicated by the FC exchange id 'xid', so the underlying device can
* clean up and reuse resources for later DDP requests.
*
* int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
* struct scatterlist *sgl, unsigned int sgc);
* Called when the FCoE Target wants to initialize an I/O that
* is a possible candidate for Direct Data Placement (DDP). The LLD can
* perform necessary setup and returns 1 to indicate the device is set up
* successfully to perform DDP on this I/O, otherwise this returns 0.
*
* int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
* struct netdev_fcoe_hbainfo *hbainfo);
* Called when the FCoE Protocol stack wants information on the underlying
* device. This information is utilized by the FCoE protocol stack to
* register attributes with Fiber Channel management service as per the
* FC-GS Fabric Device Management Information(FDMI) specification.
*
* int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
* Called when the underlying device wants to override default World Wide
* Name (WWN) generation mechanism in FCoE protocol stack to pass its own
* World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
* protocol stack to use.
*
* RFS acceleration.
* int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
* u16 rxq_index, u32 flow_id);
* Set hardware filter for RFS. rxq_index is the target queue index;
* flow_id is a flow ID to be passed to rps_may_expire_flow() later.
* Return the filter ID on success, or a negative error code.
*
* Slave management functions (for bridge, bonding, etc).
* int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
* Called to make another netdev an underling.
*
* int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
* Called to release previously enslaved netdev.
*
* struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
* struct sk_buff *skb,
* bool all_slaves);
* Get the xmit slave of the master device. If all_slaves is true, the function
* assumes all the slaves can transmit.
*
* Feature/offload setting functions.
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
* Adjusts the requested feature flags according to device-specific
* constraints, and returns the resulting flags. Must not modify
* the device state.
*
* int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
* feature set might be less than what was returned by ndo_fix_features().
* Must return >0 or -errno if it changed dev->features itself.
*
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
* const unsigned char *addr, u16 vid, u16 flags,
* bool *notified, struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
* Callee shall set *notified to true if it sent any appropriate
* notification(s). Otherwise core will send a generic one.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
* const unsigned char *addr, u16 vid,
* bool *notified, struct netlink_ext_ack *extack);
* Deletes the FDB entry from dev corresponding to addr.
* Callee shall set *notified to true if it sent any appropriate
* notification(s). Otherwise core will send a generic one.
* int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
* struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
* int *idx)
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
* int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
* u16 nlmsg_flags, struct netlink_ext_ack *extack);
* Adds an MDB entry to dev.
* int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
* struct netlink_ext_ack *extack);
* Deletes the MDB entry from dev.
* int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
* struct netlink_ext_ack *extack);
* Bulk deletes MDB entries from dev.
* int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
* struct netlink_callback *cb);
* Dumps MDB entries from dev. The first argument (marker) in the netlink
* callback is used by core rtnetlink code.
*
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
* struct net_device *dev, u32 filter_mask,
* int nlflags)
* int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags);
*
* int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
* Called to change device carrier. Soft-devices (like dummy, team, etc)
* which do not represent real hardware may define this to allow their
* userspace components to manage their virtual carrier state. Devices
* that determine carrier state from physical hardware properties (eg
* network cables) or protocol-dependent mechanisms (eg
* USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
*
* int (*ndo_get_phys_port_id)(struct net_device *dev,
* struct netdev_phys_item_id *ppid);
* Called to get the ID of the physical port of this device. If the driver
* does not implement this, it is assumed that the hw is not able to have
* multiple net devices on a single physical port.
*
* int (*ndo_get_port_parent_id)(struct net_device *dev,
* struct netdev_phys_item_id *ppid)
* Called to get the parent ID of the physical port of this device.
*
* void* (*ndo_dfwd_add_station)(struct net_device *pdev,
* struct net_device *dev)
* Called by upper layer devices to accelerate switching or other
* station functionality into hardware. 'pdev' is the lowerdev
* to use for the offload and 'dev' is the net device that will
* back the offload. Returns a pointer to the private structure
* the upper layer will maintain.
* void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
* Called by upper layer device to delete the station created
* by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
* the station and priv is the structure returned by the add
* operation.
* int (*ndo_set_tx_maxrate)(struct net_device *dev,
* int queue_index, u32 maxrate);
* Called when a user wants to set a max-rate limitation of a specific
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
* int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
* This function is used to get egress tunnel information for a given skb.
* This is useful for retrieving outer tunnel header parameters while
* sampling a packet.
* void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
* This function is used to specify the headroom that must be
* reserved when allocating an skb during packet reception. Setting
* appropriate rx headroom value allows avoiding skb head copy on
* forward. Setting a negative value resets the rx headroom to the
* default value.
* int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
* This function is used to set or query state related to XDP on the
* netdevice and manage BPF offload. See definition of
* enum bpf_netdev_command for details.
* int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
* u32 flags);
* This function is used to submit @n XDP packets for transmit on a
* netdevice. Returns the number of frames successfully transmitted; frames
* that got dropped are freed/returned via xdp_return_frame().
* A negative return value indicates a general error invoking the ndo,
* meaning no frames were transmitted and the caller will free all frames.
* struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
* struct xdp_buff *xdp);
* Get the xmit slave of master device based on the xdp_buff.
* int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
* This function is used to wake up the softirq, ksoftirqd or kthread
* responsible for sending and/or receiving packets on a specific
* queue id bound to an AF_XDP socket. The flags field specifies if
* only RX, only Tx, or both should be woken up using the flags
* XDP_WAKEUP_RX and XDP_WAKEUP_TX.
* int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
* int cmd);
* Add, change, delete or get information on an IPv4 tunnel.
* struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
* If a device is paired with a peer device, return the peer instance.
* The caller must be under RCU read context.
* int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
* Get the forwarding path to reach the real device from the HW destination address
* ktime_t (*ndo_get_tstamp)(struct net_device *dev,
* const struct skb_shared_hwtstamps *hwtstamps,
* bool cycles);
* Get hardware timestamp based on normal/adjustable time or free running
* cycle counter. This function is required if physical clock supports a
* free running cycle counter.
*
* int (*ndo_hwtstamp_get)(struct net_device *dev,
* struct kernel_hwtstamp_config *kernel_config);
* Get the currently configured hardware timestamping parameters for the
* NIC device.
*
* int (*ndo_hwtstamp_set)(struct net_device *dev,
* struct kernel_hwtstamp_config *kernel_config,
* struct netlink_ext_ack *extack);
* Change the hardware timestamping parameters for NIC device.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
void (*ndo_uninit)(struct net_device *dev);
int (*ndo_open)(struct net_device *dev);
int (*ndo_stop)(struct net_device *dev);
netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
struct net_device *dev);
netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
int (*ndo_set_mac_address)(struct net_device *dev,
void *addr);
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*ndo_eth_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*ndo_siocbond)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*ndo_siocwandev)(struct net_device *dev,
struct if_settings *ifs);
int (*ndo_siocdevprivate)(struct net_device *dev,
struct ifreq *ifr,
void __user *data, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
void (*ndo_tx_timeout) (struct net_device *dev,
unsigned int txqueue);
void (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
int (*ndo_get_offload_stats)(int attr_id,
const struct net_device *dev,
void *attr_data);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
__be16 proto, u16 vid);
int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
int (*ndo_netpoll_setup)(struct net_device *dev);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
int queue, u16 vlan,
u8 qos, __be16 proto);
int (*ndo_set_vf_rate)(struct net_device *dev,
int vf, int min_tx_rate,
int max_tx_rate);
int (*ndo_set_vf_spoofchk)(struct net_device *dev,
int vf, bool setting);
int (*ndo_set_vf_trust)(struct net_device *dev,
int vf, bool setting);
int (*ndo_get_vf_config)(struct net_device *dev,
int vf,
struct ifla_vf_info *ivf);
int (*ndo_set_vf_link_state)(struct net_device *dev,
int vf, int link_state);
int (*ndo_get_vf_stats)(struct net_device *dev,
int vf,
struct ifla_vf_stats
*vf_stats);
int (*ndo_set_vf_port)(struct net_device *dev,
int vf,
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
int (*ndo_get_vf_guid)(struct net_device *dev,
int vf,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
int (*ndo_set_vf_guid)(struct net_device *dev,
int vf, u64 guid,
int guid_type);
int (*ndo_set_vf_rss_query_en)(
struct net_device *dev,
int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev,
enum tc_setup_type type,
void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
u16 xid,
struct scatterlist *sgl,
unsigned int sgc);
int (*ndo_fcoe_ddp_done)(struct net_device *dev,
u16 xid);
int (*ndo_fcoe_ddp_target)(struct net_device *dev,
u16 xid,
struct scatterlist *sgl,
unsigned int sgc);
int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
struct netdev_fcoe_hbainfo *hbainfo);
#endif
#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
u64 *wwn, int type);
#endif
#ifdef CONFIG_RFS_ACCEL
int (*ndo_rx_flow_steer)(struct net_device *dev,
const struct sk_buff *skb,
u16 rxq_index,
u32 flow_id);
#endif
int (*ndo_add_slave)(struct net_device *dev,
struct net_device *slave_dev,
struct netlink_ext_ack *extack);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
struct sock *sk);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_neigh_construct)(struct net_device *dev,
struct neighbour *n);
void (*ndo_neigh_destroy)(struct net_device *dev,
struct neighbour *n);
int (*ndo_fdb_add)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid,
u16 flags,
bool *notified,
struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid,
bool *notified,
struct netlink_ext_ack *extack);
int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
struct net_device *dev,
struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
int (*ndo_fdb_get)(struct sk_buff *skb,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid, u32 portid, u32 seq,
struct netlink_ext_ack *extack);
int (*ndo_mdb_add)(struct net_device *dev,
struct nlattr *tb[],
u16 nlmsg_flags,
struct netlink_ext_ack *extack);
int (*ndo_mdb_del)(struct net_device *dev,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
int (*ndo_mdb_del_bulk)(struct net_device *dev,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
int (*ndo_mdb_dump)(struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb);
int (*ndo_mdb_get)(struct net_device *dev,
struct nlattr *tb[], u32 portid,
u32 seq,
struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags,
struct netlink_ext_ack *extack);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask,
int nlflags);
int (*ndo_bridge_dellink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int (*ndo_get_port_parent_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
void *priv);
int (*ndo_set_tx_maxrate)(struct net_device *dev,
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
int needed_headroom);
int (*ndo_bpf)(struct net_device *dev,
struct netdev_bpf *bpf);
int (*ndo_xdp_xmit)(struct net_device *dev, int n,
struct xdp_frame **xdp,
u32 flags);
struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
struct xdp_buff *xdp);
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
int (*ndo_tunnel_ctl)(struct net_device *dev,
struct ip_tunnel_parm_kern *p,
int cmd);
struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
struct net_device_path *path);
ktime_t (*ndo_get_tstamp)(struct net_device *dev,
const struct skb_shared_hwtstamps *hwtstamps,
bool cycles);
int (*ndo_hwtstamp_get)(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_config);
int (*ndo_hwtstamp_set)(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_config,
struct netlink_ext_ack *extack);
#if IS_ENABLED(CONFIG_NET_SHAPER)
/**
* @net_shaper_ops: Device shaping offload operations
* see include/net/net_shapers.h
*/
const struct net_shaper_ops *net_shaper_ops;
#endif
};
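/*
 * Illustrative sketch (not part of the original header): a driver fills only
 * the hooks it needs (ndo_start_xmit being the one required member) and
 * points dev->netdev_ops at a static const table; the mydrv_* names are
 * hypothetical.
 *
 *	static const struct net_device_ops mydrv_netdev_ops = {
 *		.ndo_open	 = mydrv_open,
 *		.ndo_stop	 = mydrv_stop,
 *		.ndo_start_xmit	 = mydrv_start_xmit,
 *		.ndo_set_rx_mode = mydrv_set_rx_mode,
 *		.ndo_get_stats64 = mydrv_get_stats64,
 *	};
 *
 *	dev->netdev_ops = &mydrv_netdev_ops;
 */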
/**
* enum netdev_priv_flags - &struct net_device priv_flags
*
* These are the &struct net_device priv_flags; they are only set internally
* by drivers and used in the kernel. These flags are invisible to
* userspace; this means that the order of these flags can change
* during any kernel release.
*
* You should add bitfield booleans after either net_device::priv_flags
* (hotpath) or ::threaded (slowpath) instead of extending these flags.
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
* @IFF_BONDING: bonding master or slave
* @IFF_ISATAP: ISATAP interface (RFC4214)
* @IFF_WAN_HDLC: WAN HDLC device
* @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
* release skb->dst
* @IFF_DONT_BRIDGE: disallow bridging this ether dev
* @IFF_DISABLE_NETPOLL: disable netpoll at run-time
* @IFF_MACVLAN_PORT: device used as macvlan port
* @IFF_BRIDGE_PORT: device used as bridge port
* @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
* @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
* @IFF_UNICAST_FLT: Supports unicast filtering
* @IFF_TEAM_PORT: device used as team port
* @IFF_SUPP_NOFCS: device supports sending custom FCS
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
* @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
* underlying stacked devices
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
* @IFF_TEAM: device is a team device
* @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
* @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
* entity (i.e. the master device for bridged veth)
* @IFF_MACSEC: device is a MACsec device
* @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
* @IFF_NO_ADDRCONF: prevent ipv6 addrconf
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
IFF_EBRIDGE = 1<<1,
IFF_BONDING = 1<<2,
IFF_ISATAP = 1<<3,
IFF_WAN_HDLC = 1<<4,
IFF_XMIT_DST_RELEASE = 1<<5,
IFF_DONT_BRIDGE = 1<<6,
IFF_DISABLE_NETPOLL = 1<<7,
IFF_MACVLAN_PORT = 1<<8,
IFF_BRIDGE_PORT = 1<<9,
IFF_OVS_DATAPATH = 1<<10,
IFF_TX_SKB_SHARING = 1<<11,
IFF_UNICAST_FLT = 1<<12,
IFF_TEAM_PORT = 1<<13,
IFF_SUPP_NOFCS = 1<<14,
IFF_LIVE_ADDR_CHANGE = 1<<15,
IFF_MACVLAN = 1<<16,
IFF_XMIT_DST_RELEASE_PERM = 1<<17,
IFF_L3MDEV_MASTER = 1<<18,
IFF_NO_QUEUE = 1<<19,
IFF_OPENVSWITCH = 1<<20,
IFF_L3MDEV_SLAVE = 1<<21,
IFF_TEAM = 1<<22,
IFF_RXFH_CONFIGURED = 1<<23,
IFF_PHONY_HEADROOM = 1<<24,
IFF_MACSEC = 1<<25,
IFF_NO_RX_HANDLER = 1<<26,
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
IFF_NO_ADDRCONF = BIT_ULL(30),
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
};
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
ML_PRIV_NONE,
ML_PRIV_CAN,
};
enum netdev_stat_type {
NETDEV_PCPU_STAT_NONE,
NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
};
enum netdev_reg_state {
NETREG_UNINITIALIZED = 0,
NETREG_REGISTERED, /* completed register_netdevice */
NETREG_UNREGISTERING, /* called unregister_netdevice */
NETREG_UNREGISTERED, /* completed unregister todo */
NETREG_RELEASED, /* called free_netdev */
NETREG_DUMMY, /* dummy device for NAPI poll */
};
/**
* struct net_device - The DEVICE structure.
*
* Actually, this whole structure is a big mistake. It mixes I/O
* data with strictly "high-level" data, and it has to know about
* almost every data structure used in the INET module.
*
* @priv_flags: flags invisible to userspace defined as bits, see
* enum netdev_priv_flags for the definitions
* @lltx: device supports lockless Tx. Deprecated for real HW
* drivers. Mainly used by logical interfaces, such as
* bonding and tunnels
* @netmem_tx: device support netmem_tx.
*
* @name: This is the first field of the "visible" part of this structure
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
*
* @name_node: Name hashlist node
* @ifalias: SNMP alias
* @mem_end: Shared memory end
* @mem_start: Shared memory start
* @base_addr: Device I/O address
* @irq: Device IRQ number
*
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
* @napi_list: List entry used for polling NAPI devices
* @unreg_list: List entry when we are unregistering the
* device; see the function unregister_netdev
* @close_list: List entry used when we are closing the device
* @ptype_all: Device-specific packet handlers for all protocols
* @ptype_specific: Device-specific, protocol-specific packet handlers
*
* @adj_list: Directly linked devices, like slaves for bonding
* @features: Currently active device features
* @hw_features: User-changeable features
*
* @wanted_features: User-requested features
* @vlan_features: Mask of features inheritable by VLAN devices
*
* @hw_enc_features: Mask of features inherited by encapsulating devices
* This field indicates what encapsulation
* offloads the hardware is capable of doing,
* and drivers will need to set them appropriately.
*
* @mpls_features: Mask of features inheritable by MPLS
* @gso_partial_features: value(s) from NETIF_F_GSO\*
*
* @ifindex: interface index
* @group: The group the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
*
* @core_stats: core networking counters,
* do not use this in drivers
* @carrier_up_count: Number of times the carrier has been up
* @carrier_down_count: Number of times the carrier has been down
*
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
* see <net/iw_handler.h> for details.
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
* @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
* @ethtool_ops: Management operations
* @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
* discovery handling. Necessary for e.g. 6LoWPAN.
* @xfrmdev_ops: Transformation offload operations
* @tlsdev_ops: Transport Layer Security offload operations
* @header_ops: Includes callbacks for creating, parsing, caching, etc.
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
* @xdp_features: XDP capability supported by the device
* @gflags: Global flags ( kept as legacy )
* @priv_len: Size of the ->priv flexible array
* @priv: Flexible array containing private data
* @operstate: RFC2863 operstate
* @link_mode: Mapping policy to operstate
* @if_port: Selectable AUI, TP, ...
* @dma: DMA channel
* @mtu: Interface MTU value
* @min_mtu: Interface Minimum MTU value
* @max_mtu: Interface Maximum MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
* @min_header_len: Minimum hardware header length
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
* @needed_tailroom: Extra tailroom the hardware may need, but not in all
* cases can this be guaranteed. Some cases also use
* LL_MAX_HEADER instead to allocate the skb
*
* interface address info:
*
* @perm_addr: Permanent hw address
* @addr_assign_type: Hw address assignment type
* @addr_len: Hardware address length
* @upper_level: Maximum depth level of upper devices.
* @lower_level: Maximum depth level of lower devices.
* @threaded: napi threaded state.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
* @name_assign_type: network interface name assignment type
* @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
* @uc: unicast mac addresses
* @mc: multicast mac addresses
* @dev_addrs: list of device hw addresses
* @queues_kset: Group of all Kobjects in the Tx and RX queues
* @promiscuity: Number of times the NIC is told to work in
* promiscuous mode; if it becomes 0 the NIC will
* exit promiscuous mode
* @allmulti: Counter, enables or disables allmulticast mode
*
* @vlan_info: VLAN info
* @dsa_ptr: dsa specific data
* @tipc_ptr: TIPC specific data
* @atalk_ptr: AppleTalk link
* @ip_ptr: IPv4 specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
* @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
* device struct
* @mpls_ptr: mpls_dev struct pointer
* @mctp_ptr: MCTP specific data
* @psp_dev: PSP crypto device registered for this netdev
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
*
* @_rx: Array of RX queues
* @num_rx_queues: Number of RX queues
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
* @xdp_prog: XDP sockets filter program pointer
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
* @tcx_ingress: BPF & clsact qdisc specific data for ingress processing
* @ingress_queue: XXX: need comments on this one
* @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
*
* @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
* indexed by RX queue number. Assigned by driver.
* This must only be set if the ndo_rx_flow_steer
* operation is defined
* @index_hlist: Device index hash chain
*
* @_tx: Array of TX queues
* @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
* @real_num_tx_queues: Number of TX queues currently active in device
* @qdisc: Root qdisc from userspace point of view
* @tx_queue_len: Max frames per queue allowed
* @tx_global_lock: XXX: need comments on this one
* @xdp_bulkq: XDP device bulk queue
* @xps_maps: all CPUs/RXQs maps for XPS device
*
* @tcx_egress: BPF & clsact qdisc specific data for egress processing
* @nf_hooks_egress: netfilter hooks executed for egress packets
* @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
* @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
* @dev_refcnt: Number of references to this device
* @refcnt_tracker: Tracker directory for tracked references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
*
* @reg_state: Register/unregister state machine
* @dismantle: Device is going to be freed
* @needs_free_netdev: Should unregister perform free_netdev?
* @priv_destructor: Called from unregister
* @npinfo: XXX: need comments on this one
* @nd_net: Network namespace this network device is inside
* protected by @lock
*
* @ml_priv: Mid-layer private
* @ml_priv_type: Mid-layer private type
*
* @pcpu_stat_type: Type of device statistics which the core should
* allocate/free: none, lstats, tstats, dstats. none
* means the driver is handling statistics allocation/
* freeing internally.
* @lstats: Loopback statistics: packets, bytes
* @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
* @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
*
* @garp_port: GARP
* @mrp_port: MRP
*
* @dm_private: Drop monitor private
*
* @dev: Class/net/name entry
* @sysfs_groups: Space for optional device, statistics and wireless
* sysfs groups
*
* @sysfs_rx_queue_group: Space for optional per-rx queue attributes
* @rtnl_link_ops: Rtnl_link_ops
* @stat_ops: Optional ops for queue-aware statistics
* @queue_mgmt_ops: Optional ops for queue management
*
* @gso_max_size: Maximum size of generic segmentation offload
* @tso_max_size: Device (as in HW) limit on the max TSO request size
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
* @tso_max_segs: Device (as in HW) limit on the max TSO segment count
* @gso_ipv4_max_size: Maximum size of generic segmentation offload,
* for IPv4.
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
* @tc_to_txq: XXX: need comments on this one
* @prio_tc_map: XXX: need comments on this one
*
* @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
*
* @priomap: XXX: need comments on this one
* @link_topo: Physical link topology tracking attached PHYs
* @phydev: Physical device may attach itself
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
* switch port.
*
* @irq_affinity_auto: driver wants the core to store and re-assign the IRQ
* affinity. Set by netif_enable_irq_affinity(), then
* the driver must create a persistent napi by
* netif_napi_add_config() and finally bind the napi to
* IRQ (via netif_napi_set_irq()).
*
* @rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap.
* Set by calling netif_enable_cpu_rmap().
*
* @see_all_hwtstamp_requests: device wants to see calls to
* ndo_hwtstamp_set() for all timestamp requests
* regardless of source, even if those aren't
* HWTSTAMP_SOURCE_NETDEV
* @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
* @netns_immutable: interface can't change network namespaces
* @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes
*
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
* to another network namespace.
*
* @macsec_ops: MACsec offloading ops
*
* @udp_tunnel_nic_info: static structure describing the UDP tunnel
* offload capabilities of the device
* @udp_tunnel_nic: UDP tunnel offload state
* @ethtool: ethtool related state
* @xdp_state: stores info on attached XDP BPF programs
*
* @nested_level: Used as a parameter of spin_lock_nested() of
* dev->addr_list_lock.
* @unlink_list: As netif_addr_lock() can be called recursively,
* keep a list of interfaces to be deleted.
* @gro_max_size: Maximum size of aggregated packet in generic
* receive offload (GRO)
* @gro_ipv4_max_size: Maximum size of aggregated packet in generic
* receive offload (GRO), for IPv4.
* @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP
* zero copy driver
*
* @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
* @linkwatch_dev_tracker: refcount tracker used by linkwatch.
* @watchdog_dev_tracker: refcount tracker used by watchdog.
* @dev_registered_tracker: tracker for reference held while
* registered
* @offload_xstats_l3: L3 HW stats for this netdevice.
*
* @devlink_port: Pointer to related devlink port structure.
* Assigned by a driver before netdev registration using
* SET_NETDEV_DEVLINK_PORT macro. This pointer is static
* during the time netdevice is registered.
*
* @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
* where the clock is recovered.
*
* @max_pacing_offload_horizon: max EDT offload horizon in nsec.
* @napi_config: An array of napi_config structures containing per-NAPI
* settings.
* @num_napi_configs: number of allocated NAPI config structs,
* always >= max(num_rx_queues, num_tx_queues).
* @gro_flush_timeout: timeout for GRO layer in NAPI
* @napi_defer_hard_irqs: If not zero, provides a counter that allows
* avoiding NIC hard IRQs on busy queues.
*
* @neighbours: List heads pointing to this device's neighbours'
* dev_list, one per address-family.
* @hwprov: Tracks which PTP performs hardware packet time stamping.
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
/* Cacheline organization can be found documented in
* Documentation/networking/net_cachelines/net_device.rst.
* Please update the document when adding new fields.
*/
/* TX read-mostly hotpath */
__cacheline_group_begin(net_device_read_tx);
struct_group(priv_flags_fast,
unsigned long priv_flags:32;
unsigned long lltx:1;
unsigned long netmem_tx:1;
);
const struct net_device_ops *netdev_ops;
const struct header_ops *header_ops;
struct netdev_queue *_tx;
netdev_features_t gso_partial_features;
unsigned int real_num_tx_queues;
unsigned int gso_max_size;
unsigned int gso_ipv4_max_size;
u16 gso_max_segs;
s16 num_tc;
/* Note : dev->mtu is often read without holding a lock.
* Writers usually hold RTNL.
* It is recommended to use READ_ONCE() to annotate the reads,
* and to use WRITE_ONCE() to annotate the writes.
*/
unsigned int mtu;
unsigned short needed_headroom;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
#endif
#ifdef CONFIG_NETFILTER_EGRESS
struct nf_hook_entries __rcu *nf_hooks_egress;
#endif
#ifdef CONFIG_NET_XGRESS
struct bpf_mprog_entry __rcu *tcx_egress;
#endif
__cacheline_group_end(net_device_read_tx);
/* TXRX read-mostly hotpath */
__cacheline_group_begin(net_device_read_txrx);
union {
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
};
unsigned long state;
unsigned int flags;
unsigned short hard_header_len;
netdev_features_t features;
struct inet6_dev __rcu *ip6_ptr;
__cacheline_group_end(net_device_read_txrx);
/* RX read-mostly hotpath */
__cacheline_group_begin(net_device_read_rx);
struct bpf_prog __rcu *xdp_prog;
struct list_head ptype_specific;
int ifindex;
unsigned int real_num_rx_queues;
struct netdev_rx_queue *_rx;
unsigned int gro_max_size;
unsigned int gro_ipv4_max_size;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
possible_net_t nd_net;
#ifdef CONFIG_NETPOLL
struct netpoll_info __rcu *npinfo;
#endif
#ifdef CONFIG_NET_XGRESS
struct bpf_mprog_entry __rcu *tcx_ingress;
#endif
__cacheline_group_end(net_device_read_rx);
char name[IFNAMSIZ];
struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
/*
* I/O specific fields
* FIXME: Merge these and struct ifmap into one
*/
unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;
/*
* Some hardware also needs these fields (state,dev_list,
* napi_list,unreg_list,close_list) but they are not
* part of the usual set specified in Space.c.
*/
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
struct list_head ptype_all;
struct {
struct list_head upper;
struct list_head lower;
} adj_list;
/* Read-mostly cache-line for fast-path access */
xdp_features_t xdp_features;
const struct xdp_metadata_ops *xdp_metadata_ops;
const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
unsigned short gflags;
unsigned short needed_tailroom;
netdev_features_t hw_features;
netdev_features_t wanted_features;
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
unsigned int min_mtu;
unsigned int max_mtu;
unsigned short type;
unsigned char min_header_len;
unsigned char name_assign_type;
int group;
struct net_device_stats stats; /* not used by modern drivers */
struct net_device_core_stats __percpu *core_stats;
/* Stats to monitor link on/off, flapping */
atomic_t carrier_up_count;
atomic_t carrier_down_count;
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def *wireless_handlers;
#endif
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
const struct ndisc_ops *ndisc_ops;
#endif
#ifdef CONFIG_XFRM_OFFLOAD
const struct xfrmdev_ops *xfrmdev_ops;
#endif
#if IS_ENABLED(CONFIG_TLS_DEVICE)
const struct tlsdev_ops *tlsdev_ops;
#endif
unsigned int operstate;
unsigned char link_mode;
unsigned char if_port;
unsigned char dma;
/* Interface address info. */
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
u8 threaded;
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
int irq;
u32 priv_len;
spinlock_t addr_list_lock;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
#ifdef CONFIG_LOCKDEP
struct list_head unlink_list;
#endif
unsigned int promiscuity;
unsigned int allmulti;
bool uc_promisc;
#ifdef CONFIG_LOCKDEP
unsigned char nested_level;
#endif
/* Protocol-specific pointers */
struct in_device __rcu *ip_ptr;
/** @fib_nh_head: nexthops associated with this netdev */
struct hlist_head fib_nh_head;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
struct dsa_port *dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
struct tipc_bearer __rcu *tipc_ptr;
#endif
#if IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
#endif
#if IS_ENABLED(CONFIG_AX25)
struct ax25_dev __rcu *ax25_ptr;
#endif
#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
#endif
#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
struct wpan_dev *ieee802154_ptr;
#endif
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
struct mpls_dev __rcu *mpls_ptr;
#endif
#if IS_ENABLED(CONFIG_MCTP)
struct mctp_dev __rcu *mctp_ptr;
#endif
#if IS_ENABLED(CONFIG_INET_PSP)
struct psp_dev __rcu *psp_dev;
#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
/* Interface address info used in eth_type_trans() */
const unsigned char *dev_addr;
unsigned int num_rx_queues;
#define GRO_LEGACY_MAX_SIZE 65536u
/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
* and shinfo->gso_segs is a 16bit field.
*/
#define GRO_MAX_SIZE (8 * 65535u)
unsigned int xdp_zc_max_segs;
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rx_cpu_rmap;
#endif
struct hlist_node index_hlist;
/*
* Cache lines mostly used on transmit path
*/
unsigned int num_tx_queues;
struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
int watchdog_timeo;
u32 proto_down_reason;
struct list_head todo_list;
#ifdef CONFIG_PCPU_DEV_REFCNT
int __percpu *pcpu_refcnt;
#else
refcount_t dev_refcnt;
#endif
struct ref_tracker_dir refcnt_tracker;
struct list_head link_watch_list;
u8 reg_state;
bool dismantle;
/** @moving_ns: device is changing netns, protected by @lock */
bool moving_ns;
/** @rtnl_link_initializing: Device being created, suppress events */
bool rtnl_link_initializing;
bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);
/* mid-layer private */
void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;
enum netdev_stat_type pcpu_stat_type:8;
#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
struct mrp_port __rcu *mrp_port;
#endif
#if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
struct dm_hw_stat_delta __rcu *dm_private;
#endif
struct device dev;
const struct attribute_group *sysfs_groups[5];
const struct attribute_group *sysfs_rx_queue_group;
const struct rtnl_link_ops *rtnl_link_ops;
const struct netdev_stat_ops *stat_ops;
const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SEGS 65535u
#define GSO_LEGACY_MAX_SIZE 65536u
/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
* and shinfo->gso_segs is a 16bit field.
*/
#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
#define TSO_LEGACY_MAX_SIZE 65536
#define TSO_MAX_SIZE UINT_MAX
unsigned int tso_max_size;
#define TSO_MAX_SEGS U16_MAX
u16 tso_max_segs;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
unsigned int fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
struct phy_link_topology *link_topo;
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
bool irq_affinity_auto;
bool rx_cpu_rmap_auto;
/* priv_flags_slow, ungrouped to save space */
unsigned long see_all_hwtstamp_requests:1;
unsigned long change_proto_down:1;
unsigned long netns_immutable:1;
unsigned long fcoe_mtu:1;
struct list_head net_notifier_list;
#if IS_ENABLED(CONFIG_MACSEC)
/* MACsec management functions */
const struct macsec_ops *macsec_ops;
#endif
const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
struct udp_tunnel_nic *udp_tunnel_nic;
/** @cfg: net_device queue-related configuration */
struct netdev_config *cfg;
/**
* @cfg_pending: same as @cfg but when device is being actively
* reconfigured includes any changes to the configuration
* requested by the user, but which may or may not be rejected.
*/
struct netdev_config *cfg_pending;
struct ethtool_netdev_state *ethtool;
/* protected by rtnl_lock */
struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
u8 dev_addr_shadow[MAX_ADDR_LEN];
netdevice_tracker linkwatch_dev_tracker;
netdevice_tracker watchdog_dev_tracker;
netdevice_tracker dev_registered_tracker;
struct rtnl_hw_stats64 *offload_xstats_l3;
struct devlink_port *devlink_port;
#if IS_ENABLED(CONFIG_DPLL)
struct dpll_pin __rcu *dpll_pin;
#endif
#if IS_ENABLED(CONFIG_PAGE_POOL)
/** @page_pools: page pools created for this netdevice */
struct hlist_head page_pools;
#endif
/** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
struct dim_irq_moder *irq_moder;
u64 max_pacing_offload_horizon;
struct napi_config *napi_config;
u32 num_napi_configs;
u32 napi_defer_hard_irqs;
unsigned long gro_flush_timeout;
/**
* @up: copy of @state's IFF_UP, but safe to read with just @lock.
* May report false negatives while the device is being opened
* or closed (@lock does not protect .ndo_open, or .ndo_close).
*/
bool up;
/**
* @request_ops_lock: request the core to run all @netdev_ops and
* @ethtool_ops under the @lock.
*/
bool request_ops_lock;
/**
* @lock: netdev-scope lock, protects a small selection of fields.
* Should always be taken using netdev_lock() / netdev_unlock() helpers.
* Drivers are free to use it for other protection.
*
* For the drivers that implement shaper or queue API, the scope
* of this lock is expanded to cover most ndo/queue/ethtool/sysfs
* operations. Drivers may opt-in to this behavior by setting
* @request_ops_lock.
*
* @lock protection mixes with rtnl_lock in multiple ways, fields are
* either:
*
* - simply protected by the instance @lock;
*
* - double protected - writers hold both locks, readers hold either;
*
* - ops protected - protected by the lock held around the NDOs
* and other callbacks, that is the instance lock on devices for
* which netdev_need_ops_lock() returns true, otherwise by rtnl_lock;
*
* - double ops protected - always protected by rtnl_lock but for
* devices for which netdev_need_ops_lock() returns true - also
* the instance lock.
*
* Simply protects:
* @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
* @net_shaper_hierarchy, @reg_state, @threaded
*
* Double protects:
* @up, @moving_ns, @nd_net, @xdp_features
*
* Double ops protects:
* @real_num_rx_queues, @real_num_tx_queues
*
* Also protects some fields in:
* struct napi_struct, struct netdev_queue, struct netdev_rx_queue
*
* Ordering: take after rtnl_lock.
*/
struct mutex lock;
#if IS_ENABLED(CONFIG_NET_SHAPER)
/**
* @net_shaper_hierarchy: data tracking the current shaper status
* see include/net/net_shapers.h
*/
struct net_shaper_hierarchy *net_shaper_hierarchy;
#endif
struct hlist_head neighbours[NEIGH_NR_TABLES];
struct hwtstamp_provider __rcu *hwprov;
u8 priv[] ____cacheline_aligned
__counted_by(priv_len);
} ____cacheline_aligned;
#define to_net_dev(d) container_of(d, struct net_device, dev)
/*
* Driver should use this to assign devlink port instance to a netdevice
* before it registers the netdevice. Therefore devlink_port is static
* during the netdev lifetime after it is registered.
*/
#define SET_NETDEV_DEVLINK_PORT(dev, port) \
({ \
WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
((dev)->devlink_port = (port)); \
})
static inline bool netif_elide_gro(const struct net_device *dev)
{
if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
return true;
return false;
}
#define NETDEV_ALIGN 32
static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
return dev->prio_tc_map[prio & TC_BITMASK];
}
static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
if (tc >= dev->num_tc)
return -EINVAL;
dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
return 0;
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
void netdev_reset_tc(struct net_device *dev);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
static inline
int netdev_get_num_tc(struct net_device *dev)
{
return dev->num_tc;
}
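/*
 * Example (illustrative sketch, not part of this header): a multiqueue driver
 * might partition its TX queues into traffic classes with the helpers above.
 * The foo_setup_tc() name and the 2-TC/4-queue split are hypothetical.
 *
 *	static int foo_setup_tc(struct net_device *dev)
 *	{
 *		int prio, err;
 *
 *		err = netdev_set_num_tc(dev, 2);
 *		if (err)
 *			return err;
 *		netdev_set_tc_queue(dev, 0, 2, 0);	// TC 0 -> queues 0..1
 *		netdev_set_tc_queue(dev, 1, 2, 2);	// TC 1 -> queues 2..3
 *		for (prio = 0; prio <= TC_BITMASK; prio++)
 *			netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
 *		return 0;
 *	}
 */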
static inline void net_prefetch(void *p)
{
prefetch(p);
#if L1_CACHE_BYTES < 128
prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}
static inline void net_prefetchw(void *p)
{
prefetchw(p);
#if L1_CACHE_BYTES < 128
prefetchw((u8 *)p + L1_CACHE_BYTES);
#endif
}
void netdev_unbind_sb_channel(struct net_device *dev,
struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
struct net_device *sb_dev,
u8 tc, u16 count, u16 offset);
int netdev_set_sb_channel(struct net_device *dev, u16 channel);
static inline int netdev_get_sb_channel(struct net_device *dev)
{
return max_t(int, -dev->num_tc, 0);
}
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
return &dev->_tx[index];
}
static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
const struct sk_buff *skb)
{
return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}
static inline void netdev_for_each_tx_queue(struct net_device *dev,
void (*f)(struct net_device *,
struct netdev_queue *,
void *),
void *arg)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++)
	f(dev, &dev->_tx[i], arg);
}
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
/* returns the headroom that the master device needs to take into account
* when forwarding to this dev
*/
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}
static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
if (dev->netdev_ops->ndo_set_rx_headroom)
dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}
/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
netdev_set_rx_headroom(dev, -1);
}
static inline void *netdev_get_ml_priv(struct net_device *dev,
enum netdev_ml_priv_type type)
{
if (dev->ml_priv_type != type)
return NULL;
return dev->ml_priv;
}
static inline void netdev_set_ml_priv(struct net_device *dev,
void *ml_priv,
enum netdev_ml_priv_type type)
{
WARN(dev->ml_priv_type && dev->ml_priv_type != type,
"Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
dev->ml_priv_type, type);
WARN(!dev->ml_priv_type && dev->ml_priv,
"Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
dev->ml_priv = ml_priv;
dev->ml_priv_type = type;
}
/*
* Net namespace inlines
*/
static inline
struct net *dev_net(const struct net_device *dev)
{
return read_pnet(&dev->nd_net);
}
static inline
struct net *dev_net_rcu(const struct net_device *dev)
{
return read_pnet_rcu(&dev->nd_net);
}
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
write_pnet(&dev->nd_net, net);
}
/**
* netdev_priv - access network device private data
* @dev: network device
*
* Get network device private data
*/
static inline void *netdev_priv(const struct net_device *dev)
{
return (void *)dev->priv;
}
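/*
 * Example (illustrative sketch): driver private state is reserved via the
 * sizeof_priv argument of alloc_netdev()/alloc_etherdev() (the latter from
 * <linux/etherdevice.h>) and retrieved with netdev_priv(). struct foo_priv
 * is hypothetical.
 *
 *	struct foo_priv {
 *		spinlock_t lock;
 *		u32 msg_enable;
 *	};
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *	struct foo_priv *priv = netdev_priv(dev);
 */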
/* Set the sysfs physical device reference for the network logical device.
* If set prior to registration, a symlink will be created during initialization.
*/
#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
/* Set the sysfs device type for the network logical device to allow
* fine-grained identification of different network device types. For
* example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type,
struct napi_struct *napi);
static inline void netdev_lock(struct net_device *dev)
{
	mutex_lock(&dev->lock);
}
static inline void netdev_unlock(struct net_device *dev)
{
	mutex_unlock(&dev->lock);
}
/* Additional netdev_lock()-related helpers are in net/netdev_lock.h */
void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
{
netdev_lock(napi->dev);
netif_napi_set_irq_locked(napi, irq);
netdev_unlock(napi->dev);
}
/* Default NAPI poll() weight
* Device drivers are strongly advised not to use a bigger value.
*/
#define NAPI_POLL_WEIGHT 64
void netif_napi_add_weight_locked(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight);
static inline void
netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
netdev_lock(dev);
netif_napi_add_weight_locked(dev, napi, poll, weight);
netdev_unlock(dev);
}
/**
* netif_napi_add() - initialize a NAPI context
* @dev: network device
* @napi: NAPI context
* @poll: polling function
*
* netif_napi_add() must be used to initialize a NAPI context prior to calling
* *any* of the other NAPI-related functions.
*/
static inline void
netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int))
{
netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
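/*
 * Example (illustrative sketch): a driver typically registers one NAPI
 * context per RX queue and completes a polling round with
 * napi_complete_done(). foo_poll() and the packet processing are only an
 * outline; error paths are elided.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = 0;
 *
 *		// ... process up to @budget received packets ...
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll);
 */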
static inline void
netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int))
{
netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
}
static inline void
netif_napi_add_tx_weight(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight)
{
set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
netif_napi_add_weight(dev, napi, poll, weight);
}
static inline void
netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int index)
{
napi->index = index;
napi->config = &dev->napi_config[index];
netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
}
/**
* netif_napi_add_config - initialize a NAPI context with persistent config
* @dev: network device
* @napi: NAPI context
* @poll: polling function
* @index: the NAPI index
*/
static inline void
netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int index)
{
netdev_lock(dev);
netif_napi_add_config_locked(dev, napi, poll, index);
netdev_unlock(dev);
}
/**
* netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
* @dev: network device
* @napi: NAPI context
* @poll: polling function
*
* This variant of netif_napi_add() should be used from drivers using NAPI
* to exclusively poll a TX queue.
* This avoids adding it to napi_hash[], thus polluting that hash table.
*/
static inline void netif_napi_add_tx(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int))
{
netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
void __netif_napi_del_locked(struct napi_struct *napi);
/**
* __netif_napi_del - remove a NAPI context
* @napi: NAPI context
*
* Warning: caller must observe RCU grace period before freeing memory
* containing @napi. Drivers might want to call this helper to combine
* all the needed RCU grace periods into a single one.
*/
static inline void __netif_napi_del(struct napi_struct *napi)
{
netdev_lock(napi->dev);
__netif_napi_del_locked(napi);
netdev_unlock(napi->dev);
}
static inline void netif_napi_del_locked(struct napi_struct *napi)
{
__netif_napi_del_locked(napi);
synchronize_net();
}
/**
* netif_napi_del - remove a NAPI context
* @napi: NAPI context
*
* netif_napi_del() removes a NAPI context from the network device NAPI list
*/
static inline void netif_napi_del(struct napi_struct *napi)
{
__netif_napi_del(napi);
synchronize_net();
}
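/*
 * Example (illustrative sketch): teardown mirrors setup; the NAPI context is
 * disabled and deleted before the memory embedding it goes away (typically
 * after unregister_netdev() and before free_netdev()). Names are
 * hypothetical.
 *
 *	napi_disable(&priv->napi);
 *	netif_napi_del(&priv->napi);
 *	free_netdev(dev);
 */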
int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs);
void netif_set_affinity_auto(struct net_device *dev);
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
netdevice_tracker dev_tracker;
int (*func) (struct sk_buff *,
struct net_device *,
struct packet_type *,
struct net_device *);
void (*list_func) (struct list_head *,
struct packet_type *,
struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;
};
struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff *(*gro_receive)(struct list_head *head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
struct packet_offload {
__be16 type; /* This is really htons(ether_type). */
u16 priority;
struct offload_callbacks callbacks;
struct list_head list;
};
/* often modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64_stats_t rx_packets;
u64_stats_t rx_bytes;
u64_stats_t tx_packets;
u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
} __aligned(4 * sizeof(u64));
struct pcpu_dstats {
u64_stats_t rx_packets;
u64_stats_t rx_bytes;
u64_stats_t tx_packets;
u64_stats_t tx_bytes;
u64_stats_t rx_drops;
u64_stats_t tx_drops;
struct u64_stats_sync syncp;
} __aligned(8 * sizeof(u64));
struct pcpu_lstats {
u64_stats_t packets;
u64_stats_t bytes;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
u64_stats_add(&tstats->rx_bytes, len);
u64_stats_inc(&tstats->rx_packets);
u64_stats_update_end(&tstats->syncp);
}
static inline void dev_sw_netstats_tx_add(struct net_device *dev,
unsigned int packets,
unsigned int len)
{
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
u64_stats_add(&tstats->tx_bytes, len);
u64_stats_add(&tstats->tx_packets, packets);
u64_stats_update_end(&tstats->syncp);
}
static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
u64_stats_update_begin(&lstats->syncp);
u64_stats_add(&lstats->bytes, len);
u64_stats_inc(&lstats->packets);
u64_stats_update_end(&lstats->syncp);
}
static inline void dev_dstats_rx_add(struct net_device *dev,
unsigned int len)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
u64_stats_inc(&dstats->rx_packets);
u64_stats_add(&dstats->rx_bytes, len);
u64_stats_update_end(&dstats->syncp);
}
static inline void dev_dstats_rx_dropped(struct net_device *dev)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
u64_stats_inc(&dstats->rx_drops);
u64_stats_update_end(&dstats->syncp);
}
static inline void dev_dstats_rx_dropped_add(struct net_device *dev,
unsigned int packets)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
u64_stats_add(&dstats->rx_drops, packets);
u64_stats_update_end(&dstats->syncp);
}
static inline void dev_dstats_tx_add(struct net_device *dev,
unsigned int len)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
u64_stats_inc(&dstats->tx_packets);
u64_stats_add(&dstats->tx_bytes, len);
u64_stats_update_end(&dstats->syncp);
}
static inline void dev_dstats_tx_dropped(struct net_device *dev)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
u64_stats_inc(&dstats->tx_drops);
u64_stats_update_end(&dstats->syncp);
}
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
if (pcpu_stats) { \
int __cpu; \
for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
pcpu_stats; \
})
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
#define devm_netdev_alloc_pcpu_stats(dev, type) \
({ \
typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
if (pcpu_stats) { \
int __cpu; \
for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
pcpu_stats; \
})
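/*
 * Example (illustrative sketch): a tunnel-style driver that manages its own
 * per-CPU counters (i.e. @pcpu_stat_type is left as "none") might allocate
 * them at setup time and bump them from the datapath; teardown and error
 * handling are elided.
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *
 *	dev_sw_netstats_rx_add(dev, skb->len);	// on receive
 *	dev_sw_netstats_tx_add(dev, 1, len);	// on transmit
 *
 *	free_percpu(dev->tstats);		// on teardown
 */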
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
NETDEV_LAG_TX_TYPE_RANDOM,
NETDEV_LAG_TX_TYPE_BROADCAST,
NETDEV_LAG_TX_TYPE_ROUNDROBIN,
NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
NETDEV_LAG_TX_TYPE_HASH,
};
enum netdev_lag_hash {
NETDEV_LAG_HASH_NONE,
NETDEV_LAG_HASH_L2,
NETDEV_LAG_HASH_L34,
NETDEV_LAG_HASH_L23,
NETDEV_LAG_HASH_E23,
NETDEV_LAG_HASH_E34,
NETDEV_LAG_HASH_VLAN_SRCMAC,
NETDEV_LAG_HASH_UNKNOWN,
};
struct netdev_lag_upper_info {
enum netdev_lag_tx_type tx_type;
enum netdev_lag_hash hash_type;
};
struct netdev_lag_lower_state_info {
u8 link_up : 1,
tx_enabled : 1;
};
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
* and the rtnetlink notification exclusion list in rtnetlink_event() when
* adding new types.
*/
enum netdev_cmd {
NETDEV_UP = 1, /* For now you can't veto a device up/down */
NETDEV_DOWN,
NETDEV_REBOOT, /* Tell a protocol stack a network interface
detected a hardware crash and restarted
- we can use this eg to kick tcp sessions
once done */
NETDEV_CHANGE, /* Notify device state change */
NETDEV_REGISTER,
NETDEV_UNREGISTER,
NETDEV_CHANGEMTU, /* notify after mtu change happened */
NETDEV_CHANGEADDR, /* notify after the address change */
NETDEV_PRE_CHANGEADDR, /* notify before the address change */
NETDEV_GOING_DOWN,
NETDEV_CHANGENAME,
NETDEV_FEAT_CHANGE,
NETDEV_BONDING_FAILOVER,
NETDEV_PRE_UP,
NETDEV_PRE_TYPE_CHANGE,
NETDEV_POST_TYPE_CHANGE,
NETDEV_POST_INIT,
NETDEV_PRE_UNINIT,
NETDEV_RELEASE,
NETDEV_NOTIFY_PEERS,
NETDEV_JOIN,
NETDEV_CHANGEUPPER,
NETDEV_RESEND_IGMP,
NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
NETDEV_CHANGEINFODATA,
NETDEV_BONDING_INFO,
NETDEV_PRECHANGEUPPER,
NETDEV_CHANGELOWERSTATE,
NETDEV_UDP_TUNNEL_PUSH_INFO,
NETDEV_UDP_TUNNEL_DROP_INFO,
NETDEV_CHANGE_TX_QUEUE_LEN,
NETDEV_CVLAN_FILTER_PUSH_INFO,
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
NETDEV_OFFLOAD_XSTATS_ENABLE,
NETDEV_OFFLOAD_XSTATS_DISABLE,
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
NETDEV_XDP_FEAT_CHANGE,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
int unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb);
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn);
int unregister_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn);
struct netdev_notifier_info {
struct net_device *dev;
struct netlink_ext_ack *extack;
};
struct netdev_notifier_info_ext {
struct netdev_notifier_info info; /* must be first */
union {
u32 mtu;
} ext;
};
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
};
struct netdev_notifier_changeupper_info {
struct netdev_notifier_info info; /* must be first */
struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */
bool linking; /* is the notification for link or unlink */
void *upper_info; /* upper dev info */
};
struct netdev_notifier_changelowerstate_info {
struct netdev_notifier_info info; /* must be first */
void *lower_state_info; /* is lower dev state */
};
struct netdev_notifier_pre_changeaddr_info {
struct netdev_notifier_info info; /* must be first */
const unsigned char *dev_addr;
};
enum netdev_offload_xstats_type {
NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
};
struct netdev_notifier_offload_xstats_info {
struct netdev_notifier_info info; /* must be first */
enum netdev_offload_xstats_type type;
union {
/* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
struct netdev_notifier_offload_xstats_rd *report_delta;
/* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
struct netdev_notifier_offload_xstats_ru *report_used;
};
};
int netdev_offload_xstats_enable(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct netlink_ext_ack *extack);
int netdev_offload_xstats_disable(struct net_device *dev,
enum netdev_offload_xstats_type type);
bool netdev_offload_xstats_enabled(const struct net_device *dev,
enum netdev_offload_xstats_type type);
int netdev_offload_xstats_get(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct rtnl_hw_stats64 *stats, bool *used,
struct netlink_ext_ack *extack);
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
const struct rtnl_hw_stats64 *stats);
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
void netdev_offload_xstats_push_delta(struct net_device *dev,
enum netdev_offload_xstats_type type,
const struct rtnl_hw_stats64 *stats);
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
info->dev = dev;
info->extack = NULL;
}
static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
return info->dev;
}
static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
return info->extack;
}
int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info);
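/*
 * Example (illustrative sketch): a subsystem interested in device events
 * registers a notifier block and recovers the device from the notifier info
 * pointer. foo_netdev_event() is hypothetical.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&foo_nb);
 */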
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d) \
list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_reverse(net, d) \
list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
for_each_netdev_rcu(dev_net_rcu(bond), slave) \
if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
#define for_each_netdev_dump(net, d, ifindex) \
for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \
ULONG_MAX, XA_PRESENT)); ifindex++)
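/*
 * Example (illustrative sketch): a reader that cannot take RTNL may walk the
 * device list of a namespace under RCU; the loop body is a placeholder.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev) {
 *		if (netif_running(dev))
 *			pr_debug("%s is running\n", dev->name);
 *	}
 *	rcu_read_unlock();
 */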
static inline struct net_device *next_net_device(struct net_device *dev)
{
struct list_head *lh;
struct net *net;
net = dev_net(dev);
lh = dev->dev_list.next;
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
struct list_head *lh;
struct net *net;
net = dev_net(dev);
lh = rcu_dereference(list_next_rcu(&dev->dev_list));
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
static inline struct net_device *first_net_device(struct net *net)
{
return list_empty(&net->dev_base_head) ? NULL :
net_device_entry(net->dev_base_head.next);
}
int netdev_boot_setup_check(struct net_device *dev);
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
struct net_device_path_stack *stack);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int netif_open(struct net_device *dev, struct netlink_ext_ack *extack);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void netif_close(struct net_device *dev);
void dev_close(struct net_device *dev);
void netif_close_many(struct list_head *head, bool unlink);
void netif_disable_lro(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
static inline int dev_queue_xmit(struct sk_buff *skb)
{
return __dev_queue_xmit(skb, NULL);
}
static inline int dev_queue_xmit_accel(struct sk_buff *skb,
struct net_device *sb_dev)
{
return __dev_queue_xmit(skb, sb_dev);
}
static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
int ret;
ret = __dev_direct_xmit(skb, queue_id);
if (!dev_xmit_complete(ret))
kfree_skb(skb);
return ret;
}
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, NULL);
}
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
netdevice_tracker *tracker, gfp_t gfp);
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp);
struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
unsigned short flags, unsigned short mask);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
void netdev_copy_name(struct net_device *dev, char *name);
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned int len)
{
if (!dev->header_ops || !dev->header_ops->create)
return 0;
return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}
static inline int dev_parse_header(const struct sk_buff *skb,
unsigned char *haddr)
{
const struct net_device *dev = skb->dev;
if (!dev->header_ops || !dev->header_ops->parse)
return 0;
return dev->header_ops->parse(skb, haddr);
}
static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
{
const struct net_device *dev = skb->dev;
if (!dev->header_ops || !dev->header_ops->parse_protocol)
return 0;
return dev->header_ops->parse_protocol(skb);
}
/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
char *ll_header, int len)
{
if (likely(len >= dev->hard_header_len))
return true;
if (len < dev->min_header_len)
return false;
if (capable(CAP_SYS_RAWIO)) {
memset(ll_header + len, 0, dev->hard_header_len - len);
return true;
}
if (dev->header_ops && dev->header_ops->validate)
return dev->header_ops->validate(ll_header, len);
return false;
}
static inline bool dev_has_header(const struct net_device *dev)
{
return dev->header_ops && dev->header_ops->create;
}
struct numa_drop_counters {
atomic_t drops0 ____cacheline_aligned_in_smp;
atomic_t drops1 ____cacheline_aligned_in_smp;
};
static inline int numa_drop_read(const struct numa_drop_counters *ndc)
{
return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1);
}
static inline void numa_drop_add(struct numa_drop_counters *ndc, int val)
{
int n = numa_node_id() % 2;
if (n)
atomic_add(val, &ndc->drops1);
else
atomic_add(val, &ndc->drops0);
}
static inline void numa_drop_reset(struct numa_drop_counters *ndc)
{
atomic_set(&ndc->drops0, 0);
atomic_set(&ndc->drops1, 0);
}
/*
* Incoming packets are placed on per-CPU queues
*/
struct softnet_data {
struct list_head poll_list;
struct sk_buff_head process_queue;
local_lock_t process_queue_bh_lock;
/* stats */
unsigned int processed;
unsigned int time_squeeze;
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
#endif
unsigned int received_rps;
bool in_net_rx_action;
bool in_napi_threaded_poll;
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
#endif
struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct sk_buff *completion_queue;
#ifdef CONFIG_XFRM_OFFLOAD
struct sk_buff_head xfrm_backlog;
#endif
/* written and read only by owning cpu: */
struct netdev_xmit xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
*/
unsigned int input_queue_head ____cacheline_aligned_in_smp;
/* Elements below can be accessed between CPUs for RPS/RFS */
call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
unsigned int input_queue_tail;
#endif
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
struct numa_drop_counters drop_counters;
int defer_ipi_scheduled ____cacheline_aligned_in_smp;
call_single_data_t defer_csd;
};
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
struct page_pool_bh {
struct page_pool *pool;
local_lock_t bh_lock;
};
DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}
#else
static inline int dev_recursion_level(void)
{
return current->net_xmit.recursion;
}
#endif
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
static inline void netif_tx_schedule_all(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++)
netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
* netif_start_queue - allow transmit
* @dev: network device
*
* Allow upper layers to call the device hard_start_xmit routine.
*/
static inline void netif_start_queue(struct net_device *dev)
{
netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_start_all_queues(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_start_queue(txq);
}
}
void netif_tx_wake_queue(struct netdev_queue *dev_queue);
/**
* netif_wake_queue - restart transmit
* @dev: network device
*
* Allow upper layers to call the device hard_start_xmit routine.
* Used for flow control when transmit resources are available.
*/
static inline void netif_wake_queue(struct net_device *dev)
{
netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_wake_queue(txq);
}
}
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
/* Paired with READ_ONCE() from dev_watchdog() */
WRITE_ONCE(dev_queue->trans_start, jiffies);
/* This barrier is paired with smp_mb() from dev_watchdog() */
smp_mb__before_atomic();
/* Must be an atomic op see netif_txq_try_stop() */
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
* netif_stop_queue - stop transmitting packets
* @dev: network device
*
* Stop upper layers calling the device hard_start_xmit routine.
* Used for flow control when transmit resources are unavailable.
*/
static inline void netif_stop_queue(struct net_device *dev)
{
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
void netif_tx_stop_all_queues(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
* netif_queue_stopped - test if transmit queue is flowblocked
* @dev: network device
*
* Test if transmit queue on device is currently unable to send.
*/
static inline bool netif_queue_stopped(const struct net_device *dev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
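/*
 * Example (illustrative sketch): the classic single-queue flow control
 * pattern stops the queue from ndo_start_xmit() when the TX ring fills and
 * wakes it from the TX completion handler once room is reclaimed. The
 * foo_tx_ring_*() helpers are hypothetical.
 *
 *	// in ndo_start_xmit(), after posting the descriptor:
 *	if (foo_tx_ring_full(priv))
 *		netif_stop_queue(dev);
 *
 *	// in the TX completion handler, after reclaiming descriptors:
 *	if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */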
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}
static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}
static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
/**
* netdev_queue_set_dql_min_limit - set dql minimum limit
* @dev_queue: pointer to transmit queue
* @min_limit: dql minimum limit
*
* Forces xmit_more() to return true until the minimum threshold
* defined by @min_limit is reached (or until the tx queue is
* empty). Warning: to be used with care; misuse will impact
* latency.
*/
static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
unsigned int min_limit)
{
#ifdef CONFIG_BQL
dev_queue->dql.min_limit = min_limit;
#endif
}
static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
/* Non-BQL migrated drivers will return 0, too. */
return dql_avail(&txq->dql);
#else
return 0;
#endif
}
/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their ndo_start_xmit(),
* to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
prefetchw(&dev_queue->dql.num_queued);
#endif
}
/**
* netdev_txq_bql_complete_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their TX completion path,
* to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
prefetchw(&dev_queue->dql.limit);
#endif
}
/**
* netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
* @dev_queue: network device queue
* @bytes: number of bytes queued to the device queue
*
* Report the number of bytes queued for sending/completion to the network
* device hardware queue. @bytes should be a good approximation and should
* exactly match netdev_completed_queue() @bytes.
* This is typically called once per packet, from ndo_start_xmit().
*/
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{
#ifdef CONFIG_BQL
dql_queued(&dev_queue->dql, bytes);
if (likely(dql_avail(&dev_queue->dql) >= 0))
return;
/* Paired with READ_ONCE() from dev_watchdog() */
WRITE_ONCE(dev_queue->trans_start, jiffies);
/* This barrier is paired with smp_mb() from dev_watchdog() */
smp_mb__before_atomic();
set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
/*
* The XOFF flag must be set before checking the dql_avail below,
* because in netdev_tx_completed_queue we update the dql_completed
* before checking the XOFF flag.
*/
smp_mb__after_atomic();
/* check again in case another CPU has just made room avail */
if (unlikely(dql_avail(&dev_queue->dql) >= 0))
clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
/* Variant of netdev_tx_sent_queue() for drivers that are aware
* that they should not test BQL status themselves.
* We do want to change __QUEUE_STATE_STACK_XOFF only for the last
* skb of a batch.
* Returns true if the doorbell must be used to kick the NIC.
*/
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes,
bool xmit_more)
{
if (xmit_more) {
#ifdef CONFIG_BQL
dql_queued(&dev_queue->dql, bytes);
#endif
return netif_tx_queue_stopped(dev_queue);
}
netdev_tx_sent_queue(dev_queue, bytes);
return true;
}
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
* @bytes: number of bytes queued to the hardware device queue
*
* Report the number of bytes queued for sending/completion to the network
* device hardware queue #0. @bytes should be a good approximation and should
* exactly match netdev_completed_queue() @bytes.
* This is typically called once per packet, from ndo_start_xmit().
*/
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
static inline bool __netdev_sent_queue(struct net_device *dev,
unsigned int bytes,
bool xmit_more)
{
return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
xmit_more);
}
/**
* netdev_tx_completed_queue - report number of packets/bytes at TX completion.
* @dev_queue: network device queue
* @pkts: number of packets (currently ignored)
* @bytes: number of bytes dequeued from the device queue
*
* Must be called at most once per TX completion round (and not per
* individual packet), so that BQL can adjust its limits appropriately.
*/
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
if (unlikely(!bytes))
return;
dql_completed(&dev_queue->dql, bytes);
/*
* Without the memory barrier there is a small possibility that
* netdev_tx_sent_queue will miss the update and cause the queue to
* be stopped forever
*/
smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
if (unlikely(dql_avail(&dev_queue->dql) < 0))
return;
if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
netif_schedule_queue(dev_queue);
#endif
}
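/*
 * Example (illustrative sketch): BQL accounting pairs one call per skb in the
 * xmit path with one call per TX completion round. The foo_reclaim_tx()
 * helper and its out-parameters are hypothetical.
 *
 *	// ndo_start_xmit(), once per skb handed to the hardware:
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 *	// TX completion handler, once per round:
 *	foo_reclaim_tx(priv, &pkts, &bytes);
 *	netdev_tx_completed_queue(txq, pkts, bytes);
 */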
/**
* netdev_completed_queue - report bytes and packets completed by device
* @dev: network device
* @pkts: actual number of packets sent over the medium
* @bytes: actual number of bytes sent over the medium
*
* Report the number of bytes and packets transmitted by the network device
* hardware queue over the physical medium, @bytes must exactly match the
* @bytes amount passed to netdev_sent_queue()
*/
static inline void netdev_completed_queue(struct net_device *dev,
unsigned int pkts, unsigned int bytes)
{
netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
dql_reset(&q->dql);
#endif
}
/**
* netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
* @dev: network device
* @qid: stack index of the queue to reset
*/
static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
u32 qid)
{
netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
}
/**
* netdev_reset_queue - reset the packets and bytes count of a network device
* @dev_queue: network device
*
* Reset the bytes and packet count of a network device and clear the
* software flow control OFF bit for this network device
*/
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
netdev_tx_reset_subqueue(dev_queue, 0);
}
/**
* netdev_cap_txqueue - check if selected tx queue exceeds device queues
* @dev: network device
* @queue_index: given tx queue index
*
* Returns 0 if given tx queue index >= number of device tx queues,
* otherwise returns the originally passed tx queue index.
*/
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
dev->name, queue_index,
dev->real_num_tx_queues);
return 0;
}
return queue_index;
}
/**
* netif_running - test if up
* @dev: network device
*
* Test if the device has been brought up.
*/
static inline bool netif_running(const struct net_device *dev)
{
return test_bit(__LINK_STATE_START, &dev->state);
}
/*
* Routines to manage the subqueues on a device. We only need start,
* stop, and a check if it's stopped. All other device management is
* done at the overall netdevice level.
* There is also a test for whether the device is multiqueue.
*/
/**
* netif_start_subqueue - allow sending packets on subqueue
* @dev: network device
* @queue_index: sub queue index
*
* Start individual transmit queue of a device with multiple transmit queues.
*/
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
netif_tx_start_queue(txq);
}
/**
* netif_stop_subqueue - stop sending packets on subqueue
* @dev: network device
* @queue_index: sub queue index
*
* Stop individual transmit queue of a device with multiple transmit queues.
*/
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
netif_tx_stop_queue(txq);
}
/**
* __netif_subqueue_stopped - test status of subqueue
* @dev: network device
* @queue_index: sub queue index
*
* Check individual transmit queue of a device with multiple transmit queues.
*/
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
return netif_tx_queue_stopped(txq);
}
/**
* netif_subqueue_stopped - test status of subqueue
* @dev: network device
* @skb: sub queue buffer pointer
*
* Check individual transmit queue of a device with multiple transmit queues.
*/
static inline bool netif_subqueue_stopped(const struct net_device *dev,
struct sk_buff *skb)
{
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
* netif_wake_subqueue - allow sending packets on subqueue
* @dev: network device
* @queue_index: sub queue index
*
* Resume individual transmit queue of a device with multiple transmit queues.
*/
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
netif_tx_wake_queue(txq);
}
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
u16 index, enum xps_map_type type);
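/*
 * Example (illustrative sketch): a driver spreading transmit work across CPUs
 * might program one XPS map per TX queue; the 1:1 queue-to-CPU layout below
 * is hypothetical.
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i), i);
 */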
/**
* netif_attr_test_mask - Test a CPU or Rx queue set in a mask
* @j: CPU/Rx queue index
* @mask: bitmask of all cpus/rx queues
* @nr_bits: number of bits in the bitmask
*
* Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
*/
static inline bool netif_attr_test_mask(unsigned long j,
const unsigned long *mask,
unsigned int nr_bits)
{
cpu_max_bits_warn(j, nr_bits);
return test_bit(j, mask);
}
/**
* netif_attr_test_online - Test for online CPU/Rx queue
* @j: CPU/Rx queue index
* @online_mask: bitmask for CPUs/Rx queues that are online
* @nr_bits: number of bits in the bitmask
*
* Returns: true if a CPU/Rx queue is online.
*/
static inline bool netif_attr_test_online(unsigned long j,
const unsigned long *online_mask,
unsigned int nr_bits)
{
cpu_max_bits_warn(j, nr_bits);
if (online_mask)
return test_bit(j, online_mask);
return (j < nr_bits);
}
/**
* netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
* @n: CPU/Rx queue index
* @srcp: the cpumask/Rx queue mask pointer
* @nr_bits: number of bits in the bitmask
*
* Returns: next (after n) CPU/Rx queue index in the mask;
* >= nr_bits if no further CPUs/Rx queues set.
*/
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
{
/* -1 is a legal arg here. */
if (n != -1)
cpu_max_bits_warn(n, nr_bits);
if (srcp)
return find_next_bit(srcp, nr_bits, n + 1);
return n + 1;
}
/**
* netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
* @n: CPU/Rx queue index
* @src1p: the first CPUs/Rx queues mask pointer
* @src2p: the second CPUs/Rx queues mask pointer
* @nr_bits: number of bits in the bitmask
*
* Returns: next (after n) CPU/Rx queue index set in both masks;
* >= nr_bits if no further CPUs/Rx queues set in both.
*/
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
unsigned int nr_bits)
{
/* -1 is a legal arg here. */
if (n != -1)
cpu_max_bits_warn(n, nr_bits);
if (src1p && src2p)
return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
else if (src1p)
return find_next_bit(src1p, nr_bits, n + 1);
else if (src2p)
return find_next_bit(src2p, nr_bits, n + 1);
return n + 1;
}
#else
static inline int netif_set_xps_queue(struct net_device *dev,
const struct cpumask *mask,
u16 index)
{
return 0;
}
static inline int __netif_set_xps_queue(struct net_device *dev,
const unsigned long *mask,
u16 index, enum xps_map_type type)
{
return 0;
}
#endif
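/*
* Example (illustrative sketch): with CONFIG_XPS enabled, the
* netif_attrmask_*() helpers behave like the cpumask iterators but operate
* on raw bitmaps, so the same loop works for CPU masks and Rx-queue masks.
* Walking every bit set in a hypothetical 'mask' of 'nr_bits' bits:
*
*    int j = -1;
*
*    while ((j = netif_attrmask_next(j, mask, nr_bits)) < nr_bits) {
*        // j is the index of the next CPU/Rx queue set in the mask
*    }
*/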
/**
* netif_is_multiqueue - test if device has multiple transmit queues
* @dev: network device
*
* Check if device has multiple transmit queues
*/
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
return dev->num_tx_queues > 1;
}
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
int netif_set_real_num_queues(struct net_device *dev,
unsigned int txq, unsigned int rxq);
int netif_get_num_default_rss_queues(void);
void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
/*
* It is not allowed to call kfree_skb() or consume_skb() from hardware
* interrupt context or with hardware interrupts being disabled.
* (in_hardirq() || irqs_disabled())
*
* We provide four helpers that can be used in the following contexts:
*
* dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
* replacing kfree_skb(skb)
*
* dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
* Typically used in place of consume_skb(skb) in TX completion path
*
* dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
* replacing kfree_skb(skb)
*
* dev_consume_skb_any(skb) when caller doesn't know its current irq context,
* and consumed a packet. Used in place of consume_skb(skb)
*/
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
}
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_any(struct sk_buff *skb)
{
dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}
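/*
* Example (illustrative sketch): a TX completion handler running in hard
* IRQ context must not call kfree_skb()/consume_skb() directly. Packets
* transmitted successfully are released with dev_consume_skb_irq(), while
* packets dropped due to an error use dev_kfree_skb_irq() so that drop
* monitors still see them. 'desc_error' is a hypothetical status flag.
*
*    if (desc_error)
*        dev_kfree_skb_irq(skb);
*    else
*        dev_consume_skb_irq(skb);
*/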
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
const struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb);
static inline gro_result_t napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb)
{
return gro_receive_skb(&napi->gro, skb);
}
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
static inline void napi_free_frags(struct napi_struct *napi)
{
kfree_skb(napi->skb);
napi->skb = NULL;
}
bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
int dev_eth_ioctl(struct net_device *dev,
struct ifreq *ifr, unsigned int cmd);
int generic_hwtstamp_get_lower(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_cfg);
int generic_hwtstamp_set_lower(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_cfg,
struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int netif_get_flags(const struct net_device *dev);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
int netif_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
int netif_set_alias(struct net_device *dev, const char *alias, size_t len);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex,
struct netlink_ext_ack *extack);
int dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat);
int __netif_set_mtu(struct net_device *dev, int new_mtu);
int netif_set_mtu(struct net_device *dev, int new_mtu);
int dev_set_mtu(struct net_device *, int);
int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int netif_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
u8 dev_xdp_sb_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
u32 dev_get_min_mp_channel_count(const struct net_device *dev);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb,
const bool check_mtu)
{
const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
unsigned int len;
if (!(dev->flags & IFF_UP))
return false;
if (!check_mtu)
return true;
len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
if (skb->len <= len)
return true;
/* if TSO is enabled, we don't care about the length as the packet
* could be forwarded without being segmented before
*/
if (skb_is_gso(skb))
return true;
return false;
}
void netdev_core_stats_inc(struct net_device *dev, u32 offset);
#define DEV_CORE_STATS_INC(FIELD) \
static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
{ \
netdev_core_stats_inc(dev, \
offsetof(struct net_device_core_stats, FIELD)); \
}
DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)
DEV_CORE_STATS_INC(rx_nohandler)
DEV_CORE_STATS_INC(rx_otherhost_dropped)
#undef DEV_CORE_STATS_INC
static __always_inline int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
return NET_RX_DROP;
}
skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}
bool dev_nit_active_rcu(const struct net_device *dev);
static inline bool dev_nit_active(const struct net_device *dev)
{
bool ret;
rcu_read_lock();
ret = dev_nit_active_rcu(dev);
rcu_read_unlock();
return ret;
}
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
static inline void __dev_put(struct net_device *dev)
{
if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
this_cpu_dec(*dev->pcpu_refcnt);
#else
refcount_dec(&dev->dev_refcnt);
#endif
}
}
static inline void __dev_hold(struct net_device *dev)
{
if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
this_cpu_inc(*dev->pcpu_refcnt);
#else
refcount_inc(&dev->dev_refcnt);
#endif
}
}
static inline void __netdev_tracker_alloc(struct net_device *dev,
netdevice_tracker *tracker,
gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
#endif
}
/* netdev_tracker_alloc() can upgrade a prior untracked reference
* taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
*/
static inline void netdev_tracker_alloc(struct net_device *dev,
netdevice_tracker *tracker, gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
refcount_dec(&dev->refcnt_tracker.no_tracker);
__netdev_tracker_alloc(dev, tracker, gfp);
#endif
}
static inline void netdev_tracker_free(struct net_device *dev,
netdevice_tracker *tracker)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
ref_tracker_free(&dev->refcnt_tracker, tracker);
#endif
}
static inline void netdev_hold(struct net_device *dev,
netdevice_tracker *tracker, gfp_t gfp)
{
if (dev) {
__dev_hold(dev);
__netdev_tracker_alloc(dev, tracker, gfp);
}
}
static inline void netdev_put(struct net_device *dev,
netdevice_tracker *tracker)
{
if (dev) {
netdev_tracker_free(dev, tracker);
__dev_put(dev);
}
}
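/*
* Example (illustrative sketch): code that stores a long-lived net_device
* pointer should pair netdev_hold() with netdev_put() and keep the tracker
* next to the pointer, so leaked references can be attributed when
* CONFIG_NET_DEV_REFCNT_TRACKER is enabled. 'priv' is a hypothetical
* structure containing 'struct net_device *dev' and a
* 'netdevice_tracker dev_tracker' member.
*
*    netdev_hold(dev, &priv->dev_tracker, GFP_KERNEL);
*    priv->dev = dev;
*    ...
*    netdev_put(priv->dev, &priv->dev_tracker);
*    priv->dev = NULL;
*/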
/**
* dev_hold - get reference to device
* @dev: network device
*
* Hold reference to device to keep it from being freed.
* Try using netdev_hold() instead.
*/
static inline void dev_hold(struct net_device *dev)
{
netdev_hold(dev, NULL, GFP_ATOMIC);
}
/**
* dev_put - release reference to device
* @dev: network device
*
* Release reference to device to allow it to be freed.
* Try using netdev_put() instead.
*/
static inline void dev_put(struct net_device *dev)
{
netdev_put(dev, NULL);
}
DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
static inline void netdev_ref_replace(struct net_device *odev,
struct net_device *ndev,
netdevice_tracker *tracker,
gfp_t gfp)
{
if (odev)
netdev_tracker_free(odev, tracker);
__dev_hold(ndev);
__dev_put(odev);
if (ndev)
__netdev_tracker_alloc(ndev, tracker, gfp);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
* and _off may be called from IRQ context, but it is the caller
* who is responsible for serializing these calls.
*
* The name "carrier" is something of a misnomer; these functions should
* really be called netif_lowerlayer_*() because they represent the state
* of any kind of lower layer, not just hardware media.
*/
void linkwatch_fire_event(struct net_device *dev);
/**
* linkwatch_sync_dev - sync linkwatch for the given device
* @dev: network device to sync linkwatch for
*
* Sync linkwatch for the given device, removing it from the
* pending work list (if queued).
*/
void linkwatch_sync_dev(struct net_device *dev);
void __linkwatch_sync_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
* @dev: network device
*
* Check if carrier is present on device
*/
static inline bool netif_carrier_ok(const struct net_device *dev)
{
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
unsigned long dev_trans_start(struct net_device *dev);
void netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
void netif_carrier_event(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
* @dev: network device
*
* Mark device as dormant (as per RFC2863).
*
* The dormant state indicates that the relevant interface is not
* actually in a condition to pass packets (i.e., it is not 'up') but is
* in a "pending" state, waiting for some external event. For "on-
* demand" interfaces, this new state identifies the situation where the
* interface is waiting for events to place it in the up state.
*/
static inline void netif_dormant_on(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}
/**
* netif_dormant_off - set device as not dormant.
* @dev: network device
*
* Device is not in dormant state.
*/
static inline void netif_dormant_off(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}
/**
* netif_dormant - test if device is dormant
* @dev: network device
*
* Check if device is dormant.
*/
static inline bool netif_dormant(const struct net_device *dev)
{
return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
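/*
* Example (illustrative sketch): a driver whose link depends on an external
* event (for instance an authentication exchange handled by user space) can
* report this through the dormant state instead of toggling the carrier:
*
*    netif_dormant_on(dev);    // waiting for the external event
*    ...
*    netif_dormant_off(dev);   // event completed, operstate may go UP
*/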
/**
* netif_testing_on - mark device as under test.
* @dev: network device
*
* Mark device as under test (as per RFC2863).
*
* The testing state indicates that some test(s) must be performed on
* the interface. After completion of the test, the interface state
* will change to up, dormant, or down, as appropriate.
*/
static inline void netif_testing_on(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
linkwatch_fire_event(dev);
}
/**
* netif_testing_off - set device as not under test.
* @dev: network device
*
* Device is not in testing state.
*/
static inline void netif_testing_off(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
linkwatch_fire_event(dev);
}
/**
* netif_testing - test if device is under test
* @dev: network device
*
* Check if device is under test
*/
static inline bool netif_testing(const struct net_device *dev)
{
return test_bit(__LINK_STATE_TESTING, &dev->state);
}
/**
* netif_oper_up - test if device is operational
* @dev: network device
*
* Check if carrier is operational
*/
static inline bool netif_oper_up(const struct net_device *dev)
{
unsigned int operstate = READ_ONCE(dev->operstate);

return operstate == IF_OPER_UP ||
operstate == IF_OPER_UNKNOWN /* backward compat */;
}
/**
* netif_device_present - is device available or removed
* @dev: network device
*
* Check if device has not been removed from system.
*/
static inline bool netif_device_present(const struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
void netif_device_detach(struct net_device *dev);
void netif_device_attach(struct net_device *dev);
/*
* Network interface message level settings
*/
enum {
NETIF_MSG_DRV_BIT,
NETIF_MSG_PROBE_BIT,
NETIF_MSG_LINK_BIT,
NETIF_MSG_TIMER_BIT,
NETIF_MSG_IFDOWN_BIT,
NETIF_MSG_IFUP_BIT,
NETIF_MSG_RX_ERR_BIT,
NETIF_MSG_TX_ERR_BIT,
NETIF_MSG_TX_QUEUED_BIT,
NETIF_MSG_INTR_BIT,
NETIF_MSG_TX_DONE_BIT,
NETIF_MSG_RX_STATUS_BIT,
NETIF_MSG_PKTDATA_BIT,
NETIF_MSG_HW_BIT,
NETIF_MSG_WOL_BIT,
/* When you add a new bit above, update netif_msg_class_names array
* in net/ethtool/common.c
*/
NETIF_MSG_CLASS_COUNT,
};
/* Both ethtool_ops interface and internal driver implementation use u32 */
static_assert(NETIF_MSG_CLASS_COUNT <= 32);
#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
#define NETIF_MSG_DRV __NETIF_MSG(DRV)
#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
#define NETIF_MSG_LINK __NETIF_MSG(LINK)
#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
#define NETIF_MSG_INTR __NETIF_MSG(INTR)
#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
#define NETIF_MSG_HW __NETIF_MSG(HW)
#define NETIF_MSG_WOL __NETIF_MSG(WOL)
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
/* use default */
if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
return default_msg_enable_bits;
if (debug_value == 0) /* no output */
return 0;
/* set low N bits */
return (1U << debug_value) - 1;
}
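/*
* Example (illustrative sketch): drivers typically convert a 'debug' module
* parameter into a message-level bitmap once at probe time and then gate
* their log statements on the netif_msg_*() helpers. 'debug' and 'priv'
* (with a u32 'msg_enable' member) are hypothetical driver variables.
*
*    priv->msg_enable = netif_msg_init(debug,
*                                      NETIF_MSG_DRV | NETIF_MSG_PROBE |
*                                      NETIF_MSG_LINK);
*    ...
*    if (netif_msg_link(priv))
*        netdev_info(dev, "link is up\n");
*/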
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
__acquire(&txq->_xmit_lock);
return true;
}
static inline void __netif_tx_release(struct netdev_queue *txq)
{
__release(&txq->_xmit_lock);
}
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok)) {
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
/*
* txq->trans_start can be read locklessly from dev_watchdog()
*/
static inline void txq_trans_update(const struct net_device *dev,
struct netdev_queue *txq)
{
if (!dev->lltx)
WRITE_ONCE(txq->trans_start, jiffies);
}
static inline void txq_trans_cond_update(struct netdev_queue *txq)
{
unsigned long now = jiffies;
if (READ_ONCE(txq->trans_start) != now)
WRITE_ONCE(txq->trans_start, now);
}
/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
static inline void netif_trans_update(struct net_device *dev)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
txq_trans_cond_update(txq);
}
/**
* netif_tx_lock - grab network device transmit lock
* @dev: network device
*
* Get network device transmit lock
*/
void netif_tx_lock(struct net_device *dev);
static inline void netif_tx_lock_bh(struct net_device *dev)
{
local_bh_disable();
netif_tx_lock(dev);
}
void netif_tx_unlock(struct net_device *dev);
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
netif_tx_unlock(dev);
local_bh_enable();
}
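/*
* Example (illustrative sketch): netif_tx_lock_bh()/netif_tx_unlock_bh()
* serialize against the transmit path for all TX queues of the device,
* which is useful when reconfiguring transmit-related hardware state from
* process context:
*
*    netif_tx_lock_bh(dev);
*    // reprogram TX rings / offload state here (hypothetical driver work)
*    netif_tx_unlock_bh(dev);
*/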
#define HARD_TX_LOCK(dev, txq, cpu) { \
if (!(dev)->lltx) { \
__netif_tx_lock(txq, cpu); \
} else { \
__netif_tx_acquire(txq); \
} \
}
#define HARD_TX_TRYLOCK(dev, txq) \
(!(dev)->lltx ? \
__netif_tx_trylock(txq) : \
__netif_tx_acquire(txq))
#define HARD_TX_UNLOCK(dev, txq) { \
if (!(dev)->lltx) { \
__netif_tx_unlock(txq); \
} else { \
__netif_tx_release(txq); \
} \
}
static inline void netif_tx_disable(struct net_device *dev)
{
unsigned int i;
int cpu;
local_bh_disable();
cpu = smp_processor_id();
spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
__netif_tx_lock(txq, cpu);
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
unsigned char nest_level = 0;
#ifdef CONFIG_LOCKDEP
nest_level = dev->nested_level;
#endif
spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_lock_bh(struct net_device *dev)
{
unsigned char nest_level = 0;
#ifdef CONFIG_LOCKDEP
nest_level = dev->nested_level;
#endif
local_bh_disable();
spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_unlock(struct net_device *dev)
{
spin_unlock(&dev->addr_list_lock);
}
static inline void netif_addr_unlock_bh(struct net_device *dev)
{
spin_unlock_bh(&dev->addr_list_lock);
}
/*
* dev_addrs walker. Should be used only for read access. Call with
* rcu_read_lock held.
*/
#define for_each_dev_addr(dev, ha) \
list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
void ether_setup(struct net_device *dev);
/* Allocate dummy net_device */
struct net_device *alloc_netdev_dummy(int sizeof_priv);
/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
count)
int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
int devm_register_netdev(struct device *dev, struct net_device *ndev);
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *));
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *, int),
int (*unsync)(struct net_device *,
const unsigned char *, int));
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
void dev_addr_mod(struct net_device *dev, unsigned int offset,
const void *addr, size_t len);
static inline void
__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
dev_addr_mod(dev, 0, addr, len);
}
static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
{
__dev_addr_set(dev, addr, dev->addr_len);
}
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);
/**
* __dev_uc_sync - Synchronize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
*/
static inline int __dev_uc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *))
{
return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}
/**
* __dev_uc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_uc_sync().
*/
static inline void __dev_uc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
{
__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}
/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);
/**
* __dev_mc_sync - Synchronize device's multicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
*/
static inline int __dev_mc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *))
{
return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}
/**
* __dev_mc_unsync - Remove synchronized addresses from device
* @dev: device to sync
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_mc_sync().
*/
static inline void __dev_mc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
{
__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
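/*
* Example (illustrative sketch): a driver's .ndo_set_rx_mode() typically
* feeds both address lists through these helpers, so the core only invokes
* the callbacks for addresses that were actually added or removed.
* foo_add_addr() and foo_del_addr() are hypothetical driver functions with
* the 'int (*)(struct net_device *, const unsigned char *)' signature.
*
*    static void foo_set_rx_mode(struct net_device *dev)
*    {
*        __dev_uc_sync(dev, foo_add_addr, foo_del_addr);
*        __dev_mc_sync(dev, foo_add_addr, foo_del_addr);
*    }
*/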
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
int netif_set_promiscuity(struct net_device *dev, int inc);
int dev_set_promiscuity(struct net_device *dev, int inc);
int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
int dev_set_allmulti(struct net_device *dev, int inc);
void netif_state_change(struct net_device *dev);
void netdev_state_change(struct net_device *dev);
void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats);
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats);
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
enum {
NESTED_SYNC_IMM_BIT,
NESTED_SYNC_TODO_BIT,
};
#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
struct netdev_nested_priv {
unsigned char flags;
void *data;
};
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
for (iter = &(dev)->adj_list.upper, \
updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
updev; \
updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
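/*
* Example (illustrative sketch): walking the immediate upper devices of
* 'dev', for instance to find a bridge or bond stacked on top of it. The
* iteration must run under rcu_read_lock():
*
*    struct net_device *upper;
*    struct list_head *iter;
*
*    rcu_read_lock();
*    netdev_for_each_upper_dev_rcu(dev, upper, iter) {
*        // inspect 'upper' here
*    }
*    rcu_read_unlock();
*/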
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *upper_dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv);
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
bool netdev_has_any_upper_dev(struct net_device *dev);
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
struct list_head **iter);
#define netdev_for_each_lower_private(dev, priv, iter) \
for (iter = (dev)->adj_list.lower.next, \
priv = netdev_lower_get_next_private(dev, &(iter)); \
priv; \
priv = netdev_lower_get_next_private(dev, &(iter)))
#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
for (iter = &(dev)->adj_list.lower, \
priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
priv; \
priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
void *netdev_lower_get_next(struct net_device *dev,
struct list_head **iter);
#define netdev_for_each_lower_dev(dev, ldev, iter) \
for (iter = (dev)->adj_list.lower.next, \
ldev = netdev_lower_get_next(dev, &(iter)); \
ldev; \
ldev = netdev_lower_get_next(dev, &(iter)))
struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter);
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv);
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
struct netlink_ext_ack *extack);
int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev,
void *upper_priv, void *upper_info,
struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
int netdev_adjacent_change_prepare(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev,
struct netlink_ext_ack *extack);
void netdev_adjacent_change_commit(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev);
void netdev_adjacent_change_abort(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features);
struct netdev_bonding_info {
ifslave slave;
ifbond master;
};
struct netdev_notifier_bonding_info {
struct netdev_notifier_info info; /* must be first */
struct netdev_bonding_info bonding_info;
};
void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
void ethtool_notify(struct net_device *dev, unsigned int cmd);
#else
static inline void ethtool_notify(struct net_device *dev, unsigned int cmd)
{
}
#endif
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
__be16 protocol)
{
if (protocol == htons(ETH_P_FCOE))
return !!(features & NETIF_F_FCOE_CRC);
/* Assume this is an IP checksum (not SCTP CRC) */
if (features & NETIF_F_HW_CSUM) {
/* Can checksum everything */
return true;
}
switch (protocol) {
case htons(ETH_P_IP):
return !!(features & NETIF_F_IP_CSUM);
case htons(ETH_P_IPV6):
return !!(features & NETIF_F_IPV6_CSUM);
default:
return false;
}
}
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev,
struct sk_buff *skb)
{
}
#endif
/* rx skb timestamps */
void net_enable_timestamp(void);
void net_disable_timestamp(void);
static inline ktime_t netdev_get_tstamp(struct net_device *dev,
const struct skb_shared_hwtstamps *hwtstamps,
bool cycles)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_get_tstamp)
return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
return hwtstamps->hwtstamp;
}
#ifndef CONFIG_PREEMPT_RT
static inline void netdev_xmit_set_more(bool more)
{
__this_cpu_write(softnet_data.xmit.more, more);
}
static inline bool netdev_xmit_more(void)
{
return __this_cpu_read(softnet_data.xmit.more);
}
#else
static inline void netdev_xmit_set_more(bool more)
{
current->net_xmit.more = more;
}
static inline bool netdev_xmit_more(void)
{
return current->net_xmit.more;
}
#endif
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
bool more)
{
netdev_xmit_set_more(more);
return ops->ndo_start_xmit(skb, dev);
}
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{
const struct net_device_ops *ops = dev->netdev_ops;
netdev_tx_t rc;
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
txq_trans_update(dev, txq);
return rc;
}
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
if (f1 & NETIF_F_HW_CSUM)
f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
else
f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
return f1 & f2;
}
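/*
* Example (illustrative sketch): because NETIF_F_HW_CSUM implies the
* protocol-specific checksum features, intersecting the feature sets of two
* stacked devices treats them as equivalent:
*
*    // lower device offers NETIF_F_HW_CSUM, upper device NETIF_F_IP_CSUM
*    netdev_features_t f = netdev_intersect_features(NETIF_F_HW_CSUM,
*                                                    NETIF_F_IP_CSUM);
*    // f now contains NETIF_F_IP_CSUM
*/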
static inline netdev_features_t netdev_get_wanted_features(
struct net_device *dev)
{
return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
netdev_features_t one, netdev_features_t mask);
/* Allow TSO to be used on stacked devices:
* performing the GSO segmentation before the last device
* is a performance improvement.
*/
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
netdev_features_t mask)
{
return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
void skb_warn_bad_offload(const struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
netdev_features_t feature;
if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
gso_type |= __SKB_GSO_TCP_FIXEDID;
feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ACCECN !=
(NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
static inline bool netif_needs_gso(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
void netif_inherit_tso_max(struct net_device *to,
const struct net_device *from);
static inline unsigned int
netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
return skb->protocol == htons(ETH_P_IPV6) ?
READ_ONCE(dev->gro_max_size) :
READ_ONCE(dev->gro_ipv4_max_size);
}
static inline unsigned int
netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
/* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
return skb->protocol == htons(ETH_P_IPV6) ? READ_ONCE(dev->gso_max_size) :
READ_ONCE(dev->gso_ipv4_max_size);
}
static inline bool netif_is_macsec(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACSEC;
}
static inline bool netif_is_macvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
}
static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN_PORT;
}
static inline bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}
static inline bool netif_is_bond_slave(const struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
static inline bool netif_supports_nofcs(struct net_device *dev)
{
return dev->priv_flags & IFF_SUPP_NOFCS;
}
static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}
static inline bool netif_is_l3_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_MASTER;
}
static inline bool netif_is_l3_slave(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_SLAVE;
}
static inline int dev_sdif(const struct net_device *dev)
{
#ifdef CONFIG_NET_L3_MASTER_DEV
if (netif_is_l3_slave(dev))
return dev->ifindex;
#endif
return 0;
}
static inline bool netif_is_bridge_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_EBRIDGE;
}
static inline bool netif_is_bridge_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_BRIDGE_PORT;
}
static inline bool netif_is_ovs_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_OPENVSWITCH;
}
static inline bool netif_is_ovs_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_OVS_DATAPATH;
}
static inline bool netif_is_any_bridge_master(const struct net_device *dev)
{
return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
}
static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}
static inline bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
}
static inline bool netif_is_team_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM_PORT;
}
static inline bool netif_is_lag_master(const struct net_device *dev)
{
return netif_is_bond_master(dev) || netif_is_team_master(dev);
}
static inline bool netif_is_lag_port(const struct net_device *dev)
{
return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
return dev->priv_flags & IFF_RXFH_CONFIGURED;
}
static inline bool netif_is_failover(const struct net_device *dev)
{
return dev->priv_flags & IFF_FAILOVER;
}
static inline bool netif_is_failover_slave(const struct net_device *dev)
{
return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
return netif_is_macsec(dev);
}
extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* netdev_printk helpers, similar to dev_printk */
static inline const char *netdev_name(const struct net_device *dev)
{
if (!dev->name[0] || strchr(dev->name, '%'))
return "(unnamed net_device)";
return dev->name;
}
static inline const char *netdev_reg_state(const struct net_device *dev)
{
u8 reg_state = READ_ONCE(dev->reg_state);

switch (reg_state) {
case NETREG_UNINITIALIZED: return " (uninitialized)";
case NETREG_REGISTERED: return "";
case NETREG_UNREGISTERING: return " (unregistering)";
case NETREG_UNREGISTERED: return " (unregistered)";
case NETREG_RELEASED: return " (released)";
case NETREG_DUMMY: return " (dummy)";
}
WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state); return " (unknown)";
}
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
/*
* netdev_WARN() acts like dev_printk(), but with the key difference
* of using a WARN/WARN_ON to get the message out, including the
* file/line information and a backtrace.
*/
#define netdev_WARN(dev, format, args...) \
WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
#define netdev_WARN_ONCE(dev, format, args...) \
WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
/*
* The list of packet types we will receive (as opposed to discard)
* and the routines to invoke.
*
* Why 16? Because with 16 the only overlap we get on a hash of the
* low nibble of the protocol value is RARP/SNAP/X.25.
*
* 0800 IP
* 0001 802.3
* 0002 AX.25
* 0004 802.2
* 8035 RARP
* 0005 SNAP
* 0805 X.25
* 0806 ARP
* 8137 IPX
* 0009 Localtalk
* 86DD IPv6
*/
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
extern struct net_device *blackhole_netdev;
/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) \
atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
#endif /* _LINUX_NETDEVICE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
#include <linux/vfsdebug.h>
#include <linux/linkage.h>
#include <linux/wait_bit.h>
#include <linux/kdev_t.h>
#include <linux/dcache.h>
#include <linux/path.h>
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
#include <linux/xarray.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/pid.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/mm_types.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fcntl.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
#include <linux/migrate_mode.h>
#include <linux/uidgid.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
#include <linux/delayed_call.h>
#include <linux/uuid.h>
#include <linux/errseq.h>
#include <linux/ioprio.h>
#include <linux/fs_types.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/mount.h>
#include <linux/cred.h>
#include <linux/mnt_idmapping.h>
#include <linux/slab.h>
#include <linux/maple_tree.h>
#include <linux/rw_hint.h>
#include <linux/file_ref.h>
#include <linux/unicode.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
struct backing_dev_info;
struct bdi_writeback;
struct bio;
struct io_comp_batch;
struct export_operations;
struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
struct kiocb;
struct kobject;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;
struct vm_area_struct;
struct vfsmount;
struct cred;
struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
struct fscrypt_operations;
struct fsverity_operations;
struct fsnotify_mark_connector;
struct fsnotify_sb_info;
struct fs_context;
struct fs_parameter_spec;
struct file_kattr;
struct iomap_ops;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
typedef __kernel_rwf_t rwf_t;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
#define MAY_EXEC 0x00000001
#define MAY_WRITE 0x00000002
#define MAY_READ 0x00000004
#define MAY_APPEND 0x00000008
#define MAY_ACCESS 0x00000010
#define MAY_OPEN 0x00000020
#define MAY_CHDIR 0x00000040
/* called from RCU mode, don't block */
#define MAY_NOT_BLOCK 0x00000080
/*
* flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
* to O_WRONLY and O_RDWR via the strange trick in do_dentry_open()
*/
/* file is open for reading */
#define FMODE_READ ((__force fmode_t)(1 << 0))
/* file is open for writing */
#define FMODE_WRITE ((__force fmode_t)(1 << 1))
/* file is seekable */
#define FMODE_LSEEK ((__force fmode_t)(1 << 2))
/* file can be accessed using pread */
#define FMODE_PREAD ((__force fmode_t)(1 << 3))
/* file can be accessed using pwrite */
#define FMODE_PWRITE ((__force fmode_t)(1 << 4))
/* File is opened for execution with sys_execve / sys_uselib */
#define FMODE_EXEC ((__force fmode_t)(1 << 5))
/* File writes are restricted (block device specific) */
#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6))
/* File supports atomic writes */
#define FMODE_CAN_ATOMIC_WRITE ((__force fmode_t)(1 << 7))
/* FMODE_* bit 8 */
/* 32bit hashes as llseek() offset (for directories) */
#define FMODE_32BITHASH ((__force fmode_t)(1 << 9))
/* 64bit hashes as llseek() offset (for directories) */
#define FMODE_64BITHASH ((__force fmode_t)(1 << 10))
/*
* Don't update ctime and mtime.
*
* Currently a special hack for the XFS open_by_handle ioctl, but we'll
* hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
*/
#define FMODE_NOCMTIME ((__force fmode_t)(1 << 11))
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
/* Supports IOCB_HAS_METADATA */
#define FMODE_HAS_METADATA ((__force fmode_t)(1 << 13))
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)(1 << 14))
/* File needs atomic accesses to f_pos */
#define FMODE_ATOMIC_POS ((__force fmode_t)(1 << 15))
/* Write access to underlying fs */
#define FMODE_WRITER ((__force fmode_t)(1 << 16))
/* Has read method(s) */
#define FMODE_CAN_READ ((__force fmode_t)(1 << 17))
/* Has write method(s) */
#define FMODE_CAN_WRITE ((__force fmode_t)(1 << 18))
#define FMODE_OPENED ((__force fmode_t)(1 << 19))
#define FMODE_CREATED ((__force fmode_t)(1 << 20))
/* File is stream-like */
#define FMODE_STREAM ((__force fmode_t)(1 << 21))
/* File supports DIRECT IO */
#define FMODE_CAN_ODIRECT ((__force fmode_t)(1 << 22))
#define FMODE_NOREUSE ((__force fmode_t)(1 << 23))
/* File is embedded in backing_file object */
#define FMODE_BACKING ((__force fmode_t)(1 << 24))
/*
* Together with FMODE_NONOTIFY_PERM defines which fsnotify events shouldn't be
* generated (see below)
*/
#define FMODE_NONOTIFY ((__force fmode_t)(1 << 25))
/*
* Together with FMODE_NONOTIFY defines which fsnotify events shouldn't be
* generated (see below)
*/
#define FMODE_NONOTIFY_PERM ((__force fmode_t)(1 << 26))
/* File is capable of returning -EAGAIN if I/O will block */
#define FMODE_NOWAIT ((__force fmode_t)(1 << 27))
/* File represents mount that needs unmounting */
#define FMODE_NEED_UNMOUNT ((__force fmode_t)(1 << 28))
/* File does not contribute to nr_files count */
#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29))
/*
* The two FMODE_NONOTIFY* define which fsnotify events should not be generated
* for an open file. These are the possible values of
* (f->f_mode & FMODE_FSNOTIFY_MASK) and their meaning:
*
* FMODE_NONOTIFY - suppress all (incl. non-permission) events.
* FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events.
* FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only FAN_ACCESS_PERM.
*/
#define FMODE_FSNOTIFY_MASK \
(FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)
#define FMODE_FSNOTIFY_NONE(mode) \
((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY)
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
#define FMODE_FSNOTIFY_HSM(mode) \
((mode & FMODE_FSNOTIFY_MASK) == 0 || \
(mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM))
#define FMODE_FSNOTIFY_ACCESS_PERM(mode) \
((mode & FMODE_FSNOTIFY_MASK) == 0)
#else
#define FMODE_FSNOTIFY_ACCESS_PERM(mode) 0
#define FMODE_FSNOTIFY_HSM(mode) 0
#endif
/*
* Attribute flags. These should be or-ed together to figure out what
* has been changed!
*/
#define ATTR_MODE (1 << 0)
#define ATTR_UID (1 << 1)
#define ATTR_GID (1 << 2)
#define ATTR_SIZE (1 << 3)
#define ATTR_ATIME (1 << 4)
#define ATTR_MTIME (1 << 5)
#define ATTR_CTIME (1 << 6)
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
#define ATTR_CTIME_SET (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
#define ATTR_KILL_PRIV (1 << 14)
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
#define ATTR_TOUCH (1 << 17)
#define ATTR_DELEG (1 << 18) /* Delegated attrs. Don't break write delegations */
/*
* Whiteout is represented by a char device. The following constants define the
* mode and device number to use.
*/
#define WHITEOUT_MODE 0
#define WHITEOUT_DEV 0
/*
* This is the Inode Attributes structure, used for notify_change(). It
* uses the above definitions as flags, to know which values have changed.
* Also, in this manner, a Filesystem can look at only the values it cares
* about. Basically, these are the attributes that the VFS layer can
* request to change from the FS layer.
*
* Derek Atkins <warlord@MIT.EDU> 94-10-20
*/
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
/*
* The two anonymous unions wrap structures with the same member.
*
* Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which
* are a dedicated type requiring the filesystem to use the dedicated
* helpers. Other filesystems can continue to use ia_{g,u}id until they
* have been ported.
*
* They always contain the same value. In other words FS_ALLOW_IDMAP
* pass down the same value on idmapped mounts as they would on regular
* mounts.
*/
union {
kuid_t ia_uid;
vfsuid_t ia_vfsuid;
};
union {
kgid_t ia_gid;
vfsgid_t ia_vfsgid;
};
loff_t ia_size;
struct timespec64 ia_atime;
struct timespec64 ia_mtime;
struct timespec64 ia_ctime;
/*
* Not an attribute, but an auxiliary info for filesystems wanting to
* implement an ftruncate() like method. NOTE: filesystem should
* check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
*/
struct file *ia_file;
};
/*
* Includes for diskquotas.
*/
#include <linux/quota.h>
/*
* Maximum number of layers of fs stack. Needs to be limited to
* prevent kernel stack overflow
*/
#define FILESYSTEM_MAX_STACK_DEPTH 2
/**
* enum positive_aop_returns - aop return codes with specific semantics
*
* @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
* completed, that the page is still locked, and
* should be considered active. The VM uses this hint
* to return the page to the active list -- it won't
* be a candidate for writeback again in the near
* future. Other callers must be careful to unlock
* the page if they get this return. Returned by
* writepage();
*
* @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
* unlocked it and the page might have been truncated.
* The caller should back up to acquiring a new page and
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
* by read_folio().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
* page to allow for functions that return the number of bytes operated on in a
* given page.
*/
enum positive_aop_returns {
AOP_WRITEPAGE_ACTIVATE = 0x80000,
AOP_TRUNCATED_PAGE = 0x80001,
};
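/*
 * Illustrative sketch: a caller of ->read_folio() is expected to handle
 * AOP_TRUNCATED_PAGE by dropping its folio reference and repeating the
 * lookup, roughly:
 *
 *	retry:
 *		folio = filemap_grab_folio(mapping, index);
 *		...
 *		err = mapping->a_ops->read_folio(file, folio);
 *		if (err == AOP_TRUNCATED_PAGE) {
 *			folio_put(folio);
 *			goto retry;
 *		}
 */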
/*
* oh the beauties of C type declarations.
*/
struct page;
struct address_space;
struct writeback_control;
struct readahead_control;
/* Match RWF_* bits to IOCB bits */
#define IOCB_HIPRI (__force int) RWF_HIPRI
#define IOCB_DSYNC (__force int) RWF_DSYNC
#define IOCB_SYNC (__force int) RWF_SYNC
#define IOCB_NOWAIT (__force int) RWF_NOWAIT
#define IOCB_APPEND (__force int) RWF_APPEND
#define IOCB_ATOMIC (__force int) RWF_ATOMIC
#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE
#define IOCB_NOSIGNAL (__force int) RWF_NOSIGNAL
/* non-RWF related bits - start at 16 */
#define IOCB_EVENTFD (1 << 16)
#define IOCB_DIRECT (1 << 17)
#define IOCB_WRITE (1 << 18)
/* iocb->ki_waitq is valid */
#define IOCB_WAITQ (1 << 19)
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)
/*
* IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
* iocb completion can be passed back to the owner for execution from a safe
* context rather than needing to be punted through a workqueue. If this
* flag is set, the bio completion handling may set iocb->dio_complete to a
* handler function and iocb->private to context information for that handler.
* The issuer should call the handler with that context information from task
* context to complete the processing of the iocb. Note that while this
* provides a task context for the dio_complete() callback, it should only be
* used on the completion side for non-IO generating completions. It's fine to
* call blocking functions from this callback, but they should not wait for
* unrelated IO (like cache flushing, new IO generation, etc).
*/
#define IOCB_DIO_CALLER_COMP (1 << 22)
/* kiocb is a read or write operation submitted by fs/aio.c. */
#define IOCB_AIO_RW (1 << 23)
#define IOCB_HAS_METADATA (1 << 24)
/* for use in trace events */
#define TRACE_IOCB_STRINGS \
{ IOCB_HIPRI, "HIPRI" }, \
{ IOCB_DSYNC, "DSYNC" }, \
{ IOCB_SYNC, "SYNC" }, \
{ IOCB_NOWAIT, "NOWAIT" }, \
{ IOCB_APPEND, "APPEND" }, \
{ IOCB_ATOMIC, "ATOMIC" }, \
{ IOCB_DONTCACHE, "DONTCACHE" }, \
{ IOCB_EVENTFD, "EVENTFD"}, \
{ IOCB_DIRECT, "DIRECT" }, \
{ IOCB_WRITE, "WRITE" }, \
{ IOCB_WAITQ, "WAITQ" }, \
{ IOCB_NOIO, "NOIO" }, \
{ IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
{ IOCB_DIO_CALLER_COMP, "CALLER_COMP" }, \
{ IOCB_AIO_RW, "AIO_RW" }, \
{ IOCB_HAS_METADATA, "AIO_HAS_METADATA" }
struct kiocb {
struct file *ki_filp;
loff_t ki_pos;
void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
u8 ki_write_stream;
union {
/*
* Only used for async buffered reads, where it denotes the
* page waitqueue associated with completing the read. Valid
* IFF IOCB_WAITQ is set.
*/
struct wait_page_queue *ki_waitq;
/*
* Can be used for O_DIRECT IO, where the completion handling
* is punted back to the issuer of the IO. May only be set
* if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
* must then check for presence of this handler when ki_complete
* is invoked. The data passed in to this handler must be
* assigned to ->private when dio_complete is assigned.
*/
ssize_t (*dio_complete)(void *data);
};
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
return kiocb->ki_complete == NULL;
}
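/*
 * Illustrative sketch: code that finishes an iocb out of line only invokes
 * ->ki_complete() for asynchronous requests; synchronous callers simply use
 * the return value:
 *
 *	if (!is_sync_kiocb(iocb))
 *		iocb->ki_complete(iocb, ret);
 */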
struct address_space_operations {
int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
/* Mark a folio dirty. Return true if this dirtied it */
bool (*dirty_folio)(struct address_space *, struct folio *);
void (*readahead)(struct readahead_control *);
int (*write_begin)(const struct kiocb *, struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata);
int (*write_end)(const struct kiocb *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct folio *folio, void *fsdata);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
* migrate the contents of a folio to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
*/
int (*migrate_folio)(struct address_space *, struct folio *dst,
struct folio *src, enum migrate_mode);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
};
extern const struct address_space_operations empty_aops;
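/*
 * Illustrative sketch: a simple filesystem typically wires up only a handful
 * of these methods and leaves the rest NULL (hypothetical "examplefs"):
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.read_folio	= examplefs_read_folio,
 *		.writepages	= examplefs_writepages,
 *		.dirty_folio	= filemap_dirty_folio,
 *		.write_begin	= examplefs_write_begin,
 *		.write_end	= examplefs_write_end,
 *	};
 */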
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
* @i_pages: Cached pages.
* @invalidate_lock: Guards coherency between page cache contents and
* file offset->disk block mappings in the filesystem during invalidates.
* It is also used to block modification of page cache contents through
* memory mappings.
* @gfp_mask: Memory allocation flags to use for allocating pages.
* @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings.
* @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
* @writeback_index: Writeback starts here.
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
* @i_private_lock: For use by the owner of the address_space.
* @i_private_list: For use by the owner of the address_space.
* @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
struct xarray i_pages;
struct rw_semaphore invalidate_lock;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
/* number of thp, only for non-shmem files */
atomic_t nr_thps;
#endif
struct rb_root_cached i_mmap;
unsigned long nrpages;
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
errseq_t wb_err;
spinlock_t i_private_lock;
struct list_head i_private_list;
struct rw_semaphore i_mmap_rwsem;
void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
* of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON.
*/
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
#define PAGECACHE_TAG_DIRTY XA_MARK_0
#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
#define PAGECACHE_TAG_TOWRITE XA_MARK_2
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
static inline void i_mmap_lock_write(struct address_space *mapping)
{
down_write(&mapping->i_mmap_rwsem);
}
static inline int i_mmap_trylock_write(struct address_space *mapping)
{
return down_write_trylock(&mapping->i_mmap_rwsem);
}
static inline void i_mmap_unlock_write(struct address_space *mapping)
{
up_write(&mapping->i_mmap_rwsem);
}
static inline int i_mmap_trylock_read(struct address_space *mapping)
{
return down_read_trylock(&mapping->i_mmap_rwsem);
}
static inline void i_mmap_lock_read(struct address_space *mapping)
{
down_read(&mapping->i_mmap_rwsem);
}
static inline void i_mmap_unlock_read(struct address_space *mapping)
{
up_read(&mapping->i_mmap_rwsem);
}
static inline void i_mmap_assert_locked(struct address_space *mapping)
{
lockdep_assert_held(&mapping->i_mmap_rwsem);
}
static inline void i_mmap_assert_write_locked(struct address_space *mapping)
{
lockdep_assert_held_write(&mapping->i_mmap_rwsem);
}
/*
* Might pages of this file be mapped into userspace?
*/
static inline int mapping_mapped(const struct address_space *mapping)
{
return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
/*
* Might pages of this file have been modified in userspace?
* Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings if none exist right now.
*/
static inline int mapping_writably_mapped(const struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}
static inline int mapping_map_writable(struct address_space *mapping)
{
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
0 : -EPERM;
}
static inline void mapping_unmap_writable(struct address_space *mapping)
{
atomic_dec(&mapping->i_mmap_writable);
}
static inline int mapping_deny_writable(struct address_space *mapping)
{
return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
0 : -EBUSY;
}
static inline void mapping_allow_writable(struct address_space *mapping)
{
atomic_inc(&mapping->i_mmap_writable);
}
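/*
 * Illustrative sketch: the mmap path pairs these helpers around the lifetime
 * of a shared mapping, roughly:
 *
 *	if (vm_flags & VM_SHARED) {
 *		error = mapping_map_writable(file->f_mapping);
 *		if (error)
 *			return error;
 *	}
 *	...
 *	mapping_unmap_writable(file->f_mapping);
 *
 * while code that must rule out writable mappings (write sealing, for
 * instance) uses the mapping_deny_writable()/mapping_allow_writable() pair.
 */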
/*
* Use sequence counter to get consistent i_size on 32-bit processors.
*/
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __NEED_I_SIZE_ORDERED
#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
#else
#define i_size_ordered_init(inode) do { } while (0)
#endif
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
/*
* ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
* cache the ACL. This also means that ->get_inode_acl() can be called in RCU
* mode with the LOOKUP_RCU flag.
*/
#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
uncached_acl_sentinel(struct task_struct *task)
{
return (void *)task + 1;
}
static inline bool
is_uncached_acl(struct posix_acl *acl)
{
return (long)acl & 1;
}
#define IOP_FASTPERM 0x0001
#define IOP_LOOKUP 0x0002
#define IOP_NOFOLLOW 0x0004
#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
#define IOP_MGTIME 0x0020
#define IOP_CACHED_LINK 0x0040
/*
* Inode state bits. Protected by inode->i_lock
*
* Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
* I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
*
* Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
* until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
* various stages of removing an inode.
*
* Two bits are used for locking and completion notification, I_NEW and I_SYNC.
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
* fdatasync() (unless I_DIRTY_DATASYNC is also set).
* Timestamp updates are the usual cause.
* I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
* these changes separately from I_DIRTY_SYNC so that we
* don't have to write inode on fdatasync() when only
* e.g. the timestamps have changed.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
* I_DIRTY_TIME The inode itself has dirty timestamps, and the
* lazytime mount option is enabled. We keep track of this
* separately from I_DIRTY_SYNC in order to implement
* lazytime. This gets cleared if I_DIRTY_INODE
* (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
* I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
* in place because writeback might already be in progress
* and we don't want to lose the time update
* I_NEW Serves as both a mutex and completion notification.
* New inodes set I_NEW. If two processes both create
* the same inode, one of them will release its inode and
* wait for I_NEW to be released before returning.
* Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
* also cause waiting on I_NEW, without I_NEW actually
* being set. find_inode() uses this to prevent returning
* nearly-dead inodes.
* I_WILL_FREE Must be set when calling write_inode_now() if i_count
* is zero. I_FREEING must be set when I_WILL_FREE is
* cleared.
* I_FREEING Set when inode is about to be freed but still has dirty
* pages or buffers attached or the inode itself is still
* dirty.
* I_CLEAR Added by clear_inode(). In this state the inode is
* clean and can be destroyed. Inode keeps I_FREEING.
*
* Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
* prohibited for many purposes. iget() must wait for
* the inode to be completely released, then create it
* anew. Other functions will just ignore such inodes,
* if appropriate. I_NEW is used for waiting.
*
* I_SYNC Writeback of inode is running. The bit is set during
* data writeback, and cleared with a wakeup on the bit
* address once it is done. The bit is also used to pin
* the inode in memory for flusher thread.
*
* I_REFERENCED		Marks the inode as recently referenced on the LRU list.
*
* I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
* synchronize competing switching instances and to tell
* wb stat updates to grab the i_pages lock. See
* inode_switch_wbs_work_fn() for details.
*
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
* and work dirs among overlayfs mounts.
*
* I_CREATING New object's inode in the middle of setting up.
*
* I_DONTCACHE Evict inode as soon as it is not used anymore.
*
* I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
* Used to detect that mark_inode_dirty() should not move
* inode between dirty lists.
*
* I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
*
* I_LRU_ISOLATING Inode is pinned being isolated from LRU without holding
* i_count.
*
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*
* __I_{SYNC,NEW,LRU_ISOLATING} are used to derive unique addresses to wait
* upon. There's one free address left.
*/
enum inode_state_bits {
__I_NEW = 0U,
__I_SYNC = 1U,
__I_LRU_ISOLATING = 2U
/* reserved wait address bit 3 */
};
enum inode_state_flags_t {
I_NEW = (1U << __I_NEW),
I_SYNC = (1U << __I_SYNC),
I_LRU_ISOLATING = (1U << __I_LRU_ISOLATING),
/* reserved flag bit 3 */
I_DIRTY_SYNC = (1U << 4),
I_DIRTY_DATASYNC = (1U << 5),
I_DIRTY_PAGES = (1U << 6),
I_WILL_FREE = (1U << 7),
I_FREEING = (1U << 8),
I_CLEAR = (1U << 9),
I_REFERENCED = (1U << 10),
I_LINKABLE = (1U << 11),
I_DIRTY_TIME = (1U << 12),
I_WB_SWITCH = (1U << 13),
I_OVL_INUSE = (1U << 14),
I_CREATING = (1U << 15),
I_DONTCACHE = (1U << 16),
I_SYNC_QUEUED = (1U << 17),
I_PINNING_NETFS_WB = (1U << 18)
};
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
/*
* Keep mostly read-only and often accessed (especially for
* the RCU path lookup and 'stat' data) fields at the beginning
* of the 'struct inode'
*/
struct inode {
umode_t i_mode;
unsigned short i_opflags;
kuid_t i_uid;
kgid_t i_gid;
unsigned int i_flags;
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *i_acl;
struct posix_acl *i_default_acl;
#endif
const struct inode_operations *i_op;
struct super_block *i_sb;
struct address_space *i_mapping;
#ifdef CONFIG_SECURITY
void *i_security;
#endif
/* Stat data, not accessed from path walking */
unsigned long i_ino;
/*
* Filesystems may only read i_nlink directly. They shall use the
* following functions for modification:
*
* (set|clear|inc|drop)_nlink
* inode_(inc|dec)_link_count
*/
union {
const unsigned int i_nlink;
unsigned int __i_nlink;
};
dev_t i_rdev;
loff_t i_size;
time64_t i_atime_sec;
time64_t i_mtime_sec;
time64_t i_ctime_sec;
u32 i_atime_nsec;
u32 i_mtime_nsec;
u32 i_ctime_nsec;
u32 i_generation;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
enum rw_hint i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
seqcount_t i_size_seqcount;
#endif
/* Misc */
enum inode_state_flags_t i_state;
/* 32-bit hole */
struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
unsigned long dirtied_time_when;
struct hlist_node i_hash;
struct list_head i_io_list; /* backing dev IO list */
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *i_wb; /* the associated cgroup wb */
/* foreign inode detection, see wbc_detach_inode() */
int i_wb_frn_winner;
u16 i_wb_frn_avg_time;
u16 i_wb_frn_history;
#endif
struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
struct list_head i_wb_list; /* backing dev writeback list */
union {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
atomic64_t i_version;
atomic64_t i_sequence; /* see futex */
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
atomic_t i_readcount; /* struct files open RO */
#endif
union {
const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
void (*free_inode)(struct inode *);
};
struct file_lock_context *i_flctx;
struct address_space i_data;
union {
struct list_head i_devices;
int i_linklen;
};
union {
struct pipe_inode_info *i_pipe;
struct cdev *i_cdev;
char *i_link;
unsigned i_dir_seq;
};
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
/* 32-bit hole reserved for expanding i_fsnotify_mask */
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
void *i_private; /* fs or device private pointer */
} __randomize_layout;
static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen)
{
VFS_WARN_ON_INODE(strlen(link) != linklen, inode);
VFS_WARN_ON_INODE(inode->i_opflags & IOP_CACHED_LINK, inode);
inode->i_link = link;
inode->i_linklen = linklen;
inode->i_opflags |= IOP_CACHED_LINK;
}
/*
* Get bit address from inode->i_state to use with wait_var_event()
* infrastructure.
*/
#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit))
struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
struct inode *inode, u32 bit);
static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
{
/* Caller is responsible for correct memory barriers. */
wake_up_var(inode_state_wait_address(inode, bit));
}
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
static inline unsigned int i_blocksize(const struct inode *node)
{
return (1 << node->i_blkbits);
}
static inline int inode_unhashed(struct inode *inode)
{
return hlist_unhashed(&inode->i_hash);
}
/*
* __mark_inode_dirty expects inodes to be hashed. Since we don't
* want special inodes in the fileset inode space, we make them
* appear hashed, but do not put on any lists. hlist_del()
* will work fine and require no locking.
*/
static inline void inode_fake_hash(struct inode *inode)
{
hlist_add_fake(&inode->i_hash);
}
/*
* inode->i_rwsem nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
* 1: parent
* 2: child/target
* 3: xattr
* 4: second non-directory
* 5: second parent (when locking independent directories in rename)
*
* I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
* non-directories at once.
*
* The locking order between these classes is
* parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
*/
enum inode_i_mutex_lock_class
{
I_MUTEX_NORMAL,
I_MUTEX_PARENT,
I_MUTEX_CHILD,
I_MUTEX_XATTR,
I_MUTEX_NONDIR2,
I_MUTEX_PARENT2,
};
static inline void inode_lock(struct inode *inode)
{
down_write(&inode->i_rwsem);
}
static inline __must_check int inode_lock_killable(struct inode *inode)
{
return down_write_killable(&inode->i_rwsem);
}
static inline void inode_unlock(struct inode *inode)
{
up_write(&inode->i_rwsem);
}
static inline void inode_lock_shared(struct inode *inode)
{
down_read(&inode->i_rwsem);
}
static inline __must_check int inode_lock_shared_killable(struct inode *inode)
{
return down_read_killable(&inode->i_rwsem);
}
static inline void inode_unlock_shared(struct inode *inode)
{
up_read(&inode->i_rwsem);
}
static inline int inode_trylock(struct inode *inode)
{
return down_write_trylock(&inode->i_rwsem);
}
static inline int inode_trylock_shared(struct inode *inode)
{
return down_read_trylock(&inode->i_rwsem);
}
static inline int inode_is_locked(struct inode *inode)
{
return rwsem_is_locked(&inode->i_rwsem);
}
static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
{
down_write_nested(&inode->i_rwsem, subclass);
}
static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
{
down_read_nested(&inode->i_rwsem, subclass);
}
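/*
 * Illustrative sketch: when a directory and an object inside it must be held
 * at the same time, the two acquisitions are annotated with different
 * subclasses so lockdep can tell them apart, along the lines of:
 *
 *	inode_lock_nested(dir, I_MUTEX_PARENT);
 *	...
 *	inode_lock_nested(inode, I_MUTEX_CHILD);
 *	...
 *	inode_unlock(inode);
 *	inode_unlock(dir);
 */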
static inline void filemap_invalidate_lock(struct address_space *mapping)
{
down_write(&mapping->invalidate_lock);
}
static inline void filemap_invalidate_unlock(struct address_space *mapping)
{
up_write(&mapping->invalidate_lock);
}
static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
{
down_read(&mapping->invalidate_lock);
}
static inline int filemap_invalidate_trylock_shared(
struct address_space *mapping)
{
return down_read_trylock(&mapping->invalidate_lock);
}
static inline void filemap_invalidate_unlock_shared(
struct address_space *mapping)
{
up_read(&mapping->invalidate_lock);
}
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
void filemap_invalidate_lock_two(struct address_space *mapping1,
struct address_space *mapping2);
void filemap_invalidate_unlock_two(struct address_space *mapping1,
struct address_space *mapping2);
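/*
 * Illustrative sketch: an operation that removes page cache pages while
 * changing the underlying block mapping (hole punching, for instance) takes
 * the invalidate_lock for write around the whole sequence:
 *
 *	filemap_invalidate_lock(inode->i_mapping);
 *	truncate_pagecache_range(inode, start, end);
 *	...
 *	filemap_invalidate_unlock(inode->i_mapping);
 */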
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
* with respect to the local cpu (unlike with preempt disabled),
* but they don't need to be atomic with respect to other cpus like in
* true SMP (so they either need to locally disable irqs around
* the read or, for example on x86, they can still be implemented as a
* cmpxchg8b without the need of the lock prefix). For SMP compiles
* and 64bit archs it makes no difference if preempt is enabled or not.
*/
static inline loff_t i_size_read(const struct inode *inode)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
loff_t i_size;
unsigned int seq;
do {
seq = read_seqcount_begin(&inode->i_size_seqcount);
i_size = inode->i_size;
} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
return i_size;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
loff_t i_size;
preempt_disable();
i_size = inode->i_size;
preempt_enable();
return i_size;
#else
/* Pairs with smp_store_release() in i_size_write() */
return smp_load_acquire(&inode->i_size);
#endif
}
/*
* NOTE: unlike i_size_read(), i_size_write() does need locking around it
* (normally i_rwsem), otherwise on 32bit/SMP an update of i_size_seqcount
* can be lost, resulting in subsequent i_size_read() calls spinning forever.
*/
static inline void i_size_write(struct inode *inode, loff_t i_size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
preempt_disable();
write_seqcount_begin(&inode->i_size_seqcount);
inode->i_size = i_size;
write_seqcount_end(&inode->i_size_seqcount);
preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
preempt_disable();
inode->i_size = i_size;
preempt_enable();
#else
/*
* Pairs with smp_load_acquire() in i_size_read() to ensure
* changes related to inode size (such as page contents) are
* visible before we see the changed inode size.
*/
smp_store_release(&inode->i_size, i_size);
#endif
}
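/*
 * Illustrative sketch: writers serialise i_size updates (typically under
 * inode_lock()), while readers may sample the size locklessly:
 *
 *	inode_lock(inode);
 *	i_size_write(inode, new_size);
 *	inode_unlock(inode);
 *
 *	loff_t size = i_size_read(inode);
 */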
static inline unsigned iminor(const struct inode *inode)
{
return MINOR(inode->i_rdev);
}
static inline unsigned imajor(const struct inode *inode)
{
return MAJOR(inode->i_rdev);
}
struct fown_struct {
struct file *file; /* backpointer for security modules */
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
kuid_t uid, euid; /* uid/euid of process setting the owner */
int signum; /* posix.1b rt signal to be delivered on IO */
};
/**
* struct file_ra_state - Track a file's readahead state.
* @start: Where the most recent readahead started.
* @size: Number of pages read in the most recent readahead.
* @async_size: Number of pages that were/are not needed immediately
* and so were/are genuinely "ahead". Start next readahead when
* the first of these pages is accessed.
* @ra_pages: Maximum size of a readahead request, copied from the bdi.
* @order: Preferred folio order used for most recent readahead.
* @mmap_miss: How many mmap accesses missed in the page cache.
* @prev_pos: The last byte in the most recent read request.
*
* When this structure is passed to ->readahead(), the "most recent"
* readahead means the current readahead.
*/
struct file_ra_state {
pgoff_t start;
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
unsigned short order;
unsigned short mmap_miss;
loff_t prev_pos;
};
/*
* Check if @index falls in the readahead windows.
*/
static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
{
return (index >= ra->start &&
index < ra->start + ra->size);
}
/**
* struct file - Represents a file
* @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
* @f_mode: FMODE_* flags often used in hotpaths
* @f_op: file operations
* @f_mapping: Contents of a cacheable, mappable object.
* @private_data: filesystem or driver specific data
* @f_inode: cached inode
* @f_flags: file flags
* @f_iocb_flags: iocb flags
* @f_cred: stashed credentials of creator/opener
* @f_owner: file owner
* @f_path: path of the file
* @__f_path: writable alias for @f_path; *ONLY* for core VFS and only before
* the file is opened
* @f_pos_lock: lock protecting file position
* @f_pipe: specific to pipes
* @f_pos: file position
* @f_security: LSM security context of this file
* @f_wb_err: writeback error
* @f_sb_err: per sb writeback errors
* @f_ep: link of all epoll hooks for this file
* @f_task_work: task work entry point
* @f_llist: work queue entrypoint
* @f_ra: file's readahead state
* @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
* @f_ref: reference count
*/
struct file {
spinlock_t f_lock;
fmode_t f_mode;
const struct file_operations *f_op;
struct address_space *f_mapping;
void *private_data;
struct inode *f_inode;
unsigned int f_flags;
unsigned int f_iocb_flags;
const struct cred *f_cred;
struct fown_struct *f_owner;
/* --- cacheline 1 boundary (64 bytes) --- */
union {
const struct path f_path;
struct path __f_path;
};
union {
/* regular files (with FMODE_ATOMIC_POS) and directories */
struct mutex f_pos_lock;
/* pipes */
u64 f_pipe;
};
loff_t f_pos;
#ifdef CONFIG_SECURITY
void *f_security;
#endif
/* --- cacheline 2 boundary (128 bytes) --- */
errseq_t f_wb_err;
errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
struct hlist_head *f_ep;
#endif
union {
struct callback_head f_task_work;
struct llist_node f_llist;
struct file_ra_state f_ra;
freeptr_t f_freeptr;
};
file_ref_t f_ref;
/* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
unsigned char f_handle[] __counted_by(handle_bytes);
};
static inline struct file *get_file(struct file *f)
{
file_ref_inc(&f->f_ref); return f;
}
struct file *get_file_rcu(struct file __rcu **f);
struct file *get_file_active(struct file **f);
#define file_count(f) file_ref_read(&(f)->f_ref)
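/*
 * Illustrative sketch: code that stashes a struct file pointer beyond the
 * current call must take its own reference with get_file() and drop it with
 * fput() (declared later in this header) when done:
 *
 *	ctx->file = get_file(file);
 *	...
 *	fput(ctx->file);
 *
 * where "ctx" stands for some hypothetical private structure.
 */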
#define MAX_NON_LFS ((1UL<<31) - 1)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock;
struct file_lease;
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
#define OFFSET_MAX type_max(loff_t)
#define OFFT_OFFSET_MAX type_max(off_t)
#endif
int file_f_owner_allocate(struct file *file);
static inline struct fown_struct *file_f_owner(const struct file *file)
{
return READ_ONCE(file->f_owner);
}
extern void send_sigio(struct fown_struct *fown, int fd, int band);
static inline struct inode *file_inode(const struct file *f)
{
return f->f_inode;
}
/*
* file_dentry() is a relic from the days when overlayfs was using files with a
* "fake" path, meaning f_path on overlayfs and f_inode on the underlying fs.
* In those days, file_dentry() was needed to get the underlying fs dentry that
* matches f_inode.
* Files with "fake" path should not exist nowadays, so use an assertion to make
* sure that file_dentry() was not papering over filesystem bugs.
*/
static inline struct dentry *file_dentry(const struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
WARN_ON_ONCE(d_inode(dentry) != file_inode(file));
return dentry;
}
struct fasync_struct {
rwlock_t fa_lock;
int magic;
int fa_fd;
struct fasync_struct *fa_next; /* singly linked list */
struct file *fa_file;
struct rcu_head fa_rcu;
};
#define FASYNC_MAGIC 0x4601
/* SMP safe fasync helpers: */
extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
extern int fasync_remove_entry(struct file *, struct fasync_struct **);
extern struct fasync_struct *fasync_alloc(void);
extern void fasync_free(struct fasync_struct *);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct file *file);
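/*
 * Illustrative sketch: a character driver that supports SIGIO keeps a fasync
 * list, updates it from its ->fasync() method and kicks it when new data is
 * available (hypothetical "example" driver):
 *
 *	static struct fasync_struct *example_async_queue;
 *
 *	static int example_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &example_async_queue);
 *	}
 *
 *	static void example_data_ready(void)
 *	{
 *		kill_fasync(&example_async_queue, SIGIO, POLL_IN);
 *	}
 */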
/*
* sb->s_flags. Note that these mirror the equivalent MS_* flags where
* represented in both.
*/
#define SB_RDONLY BIT(0) /* Mount read-only */
#define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */
#define SB_NODEV BIT(2) /* Disallow access to device special files */
#define SB_NOEXEC BIT(3) /* Disallow program execution */
#define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */
#define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */
#define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */
#define SB_NOATIME BIT(10) /* Do not update access times. */
#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
#define SB_SILENT BIT(15)
#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
#define SB_I_VERSION BIT(23) /* Update inode I_version field */
#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
#define SB_DEAD BIT(21)
#define SB_DYING BIT(24)
#define SB_FORCE BIT(27)
#define SB_NOSEC BIT(28)
#define SB_BORN BIT(29)
#define SB_ACTIVE BIT(30)
#define SB_NOUSER BIT(31)
/* These flags relate to encoding and casefolding */
#define SB_ENC_STRICT_MODE_FL (1 << 0)
#define SB_ENC_NO_COMPAT_FALLBACK_FL (1 << 1)
#define sb_has_strict_encoding(sb) \
(sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
#if IS_ENABLED(CONFIG_UNICODE)
#define sb_no_casefold_compat_fallback(sb) \
(sb->s_encoding_flags & SB_ENC_NO_COMPAT_FALLBACK_FL)
#else
#define sb_no_casefold_compat_fallback(sb) (1)
#endif
/*
* Umount options
*/
#define MNT_FORCE	0x00000001	/* Attempt to forcibly umount */
#define MNT_DETACH 0x00000002 /* Just detach from the tree */
#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
/* sb->s_iflags */
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
#define SB_I_ALLOW_HSM 0x00004000 /* Allow HSM events on this superblock */
/* Possible states of 'frozen' field */
enum {
SB_UNFROZEN = 0, /* FS is unfrozen */
SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
* internal threads if needed) */
SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
};
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
unsigned short frozen; /* Is sb frozen? */
int freeze_kcount; /* How many kernel freeze requests? */
int freeze_ucount; /* How many userspace freeze requests? */
const void *freeze_owner; /* Owner of the freeze */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
struct mount;
struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
unsigned char s_blocksize_bits;
unsigned long s_blocksize;
loff_t s_maxbytes; /* Max file size */
struct file_system_type *s_type;
const struct super_operations *s_op;
const struct dquot_operations *dq_op;
const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
unsigned long s_iflags; /* internal SB_I_* flags */
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
int s_count;
atomic_t s_active;
#ifdef CONFIG_SECURITY
void *s_security;
#endif
const struct xattr_handler * const *s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
#endif
#ifdef CONFIG_FS_VERITY
const struct fsverity_operations *s_vop;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
struct unicode_map *s_encoding;
__u16 s_encoding_flags;
#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct mount *s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
struct file *s_bdev_file;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
unsigned int s_quota_types; /* Bitmask of supported quota types */
struct quota_info s_dquot; /* Diskquota specific options */
struct sb_writers s_writers;
/*
* Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
* s_fsnotify_info together for cache efficiency. They are frequently
* accessed and rarely modified.
*/
void *s_fs_info; /* Filesystem private info */
/* Granularity of c/m/atime in ns (cannot be worse than a second) */
u32 s_time_gran;
/* Time limits for c/m/atime in seconds */
time64_t s_time_min;
time64_t s_time_max;
#ifdef CONFIG_FSNOTIFY
u32 s_fsnotify_mask;
struct fsnotify_sb_info *s_fsnotify_info;
#endif
/*
* q: why are s_id and s_sysfs_name not the same? both are human
* readable strings that identify the filesystem
* a: s_id is allowed to change at runtime; it's used in log messages,
* and we want that: a filesystem may start out on a single device (s_id is
* the dev name) but then have another device hot added, at which point we
* have to switch to identifying it by UUID
* but s_sysfs_name is a handle for programmatic access, and can't
* change at runtime
*/
char s_id[32]; /* Informational name */
uuid_t s_uuid; /* UUID */
u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */
/* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */
char s_sysfs_name[UUID_STRING_LEN + 1];
unsigned int s_max_links;
unsigned int s_d_flags; /* default d_flags for dentries */
/*
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
*/
struct mutex s_vfs_rename_mutex; /* Kludge */
/*
* Filesystem subtype. If non-empty the filesystem type field
* in /proc/mounts will be "type.subtype"
*/
const char *s_subtype;
const struct dentry_operations *__s_d_op; /* default d_op for dentries */
struct shrinker *s_shrink; /* per-sb shrinker handle */
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
/* Read-only state of the superblock is being changed */
int s_readonly_remount;
/* per-sb errseq_t for reporting writeback errors via syncfs */
errseq_t s_wb_err;
/* AIO completions deferred from interrupt context */
struct workqueue_struct *s_dio_done_wq;
struct hlist_head s_pins;
/*
* Owning user namespace and default context in which to
* interpret filesystem uids, gids, quotas, device nodes,
* xattrs and security labels.
*/
struct user_namespace *s_user_ns;
/*
* The list_lru structure is essentially just a pointer to a table
* of per-node lru lists, each of which has its own spinlock.
* There is no need to put them into separate cachelines.
*/
struct list_lru s_dentry_lru;
struct list_lru s_inode_lru;
struct rcu_head rcu;
struct work_struct destroy_work;
struct mutex s_sync_lock; /* sync serialisation lock */
/*
* Indicates how deep in a filesystem stack this SB is
*/
int s_stack_depth;
/* s_inode_list_lock protects s_inodes */
spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
struct list_head s_inodes; /* all inodes */
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
} __randomize_layout;
static inline struct user_namespace *i_user_ns(const struct inode *inode)
{
return inode->i_sb->s_user_ns;
}
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
* instead deal with the raw numeric values that are stored
* in the filesystem.
*/
static inline uid_t i_uid_read(const struct inode *inode)
{
return from_kuid(i_user_ns(inode), inode->i_uid);
}
static inline gid_t i_gid_read(const struct inode *inode)
{
return from_kgid(i_user_ns(inode), inode->i_gid);
}
static inline void i_uid_write(struct inode *inode, uid_t uid)
{
inode->i_uid = make_kuid(i_user_ns(inode), uid);
}
static inline void i_gid_write(struct inode *inode, gid_t gid)
{
inode->i_gid = make_kgid(i_user_ns(inode), gid);
}
/**
* i_uid_into_vfsuid - map an inode's i_uid down according to an idmapping
* @idmap: idmap of the mount the inode was found from
* @inode: inode to map
*
* Return: the inode's i_uid mapped down according to @idmap.
* If the inode's i_uid has no mapping INVALID_VFSUID is returned.
*/
static inline vfsuid_t i_uid_into_vfsuid(struct mnt_idmap *idmap,
const struct inode *inode)
{
return make_vfsuid(idmap, i_user_ns(inode), inode->i_uid);
}
/**
* i_uid_needs_update - check whether inode's i_uid needs to be updated
* @idmap: idmap of the mount the inode was found from
* @attr: the new attributes of @inode
* @inode: the inode to update
*
* Check whether the @inode's i_uid field needs to be updated taking idmapped
* mounts into account if the filesystem supports it.
*
* Return: true if @inode's i_uid field needs to be updated, false if not.
*/
static inline bool i_uid_needs_update(struct mnt_idmap *idmap,
const struct iattr *attr,
const struct inode *inode)
{
return ((attr->ia_valid & ATTR_UID) &&
!vfsuid_eq(attr->ia_vfsuid,
i_uid_into_vfsuid(idmap, inode)));
}
/**
* i_uid_update - update @inode's i_uid field
* @idmap: idmap of the mount the inode was found from
* @attr: the new attributes of @inode
* @inode: the inode to update
*
* Safely update @inode's i_uid field translating the vfsuid of any idmapped
* mount into the filesystem kuid.
*/
static inline void i_uid_update(struct mnt_idmap *idmap,
const struct iattr *attr,
struct inode *inode)
{
if (attr->ia_valid & ATTR_UID)
inode->i_uid = from_vfsuid(idmap, i_user_ns(inode),
attr->ia_vfsuid);
}
/**
* i_gid_into_vfsgid - map an inode's i_gid down according to an idmapping
* @idmap: idmap of the mount the inode was found from
* @inode: inode to map
*
* Return: the inode's i_gid mapped down according to @idmap.
* If the inode's i_gid has no mapping INVALID_VFSGID is returned.
*/
static inline vfsgid_t i_gid_into_vfsgid(struct mnt_idmap *idmap,
const struct inode *inode)
{
return make_vfsgid(idmap, i_user_ns(inode), inode->i_gid);
}
/**
* i_gid_needs_update - check whether inode's i_gid needs to be updated
* @idmap: idmap of the mount the inode was found from
* @attr: the new attributes of @inode
* @inode: the inode to update
*
* Check whether the @inode's i_gid field needs to be updated taking idmapped
* mounts into account if the filesystem supports it.
*
* Return: true if @inode's i_gid field needs to be updated, false if not.
*/
static inline bool i_gid_needs_update(struct mnt_idmap *idmap,
const struct iattr *attr,
const struct inode *inode)
{
return ((attr->ia_valid & ATTR_GID) &&
!vfsgid_eq(attr->ia_vfsgid,
i_gid_into_vfsgid(idmap, inode)));
}
/**
* i_gid_update - update @inode's i_gid field
* @idmap: idmap of the mount the inode was found from
* @attr: the new attributes of @inode
* @inode: the inode to update
*
* Safely update @inode's i_gid field translating the vfsgid of any idmapped
* mount into the filesystem kgid.
*/
static inline void i_gid_update(struct mnt_idmap *idmap,
const struct iattr *attr,
struct inode *inode)
{
if (attr->ia_valid & ATTR_GID)
inode->i_gid = from_vfsgid(idmap, i_user_ns(inode),
attr->ia_vfsgid);
}
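/*
 * Illustrative sketch: a filesystem handling ownership changes itself uses
 * the helpers above instead of writing i_uid/i_gid directly, so idmapped
 * mounts are handled for free:
 *
 *	if (i_uid_needs_update(idmap, attr, inode) ||
 *	    i_gid_needs_update(idmap, attr, inode)) {
 *		... transfer quotas, update ACLs, etc. ...
 *	}
 *	i_uid_update(idmap, attr, inode);
 *	i_gid_update(idmap, attr, inode);
 */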
/**
* inode_fsuid_set - initialize inode's i_uid field with callers fsuid
* @inode: inode to initialize
* @idmap: idmap of the mount the inode was found from
*
* Initialize the i_uid field of @inode. If the inode was found/created via
* an idmapped mount map the caller's fsuid according to @idmap.
*/
static inline void inode_fsuid_set(struct inode *inode,
struct mnt_idmap *idmap)
{
inode->i_uid = mapped_fsuid(idmap, i_user_ns(inode));
}
/**
* inode_fsgid_set - initialize inode's i_gid field with callers fsgid
* @inode: inode to initialize
* @idmap: idmap of the mount the inode was found from
*
* Initialize the i_gid field of @inode. If the inode was found/created via
* an idmapped mount map the caller's fsgid according to @idmap.
*/
static inline void inode_fsgid_set(struct inode *inode,
struct mnt_idmap *idmap)
{
inode->i_gid = mapped_fsgid(idmap, i_user_ns(inode));
}
/**
* fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped
* @sb: the superblock we want a mapping in
* @idmap: idmap of the relevant mount
*
* Check whether the caller's fsuid and fsgid have a valid mapping in the
* s_user_ns of the superblock @sb. If the caller is on an idmapped mount map
* the caller's fsuid and fsgid according to the @idmap first.
*
* Return: true if fsuid and fsgid is mapped, false if not.
*/
static inline bool fsuidgid_has_mapping(struct super_block *sb,
struct mnt_idmap *idmap)
{
struct user_namespace *fs_userns = sb->s_user_ns;
kuid_t kuid;
kgid_t kgid;
kuid = mapped_fsuid(idmap, fs_userns);
if (!uid_valid(kuid))
return false;
kgid = mapped_fsgid(idmap, fs_userns);
if (!gid_valid(kgid))
return false;
return kuid_has_mapping(fs_userns, kuid) &&
kgid_has_mapping(fs_userns, kgid);
}
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
struct timespec64 inode_set_ctime_deleg(struct inode *inode,
struct timespec64 update);
static inline time64_t inode_get_atime_sec(const struct inode *inode)
{
return inode->i_atime_sec;
}
static inline long inode_get_atime_nsec(const struct inode *inode)
{
return inode->i_atime_nsec;
}
static inline struct timespec64 inode_get_atime(const struct inode *inode)
{
struct timespec64 ts = { .tv_sec = inode_get_atime_sec(inode),
.tv_nsec = inode_get_atime_nsec(inode) };
return ts;
}
static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
struct timespec64 ts)
{
inode->i_atime_sec = ts.tv_sec;
inode->i_atime_nsec = ts.tv_nsec;
return ts;
}
static inline struct timespec64 inode_set_atime(struct inode *inode,
time64_t sec, long nsec)
{
struct timespec64 ts = { .tv_sec = sec,
.tv_nsec = nsec };
return inode_set_atime_to_ts(inode, ts);
}
static inline time64_t inode_get_mtime_sec(const struct inode *inode)
{
return inode->i_mtime_sec;
}
static inline long inode_get_mtime_nsec(const struct inode *inode)
{
return inode->i_mtime_nsec;
}
static inline struct timespec64 inode_get_mtime(const struct inode *inode)
{
struct timespec64 ts = { .tv_sec = inode_get_mtime_sec(inode),
.tv_nsec = inode_get_mtime_nsec(inode) };
return ts;
}
static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
struct timespec64 ts)
{
inode->i_mtime_sec = ts.tv_sec;
inode->i_mtime_nsec = ts.tv_nsec;
return ts;
}
static inline struct timespec64 inode_set_mtime(struct inode *inode,
time64_t sec, long nsec)
{
struct timespec64 ts = { .tv_sec = sec,
.tv_nsec = nsec };
return inode_set_mtime_to_ts(inode, ts);
}
/*
* Multigrain timestamps
*
* Conditionally use fine-grained ctime and mtime timestamps when there
* are users actively observing them via getattr. The primary use-case
* for this is NFS clients that use the ctime to distinguish between
* different states of the file, and that are often fooled by multiple
* operations that occur in the same coarse-grained timer tick.
*/
#define I_CTIME_QUERIED ((u32)BIT(31))
static inline time64_t inode_get_ctime_sec(const struct inode *inode)
{
return inode->i_ctime_sec;
}
static inline long inode_get_ctime_nsec(const struct inode *inode)
{
return inode->i_ctime_nsec & ~I_CTIME_QUERIED;
}
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
struct timespec64 ts = { .tv_sec = inode_get_ctime_sec(inode),
.tv_nsec = inode_get_ctime_nsec(inode) };
return ts;
}
struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts);
/**
* inode_set_ctime - set the ctime in the inode
* @inode: inode in which to set the ctime
* @sec: tv_sec value to set
* @nsec: tv_nsec value to set
*
* Set the ctime in @inode to { @sec, @nsec }
*/
static inline struct timespec64 inode_set_ctime(struct inode *inode,
time64_t sec, long nsec)
{
struct timespec64 ts = { .tv_sec = sec,
.tv_nsec = nsec };
return inode_set_ctime_to_ts(inode, ts);
}
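/*
 * Illustrative sketch: the common "this operation modified the file" idiom
 * stamps mtime and ctime from the same instant and then dirties the inode:
 *
 *	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 *	mark_inode_dirty(inode);
 */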
struct timespec64 simple_inode_init_ts(struct inode *inode);
/*
* Snapshotting support.
*/
/*
* These are internal functions, please use sb_start_{write,pagefault,intwrite}
* instead.
*/
static inline void __sb_end_write(struct super_block *sb, int level)
{
percpu_up_read(sb->s_writers.rw_sem + level-1);
}
static inline void __sb_start_write(struct super_block *sb, int level)
{
percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true);
}
static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
{
return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
}
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], _THIS_IP_)
/**
* __sb_write_started - check if sb freeze level is held
* @sb: the super we write to
* @level: the freeze level
*
* * > 0 - sb freeze level is held
* * 0 - sb freeze level is not held
* * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
*/
static inline int __sb_write_started(const struct super_block *sb, int level)
{
return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
}
/**
* sb_write_started - check if SB_FREEZE_WRITE is held
* @sb: the super we write to
*
* May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
*/
static inline bool sb_write_started(const struct super_block *sb)
{
return __sb_write_started(sb, SB_FREEZE_WRITE);
}
/**
* sb_write_not_started - check if SB_FREEZE_WRITE is not held
* @sb: the super we write to
*
* May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
*/
static inline bool sb_write_not_started(const struct super_block *sb)
{
return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
}
/**
* file_write_started - check if SB_FREEZE_WRITE is held
* @file: the file we write to
*
* May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
* May be false positive with !S_ISREG, because file_start_write() has
* no effect on !S_ISREG.
*/
static inline bool file_write_started(const struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
return sb_write_started(file_inode(file)->i_sb);
}
/**
* file_write_not_started - check if SB_FREEZE_WRITE is not held
* @file: the file we write to
*
* May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
* May be false positive with !S_ISREG, because file_start_write() has
* no effect on !S_ISREG.
*/
static inline bool file_write_not_started(const struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
return sb_write_not_started(file_inode(file)->i_sb);
}
/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
*
* Decrement number of writers to the filesystem. Wake up possible waiters
* wanting to freeze the filesystem.
*/
static inline void sb_end_write(struct super_block *sb)
{
__sb_end_write(sb, SB_FREEZE_WRITE);
}
/**
* sb_end_pagefault - drop write access to a superblock from a page fault
* @sb: the super we wrote to
*
* Decrement number of processes handling write page fault to the filesystem.
* Wake up possible waiters wanting to freeze the filesystem.
*/
static inline void sb_end_pagefault(struct super_block *sb)
{
__sb_end_write(sb, SB_FREEZE_PAGEFAULT);
}
/**
* sb_end_intwrite - drop write access to a superblock for internal fs purposes
* @sb: the super we wrote to
*
* Decrement fs-internal number of writers to the filesystem. Wake up possible
* waiters wanting to freeze the filesystem.
*/
static inline void sb_end_intwrite(struct super_block *sb)
{
__sb_end_write(sb, SB_FREEZE_FS);
}
/**
* sb_start_write - get write access to a superblock
* @sb: the super we write to
*
* When a process wants to write data or metadata to a file system (i.e. dirty
* a page or an inode), it should embed the operation in a sb_start_write() -
* sb_end_write() pair to get exclusion against file system freezing. This
* function increments number of writers preventing freezing. If the file
* system is already frozen, the function waits until the file system is
* thawed.
*
* Since freeze protection behaves as a lock, users have to preserve
* ordering of freeze protection and other filesystem locks. Generally,
* freeze protection should be the outermost lock. In particular, we have:
*
* sb_start_write
* -> i_rwsem (write path, truncate, directory ops, ...)
* -> s_umount (freeze_super, thaw_super)
*/
static inline void sb_start_write(struct super_block *sb)
{
__sb_start_write(sb, SB_FREEZE_WRITE);
}
static inline bool sb_start_write_trylock(struct super_block *sb)
{
return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}
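/*
 * Illustrative sketch: a kernel-internal writer brackets the whole operation
 * with the freeze-protection pair, keeping it outermost among the filesystem
 * locks as described above:
 *
 *	sb_start_write(sb);
 *	inode_lock(inode);
 *	...
 *	inode_unlock(inode);
 *	sb_end_write(sb);
 */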
/**
* sb_start_pagefault - get write access to a superblock from a page fault
* @sb: the super we write to
*
* When a process starts handling write page fault, it should embed the
* operation into sb_start_pagefault() - sb_end_pagefault() pair to get
* exclusion against file system freezing. This is needed since the page fault
* is going to dirty a page. This function increments number of running page
* faults preventing freezing. If the file system is already frozen, the
* function waits until the file system is thawed.
*
* Since page fault freeze protection behaves as a lock, users have to preserve
* ordering of freeze protection and other filesystem locks. It is advised to
* put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
* handling code implies lock dependency:
*
* mmap_lock
* -> sb_start_pagefault
*/
static inline void sb_start_pagefault(struct super_block *sb)
{
__sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}
/**
* sb_start_intwrite - get write access to a superblock for internal fs purposes
* @sb: the super we write to
*
* This is the third level of protection against filesystem freezing. It is
* free for use by a filesystem. The only requirement is that it must rank
* below sb_start_pagefault.
*
* For example filesystem can call sb_start_intwrite() when starting a
* transaction which somewhat eases handling of freezing for internal sources
* of filesystem changes (internal fs threads, discarding preallocation on file
* close, etc.).
*/
static inline void sb_start_intwrite(struct super_block *sb)
{
__sb_start_write(sb, SB_FREEZE_FS);
}
static inline bool sb_start_intwrite_trylock(struct super_block *sb)
{
return __sb_start_write_trylock(sb, SB_FREEZE_FS);
}
bool inode_owner_or_capable(struct mnt_idmap *idmap,
const struct inode *inode);
/*
* VFS helper functions..
*/
int vfs_create(struct mnt_idmap *, struct inode *,
struct dentry *, umode_t, bool);
struct dentry *vfs_mkdir(struct mnt_idmap *, struct inode *,
struct dentry *, umode_t);
int vfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
umode_t, dev_t);
int vfs_symlink(struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
int vfs_link(struct dentry *, struct mnt_idmap *, struct inode *,
struct dentry *, struct inode **);
int vfs_rmdir(struct mnt_idmap *, struct inode *, struct dentry *);
int vfs_unlink(struct mnt_idmap *, struct inode *, struct dentry *,
struct inode **);
/**
* struct renamedata - contains all information required for renaming
* @mnt_idmap: idmap of the mount in which the rename is happening.
* @old_parent: parent of source
* @old_dentry: source
* @new_parent: parent of destination
* @new_dentry: destination
* @delegated_inode: returns an inode needing a delegation break
* @flags: rename flags
*/
struct renamedata {
struct mnt_idmap *mnt_idmap;
struct dentry *old_parent;
struct dentry *old_dentry;
struct dentry *new_parent;
struct dentry *new_dentry;
struct inode **delegated_inode;
unsigned int flags;
} __randomize_layout;
int vfs_rename(struct renamedata *);
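/*
 * Illustrative sketch: callers package all rename parameters into struct
 * renamedata and hand it to vfs_rename():
 *
 *	struct renamedata rd = {
 *		.mnt_idmap	= idmap,
 *		.old_parent	= old_parent,
 *		.old_dentry	= old_dentry,
 *		.new_parent	= new_parent,
 *		.new_dentry	= new_dentry,
 *		.flags		= 0,
 *	};
 *	error = vfs_rename(&rd);
 */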
static inline int vfs_whiteout(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry)
{
return vfs_mknod(idmap, dir, dentry, S_IFCHR | WHITEOUT_MODE,
WHITEOUT_DEV);
}
struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
const struct path *parentpath,
umode_t mode, int open_flag,
const struct cred *cred);
struct file *kernel_file_open(const struct path *path, int flags,
const struct cred *cred);
int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
void *);
int vfs_fchown(struct file *file, uid_t user, gid_t group);
int vfs_fchmod(struct file *file, umode_t mode);
int vfs_utimes(const struct path *path, struct timespec64 *times);
#ifdef CONFIG_COMPAT
extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
#else
#define compat_ptr_ioctl NULL
#endif
/*
* VFS file helper functions.
*/
void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
const struct inode *dir, umode_t mode);
bool in_group_or_capable(struct mnt_idmap *idmap,
const struct inode *inode, vfsgid_t vfsgid);
/*
* This is the "filldir" function type, used by readdir() to let
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
* Return 'true' to keep going and 'false' if there are no more entries.
*/
struct dir_context;
typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
struct dir_context {
filldir_t actor;
loff_t pos;
/*
* Filesystems MUST NOT MODIFY count, but may use it as a hint:
* 0 unknown
* > 0 space in buffer (assume at least one entry)
* INT_MAX unlimited
*/
int count;
};
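/*
 * Illustrative sketch: a ->iterate_shared() implementation walks its own
 * directory format and feeds entries to the actor through dir_emit()
 * (declared later in this header), advancing ctx->pos as it goes
 * (hypothetical "examplefs", with examplefs_next_entry() standing in for the
 * on-disk walk):
 *
 *	static int examplefs_readdir(struct file *file, struct dir_context *ctx)
 *	{
 *		if (!dir_emit_dots(file, ctx))
 *			return 0;
 *		while (examplefs_next_entry(file, ctx->pos, &name, &len, &ino)) {
 *			if (!dir_emit(ctx, name, len, ino, DT_REG))
 *				return 0;
 *			ctx->pos++;
 *		}
 *		return 0;
 *	}
 */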
/* If OR-ed with d_type, pending signals are not checked */
#define FILLDIR_FLAG_NOINTR 0x1000
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
*
* NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
* NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
* NOMMU_MAP_READ: Can be mapped for reading
* NOMMU_MAP_WRITE: Can be mapped for writing
* NOMMU_MAP_EXEC: Can be mapped for execution
*/
#define NOMMU_MAP_COPY 0x00000001
#define NOMMU_MAP_DIRECT 0x00000008
#define NOMMU_MAP_READ VM_MAYREAD
#define NOMMU_MAP_WRITE VM_MAYWRITE
#define NOMMU_MAP_EXEC VM_MAYEXEC
#define NOMMU_VMFLAGS \
(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
/*
* These flags control the behavior of the remap_file_range function pointer.
* If it is called with len == 0 that means "remap to end of source file".
* See Documentation/filesystems/vfs.rst for more details about this call.
*
* REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
* REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
*/
#define REMAP_FILE_DEDUP (1 << 0)
#define REMAP_FILE_CAN_SHORTEN (1 << 1)
/*
* These flags signal that the caller is ok with altering various aspects of
* the behavior of the remap operation. The changes must be made by the
* implementation; the vfs remap helper functions can take advantage of them.
* Flags in this category exist to preserve the quirky behavior of the hoisted
* btrfs clone/dedupe ioctls.
*/
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
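/*
 * Example (illustrative sketch): a typical ->remap_file_range() implementation
 * rejects flags it does not understand, lets generic_remap_file_range_prep()
 * (declared further below) validate and possibly shorten the request, and then
 * performs the actual clone/dedupe. "myfs_clone_extents" is hypothetical and
 * locking is elided.
 *
 *    static loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
 *                                        struct file *file_out, loff_t pos_out,
 *                                        loff_t len, unsigned int remap_flags)
 *    {
 *        int ret;
 *
 *        if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
 *            return -EINVAL;
 *
 *        ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *                                            pos_out, &len, remap_flags);
 *        if (ret < 0 || len == 0)
 *            return ret;
 *
 *        ret = myfs_clone_extents(file_in, pos_in, file_out, pos_out, len);
 *        return ret < 0 ? ret : len;
 *    }
 */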
/*
* These flags control the behavior of vfs_copy_file_range().
* They are not available to the user via syscall.
*
* COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
*/
#define COPY_FILE_SPLICE (1 << 0)
struct iov_iter;
struct io_uring_cmd;
struct offset_ctx;
typedef unsigned int __bitwise fop_flags_t;
struct file_operations {
struct module *owner;
fop_flags_t fop_flags;
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
unsigned int flags);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
void (*splice_eof)(struct file *file);
int (*setlease)(struct file *, int, struct file_lease **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
#ifndef CONFIG_MMU
unsigned (*mmap_capabilities)(struct file *);
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
loff_t, size_t, unsigned int);
loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
unsigned int poll_flags);
int (*mmap_prepare)(struct vm_area_desc *);
} __randomize_layout;
/* Supports async buffered reads */
#define FOP_BUFFER_RASYNC ((__force fop_flags_t)(1 << 0))
/* Supports async buffered writes */
#define FOP_BUFFER_WASYNC ((__force fop_flags_t)(1 << 1))
/* Supports synchronous page faults for mappings */
#define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2))
/* Supports non-exclusive O_DIRECT writes from multiple threads */
#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
/* Contains huge pages */
#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
/* Treat loff_t as unsigned (e.g., /dev/mem) */
#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
/* Supports asynchronous lock callbacks */
#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
/* File system supports uncached read/write buffered IO */
#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
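/*
 * Example (illustrative): a minimal file_operations instance for a regular
 * file backed by the page cache, built from the generic helpers declared
 * further below in this header. "myfs_fsync" is hypothetical.
 *
 *    const struct file_operations myfs_file_operations = {
 *        .owner        = THIS_MODULE,
 *        .llseek       = generic_file_llseek,
 *        .read_iter    = generic_file_read_iter,
 *        .write_iter   = generic_file_write_iter,
 *        .mmap         = generic_file_mmap,
 *        .open         = generic_file_open,
 *        .fsync        = myfs_fsync,
 *        .splice_read  = filemap_splice_read,
 *        .splice_write = iter_file_splice_write,
 *    };
 */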
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
int (*) (struct file *, struct dir_context *));
#define WRAP_DIR_ITER(x) \
static int shared_##x(struct file *file , struct dir_context *ctx) \
{ return wrap_directory_iterator(file, ctx, x); }
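/*
 * Example (illustrative): for an iterator that needs the inode lock held
 * exclusively, generate the wrapped variant and install it in
 * ->iterate_shared. "myfs_real_iterate" is a hypothetical iterator.
 *
 *    WRAP_DIR_ITER(myfs_real_iterate)    // defines shared_myfs_real_iterate()
 *
 *    const struct file_operations myfs_dir_operations = {
 *        .iterate_shared = shared_myfs_real_iterate,
 *        ...
 *    };
 */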
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
int (*permission) (struct mnt_idmap *, struct inode *, int);
struct posix_acl * (*get_inode_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
int (*create) (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,
const char *);
struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,
struct dentry *, umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t,dev_t);
int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
int (*setattr) (struct mnt_idmap *, struct dentry *, struct iattr *);
int (*getattr) (struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
int (*update_time)(struct inode *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
int (*tmpfile) (struct mnt_idmap *, struct inode *,
struct file *, umode_t);
struct posix_acl *(*get_acl)(struct mnt_idmap *, struct dentry *,
int);
int (*set_acl)(struct mnt_idmap *, struct dentry *,
struct posix_acl *, int);
int (*fileattr_set)(struct mnt_idmap *idmap,
struct dentry *dentry, struct file_kattr *fa);
int (*fileattr_get)(struct dentry *dentry, struct file_kattr *fa);
struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
} ____cacheline_aligned;
/* Did the driver provide valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
bool has_mmap = file->f_op->mmap;
bool has_mmap_prepare = file->f_op->mmap_prepare;
/* Hooks are mutually exclusive. */
if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
return false;
if (!has_mmap && !has_mmap_prepare)
return false;
return true;
}
int __compat_vma_mmap_prepare(const struct file_operations *f_op,
struct file *file, struct vm_area_struct *vma);
int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma);
static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
if (file->f_op->mmap_prepare)
return compat_vma_mmap_prepare(file, vma);
return file->f_op->mmap(file, vma);
}
static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
return file->f_op->mmap_prepare(desc);
}
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write);
int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *len, unsigned int remap_flags,
const struct iomap_ops *dax_read_ops);
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *count, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
/**
* enum freeze_holder - holder of the freeze
* @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
* @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
* @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
* @FREEZE_EXCL: a freeze that can only be undone by the owner
*
* Indicate who the owner of the freeze or thaw request is and whether
* the freeze needs to be exclusive or can nest.
* Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
* same holder aren't allowed. It is however allowed to hold a single
* @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
* the same time. This is relied upon by some filesystems during online
* repair or similar.
*/
enum freeze_holder {
FREEZE_HOLDER_KERNEL = (1U << 0),
FREEZE_HOLDER_USERSPACE = (1U << 1),
FREEZE_MAY_NEST = (1U << 2),
FREEZE_EXCL = (1U << 3),
};
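/*
 * Example (illustrative sketch): a kernel-internal user freezing a filesystem
 * for maintenance and thawing it again, allowing its request to nest with
 * other kernel freezes. Passing a NULL owner here is an assumption that holds
 * as long as FREEZE_EXCL is not used; see freeze_super()/thaw_super() below.
 *
 *    error = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
 *    if (!error) {
 *        // ... operate on the quiesced filesystem ...
 *        thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
 *    }
 */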
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
void (*free_inode)(struct inode *);
void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_super) (struct super_block *, enum freeze_holder who, const void *owner);
int (*freeze_fs) (struct super_block *);
int (*thaw_super) (struct super_block *, enum freeze_holder who, const void *owner);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin) (struct super_block *);
int (*show_options)(struct seq_file *, struct dentry *);
int (*show_devname)(struct seq_file *, struct dentry *);
int (*show_path)(struct seq_file *, struct dentry *);
int (*show_stats)(struct seq_file *, struct dentry *);
#ifdef CONFIG_QUOTA
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
struct dquot __rcu **(*get_dquots)(struct inode *);
#endif
long (*nr_cached_objects)(struct super_block *,
struct shrink_control *);
long (*free_cached_objects)(struct super_block *,
struct shrink_control *);
/*
* If a filesystem can support graceful removal of a device and
* continue read-write operations, implement this callback.
*
* Return 0 if the filesystem can continue read-write.
* A non-zero return value, or the absence of this callback, means the fs
* will be shut down as usual.
*/
int (*remove_bdev)(struct super_block *sb, struct block_device *bdev);
void (*shutdown)(struct super_block *sb);
};
/*
* Inode flags - they have no relation to superblock flags now
*/
#define S_SYNC (1 << 0) /* Writes are synced at once */
#define S_NOATIME (1 << 1) /* Do not update access times */
#define S_APPEND (1 << 2) /* Append-only file */
#define S_IMMUTABLE (1 << 3) /* Immutable file */
#define S_DEAD (1 << 4) /* removed, but still open directory */
#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */
#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */
#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */
#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */
#define S_PRIVATE (1 << 9) /* Inode is fs-internal */
#define S_IMA (1 << 10) /* Inode has an associated IMA struct */
#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */
#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */
#ifdef CONFIG_FS_DAX
#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */
#else
#define S_DAX 0 /* Make all the DAX code disappear */
#endif
#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
#define S_CASEFOLD (1 << 15) /* Casefolded file */
#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
#define S_ANON_INODE (1 << 19) /* Inode is an anonymous inode */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
* flags just means all the inodes inherit those flags by default. It might be
* possible to override it selectively if you really wanted to with some
* ioctl() that is not currently implemented.
*
* Exception: SB_RDONLY is always applied to the entire file system.
*
* Unfortunately, it is possible to change a filesystem's flags while it is
* mounted and has files in use. This means that all of the inodes will not
* have their i_flags updated. Hence, i_flags no longer inherits the superblock
* mount flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \
((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK)
#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME)
#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION)
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
#ifdef CONFIG_FS_POSIX_ACL
#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
#else
#define IS_POSIXACL(inode) 0
#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
#ifdef CONFIG_SWAP
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
#else
#define IS_SWAPFILE(inode) ((void)(inode), 0U)
#endif
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)
#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD)
#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
#define IS_ANON_FILE(inode) ((inode)->i_flags & S_ANON_INODE)
static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
struct inode *inode)
{
return !vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
!vfsgid_valid(i_gid_into_vfsgid(idmap, inode));
}
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = filp->f_iocb_flags,
.ki_ioprio = get_current_ioprio(),
};
}
static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = kiocb_src->ki_flags,
.ki_ioprio = kiocb_src->ki_ioprio,
.ki_pos = kiocb_src->ki_pos,
};
}
extern void __mark_inode_dirty(struct inode *, int);
static inline void mark_inode_dirty(struct inode *inode)
{
__mark_inode_dirty(inode, I_DIRTY);
}
static inline void mark_inode_dirty_sync(struct inode *inode)
{
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
static inline int icount_read(const struct inode *inode)
{
return atomic_read(&inode->i_count);
}
/*
* Returns true if the given inode itself only has dirty timestamps (its pages
* may still be dirty) and isn't currently being allocated or freed.
* Filesystems should call this when writing an inode with lazytime enabled if
* they want to opportunistically write the timestamps of other inodes located
* very nearby on-disk, e.g. in the same inode block. This returns true if the
* given inode is in need of such an opportunistic update. Requires i_lock, or
* at least later re-checking under i_lock.
*/
static inline bool inode_is_dirtytime_only(struct inode *inode)
{
return (inode->i_state & (I_DIRTY_TIME | I_NEW |
I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
}
extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
extern void set_nlink(struct inode *inode, unsigned int nlink);
static inline void inode_inc_link_count(struct inode *inode)
{
inc_nlink(inode);
mark_inode_dirty(inode);
}
static inline void inode_dec_link_count(struct inode *inode)
{
drop_nlink(inode);
mark_inode_dirty(inode);
}
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
S_CTIME = 4,
S_VERSION = 8,
};
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
int inode_update_time(struct inode *inode, int flags);
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
touch_atime(&file->f_path);
}
extern int file_modified(struct file *file);
int kiocb_modified(struct kiocb *iocb);
int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
const char *name;
int fs_flags;
#define FS_REQUIRES_DEV 1
#define FS_BINARY_MOUNTDATA 2
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_LBS 128 /* FS supports LBS */
#define FS_POWER_FREEZE 256 /* Always freeze on suspend/hibernate */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
struct hlist_head fs_supers;
struct lock_class_key s_lock_key;
struct lock_class_key s_umount_key;
struct lock_class_key s_vfs_rename_key;
struct lock_class_key s_writers_key[SB_FREEZE_LEVELS];
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
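/*
 * Example (illustrative): a minimal fs_context-based filesystem type and its
 * registration. "myfs_init_fs_context" is hypothetical; kill_anon_super() and
 * register_filesystem() are declared elsewhere in this header.
 *
 *    static struct file_system_type myfs_fs_type = {
 *        .owner           = THIS_MODULE,
 *        .name            = "myfs",
 *        .init_fs_context = myfs_init_fs_context,
 *        .kill_sb         = kill_anon_super,
 *        .fs_flags        = FS_USERNS_MOUNT,
 *    };
 *    MODULE_ALIAS_FS("myfs");
 *
 *    // module init/exit:
 *    //    register_filesystem(&myfs_fs_type);
 *    //    unregister_filesystem(&myfs_fs_type);
 */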
/**
* is_mgtime: is this inode using multigrain timestamps
* @inode: inode to test for multigrain timestamps
*
* Return true if the inode uses multigrain timestamps, false otherwise.
*/
static inline bool is_mgtime(const struct inode *inode)
{
return inode->i_opflags & IOP_MGTIME;
}
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
int set_anon_super_fc(struct super_block *s, struct fs_context *fc);
int get_anon_bdev(dev_t *);
void free_anon_bdev(dev_t);
struct super_block *sget_fc(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
int (*set)(struct super_block *, struct fs_context *));
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) ({ \
const struct file_operations *_fops = (fops); \
(((_fops) && try_module_get((_fops)->owner) ? (_fops) : NULL)); \
})
#define fops_put(fops) ({ \
const struct file_operations *_fops = (fops); \
if (_fops) \
module_put((_fops)->owner); \
})
/*
* This one is to be used *ONLY* from ->open() instances.
* fops must be non-NULL, pinned down *and* module dependencies
* should be sufficient to pin the caller down as well.
*/
#define replace_fops(f, fops) \
do { \
struct file *__file = (f); \
fops_put(__file->f_op); \
BUG_ON(!(__file->f_op = (fops))); \
} while(0)
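/*
 * Example (illustrative): a demultiplexing ->open() that switches the file to
 * device-specific operations, in the spirit of chrdev_open(). "mydev_fops" is
 * hypothetical and assumed to be pinned as described above.
 *
 *    static int mydisp_open(struct inode *inode, struct file *filp)
 *    {
 *        replace_fops(filp, &mydev_fops);
 *        if (filp->f_op->open)
 *            return filp->f_op->open(inode, filp);
 *        return 0;
 *    }
 */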
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
int freeze_super(struct super_block *super, enum freeze_holder who,
const void *freeze_owner);
int thaw_super(struct super_block *super, enum freeze_holder who,
const void *freeze_owner);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
static inline void super_set_uuid(struct super_block *sb, const u8 *uuid, unsigned len)
{
if (WARN_ON(len > sizeof(sb->s_uuid)))
len = sizeof(sb->s_uuid);
sb->s_uuid_len = len;
memcpy(&sb->s_uuid, uuid, len);
}
/* set sb sysfs name based on sb->s_bdev */
static inline void super_set_sysfs_name_bdev(struct super_block *sb)
{
snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pg", sb->s_bdev);
}
/* set sb sysfs name based on sb->s_uuid */
static inline void super_set_sysfs_name_uuid(struct super_block *sb)
{
WARN_ON(sb->s_uuid_len != sizeof(sb->s_uuid));
snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pU", sb->s_uuid.b);
}
/* set sb sysfs name based on sb->s_id */
static inline void super_set_sysfs_name_id(struct super_block *sb)
{
strscpy(sb->s_sysfs_name, sb->s_id, sizeof(sb->s_sysfs_name));
}
/* try to use something standard before you use this */
__printf(2, 3)
static inline void super_set_sysfs_name_generic(struct super_block *sb, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vsnprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), fmt, args);
va_end(args);
}
extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
void iput_not_last(struct inode *);
int inode_update_timestamps(struct inode *inode, int flags);
int generic_update_time(struct inode *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
/* fs/open.c */
struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
atomic_t refcnt;
struct audit_names *aname;
const char iname[];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
return mnt_idmap(file->f_path.mnt);
}
/**
* is_idmapped_mnt - check whether a mount is mapped
* @mnt: the mount to check
*
* If @mnt has a non-@nop_mnt_idmap idmapping attached to it, then @mnt is mapped.
*
* Return: true if mount is mapped, false if not.
*/
static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
{
return mnt_idmap(mnt) != &nop_mnt_idmap;
}
int vfs_truncate(const struct path *, loff_t);
int do_truncate(struct mnt_idmap *, struct dentry *, loff_t start,
unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
int do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(const struct path *,
const char *, int, umode_t);
static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
const char *name, int flags, umode_t mode)
{
return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
name, flags, mode);
}
struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_open_nonotify(const struct path *path, int flags,
const struct cred *cred);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
const struct path *backing_file_user_path(const struct file *f);
/*
* When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
* stored in ->vm_file is a backing file whose f_inode is on the underlying
* filesystem. When the mapped file path and inode number are displayed to the
* user (e.g. via /proc/<pid>/maps), these helpers should be used to get the
* path and inode number to display: the path of the fd that the user requested
* to map and the inode number that would be returned by fstat() on that same
* fd.
*/
/* Get the path to display in /proc/<pid>/maps */
static inline const struct path *file_user_path(const struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
return backing_file_user_path(f);
return &f->f_path;
}
/* Get the inode whose inode number to display in /proc/<pid>/maps */
static inline const struct inode *file_user_inode(const struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
return d_inode(backing_file_user_path(f)->dentry);
return file_inode(f);
}
static inline struct file *file_clone_open(struct file *file)
{
return dentry_open(&file->f_path, file->f_flags, file->f_cred);
}
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int);
extern struct filename *getname_uflags(const char __user *, int);
static inline struct filename *getname(const char __user *name)
{
return getname_flags(name, 0);
}
extern struct filename *getname_kernel(const char *);
extern struct filename *__getname_maybe_null(const char __user *);
static inline struct filename *getname_maybe_null(const char __user *name, int flags)
{
if (!(flags & AT_EMPTY_PATH))
return getname(name);
if (!name)
return NULL;
return __getname_maybe_null(name);
}
extern void putname(struct filename *name);
DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
static inline struct filename *refname(struct filename *name)
{
atomic_inc(&name->refcnt);
return name;
}
extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
/* Helper for the simple case when original dentry is used */
static inline int finish_open_simple(struct file *file, int error)
{
if (error)
return error;
return finish_open(file, file->f_path.dentry, NULL);
}
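/*
 * Example (illustrative sketch): a ->tmpfile() implementation typically
 * allocates the inode, attaches it to the open file with d_tmpfile() (from
 * <linux/dcache.h>), and finishes with the helper above so the error path
 * stays trivial. "myfs_new_inode" is hypothetical.
 *
 *    static int myfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
 *                            struct file *file, umode_t mode)
 *    {
 *        struct inode *inode = myfs_new_inode(idmap, dir, mode);
 *
 *        if (IS_ERR(inode))
 *            return PTR_ERR(inode);
 *        d_tmpfile(file, inode);
 *        return finish_open_simple(file, 0);
 *    }
 */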
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
extern struct kmem_cache *names_cachep;
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
extern struct super_block *blockdev_superblock;
static inline bool sb_is_blkdev_sb(struct super_block *sb)
{
return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
}
void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
/* fs/char_dev.c */
#define CHRDEV_MAJOR_MAX 512
/* Marks the bottom of the first segment of free char majors */
#define CHRDEV_MAJOR_DYN_END 234
/* Marks the top and bottom of the second segment of free char majors */
#define CHRDEV_MAJOR_DYN_EXT_START 511
#define CHRDEV_MAJOR_DYN_EXT_END 384
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
unsigned int count, const char *name,
const struct file_operations *fops);
extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
unsigned int count, const char *name);
extern void unregister_chrdev_region(dev_t, unsigned);
extern void chrdev_show(struct seq_file *,off_t);
static inline int register_chrdev(unsigned int major, const char *name,
const struct file_operations *fops)
{
return __register_chrdev(major, 0, 256, name, fops);
}
static inline void unregister_chrdev(unsigned int major, const char *name)
{
__unregister_chrdev(major, 0, 256, name);
}
extern void init_special_inode(struct inode *, umode_t, dev_t);
/* Invalid inode operations -- fs/bad_inode.c */
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,
loff_t end);
static inline int file_write_and_wait(struct file *file)
{
return file_write_and_wait_range(file, 0, LLONG_MAX);
}
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
unsigned int flags);
static inline bool iocb_is_dsync(const struct kiocb *iocb)
{
return (iocb->ki_flags & IOCB_DSYNC) ||
IS_SYNC(iocb->ki_filp->f_mapping->host);
}
/*
* Sync the bytes written if this was a synchronous write. Expects ki_pos to
* already be updated for the write, and returns either the number of bytes
* passed in or an error if syncing the file failed.
*/
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
if (iocb_is_dsync(iocb)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
if (ret)
return ret;
} else if (iocb->ki_flags & IOCB_DONTCACHE) {
struct address_space *mapping = iocb->ki_filp->f_mapping;
filemap_fdatawrite_range_kick(mapping, iocb->ki_pos - count,
iocb->ki_pos - 1);
}
return count;
}
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
extern int bmap(struct inode *inode, sector_t *block);
#else
static inline int bmap(struct inode *inode, sector_t *block)
{
return -EINVAL;
}
#endif
int notify_change(struct mnt_idmap *, struct dentry *,
struct iattr *, struct inode **);
int inode_permission(struct mnt_idmap *, struct inode *, int);
int generic_permission(struct mnt_idmap *, struct inode *, int);
static inline int file_permission(struct file *file, int mask)
{
return inode_permission(file_mnt_idmap(file),
file_inode(file), mask);
}
static inline int path_permission(const struct path *path, int mask)
{
return inode_permission(mnt_idmap(path->mnt),
d_inode(path->dentry), mask);
}
int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
{
return (inode->i_mode ^ mode) & S_IFMT;
}
/**
* file_start_write - get write access to a superblock for regular file io
* @file: the file we want to write to
*
* This is a variant of sb_start_write() which is a no-op on non-regular files.
* Should be matched with a call to file_end_write().
*/
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
sb_start_write(file_inode(file)->i_sb);
}
static inline bool file_start_write_trylock(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
return sb_start_write_trylock(file_inode(file)->i_sb);
}
/**
* file_end_write - drop write access to a superblock of a regular file
* @file: the file we wrote to
*
* Should be matched with a call to file_start_write().
*/
static inline void file_end_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
sb_end_write(file_inode(file)->i_sb);
}
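/*
 * Example (illustrative): the usual pairing when writing to a file on behalf
 * of a caller that has not taken freeze protection itself; vfs_iter_write()
 * is declared further below.
 *
 *    file_start_write(file);
 *    ret = vfs_iter_write(file, &iter, &pos, 0);
 *    file_end_write(file);
 */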
/**
* kiocb_start_write - get write access to a superblock for async file io
* @iocb: the io context we want to submit the write with
*
* This is a variant of sb_start_write() for async io submission.
* Should be matched with a call to kiocb_end_write().
*/
static inline void kiocb_start_write(struct kiocb *iocb)
{
struct inode *inode = file_inode(iocb->ki_filp);
sb_start_write(inode->i_sb);
/*
* Fool lockdep by telling it the lock got released so that it
* doesn't complain about the held lock when we return to userspace.
*/
__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
}
/**
* kiocb_end_write - drop write access to a superblock after async file io
* @iocb: the io context we submitted the write with
*
* Should be matched with a call to kiocb_start_write().
*/
static inline void kiocb_end_write(struct kiocb *iocb)
{
struct inode *inode = file_inode(iocb->ki_filp);
/*
* Tell lockdep we inherited freeze protection from submission thread.
*/
__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
sb_end_write(inode->i_sb);
}
/*
* This is used for regular files where some users -- especially the
* currently executed binary in a process, previously handled via
* VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
* read-write shared) accesses.
*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
* deny_write_access() denies write access to a file.
* allow_write_access() re-enables write access to a file.
*
* The i_writecount field of an inode can have the following values:
* 0: no write access, no denied write access
* < 0: (-i_writecount) users that denied write access to the file.
* > 0: (i_writecount) users that have write access to the file.
*
* Normally we operate on that counter with atomic_{inc,dec} and it's safe
* except for the cases where we don't hold i_writecount yet. Then we need to
* use {get,deny}_write_access() - these functions check the sign and refuse
* to do the change if sign is wrong.
*/
static inline int get_write_access(struct inode *inode)
{
return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY;
}
static inline int deny_write_access(struct file *file)
{
struct inode *inode = file_inode(file);
return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
}
static inline void put_write_access(struct inode * inode)
{
atomic_dec(&inode->i_writecount);
}
static inline void allow_write_access(struct file *file)
{
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
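/*
 * Example (illustrative): a kernel user that must ensure no one is denying
 * writes for the duration of an update, mirroring what the open-for-write
 * path does with i_writecount.
 *
 *    error = get_write_access(inode);
 *    if (error)        // -ETXTBSY: write access currently denied
 *        return error;
 *    // ... modify the file ...
 *    put_write_access(inode);
 */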
/*
* Do not prevent write to executable file when watched by pre-content events.
*
* Note that FMODE_FSNOTIFY_HSM mode is set depending on pre-content watches at
* the time of file open and remains constant for the entire lifetime of the
* file, so if pre-content watches are added after execution starts or removed
* before it ends, this will not cause an i_writecount reference leak.
*/
static inline int exe_file_deny_write_access(struct file *exe_file)
{
if (unlikely(FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
return 0;
return deny_write_access(exe_file);
}
static inline void exe_file_allow_write_access(struct file *exe_file)
{
if (unlikely(!exe_file || FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
return;
allow_write_access(exe_file);
}
static inline void file_set_fsnotify_mode(struct file *file, fmode_t mode)
{
file->f_mode &= ~FMODE_FSNOTIFY_MASK;
file->f_mode |= mode;
}
static inline bool inode_is_open_for_write(const struct inode *inode)
{
return atomic_read(&inode->i_writecount) > 0;
}
#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
static inline void i_readcount_dec(struct inode *inode)
{
BUG_ON(atomic_dec_return(&inode->i_readcount) < 0);
}
static inline void i_readcount_inc(struct inode *inode)
{
atomic_inc(&inode->i_readcount);
}
#else
static inline void i_readcount_dec(struct inode *inode)
{
return;
}
static inline void i_readcount_inc(struct inode *inode)
{
return;
}
#endif
extern int do_pipe_flags(int *, int);
extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
extern bool is_subdir(struct dentry *, struct dentry *);
extern bool path_is_under(const struct path *, const struct path *);
extern char *file_path(struct file *, char *, int);
/**
* is_dot_dotdot - returns true only if @name is "." or ".."
* @name: file name to check
* @len: length of file name, in bytes
*/
static inline bool is_dot_dotdot(const char *name, size_t len)
{
return len && unlikely(name[0] == '.') &&
(len == 1 || (len == 2 && name[1] == '.'));
}
/**
* name_contains_dotdot - check if a file name contains ".." path components
* @name: File path string to check
*
* Search for ".." surrounded by either '/' or start/end of string.
*/
static inline bool name_contains_dotdot(const char *name)
{
size_t name_len;
name_len = strlen(name);
return strcmp(name, "..") == 0 ||
strncmp(name, "../", 3) == 0 ||
strstr(name, "/../") != NULL ||
(name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
}
#include <linux/err.h>
/* needed for stackable file system support */
extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
extern int inode_init_always_gfp(struct super_block *, struct inode *, gfp_t);
static inline int inode_init_always(struct super_block *sb, struct inode *inode)
{
return inode_init_always_gfp(sb, inode, GFP_NOFS);
}
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern int inode_just_drop(struct inode *inode);
static inline int inode_generic_drop(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
extern void d_mark_dontcache(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
void *data);
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *),
void *data);
struct inode *iget5_locked(struct super_block *, unsigned long,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *), void *);
struct inode *iget5_locked_rcu(struct super_block *, unsigned long,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
unsigned long,
int (*match)(struct inode *,
unsigned long, void *),
void *data);
extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
int (*)(struct inode *, void *), void *);
extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
#else
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
void dump_mapping(const struct address_space *);
/*
* Userspace may rely on the inode number being non-zero. For example, glibc
* simply ignores files with zero i_ino in unlink() and other places.
*
* As an additional complication, if userspace was compiled with
* _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the
* lower 32 bits, so we need to check that those aren't zero explicitly. With
* _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but
* better safe than sorry.
*/
static inline bool is_zero_ino(ino_t ino)
{
return (u32)ino == 0;
}
/*
* inode->i_lock must be held
*/
static inline void __iget(struct inode *inode)
{
atomic_inc(&inode->i_count);
}
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
struct inode *alloc_inode(struct super_block *sb);
static inline struct inode *new_inode_pseudo(struct super_block *sb)
{
return alloc_inode(sb);
}
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
extern int file_remove_privs(struct file *);
int setattr_should_drop_sgid(struct mnt_idmap *idmap,
const struct inode *inode);
/*
* This must be used for allocating filesystem-specific inodes to set
* up the inode reclaim context correctly.
*/
#define alloc_inode_sb(_sb, _cache, _gfp) kmem_cache_alloc_lru(_cache, &_sb->s_inode_lru, _gfp)
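/*
 * Example (illustrative sketch): a typical ->alloc_inode() built on the macro
 * above, using a per-filesystem slab cache. "myfs_inode_cachep" and
 * "struct myfs_inode_info" (with an embedded vfs_inode) are hypothetical.
 *
 *    static struct inode *myfs_alloc_inode(struct super_block *sb)
 *    {
 *        struct myfs_inode_info *mi;
 *
 *        mi = alloc_inode_sb(sb, myfs_inode_cachep, GFP_KERNEL);
 *        if (!mi)
 *            return NULL;
 *        return &mi->vfs_inode;
 *    }
 */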
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
{
__insert_inode_hash(inode, inode->i_ino);
}
extern void __remove_inode_hash(struct inode *);
static inline void remove_inode_hash(struct inode *inode)
{
if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
__remove_inode_hash(inode);
}
extern void inode_sb_list_add(struct inode *inode);
extern void inode_add_lru(struct inode *inode);
int sb_set_blocksize(struct super_block *sb, int size);
int __must_check sb_min_blocksize(struct super_block *sb, int size);
int generic_file_mmap(struct file *, struct vm_area_struct *);
int generic_file_mmap_prepare(struct vm_area_desc *desc);
int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
extern int generic_write_check_limits(struct file *file, loff_t pos,
loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
ssize_t direct_written, ssize_t buffered_written);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
/* fs/splice.c */
ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
ssize_t copy_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
int whence, loff_t maxsize, loff_t eof);
loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
u64 *cookie);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern int stream_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
loff_t file_offset);
enum {
/* need locking between buffered and direct access */
DIO_LOCKING = 0x01,
/* filesystem does not support filling holes */
DIO_SKIP_HOLES = 0x02,
};
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
dio_iodone_t end_io,
int flags);
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
struct inode *inode,
struct iov_iter *iter,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
get_block, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
bool inode_dio_finished(const struct inode *inode);
void inode_dio_wait(struct inode *inode);
void inode_dio_wait_interruptible(struct inode *inode);
/**
* inode_dio_begin - signal start of a direct I/O request
* @inode: inode the direct I/O happens on
*
* This is called before starting a direct I/O request on @inode. Each call
* must be balanced by inode_dio_end() once the request completes, so that
* callers waiting for direct I/O to be quiesced can be woken.
*/
static inline void inode_dio_begin(struct inode *inode)
{
atomic_inc(&inode->i_dio_count);
}
/**
* inode_dio_end - signal finish of a direct I/O request
* @inode: inode the direct I/O happens on
*
* This is called once we've finished processing a direct I/O request,
* and is used to wake up callers waiting for direct I/O to be quiesced.
*/
static inline void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
wake_up_var(&inode->i_dio_count);
}
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
extern int readlink_copy(char __user *, int, const char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
extern const char *page_get_link_raw(struct dentry *, struct inode *,
struct delayed_call *);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
void generic_fill_statx_atomic_writes(struct kstat *stat,
unsigned int unit_min,
unsigned int unit_max,
unsigned int unit_max_opt);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
static inline loff_t __inode_get_bytes(struct inode *inode)
{
return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
}
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
const char *simple_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern const struct inode_operations simple_symlink_inode_operations;
extern int iterate_dir(struct file *, struct dir_context *);
int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
int flags);
int vfs_fstat(int fd, struct kstat *stat);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
return vfs_fstatat(AT_FDCWD, filename, stat, 0);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
void filesystems_freeze(bool freeze_all);
void filesystems_thaw(void);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
extern int simple_setattr(struct mnt_idmap *, struct dentry *,
struct iattr *);
extern int simple_getattr(struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename(struct mnt_idmap *, struct inode *,
struct dentry *, struct inode *, struct dentry *,
unsigned int);
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern void locked_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(const struct kiocb *iocb,
struct address_space *mapping,
loff_t pos, unsigned len,
struct folio **foliop, void **fsdata);
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode);
extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
extern void make_empty_dir_inode(struct inode *inode);
extern bool is_empty_dir_inode(struct inode *inode);
struct tree_descr { const char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
extern int simple_fill_super(struct super_block *, unsigned long,
const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
struct dentry *simple_start_creating(struct dentry *, const char *);
extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
loff_t *ppos, const void *from, size_t available);
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count);
struct offset_ctx {
struct maple_tree mt;
unsigned long next_offset;
};
void simple_offset_init(struct offset_ctx *octx);
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int simple_offset_rename_exchange(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry);
void simple_offset_destroy(struct offset_ctx *octx);
extern const struct file_operations simple_offset_dir_operations;
extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
extern void generic_set_sb_d_ops(struct super_block *sb);
extern int generic_ci_match(const struct inode *parent,
const struct qstr *name,
const struct qstr *folded_name,
const u8 *de_name, u32 de_name_len);
#if IS_ENABLED(CONFIG_UNICODE)
int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str);
int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name);
/**
* generic_ci_validate_strict_name - Check if a given name is suitable
* for a directory
*
* This function checks if the proposed filename is valid for the
* parent directory. That means that only valid UTF-8 filenames will be
* accepted for casefold directories from filesystems created with the
* strict encoding flag. It also means that any name will be
* accepted for directories that don't have casefold enabled, or
* aren't strict about the encoding.
*
* @dir: inode of the directory where the new file will be created
* @name: name of the new file
*
* Return:
* * True if the filename is suitable for this directory. It can also be
* true when a given name is not suitable for a strict-encoding
* directory, but the directory being used isn't strict
* * False if the filename isn't suitable for this directory. This only
* happens when a directory is casefolded and the filesystem is strict
* about its encoding.
*/
static inline bool generic_ci_validate_strict_name(struct inode *dir,
const struct qstr *name)
{
if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb))
return true;
/*
* A casefold dir must have an encoding set, unless the filesystem
* is corrupted
*/
if (WARN_ON_ONCE(!dir->i_sb->s_encoding))
return true;
return !utf8_validate(dir->i_sb->s_encoding, name);
}
#else
static inline bool generic_ci_validate_strict_name(struct inode *dir,
const struct qstr *name)
{
return true;
}
#endif
static inline struct unicode_map *sb_encoding(const struct super_block *sb)
{
#if IS_ENABLED(CONFIG_UNICODE)
return sb->s_encoding;
#else
return NULL;
#endif
}
static inline bool sb_has_encoding(const struct super_block *sb)
{
return !!sb_encoding(sb);
}
/*
* Compare if two super blocks have the same encoding and flags
*/
static inline bool sb_same_encoding(const struct super_block *sb1,
const struct super_block *sb2)
{
#if IS_ENABLED(CONFIG_UNICODE)
if (sb1->s_encoding == sb2->s_encoding)
return true;
return (sb1->s_encoding && sb2->s_encoding &&
(sb1->s_encoding->version == sb2->s_encoding->version) &&
(sb1->s_encoding_flags == sb2->s_encoding_flags));
#else
return true;
#endif
}
int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
unsigned int ia_valid);
int setattr_prepare(struct mnt_idmap *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
void setattr_copy(struct mnt_idmap *, struct inode *inode,
const struct iattr *attr);
extern int file_update_time(struct file *file);
static inline bool file_is_dax(const struct file *file)
{
return file && IS_DAX(file->f_mapping->host);
}
static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
return file_is_dax(vma->vm_file);
}
static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{
struct inode *inode;
if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
return false;
if (!vma_is_dax(vma))
return false;
inode = file_inode(vma->vm_file);
if (S_ISCHR(inode->i_mode))
return false; /* device-dax */
return true;
}
static inline int iocb_flags(struct file *file)
{
int res = 0;
if (file->f_flags & O_APPEND)
res |= IOCB_APPEND;
if (file->f_flags & O_DIRECT)
res |= IOCB_DIRECT;
if (file->f_flags & O_DSYNC)
res |= IOCB_DSYNC;
if (file->f_flags & __O_SYNC)
res |= IOCB_SYNC;
return res;
}
static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
int rw_type)
{
int kiocb_flags = 0;
/* make sure there's no overlap between RWF and private IOCB flags */
BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
if (!flags)
return 0;
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
if (unlikely((flags & RWF_APPEND) && (flags & RWF_NOAPPEND)))
return -EINVAL;
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
}
if (flags & RWF_ATOMIC) {
if (rw_type != WRITE)
return -EOPNOTSUPP;
if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE))
return -EOPNOTSUPP;
}
if (flags & RWF_DONTCACHE) {
/* file system must support it */
if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE))
return -EOPNOTSUPP;
/* DAX mappings not supported */
if (IS_DAX(ki->ki_filp->f_mapping->host))
return -EOPNOTSUPP;
}
kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
kiocb_flags |= IOCB_DSYNC;
if ((flags & RWF_NOAPPEND) && (ki->ki_flags & IOCB_APPEND)) {
if (IS_APPEND(file_inode(ki->ki_filp)))
return -EPERM;
ki->ki_flags &= ~IOCB_APPEND;
}
ki->ki_flags |= kiocb_flags;
return 0;
}
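/*
 * Illustrative sketch: a write path could translate user-supplied RWF_*
 * flags into kiocb flags before submitting I/O, bailing out early when the
 * file cannot honour them. example_write_begin() is a hypothetical helper;
 * kiocb_set_rw_flags() is the function defined above.
 */
#if 0	/* example only, not compiled */
static int example_write_begin(struct kiocb *iocb, rwf_t flags)
{
	int ret;

	/* maps RWF_* to IOCB_* and rejects unsupported combinations */
	ret = kiocb_set_rw_flags(iocb, flags, WRITE);
	if (ret)
		return ret;
	/* ... continue with the actual write submission ... */
	return 0;
}
#endif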
/* Transaction based IO helpers */
/*
* An argresp is stored in an allocated page and holds the
* size of the argument or response, along with its content
*/
struct simple_transaction_argresp {
ssize_t size;
char data[];
};
#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
char *simple_transaction_get(struct file *file, const char __user *buf,
size_t size);
ssize_t simple_transaction_read(struct file *file, char __user *buf,
size_t size, loff_t *pos);
int simple_transaction_release(struct inode *inode, struct file *file);
void simple_transaction_set(struct file *file, size_t n);
/*
* simple attribute files
*
* These attributes behave similarly to those in sysfs:
*
* Writing to an attribute immediately sets a value; an open file can be
* written to multiple times.
*
* Reading from an attribute creates a buffer from the value that might get
* read with multiple read calls. When the attribute has been read
* completely, no further read calls are possible until the file is opened
* again.
*
* All attributes contain a text representation of a numeric value
* that is accessed with the get() and set() functions.
*/
#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
return simple_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
.owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = simple_attr_read, \
.write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
.llseek = generic_file_llseek, \
}
#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
/* don't do anything, just let the compiler check the arguments; */
}
int simple_attr_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt);
int simple_attr_release(struct inode *inode, struct file *file);
ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
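/*
 * Illustrative sketch: a typical attribute built on the helpers above, as
 * commonly done for debugfs files. example_threshold and its get/set
 * callbacks are hypothetical; the DEFINE_SIMPLE_ATTRIBUTE() usage follows
 * the macro declared earlier in this file.
 */
#if 0	/* example only, not compiled */
static u64 example_threshold;

static int example_threshold_get(void *data, u64 *val)
{
	*val = example_threshold;
	return 0;
}

static int example_threshold_set(void *data, u64 val)
{
	example_threshold = val;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(example_threshold_fops, example_threshold_get,
			example_threshold_set, "%llu\n");
#endif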
struct ctl_table;
int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
static inline bool is_sxid(umode_t mode)
{
return mode & (S_ISUID | S_ISGID);
}
static inline int check_sticky(struct mnt_idmap *idmap,
struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
return __check_sticky(idmap, dir, inode);
}
static inline void inode_has_no_xattr(struct inode *inode)
{
if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC))
inode->i_flags |= S_NOSEC;
}
static inline bool is_root_inode(struct inode *inode)
{
return inode == inode->i_sb->s_root->d_inode;
}
static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
file->f_path.dentry->d_inode->i_ino, DT_DIR);
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
d_parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
return false;
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!dir_emit_dotdot(file, ctx))
return false;
ctx->pos = 2;
}
return true;
}
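/*
 * Illustrative sketch of a minimal ->iterate_shared() implementation using
 * dir_emit_dots() and dir_emit() from above. struct example_dirent and
 * example_nth_entry() are hypothetical stand-ins for a filesystem's own
 * directory layout.
 */
#if 0	/* example only, not compiled */
static int example_readdir(struct file *file, struct dir_context *ctx)
{
	const struct example_dirent *de;

	/* emit "." and ".." and advance ctx->pos past them */
	if (!dir_emit_dots(file, ctx))
		return 0;
	while ((de = example_nth_entry(file, ctx->pos - 2)) != NULL) {
		if (!dir_emit(ctx, de->name, de->namelen, de->ino, de->type))
			return 0;	/* buffer full, resume here next time */
		ctx->pos++;
	}
	return 0;
}
#endif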
static inline bool dir_relax(struct inode *inode)
{
inode_unlock(inode);
inode_lock(inode);
return !IS_DEADDIR(inode);
}
static inline bool dir_relax_shared(struct inode *inode)
{
inode_unlock_shared(inode);
inode_lock_shared(inode);
return !IS_DEADDIR(inode);
}
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
/* mm/fadvise.c */
extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
static inline bool vfs_empty_path(int dfd, const char __user *path)
{
char c;
if (dfd < 0)
return false;
/* We now allow NULL to be used for empty path. */
if (!path)
return true;
if (unlikely(get_user(c, path)))
return false;
return !c;
}
int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
static inline bool extensible_ioctl_valid(unsigned int cmd_a,
unsigned int cmd_b, size_t min_size)
{
if (_IOC_DIR(cmd_a) != _IOC_DIR(cmd_b))
return false;
if (_IOC_TYPE(cmd_a) != _IOC_TYPE(cmd_b))
return false;
if (_IOC_NR(cmd_a) != _IOC_NR(cmd_b))
return false;
if (_IOC_SIZE(cmd_a) < min_size)
return false;
return true;
}
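/*
 * Illustrative sketch: a driver can accept newer, larger versions of an
 * extensible ioctl argument as long as the direction, type and number
 * match and the size covers the original structure. EXAMPLE_IOC_GET,
 * struct example_args and example_handle_get() are hypothetical.
 */
#if 0	/* example only, not compiled */
static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (extensible_ioctl_valid(cmd, EXAMPLE_IOC_GET, sizeof(struct example_args)))
		return example_handle_get(file, arg);
	return -ENOTTY;
}
#endif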
#endif /* _LINUX_FS_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SELinux NetLabel Support
*
* This file provides the necessary glue to tie NetLabel into the SELinux
* subsystem.
*
* Author: Paul Moore <paul@paul-moore.com>
*/
/*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2007, 2008
*/
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/lsm_hooks.h>
#include <net/sock.h>
#include <net/netlabel.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include "objsec.h"
#include "security.h"
#include "netlabel.h"
/**
* selinux_netlbl_sidlookup_cached - Cache a SID lookup
* @skb: the packet
* @family: the packet's address family
* @secattr: the NetLabel security attributes
* @sid: the SID
*
* Description:
* Query the SELinux security server to look up the correct SID for the given
* security attributes. If the query is successful, cache the result to speed
* up future lookups. Returns zero on success, negative values on failure.
*
*/
static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
u16 family,
struct netlbl_lsm_secattr *secattr,
u32 *sid)
{
int rc;
rc = security_netlbl_secattr_to_sid(secattr, sid);
if (rc == 0 &&
(secattr->flags & NETLBL_SECATTR_CACHEABLE) &&
(secattr->flags & NETLBL_SECATTR_CACHE))
netlbl_cache_add(skb, family, secattr);
return rc;
}
/**
* selinux_netlbl_sock_genattr - Generate the NetLabel socket secattr
* @sk: the socket
*
* Description:
* Generate the NetLabel security attributes for a socket, making full use of
* the socket's attribute cache. Returns a pointer to the security attributes
* on success, or an ERR_PTR on failure.
*
*/
static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
{
int rc;
struct sk_security_struct *sksec = selinux_sock(sk);
struct netlbl_lsm_secattr *secattr;
if (sksec->nlbl_secattr != NULL)
return sksec->nlbl_secattr;
secattr = netlbl_secattr_alloc(GFP_ATOMIC);
if (secattr == NULL)
return ERR_PTR(-ENOMEM);
rc = security_netlbl_sid_to_secattr(sksec->sid, secattr);
if (rc != 0) {
netlbl_secattr_free(secattr);
return ERR_PTR(rc);
}
sksec->nlbl_secattr = secattr;
return secattr;
}
/**
* selinux_netlbl_sock_getattr - Get the cached NetLabel secattr
* @sk: the socket
* @sid: the SID
*
* Query the socket's cached secattr and if the SID matches the cached value
* return the cache, otherwise return NULL.
*
*/
static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr(
const struct sock *sk,
u32 sid)
{
struct sk_security_struct *sksec = selinux_sock(sk);
struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr;
if (secattr == NULL)
return NULL;
if ((secattr->flags & NETLBL_SECATTR_SECID) &&
(secattr->attr.secid == sid))
return secattr;
return NULL;
}
/**
* selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache
*
* Description:
* Invalidate the NetLabel security attribute mapping cache.
*
*/
void selinux_netlbl_cache_invalidate(void)
{
netlbl_cache_invalidate();
}
/**
* selinux_netlbl_err - Handle a NetLabel packet error
* @skb: the packet
* @family: the packet's address family
* @error: the error code
* @gateway: true if host is acting as a gateway, false otherwise
*
* Description:
* When a packet is dropped due to a call to avc_has_perm() pass the error
* code to the NetLabel subsystem so any protocol specific processing can be
* done. This is safe to call even if you are unsure whether NetLabel labeling
* is present on the packet; NetLabel is smart enough to only act when it should.
*
*/
void selinux_netlbl_err(struct sk_buff *skb, u16 family, int error, int gateway)
{
netlbl_skbuff_err(skb, family, error, gateway);
}
/**
* selinux_netlbl_sk_security_free - Free the NetLabel fields
* @sksec: the sk_security_struct
*
* Description:
* Free all of the memory in the NetLabel fields of a sk_security_struct.
*
*/
void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec)
{
if (!sksec->nlbl_secattr)
return;
netlbl_secattr_free(sksec->nlbl_secattr);
sksec->nlbl_secattr = NULL;
sksec->nlbl_state = NLBL_UNSET;
}
/**
* selinux_netlbl_sk_security_reset - Reset the NetLabel fields
* @sksec: the sk_security_struct
*
* Description:
* Called when the NetLabel state of a sk_security_struct needs to be reset.
* The caller is responsible for all the NetLabel sk_security_struct locking.
*
*/
void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec)
{
sksec->nlbl_state = NLBL_UNSET;
}
/**
* selinux_netlbl_skbuff_getsid - Get the sid of a packet using NetLabel
* @skb: the packet
* @family: protocol family
* @type: NetLabel labeling protocol type
* @sid: the SID
*
* Description:
* Call the NetLabel mechanism to get the security attributes of the given
* packet and use those attributes to determine the correct context/SID to
* assign to the packet. Returns zero on success, negative values on failure.
*
*/
int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
u16 family,
u32 *type,
u32 *sid)
{
int rc;
struct netlbl_lsm_secattr secattr;
if (!netlbl_enabled()) {
*type = NETLBL_NLTYPE_NONE;
*sid = SECSID_NULL;
return 0;
}
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
rc = selinux_netlbl_sidlookup_cached(skb, family,
&secattr, sid);
else
*sid = SECSID_NULL;
*type = secattr.type;
netlbl_secattr_destroy(&secattr);
return rc;
}
/**
* selinux_netlbl_skbuff_setsid - Set the NetLabel on a packet given a sid
* @skb: the packet
* @family: protocol family
* @sid: the SID
*
* Description:
* Call the NetLabel mechanism to set the label of a packet using @sid.
* Returns zero on success, negative values on failure.
*
*/
int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
u16 family,
u32 sid)
{
int rc;
struct netlbl_lsm_secattr secattr_storage;
struct netlbl_lsm_secattr *secattr = NULL;
struct sock *sk;
/* if this is a locally generated packet, check to see if it is already
* being labeled by its parent socket; if it is, just exit */
sk = skb_to_full_sk(skb);
if (sk != NULL) {
struct sk_security_struct *sksec = selinux_sock(sk);
if (sksec->nlbl_state != NLBL_REQSKB)
return 0;
secattr = selinux_netlbl_sock_getattr(sk, sid);
}
if (secattr == NULL) {
secattr = &secattr_storage;
netlbl_secattr_init(secattr);
rc = security_netlbl_sid_to_secattr(sid, secattr);
if (rc != 0)
goto skbuff_setsid_return;
}
rc = netlbl_skbuff_setattr(skb, family, secattr);
skbuff_setsid_return:
if (secattr == &secattr_storage)
netlbl_secattr_destroy(secattr);
return rc;
}
/**
* selinux_netlbl_sctp_assoc_request - Label an incoming sctp association.
* @asoc: incoming association.
* @skb: the packet.
*
* Description:
* A new incoming connection is represented by @asoc, ......
* Returns zero on success, negative values on failure.
*
*/
int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
int rc;
struct netlbl_lsm_secattr secattr;
struct sk_security_struct *sksec = selinux_sock(asoc->base.sk);
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
if (asoc->base.sk->sk_family != PF_INET &&
asoc->base.sk->sk_family != PF_INET6)
return 0;
netlbl_secattr_init(&secattr);
rc = security_netlbl_sid_to_secattr(asoc->secid, &secattr);
if (rc != 0)
goto assoc_request_return;
/* Move skb hdr address info to a struct sockaddr and then call
* netlbl_conn_setattr().
*/
if (ip_hdr(skb)->version == 4) {
addr4.sin_family = AF_INET;
addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
rc = netlbl_conn_setattr(asoc->base.sk, (void *)&addr4, &secattr);
} else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
addr6.sin6_family = AF_INET6;
addr6.sin6_addr = ipv6_hdr(skb)->saddr;
rc = netlbl_conn_setattr(asoc->base.sk, (void *)&addr6, &secattr);
} else {
rc = -EAFNOSUPPORT;
}
if (rc == 0)
sksec->nlbl_state = NLBL_LABELED;
assoc_request_return:
netlbl_secattr_destroy(&secattr);
return rc;
}
/**
* selinux_netlbl_inet_conn_request - Label an incoming stream connection
* @req: incoming connection request socket
* @family: the request socket's address family
*
* Description:
* A new incoming connection request is represented by @req, we need to label
* the new request_sock here and the stack will ensure the on-the-wire label
* will get preserved when a full sock is created once the connection handshake
* is complete. Returns zero on success, negative values on failure.
*
*/
int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family)
{
int rc;
struct netlbl_lsm_secattr secattr;
if (family != PF_INET && family != PF_INET6)
return 0;
netlbl_secattr_init(&secattr);
rc = security_netlbl_sid_to_secattr(req->secid, &secattr);
if (rc != 0)
goto inet_conn_request_return;
rc = netlbl_req_setattr(req, &secattr);
inet_conn_request_return:
netlbl_secattr_destroy(&secattr);
return rc;
}
/**
* selinux_netlbl_inet_csk_clone - Initialize the newly created sock
* @sk: the new sock
* @family: the sock's address family
*
* Description:
* A new connection has been established using @sk, we've already labeled the
* socket via the request_sock struct in selinux_netlbl_inet_conn_request() but
* we need to set the NetLabel state here since we now have a sock structure.
*
*/
void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
{
struct sk_security_struct *sksec = selinux_sock(sk);
if (family == PF_INET || family == PF_INET6)
sksec->nlbl_state = NLBL_LABELED;
else
sksec->nlbl_state = NLBL_UNSET;
}
/**
* selinux_netlbl_sctp_sk_clone - Copy state to the newly created sock
* @sk: current sock
* @newsk: the new sock
*
* Description:
* Called whenever a new socket is created by accept(2) or sctp_peeloff(3).
*/
void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk)
{
struct sk_security_struct *sksec = selinux_sock(sk);
struct sk_security_struct *newsksec = selinux_sock(newsk);
newsksec->nlbl_state = sksec->nlbl_state;
}
/**
* selinux_netlbl_socket_post_create - Label a socket using NetLabel
* @sk: the sock to label
* @family: protocol family
*
* Description:
* Attempt to label a socket with the NetLabel mechanism using the given
* SID. Returns zero on success, negative values on failure.
*
*/
int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
{
int rc;
struct sk_security_struct *sksec = selinux_sock(sk);
struct netlbl_lsm_secattr *secattr;
if (family != PF_INET && family != PF_INET6)
return 0;
secattr = selinux_netlbl_sock_genattr(sk);
if (IS_ERR(secattr))
return PTR_ERR(secattr);
/* On socket creation, replacement of IP options is safe even if
* the caller does not hold the socket lock.
*/
rc = netlbl_sock_setattr(sk, family, secattr, true);
switch (rc) {
case 0:
sksec->nlbl_state = NLBL_LABELED;
break;
case -EDESTADDRREQ:
sksec->nlbl_state = NLBL_REQSKB;
rc = 0;
break;
}
return rc;
}
/**
* selinux_netlbl_sock_rcv_skb - Do an inbound access check using NetLabel
* @sksec: the sock's sk_security_struct
* @skb: the packet
* @family: protocol family
* @ad: the audit data
*
* Description:
* Fetch the NetLabel security attributes from @skb and perform an access check
* against the receiving socket. Returns zero on success, negative values on
* error.
*
*/
int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
struct sk_buff *skb,
u16 family,
struct common_audit_data *ad)
{
int rc;
u32 nlbl_sid;
u32 perm;
struct netlbl_lsm_secattr secattr;
if (!netlbl_enabled())
return 0;
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
rc = selinux_netlbl_sidlookup_cached(skb, family,
&secattr, &nlbl_sid);
else
nlbl_sid = SECINITSID_UNLABELED;
netlbl_secattr_destroy(&secattr);
if (rc != 0)
return rc;
switch (sksec->sclass) {
case SECCLASS_UDP_SOCKET:
perm = UDP_SOCKET__RECVFROM;
break;
case SECCLASS_TCP_SOCKET:
perm = TCP_SOCKET__RECVFROM;
break;
default:
perm = RAWIP_SOCKET__RECVFROM;
}
rc = avc_has_perm(sksec->sid, nlbl_sid, sksec->sclass, perm, ad);
if (rc == 0)
return 0;
if (nlbl_sid != SECINITSID_UNLABELED)
netlbl_skbuff_err(skb, family, rc, 0);
return rc;
}
/**
* selinux_netlbl_option - Is this a NetLabel option
* @level: the socket level or protocol
* @optname: the socket option name
*
* Description:
* Returns true if @level and @optname refer to a NetLabel option.
* Helper for selinux_netlbl_socket_setsockopt().
*/
static inline int selinux_netlbl_option(int level, int optname)
{
return (level == IPPROTO_IP && optname == IP_OPTIONS) ||
(level == IPPROTO_IPV6 && optname == IPV6_HOPOPTS);
}
/**
* selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel
* @sock: the socket
* @level: the socket level or protocol
* @optname: the socket option name
*
* Description:
* Check the setsockopt() call; if the user is trying to replace the IP
* options on a socket and NetLabel is in place for the socket, deny the
* access, otherwise allow it. Returns zero when the access is
* allowed, -EACCES when denied, and other negative values on error.
*
*/
int selinux_netlbl_socket_setsockopt(struct socket *sock,
int level,
int optname)
{
int rc = 0;
struct sock *sk = sock->sk;
struct sk_security_struct *sksec = selinux_sock(sk);
struct netlbl_lsm_secattr secattr;
if (selinux_netlbl_option(level, optname) &&
(sksec->nlbl_state == NLBL_LABELED ||
sksec->nlbl_state == NLBL_CONNLABELED)) {
netlbl_secattr_init(&secattr);
lock_sock(sk);
/* call the netlabel function directly as we want to see the
* on-the-wire label that is assigned via the socket's options
* and not the cached netlabel/lsm attributes */
rc = netlbl_sock_getattr(sk, &secattr);
release_sock(sk);
if (rc == 0)
rc = -EACCES;
else if (rc == -ENOMSG)
rc = 0;
netlbl_secattr_destroy(&secattr);
}
return rc;
}
/**
* selinux_netlbl_socket_connect_helper - Help label a client-side socket on
* connect
* @sk: the socket to label
* @addr: the destination address
*
* Description:
* Attempt to label a connected socket with NetLabel using the given address.
* Returns zero on success, negative values on failure.
*
*/
static int selinux_netlbl_socket_connect_helper(struct sock *sk,
struct sockaddr *addr)
{
int rc;
struct sk_security_struct *sksec = selinux_sock(sk);
struct netlbl_lsm_secattr *secattr;
/* connected sockets are allowed to disconnect when the address family
* is set to AF_UNSPEC; if that is what is happening, we want to reset
* the socket */
if (addr->sa_family == AF_UNSPEC) {
netlbl_sock_delattr(sk);
sksec->nlbl_state = NLBL_REQSKB;
rc = 0;
return rc;
}
secattr = selinux_netlbl_sock_genattr(sk);
if (IS_ERR(secattr))
return PTR_ERR(secattr);
rc = netlbl_conn_setattr(sk, addr, secattr);
if (rc == 0)
sksec->nlbl_state = NLBL_CONNLABELED;
return rc;
}
/**
* selinux_netlbl_socket_connect_locked - Label a client-side socket on
* connect
* @sk: the socket to label
* @addr: the destination address
*
* Description:
* Attempt to label a connected socket that already has the socket locked
* with NetLabel using the given address.
* Returns zero on success, negative values on failure.
*
*/
int selinux_netlbl_socket_connect_locked(struct sock *sk,
struct sockaddr *addr)
{
struct sk_security_struct *sksec = selinux_sock(sk);
if (sksec->nlbl_state != NLBL_REQSKB &&
sksec->nlbl_state != NLBL_CONNLABELED)
return 0;
return selinux_netlbl_socket_connect_helper(sk, addr);
}
/**
* selinux_netlbl_socket_connect - Label a client-side socket on connect
* @sk: the socket to label
* @addr: the destination address
*
* Description:
* Attempt to label a connected socket with NetLabel using the given address.
* Returns zero on success, negative values on failure.
*
*/
int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
{
int rc;
lock_sock(sk);
rc = selinux_netlbl_socket_connect_locked(sk, addr);
release_sock(sk);
return rc;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of the multi-level security (MLS) policy.
*
* Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
*/
/*
* Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
* Support for enhanced MLS infrastructure.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
*
* Updated: Hewlett-Packard <paul@paul-moore.com>
* Added support to import/export the MLS label from NetLabel
* Copyright (C) Hewlett-Packard Development Company, L.P., 2006
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <net/netlabel.h>
#include "sidtab.h"
#include "mls.h"
#include "policydb.h"
#include "services.h"
/*
* Return the length in bytes for the MLS fields of the
* security context string representation of `context'.
*/
int mls_compute_context_len(struct policydb *p, struct context *context)
{
int i, l, len, head, prev;
char *nm;
struct ebitmap *e;
struct ebitmap_node *node;
if (!p->mls_enabled)
return 0;
len = 1; /* for the beginning ":" */
for (l = 0; l < 2; l++) {
u32 index_sens = context->range.level[l].sens;
len += strlen(sym_name(p, SYM_LEVELS, index_sens - 1));
/* categories */
head = -2;
prev = -2;
e = &context->range.level[l].cat;
ebitmap_for_each_positive_bit(e, node, i)
{
if (i - prev > 1) {
/* one or more negative bits are skipped */
if (head != prev) {
nm = sym_name(p, SYM_CATS, prev);
len += strlen(nm) + 1;
}
nm = sym_name(p, SYM_CATS, i);
len += strlen(nm) + 1;
head = i;
}
prev = i;
}
if (prev != head) {
nm = sym_name(p, SYM_CATS, prev);
len += strlen(nm) + 1;
}
if (l == 0) {
if (mls_level_eq(&context->range.level[0],
&context->range.level[1]))
break;
else
len++;
}
}
return len;
}
/*
* Write the security context string representation of
* the MLS fields of `context' into the string `*scontext'.
* Update `*scontext' to point to the end of the MLS fields.
*/
void mls_sid_to_context(struct policydb *p, struct context *context,
char **scontext)
{
char *scontextp, *nm;
int i, l, head, prev;
struct ebitmap *e;
struct ebitmap_node *node;
if (!p->mls_enabled)
return;
scontextp = *scontext;
*scontextp = ':';
scontextp++;
for (l = 0; l < 2; l++) {
strcpy(scontextp, sym_name(p, SYM_LEVELS,
context->range.level[l].sens - 1));
scontextp += strlen(scontextp);
/* categories */
head = -2;
prev = -2;
e = &context->range.level[l].cat;
ebitmap_for_each_positive_bit(e, node, i)
{
if (i - prev > 1) {
/* one or more negative bits are skipped */
if (prev != head) {
if (prev - head > 1)
*scontextp++ = '.';
else
*scontextp++ = ',';
nm = sym_name(p, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
if (prev < 0)
*scontextp++ = ':';
else
*scontextp++ = ',';
nm = sym_name(p, SYM_CATS, i);
strcpy(scontextp, nm);
scontextp += strlen(nm);
head = i;
}
prev = i;
}
if (prev != head) {
if (prev - head > 1)
*scontextp++ = '.';
else
*scontextp++ = ',';
nm = sym_name(p, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
if (l == 0) {
if (mls_level_eq(&context->range.level[0],
&context->range.level[1]))
break;
else
*scontextp++ = '-';
}
}
*scontext = scontextp;
}
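/*
 * Worked example, assuming the conventional sN/cN names from the reference
 * policy: for a context whose low level is s2 with categories
 * {c0, c1, c2, c5} and whose high level equals the low level, the code
 * above emits ":s2:c0.c2,c5" -- consecutive categories are compressed with
 * '.' and disjoint runs are separated by ','. If the two levels differ, a
 * '-' separates them, e.g. ":s2:c0.c2,c5-s15:c0.c1023".
 */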
int mls_level_isvalid(struct policydb *p, struct mls_level *l)
{
struct level_datum *levdatum;
if (!l->sens || l->sens > p->p_levels.nprim)
return 0;
levdatum = symtab_search(&p->p_levels,
sym_name(p, SYM_LEVELS, l->sens - 1));
if (!levdatum)
return 0;
/*
* Return 1 iff all the bits set in l->cat are also set in
* levdatum->level.cat and no bit in l->cat is larger than
* p->p_cats.nprim.
*/
return ebitmap_contains(&levdatum->level.cat, &l->cat,
p->p_cats.nprim);
}
int mls_range_isvalid(struct policydb *p, struct mls_range *r)
{
return (mls_level_isvalid(p, &r->level[0]) &&
mls_level_isvalid(p, &r->level[1]) &&
mls_level_dom(&r->level[1], &r->level[0]));
}
/*
* Return 1 if the MLS fields in the security context
* structure `c' are valid. Return 0 otherwise.
*/
int mls_context_isvalid(struct policydb *p, struct context *c)
{
struct user_datum *usrdatum;
if (!p->mls_enabled)
return 1;
if (!mls_range_isvalid(p, &c->range))
return 0;
if (c->role == OBJECT_R_VAL)
return 1;
/*
* User must be authorized for the MLS range.
*/
if (!c->user || c->user > p->p_users.nprim)
return 0;
usrdatum = p->user_val_to_struct[c->user - 1];
if (!mls_range_contains(usrdatum->range, c->range))
return 0; /* user may not be associated with range */
return 1;
}
/*
* Set the MLS fields in the security context structure
* `context' based on the string representation in
* the string `scontext'.
*
* This function modifies the string in place, inserting
* NULL characters to terminate the MLS fields.
*
* If a def_sid is provided and no MLS field is present,
* copy the MLS field of the associated default context.
* Used for systems upgraded to MLS where objects may lack
* MLS fields.
*
* Policy read-lock must be held for sidtab lookup.
*
*/
int mls_context_to_sid(struct policydb *pol, char oldc, char *scontext,
struct context *context, struct sidtab *s, u32 def_sid)
{
char *sensitivity, *cur_cat, *next_cat, *rngptr;
struct level_datum *levdatum;
struct cat_datum *catdatum, *rngdatum;
u32 i;
int l, rc;
char *rangep[2];
if (!pol->mls_enabled) {
/*
* With no MLS, only return -EINVAL if there is an MLS field
* and it did not come from an xattr.
*/
if (oldc && def_sid == SECSID_NULL)
return -EINVAL;
return 0;
}
/*
* No MLS component to the security context, try and map to
* default if provided.
*/
if (!oldc) {
struct context *defcon;
if (def_sid == SECSID_NULL)
return -EINVAL;
defcon = sidtab_search(s, def_sid);
if (!defcon)
return -EINVAL;
return mls_context_cpy(context, defcon);
}
/*
* If we're dealing with a range, figure out where the two parts
* of the range begin.
*/
rangep[0] = scontext;
rangep[1] = strchr(scontext, '-');
if (rangep[1]) {
rangep[1][0] = '\0';
rangep[1]++;
}
/* For each part of the range: */
for (l = 0; l < 2; l++) {
/* Split sensitivity and category set. */
sensitivity = rangep[l];
if (sensitivity == NULL)
break;
next_cat = strchr(sensitivity, ':');
if (next_cat)
*(next_cat++) = '\0';
/* Parse sensitivity. */
levdatum = symtab_search(&pol->p_levels, sensitivity);
if (!levdatum)
return -EINVAL;
context->range.level[l].sens = levdatum->level.sens;
/* Extract category set. */
while (next_cat != NULL) {
cur_cat = next_cat;
next_cat = strchr(next_cat, ',');
if (next_cat != NULL)
*(next_cat++) = '\0';
/* Separate into range if exists */
rngptr = strchr(cur_cat, '.');
if (rngptr != NULL) {
/* Remove '.' */
*rngptr++ = '\0';
}
catdatum = symtab_search(&pol->p_cats, cur_cat);
if (!catdatum)
return -EINVAL;
rc = ebitmap_set_bit(&context->range.level[l].cat,
catdatum->value - 1, 1);
if (rc)
return rc;
/* If range, set all categories in range */
if (rngptr == NULL)
continue;
rngdatum = symtab_search(&pol->p_cats, rngptr);
if (!rngdatum)
return -EINVAL;
if (catdatum->value >= rngdatum->value)
return -EINVAL;
for (i = catdatum->value; i < rngdatum->value; i++) {
rc = ebitmap_set_bit(
&context->range.level[l].cat, i, 1);
if (rc)
return rc;
}
}
}
/* If we didn't see a '-', the range start is also the range end. */
if (rangep[1] == NULL) {
context->range.level[1].sens = context->range.level[0].sens;
rc = ebitmap_cpy(&context->range.level[1].cat,
&context->range.level[0].cat);
if (rc)
return rc;
}
return 0;
}
/*
* Set the MLS fields in the security context structure
* `context' based on the string representation in
* the string `str'. This function will allocate temporary memory with the
* given constraints of gfp_mask.
*/
int mls_from_string(struct policydb *p, char *str, struct context *context,
gfp_t gfp_mask)
{
char *tmpstr;
int rc;
if (!p->mls_enabled)
return -EINVAL;
tmpstr = kstrdup(str, gfp_mask);
if (!tmpstr) {
rc = -ENOMEM;
} else {
rc = mls_context_to_sid(p, ':', tmpstr, context, NULL,
SECSID_NULL);
kfree(tmpstr);
}
return rc;
}
/*
* Copies the MLS range `range' into `context'.
*/
int mls_range_set(struct context *context, struct mls_range *range)
{
int l, rc = 0;
/* Copy the MLS range into the context */
for (l = 0; l < 2; l++) {
context->range.level[l].sens = range->level[l].sens;
rc = ebitmap_cpy(&context->range.level[l].cat,
&range->level[l].cat);
if (rc)
break;
}
return rc;
}
int mls_setup_user_range(struct policydb *p, struct context *fromcon,
struct user_datum *user, struct context *usercon)
{
if (p->mls_enabled) {
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
struct mls_level *user_low = &(user->range.level[0]);
struct mls_level *user_clr = &(user->range.level[1]);
struct mls_level *user_def = &(user->dfltlevel);
struct mls_level *usercon_sen = &(usercon->range.level[0]);
struct mls_level *usercon_clr = &(usercon->range.level[1]);
/* Honor the user's default level if we can */
if (mls_level_between(user_def, fromcon_sen, fromcon_clr))
*usercon_sen = *user_def;
else if (mls_level_between(fromcon_sen, user_def, user_clr))
*usercon_sen = *fromcon_sen;
else if (mls_level_between(fromcon_clr, user_low, user_def))
*usercon_sen = *user_low;
else
return -EINVAL;
/* Lower the clearance of available contexts
if the clearance of "fromcon" is lower than
that of the user's default clearance (but
only if the "fromcon" clearance dominates
the user's computed sensitivity level) */
if (mls_level_dom(user_clr, fromcon_clr))
*usercon_clr = *fromcon_clr;
else if (mls_level_dom(fromcon_clr, user_clr))
*usercon_clr = *user_clr;
else
return -EINVAL;
}
return 0;
}
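/*
 * Worked example (sensitivities only, ignoring categories, and assuming the
 * conventional sN names): if the user's range is s0-s10 with a default level
 * of s5 and fromcon's range is s3-s7, the default level lies between s3 and
 * s7, so usercon's low level becomes s5; the user's clearance s10 dominates
 * fromcon's clearance s7, so usercon's clearance is lowered to s7.
 */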
/*
* Convert the MLS fields in the security context
* structure `oldc' from the values specified in the
* policy `oldp' to the values specified in the policy `newp',
* storing the resulting context in `newc'.
*/
int mls_convert_context(struct policydb *oldp, struct policydb *newp,
struct context *oldc, struct context *newc)
{
struct level_datum *levdatum;
struct cat_datum *catdatum;
struct ebitmap_node *node;
u32 i;
int l;
if (!oldp->mls_enabled || !newp->mls_enabled)
return 0;
for (l = 0; l < 2; l++) {
char *name = sym_name(oldp, SYM_LEVELS,
oldc->range.level[l].sens - 1);
levdatum = symtab_search(&newp->p_levels, name);
if (!levdatum)
return -EINVAL;
newc->range.level[l].sens = levdatum->level.sens;
ebitmap_for_each_positive_bit(&oldc->range.level[l].cat, node,
i)
{
int rc;
catdatum = symtab_search(&newp->p_cats,
sym_name(oldp, SYM_CATS, i));
if (!catdatum)
return -EINVAL;
rc = ebitmap_set_bit(&newc->range.level[l].cat,
catdatum->value - 1, 1);
if (rc)
return rc;
}
}
return 0;
}
int mls_compute_sid(struct policydb *p, struct context *scontext,
struct context *tcontext, u16 tclass, u32 specified,
struct context *newcontext, bool sock)
{
struct range_trans rtr;
struct mls_range *r;
struct class_datum *cladatum;
char default_range = 0;
if (!p->mls_enabled)
return 0;
switch (specified) {
case AVTAB_TRANSITION:
/* Look for a range transition rule. */
rtr.source_type = scontext->type;
rtr.target_type = tcontext->type;
rtr.target_class = tclass;
r = policydb_rangetr_search(p, &rtr);
if (r)
return mls_range_set(newcontext, r);
if (tclass && tclass <= p->p_classes.nprim) {
cladatum = p->class_val_to_struct[tclass - 1];
if (cladatum)
default_range = cladatum->default_range;
}
switch (default_range) {
case DEFAULT_SOURCE_LOW:
return mls_context_cpy_low(newcontext, scontext);
case DEFAULT_SOURCE_HIGH:
return mls_context_cpy_high(newcontext, scontext);
case DEFAULT_SOURCE_LOW_HIGH:
return mls_context_cpy(newcontext, scontext);
case DEFAULT_TARGET_LOW:
return mls_context_cpy_low(newcontext, tcontext);
case DEFAULT_TARGET_HIGH:
return mls_context_cpy_high(newcontext, tcontext);
case DEFAULT_TARGET_LOW_HIGH:
return mls_context_cpy(newcontext, tcontext);
case DEFAULT_GLBLUB:
return mls_context_glblub(newcontext, scontext,
tcontext);
}
fallthrough;
case AVTAB_CHANGE:
if ((tclass == p->process_class) || sock)
/* Use the process MLS attributes. */
return mls_context_cpy(newcontext, scontext);
else
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
}
return -EINVAL;
}
#ifdef CONFIG_NETLABEL
/**
* mls_export_netlbl_lvl - Export the MLS sensitivity levels to NetLabel
* @p: the policy
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Given the security context copy the low MLS sensitivity level into the
* NetLabel MLS sensitivity level field.
*
*/
void mls_export_netlbl_lvl(struct policydb *p, struct context *context,
struct netlbl_lsm_secattr *secattr)
{
if (!p->mls_enabled)
return;
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
}
/**
* mls_import_netlbl_lvl - Import the NetLabel MLS sensitivity levels
* @p: the policy
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Given the security context and the NetLabel security attributes, copy the
* NetLabel MLS sensitivity level into the context.
*
*/
void mls_import_netlbl_lvl(struct policydb *p, struct context *context,
struct netlbl_lsm_secattr *secattr)
{
if (!p->mls_enabled)
return;
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
context->range.level[1].sens = context->range.level[0].sens;
}
/**
* mls_export_netlbl_cat - Export the MLS categories to NetLabel
* @p: the policy
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Given the security context copy the low MLS categories into the NetLabel
* MLS category field. Returns zero on success, negative values on failure.
*
*/
int mls_export_netlbl_cat(struct policydb *p, struct context *context,
struct netlbl_lsm_secattr *secattr)
{
int rc;
if (!p->mls_enabled)
return 0;
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
&secattr->attr.mls.cat);
if (rc == 0 && secattr->attr.mls.cat != NULL)
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
return rc;
}
/**
* mls_import_netlbl_cat - Import the MLS categories from NetLabel
* @p: the policy
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Copy the NetLabel security attributes into the SELinux context; since the
* NetLabel security attribute only contains a single MLS category use it for
* both the low and high categories of the context. Returns zero on success,
* negative values on failure.
*
*/
int mls_import_netlbl_cat(struct policydb *p, struct context *context,
struct netlbl_lsm_secattr *secattr)
{
int rc;
if (!p->mls_enabled)
return 0;
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
secattr->attr.mls.cat);
if (rc)
goto import_netlbl_cat_failure;
memcpy(&context->range.level[1].cat, &context->range.level[0].cat,
sizeof(context->range.level[0].cat));
return 0;
import_netlbl_cat_failure:
ebitmap_destroy(&context->range.level[0].cat);
return rc;
}
#endif /* CONFIG_NETLABEL */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H
/*
* Cpumasks provide a bitmap suitable for representing the
* set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/cpumask_types.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>
/**
* cpumask_pr_args - printf args to output a cpumask
* @maskp: cpumask to be printed
*
* Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif
static __always_inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
WARN_ON(nr != nr_cpu_ids);
#else
nr_cpu_ids = nr;
#endif
}
/*
* We have several different "preferred sizes" for the cpumask
* operations, depending on operation.
*
* For example, the bitmap scanning and operating operations have
* optimized routines that work for the single-word case, but only when
* the size is constant. So if NR_CPUS fits in one single word, we are
* better off using that small constant, in order to trigger the
* optimized bit finding. That is 'small_cpumask_size'.
*
* The clearing and copying operations will similarly perform better
* with a constant size, but we limit that size arbitrarily to four
* words. We call this 'large_cpumask_size'.
*
* Finally, some operations just want the exact limit, either because
* they set bits or just don't have any faster fixed-sized versions. We
* call this just 'nr_cpumask_bits'.
*
* Note that these optional constants are always guaranteed to be at
* least as big as 'nr_cpu_ids' itself is, and all our cpumask
* allocations are at least that size (see cpumask_size()). The
* optimization comes from being able to potentially use a compile-time
* constant instead of a run-time generated exact number of CPUs.
*/
#if NR_CPUS <= BITS_PER_LONG
#define small_cpumask_bits ((unsigned int)NR_CPUS)
#define large_cpumask_bits ((unsigned int)NR_CPUS)
#elif NR_CPUS <= 4*BITS_PER_LONG
#define small_cpumask_bits nr_cpu_ids
#define large_cpumask_bits ((unsigned int)NR_CPUS)
#else
#define small_cpumask_bits nr_cpu_ids
#define large_cpumask_bits nr_cpu_ids
#endif
#define nr_cpumask_bits nr_cpu_ids
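/*
 * For example, on a 64-bit kernel with NR_CPUS == 64 both
 * small_cpumask_bits and large_cpumask_bits are the compile-time constant
 * 64, so the single-word bitmap fast paths apply. With NR_CPUS == 256,
 * small_cpumask_bits falls back to the run-time nr_cpu_ids while
 * large_cpumask_bits stays at the constant 256.
 */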
/*
* The following particular system cpumasks and operations manage
* possible, present, active and online cpus.
*
* cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
* cpu_present_mask - has bit 'cpu' set iff cpu is populated
* cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
* The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
* that might ever be plugged in at any time during the life of that
* system boot. The cpu_present_mask is dynamic(*),
* representing which CPUs are currently plugged in. And
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
*
* (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
*
* Subtleties:
* 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_masks are placebos. Changing them
* will have no useful effect on the following num_*_cpus()
* and cpu_*() macros in the UP case. This ugliness is a UP
* optimization - don't waste any instructions or memory references
* asking if you're online or how many CPUs there are if there is
* only one CPU.
*/
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_enabled_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
#define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
extern atomic_t __num_online_cpus;
extern cpumask_t cpus_booted_once_mask;
static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}
/* verify cpu argument to cpumask_* operators */
static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
cpu_max_bits_warn(cpu, small_cpumask_bits);
return cpu;
}
/**
* cpumask_first - get the first cpu in a cpumask
* @srcp: the cpumask pointer
*
* Return: >= nr_cpu_ids if no cpus set.
*/
static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_first_zero - get the first unset cpu in a cpumask
* @srcp: the cpumask pointer
*
* Return: >= nr_cpu_ids if all cpus are set.
*/
static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_first_and - return the first cpu from *srcp1 & *srcp2
* @srcp1: the first input
* @srcp2: the second input
*
* Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
static __always_inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
* cpumask_first_andnot - return the first cpu from *srcp1 & ~*srcp2
* @srcp1: the first input
* @srcp2: the second input
*
* Return: >= nr_cpu_ids if no such cpu found.
*/
static __always_inline
unsigned int cpumask_first_andnot(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
* cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
* @srcp1: the first input
* @srcp2: the second input
* @srcp3: the third input
*
* Return: >= nr_cpu_ids if no cpus set in all.
*/
static __always_inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
const struct cpumask *srcp2,
const struct cpumask *srcp3)
{
return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
cpumask_bits(srcp3), small_cpumask_bits);
}
/**
* cpumask_last - get the last CPU in a cpumask
* @srcp: - the cpumask pointer
*
* Return: >= nr_cpumask_bits if no CPUs set.
*/
static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_next - get the next cpu in a cpumask
* @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
* Return: >= nr_cpu_ids if no further cpus set.
*/
static __always_inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
* @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
* Return: >= nr_cpu_ids if no further cpus unset.
*/
static __always_inline
unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n+1);
}
#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
static __always_inline
unsigned int cpumask_local_spread(unsigned int i, int node)
{
return 0;
}
static __always_inline
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return cpumask_first_and(src1p, src2p);
}
static __always_inline
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
#else
unsigned int cpumask_local_spread(unsigned int i, int node);
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p);
unsigned int cpumask_any_distribute(const struct cpumask *srcp);
#endif /* NR_CPUS */
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
* @n: the cpu prior to the place to search (i.e. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
* Return: >= nr_cpu_ids if no further cpus set in both.
*/
static __always_inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits, n + 1);
}
/**
* cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
* @n: the cpu prior to the place to search (i.e. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
* Return: >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
*/
static __always_inline
unsigned int cpumask_next_andnot(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits, n + 1);
}
/**
* cpumask_next_and_wrap - get the next cpu in *src1p & *src2p, starting from
* @n+1. If nothing found, wrap around and start from
* the beginning
* @n: the cpu prior to the place to search (i.e. search starts from @n+1)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
* Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src1p & @src2p is empty.
*/
static __always_inline
unsigned int cpumask_next_and_wrap(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits, n + 1);
}
/**
* cpumask_next_wrap - get the next cpu in *src, starting from @n+1. If nothing
* found, wrap around and start from the beginning
* @n: the cpu prior to the place to search (i.e. search starts from @n+1)
* @src: cpumask pointer
*
* Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src is empty.
*/
static __always_inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit_wrap(cpumask_bits(src), small_cpumask_bits, n + 1);
}
/**
* cpumask_random - get random cpu in *src.
* @src: cpumask pointer
*
* Return: random set bit, or >= nr_cpu_ids if @src is empty.
*/
static __always_inline
unsigned int cpumask_random(const struct cpumask *src)
{
return find_random_bit(cpumask_bits(src), nr_cpu_ids);
}
/**
* for_each_cpu - iterate over every cpu in a mask
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
*
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu(cpu, mask) \
for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
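/*
 * Illustrative sketch: counting the online CPUs that belong to a given NUMA
 * node with for_each_cpu(). example_count_node_cpus() is a hypothetical
 * helper; cpu_online_mask and cpu_to_node() are the usual kernel facilities.
 */
#if 0	/* example only, not compiled */
static unsigned int example_count_node_cpus(int node)
{
	unsigned int cpu, nr = 0;

	for_each_cpu(cpu, cpu_online_mask)
		if (cpu_to_node(cpu) == node)
			nr++;
	return nr;
}
#endif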
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
* @start: the start location
*
* The implementation does not assume any bit in @mask is set (including @start).
*
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_wrap(cpu, mask, start) \
for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
* @mask1: the first cpumask pointer
* @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
* cpumask_and(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_and(cpu, mask1, mask2) \
for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
* for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
* those present in another.
* @cpu: the (optionally unsigned) integer iterator
* @mask1: the first cpumask pointer
* @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
* cpumask_andnot(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_andnot(cpu, mask1, mask2) \
for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
* for_each_cpu_or - iterate over every cpu present in either mask
* @cpu: the (optionally unsigned) integer iterator
* @mask1: the first cpumask pointer
* @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
* cpumask_or(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_or(cpu, mask1, mask2) \
for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
* for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
*
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_from(cpu, mask) \
for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
/**
* cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
* If @cpu == -1, the function is equivalent to cpumask_any().
* Return: >= nr_cpu_ids if no cpus set.
*/
static __always_inline
unsigned int cpumask_any_but(const struct cpumask *mask, int cpu)
{
unsigned int i;
/* -1 is a legal arg here. */
if (cpu != -1)
cpumask_check(cpu);
for_each_cpu(i, mask)
if (i != cpu)
break;
return i;
}
/**
* cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one.
* @mask1: the first input cpumask
* @mask2: the second input cpumask
* @cpu: the cpu to ignore
*
* If @cpu == -1, the function is equivalent to cpumask_any_and().
* Returns >= nr_cpu_ids if no cpus set.
*/
static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
int cpu)
{
unsigned int i;
/* -1 is a legal arg here. */
if (cpu != -1)
cpumask_check(cpu);
i = cpumask_first_and(mask1, mask2);
if (i != cpu)
return i;
return cpumask_next_and(cpu, mask1, mask2);
}
/**
* cpumask_any_andnot_but - pick an arbitrary cpu from *mask1 & ~*mask2, but not this one.
* @mask1: the first input cpumask
* @mask2: the second input cpumask
* @cpu: the cpu to ignore
*
* If @cpu == -1, the function returns the first matching cpu.
* Returns >= nr_cpu_ids if no cpus set.
*/
static __always_inline
unsigned int cpumask_any_andnot_but(const struct cpumask *mask1,
const struct cpumask *mask2,
int cpu)
{
unsigned int i;
/* -1 is a legal arg here. */
if (cpu != -1)
cpumask_check(cpu);
i = cpumask_first_andnot(mask1, mask2);
if (i != cpu)
return i;
return cpumask_next_andnot(cpu, mask1, mask2);
}
/**
* cpumask_nth - get the Nth cpu in a cpumask
* @srcp: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static __always_inline
unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}
/**
* cpumask_nth_and - get the Nth cpu in 2 cpumasks
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static __always_inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
small_cpumask_bits, cpumask_check(cpu));
}
/**
* cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
* @srcp3: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static __always_inline
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2,
const struct cpumask *srcp3)
{
return find_nth_and_andnot_bit(cpumask_bits(srcp1),
cpumask_bits(srcp2),
cpumask_bits(srcp3),
small_cpumask_bits, cpumask_check(cpu));
}
#define CPU_BITS_NONE \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
}
#define CPU_BITS_CPU0 \
{ \
[0] = 1UL \
}
/**
* cpumask_set_cpu - set a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
static __always_inline
void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
static __always_inline
void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
/**
* cpumask_clear_cpus - clear cpus in a cpumask
* @dstp: the cpumask pointer
* @cpu: cpu number (< nr_cpu_ids)
* @ncpus: number of cpus to clear (< nr_cpu_ids)
*/
static __always_inline void cpumask_clear_cpus(struct cpumask *dstp,
unsigned int cpu, unsigned int ncpus)
{
cpumask_check(cpu + ncpus - 1);
bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus);
}
/**
* cpumask_clear_cpu - clear a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
/**
* cpumask_test_cpu - test for a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
* Return: true if @cpu is set in @cpumask, else returns false
*/
static __always_inline
bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
* test_and_set_bit wrapper for cpumasks.
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline
bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
/**
* cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
* test_and_clear_bit wrapper for cpumasks.
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline
bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
/**
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
static __always_inline void cpumask_setall(struct cpumask *dstp)
{
if (small_const_nbits(small_cpumask_bits)) {
cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
return;
}
bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}
/**
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
static __always_inline void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
/**
* cpumask_and - *dstp = *src1p & *src2p
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
*
* Return: false if *@dstp is empty, else returns true
*/
static __always_inline
bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
}
/**
* cpumask_or - *dstp = *src1p | *src2p
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
*/
static __always_inline
void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
}
/**
* cpumask_xor - *dstp = *src1p ^ *src2p
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
*/
static __always_inline
void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
}
/**
* cpumask_andnot - *dstp = *src1p & ~*src2p
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
*
* Return: false if *@dstp is empty, else returns true
*/
static __always_inline
bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
}
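/*
 * Illustrative sketch (not part of the original header): intersecting a
 * task's allowed CPUs with the online mask and reacting to an empty result.
 * "allowed" and "result" are hypothetical cpumask pointers.
 *
 *	if (!cpumask_and(result, allowed, cpu_online_mask))
 *		pr_warn("none of the allowed CPUs is online\n");
 */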
/**
* cpumask_equal - *src1p == *src2p
* @src1p: the first input
* @src2p: the second input
*
* Return: true if the cpumasks are equal, false if not
*/
static __always_inline
bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
}
/**
* cpumask_or_equal - *src1p | *src2p == *src3p
* @src1p: the first input
* @src2p: the second input
* @src3p: the third input
*
* Return: true if first cpumask ORed with second cpumask == third cpumask,
* otherwise false
*/
static __always_inline
bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
cpumask_bits(src3p), small_cpumask_bits);
}
/**
* cpumask_intersects - (*src1p & *src2p) != 0
* @src1p: the first input
* @src2p: the second input
*
* Return: true if first cpumask ANDed with second cpumask is non-empty,
* otherwise false
*/
static __always_inline
bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
}
/**
* cpumask_subset - (*src1p & ~*src2p) == 0
* @src1p: the first input
* @src2p: the second input
*
* Return: true if *@src1p is a subset of *@src2p, else returns false
*/
static __always_inline
bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
}
/**
* cpumask_empty - *srcp == 0
* @srcp: the cpumask in which to check that all cpus < nr_cpu_ids are clear.
*
* Return: true if srcp is empty (has no bits set), else false
*/
static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_full - *srcp == 0xFFFFFFFF...
* @srcp: the cpumask in which to check that all cpus < nr_cpu_ids are set.
*
* Return: true if srcp is full (has all bits set), else false
*/
static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
/**
* cpumask_weight - Count of bits in *srcp
* @srcp: the cpumask to count bits (< nr_cpu_ids) in.
*
* Return: count of bits set in *srcp
*/
static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
* @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
* @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
static __always_inline
unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
* cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
* @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
* @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
*
* Return: count of bits set in *srcp1 and not set in *srcp2
*/
static __always_inline
unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
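/*
 * Illustrative sketch (not part of the original header): the weight helpers
 * count combined masks without materialising an intermediate cpumask.
 * "isolated" is a hypothetical mask of CPUs that should be left alone.
 *
 *	unsigned int usable = cpumask_weight_andnot(cpu_online_mask, isolated);
 *	unsigned int busy   = cpumask_weight_and(cpu_online_mask, isolated);
 */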
/**
* cpumask_shift_right - *dstp = *srcp >> n
* @dstp: the cpumask result
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
static __always_inline
void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
small_cpumask_bits);
}
/**
* cpumask_shift_left - *dstp = *srcp << n
* @dstp: the cpumask result
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
static __always_inline
void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
}
/**
* cpumask_copy - *dstp = *srcp
* @dstp: the result
* @srcp: the input cpumask
*/
static __always_inline
void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
/**
* cpumask_any - pick an arbitrary cpu from *srcp
* @srcp: the input cpumask
*
* Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any(srcp) cpumask_first(srcp)
/**
* cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
* Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
/**
* cpumask_of - the cpumask containing just a given cpu
* @cpu: the cpu (< nr_cpu_ids)
*/
#define cpumask_of(cpu) (get_cpu_mask(cpu))
/**
* cpumask_parse_user - extract a cpumask from a user string
* @buf: the buffer to extract from
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
* Return: -errno, or 0 for success.
*/
static __always_inline
int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
* cpumask_parselist_user - extract a cpumask from a user string
* @buf: the buffer to extract from
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
* Return: -errno, or 0 for success.
*/
static __always_inline
int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
}
/**
* cpumask_parse - extract a cpumask from a string
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
* Return: -errno, or 0 for success.
*/
static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
* cpulist_parse - extract a cpumask from a user string of ranges
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
* Return: -errno, or 0 for success.
*/
static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
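/*
 * Illustrative sketch (not part of the original header): parsing a
 * human-readable range string into a mask. "boot_cpus" is a hypothetical,
 * statically allocated struct cpumask.
 *
 *	int err = cpulist_parse("0-3,8", &boot_cpus);
 *
 *	if (err)
 *		pr_err("invalid CPU list\n");
 */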
/**
* cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
*
* Return: size to allocate for a &struct cpumask in bytes
*/
static __always_inline unsigned int cpumask_size(void)
{
return bitmap_size(large_cpumask_bits);
}
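/*
 * Illustrative sketch (not part of the original header): cpumask_size() is
 * the size to hand to the allocator when a cpumask is allocated directly
 * rather than through cpumask_var_t.
 *
 *	struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
 *
 *	if (!mask)
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpu_possible_mask);
 *	kfree(mask);
 */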
#ifdef CONFIG_CPUMASK_OFFSTACK
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
#define __cpumask_var_read_mostly __read_mostly
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
static __always_inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
/**
* alloc_cpumask_var - allocate a struct cpumask
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
*
* Only performs a real allocation when CONFIG_CPUMASK_OFFSTACK=y; otherwise
* it is a nop that always returns %true (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
*
* Return: %true if allocation succeeded, %false if not
*/
static __always_inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
static __always_inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
#else
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
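/*
 * Illustrative sketch (not part of the original header): the cpumask_var_t
 * allocation pattern is the same whether or not CONFIG_CPUMASK_OFFSTACK is
 * enabled; only the cost of the allocation differs.
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpu_online_mask);
 *	// ... use mask ...
 *	free_cpumask_var(mask);
 */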
/* It's common to want to use cpu_all_mask in struct member initializers,
* so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
#if NR_CPUS == 1
/* Uniprocessor: the possible/online/present masks are always "1" */
#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_possible_cpu_wrap(cpu, start) \
for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu_wrap(cpu, start) \
for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
#define for_each_possible_cpu_wrap(cpu, start) \
for_each_cpu_wrap((cpu), cpu_possible_mask, (start))
#define for_each_online_cpu_wrap(cpu, start) \
for_each_cpu_wrap((cpu), cpu_online_mask, (start))
#endif
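/*
 * Illustrative sketch (not part of the original header): the iterators hide
 * the NR_CPUS == 1 special case from callers; the same loop works on both
 * UP and SMP builds.
 *
 *	unsigned int cpu, nr = 0;
 *
 *	for_each_online_cpu(cpu)
 *		nr++;
 *
 * Barring concurrent hotplug, nr now equals num_online_cpus().
 */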
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
#define assign_cpu(cpu, mask, val) \
assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
#define __assign_cpu(cpu, mask, val) \
__assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible))
#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
void set_cpu_online(unsigned int cpu, bool online);
/**
* to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
* There are a few places where cpumask_var_t isn't appropriate and
* static cpumasks must be used (eg. very early boot), yet we don't
* expose the definition of 'struct cpumask'.
*
* This does the conversion, and can be used as a constant initializer.
*/
#define to_cpumask(bitmap) \
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
/*
* Special-case data structure for "single bit set only" constant CPU masks.
*
* We pre-generate all the 64 (or 32) possible bit positions, with enough
* padding to the left and the right, and return the constant pointer
* appropriately offset.
*/
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
return to_cpumask(p);
}
#if NR_CPUS > 1
/**
* num_online_cpus() - Read the number of online CPUs
*
* Despite the fact that __num_online_cpus is of type atomic_t, this
* interface gives only a momentary snapshot and is not protected against
* concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
* region.
*
* Return: momentary snapshot of the number of online CPUs
*/
static __always_inline unsigned int num_online_cpus(void)
{
return raw_atomic_read(&__num_online_cpus);
}
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_enabled_cpus() cpumask_weight(cpu_enabled_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
static __always_inline bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_online_mask);
}
static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_enabled_mask);
}
static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
}
static __always_inline bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_present_mask);
}
static __always_inline bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_active_mask);
}
static __always_inline bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_dying_mask);
}
#else
#define num_online_cpus() 1U
#define num_possible_cpus() 1U
#define num_enabled_cpus() 1U
#define num_present_cpus() 1U
#define num_active_cpus() 1U
static __always_inline bool cpu_online(unsigned int cpu)
{
return cpu == 0;
}
static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpu == 0;
}
static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpu == 0;
}
static __always_inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
}
static __always_inline bool cpu_active(unsigned int cpu)
{
return cpu == 0;
}
static __always_inline bool cpu_dying(unsigned int cpu)
{
return false;
}
#endif /* NR_CPUS > 1 */
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#else /* NR_CPUS > BITS_PER_LONG */
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#endif /* NR_CPUS > BITS_PER_LONG */
/**
* cpumap_print_to_pagebuf - copies the cpumask into the buffer either
* as comma-separated list of cpus or hex values of cpumask
* @list: if true, print the cpumask as a comma-separated list of cpus;
* if false, as hex values
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
* Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
nr_cpu_ids);
}
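/*
 * Illustrative sketch (not part of the original header): a typical sysfs
 * show callback exporting a mask as a CPU list. "foo_cpus_show" is a
 * hypothetical device attribute.
 *
 *	static ssize_t foo_cpus_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		return cpumap_print_to_pagebuf(true, buf, cpu_online_mask);
 *	}
 */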
/**
* cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
* hex values of cpumask
*
* @buf: the buffer to copy into
* @mask: the cpumask to copy
* @off: offset into the formatted string from which copying into @buf starts
* @count: the maximum number of bytes to print
*
* The function prints the cpumask into the buffer as hex values of the
* cpumask; it is typically used by a bin_attribute to export the cpumask
* bitmask ABI.
*
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
static __always_inline
ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
}
/**
* cpumap_print_list_to_buf - copies the cpumask into the buffer as
* comma-separated list of cpus
* @buf: the buffer to copy into
* @mask: the cpumask to copy
* @off: offset into the formatted string from which copying into @buf starts
* @count: the maximum number of bytes to print
*
* Everything is the same as cpumap_print_bitmask_to_buf() above,
* except the print format.
*
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
static __always_inline
ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
}
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
#else
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
#endif /* NR_CPUS > BITS_PER_LONG */
#define CPU_MASK_NONE \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
} }
#define CPU_MASK_CPU0 \
(cpumask_t) { { \
[0] = 1UL \
} }
/*
* Provide a valid theoretical max size for cpumap and cpulist sysfs files
* to avoid breaking userspace which may allocate a buffer based on the size
* reported by e.g. fstat.
*
* For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
*
* For cpulist 7 is (ceil(log10(NR_CPUS)) + 1) allowing for NR_CPUS to be up
* to 2 orders of magnitude larger than 8192. And then we divide by 2 to
* cover a worst-case of every other cpu being on one of two nodes for a
* very large NR_CPUS.
*
* Use PAGE_SIZE as a minimum for smaller configurations while avoiding
* unsigned comparison to -1.
*/
#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
#endif /* __LINUX_CPUMASK_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>
DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);
#ifdef CONFIG_DEBUG_PAGE_REF
/*
* Ideally we would want to use the trace_<tracepoint>_enabled() helper
* functions. But due to include header file issues, that is not
* feasible. Instead we have to open code the static key functions.
*
* See trace_##name##_enabled(void) in include/linux/tracepoint.h
*/
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)
extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);
#else
#define page_ref_tracepoint_active(t) false
static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}
#endif
static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
/**
* folio_ref_count - The reference count on this folio.
* @folio: The folio.
*
* The refcount is usually incremented by calls to folio_get() and
* decremented by calls to folio_put(). Some typical users of the
* folio refcount:
*
* - Each reference from a page table
* - The page cache
* - Filesystem private data
* - The LRU list
* - Pipes
* - Direct IO which references this page in the process address space
*
* Return: The number of references to this folio.
*/
static inline int folio_ref_count(const struct folio *folio)
{
return page_ref_count(&folio->page);
}
static inline int page_count(const struct page *page)
{
return folio_ref_count(page_folio(page));
}
static inline void set_page_count(struct page *page, int v)
{
atomic_set(&page->_refcount, v);
if (page_ref_tracepoint_active(page_ref_set))
__page_ref_set(page, v);
}
static inline void folio_set_count(struct folio *folio, int v)
{
set_page_count(&folio->page, v);
}
/*
* Set up the page count before the page is freed into the page allocator
* for the first time (boot or memory hotplug).
*/
static inline void init_page_count(struct page *page)
{
set_page_count(page, 1);
}
static inline void page_ref_add(struct page *page, int nr)
{
atomic_add(nr, &page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, nr);
}
static inline void folio_ref_add(struct folio *folio, int nr)
{
page_ref_add(&folio->page, nr);
}
static inline void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -nr);
}
static inline void folio_ref_sub(struct folio *folio, int nr)
{
page_ref_sub(&folio->page, nr);
}
static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
int ret = atomic_sub_return(nr, &folio->_refcount);
if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(&folio->page, -nr, ret);
return ret;
}
static inline void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, 1);
}
static inline void folio_ref_inc(struct folio *folio)
{
page_ref_inc(&folio->page);
}
static inline void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -1);
}
static inline void folio_ref_dec(struct folio *folio)
{
page_ref_dec(&folio->page);
}
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -nr, ret);
return ret;
}
static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
return page_ref_sub_and_test(&folio->page, nr);
}
static inline int page_ref_inc_return(struct page *page)
{
int ret = atomic_inc_return(&page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, 1, ret);
return ret;
}
static inline int folio_ref_inc_return(struct folio *folio)
{
return page_ref_inc_return(&folio->page);
}
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -1, ret);
return ret;
}
static inline int folio_ref_dec_and_test(struct folio *folio)
{
return page_ref_dec_and_test(&folio->page);
}
static inline int page_ref_dec_return(struct page *page)
{
int ret = atomic_dec_return(&page->_refcount);
if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, -1, ret);
return ret;
}
static inline int folio_ref_dec_return(struct folio *folio)
{
return page_ref_dec_return(&folio->page);
}
static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
bool ret = false;
rcu_read_lock();
/* avoid writing to the vmemmap area being remapped */
if (page_count_writable(page, u))
ret = atomic_add_unless(&page->_refcount, nr, u);
rcu_read_unlock();
if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
return ret;
}
static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
return page_ref_add_unless(&folio->page, nr, u);
}
/**
* folio_try_get - Attempt to increase the refcount on a folio.
* @folio: The folio.
*
* If you do not already have a reference to a folio, you can attempt to
* get one using this function. It may fail if, for example, the folio
* has been freed since you found a pointer to it, or it is frozen for
* the purposes of splitting or migration.
*
* Return: True if the reference count was successfully incremented.
*/
static inline bool folio_try_get(struct folio *folio)
{
return folio_ref_add_unless(folio, 1, 0);
}
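/*
 * Illustrative sketch (not part of the original header): the usual lookup
 * pattern. The caller found @folio under RCU and must confirm it is still
 * live before touching it; folio_put() is assumed to come from <linux/mm.h>.
 *
 *	if (!folio_try_get(folio))
 *		return NULL;	// freed or frozen since we found it; retry
 *	// ... use the folio ...
 *	folio_put(folio);
 */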
static inline bool folio_ref_try_add(struct folio *folio, int count)
{
return folio_ref_add_unless(folio, count, 0);
}
static inline int page_ref_freeze(struct page *page, int count)
{
int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
if (page_ref_tracepoint_active(page_ref_freeze))
__page_ref_freeze(page, count, ret);
return ret;
}
static inline int folio_ref_freeze(struct folio *folio, int count)
{
return page_ref_freeze(&folio->page, count);
}
static inline void page_ref_unfreeze(struct page *page, int count)
{
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
atomic_set_release(&page->_refcount, count);
if (page_ref_tracepoint_active(page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
page_ref_unfreeze(&folio->page, count);
}
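/*
 * Illustrative sketch (not part of the original header): freezing gives the
 * caller exclusive ownership, e.g. around splitting or migration. "expected"
 * is the number of references the caller believes exist.
 *
 *	if (!folio_ref_freeze(folio, expected))
 *		return -EAGAIN;		// someone else still holds a reference
 *	// ... work on the folio with no other users ...
 *	folio_ref_unfreeze(folio, expected);
 */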
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1995 Linus Torvalds
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*
* X86-64 port
* Andi Kleen.
*
* CPU hotplug support - ashok.raj@intel.com
*/
/*
* This file handles the architecture-dependent parts of process handling.
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <linux/iommu.h>
#include <asm/processor.h>
#include <asm/pkru.h>
#include <asm/fpu/sched.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#include <asm/fred.h>
#include <asm/msr.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif
#include "process.h"
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
const char *log_lvl)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned long d0, d1, d2, d3, d6, d7;
unsigned int fsindex, gsindex;
unsigned int ds, es;
show_iret_regs(regs, log_lvl);
if (regs->orig_ax != -1)
pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
else
pr_cont("\n");
printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n",
log_lvl, regs->ax, regs->bx, regs->cx);
printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n",
log_lvl, regs->dx, regs->si, regs->di);
printk("%sRBP: %016lx R08: %016lx R09: %016lx\n",
log_lvl, regs->bp, regs->r8, regs->r9);
printk("%sR10: %016lx R11: %016lx R12: %016lx\n",
log_lvl, regs->r10, regs->r11, regs->r12);
printk("%sR13: %016lx R14: %016lx R15: %016lx\n",
log_lvl, regs->r13, regs->r14, regs->r15);
if (mode == SHOW_REGS_SHORT)
return;
if (mode == SHOW_REGS_USER) {
rdmsrq(MSR_FS_BASE, fs);
rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);
printk("%sFS: %016lx GS: %016lx\n",
log_lvl, fs, shadowgs);
return;
}
asm("movl %%ds,%0" : "=r" (ds));
asm("movl %%es,%0" : "=r" (es));
asm("movl %%fs,%0" : "=r" (fsindex));
asm("movl %%gs,%0" : "=r" (gsindex));
rdmsrq(MSR_FS_BASE, fs);
rdmsrq(MSR_GS_BASE, gs);
rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = __read_cr3();
cr4 = __read_cr4();
printk("%sFS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
log_lvl, fs, fsindex, gs, gsindex, shadowgs);
printk("%sCS: %04x DS: %04x ES: %04x CR0: %016lx\n",
log_lvl, regs->cs, ds, es, cr0);
printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n",
log_lvl, cr2, cr3, cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
get_debugreg(d2, 2);
get_debugreg(d3, 3);
get_debugreg(d6, 6);
get_debugreg(d7, 7);
/* Only print out debug registers if they are in their non-default state. */
if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
(d6 == DR6_RESERVED) && (d7 == DR7_FIXED_1))) {
printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n",
log_lvl, d0, d1, d2);
printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n",
log_lvl, d3, d6, d7);
}
if (cr4 & X86_CR4_PKE)
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
void release_thread(struct task_struct *dead_task)
{
WARN_ON(dead_task->mm);
}
enum which_selector {
FS,
GS
};
/*
* Out of line to be protected from kprobes and tracing. If this would be
* traced or probed, then any access to a per CPU variable happens with
* the wrong GS.
*
* It is not used on Xen paravirt. When paravirt support is needed, it
* needs to be renamed with native_ prefix.
*/
static noinstr unsigned long __rdgsbase_inactive(void)
{
unsigned long gsbase;
lockdep_assert_irqs_disabled();
/*
* SWAPGS is no longer needed thus NOT allowed with FRED because
* FRED transitions ensure that an operating system can _always_
* operate with its own GS base address:
* - For events that occur in ring 3, FRED event delivery swaps
* the GS base address with the IA32_KERNEL_GS_BASE MSR.
* - ERETU (the FRED transition that returns to ring 3) also swaps
* the GS base address with the IA32_KERNEL_GS_BASE MSR.
*
* And the operating system can still setup the GS segment for a
* user thread without the need of loading a user thread GS with:
* - Using LKGS, available with FRED, to modify other attributes
* of the GS segment without compromising its ability always to
* operate with its own GS base address.
* - Accessing the GS segment base address for a user thread as
* before using RDMSR or WRMSR on the IA32_KERNEL_GS_BASE MSR.
*
* Note, LKGS loads the GS base address into the IA32_KERNEL_GS_BASE
* MSR instead of the GS segment’s descriptor cache. As such, the
* operating system never changes its runtime GS base address.
*/
if (!cpu_feature_enabled(X86_FEATURE_FRED) &&
!cpu_feature_enabled(X86_FEATURE_XENPV)) {
native_swapgs();
gsbase = rdgsbase();
native_swapgs();
} else {
instrumentation_begin();
rdmsrq(MSR_KERNEL_GS_BASE, gsbase);
instrumentation_end();
}
return gsbase;
}
/*
* Out of line to be protected from kprobes and tracing. If this would be
* traced or probed, then any access to a per CPU variable happens with
* the wrong GS.
*
* It is not used on Xen paravirt. When paravirt support is needed, it
* needs to be renamed with native_ prefix.
*/
static noinstr void __wrgsbase_inactive(unsigned long gsbase)
{
lockdep_assert_irqs_disabled();
if (!cpu_feature_enabled(X86_FEATURE_FRED) &&
!cpu_feature_enabled(X86_FEATURE_XENPV)) {
native_swapgs();
wrgsbase(gsbase);
native_swapgs();
} else {
instrumentation_begin();
wrmsrq(MSR_KERNEL_GS_BASE, gsbase);
instrumentation_end();
}
}
/*
* Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
* not available. The goal is to be reasonably fast on non-FSGSBASE systems.
* It's forcibly inlined because it'll generate better code and this function
* is hot.
*/
static __always_inline void save_base_legacy(struct task_struct *prev_p,
unsigned short selector,
enum which_selector which)
{
if (likely(selector == 0)) {
/*
* On Intel (without X86_BUG_NULL_SEG), the segment base could
* be the pre-existing saved base or it could be zero. On AMD
* (with X86_BUG_NULL_SEG), the segment base could be almost
* anything.
*
* This branch is very hot (it's hit twice on almost every
* context switch between 64-bit programs), and avoiding
* the RDMSR helps a lot, so we just assume that whatever
* value is already saved is correct. This matches historical
* Linux behavior, so it won't break existing applications.
*
* To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
* report that the base is zero, it needs to actually be zero:
* see the corresponding logic in load_seg_legacy.
*/
} else {
/*
* If the selector is 1, 2, or 3, then the base is zero on
* !X86_BUG_NULL_SEG CPUs and could be anything on
* X86_BUG_NULL_SEG CPUs. In the latter case, Linux
* has never attempted to preserve the base across context
* switches.
*
* If selector > 3, then it refers to a real segment, and
* saving the base isn't necessary.
*/
if (which == FS)
prev_p->thread.fsbase = 0;
else
prev_p->thread.gsbase = 0;
}
}
static __always_inline void save_fsgs(struct task_struct *task)
{
savesegment(fs, task->thread.fsindex);
savesegment(gs, task->thread.gsindex);
if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
/*
* If FSGSBASE is enabled, we can't make any useful guesses
* about the base, and user code expects us to save the current
* value. Fortunately, reading the base directly is efficient.
*/
task->thread.fsbase = rdfsbase();
task->thread.gsbase = __rdgsbase_inactive();
} else {
save_base_legacy(task, task->thread.fsindex, FS);
save_base_legacy(task, task->thread.gsindex, GS);
}
}
/*
* While a process is running, current->thread.fsbase and current->thread.gsbase
* may not match the corresponding CPU registers (see save_base_legacy()).
*/
void current_save_fsgs(void)
{
unsigned long flags;
/* Interrupts need to be off for FSGSBASE */
	local_irq_save(flags);
	save_fsgs(current);
	local_irq_restore(flags);
}
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL_GPL(current_save_fsgs);
#endif
static __always_inline void loadseg(enum which_selector which,
unsigned short sel)
{
if (which == FS)
loadsegment(fs, sel);
else
load_gs_index(sel);
}
static __always_inline void load_seg_legacy(unsigned short prev_index,
unsigned long prev_base,
unsigned short next_index,
unsigned long next_base,
enum which_selector which)
{
if (likely(next_index <= 3)) {
/*
* The next task is using 64-bit TLS, is not using this
* segment at all, or is having fun with arcane CPU features.
*/
if (next_base == 0) {
/*
* Nasty case: on AMD CPUs, we need to forcibly zero
* the base.
*/
if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
loadseg(which, __USER_DS);
loadseg(which, next_index);
} else {
/*
* We could try to exhaustively detect cases
* under which we can skip the segment load,
* but there's really only one case that matters
* for performance: if both the previous and
* next states are fully zeroed, we can skip
* the load.
*
* (This assumes that prev_base == 0 has no
* false positives. This is the case on
* Intel-style CPUs.)
*/
if (likely(prev_index | next_index | prev_base))
loadseg(which, next_index);
}
} else {
if (prev_index != next_index)
loadseg(which, next_index);
wrmsrq(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
next_base);
}
} else {
/*
* The next task is using a real segment. Loading the selector
* is sufficient.
*/
loadseg(which, next_index);
}
}
/*
* Store prev's PKRU value and load next's PKRU value if they differ. PKRU
* is not XSTATE managed on context switch because that would require a
* lookup in the task's FPU xsave buffer and require to keep that updated
* in various places.
*/
static __always_inline void x86_pkru_load(struct thread_struct *prev,
struct thread_struct *next)
{
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return;
/* Stash the prev task's value: */
prev->pkru = rdpkru();
/*
* PKRU writes are slightly expensive. Avoid them when not
* strictly necessary:
*/
if (prev->pkru != next->pkru)
wrpkru(next->pkru);
}
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
struct thread_struct *next)
{
if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
/* Update the FS and GS selectors if they could have changed. */
if (unlikely(prev->fsindex || next->fsindex))
loadseg(FS, next->fsindex);
if (unlikely(prev->gsindex || next->gsindex))
loadseg(GS, next->gsindex);
/* Update the bases. */
wrfsbase(next->fsbase);
__wrgsbase_inactive(next->gsbase);
} else {
load_seg_legacy(prev->fsindex, prev->fsbase,
next->fsindex, next->fsbase, FS);
load_seg_legacy(prev->gsindex, prev->gsbase,
next->gsindex, next->gsbase, GS);
}
}
unsigned long x86_fsgsbase_read_task(struct task_struct *task,
unsigned short selector)
{
unsigned short idx = selector >> 3;
unsigned long base;
if (likely((selector & SEGMENT_TI_MASK) == 0)) {
if (unlikely(idx >= GDT_ENTRIES))
return 0;
/*
* There are no user segments in the GDT with nonzero bases
* other than the TLS segments.
*/
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return 0;
idx -= GDT_ENTRY_TLS_MIN;
base = get_desc_base(&task->thread.tls_array[idx]);
} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
/*
* If performance here mattered, we could protect the LDT
* with RCU. This is a slow path, though, so we can just
* take the mutex.
*/
mutex_lock(&task->mm->context.lock);
ldt = task->mm->context.ldt;
if (unlikely(!ldt || idx >= ldt->nr_entries))
base = 0;
else
base = get_desc_base(ldt->entries + idx);
mutex_unlock(&task->mm->context.lock);
#else
base = 0;
#endif
}
return base;
}
unsigned long x86_gsbase_read_cpu_inactive(void)
{
unsigned long gsbase;
if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
unsigned long flags;
local_irq_save(flags);
gsbase = __rdgsbase_inactive();
local_irq_restore(flags);
} else {
rdmsrq(MSR_KERNEL_GS_BASE, gsbase);
}
return gsbase;
}
void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
unsigned long flags;
local_irq_save(flags);
__wrgsbase_inactive(gsbase);
local_irq_restore(flags);
} else {
wrmsrq(MSR_KERNEL_GS_BASE, gsbase);
}
}
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
unsigned long fsbase;
if (task == current)
fsbase = x86_fsbase_read_cpu();
else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
(task->thread.fsindex == 0))
fsbase = task->thread.fsbase;
else
fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
return fsbase;
}
unsigned long x86_gsbase_read_task(struct task_struct *task)
{
unsigned long gsbase;
if (task == current)
gsbase = x86_gsbase_read_cpu_inactive();
else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
(task->thread.gsindex == 0))
gsbase = task->thread.gsbase;
else
gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
return gsbase;
}
void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
WARN_ON_ONCE(task == current);
task->thread.fsbase = fsbase;
}
void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
WARN_ON_ONCE(task == current);
task->thread.gsbase = gsbase;
}
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
unsigned long new_sp,
u16 _cs, u16 _ss, u16 _ds)
{
WARN_ON_ONCE(regs != current_pt_regs());
if (static_cpu_has(X86_BUG_NULL_SEG)) {
/* Loading zero below won't clear the base. */
loadsegment(fs, __USER_DS);
load_gs_index(__USER_DS);
}
reset_thread_features();
loadsegment(fs, 0);
loadsegment(es, _ds);
loadsegment(ds, _ds);
load_gs_index(0);
regs->ip = new_ip;
regs->sp = new_sp;
regs->csx = _cs;
regs->ssx = _ss;
/*
* Allow single-step trap and NMI when starting a new task, thus
* once the new task enters user space, single-step trap and NMI
* are both enabled immediately.
*
* Entering a new task is logically speaking a return from a
* system call (exec, fork, clone, etc.). As such, if ptrace
* enables single stepping a single step exception should be
* allowed to trigger immediately upon entering user space.
* This is not optional.
*
* NMI should *never* be disabled in user space. As such, this
* is an optional, opportunistic way to catch errors.
*
* Paranoia: High-order 48 bits above the lowest 16 bit SS are
* discarded by the legacy IRET instruction on all Intel, AMD,
* and Cyrix/Centaur/VIA CPUs, thus can be set unconditionally,
* even when FRED is not enabled. But we choose the safer side
* to use these bits only when FRED is enabled.
*/
if (cpu_feature_enabled(X86_FEATURE_FRED)) {
regs->fred_ss.swevent = true;
regs->fred_ss.nmi = true;
}
regs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
start_thread_common(regs, new_ip, new_sp,
__USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32)
{
start_thread_common(regs, new_ip, new_sp,
x32 ? __USER_CS : __USER32_CS,
__USER_DS, __USER_DS);
}
#endif
/*
* switch_to(x,y) should switch tasks from x to y.
*
* This could still be optimized:
* - fold all the options into a flag word and test it with a single test.
* - could test fs/gs bitsliced
*
* Kprobes not supported here. Set the probe on schedule instead.
* Function graph tracer not supported either.
*/
__no_kmsan_checks
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread;
struct thread_struct *next = &next_p->thread;
int cpu = smp_processor_id();
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
this_cpu_read(hardirq_stack_inuse));
switch_fpu(prev_p, cpu);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
* (e.g. xen_load_tls())
*/
save_fsgs(prev_p);
/*
* Load TLS before restoring any segments so that segment loads
* reference the correct GDT entries.
*/
load_TLS(next, cpu);
/*
* Leave lazy mode, flushing any hypercalls made here. This
* must be done after loading TLS entries in the GDT but before
* loading segments that might reference them.
*/
arch_end_context_switch(next_p);
/* Switch DS and ES.
*
* Reading them only returns the selectors, but writing them (if
* nonzero) loads the full descriptor from the GDT or LDT. The
* LDT for next is loaded in switch_mm, and the GDT is loaded
* above.
*
* We therefore need to write new values to the segment
* registers on every context switch unless both the new and old
* values are zero.
*
* Note that we don't need to do anything for CS and SS, as
* those are saved and restored as part of pt_regs.
*/
savesegment(es, prev->es);
if (unlikely(next->es | prev->es))
loadsegment(es, next->es);
savesegment(ds, prev->ds);
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
x86_fsgsbase_load(prev, next);
x86_pkru_load(prev, next);
/*
* Switch the PDA and FPU contexts.
*/
raw_cpu_write(current_task, next_p);
raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
/* Reload sp0. */
update_task_stack(next_p);
switch_to_extra(prev_p, next_p);
if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
/*
* AMD CPUs have a misfeature: SYSRET sets the SS selector but
* does not update the cached descriptor. As a result, if we
* do SYSRET while SS is NULL, we'll end up in user mode with
* SS apparently equal to __USER_DS but actually unusable.
*
* The straightforward workaround would be to fix it up just
* before SYSRET, but that would slow down the system call
* fast paths. Instead, we ensure that SS is never NULL in
* system call context. We do this by replacing NULL SS
* selectors at every context switch. SYSCALL sets up a valid
* SS, so the only way to get NULL is to re-enter the kernel
* from CPL 3 through an interrupt. Since that can't happen
* in the same task as a running syscall, we are guaranteed to
* context switch between every interrupt vector entry and a
* subsequent SYSRET.
*
* We read SS first because SS reads are much faster than
* writes. Out of caution, we force SS to __KERNEL_DS even if
* it previously had a different non-NULL value.
*/
unsigned short ss_sel;
savesegment(ss, ss_sel);
if (ss_sel != __KERNEL_DS)
loadsegment(ss, __KERNEL_DS);
}
/* Load the Intel cache allocation PQR MSR. */
resctrl_arch_sched_in(next_p);
/* Reset hw history on AMD CPUs */
if (cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS))
wrmsrl(MSR_AMD_WORKLOAD_HRST, 0x1);
return prev_p;
}
void set_personality_64bit(void)
{
/* inherit personality from parent */
/* Make sure to be in 64bit mode */
clear_thread_flag(TIF_ADDR32);
/* Pretend that this comes from a 64bit execve */
task_pt_regs(current)->orig_ax = __NR_execve;
current_thread_info()->status &= ~TS_COMPAT;
if (current->mm)
__set_bit(MM_CONTEXT_HAS_VSYSCALL, &current->mm->context.flags);
/* TBD: overwrites user setup. Should have two bits.
But 64bit processes have always behaved this way,
so it's not too bad. The main problem is just that
32bit children are affected again. */
current->personality &= ~READ_IMPLIES_EXEC;
}
static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32_ABI
if (current->mm)
current->mm->context.flags = 0;
current->personality &= ~READ_IMPLIES_EXEC;
/*
* in_32bit_syscall() uses the presence of the x32 syscall bit
* flag to determine compat status. The x86 mmap() code relies on
* the syscall bitness so set x32 syscall bit right here to make
* in_32bit_syscall() work during exec().
*
* Pretend to come from an x32 execve.
*/
task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
current_thread_info()->status &= ~TS_COMPAT;
#endif
}
static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
if (current->mm) {
/*
* uprobes applied to this MM need to know this and
* cannot use user_64bit_mode() at that time.
*/
__set_bit(MM_CONTEXT_UPROBE_IA32, &current->mm->context.flags);
}
current->personality |= force_personality32;
/* Prepare the first "return" to user space */
task_pt_regs(current)->orig_ax = __NR_ia32_execve;
current_thread_info()->status |= TS_COMPAT;
#endif
}
void set_personality_ia32(bool x32)
{
/* Make sure to be in 32bit mode */
set_thread_flag(TIF_ADDR32);
if (x32)
__set_personality_x32();
else
__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
int ret;
ret = map_vdso_once(image, addr);
if (ret)
return ret;
return (long)image->size;
}
#endif
#ifdef CONFIG_ADDRESS_MASKING
#define LAM_U57_BITS 6
static void enable_lam_func(void *__mm)
{
struct mm_struct *mm = __mm;
unsigned long lam;
if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) {
lam = mm_lam_cr3_mask(mm);
write_cr3(__read_cr3() | lam);
cpu_tlbstate_update_lam(lam, mm_untag_mask(mm));
}
}
static void mm_enable_lam(struct mm_struct *mm)
{
mm->context.lam_cr3_mask = X86_CR3_LAM_U57;
mm->context.untag_mask = ~GENMASK(62, 57);
/*
* Even though the process must still be single-threaded at this
* point, kernel threads may be using the mm. IPI those kernel
* threads if they exist.
*/
on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
}
static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
{
if (!cpu_feature_enabled(X86_FEATURE_LAM))
return -ENODEV;
/* PTRACE_ARCH_PRCTL */
if (current->mm != mm)
return -EINVAL;
if (mm_valid_pasid(mm) &&
!test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags))
return -EINVAL;
if (mmap_write_lock_killable(mm))
return -EINTR;
/*
* MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from
* being enabled unless the process is single threaded:
*/
if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) {
mmap_write_unlock(mm);
return -EBUSY;
}
if (!nr_bits || nr_bits > LAM_U57_BITS) {
mmap_write_unlock(mm);
return -EINVAL;
}
mm_enable_lam(mm);
mmap_write_unlock(mm);
return 0;
}
#endif
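/*
 * Illustrative sketch (not part of the original file): a single-threaded
 * process opting in to LAM_U57 from user space. This assumes the raw
 * arch_prctl() syscall is used (glibc does not wrap it) and that
 * ARCH_ENABLE_TAGGED_ADDR is available from <asm/prctl.h>.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/prctl.h>
 *
 *	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6))
 *		perror("LAM not available");
 */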
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
int ret = 0;
switch (option) {
case ARCH_SET_GS: {
if (unlikely(arg2 >= TASK_SIZE_MAX))
return -EPERM;
preempt_disable();
/*
* ARCH_SET_GS has always overwritten the index
* and the base. Zero is the most sensible value
* to put in the index, and is the only value that
* makes any sense if FSGSBASE is unavailable.
*/
if (task == current) {
loadseg(GS, 0);
x86_gsbase_write_cpu_inactive(arg2);
/*
* On non-FSGSBASE systems, save_base_legacy() expects
* that we also fill in thread.gsbase.
*/
task->thread.gsbase = arg2;
} else {
task->thread.gsindex = 0;
x86_gsbase_write_task(task, arg2);
}
preempt_enable();
break;
}
case ARCH_SET_FS: {
/*
* Not strictly needed for %fs, but do it for symmetry
* with %gs
*/
if (unlikely(arg2 >= TASK_SIZE_MAX))
return -EPERM;
preempt_disable();
/*
* Set the selector to 0 for the same reason
* as %gs above.
*/
if (task == current) {
loadseg(FS, 0);
x86_fsbase_write_cpu(arg2);
/*
* On non-FSGSBASE systems, save_base_legacy() expects
* that we also fill in thread.fsbase.
*/
task->thread.fsbase = arg2;
} else {
task->thread.fsindex = 0;
x86_fsbase_write_task(task, arg2);
}
preempt_enable();
break;
}
case ARCH_GET_FS: {
unsigned long base = x86_fsbase_read_task(task);
ret = put_user(base, (unsigned long __user *)arg2);
break;
}
case ARCH_GET_GS: {
unsigned long base = x86_gsbase_read_task(task);
ret = put_user(base, (unsigned long __user *)arg2);
break;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
case ARCH_MAP_VDSO_X32:
return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# ifdef CONFIG_IA32_EMULATION
case ARCH_MAP_VDSO_32:
return prctl_map_vdso(&vdso_image_32, arg2);
# endif
case ARCH_MAP_VDSO_64:
return prctl_map_vdso(&vdso_image_64, arg2);
#endif
#ifdef CONFIG_ADDRESS_MASKING
case ARCH_GET_UNTAG_MASK:
return put_user(task->mm->context.untag_mask,
(unsigned long __user *)arg2);
case ARCH_ENABLE_TAGGED_ADDR:
return prctl_enable_tagged_addr(task->mm, arg2);
case ARCH_FORCE_TAGGED_SVA:
if (current != task)
return -EINVAL;
set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags);
return 0;
case ARCH_GET_MAX_TAG_BITS:
if (!cpu_feature_enabled(X86_FEATURE_LAM))
return put_user(0, (unsigned long __user *)arg2);
else
return put_user(LAM_U57_BITS, (unsigned long __user *)arg2);
#endif
case ARCH_SHSTK_ENABLE:
case ARCH_SHSTK_DISABLE:
case ARCH_SHSTK_LOCK:
case ARCH_SHSTK_UNLOCK:
case ARCH_SHSTK_STATUS:
return shstk_prctl(task, option, arg2);
default:
ret = -EINVAL;
break;
}
return ret;
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions for the 'struct sk_buff' memory handlers.
*
* Authors:
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Florian La Roche, <rzsfl@rz.uni-sb.de>
*/
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>
#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <net/flow_dissector.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <linux/llist.h>
#include <linux/page_frag_cache.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
#include <net/net_debug.h>
#include <net/dropreason-core.h>
#include <net/netmem.h>
/**
* DOC: skb checksums
*
* The interface for checksum offload between the stack and networking drivers
* is as follows...
*
* IP checksum related features
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Drivers advertise checksum offload capabilities in the features of a device.
* From the stack's point of view these are capabilities offered by the driver.
* A driver typically only advertises features that it is capable of offloading
* to its device.
*
* .. flat-table:: Checksum related device features
* :widths: 1 10
*
* * - %NETIF_F_HW_CSUM
* - The driver (or its device) is able to compute one
* IP (one's complement) checksum for any combination
* of protocols or protocol layering. The checksum is
* computed and set in a packet per the CHECKSUM_PARTIAL
* interface (see below).
*
* * - %NETIF_F_IP_CSUM
* - Driver (device) is only able to checksum plain
* TCP or UDP packets over IPv4. These are specifically
* unencapsulated packets of the form IPv4|TCP or
* IPv4|UDP where the Protocol field in the IPv4 header
* is TCP or UDP. The IPv4 header may contain IP options.
* This feature cannot be set in features for a device
* with NETIF_F_HW_CSUM also set. This feature is being
* DEPRECATED (see below).
*
* * - %NETIF_F_IPV6_CSUM
* - Driver (device) is only able to checksum plain
* TCP or UDP packets over IPv6. These are specifically
* unencapsulated packets of the form IPv6|TCP or
* IPv6|UDP where the Next Header field in the IPv6
* header is either TCP or UDP. IPv6 extension headers
* are not supported with this feature. This feature
* cannot be set in features for a device with
* NETIF_F_HW_CSUM also set. This feature is being
* DEPRECATED (see below).
*
* * - %NETIF_F_RXCSUM
* - Driver (device) performs receive checksum offload.
* This flag is only used to disable the RX checksum
* feature for a device. The stack will accept receive
* checksum indication in packets received on a device
* regardless of whether NETIF_F_RXCSUM is set.
*
* Checksumming of received packets by device
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Indication of checksum verification is set in &sk_buff.ip_summed.
* Possible values are:
*
* - %CHECKSUM_NONE
*
* Device did not checksum this packet, e.g. due to lack of capabilities.
* The packet contains a full (though not verified) checksum in the packet
* itself, but not in skb->csum. Thus, skb->csum is undefined in this case.
*
* - %CHECKSUM_UNNECESSARY
*
* The hardware you're dealing with doesn't calculate the full checksum
* (as in %CHECKSUM_COMPLETE), but it does parse headers and verify checksums
* for specific protocols. For such packets it will set %CHECKSUM_UNNECESSARY
* if their checksums are okay. &sk_buff.csum is still undefined in this case
* though. A driver or device must never modify the checksum field in the
* packet even if checksum is verified.
*
* %CHECKSUM_UNNECESSARY is applicable to following protocols:
*
* - TCP: IPv6 and IPv4.
* - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
* zero UDP checksum for either IPv4 or IPv6; the networking stack
* may perform further validation in this case.
* - GRE: only if the checksum is present in the header.
* - SCTP: indicates the CRC in SCTP header has been validated.
* - FCOE: indicates the CRC in FC frame has been validated.
*
* &sk_buff.csum_level indicates the number of consecutive checksums found in
* the packet minus one that have been verified as %CHECKSUM_UNNECESSARY.
* For instance, if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
* and the device is able to verify the checksums for UDP (possibly zero),
* GRE (checksum flag is set) and TCP, &sk_buff.csum_level would be set to
* two. If the device were only able to verify the UDP checksum and not
* GRE, either because it doesn't support GRE checksum or because GRE
* checksum is bad, skb->csum_level would be set to zero (TCP checksum is
* not considered in this case).
*
* - %CHECKSUM_COMPLETE
*
* This is the most generic way. The device supplied the checksum of the
* _whole_ packet as seen by netif_rx() and filled in &sk_buff.csum. This
* means the hardware doesn't need to parse L3/L4 headers to implement this.
*
* Notes:
*
* - Even if a device supports only some protocols but is able to produce
* skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
* - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
*
* - %CHECKSUM_PARTIAL
*
* A checksum is set up to be offloaded to a device as described in the
* output description for CHECKSUM_PARTIAL. This may occur on a packet
* received directly from another Linux OS, e.g., a virtualized Linux kernel
* on the same host, or it may be set in the input path in GRO or remote
* checksum offload. For the purposes of checksum verification, the checksum
* referred to by skb->csum_start + skb->csum_offset and any preceding
* checksums in the packet are considered verified. Any checksums in the
* packet that are after the checksum being offloaded are not considered to
* be verified.
*
* Checksumming on transmit for non-GSO
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The stack requests checksum offload in the &sk_buff.ip_summed for a packet.
* Values are:
*
* - %CHECKSUM_PARTIAL
*
* The driver is required to checksum the packet as seen by hard_start_xmit()
* from &sk_buff.csum_start up to the end, and to record/write the checksum at
* offset &sk_buff.csum_start + &sk_buff.csum_offset.
* A driver may verify that the
* csum_start and csum_offset values are valid values given the length and
* offset of the packet, but it should not attempt to validate that the
* checksum refers to a legitimate transport layer checksum -- it is the
* purview of the stack to validate that csum_start and csum_offset are set
* correctly.
*
* When the stack requests checksum offload for a packet, the driver MUST
* ensure that the checksum is set correctly. A driver can either offload the
* checksum calculation to the device, or call skb_checksum_help (in the case
* that the device does not support offload for a particular checksum).
*
* %NETIF_F_IP_CSUM and %NETIF_F_IPV6_CSUM are being deprecated in favor of
* %NETIF_F_HW_CSUM. New devices should use %NETIF_F_HW_CSUM to indicate
* checksum offload capability.
* skb_csum_hwoffload_help() can be called to resolve %CHECKSUM_PARTIAL based
* on network device checksumming capabilities: if a packet does not match
* them, skb_checksum_help() or skb_crc32c_csum_help() (depending on the value of
* &sk_buff.csum_not_inet, see :ref:`crc`)
* is called to resolve the checksum.
*
* - %CHECKSUM_NONE
*
* The skb was already checksummed by the protocol, or a checksum is not
* required.
*
* - %CHECKSUM_UNNECESSARY
*
* This has the same meaning as CHECKSUM_NONE for checksum offload on
* output.
*
* - %CHECKSUM_COMPLETE
*
* Not used in checksum output. If a driver observes a packet with this value
* set in skbuff, it should treat the packet as if %CHECKSUM_NONE were set.
*
* .. _crc:
*
* Non-IP checksum (CRC) offloads
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* .. flat-table::
* :widths: 1 10
*
* * - %NETIF_F_SCTP_CRC
* - This feature indicates that a device is capable of
* offloading the SCTP CRC in a packet. To perform this offload the stack
* will set csum_start and csum_offset accordingly, set ip_summed to
* %CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication
* in the skbuff that the %CHECKSUM_PARTIAL refers to CRC32c.
* A driver that supports both IP checksum offload and SCTP CRC32c offload
* must verify which offload is configured for a packet by testing the
* value of &sk_buff.csum_not_inet; skb_crc32c_csum_help() is provided to
* resolve %CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
*
* * - %NETIF_F_FCOE_CRC
* - This feature indicates that a device is capable of offloading the FCOE
* CRC in a packet. To perform this offload the stack will set ip_summed
* to %CHECKSUM_PARTIAL and set csum_start and csum_offset
* accordingly. Note that there is no indication in the skbuff that the
* %CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
* both IP checksum offload and FCOE CRC offload must verify which offload
* is configured for a packet, presumably by inspecting packet headers.
*
* Checksumming on output with GSO
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* In the case of a GSO packet (skb_is_gso() is true), checksum offload
* is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
* gso_type is %SKB_GSO_TCPV4 or %SKB_GSO_TCPV6, TCP checksum offload as
* part of the GSO operation is implied. If a checksum is being offloaded
* with GSO then ip_summed is %CHECKSUM_PARTIAL, and both csum_start and
* csum_offset are set to refer to the outermost checksum being offloaded
* (two offloaded checksums are possible with UDP encapsulation).
*/
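/*
 * A minimal sketch (not part of this header) of how a transmit path might
 * resolve CHECKSUM_PARTIAL when its device cannot offload a particular
 * checksum; the hypothetical my_hw_can_csum() stands in for a device
 * capability check::
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb)) {
 *		if (skb_checksum_help(skb))
 *			goto drop;	(software fallback failed, drop the skb)
 *	}
 *	(otherwise hand the skb to the device; csum_start and csum_offset
 *	 tell it where to write the checksum)
 */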
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3
/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL 3
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* For X bytes available in skb->head, what is the minimal
* allocation needed, knowing struct skb_shared_info needs
* to be aligned.
*/
#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) + \
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;
struct ts_config;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
enum {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
BRNF_PROTO_PPPOE
} orig_proto:8;
u8 pkt_otherhost:1;
u8 in_prerouting:1;
u8 bridged_dnat:1;
u8 sabotage_in_done:1;
__u16 frag_max_size;
int physinif;
/* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
union {
/* prerouting: detect dnat in orig/reply direction */
__be32 ipv4_daddr;
struct in6_addr ipv6_daddr;
/* after prerouting + nat detected: store original source
* mac since neigh resolution overwrites it, only used while
* skb is out in neigh layer.
*/
char neigh_header[8];
};
};
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
* ovs recirc_id. It will be set to the current chain by tc
* and read by ovs to recirc_id.
*/
struct tc_skb_ext {
union {
u64 act_miss_cookie;
__u32 chain;
};
__u16 mru;
__u16 zone;
u8 post_ct:1;
u8 post_ct_snat:1;
u8 post_ct_dnat:1;
u8 act_miss:1; /* Set if act_miss_cookie is used */
u8 l2_miss:1; /* Set by bridge upon FDB or MDB miss */
};
#endif
struct sk_buff_head {
/* These two members must be first to match sk_buff. */
struct_group_tagged(sk_buff_list, list,
struct sk_buff *next;
struct sk_buff *prev;
);
__u32 qlen;
spinlock_t lock;
};
struct sk_buff;
#ifndef CONFIG_MAX_SKB_FRAGS
# define CONFIG_MAX_SKB_FRAGS 17
#endif
#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
* segment using its current segmentation instead.
*/
#define GSO_BY_FRAGS 0xFFFF
typedef struct skb_frag {
netmem_ref netmem;
unsigned int len;
unsigned int offset;
} skb_frag_t;
/**
* skb_frag_size() - Returns the size of a skb fragment
* @frag: skb fragment
*/
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
return frag->len;
}
/**
* skb_frag_size_set() - Sets the size of a skb fragment
* @frag: skb fragment
* @size: size of fragment
*/
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
frag->len = size;
}
/**
* skb_frag_size_add() - Increments the size of a skb fragment by @delta
* @frag: skb fragment
* @delta: value to add
*/
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
frag->len += delta;
}
/**
* skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
* @frag: skb fragment
* @delta: value to subtract
*/
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
frag->len -= delta;
}
/**
* skb_frag_must_loop - Test if %p is a high memory page
* @p: fragment's page
*/
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
return true;
#endif
return false;
}
/**
* skb_frag_foreach_page - loop over pages in a fragment
*
* @f: skb frag to operate on
* @f_off: offset from start of f->netmem
* @f_len: length from f_off to loop over
* @p: (temp var) current page
* @p_off: (temp var) offset from start of current page,
* non-zero only on first page.
* @p_len: (temp var) length in current page,
* < PAGE_SIZE only on first and last page.
* @copied: (temp var) length so far, excluding current p_len.
*
* A fragment can hold a compound page, in which case per-page
* operations, notably kmap_atomic, must be called for each
* regular page.
*/
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
p_off = (f_off) & (PAGE_SIZE - 1), \
p_len = skb_frag_must_loop(p) ? \
min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
copied = 0; \
copied < f_len; \
copied += p_len, p++, p_off = 0, \
p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
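
/*
 * A minimal usage sketch, assuming @frag is a valid fragment and @dst has
 * room for its full length; this is the per-page pattern the copy helpers
 * follow for highmem-capable configurations::
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *vaddr = kmap_local_page(p);
 *
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_local(vaddr);
 *	}
 */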
/**
* struct skb_shared_hwtstamps - hardware time stamps
* @hwtstamp: hardware time stamp transformed into duration
* since arbitrary point in time
* @netdev_data: address/cookie of network device driver used as
* reference to actual hardware time stamp
*
* Software time stamps generated by ktime_get_real() are stored in
* skb->tstamp.
*
* hwtstamps can only be compared against other hwtstamps from
* the same device.
*
* This structure is attached to packets as part of the
* &skb_shared_info. Use skb_hwtstamps() to get a pointer.
*/
struct skb_shared_hwtstamps {
union {
ktime_t hwtstamp;
void *netdev_data;
};
};
/* Definitions for tx_flags in struct skb_shared_info */
enum {
/* generate hardware time stamp */
SKBTX_HW_TSTAMP_NOBPF = 1 << 0,
/* generate software time stamp when queueing packet to NIC */
SKBTX_SW_TSTAMP = 1 << 1,
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
/* generate software time stamp on packet tx completion */
SKBTX_COMPLETION_TSTAMP = 1 << 3,
/* determine hardware time stamp based on time or cycles */
SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
/* generate software time stamp when entering packet scheduling */
SKBTX_SCHED_TSTAMP = 1 << 6,
/* used for bpf extension when a bpf program is loaded */
SKBTX_BPF = 1 << 7,
};
#define SKBTX_HW_TSTAMP (SKBTX_HW_TSTAMP_NOBPF | SKBTX_BPF)
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
SKBTX_SCHED_TSTAMP | \
SKBTX_BPF | \
SKBTX_COMPLETION_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \
SKBTX_ANY_SW_TSTAMP)
/* Definitions for flags in struct skb_shared_info */
enum {
/* use zcopy routines */
SKBFL_ZEROCOPY_ENABLE = BIT(0),
/* This indicates at least one fragment might be overwritten
* (as in vmsplice(), sendfile() ...)
* If we need to compute a TX checksum, we'll need to copy
* all frags to avoid possible bad checksum
*/
SKBFL_SHARED_FRAG = BIT(1),
/* segment contains only zerocopy data and should not be
* charged to the kernel memory.
*/
SKBFL_PURE_ZEROCOPY = BIT(2),
SKBFL_DONT_ORPHAN = BIT(3),
/* page references are managed by the ubuf_info, so it's safe to
* use frags only up until ubuf_info is released
*/
SKBFL_MANAGED_FRAG_REFS = BIT(4),
};
#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
struct ubuf_info_ops {
void (*complete)(struct sk_buff *, struct ubuf_info *,
bool zerocopy_success);
/* has to be compatible with skb_zcopy_set() */
int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
};
/*
* The callback notifies userspace to release buffers when skb DMA is done in
* lower device, the skb last reference should be 0 when calling this.
* The zerocopy_success argument is true if zero copy transmit occurred,
* false on data copy or out of memory error caused by data copy attempt.
* The ctx field is used to track device context.
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
const struct ubuf_info_ops *ops;
refcount_t refcnt;
u8 flags;
};
struct ubuf_info_msgzc {
struct ubuf_info ubuf;
union {
struct {
unsigned long desc;
void *ctx;
};
struct {
u32 id;
u16 len;
u16 zerocopy:1;
u32 bytelen;
};
};
struct mmpin {
struct user_struct *user;
unsigned int num_pg;
} mmp;
};
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
#define uarg_to_msgzc(ubuf_ptr) container_of((ubuf_ptr), struct ubuf_info_msgzc, \
ubuf)
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
/* Preserve some data across TX submission and completion.
*
* Note, this state is stored in the driver. Extending the layout
* might need some special care.
*/
struct xsk_tx_metadata_compl {
__u64 *tx_timestamp;
};
/* This data is invariant across clones and lives at
* the end of the header data, i.e. at skb->end.
*/
struct skb_shared_info {
__u8 flags;
__u8 meta_len;
__u8 nr_frags;
__u8 tx_flags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
struct sk_buff *frag_list;
union {
struct skb_shared_hwtstamps hwtstamps;
struct xsk_tx_metadata_compl xsk_meta;
};
unsigned int gso_type;
u32 tskey;
/*
* Warning : all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
union {
struct {
u32 xdp_frags_size;
u32 xdp_frags_truesize;
};
/*
* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor.
*/
void *destructor_arg;
};
/* must be last field, see pskb_expand_head() */
skb_frag_t frags[MAX_SKB_FRAGS];
};
/**
* DOC: dataref and headerless skbs
*
* Transport layers send out clones of payload skbs they hold for
* retransmissions. To allow lower layers of the stack to prepend their headers
* we split &skb_shared_info.dataref into two halves.
* The lower 16 bits count the overall number of references.
* The higher 16 bits indicate how many of the references are payload-only.
* skb_header_cloned() checks if skb is allowed to add / write the headers.
*
* The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
* (via __skb_header_release()). Any clone created from a marked skb will get
* &sk_buff.hdr_len populated with the available headroom.
* If there is only one clone in existence, it is able to modify the headroom
* at will. The sequence of calls inside the transport layer is::
*
* <alloc skb>
* skb_reserve()
* __skb_header_release()
* skb_clone()
* // send the clone down the stack
*
* This is not a very generic construct and it depends on the transport layers
* doing the right thing. In practice there's usually only one payload-only skb.
* Having multiple payload-only skbs with different lengths of hdr_len is not
* possible. The payload-only skbs should never leave their owner.
*/
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
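/*
 * For illustration, the two halves can be read back like this (see also
 * skb_header_cloned() further below)::
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int total   = dataref & SKB_DATAREF_MASK;	(all references)
 *	int payload = dataref >> SKB_DATAREF_SHIFT;	(payload-only references)
 */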
enum {
SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
};
enum {
SKB_GSO_TCPV4 = 1 << 0,
/* This indicates the skb is from an untrusted source. */
SKB_GSO_DODGY = 1 << 1,
/* This indicates the tcp segment has CWR set. */
SKB_GSO_TCP_ECN = 1 << 2,
__SKB_GSO_TCP_FIXEDID = 1 << 3,
SKB_GSO_TCPV6 = 1 << 4,
SKB_GSO_FCOE = 1 << 5,
SKB_GSO_GRE = 1 << 6,
SKB_GSO_GRE_CSUM = 1 << 7,
SKB_GSO_IPXIP4 = 1 << 8,
SKB_GSO_IPXIP6 = 1 << 9,
SKB_GSO_UDP_TUNNEL = 1 << 10,
SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
SKB_GSO_PARTIAL = 1 << 12,
SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
SKB_GSO_SCTP = 1 << 14,
SKB_GSO_ESP = 1 << 15,
SKB_GSO_UDP = 1 << 16,
SKB_GSO_UDP_L4 = 1 << 17,
SKB_GSO_FRAGLIST = 1 << 18,
SKB_GSO_TCP_ACCECN = 1 << 19,
/* These indirectly map onto the same netdev feature.
* If NETIF_F_TSO_MANGLEID is set it may mangle both inner and outer IDs.
*/
SKB_GSO_TCP_FIXEDID = 1 << 30,
SKB_GSO_TCP_FIXEDID_INNER = 1 << 31,
};
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif
#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
enum skb_tstamp_type {
SKB_CLOCK_REALTIME,
SKB_CLOCK_MONOTONIC,
SKB_CLOCK_TAI,
__SKB_CLOCK_MAX = SKB_CLOCK_TAI,
};
/**
* DOC: Basic sk_buff geometry
*
* struct sk_buff itself is a metadata structure and does not hold any packet
* data. All the data is held in associated buffers.
*
* &sk_buff.head points to the main "head" buffer. The head buffer is divided
* into two parts:
*
* - data buffer, containing headers and sometimes payload;
* this is the part of the skb operated on by the common helpers
* such as skb_put() or skb_pull();
* - shared info (struct skb_shared_info) which holds an array of pointers
* to read-only data in the (page, offset, length) format.
*
* Optionally &skb_shared_info.frag_list may point to another skb.
*
* Basic diagram may look like this::
*
* ---------------
* | sk_buff |
* ---------------
* ,--------------------------- + head
* / ,----------------- + data
* / / ,----------- + tail
* | | | , + end
* | | | |
* v v v v
* -----------------------------------------------
* | headroom | data | tailroom | skb_shared_info |
* -----------------------------------------------
* + [page frag]
* + [page frag]
* + [page frag]
* + [page frag] ---------
* + frag_list --> | sk_buff |
* ---------
*
*/
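/*
 * A minimal sketch of how the head buffer is typically carved up, assuming
 * the allocation succeeds and @buf holds 64 bytes of payload::
 *
 *	struct sk_buff *skb = alloc_skb(128 + 64, GFP_KERNEL);
 *
 *	skb_reserve(skb, 128);		(move data/tail forward: 128 bytes headroom)
 *	skb_put_data(skb, buf, 64);	(append the payload, advancing tail)
 */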
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
* @tstamp: Time we arrived/left
* @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
* for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
* @list: queue head
* @ll_node: anchor in an llist (eg socket defer_list)
* @sk: Socket we are owned by
* @dev: Device we arrived on/are leaving by
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
* @len: Length of actual data
* @data_len: Data length
* @mac_len: Length of link layer header
* @hdr_len: writable header length of cloned skb
* @csum: Checksum (must include start/offset pair)
* @csum_start: Offset from skb->head where checksumming should start
* @csum_offset: Offset from csum_start where checksum should be stored
* @priority: Packet queueing priority
* @ignore_df: allow local fragmentation
* @cloned: Head may be cloned (check refcnt to be sure)
* @ip_summed: Driver fed us an IP checksum
* @nohdr: Payload reference only, must not modify header
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
* @inner_protocol_type: whether the inner protocol is
* ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
* @remcsum_offload: remote checksum offload is enabled
* @offload_fwd_mark: Packet was L2-forwarded in hardware
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
* @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
* @protocol: Packet protocol from driver
* @destructor: Destruct function
* @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
* @_sk_redir: socket redirection information for skmsg
* @_nfct: Associated connection, if any (with nfctinfo bits)
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @head_frag: skb was allocated from page fragments,
* not allocated by kmalloc() or vmalloc().
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @pp_recycle: mark the packet for recycling instead of freeing (implies
* page_pool support on driver)
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
* ports.
* @sw_hash: indicates hash was computed in software stack
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @encapsulation: indicates the inner headers in the skbuff are valid
* @encap_hdr_csum: software checksum is needed
* @csum_valid: checksum is already valid
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
* @csum_complete_sw: checksum was completed by software
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
* @unreadable: indicates that at least 1 of the fragments in this skb is
* unreadable.
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
* @tstamp_type: When set, skb->tstamp has the
* delivery_time clock base of skb->tstamp.
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
* @alloc_cpu: CPU which did the skb allocation.
* @secmark: security marking
* @mark: Generic packet mark
* @reserved_tailroom: (aka @mark) number of bytes of free space available
* at the tail of an sk_buff
* @vlan_all: vlan fields (proto & tci)
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
* @inner_ipproto: (aka @inner_protocol) stores ipproto when
* skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
* @inner_transport_header: Inner transport layer header (encapsulation)
* @inner_network_header: Network layer header (encapsulation)
* @inner_mac_header: Link layer header (encapsulation)
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
* @kcov_handle: KCOV remote handle for remote coverage collection
* @tail: Tail pointer
* @end: End pointer
* @head: Head of buffer
* @data: Data head pointer
* @truesize: Buffer size
* @users: User count - see {datagram,tcp}.c
* @extensions: allocated extensions, valid if active_extensions is nonzero
*/
struct sk_buff {
union {
struct {
/* These two members must be first to match sk_buff_head. */
struct sk_buff *next;
struct sk_buff *prev;
union {
struct net_device *dev;
/* Some protocols might use this space to store information,
* while device pointer would be NULL.
* UDP receive path is one user.
*/
unsigned long dev_scratch;
};
};
struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
struct list_head list;
struct llist_node ll_node;
};
struct sock *sk;
union {
ktime_t tstamp;
u64 skb_mstamp_ns; /* earliest departure time */
};
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
char cb[48] __aligned(8);
union {
struct {
unsigned long _skb_refdst;
void (*destructor)(struct sk_buff *skb);
};
struct list_head tcp_tsorted_anchor;
#ifdef CONFIG_NET_SOCK_MSG
unsigned long _sk_redir;
#endif
};
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
unsigned long _nfct;
#endif
unsigned int len,
data_len;
__u16 mac_len,
hdr_len;
/* Following fields are _not_ copied in __copy_skb_header()
* Note that queue_mapping is here mostly to fill a hole.
*/
__u16 queue_mapping;
/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK (1 << 7)
#else
#define CLONED_MASK 1
#endif
#define CLONED_OFFSET offsetof(struct sk_buff, __cloned_offset)
/* private: */
__u8 __cloned_offset[0];
/* public: */
__u8 cloned:1,
nohdr:1,
fclone:2,
peeked:1,
head_frag:1,
pfmemalloc:1,
pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
__u8 active_extensions;
#endif
/* Fields enclosed in headers group are copied
* using a single memcpy() in __copy_skb_header()
*/
struct_group(headers,
/* private: */
__u8 __pkt_type_offset[0];
/* public: */
__u8 pkt_type:3; /* see PKT_TYPE_MAX */
__u8 ignore_df:1;
__u8 dst_pending_confirm:1;
__u8 ip_summed:2;
__u8 ooo_okay:1;
/* private: */
__u8 __mono_tc_offset[0];
/* public: */
__u8 tstamp_type:2; /* See skb_tstamp_type */
#ifdef CONFIG_NET_XGRESS
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
__u8 tc_skip_classify:1;
#endif
__u8 remcsum_offload:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 inner_protocol_type:1;
__u8 l4_hash:1;
__u8 sw_hash:1;
#ifdef CONFIG_WIRELESS
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
#endif
__u8 no_fcs:1;
/* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
#if IS_ENABLED(CONFIG_IP_VS)
__u8 ipvs_property:1;
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
__u8 nf_trace:1;
#endif
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
__u8 offload_l3_fwd_mark:1;
#endif
__u8 redirected:1;
#ifdef CONFIG_NET_REDIRECT
__u8 from_ingress:1;
#endif
#ifdef CONFIG_NETFILTER_SKIP_EGRESS
__u8 nf_skip_egress:1;
#endif
#ifdef CONFIG_SKB_DECRYPTED
__u8 decrypted:1;
#endif
__u8 slow_gro:1;
#if IS_ENABLED(CONFIG_IP_SCTP)
__u8 csum_not_inet:1;
#endif
__u8 unreadable:1;
#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
u16 alloc_cpu;
union {
__wsum csum;
struct {
__u16 csum_start;
__u16 csum_offset;
};
};
__u32 priority;
int skb_iif;
__u32 hash;
union {
u32 vlan_all;
struct {
__be16 vlan_proto;
__u16 vlan_tci;
};
};
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
union {
unsigned int napi_id;
unsigned int sender_cpu;
};
#endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
#endif
union {
__u32 mark;
__u32 reserved_tailroom;
};
union {
__be16 inner_protocol;
__u8 inner_ipproto;
};
__u16 inner_transport_header;
__u16 inner_network_header;
__u16 inner_mac_header;
__be16 protocol;
__u16 transport_header;
__u16 network_header;
__u16 mac_header;
#ifdef CONFIG_KCOV
u64 kcov_handle;
#endif
); /* end headers group */
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
sk_buff_data_t end;
unsigned char *head,
*data;
unsigned int truesize;
refcount_t users;
#ifdef CONFIG_SKB_EXTENSIONS
/* only usable after checking ->active_extensions != 0 */
struct skb_ext *extensions;
#endif
};
/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX (7 << 5)
#else
#define PKT_TYPE_MAX 7
#endif
#define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
/* if you move tc_at_ingress or tstamp_type
* around, you also must adapt these constants.
*/
#ifdef __BIG_ENDIAN_BITFIELD
#define SKB_TSTAMP_TYPE_MASK (3 << 6)
#define SKB_TSTAMP_TYPE_RSHIFT (6)
#define TC_AT_INGRESS_MASK (1 << 5)
#else
#define SKB_TSTAMP_TYPE_MASK (3)
#define TC_AT_INGRESS_MASK (1 << 2)
#endif
#define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset)
#ifdef __KERNEL__
/*
* Handling routines are only of interest to the kernel
*/
#define SKB_ALLOC_FCLONE 0x01
#define SKB_ALLOC_RX 0x02
#define SKB_ALLOC_NAPI 0x04
/**
* skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
* @skb: buffer
*/
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
return unlikely(skb->pfmemalloc);
}
/*
* skb might have a dst pointer attached, refcounted or not.
* _skb_refdst low order bit is set if refcount was _not_ taken
*/
#define SKB_DST_NOREF 1UL
#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
/**
* skb_dst - returns skb dst_entry
* @skb: buffer
*
* Returns: skb dst_entry, regardless of reference taken or not.
*/
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
/* If refdst was not refcounted, check we still are in a
* rcu_read_lock section
*/
WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
!rcu_read_lock_held() &&
!rcu_read_lock_bh_held());
return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
static inline void skb_dst_check_unset(struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE((skb->_skb_refdst & SKB_DST_PTRMASK) &&
!(skb->_skb_refdst & SKB_DST_NOREF));
}
/**
* skb_dstref_steal() - return current dst_entry value and clear it
* @skb: buffer
*
* Resets skb dst_entry without adjusting its reference count. Useful in
* cases where dst_entry needs to be temporarily reset and restored.
* Note that the returned value cannot be used directly because it
* might contain SKB_DST_NOREF bit.
*
* When in doubt, prefer skb_dst_drop() over skb_dstref_steal() to correctly
* handle dst_entry reference counting.
*
* Returns: original skb dst_entry.
*/
static inline unsigned long skb_dstref_steal(struct sk_buff *skb)
{
unsigned long refdst = skb->_skb_refdst;
skb->_skb_refdst = 0;
return refdst;
}
/**
* skb_dstref_restore() - restore skb dst_entry removed via skb_dstref_steal()
* @skb: buffer
* @refdst: dst entry from a call to skb_dstref_steal()
*/
static inline void skb_dstref_restore(struct sk_buff *skb, unsigned long refdst)
{
skb_dst_check_unset(skb);
skb->_skb_refdst = refdst;
}
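/*
 * A typical (sketched) pairing; the caller is expected to restore the entry
 * before the skb leaves its control::
 *
 *	unsigned long refdst = skb_dstref_steal(skb);
 *
 *	(... operate on the skb as if it carried no dst ...)
 *	skb_dstref_restore(skb, refdst);
 */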
/**
* skb_dst_set - sets skb dst
* @skb: buffer
* @dst: dst entry
*
* Sets skb dst, assuming a reference was taken on dst and should
* be released by skb_dst_drop()
*/
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
skb_dst_check_unset(skb);
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
/**
* skb_dst_set_noref - sets skb dst, hopefully, without taking reference
* @skb: buffer
* @dst: dst entry
*
* Sets skb dst, assuming a reference was not taken on dst.
* If dst entry is cached, we do not take reference and dst_release
* will be avoided by refdst_drop. If dst entry is not cached, we take
* reference, so that last dst_release can destroy the dst immediately.
*/
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
skb_dst_check_unset(skb);
WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
/**
* skb_dst_is_noref - Test if skb dst isn't refcounted
* @skb: buffer
*/
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}
/* For mangling skb->pkt_type from user space side from applications
* such as nft, tc, etc, we only allow a conservative subset of
* possible pkt_types to be set.
*/
static inline bool skb_pkt_type_ok(u32 ptype)
{
return ptype <= PACKET_OTHERHOST;
}
/**
* skb_napi_id - Returns the skb's NAPI id
* @skb: buffer
*/
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
return skb->napi_id;
#else
return 0;
#endif
}
static inline bool skb_wifi_acked_valid(const struct sk_buff *skb)
{
#ifdef CONFIG_WIRELESS
return skb->wifi_acked_valid;
#else
return 0;
#endif
}
/**
* skb_unref - decrement the skb's reference count
* @skb: buffer
*
* Returns: true if we can free the skb.
*/
static inline bool skb_unref(struct sk_buff *skb)
{
if (unlikely(!skb))
return false;
if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1))
smp_rmb();
else if (likely(!refcount_dec_and_test(&skb->users)))
return false;
return true;
}
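/*
 * Sketch of the intended use, mirroring the pattern in the freeing paths:
 * only the holder of the last reference actually frees::
 *
 *	if (skb_unref(skb))
 *		__kfree_skb(skb);
 */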
static inline bool skb_data_unref(const struct sk_buff *skb,
struct skb_shared_info *shinfo)
{
int bias;
if (!skb->cloned)
return true;
bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
if (atomic_read(&shinfo->dataref) == bias)
smp_rmb();
else if (atomic_sub_return(bias, &shinfo->dataref))
return false;
return true;
}
void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason reason);
static inline void
kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
sk_skb_reason_drop(NULL, skb, reason);
}
/**
* kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
* @skb: buffer to free
*/
static inline void kfree_skb(struct sk_buff *skb)
{
kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
void skb_release_head_state(struct sk_buff *skb);
void kfree_skb_list_reason(struct sk_buff *segs,
enum skb_drop_reason reason);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
static inline void kfree_skb_list(struct sk_buff *segs)
{
kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
}
#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
{
return kfree_skb(skb);
}
#endif
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
bool *fragstolen, int *delta_truesize);
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
void skb_attempt_defer_free(struct sk_buff *skb);
u32 napi_skb_cache_get_bulk(void **skbs, u32 n);
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
struct sk_buff *slab_build_skb(void *data);
/**
* alloc_skb - allocate a network buffer
* @size: size to allocate
* @priority: allocation mask
*
* This function is a convenient wrapper around __alloc_skb().
*/
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
unsigned long data_len,
int max_page_order,
int *errcode,
gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
struct sk_buff skb1;
struct sk_buff skb2;
refcount_t fclone_ref;
};
/**
* skb_fclone_busy - check if fclone is busy
* @sk: socket
* @skb: buffer
*
* Returns: true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
* so we also check that didn't happen.
*/
static inline bool skb_fclone_busy(const struct sock *sk,
const struct sk_buff *skb)
{
const struct sk_buff_fclones *fclones;
fclones = container_of(skb, struct sk_buff_fclones, skb1);
return skb->fclone == SKB_FCLONE_ORIG &&
refcount_read(&fclones->fclone_ref) > 1 &&
READ_ONCE(fclones->skb2.sk) == sk;
}
/**
* alloc_skb_fclone - allocate a network buffer from fclone cache
* @size: size to allocate
* @priority: allocation mask
*
* This function is a convenient wrapper around __alloc_skb().
*/
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
gfp_t gfp_mask)
{
return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
/**
* skb_pad - zero pad the tail of an skb
* @skb: buffer to pad
* @pad: space to pad
*
* Ensure that a buffer is followed by a padding area that is zero
* filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire.
*
* May return error in out of memory cases. The skb is freed on error.
*/
static inline int skb_pad(struct sk_buff *skb, int pad)
{
return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
int offset, size_t size, size_t max_frags);
struct skb_seq_state {
__u32 lower_offset;
__u32 upper_offset;
__u32 frag_idx;
__u32 stepped_offset;
struct sk_buff *root_skb;
struct sk_buff *cur_skb;
__u8 *frag_data;
__u32 frag_off;
};
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);
int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config);
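/*
 * A minimal sketch of the sequential reader declared above, walking the whole
 * skb; skb_abort_seq_read() is only required when stopping before the end::
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		(... process len bytes at data ...)
 *		consumed += len;
 *	}
 */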
/*
* Packet hash types specify the type of hash in skb_set_hash.
*
* Hash types refer to the protocol layer addresses which are used to
* construct a packet's hash. The hashes are used to differentiate or identify
* flows of the protocol layer for the hash type. Hash types are either
* layer-2 (L2), layer-3 (L3), or layer-4 (L4).
*
* Properties of hashes:
*
* 1) Two packets in different flows have different hash values
* 2) Two packets in the same flow should have the same hash value
*
* A hash at a higher layer is considered to be more specific. A driver should
* set the most specific hash possible.
*
* A driver cannot indicate a more specific hash than the layer at which a hash
* was computed. For instance an L3 hash cannot be set as an L4 hash.
*
* A driver may indicate a hash level which is less specific than the
* actual layer the hash was computed on. For instance, a hash computed
* at L4 may be considered an L3 hash. This should only be done if the
* driver can't unambiguously determine that the HW computed the hash at
* the higher layer. Note that the "should" in the second property above
* permits this.
*/
enum pkt_hash_types {
PKT_HASH_TYPE_NONE, /* Undefined type */
PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
};
static inline void skb_clear_hash(struct sk_buff *skb)
{
skb->hash = 0;
skb->sw_hash = 0;
skb->l4_hash = 0;
}
static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
if (!skb->l4_hash)
skb_clear_hash(skb);
}
static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
skb->l4_hash = is_l4;
skb->sw_hash = is_sw;
skb->hash = hash;
}
static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
/* Used by drivers to set hash from HW */
__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}
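/*
 * For example, a receive handler could report a hardware-computed 4-tuple
 * hash like this, rx_hash being a hypothetical value taken from the receive
 * descriptor::
 *
 *	skb_set_hash(skb, rx_hash, PKT_HASH_TYPE_L4);
 */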
static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
__skb_set_hash(skb, hash, true, is_l4);
}
u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb);
static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
return __skb_get_hash_symmetric_net(NULL, skb);
}
void __skb_get_hash_net(const struct net *net, struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
const struct flow_keys_basic *keys, int hlen);
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
const void *data, int hlen_proto);
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
struct bpf_flow_dissector;
u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
__be16 proto, int nhoff, int hlen, unsigned int flags);
bool __skb_flow_dissect(const struct net *net,
const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, const void *data,
__be16 proto, int nhoff, int hlen, unsigned int flags);
static inline bool skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, unsigned int flags)
{
return __skb_flow_dissect(NULL, skb, flow_dissector,
target_container, NULL, 0, 0, 0, flags);
}
static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
struct flow_keys *flow,
unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
flow, NULL, 0, 0, 0, flags);
}
static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
const struct sk_buff *skb,
struct flow_keys_basic *flow,
const void *data, __be16 proto,
int nhoff, int hlen, unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
data, proto, nhoff, hlen, flags);
}
void skb_flow_dissect_meta(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
/* Gets a skb connection tracking info, ctinfo map should be a
* map of mapsize to translate enum ip_conntrack_info states
* to user states.
*/
void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
u16 *ctinfo_map, size_t mapsize,
bool post_ct, u16 zone);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
void skb_flow_dissect_hash(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
__skb_get_hash_net(net, skb);
return skb->hash;
}
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
__skb_get_hash_net(NULL, skb);
return skb->hash;
}
static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
if (!skb->l4_hash && !skb->sw_hash) {
struct flow_keys keys;
__u32 hash = __get_hash_from_flowi6(fl6, &keys);
__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
return skb->hash;
}
__u32 skb_get_hash_perturb(const struct sk_buff *skb,
const siphash_key_t *perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
return skb->hash;
}
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
to->hash = from->hash;
to->sw_hash = from->sw_hash;
to->l4_hash = from->l4_hash;
}
static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
const struct sk_buff *skb2)
{
#ifdef CONFIG_SKB_DECRYPTED
return skb2->decrypted - skb1->decrypted;
#else
return 0;
#endif
}
static inline bool skb_is_decrypted(const struct sk_buff *skb)
{
#ifdef CONFIG_SKB_DECRYPTED
return skb->decrypted;
#else
return false;
#endif
}
static inline void skb_copy_decrypted(struct sk_buff *to,
const struct sk_buff *from)
{
#ifdef CONFIG_SKB_DECRYPTED
to->decrypted = from->decrypted;
#endif
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
return skb->head + skb->end;
}
static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end;
}
static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
{
skb->end = offset;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
return skb->end;
}
static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end - skb->head;
}
static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
{
skb->end = skb->head + offset;
}
#endif
extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops;
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg, bool devmem);
void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
struct net_devmem_dmabuf_binding;
int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, struct iov_iter *from,
size_t length,
struct net_devmem_dmabuf_binding *binding);
int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
struct iov_iter *from, size_t length);
static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
struct msghdr *msg, int len)
{
return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len,
NULL);
}
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg,
struct net_devmem_dmabuf_binding *binding);
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
return &skb_shinfo(skb)->hwtstamps;
}
static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
return is_zcopy ? skb_uarg(skb) : NULL;
}
static inline bool skb_zcopy_pure(const struct sk_buff *skb)
{
return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
}
static inline bool skb_zcopy_managed(const struct sk_buff *skb)
{
return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS;
}
static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
const struct sk_buff *skb2)
{
return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
}
static inline void net_zcopy_get(struct ubuf_info *uarg)
{
refcount_inc(&uarg->refcnt);
}
static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
{
skb_shinfo(skb)->destructor_arg = uarg;
skb_shinfo(skb)->flags |= uarg->flags;
}
static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
bool *have_ref)
{
if (skb && uarg && !skb_zcopy(skb)) {
if (unlikely(have_ref && *have_ref))
*have_ref = false;
else
net_zcopy_get(uarg);
skb_zcopy_init(skb, uarg);
}
}
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
}
static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}
static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
static inline void net_zcopy_put(struct ubuf_info *uarg)
{
if (uarg)
uarg->ops->complete(NULL, uarg, true);
}
static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
if (uarg) {
if (uarg->ops == &msg_zerocopy_ubuf_ops)
msg_zerocopy_put_abort(uarg, have_uref);
else if (have_uref)
net_zcopy_put(uarg);
}
}
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
if (!skb_zcopy_is_nouarg(skb))
uarg->ops->complete(skb, uarg, zerocopy_success);
skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
}
void __skb_zcopy_downgrade_managed(struct sk_buff *skb);
static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
{
if (unlikely(skb_zcopy_managed(skb)))
__skb_zcopy_downgrade_managed(skb);
}
/* Return true if frags in this skb are readable by the host. */
static inline bool skb_frags_readable(const struct sk_buff *skb)
{
return !skb->unreadable;
}
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
}
static inline void skb_poison_list(struct sk_buff *skb)
{
#ifdef CONFIG_DEBUG_NET
skb->next = SKB_LIST_POISON_NEXT;
#endif
}
/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb) \
for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
(skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
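/*
 * A minimal usage sketch for handing the GSO fragments of a segmented skb to
 * the hardware one by one::
 *
 *	struct sk_buff *seg, *next;
 *
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		(... transmit seg, or free it on error ...)
 *	}
 */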
static inline void skb_list_del_init(struct sk_buff *skb)
{
__list_del_entry(&skb->list);
skb_mark_not_on_list(skb);
}
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
*
* Returns true if the queue is empty, false otherwise.
*/
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
return list->next == (const struct sk_buff *) list;
}
/**
* skb_queue_empty_lockless - check if a queue is empty
* @list: queue head
*
* Returns true if the queue is empty, false otherwise.
* This variant can be used in lockless contexts.
*/
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
return READ_ONCE(list->next) == (const struct sk_buff *) list;
}
/**
* skb_queue_is_last - check if skb is the last entry in the queue
* @list: queue head
* @skb: buffer
*
* Returns true if @skb is the last buffer on the list.
*/
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return skb->next == (const struct sk_buff *) list;
}
/**
* skb_queue_is_first - check if skb is the first entry in the queue
* @list: queue head
* @skb: buffer
*
* Returns true if @skb is the first buffer on the list.
*/
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return skb->prev == (const struct sk_buff *) list;
}
/**
* skb_queue_next - return the next packet in the queue
* @list: queue head
* @skb: current buffer
*
* Return the next packet in @list after @skb. It is only valid to
* call this if skb_queue_is_last() evaluates to false.
*/
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
/* This BUG_ON may seem severe, but if we just return then we
* are going to dereference garbage.
*/
BUG_ON(skb_queue_is_last(list, skb));
return skb->next;
}
/**
* skb_queue_prev - return the prev packet in the queue
* @list: queue head
* @skb: current buffer
*
* Return the prev packet in @list before @skb. It is only valid to
* call this if skb_queue_is_first() evaluates to false.
*/
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
/* This BUG_ON may seem severe, but if we just return then we
* are going to dereference garbage.
*/
BUG_ON(skb_queue_is_first(list, skb));
return skb->prev;
}
/**
* skb_get - reference buffer
* @skb: buffer to reference
*
* Makes another reference to a socket buffer and returns a pointer
* to the buffer.
*/
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
refcount_inc(&skb->users);
return skb;
}
/*
* If users == 1, we are the only owner and can avoid redundant atomic changes.
*/
/**
* skb_cloned - is the buffer a clone
* @skb: buffer to check
*
* Returns true if the buffer was generated with skb_clone() and is
* one of multiple shared copies of the buffer. Cloned buffers are
* shared data so must not be written to under normal circumstances.
*/
static inline int skb_cloned(const struct sk_buff *skb)
{
return skb->cloned &&
(atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_cloned(skb))
return pskb_expand_head(skb, 0, 0, pri);
return 0;
}
/* This variant of skb_unclone() makes sure skb->truesize
* and skb_end_offset() are not changed, whenever a new skb->head is needed.
*
* Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X))
* when various debugging features are in place.
*/
int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
{
might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_cloned(skb))
return __skb_unclone_keeptruesize(skb, pri);
return 0;
}
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
*
* Returns true if modifying the header part of the buffer requires
* the data to be copied.
*/
static inline int skb_header_cloned(const struct sk_buff *skb)
{
int dataref;
if (!skb->cloned)
return 0;
dataref = atomic_read(&skb_shinfo(skb)->dataref);
dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
return dataref != 1;
}
static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_header_cloned(skb))
return pskb_expand_head(skb, 0, 0, pri);
return 0;
}
/**
* __skb_header_release() - allow clones to use the headroom
* @skb: buffer to operate on
*
* See "DOC: dataref and headerless skbs".
*/
static inline void __skb_header_release(struct sk_buff *skb)
{
skb->nohdr = 1;
atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}
/**
* skb_shared - is the buffer shared
* @skb: buffer to check
*
* Returns true if more than one person has a reference to this
* buffer.
*/
static inline int skb_shared(const struct sk_buff *skb)
{
return refcount_read(&skb->users) != 1;
}
/**
* skb_share_check - check if buffer is shared and if so clone it
* @skb: buffer to check
* @pri: priority for memory allocation
*
* If the buffer is shared the buffer is cloned and the old copy
* drops a reference. A new clone with a single reference is returned.
* If the buffer is not shared the original buffer is returned. When
* called from interrupt context or with spinlocks held, @pri must
* be %GFP_ATOMIC.
*
* NULL is returned on a memory allocation failure.
*/
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);
if (likely(nskb))
consume_skb(skb);
else
kfree_skb(skb);
skb = nskb;
}
return skb;
}
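/*
 * Typical (sketched) use in a receive handler running in atomic context::
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */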
/*
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader while also
* forwarding, and a couple of other messy ones. The normal one is
* tcpdumping a packet that's being forwarded.
*/
/**
* skb_unshare - make a copy of a shared buffer
* @skb: buffer to check
* @pri: priority for memory allocation
*
* If the socket buffer is a clone then this function creates a new
* copy of the data, drops a reference count on the old copy and returns
* the new copy with the reference count at 1. If the buffer is not a clone
* the original buffer is returned. When called with a spinlock held or
* from interrupt context, @pri must be %GFP_ATOMIC.
*
* %NULL is returned on a memory allocation failure.
*/
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
gfp_t pri)
{
might_sleep_if(gfpflags_allow_blocking(pri));
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
/* Free our shared copy */
if (likely(nskb))
consume_skb(skb);
else
kfree_skb(skb);
skb = nskb;
}
return skb;
}
/**
* skb_peek - peek at the head of an &sk_buff_head
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
* be careful with this one. A peek leaves the buffer on the
* list and someone else may run off with it. You must hold
* the appropriate locks or have a private queue to do this.
*
* Returns %NULL for an empty list or a pointer to the head element.
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
struct sk_buff *skb = list_->next;
if (skb == (struct sk_buff *)list_)
skb = NULL;
return skb;
}
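/*
 * Example (illustrative sketch): peeking is only safe while the queue cannot
 * change underneath us, e.g. while holding the queue lock. The names below
 * are hypothetical.
 *
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb = skb_peek(queue);
 *	if (skb)
 *		example_inspect(skb);	// skb stays on the queue
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */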
/**
* __skb_peek - peek at the head of a non-empty &sk_buff_head
* @list_: list to peek at
*
* Like skb_peek(), but the caller knows that the list is not empty.
*/
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
return list_->next;
}
/**
* skb_peek_next - peek skb following the given one from a queue
* @skb: skb to start from
* @list_: list to peek at
*
* Returns %NULL when the end of the list is met or a pointer to the
* next element. The reference count is not incremented and the
* reference is therefore volatile. Use with caution.
*/
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
const struct sk_buff_head *list_)
{
struct sk_buff *next = skb->next;
if (next == (struct sk_buff *)list_)
next = NULL;
return next;
}
/**
* skb_peek_tail - peek at the tail of an &sk_buff_head
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
* be careful with this one. A peek leaves the buffer on the
* list and someone else may run off with it. You must hold
* the appropriate locks or have a private queue to do this.
*
* Returns %NULL for an empty list or a pointer to the tail element.
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
struct sk_buff *skb = READ_ONCE(list_->prev);
if (skb == (struct sk_buff *)list_)
skb = NULL;
return skb;
}
/**
* skb_queue_len - get queue length
* @list_: list to measure
*
* Return the length of an &sk_buff queue.
*/
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
return list_->qlen;
}
/**
* skb_queue_len_lockless - get queue length
* @list_: list to measure
*
* Return the length of an &sk_buff queue.
* This variant can be used in lockless contexts.
*/
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
return READ_ONCE(list_->qlen);
}
/**
* __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
* @list: queue to initialize
*
* This initializes only the list and queue length aspects of
 * an sk_buff_head object. This allows the list aspects of an
 * sk_buff_head to be initialized without reinitializing things like
* the spinlock. It can also be used for on-stack sk_buff_head
* objects where the spinlock is known to not be used.
*/
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
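/*
 * Example (illustrative sketch): an on-stack queue whose spinlock is never
 * taken can be initialized with __skb_queue_head_init() and then manipulated
 * with the lockless __skb_queue_tail()/__skb_dequeue() helpers defined
 * further down in this header. "some_skb" is a placeholder.
 *
 *	struct sk_buff_head tmp;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&tmp);
 *	__skb_queue_tail(&tmp, some_skb);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		consume_skb(skb);
 */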
/*
* This function creates a split out lock class for each invocation;
* this is needed for now since a whole lot of users of the skb-queue
* infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or the drivers will need annotation so that the main types
 * of usage can be consolidated into 3 classes.
*/
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
__skb_queue_head_init(list);
}
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
skb_queue_head_init(list);
lockdep_set_class(&list->lock, class);
}
/*
* Insert an sk_buff on a list.
*
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
/* See skb_queue_empty_lockless() and skb_peek_tail()
* for the opposite READ_ONCE()
*/
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk);
WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk);
WRITE_ONCE(list->qlen, list->qlen + 1);
}
static inline void __skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *next)
{
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;
WRITE_ONCE(first->prev, prev);
WRITE_ONCE(prev->next, first);
WRITE_ONCE(last->next, next);
WRITE_ONCE(next->prev, last);
}
/**
* skb_queue_splice - join two skb lists, this is designed for stacks
* @list: the new list to add
* @head: the place to add it in the first list
*/
static inline void skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
}
}
/**
* skb_queue_splice_init - join two skb lists and reinitialise the emptied list
* @list: the new list to add
* @head: the place to add it in the first list
*
* The list at @list is reinitialised
*/
static inline void skb_queue_splice_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}
/**
* skb_queue_splice_tail - join two skb lists, each list being a queue
* @list: the new list to add
* @head: the place to add it in the first list
*/
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
}
}
/**
* skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
* @list: the new list to add
* @head: the place to add it in the first list
*
* Each of the lists is a queue.
* The list at @list is reinitialised
*/
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}
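/*
 * Example (illustrative sketch): a common pattern is to collect packets on a
 * private, unlocked list and then append them to a shared queue in one go,
 * so the shared lock is taken only once per batch. "shared" is a placeholder
 * for a locked &sk_buff_head.
 *
 *	struct sk_buff_head batch;
 *
 *	__skb_queue_head_init(&batch);
 *	// ... fill batch with __skb_queue_tail() ...
 *	spin_lock(&shared->lock);
 *	skb_queue_splice_tail_init(&batch, shared);	// batch is now empty
 *	spin_unlock(&shared->lock);
 */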
/**
 * __skb_queue_after - queue a buffer after a given buffer in a list
* @list: list to use
* @prev: place after this buffer
* @newsk: buffer to queue
*
 * Queue a buffer in the middle of a list. This function takes no locks
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
*/
static inline void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
__skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
}
void skb_append(struct sk_buff *old, struct sk_buff *newsk,
struct sk_buff_head *list);
static inline void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
struct sk_buff *newsk)
{
__skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
}
/**
* __skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the start of a list. This function takes no locks
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
*/
static inline void __skb_queue_head(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
/**
* __skb_queue_tail - queue a buffer at the list tail
* @list: list to use
* @newsk: buffer to queue
*
* Queue a buffer at the end of a list. This function takes no locks
* and you must therefore hold required locks before calling it.
*
* A buffer cannot be placed on two lists at the same time.
*/
static inline void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
/*
* remove sk_buff from list. _Must_ be called atomically, and with
* the list known..
*/
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
WRITE_ONCE(list->qlen, list->qlen - 1);
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
WRITE_ONCE(next->prev, prev);
WRITE_ONCE(prev->next, next);
}
/**
* __skb_dequeue - remove from the head of the queue
* @list: list to dequeue from
*
* Remove the head of the list. This function does not take any locks
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
__skb_unlink(skb, list);
return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
/**
* __skb_dequeue_tail - remove from the tail of the queue
* @list: list to dequeue from
*
* Remove the tail of the list. This function does not take any locks
* so must be used with appropriate locks held only. The tail item is
* returned or %NULL if the list is empty.
*/
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
if (skb)
__skb_unlink(skb, list);
return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
return skb->data_len;
}
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
return skb->len - skb->data_len;
}
static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
unsigned int i, len = 0;
for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
return len;
}
static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
return skb_headlen(skb) + __skb_pagelen(skb);
}
static inline void skb_frag_fill_netmem_desc(skb_frag_t *frag,
netmem_ref netmem, int off,
int size)
{
frag->netmem = netmem;
frag->offset = off;
skb_frag_size_set(frag, size);
}
static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
struct page *page,
int off, int size)
{
skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size);
}
static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo,
int i, netmem_ref netmem,
int off, int size)
{
skb_frag_t *frag = &shinfo->frags[i];
skb_frag_fill_netmem_desc(frag, netmem, off, size);
}
static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
int i, struct page *page,
int off, int size)
{
__skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,
size);
}
/**
* skb_len_add - adds a number to len fields of skb
* @skb: buffer to add len to
* @delta: number of bytes to add
*/
static inline void skb_len_add(struct sk_buff *skb, int delta)
{
skb->len += delta;
skb->data_len += delta;
skb->truesize += delta;
}
/**
* __skb_fill_netmem_desc - initialise a fragment in an skb
* @skb: buffer containing fragment to be initialised
* @i: fragment index to initialise
* @netmem: the netmem to use for this fragment
 * @off: the offset to the data within @netmem
* @size: the length of the data
*
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @netmem.
*
* Does not take any additional reference on the fragment.
*/
static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
netmem_ref netmem, int off, int size)
{
struct page *page;
__skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
if (netmem_is_net_iov(netmem)) {
skb->unreadable = true;
return;
}
page = netmem_to_page(netmem);
/* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
*/
page = compound_head(page);
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
__skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
}
static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i,
netmem_ref netmem, int off, int size)
{
__skb_fill_netmem_desc(skb, i, netmem, off, size);
skb_shinfo(skb)->nr_frags = i + 1;
}
/**
* skb_fill_page_desc - initialise a paged fragment in an skb
* @skb: buffer containing fragment to be initialised
* @i: paged fragment index to initialise
* @page: the page to use for this fragment
 * @off: the offset to the data within @page
* @size: the length of the data
*
* As per __skb_fill_page_desc() -- initialises the @i'th fragment of
* @skb to point to @size bytes at offset @off within @page. In
* addition updates @skb such that @i is the last fragment.
*
* Does not take any additional reference on the fragment.
*/
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
}
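/*
 * Example (illustrative sketch): attaching one freshly allocated page as the
 * first paged fragment of an skb. Note that skb_fill_page_desc() takes no
 * page reference and does not update skb->len/data_len/truesize; the
 * skb_len_add() call below is a simplified way to account for a full page
 * (skb_add_rx_frag() bundles both steps).
 *
 *	struct page *page = dev_alloc_page();
 *
 *	if (page) {
 *		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
 *		skb_len_add(skb, PAGE_SIZE);
 *	}
 */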
/**
* skb_fill_page_desc_noacc - initialise a paged fragment in an skb
* @skb: buffer containing fragment to be initialised
* @i: paged fragment index to initialise
* @page: the page to use for this fragment
 * @off: the offset to the data within @page
* @size: the length of the data
*
 * Variant of skb_fill_page_desc() which does not handle pfmemalloc, for
 * use when the page is not owned by us.
*/
static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
struct page *page, int off,
int size)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
__skb_fill_page_desc_noacc(shinfo, i, page, off, size);
shinfo->nr_frags = i + 1;
}
void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
int off, int size, unsigned int truesize);
static inline void skb_add_rx_frag(struct sk_buff *skb, int i,
struct page *page, int off, int size,
unsigned int truesize)
{
skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size,
truesize);
}
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize);
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
return skb->head + skb->tail;
}
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
skb->tail = skb->data - skb->head;
}
static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
skb_reset_tail_pointer(skb);
skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
return skb->tail;
}
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
skb->tail = skb->data;
}
static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
skb->tail = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
static inline void skb_assert_len(struct sk_buff *skb)
{
#ifdef CONFIG_DEBUG_NET
if (WARN_ONCE(!skb->len, "%s\n", __func__))
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
#endif /* CONFIG_DEBUG_NET */
}
#if defined(CONFIG_FAIL_SKB_REALLOC)
void skb_might_realloc(struct sk_buff *skb);
#else
static inline void skb_might_realloc(struct sk_buff *skb) {}
#endif
/*
* Add data to an sk_buff
*/
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
return tmp;
}
static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
void *tmp = __skb_put(skb, len);
memset(tmp, 0, len);
return tmp;
}
static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
unsigned int len)
{
void *tmp = __skb_put(skb, len);
memcpy(tmp, data, len);
return tmp;
}
static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
*(u8 *)__skb_put(skb, 1) = val;
}
static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
void *tmp = skb_put(skb, len);
memset(tmp, 0, len);
return tmp;
}
static inline void *skb_put_data(struct sk_buff *skb, const void *data,
unsigned int len)
{
void *tmp = skb_put(skb, len);
memcpy(tmp, data, len);
return tmp;
}
static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
*(u8 *)skb_put(skb, 1) = val;
}
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
skb->data -= len;
skb->len += len;
return skb->data;
}
void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
	skb->len -= len;
	if (unlikely(skb->len < skb->data_len)) {
#if defined(CONFIG_DEBUG_NET)
skb->len += len;
pr_err("__skb_pull(len=%u)\n", len);
skb_dump(KERN_ERR, skb, false);
#endif
BUG();
}
return skb->data += len;
}
static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
void *skb_pull_data(struct sk_buff *skb, size_t len);
void *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline enum skb_drop_reason
pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
skb_might_realloc(skb);
if (likely(len <= skb_headlen(skb)))
return SKB_NOT_DROPPED_YET;
if (unlikely(len > skb->len))
return SKB_DROP_REASON_PKT_TOO_SMALL;
if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb))))
return SKB_DROP_REASON_NOMEM;
return SKB_NOT_DROPPED_YET;
}
static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (!pskb_may_pull(skb, len))
return NULL;
skb->len -= len;
return skb->data += len;
}
void skb_condense(struct sk_buff *skb);
/**
* skb_headroom - bytes at buffer head
* @skb: buffer to check
*
* Return the number of bytes of free space at the head of an &sk_buff.
*/
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
return skb->data - skb->head;
}
/**
* skb_tailroom - bytes at buffer end
* @skb: buffer to check
*
* Return the number of bytes of free space at the tail of an sk_buff
*/
static inline int skb_tailroom(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}
/**
* skb_availroom - bytes at buffer end
* @skb: buffer to check
*
* Return the number of bytes of free space at the tail of an sk_buff
* allocated by sk_stream_alloc()
*/
static inline int skb_availroom(const struct sk_buff *skb)
{
if (skb_is_nonlinear(skb))
return 0;
return skb->end - skb->tail - skb->reserved_tailroom;
}
/**
* skb_reserve - adjust headroom
* @skb: buffer to alter
* @len: bytes to move
*
* Increase the headroom of an empty &sk_buff by reducing the tail
* room. This is only allowed for an empty buffer.
*/
static inline void skb_reserve(struct sk_buff *skb, int len)
{
skb->data += len;
skb->tail += len;
}
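/*
 * Example (illustrative sketch): the usual way to build an outgoing packet is
 * to reserve headroom first, copy the payload with skb_put_data(), and then
 * prepend headers with skb_push() as the packet travels down the stack.
 * "hdr_len", "payload" and "payload_len" are placeholders.
 *
 *	skb = alloc_skb(hdr_len + payload_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);			// headroom for headers
 *	skb_put_data(skb, payload, payload_len);	// payload at the tail
 *	skb_push(skb, hdr_len);				// prepend the header
 */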
/**
* skb_tailroom_reserve - adjust reserved_tailroom
* @skb: buffer to alter
* @mtu: maximum amount of headlen permitted
* @needed_tailroom: minimum amount of reserved_tailroom
*
* Set reserved_tailroom so that headlen can be as large as possible but
* not larger than mtu and tailroom cannot be smaller than
* needed_tailroom.
* The required headroom should already have been reserved before using
* this function.
*/
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
unsigned int needed_tailroom)
{
SKB_LINEAR_ASSERT(skb);
if (mtu < skb_tailroom(skb) - needed_tailroom)
/* use at most mtu */
skb->reserved_tailroom = skb_tailroom(skb) - mtu;
else
/* use up to all available space */
skb->reserved_tailroom = needed_tailroom;
}
#define ENCAP_TYPE_ETHER 0
#define ENCAP_TYPE_IPPROTO 1
static inline void skb_set_inner_protocol(struct sk_buff *skb,
__be16 protocol)
{
skb->inner_protocol = protocol;
skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}
static inline void skb_set_inner_ipproto(struct sk_buff *skb,
__u8 ipproto)
{
skb->inner_ipproto = ipproto;
skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
skb->inner_mac_header = skb->mac_header;
skb->inner_network_header = skb->network_header;
skb->inner_transport_header = skb->transport_header;
}
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != (typeof(skb->mac_header))~0U;
}
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
if (!skb_mac_header_was_set(skb)) {
DEBUG_NET_WARN_ON_ONCE(1);
skb->mac_len = 0;
} else {
skb->mac_len = skb->network_header - skb->mac_header;
}
}
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
*skb)
{
return skb->head + skb->inner_transport_header;
}
static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
return skb_inner_transport_header(skb) - skb->data;
}
static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
long offset = skb->data - skb->head;
DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset);
skb->inner_transport_header = offset;
}
static inline void skb_set_inner_transport_header(struct sk_buff *skb,
const int offset)
{
skb_reset_inner_transport_header(skb);
skb->inner_transport_header += offset;
}
static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_network_header;
}
static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
long offset = skb->data - skb->head;
DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset);
skb->inner_network_header = offset;
}
static inline void skb_set_inner_network_header(struct sk_buff *skb,
const int offset)
{
skb_reset_inner_network_header(skb);
skb->inner_network_header += offset;
}
static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
{
return skb->inner_network_header > 0;
}
static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
}
static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
long offset = skb->data - skb->head;
DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset);
skb->inner_mac_header = offset;
}
static inline void skb_set_inner_mac_header(struct sk_buff *skb,
const int offset)
{
skb_reset_inner_mac_header(skb);
skb->inner_mac_header += offset;
}
static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
return skb->transport_header != (typeof(skb->transport_header))~0U;
}
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->head + skb->transport_header;
}
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
long offset = skb->data - skb->head;
DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset);
skb->transport_header = offset;
}
/**
* skb_reset_transport_header_careful - conditionally reset transport header
* @skb: buffer to alter
*
* Hardened version of skb_reset_transport_header().
*
* Returns: true if the operation was a success.
*/
static inline bool __must_check
skb_reset_transport_header_careful(struct sk_buff *skb)
{
long offset = skb->data - skb->head;
if (unlikely(offset != (typeof(skb->transport_header))offset))
return false;
if (unlikely(offset == (typeof(skb->transport_header))~0U))
return false;
skb->transport_header = offset;
return true;
}
static inline void skb_set_transport_header(struct sk_buff *skb,
const int offset)
{
skb_reset_transport_header(skb);
skb->transport_header += offset;
}
static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
return skb->head + skb->network_header;
}
static inline void skb_reset_network_header(struct sk_buff *skb)
{
long offset = skb->data - skb->head;
DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset);
skb->network_header = offset;
}
static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
skb_reset_network_header(skb);
skb->network_header += offset;
}
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->head + skb->mac_header;
}
static inline int skb_mac_offset(const struct sk_buff *skb)
{
return skb_mac_header(skb) - skb->data;
}
static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->network_header - skb->mac_header;
}
static inline void skb_unset_mac_header(struct sk_buff *skb)
{
skb->mac_header = (typeof(skb->mac_header))~0U;
}
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
long offset = skb->data - skb->head;
DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset);
skb->mac_header = offset;
}
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
skb_reset_mac_header(skb);
skb->mac_header += offset;
}
static inline void skb_pop_mac_header(struct sk_buff *skb)
{
skb->mac_header = skb->network_header;
}
static inline void skb_probe_transport_header(struct sk_buff *skb)
{
struct flow_keys_basic keys;
if (skb_transport_header_was_set(skb))
return;
if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
}
static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
if (skb_mac_header_was_set(skb)) {
const unsigned char *old_mac = skb_mac_header(skb);
skb_set_mac_header(skb, -skb->mac_len);
memmove(skb_mac_header(skb), old_mac, skb->mac_len);
}
}
/* Move the full mac header up to current network_header.
* Leaves skb->data pointing at offset skb->mac_len into the mac_header.
* Must be provided the complete mac header length.
*/
static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len)
{
if (skb_mac_header_was_set(skb)) {
const unsigned char *old_mac = skb_mac_header(skb);
skb_set_mac_header(skb, -full_mac_len);
memmove(skb_mac_header(skb), old_mac, full_mac_len);
__skb_push(skb, full_mac_len - skb->mac_len);
}
}
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
return skb->csum_start - skb_headroom(skb);
}
static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
return skb->head + skb->csum_start;
}
static inline int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
}
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->transport_header - skb->network_header;
}
static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
return skb->inner_transport_header - skb->inner_network_header;
}
static inline int skb_network_offset(const struct sk_buff *skb)
{
return skb_network_header(skb) - skb->data;
}
static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
return skb_inner_network_header(skb) - skb->data;
}
static inline enum skb_drop_reason
pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
return pskb_may_pull_reason(skb, skb_network_offset(skb) + len);
}
static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
/*
* CPUs often take a performance hit when accessing unaligned memory
* locations. The actual performance hit varies, it can be small if the
* hardware handles it or large if we have to take an exception and fix it
* in software.
*
 * Since an ethernet header is 14 bytes, network drivers often end up with
* the IP header at an unaligned offset. The IP header can be aligned by
* shifting the start of the packet by 2 bytes. Drivers should do this
* with:
*
* skb_reserve(skb, NET_IP_ALIGN);
*
* The downside to this alignment of the IP header is that the DMA is now
* unaligned. On some architectures the cost of an unaligned DMA is high
* and this cost outweighs the gains made by aligning the IP header.
*
* Since this trade off varies between architectures, we allow NET_IP_ALIGN
* to be overridden.
*/
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif
/*
* The networking layer reserves some headroom in skb data (via
* dev_alloc_skb). This is used to avoid having to reallocate skb data when
* the header has to grow. In the default case, if the header has to grow
* 32 bytes or less we avoid the reallocation.
*
* Unfortunately this headroom changes the DMA alignment of the resulting
* network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
* on some architectures. An architecture can override this value,
* perhaps setting it to a cacheline in size (since that will maintain
* cacheline alignment of the DMA). It must be a power of 2.
*
* Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
*
* Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
* to reduce average number of cache lines per packet.
 * get_rps_cpu(), for example, only accesses one 64-byte aligned block:
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
if (WARN_ON(skb_is_nonlinear(skb)))
return;
skb->len = len;
skb_set_tail_pointer(skb, len);
}
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
__skb_set_length(skb, len);
}
void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->data_len)
return ___pskb_trim(skb, len);
__skb_trim(skb, len);
return 0;
}
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
skb_might_realloc(skb);
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
/**
* pskb_trim_unique - remove end from a paged unique (not cloned) buffer
* @skb: buffer to alter
* @len: new length
*
* This is identical to pskb_trim except that the caller knows that
* the skb is not cloned so we should never get an error due to out-
* of-memory.
*/
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
int err = pskb_trim(skb, len);
BUG_ON(err);
}
static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
unsigned int diff = len - skb->len;
if (skb_tailroom(skb) < diff) {
int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
GFP_ATOMIC);
if (ret)
return ret;
}
__skb_set_length(skb, len);
return 0;
}
/**
* skb_orphan - orphan a buffer
* @skb: buffer to orphan
*
* If a buffer currently has an owner then we call the owner's
* destructor function and make the @skb unowned. The buffer continues
* to exist but is no longer charged to its former owner.
*/
static inline void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor) {
skb->destructor(skb);
skb->destructor = NULL;
skb->sk = NULL;
} else {
BUG_ON(skb->sk);
}
}
/**
* skb_orphan_frags - orphan the frags contained in a buffer
* @skb: buffer to orphan frags from
* @gfp_mask: allocation mask for replacement pages
*
* For each frag in the SKB which needs a destructor (i.e. has an
* owner) create a copy of that frag and release the original
* page by calling the destructor.
*/
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
if (likely(!skb_zcopy(skb)))
return 0;
if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
if (likely(!skb_zcopy(skb)))
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
/**
* __skb_queue_purge_reason - empty a list
* @list: list to empty
* @reason: drop reason
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
static inline void __skb_queue_purge_reason(struct sk_buff_head *list,
enum skb_drop_reason reason)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
kfree_skb_reason(skb, reason);
}
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
__skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
}
void skb_queue_purge_reason(struct sk_buff_head *list,
enum skb_drop_reason reason);
static inline void skb_queue_purge(struct sk_buff_head *list)
{
skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
}
unsigned int skb_rbtree_purge(struct rb_root *root);
void skb_errqueue_purge(struct sk_buff_head *list);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
/**
* netdev_alloc_frag - allocate a page fragment
* @fragsz: fragment size
*
* Allocates a frag from a page for receive buffer.
* Uses GFP_ATOMIC allocations.
*/
static inline void *netdev_alloc_frag(unsigned int fragsz)
{
return __netdev_alloc_frag_align(fragsz, ~0u);
}
static inline void *netdev_alloc_frag_align(unsigned int fragsz,
unsigned int align)
{
WARN_ON_ONCE(!is_power_of_2(align));
return __netdev_alloc_frag_align(fragsz, -align);
}
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
gfp_t gfp_mask);
/**
* netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has unspecified headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory. Although this function
* allocates memory it can be called from an interrupt.
*/
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
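/*
 * Example (illustrative sketch): a typical driver receive path allocates the
 * buffer with netdev_alloc_skb() and reserves NET_IP_ALIGN bytes so the IP
 * header ends up aligned (netdev_alloc_skb_ip_align() below combines both
 * steps). "rx_buf" and "rx_len" are placeholders.
 *
 *	skb = netdev_alloc_skb(dev, rx_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	skb_put_data(skb, rx_buf, rx_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 */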
/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
gfp_t gfp_mask)
{
return __netdev_alloc_skb(NULL, length, gfp_mask);
}
/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
return netdev_alloc_skb(NULL, length);
}
static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
unsigned int length, gfp_t gfp)
{
struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
if (NET_IP_ALIGN && skb)
skb_reserve(skb, NET_IP_ALIGN);
return skb;
}
static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
static inline void skb_free_frag(void *addr)
{
page_frag_free(addr);
}
void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
static inline void *napi_alloc_frag(unsigned int fragsz)
{
return __napi_alloc_frag_align(fragsz, ~0u);
}
static inline void *napi_alloc_frag_align(unsigned int fragsz,
unsigned int align)
{
WARN_ON_ONCE(!is_power_of_2(align));
return __napi_alloc_frag_align(fragsz, -align);
}
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length);
void napi_consume_skb(struct sk_buff *skb, int budget);
void napi_skb_free_stolen_head(struct sk_buff *skb);
void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason);
/**
* __dev_alloc_pages - allocate page for network Rx
* @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
* @order: size of the allocation
*
* Allocate a new page.
*
* %NULL is returned if there is no free memory.
*/
static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask,
unsigned int order)
{
/* This piece of code contains several assumptions.
* 1. This is for device Rx, therefore a cold page is preferred.
* 2. The expectation is the user wants a compound page.
 * 3. If requesting an order-0 page it will not be compound
* due to the check to see if order has a value in prep_new_page
* 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
* code in gfp_to_alloc_flags that should be enforcing this.
*/
gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order);
}
#define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__))
/*
* This specialized allocator has to be a macro for its allocations to be
* accounted separately (to have a separate alloc_tag).
*/
#define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order)
/**
* __dev_alloc_page - allocate a page for network Rx
* @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
*
* Allocate a new page.
*
* %NULL is returned if there is no free memory.
*/
static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
{
return __dev_alloc_pages_noprof(gfp_mask, 0);
}
#define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__))
/*
* This specialized allocator has to be a macro for its allocations to be
* accounted separately (to have a separate alloc_tag).
*/
#define dev_alloc_page() dev_alloc_pages(0)
/**
* dev_page_is_reusable - check whether a page can be reused for network Rx
* @page: the page to test
*
* A page shouldn't be considered for reusing/recycling if it was allocated
* under memory pressure or at a distant memory node.
*
* Returns: false if this page should be returned to page allocator, true
* otherwise.
*/
static inline bool dev_page_is_reusable(const struct page *page)
{
return likely(page_to_nid(page) == numa_mem_id() &&
!page_is_pfmemalloc(page));
}
/**
* skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
* @page: The page that was allocated from skb_alloc_page
* @skb: The skb that may need pfmemalloc set
*/
static inline void skb_propagate_pfmemalloc(const struct page *page,
struct sk_buff *skb)
{
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
/**
* skb_frag_off() - Returns the offset of a skb fragment
* @frag: the paged fragment
*/
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
return frag->offset;
}
/**
* skb_frag_off_add() - Increments the offset of a skb fragment by @delta
* @frag: skb fragment
* @delta: value to add
*/
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
frag->offset += delta;
}
/**
* skb_frag_off_set() - Sets the offset of a skb fragment
* @frag: skb fragment
* @offset: offset of fragment
*/
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
frag->offset = offset;
}
/**
* skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
* @fragto: skb fragment where offset is set
* @fragfrom: skb fragment offset is copied from
*/
static inline void skb_frag_off_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
fragto->offset = fragfrom->offset;
}
/* Return: true if the skb_frag contains a net_iov. */
static inline bool skb_frag_is_net_iov(const skb_frag_t *frag)
{
return netmem_is_net_iov(frag->netmem);
}
/**
* skb_frag_net_iov - retrieve the net_iov referred to by fragment
* @frag: the fragment
*
* Return: the &struct net_iov associated with @frag. Returns NULL if this
* frag has no associated net_iov.
*/
static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag)
{
if (!skb_frag_is_net_iov(frag))
return NULL;
return netmem_to_net_iov(frag->netmem);
}
/**
* skb_frag_page - retrieve the page referred to by a paged fragment
* @frag: the paged fragment
*
* Return: the &struct page associated with @frag. Returns NULL if this frag
* has no associated page.
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
if (skb_frag_is_net_iov(frag))
return NULL;
return netmem_to_page(frag->netmem);
}
/**
* skb_frag_netmem - retrieve the netmem referred to by a fragment
* @frag: the fragment
*
* Return: the &netmem_ref associated with @frag.
*/
static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag)
{
return frag->netmem;
}
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
const struct bpf_prog *prog);
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
* Returns: the address of the data within @frag. The page must already
* be mapped.
*/
static inline void *skb_frag_address(const skb_frag_t *frag)
{
if (!skb_frag_page(frag))
return NULL;
return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}
/**
* skb_frag_address_safe - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
* Returns: the address of the data within @frag. Checks that the page
* is mapped and returns %NULL otherwise.
*/
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
struct page *page = skb_frag_page(frag);
void *ptr;
if (!page)
return NULL;
ptr = page_address(page);
if (unlikely(!ptr))
return NULL;
return ptr + skb_frag_off(frag);
}
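/*
 * Example (illustrative sketch): walking the paged fragments of an skb and
 * looking at their data through skb_frag_address(). This assumes the pages
 * are in the kernel mapping; use skb_frag_address_safe() otherwise.
 * "example_scan" is a hypothetical helper.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		void *vaddr = skb_frag_address(frag);
 *
 *		if (vaddr)
 *			example_scan(vaddr, skb_frag_size(frag));
 *	}
 */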
/**
* skb_frag_page_copy() - sets the page in a fragment from another fragment
* @fragto: skb fragment where page is set
* @fragfrom: skb fragment page is copied from
*/
static inline void skb_frag_page_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
fragto->netmem = fragfrom->netmem;
}
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
/**
* __skb_frag_dma_map - maps a paged fragment via the DMA API
* @dev: the device to map the fragment to
* @frag: the paged fragment to map
* @offset: the offset within the fragment (starting at the
* fragment's own offset)
* @size: the number of bytes to map
 * @dir: the direction of the mapping (``DMA_*``)
 *
 * Maps the page associated with @frag to @dev.
*/
static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
const skb_frag_t *frag,
size_t offset, size_t size,
enum dma_data_direction dir)
{
if (skb_frag_is_net_iov(frag)) {
return netmem_to_net_iov(frag->netmem)->dma_addr + offset +
frag->offset;
}
return dma_map_page(dev, skb_frag_page(frag),
skb_frag_off(frag) + offset, size, dir);
}
#define skb_frag_dma_map(dev, frag, ...) \
CONCATENATE(_skb_frag_dma_map, \
COUNT_ARGS(__VA_ARGS__))(dev, frag, ##__VA_ARGS__)
#define __skb_frag_dma_map1(dev, frag, offset, uf, uo) ({ \
const skb_frag_t *uf = (frag); \
size_t uo = (offset); \
\
__skb_frag_dma_map(dev, uf, uo, skb_frag_size(uf) - uo, \
DMA_TO_DEVICE); \
})
#define _skb_frag_dma_map1(dev, frag, offset) \
__skb_frag_dma_map1(dev, frag, offset, __UNIQUE_ID(frag_), \
__UNIQUE_ID(offset_))
#define _skb_frag_dma_map0(dev, frag) \
_skb_frag_dma_map1(dev, frag, 0)
#define _skb_frag_dma_map2(dev, frag, offset, size) \
__skb_frag_dma_map(dev, frag, offset, size, DMA_TO_DEVICE)
#define _skb_frag_dma_map3(dev, frag, offset, size, dir) \
__skb_frag_dma_map(dev, frag, offset, size, dir)
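/*
 * Example (illustrative sketch): a transmit path that maps the linear part
 * and every paged fragment for DMA. Error handling and unmapping of already
 * mapped fragments are omitted; "dev" is a placeholder struct device pointer.
 *
 *	dma_addr_t addr;
 *	int i;
 *
 *	addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *
 *		addr = skb_frag_dma_map(dev, frag);	// full frag, DMA_TO_DEVICE
 *		if (dma_mapping_error(dev, addr))
 *			return -ENOMEM;
 *	}
 */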
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
gfp_t gfp_mask)
{
return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}
static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
gfp_t gfp_mask)
{
return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}
/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
* @len: length up to which to write
*
 * Returns true if modifying the header part of the cloned buffer
 * does not require the data to be copied.
*/
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
return !skb_header_cloned(skb) &&
skb_headroom(skb) + len <= skb->hdr_len;
}
static inline int skb_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
int cloned)
{
int delta = 0;
if (headroom > skb_headroom(skb))
delta = headroom - skb_headroom(skb);
if (delta || cloned)
return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
GFP_ATOMIC);
return 0;
}
/**
* skb_cow - copy header of skb when it is required
* @skb: buffer to cow
* @headroom: needed headroom
*
* If the skb passed lacks sufficient headroom or its data part
* is shared, data is reallocated. If reallocation fails, an error
* is returned and original skb is not changed.
*
* The result is skb with writable area skb->head...skb->tail
* and at least @headroom of space at head.
*/
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
return __skb_cow(skb, headroom, skb_cloned(skb));
}
/**
* skb_cow_head - skb_cow but only making the head writable
* @skb: buffer to cow
* @headroom: needed headroom
*
* This function is identical to skb_cow except that we replace the
* skb_cloned check by skb_header_cloned. It should be used when
* you only need to push on some header and do not need to modify
* the data.
*/
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
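/*
 * Example (illustrative sketch): before pushing a new header (e.g. a VLAN or
 * tunnel header) onto a buffer that may be cloned or short on headroom, make
 * the head writable and large enough first. "new_hdr_len" and "hdr" are
 * placeholders.
 *
 *	if (skb_cow_head(skb, new_hdr_len))
 *		return -ENOMEM;		// skb is left unchanged on failure
 *	hdr = skb_push(skb, new_hdr_len);
 *	// ... fill in hdr ...
 */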
/**
* skb_padto - pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
if (likely(size >= len))
return 0;
return skb_pad(skb, len - size);
}
/**
* __skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
* @free_on_error: free buffer on error
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error if @free_on_error is true.
*/
static inline int __must_check __skb_put_padto(struct sk_buff *skb,
unsigned int len,
bool free_on_error)
{
unsigned int size = skb->len;
if (unlikely(size < len)) {
len -= size;
if (__skb_pad(skb, len, free_on_error))
return -ENOMEM;
__skb_put(skb, len);
}
return 0;
}
/**
* skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
return __skb_put_padto(skb, len, true);
}
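/*
 * Example (illustrative sketch): many drivers pad frames to the minimum
 * Ethernet length before handing them to the hardware. On failure the skb
 * has already been freed, so the caller must not touch it again.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	// skb was freed by skb_put_padto()
 */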
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
__must_check;
static inline bool skb_can_coalesce_netmem(struct sk_buff *skb, int i,
netmem_ref netmem, int off)
{
if (skb_zcopy(skb))
return false;
if (i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
return netmem == skb_frag_netmem(frag) &&
off == skb_frag_off(frag) + skb_frag_size(frag);
}
return false;
}
static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
const struct page *page, int off)
{
return skb_can_coalesce_netmem(skb, i, page_to_netmem(page), off);
}
static inline int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}
/**
* skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
*
* If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released.
*/
static inline int skb_linearize(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
/**
* skb_has_shared_frag - can any frag be overwritten
* @skb: buffer to test
*
* Return: true if the skb has at least one frag that might be modified
* by an external entity (as in vmsplice()/sendfile())
*/
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) &&
skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}
/**
* skb_linearize_cow - make sure skb is linear and writable
* @skb: buffer to process
*
* If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released.
*/
static inline int skb_linearize_cow(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) || skb_cloned(skb) ?
__skb_linearize(skb) : 0;
}
static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
unsigned int off)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_block_sub(skb->csum,
csum_partial(start, len, 0), off);
else if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start_offset(skb) < 0)
skb->ip_summed = CHECKSUM_NONE;
}
/**
* skb_postpull_rcsum - update checksum for received skb after pull
* @skb: buffer to update
* @start: start of data before pull
* @len: length of data pulled
*
* After doing a pull on a received packet, you need to call this to
* update the CHECKSUM_COMPLETE checksum, or set ip_summed to
* CHECKSUM_NONE so that it can be recomputed from scratch.
*/
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = wsum_negate(csum_partial(start, len,
wsum_negate(skb->csum)));
else if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start_offset(skb) < 0)
skb->ip_summed = CHECKSUM_NONE;
}
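/*
 * Example (illustrative sketch): after pulling a header off a received
 * packet, keep a CHECKSUM_COMPLETE value consistent by subtracting the
 * checksum of the bytes that were removed. "hdr_len" is a placeholder.
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		return -EINVAL;
 *	start = skb->data;
 *	skb_pull(skb, hdr_len);
 *	skb_postpull_rcsum(skb, start, hdr_len);
 */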
static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
unsigned int off)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_block_add(skb->csum,
csum_partial(start, len, 0), off);
}
/**
* skb_postpush_rcsum - update checksum for received skb after push
* @skb: buffer to update
* @start: start of data after push
* @len: length of data pushed
*
* After doing a push on a received packet, you need to call this to
* update the CHECKSUM_COMPLETE checksum.
*/
static inline void skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
__skb_postpush_rcsum(skb, start, len, 0);
}
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
/**
* skb_push_rcsum - push skb and update receive checksum
* @skb: buffer to update
 * @len: length of data pushed
*
* This function performs an skb_push on the packet and updates
* the CHECKSUM_COMPLETE checksum. It should be used on
* receive path processing instead of skb_push unless you know
* that the checksum difference is zero (e.g., a valid IP header)
* or you are setting ip_summed to CHECKSUM_NONE.
*/
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
skb_push(skb, len);
skb_postpush_rcsum(skb, skb->data, len);
return skb->data;
}
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
* @len: new length
*
* This is exactly the same as pskb_trim except that it ensures the
* checksum of received packets are still valid after the operation.
* It can change skb pointers.
*/
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
skb_might_realloc(skb);
if (likely(len >= skb->len))
return 0;
return pskb_trim_rcsum_slow(skb, len);
}
static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
__skb_trim(skb, len);
return 0;
}
static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
return __skb_grow(skb, len);
}
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root) rb_to_skb(rb_last(root))
#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
skb = skb->next)
#define skb_queue_walk_safe(queue, skb, tmp) \
for (skb = (queue)->next, tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->next)
#define skb_queue_walk_from(queue, skb) \
for (; skb != (struct sk_buff *)(queue); \
skb = skb->next)
#define skb_rbtree_walk(skb, root) \
for (skb = skb_rb_first(root); skb != NULL; \
skb = skb_rb_next(skb))
#define skb_rbtree_walk_from(skb) \
for (; skb != NULL; \
skb = skb_rb_next(skb))
#define skb_rbtree_walk_from_safe(skb, tmp) \
for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
skb = tmp)
#define skb_queue_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->next)
#define skb_queue_reverse_walk(queue, skb) \
for (skb = (queue)->prev; \
skb != (struct sk_buff *)(queue); \
skb = skb->prev)
#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
for (skb = (queue)->prev, tmp = skb->prev; \
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->prev)
#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->prev; \
skb != (struct sk_buff *)(queue); \
skb = tmp, tmp = skb->prev)
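/*
 * Example (illustrative sketch): the walk macros above are plain for-loops,
 * so use the _safe variants when buffers may be unlinked while iterating.
 * The queue lock (or other exclusion) must be held by the caller;
 * "example_should_drop" is a hypothetical predicate.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (example_should_drop(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */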
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
return skb_shinfo(skb)->frag_list != NULL;
}
static inline void skb_frag_list_init(struct sk_buff *skb)
{
skb_shinfo(skb)->frag_list = NULL;
}
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
int *err, long *timeo_p,
const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sk_buff_head *queue,
unsigned int flags,
int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags, int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
struct poll_table_struct *wait,
struct sk_buff_head *rcv_queue);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
struct msghdr *msg, int size)
{
return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
int skb_copy_and_crc32c_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len, u32 *crcp);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int skb_copy_datagram_from_iter_full(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
int offset, int len, int flags);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_eth_pop(struct sk_buff *skb);
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
gfp_t gfp);
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}
static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc);
static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
const void *data, int hlen, void *buffer)
{
if (likely(hlen - offset >= len))
return (void *)data + offset;
	if (!skb ||
	    unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
		return NULL;
return buffer;
}
static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
return __skb_header_pointer(skb, offset, len, skb->data,
skb_headlen(skb), buffer);
}
static inline void * __must_check
skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
{
if (likely(skb_headlen(skb) - offset >= len))
return skb->data + offset;
return NULL;
}
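/*
 * Illustrative sketch (not part of the original header): skb_header_pointer()
 * is typically used to parse a header that may or may not reside in the
 * linear area, falling back to a copy into a caller-provided buffer. The
 * helper name is hypothetical and struct udphdr is assumed to be available
 * via <linux/udp.h>.
 */
static inline const struct udphdr *example_parse_udph(const struct sk_buff *skb,
						      int thoff,
						      struct udphdr *_buf)
{
	/* Returns a pointer into skb->data when the header is linear,
	 * otherwise copies it into *_buf and returns _buf; NULL if the skb
	 * is too short.
	 */
	return skb_header_pointer(skb, thoff, sizeof(*_buf), _buf);
}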
/**
* skb_needs_linearize - check if we need to linearize a given skb
* depending on the given device features.
* @skb: socket buffer to check
* @features: net device features
*
* Returns true if either:
* 1. skb has frag_list and the device doesn't support FRAGLIST, or
* 2. skb is fragmented and the device does not support SG.
*/
static inline bool skb_needs_linearize(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
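/*
 * Illustrative sketch (not part of the original header): a driver transmit
 * path would typically pair skb_needs_linearize() with __skb_linearize()
 * when the device lacks SG or FRAGLIST support. The helper name is
 * hypothetical and error handling is reduced to the bare minimum.
 */
static inline int example_maybe_linearize(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
		return -ENOMEM;	/* could not linearize; caller drops the skb */
	return 0;
}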
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
void *to,
const unsigned int len)
{
memcpy(to, skb->data, len);
}
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
const int offset, void *to,
const unsigned int len)
{
memcpy(to, skb->data + offset, len);
}
static inline void skb_copy_to_linear_data(struct sk_buff *skb,
const void *from,
const unsigned int len)
{
memcpy(skb->data, from, len);
}
static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
const int offset,
const void *from,
const unsigned int len)
{
memcpy(skb->data + offset, from, len);
}
void skb_init(void);
static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
return skb->tstamp;
}
/**
* skb_get_timestamp - get timestamp from a skb
* @skb: skb to get stamp from
* @stamp: pointer to struct __kernel_old_timeval to store stamp in
*
* Timestamps are stored in the skb as offsets to a base timestamp.
* This function converts the offset back to a struct timeval and stores
* it in stamp.
*/
static inline void skb_get_timestamp(const struct sk_buff *skb,
struct __kernel_old_timeval *stamp)
{
*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}
static inline void skb_get_new_timestamp(const struct sk_buff *skb,
struct __kernel_sock_timeval *stamp)
{
struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
stamp->tv_sec = ts.tv_sec;
stamp->tv_usec = ts.tv_nsec / 1000;
}
static inline void skb_get_timestampns(const struct sk_buff *skb,
struct __kernel_old_timespec *stamp)
{
struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
stamp->tv_sec = ts.tv_sec;
stamp->tv_nsec = ts.tv_nsec;
}
static inline void skb_get_new_timestampns(const struct sk_buff *skb,
struct __kernel_timespec *stamp)
{
struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
stamp->tv_sec = ts.tv_sec;
stamp->tv_nsec = ts.tv_nsec;
}
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
skb->tstamp_type = SKB_CLOCK_REALTIME;
}
static inline ktime_t net_timedelta(ktime_t t)
{
return ktime_sub(ktime_get_real(), t);
}
static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
u8 tstamp_type)
{
skb->tstamp = kt;
if (kt)
skb->tstamp_type = tstamp_type;
else
skb->tstamp_type = SKB_CLOCK_REALTIME;
}
static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
ktime_t kt, clockid_t clockid)
{
u8 tstamp_type = SKB_CLOCK_REALTIME;
switch (clockid) {
case CLOCK_REALTIME:
break;
case CLOCK_MONOTONIC:
tstamp_type = SKB_CLOCK_MONOTONIC;
break;
case CLOCK_TAI:
tstamp_type = SKB_CLOCK_TAI;
break;
default:
WARN_ON_ONCE(1);
kt = 0;
}
skb_set_delivery_time(skb, kt, tstamp_type);
}
DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
/* It is used in the ingress path to clear the delivery_time.
* If needed, set the skb->tstamp to the (rcv) timestamp.
*/
static inline void skb_clear_delivery_time(struct sk_buff *skb)
{
if (skb->tstamp_type) {
skb->tstamp_type = SKB_CLOCK_REALTIME;
if (static_branch_unlikely(&netstamp_needed_key))
skb->tstamp = ktime_get_real();
else
skb->tstamp = 0;
}
}
static inline void skb_clear_tstamp(struct sk_buff *skb)
{
if (skb->tstamp_type)
return;
skb->tstamp = 0;
}
static inline ktime_t skb_tstamp(const struct sk_buff *skb)
{
if (skb->tstamp_type)
return 0;
return skb->tstamp;
}
static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
{
if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
return skb->tstamp;
if (static_branch_unlikely(&netstamp_needed_key) || cond)
return ktime_get_real();
return 0;
}
static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
return skb_shinfo(skb)->meta_len;
}
static inline void *skb_metadata_end(const struct sk_buff *skb)
{
return skb_mac_header(skb);
}
static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
const struct sk_buff *skb_b,
u8 meta_len)
{
const void *a = skb_metadata_end(skb_a);
const void *b = skb_metadata_end(skb_b);
u64 diffs = 0;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
BITS_PER_LONG != 64)
goto slow;
/* Using more efficient variant than plain call to memcmp(). */
switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
case 32: diffs |= __it_diff(a, b, 64);
fallthrough;
case 24: diffs |= __it_diff(a, b, 64);
fallthrough;
case 16: diffs |= __it_diff(a, b, 64);
fallthrough;
case 8: diffs |= __it_diff(a, b, 64);
break;
case 28: diffs |= __it_diff(a, b, 64);
fallthrough;
case 20: diffs |= __it_diff(a, b, 64);
fallthrough;
case 12: diffs |= __it_diff(a, b, 64);
fallthrough;
case 4: diffs |= __it_diff(a, b, 32);
break;
default:
slow:
return memcmp(a - meta_len, b - meta_len, meta_len);
}
return diffs;
}
static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
const struct sk_buff *skb_b)
{
u8 len_a = skb_metadata_len(skb_a);
u8 len_b = skb_metadata_len(skb_b);
if (!(len_a | len_b))
return false;
return len_a != len_b ?
true : __skb_metadata_differs(skb_a, skb_b, len_a);
}
static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
skb_shinfo(skb)->meta_len = meta_len;
}
static inline void skb_metadata_clear(struct sk_buff *skb)
{
skb_metadata_set(skb, 0);
}
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);
#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}
static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
return false;
}
#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
/**
* skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
*
* PHY drivers may accept clones of transmitted packets for
* timestamping via their phy_driver.txtstamp method. These drivers
* must call this function to return the skb back to the stack with a
* timestamp.
*
* @skb: clone of the original outgoing packet
* @hwtstamps: hardware time stamps
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps);
void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype);
/**
* skb_tstamp_tx - queue clone of skb with send time stamps
* @orig_skb: the original outgoing packet
* @hwtstamps: hardware time stamps, may be NULL if not available
*
* If the skb has a socket associated, then this function clones the
* skb (thus sharing the actual data and optional structures), stores
* the optional hardware time stamping information (if non NULL) or
* generates a software time stamp (otherwise), then queues the clone
* to the error queue of the socket. Errors are silently ignored.
*/
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps);
/**
* skb_tx_timestamp() - Driver hook for transmit timestamping
*
* Ethernet MAC Drivers should call this function in their hard_xmit()
* function immediately before giving the sk_buff to the MAC hardware.
*
* Specifically, one should make absolutely sure that this function is
* called before TX completion of this packet can trigger. Otherwise
* the packet could potentially already be freed.
*
* @skb: A socket buffer.
*/
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
skb_clone_tx_timestamp(skb);
if (skb_shinfo(skb)->tx_flags & (SKBTX_SW_TSTAMP | SKBTX_BPF))
skb_tstamp_tx(skb, NULL);
}
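/*
 * Illustrative sketch (not part of the original header): per the kernel-doc
 * above, a MAC driver calls skb_tx_timestamp() from its ndo_start_xmit()
 * right before handing the buffer to the hardware. Everything except the
 * skb_tx_timestamp() call is hypothetical; netdev_tx_t and NETDEV_TX_OK are
 * assumed to come from <linux/netdevice.h>.
 */
static inline netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
						 struct net_device *dev)
{
	/* ... map the buffer and fill the TX descriptor here ... */

	/* Must run before TX completion for this packet can possibly fire. */
	skb_tx_timestamp(skb);

	/* ... ring the doorbell / kick the hardware here ... */
	return NETDEV_TX_OK;
}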
/**
* skb_complete_wifi_ack - deliver skb with wifi status
*
* @skb: the original outgoing packet
* @acked: ack status
*
*/
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);
static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
skb->csum_valid ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_start_offset(skb) >= 0));
}
/**
* skb_checksum_complete - Calculate checksum of an entire packet
* @skb: packet to process
*
* This function calculates the checksum over the entire packet plus
* the value of skb->csum. The latter can be used to supply the
* checksum of a pseudo header as used by TCP/UDP. It returns the
* checksum.
*
* For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
* packets. In that case the function should return zero if the
* checksum is correct. In particular, this function will return zero
* if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
* hardware has already verified the correctness of the checksum.
*/
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
return skb_csum_unnecessary(skb) ?
0 : __skb_checksum_complete(skb);
}
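/*
 * Illustrative sketch (not part of the original header): for a protocol that
 * carries a complete checksum (ICMP here), receive processing can simply ask
 * skb_checksum_complete() whether a software fold is needed; a zero result
 * means the checksum is correct or was already verified by hardware. The
 * helper name is hypothetical.
 */
static inline bool example_icmp_csum_ok(struct sk_buff *skb)
{
	return skb_checksum_complete(skb) == 0;
}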
static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (skb->csum_level == 0)
skb->ip_summed = CHECKSUM_NONE;
else
skb->csum_level--;
}
}
static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
skb->csum_level++;
} else if (skb->ip_summed == CHECKSUM_NONE) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = 0;
}
}
static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
skb->ip_summed = CHECKSUM_NONE;
skb->csum_level = 0;
}
}
/* Check if we need to perform checksum complete validation.
*
* Returns: true if checksum complete is needed, false otherwise
* (either checksum is unnecessary or zero checksum is allowed).
*/
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
bool zero_okay,
__sum16 check)
{
if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
skb->csum_valid = 1;
__skb_decr_checksum_unnecessary(skb);
return false;
}
return true;
}
/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
* in checksum_init.
*/
#define CHECKSUM_BREAK 76
/* Unset checksum-complete
*
 * Unsetting checksum complete can be done when the packet is being modified
 * (uncompressed, for instance) and the checksum-complete value is thereby
 * invalidated.
*/
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
}
/* Validate (init) checksum based on checksum complete.
*
* Return values:
 * 0: the checksum was validated, or validation is deferred to
 *    skb_checksum_complete(). In the latter case, ip_summed will not be
 *    CHECKSUM_UNNECESSARY and the pseudo-header checksum is stored in
 *    skb->csum for use in __skb_checksum_complete()
* non-zero: value of invalid checksum
*
*/
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
bool complete,
__wsum psum)
{
if (skb->ip_summed == CHECKSUM_COMPLETE) {
if (!csum_fold(csum_add(psum, skb->csum))) {
skb->csum_valid = 1;
return 0;
}
}
skb->csum = psum;
if (complete || skb->len <= CHECKSUM_BREAK) {
__sum16 csum;
csum = __skb_checksum_complete(skb);
skb->csum_valid = !csum;
return csum;
}
return 0;
}
static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
return 0;
}
/* Perform checksum validation (init). Note that this is a macro so that the
 * pseudo-header (supplied via the compute_pseudo input function) is only
 * calculated when necessary. First we try to validate without any computation
 * (checksum unnecessary) and then fall back to checksum-complete validation,
 * calling the supplied function to compute the pseudo-header.
*
* Return values:
 * 0: the checksum was validated, or validation is deferred to skb_checksum_complete()
* non-zero: value of invalid checksum
*/
#define __skb_checksum_validate(skb, proto, complete, \
zero_okay, check, compute_pseudo) \
({ \
__sum16 __ret = 0; \
skb->csum_valid = 0; \
if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
__ret = __skb_checksum_validate_complete(skb, \
complete, compute_pseudo(skb, proto)); \
__ret; \
})
#define skb_checksum_init(skb, proto, compute_pseudo) \
__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
#define skb_checksum_validate(skb, proto, compute_pseudo) \
__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
#define skb_checksum_validate_zero_check(skb, proto, check, \
compute_pseudo) \
__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
#define skb_checksum_simple_validate(skb) \
__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
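/*
 * Illustrative sketch (not part of the original header): a transport receive
 * path wires one of the macros above to its pseudo-header helper. The
 * function name is hypothetical; inet_compute_pseudo() and IPPROTO_TCP are
 * assumed to be available via <net/ip.h> and <linux/in.h> respectively.
 */
static inline __sum16 example_tcp_v4_csum_init(struct sk_buff *skb)
{
	/* A non-zero return is the folded value of an invalid checksum. */
	return skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo);
}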
static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}
static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
skb->csum = ~pseudo;
skb->ip_summed = CHECKSUM_COMPLETE;
}
#define skb_checksum_try_convert(skb, proto, compute_pseudo) \
do { \
if (__skb_checksum_convert_check(skb)) \
__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)
static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
u16 start, u16 offset)
{
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
skb->csum_offset = offset - start;
}
/* Update the skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create a checksum-complete
 * value here, skb_postpull_rcsum() is done so that skb->csum starts at ptr.
*/
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
int start, int offset, bool nopartial)
{
__wsum delta;
if (!nopartial) {
skb_remcsum_adjust_partial(skb, ptr, start, offset);
return;
}
if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
__skb_checksum_complete(skb);
skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
}
delta = remcsum_adjust(ptr, skb->csum, start, offset);
/* Adjust skb->csum since we changed the packet */
skb->csum = csum_add(skb->csum, delta);
}
static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
return NULL;
#endif
}
static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
return skb->_nfct;
#else
return 0UL;
#endif
}
static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
skb->slow_gro |= !!nfct;
skb->_nfct = nfct;
#endif
}
#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
SKB_EXT_BRIDGE_NF,
#endif
#ifdef CONFIG_XFRM
SKB_EXT_SEC_PATH,
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
TC_SKB_EXT,
#endif
#if IS_ENABLED(CONFIG_MPTCP)
SKB_EXT_MPTCP,
#endif
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
SKB_EXT_MCTP,
#endif
#if IS_ENABLED(CONFIG_INET_PSP)
SKB_EXT_PSP,
#endif
SKB_EXT_NUM, /* must be last */
};
/**
* struct skb_ext - sk_buff extensions
* @refcnt: 1 on allocation, deallocated on 0
* @offset: offset to add to @data to obtain extension address
* @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
* @data: start of extension data, variable sized
*
 * Note: offsets/lengths are stored in chunks of 8 bytes; this allows the use
 * of 'u8' types while supporting up to 2KB worth of extension data.
*/
struct skb_ext {
refcount_t refcnt;
u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
u8 chunks; /* same */
char data[] __aligned(8);
};
struct skb_ext *__skb_ext_alloc(gfp_t flags);
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);
static inline void skb_ext_put(struct sk_buff *skb)
{
if (skb->active_extensions)
__skb_ext_put(skb->extensions);
}
static inline void __skb_ext_copy(struct sk_buff *dst,
const struct sk_buff *src)
{
	dst->active_extensions = src->active_extensions;

	if (src->active_extensions) {
		struct skb_ext *ext = src->extensions;

		refcount_inc(&ext->refcnt);
		dst->extensions = ext;
}
}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
skb_ext_put(dst);
__skb_ext_copy(dst, src);
}
static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
return !!ext->offset[i];
}
static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
return skb->active_extensions & (1 << id);
}
static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
if (skb_ext_exist(skb, id))
__skb_ext_del(skb, id);
}
static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
if (skb_ext_exist(skb, id)) {
struct skb_ext *ext = skb->extensions;
return (void *)ext + (ext->offset[id] << 3);
}
return NULL;
}
static inline void skb_ext_reset(struct sk_buff *skb)
{
if (unlikely(skb->active_extensions)) {
__skb_ext_put(skb->extensions);
skb->active_extensions = 0;
}
}
static inline bool skb_has_extensions(struct sk_buff *skb)
{
return unlikely(skb->active_extensions);
}
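/*
 * Illustrative sketch (not part of the original header): extension data is
 * attached and looked up by id. The helper name is hypothetical; the pattern
 * is the same for every member of enum skb_ext_id.
 */
static inline void *example_get_or_add_ext(struct sk_buff *skb,
					   enum skb_ext_id id)
{
	void *ext = skb_ext_find(skb, id);

	/* skb_ext_add() allocates (or reuses) extension storage and returns
	 * a pointer to the per-id area, or NULL on allocation failure.
	 */
	return ext ? ext : skb_ext_add(skb, id);
}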
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
#endif /* CONFIG_SKB_EXTENSIONS */
static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(skb));
skb->_nfct = 0;
#endif
}
static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
skb->nf_trace = 0;
#endif
}
static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
skb->ipvs_property = 0;
#endif
}
/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
dst->_nfct = src->_nfct;
nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
if (copy)
dst->nf_trace = src->nf_trace;
#endif
}
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
dst->slow_gro = src->slow_gro;
__nf_copy(dst, src, true);
}
#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
to->secmark = from->secmark;
}
static inline void skb_init_secmark(struct sk_buff *skb)
{
skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }
static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
return 0;
#endif
}
static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
return !skb->destructor &&
!secpath_exists(skb) &&
!skb_nfct(skb) &&
!skb->_skb_refdst &&
!skb_has_frag_list(skb);
}
static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
skb->queue_mapping = queue_mapping;
}
static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
return skb->queue_mapping;
}
static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
to->queue_mapping = from->queue_mapping;
}
static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
skb->queue_mapping = rx_queue + 1;
}
static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
return skb->queue_mapping - 1;
}
static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
return skb->queue_mapping != 0;
}
static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
skb->dst_pending_confirm = val;
}
static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
return skb->dst_pending_confirm != 0;
}
static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
return NULL;
#endif
}
static inline bool skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
}
/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}
/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}
static inline void skb_gso_reset(struct sk_buff *skb)
{
skb_shinfo(skb)->gso_size = 0;
skb_shinfo(skb)->gso_segs = 0;
skb_shinfo(skb)->gso_type = 0;
}
static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
u16 increment)
{
if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
return;
shinfo->gso_size += increment;
}
static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
u16 decrement)
{
if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
return;
shinfo->gso_size -= decrement;
}
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
/* LRO sets gso_size but not gso_type, whereas if GSO is really
* wanted then gso_type will be set. */
const struct skb_shared_info *shinfo = skb_shinfo(skb);
if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
unlikely(shinfo->gso_type == 0)) {
__skb_warn_lro_forwarding(skb);
return true;
}
return false;
}
static inline void skb_forward_csum(struct sk_buff *skb)
{
/* Unfortunately we don't support this one. Any brave souls? */
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
}
/**
* skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
* @skb: skb to check
*
* fresh skbs have their ip_summed set to CHECKSUM_NONE.
* Instead of forcing ip_summed to CHECKSUM_NONE, we can
* use this helper, to document places where we make this assertion.
*/
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
}
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
__sum16(*skb_chkf)(struct sk_buff *skb));
/**
* skb_head_is_locked - Determine if the skb->head is locked down
* @skb: skb to check
*
 * The head on skbs built around a head frag can be removed if they are
* not cloned. This function returns true if the skb head is locked down
* due to either being allocated via kmalloc, or by being a clone with
* multiple references to the head.
*/
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
return !skb->head_frag || skb_cloned(skb);
}
/* Local Checksum Offload.
* Compute outer checksum based on the assumption that the
* inner checksum will be offloaded later.
* See Documentation/networking/checksum-offloads.rst for
* explanation of how this works.
* Fill in outer checksum adjustment (e.g. with sum of outer
* pseudo-header) before calling.
* Also ensure that inner checksum is in linear data area.
*/
static inline __wsum lco_csum(struct sk_buff *skb)
{
unsigned char *csum_start = skb_checksum_start(skb);
unsigned char *l4_hdr = skb_transport_header(skb);
__wsum partial;
/* Start with complement of inner checksum adjustment */
partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
skb->csum_offset));
/* Add in checksum of our headers (incl. outer checksum
* adjustment filled in by caller) and return result.
*/
return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
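/*
 * Illustrative sketch (not part of the original header): local checksum
 * offload as a UDP tunnel would use it. The caller is assumed to have
 * already stored the complemented outer pseudo-header sum in uh->check (the
 * "outer checksum adjustment" mentioned above); lco_csum() then folds in
 * everything between the outer and inner checksum fields. struct udphdr and
 * csum_fold() are assumed to be available; the helper name is hypothetical.
 */
static inline void example_lco_finish_udp(struct sk_buff *skb,
					  struct udphdr *uh)
{
	uh->check = csum_fold(lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 means "no checksum" for UDP */
}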
static inline bool skb_is_redirected(const struct sk_buff *skb)
{
return skb->redirected;
}
static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
skb->redirected = 1;
#ifdef CONFIG_NET_REDIRECT
skb->from_ingress = from_ingress;
if (skb->from_ingress)
skb_clear_tstamp(skb);
#endif
}
static inline void skb_reset_redirect(struct sk_buff *skb)
{
skb->redirected = 0;
}
static inline void skb_set_redirected_noclear(struct sk_buff *skb,
bool from_ingress)
{
skb->redirected = 1;
#ifdef CONFIG_NET_REDIRECT
skb->from_ingress = from_ingress;
#endif
}
static inline bool skb_csum_is_sctp(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_SCTP)
return skb->csum_not_inet;
#else
return 0;
#endif
}
static inline void skb_reset_csum_not_inet(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
#if IS_ENABLED(CONFIG_IP_SCTP)
skb->csum_not_inet = 0;
#endif
}
static inline void skb_set_kcov_handle(struct sk_buff *skb,
const u64 kcov_handle)
{
#ifdef CONFIG_KCOV
skb->kcov_handle = kcov_handle;
#endif
}
static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
{
#ifdef CONFIG_KCOV
return skb->kcov_handle;
#else
return 0;
#endif
}
static inline void skb_mark_for_recycle(struct sk_buff *skb)
{
#ifdef CONFIG_PAGE_POOL
skb->pp_recycle = 1;
#endif
}
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
ssize_t maxsize);
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2022 Linutronix GmbH, John Ogness
// Copyright (C) 2022 Intel, Thomas Gleixner
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kthread.h>
#include <linux/minmax.h>
#include <linux/panic.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include "internal.h"
#include "printk_ringbuffer.h"
/*
 * Printk console printing implementation for consoles that do not depend
 * on the legacy-style console_lock mechanism.
*
* The state of the console is maintained in the "nbcon_state" atomic
* variable.
*
* The console is locked when:
*
* - The 'prio' field contains the priority of the context that owns the
* console. Only higher priority contexts are allowed to take over the
* lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
*
* - The 'cpu' field denotes on which CPU the console is locked. It is used
* to prevent busy waiting on the same CPU. Also it informs the lock owner
* that it has lost the lock in a more complex scenario when the lock was
* taken over by a higher priority context, released, and taken on another
* CPU with the same priority as the interrupted owner.
*
* The acquire mechanism uses a few more fields:
*
* - The 'req_prio' field is used by the handover approach to make the
* current owner aware that there is a context with a higher priority
* waiting for the friendly handover.
*
 * - The 'unsafe' field allows the console to be taken over in a safe way in
 *   the middle of emitting a message. The field is set only when accessing some
* shared resources or when the console device is manipulated. It can be
* cleared, for example, after emitting one character when the console
* device is in a consistent state.
*
* - The 'unsafe_takeover' field is set when a hostile takeover took the
* console in an unsafe state. The console will stay in the unsafe state
* until re-initialized.
*
* The acquire mechanism uses three approaches:
*
* 1) Direct acquire when the console is not owned or is owned by a lower
* priority context and is in a safe state.
*
* 2) Friendly handover mechanism uses a request/grant handshake. It is used
* when the current owner has lower priority and the console is in an
* unsafe state.
*
* The requesting context:
*
* a) Sets its priority into the 'req_prio' field.
*
* b) Waits (with a timeout) for the owning context to unlock the
* console.
*
* c) Takes the lock and clears the 'req_prio' field.
*
* The owning context:
*
* a) Observes the 'req_prio' field set on exit from the unsafe
* console state.
*
* b) Gives up console ownership by clearing the 'prio' field.
*
 * 3) Unsafe hostile takeover allows taking over the lock even when the
 *    console is in an unsafe state. It is used only in panic() by the final
 *    attempt to flush consoles in a try-and-hope mode.
 *
 *    Note that separate record buffers are used in panic(). As a result,
 *    the messages can be read and formatted without any risk even after
 *    using the hostile takeover in an unsafe state.
*
* The release function simply clears the 'prio' field.
*
* All operations on @console::nbcon_state are atomic cmpxchg based to
* handle concurrency.
*
* The acquire/release functions implement only minimal policies:
*
* - Preference for higher priority contexts.
* - Protection of the panic CPU.
*
* All other policy decisions must be made at the call sites:
*
* - What is marked as an unsafe section.
* - Whether to spin-wait if there is already an owner and the console is
* in an unsafe state.
* - Whether to attempt an unsafe hostile takeover.
*
 * The design allows implementing the well-known pattern:
*
* acquire()
* output_one_printk_record()
* release()
*
* The output of one printk record might be interrupted with a higher priority
* context. The new owner is supposed to reprint the entire interrupted record
* from scratch.
*/
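/*
 * Illustrative sketch (not part of the original file): the well-known pattern
 * described above, as it is realized by the helpers defined later in this
 * file (sequence bookkeeping and error handling elided):
 *
 *	if (!nbcon_context_try_acquire(ctxt, false))
 *		return;				// console owned by someone else
 *	if (!nbcon_emit_next_record(wctxt, use_atomic))
 *		return;				// ownership was lost mid-record
 *	nbcon_context_release(ctxt);		// give the console back
 */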
/**
* nbcon_state_set - Helper function to set the console state
* @con: Console to update
* @new: The new state to write
*
* Only to be used when the console is not yet or no longer visible in the
* system. Otherwise use nbcon_state_try_cmpxchg().
*/
static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
{
atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
}
/**
* nbcon_state_read - Helper function to read the console state
* @con: Console to read
* @state: The state to store the result
*/
static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
{
state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
}
/**
* nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
* @con: Console to update
* @cur: Old/expected state
* @new: New state
*
 * Return: True on success. False on failure, in which case @cur is
 *         updated to the current state.
*/
static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
struct nbcon_state *new)
{
return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
}
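/*
 * Illustrative sketch (not part of the original file): every state update in
 * this file follows the same read / modify / try_cmpxchg loop built from the
 * helpers above. The function below only demonstrates the loop shape (here
 * clearing a stale handover request) and is not used anywhere.
 */
static inline void example_clear_req_prio(struct console *con)
{
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);
	do {
		new.atom = cur.atom;		/* start from the current state */
		new.req_prio = NBCON_PRIO_NONE;	/* modify the field of interest */
	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
}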
/**
* nbcon_seq_read - Read the current console sequence
* @con: Console to read the sequence of
*
* Return: Sequence number of the next record to print on @con.
*/
u64 nbcon_seq_read(struct console *con)
{
unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
return __ulseq_to_u64seq(prb, nbcon_seq);
}
/**
* nbcon_seq_force - Force console sequence to a specific value
* @con: Console to work on
* @seq: Sequence number value to set
*
* Only to be used during init (before registration) or in extreme situations
* (such as panic with CONSOLE_REPLAY_ALL).
*/
void nbcon_seq_force(struct console *con, u64 seq)
{
/*
* If the specified record no longer exists, the oldest available record
* is chosen. This is especially important on 32bit systems because only
* the lower 32 bits of the sequence number are stored. The upper 32 bits
* are derived from the sequence numbers available in the ringbuffer.
*/
u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
}
/**
* nbcon_seq_try_update - Try to update the console sequence number
* @ctxt: Pointer to an acquire context that contains
* all information about the acquire mode
* @new_seq: The new sequence number to set
*
* @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
 * the 64bit value). This could be a different value from @new_seq if
 * nbcon_seq_force() was used or the current context no longer owns the
 * console. In the latter case, it will stop printing anyway.
*/
static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
{
unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
struct console *con = ctxt->console;
if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
__u64seq_to_ulseq(new_seq))) {
ctxt->seq = new_seq;
} else {
ctxt->seq = nbcon_seq_read(con);
}
}
/**
* nbcon_context_try_acquire_direct - Try to acquire directly
* @ctxt: The context of the caller
* @cur: The current console state
* @is_reacquire: This acquire is a reacquire
*
* Acquire the console when it is released. Also acquire the console when
* the current owner has a lower priority and the console is in a safe state.
*
 * Return: 0 on success. Otherwise, an error code on failure. On failure,
 * @cur is also updated to the latest console state.
*
* Errors:
*
* -EPERM: A panic is in progress and this is neither the panic
* CPU nor is this a reacquire. Or the current owner or
* waiter has the same or higher priority. No acquire
* method can be successful in these cases.
*
 * -EBUSY: The current owner has a lower priority but the console is
 * in an unsafe state. The caller should try using
 * the handover acquire method.
*/
static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
struct nbcon_state *cur, bool is_reacquire)
{
unsigned int cpu = smp_processor_id();
struct console *con = ctxt->console;
struct nbcon_state new;
do {
/*
* Panic does not imply that the console is owned. However,
* since all non-panic CPUs are stopped during panic(), it
* is safer to have them avoid gaining console ownership.
*
* If this acquire is a reacquire (and an unsafe takeover
* has not previously occurred) then it is allowed to attempt
* a direct acquire in panic. This gives console drivers an
* opportunity to perform any necessary cleanup if they were
* interrupted by the panic CPU while printing.
*/
if (panic_on_other_cpu() &&
(!is_reacquire || cur->unsafe_takeover)) {
return -EPERM;
}
if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
return -EPERM;
if (cur->unsafe)
return -EBUSY;
/*
* The console should never be safe for a direct acquire
* if an unsafe hostile takeover has ever happened.
*/
WARN_ON_ONCE(cur->unsafe_takeover);
new.atom = cur->atom;
new.prio = ctxt->prio;
new.req_prio = NBCON_PRIO_NONE;
new.unsafe = cur->unsafe_takeover;
new.cpu = cpu;
} while (!nbcon_state_try_cmpxchg(con, cur, &new));
return 0;
}
static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
{
/*
* The request context is well defined by the @req_prio because:
*
* - Only a context with a priority higher than the owner can become
* a waiter.
* - Only a context with a priority higher than the waiter can
* directly take over the request.
* - There are only three priorities.
* - Only one CPU is allowed to request PANIC priority.
* - Lower priorities are ignored during panic() until reboot.
*
* As a result, the following scenario is *not* possible:
*
* 1. This context is currently a waiter.
* 2. Another context with a higher priority than this context
* directly takes ownership.
* 3. The higher priority context releases the ownership.
* 4. Another lower priority context takes the ownership.
* 5. Another context with the same priority as this context
* creates a request and starts waiting.
*
* Event #1 implies this context is EMERGENCY.
* Event #2 implies the new context is PANIC.
* Event #3 occurs when panic() has flushed the console.
* Event #4 occurs when a non-panic CPU reacquires.
* Event #5 is not possible due to the panic_on_other_cpu() check
* in nbcon_context_try_acquire_handover().
*/
return (cur->req_prio == expected_prio);
}
/**
* nbcon_context_try_acquire_requested - Try to acquire after having
* requested a handover
* @ctxt: The context of the caller
* @cur: The current console state
*
* This is a helper function for nbcon_context_try_acquire_handover().
* It is called when the console is in an unsafe state. The current
* owner will release the console on exit from the unsafe region.
*
* Return: 0 on success and @cur is updated to the new console state.
* Otherwise an error code on failure.
*
* Errors:
*
* -EPERM: A panic is in progress and this is not the panic CPU
* or this context is no longer the waiter.
*
* -EBUSY: The console is still locked. The caller should
* continue waiting.
*
* Note: The caller must still remove the request when an error has occurred
* except when this context is no longer the waiter.
*/
static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
struct nbcon_state *cur)
{
unsigned int cpu = smp_processor_id();
struct console *con = ctxt->console;
struct nbcon_state new;
/* Note that the caller must still remove the request! */
if (panic_on_other_cpu())
return -EPERM;
/*
* Note that the waiter will also change if there was an unsafe
* hostile takeover.
*/
if (!nbcon_waiter_matches(cur, ctxt->prio))
return -EPERM;
/* If still locked, caller should continue waiting. */
if (cur->prio != NBCON_PRIO_NONE)
return -EBUSY;
/*
* The previous owner should have never released ownership
* in an unsafe region.
*/
WARN_ON_ONCE(cur->unsafe);
new.atom = cur->atom;
new.prio = ctxt->prio;
new.req_prio = NBCON_PRIO_NONE;
new.unsafe = cur->unsafe_takeover;
new.cpu = cpu;
if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
/*
* The acquire could fail only when it has been taken
* over by a higher priority context.
*/
WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
return -EPERM;
}
/* Handover success. This context now owns the console. */
return 0;
}
/**
* nbcon_context_try_acquire_handover - Try to acquire via handover
* @ctxt: The context of the caller
* @cur: The current console state
*
* The function must be called only when the context has higher priority
* than the current owner and the console is in an unsafe state.
* It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
*
* The function sets "req_prio" field to make the current owner aware of
* the request. Then it waits until the current owner releases the console,
* or an even higher context takes over the request, or timeout expires.
*
* The current owner checks the "req_prio" field on exit from the unsafe
* region and releases the console. It does not touch the "req_prio" field
* so that the console stays reserved for the waiter.
*
 * Return: 0 on success. Otherwise, an error code on failure. On failure,
 * @cur is also updated to the latest console state.
*
* Errors:
*
* -EPERM: A panic is in progress and this is not the panic CPU.
* Or a higher priority context has taken over the
* console or the handover request.
*
 * -EBUSY: The current owner is on the same CPU, so the handshake
 * cannot work. Or the current owner is not willing to
 * wait (zero timeout). Or the console did not enter a
 * safe state before the timeout passed. The
* caller might still use the unsafe hostile takeover
* when allowed.
*
* -EAGAIN: @cur has changed when creating the handover request.
* The caller should retry with direct acquire.
*/
static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
struct nbcon_state *cur)
{
unsigned int cpu = smp_processor_id();
struct console *con = ctxt->console;
struct nbcon_state new;
int timeout;
int request_err = -EBUSY;
/*
* Check that the handover is called when the direct acquire failed
* with -EBUSY.
*/
WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
WARN_ON_ONCE(!cur->unsafe);
/*
* Panic does not imply that the console is owned. However, it
* is critical that non-panic CPUs during panic are unable to
* wait for a handover in order to satisfy the assumptions of
* nbcon_waiter_matches(). In particular, the assumption that
* lower priorities are ignored during panic.
*/
if (panic_on_other_cpu())
return -EPERM;
/* Handover is not possible on the same CPU. */
if (cur->cpu == cpu)
return -EBUSY;
/*
* Console stays unsafe after an unsafe takeover until re-initialized.
* Waiting is not going to help in this case.
*/
if (cur->unsafe_takeover)
return -EBUSY;
/* Is the caller willing to wait? */
if (ctxt->spinwait_max_us == 0)
return -EBUSY;
/*
* Setup a request for the handover. The caller should try to acquire
* the console directly when the current state has been modified.
*/
new.atom = cur->atom;
new.req_prio = ctxt->prio;
if (!nbcon_state_try_cmpxchg(con, cur, &new))
return -EAGAIN;
cur->atom = new.atom;
/* Wait until there is no owner and then acquire the console. */
for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
/* On successful acquire, this request is cleared. */
request_err = nbcon_context_try_acquire_requested(ctxt, cur);
if (!request_err)
return 0;
/*
* If the acquire should be aborted, it must be ensured
* that the request is removed before returning to caller.
*/
if (request_err == -EPERM)
break;
udelay(1);
/* Re-read the state because some time has passed. */
nbcon_state_read(con, cur);
}
/* Timed out or aborted. Carefully remove handover request. */
do {
/*
* No need to remove request if there is a new waiter. This
* can only happen if a higher priority context has taken over
* the console or the handover request.
*/
if (!nbcon_waiter_matches(cur, ctxt->prio))
return -EPERM;
/* Unset request for handover. */
new.atom = cur->atom;
new.req_prio = NBCON_PRIO_NONE;
if (nbcon_state_try_cmpxchg(con, cur, &new)) {
/*
* Request successfully unset. Report failure of
* acquiring via handover.
*/
cur->atom = new.atom;
return request_err;
}
/*
* Unable to remove request. Try to acquire in case
* the owner has released the lock.
*/
} while (nbcon_context_try_acquire_requested(ctxt, cur));
/* Lucky timing. The acquire succeeded while removing the request. */
return 0;
}
/**
* nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
* @ctxt: The context of the caller
* @cur: The current console state
*
* Acquire the console even in the unsafe state.
*
* It can be permitted by setting the 'allow_unsafe_takeover' field only
* by the final attempt to flush messages in panic().
*
* Return: 0 on success. -EPERM when not allowed by the context.
*/
static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
struct nbcon_state *cur)
{
unsigned int cpu = smp_processor_id();
struct console *con = ctxt->console;
struct nbcon_state new;
if (!ctxt->allow_unsafe_takeover)
return -EPERM;
/* Ensure caller is allowed to perform unsafe hostile takeovers. */
if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
return -EPERM;
/*
* Check that try_acquire_direct() and try_acquire_handover() returned
* -EBUSY in the right situation.
*/
WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
WARN_ON_ONCE(cur->unsafe != true);
do {
new.atom = cur->atom;
new.cpu = cpu;
new.prio = ctxt->prio;
new.unsafe |= cur->unsafe_takeover;
new.unsafe_takeover |= cur->unsafe;
} while (!nbcon_state_try_cmpxchg(con, cur, &new));
return 0;
}
static struct printk_buffers panic_nbcon_pbufs;
/**
* nbcon_context_try_acquire - Try to acquire nbcon console
* @ctxt: The context of the caller
* @is_reacquire: This acquire is a reacquire
*
* Context: Under @ctxt->con->device_lock() or local_irq_save().
* Return: True if the console was acquired. False otherwise.
*
* If the caller allowed an unsafe hostile takeover, on success the
* caller should check the current console state to see if it is
* in an unsafe state. Otherwise, on success the caller may assume
* the console is not in an unsafe state.
*/
static bool nbcon_context_try_acquire(struct nbcon_context *ctxt, bool is_reacquire)
{
struct console *con = ctxt->console;
struct nbcon_state cur;
int err;
nbcon_state_read(con, &cur);
try_again:
err = nbcon_context_try_acquire_direct(ctxt, &cur, is_reacquire);
if (err != -EBUSY)
goto out;
err = nbcon_context_try_acquire_handover(ctxt, &cur);
if (err == -EAGAIN)
goto try_again;
if (err != -EBUSY)
goto out;
err = nbcon_context_try_acquire_hostile(ctxt, &cur);
out:
if (err)
return false;
/* Acquire succeeded. */
/* Assign the appropriate buffer for this context. */
if (panic_on_this_cpu())
ctxt->pbufs = &panic_nbcon_pbufs;
else
ctxt->pbufs = con->pbufs;
/* Set the record sequence for this context to print. */
ctxt->seq = nbcon_seq_read(ctxt->console);
return true;
}
static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
int expected_prio)
{
/*
* A similar function, nbcon_waiter_matches(), only deals with
* EMERGENCY and PANIC priorities. However, this function must also
* deal with the NORMAL priority, which requires additional checks
* and constraints.
*
* For the case where preemption and interrupts are disabled, it is
* enough to also verify that the owning CPU has not changed.
*
* For the case where preemption or interrupts are enabled, an
* external synchronization method *must* be used. In particular,
* the driver-specific locking mechanism used in device_lock()
* (including disabling migration) should be used. It prevents
* scenarios such as:
*
* 1. [Task A] owns a context with NBCON_PRIO_NORMAL on [CPU X] and
* is scheduled out.
* 2. Another context takes over the lock with NBCON_PRIO_EMERGENCY
* and releases it.
* 3. [Task B] acquires a context with NBCON_PRIO_NORMAL on [CPU X]
* and is scheduled out.
* 4. [Task A] gets running on [CPU X] and sees that the console is
 * still owned by a task on [CPU X] with NBCON_PRIO_NORMAL. Thus
* [Task A] thinks it is the owner when it is not.
*/
if (cur->prio != expected_prio)
return false;
if (cur->cpu != expected_cpu)
return false;
return true;
}
/**
* nbcon_context_release - Release the console
* @ctxt: The nbcon context from nbcon_context_try_acquire()
*/
static void nbcon_context_release(struct nbcon_context *ctxt)
{
unsigned int cpu = smp_processor_id();
struct console *con = ctxt->console;
struct nbcon_state cur;
struct nbcon_state new;
nbcon_state_read(con, &cur);
do {
if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
break;
new.atom = cur.atom;
new.prio = NBCON_PRIO_NONE;
/*
* If @unsafe_takeover is set, it is kept set so that
* the state remains permanently unsafe.
*/
new.unsafe |= cur.unsafe_takeover;
} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
ctxt->pbufs = NULL;
}
/**
* nbcon_context_can_proceed - Check whether ownership can proceed
* @ctxt: The nbcon context from nbcon_context_try_acquire()
* @cur: The current console state
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
*
* Must be invoked when entering the unsafe state to make sure that it still
* owns the lock. Also must be invoked when exiting the unsafe context
* to eventually free the lock for a higher priority context which asked
* for the friendly handover.
*
 * It can be called inside an unsafe section when the console is only
 * temporarily in a safe state, instead of exiting and re-entering the
 * unsafe state.
*
* Also it can be called in the safe context before doing an expensive
* safe operation. It does not make sense to do the operation when
* a higher priority context took the lock.
*
* When this function returns false then the calling context no longer owns
* the console and is no longer allowed to go forward. In this case it must
* back out immediately and carefully. The buffer content is also no longer
* trusted since it no longer belongs to the calling context.
*/
static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
{
unsigned int cpu = smp_processor_id();
/* Make sure this context still owns the console. */
if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
return false;
/* The console owner can proceed if there is no waiter. */
if (cur->req_prio == NBCON_PRIO_NONE)
return true;
/*
* A console owner within an unsafe region is always allowed to
* proceed, even if there are waiters. It can perform a handover
* when exiting the unsafe region. Otherwise the waiter will
* need to perform an unsafe hostile takeover.
*/
if (cur->unsafe)
return true;
/* Waiters always have higher priorities than owners. */
WARN_ON_ONCE(cur->req_prio <= cur->prio);
/*
* Having a safe point for take over and eventually a few
* duplicated characters or a full line is way better than a
* hostile takeover. Post processing can take care of the garbage.
* Release and hand over.
*/
nbcon_context_release(ctxt);
/*
* It is not clear whether the waiter really took over ownership. The
* outermost callsite must make the final decision whether console
* ownership is needed for it to proceed. If yes, it must reacquire
* ownership (possibly hostile) before carefully proceeding.
*
* The calling context no longer owns the console so go back all the
* way instead of trying to implement reacquire heuristics in tons of
* places.
*/
return false;
}
/**
* nbcon_can_proceed - Check whether ownership can proceed
* @wctxt: The write context that was handed to the write function
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
*
* It is used in nbcon_enter_unsafe() to make sure that it still owns the
* lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
* for a higher priority context which asked for the friendly handover.
*
 * It can be called inside an unsafe section when the console is only
 * temporarily in a safe state, instead of exiting and re-entering the unsafe state.
*
* Also it can be called in the safe context before doing an expensive safe
* operation. It does not make sense to do the operation when a higher
* priority context took the lock.
*
* When this function returns false then the calling context no longer owns
* the console and is no longer allowed to go forward. In this case it must
* back out immediately and carefully. The buffer content is also no longer
* trusted since it no longer belongs to the calling context.
*/
bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
struct console *con = ctxt->console;
struct nbcon_state cur;
nbcon_state_read(con, &cur);
return nbcon_context_can_proceed(ctxt, &cur);
}
EXPORT_SYMBOL_GPL(nbcon_can_proceed);
#define nbcon_context_enter_unsafe(c) __nbcon_context_update_unsafe(c, true)
#define nbcon_context_exit_unsafe(c) __nbcon_context_update_unsafe(c, false)
/**
* __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
* @ctxt: The nbcon context from nbcon_context_try_acquire()
* @unsafe: The new value for the unsafe bit
*
* Return: True if the unsafe state was updated and this context still
* owns the console. Otherwise false if ownership was handed
* over or taken.
*
* This function allows console owners to modify the unsafe status of the
* console.
*
* When this function returns false then the calling context no longer owns
* the console and is no longer allowed to go forward. In this case it must
* back out immediately and carefully. The buffer content is also no longer
* trusted since it no longer belongs to the calling context.
*
* Internal helper to avoid duplicated code.
*/
static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
{
struct console *con = ctxt->console;
struct nbcon_state cur;
struct nbcon_state new;
nbcon_state_read(con, &cur);
do {
/*
* The unsafe bit must not be cleared if an
* unsafe hostile takeover has occurred.
*/
if (!unsafe && cur.unsafe_takeover)
goto out;
if (!nbcon_context_can_proceed(ctxt, &cur))
return false;
new.atom = cur.atom;
new.unsafe = unsafe;
} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
cur.atom = new.atom;
out:
return nbcon_context_can_proceed(ctxt, &cur);
}
static void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
char *buf, unsigned int len)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
struct console *con = ctxt->console;
struct nbcon_state cur;
wctxt->outbuf = buf;
wctxt->len = len;
nbcon_state_read(con, &cur);
wctxt->unsafe_takeover = cur.unsafe_takeover;
}
/**
* nbcon_enter_unsafe - Enter an unsafe region in the driver
* @wctxt: The write context that was handed to the write function
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
*
* When this function returns false then the calling context no longer owns
* the console and is no longer allowed to go forward. In this case it must
* back out immediately and carefully. The buffer content is also no longer
* trusted since it no longer belongs to the calling context.
*/
bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
bool is_owner;
is_owner = nbcon_context_enter_unsafe(ctxt);
if (!is_owner)
nbcon_write_context_set_buf(wctxt, NULL, 0);
return is_owner;
}
EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
/**
* nbcon_exit_unsafe - Exit an unsafe region in the driver
* @wctxt: The write context that was handed to the write function
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
*
* When this function returns false then the calling context no longer owns
* the console and is no longer allowed to go forward. In this case it must
* back out immediately and carefully. The buffer content is also no longer
* trusted since it no longer belongs to the calling context.
*/
bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
bool ret;
ret = nbcon_context_exit_unsafe(ctxt);
if (!ret)
nbcon_write_context_set_buf(wctxt, NULL, 0);
return ret;
}
EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
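/*
* Example (illustrative only, not part of the original source): a driver's
* write callback can bracket its hardware access with an unsafe section
* roughly like this; the foo_uart_emit() helper is hypothetical.
*
*	static void foo_console_write_atomic(struct console *con,
*					     struct nbcon_write_context *wctxt)
*	{
*		if (!nbcon_enter_unsafe(wctxt))
*			return;
*		foo_uart_emit(con, wctxt->outbuf, wctxt->len);
*		nbcon_exit_unsafe(wctxt);
*	}
*/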
/**
* nbcon_reacquire_nobuf - Reacquire a console after losing ownership
* while printing
* @wctxt: The write context that was handed to the write callback
*
* Since ownership can be lost at any time due to handover or takeover, a
* printing context _must_ be prepared to back out immediately and
* carefully. However, there are scenarios where the printing context must
* reacquire ownership in order to finalize or revert hardware changes.
*
* This function allows a printing context to reacquire ownership using the
* same priority as its previous ownership.
*
* Note that after a successful reacquire the printing context will have no
* output buffer because that has been lost. This function cannot be used to
* resume printing.
*/
void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
while (!nbcon_context_try_acquire(ctxt, true))
cpu_relax();
nbcon_write_context_set_buf(wctxt, NULL, 0);
}
EXPORT_SYMBOL_GPL(nbcon_reacquire_nobuf);
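/*
* Example (illustrative only, not part of the original source): a sketch of a
* write callback that puts the hardware into a modified state and uses
* nbcon_reacquire_nobuf() to restore that state if ownership is lost along
* the way; the foo_*() helpers are hypothetical.
*
*	static void foo_console_write_thread(struct console *con,
*					     struct nbcon_write_context *wctxt)
*	{
*		foo_uart_tx_irq_disable(con);
*		if (nbcon_enter_unsafe(wctxt)) {
*			foo_uart_emit(con, wctxt->outbuf, wctxt->len);
*			nbcon_exit_unsafe(wctxt);
*		}
*		if (!wctxt->outbuf)
*			nbcon_reacquire_nobuf(wctxt);
*		foo_uart_tx_irq_enable(con);
*	}
*/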
/**
* nbcon_emit_next_record - Emit a record in the acquired context
* @wctxt: The write context that will be handed to the write function
* @use_atomic: True if the write_atomic() callback is to be used
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
*
* When this function returns false then the calling context no longer owns
* the console and is no longer allowed to go forward. In this case it must
* back out immediately and carefully. The buffer content is also no longer
* trusted since it no longer belongs to the calling context. If the caller
* wants to do more it must reacquire the console first.
*
* When true is returned, @wctxt->ctxt.backlog indicates whether there are
* still records pending in the ringbuffer.
*/
static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
struct console *con = ctxt->console;
bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
struct printk_message pmsg = {
.pbufs = ctxt->pbufs,
};
unsigned long con_dropped;
struct nbcon_state cur;
unsigned long dropped;
unsigned long ulseq;
/*
* This function should never be called for consoles that have not
* implemented the necessary callback for writing: i.e. legacy
* consoles and, when atomic, nbcon consoles with no write_atomic().
* Handle it as if ownership was lost and try to continue.
*
* Note that for nbcon consoles the write_thread() callback is
* mandatory and was already checked in nbcon_alloc().
*/
if (WARN_ON_ONCE((use_atomic && !con->write_atomic) ||
!(console_srcu_read_flags(con) & CON_NBCON))) {
nbcon_context_release(ctxt);
return false;
}
/*
* The printk buffers are filled within an unsafe section. This
* prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
* clobbering each other.
*/
if (!nbcon_context_enter_unsafe(ctxt))
return false;
ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
if (!ctxt->backlog)
return nbcon_context_exit_unsafe(ctxt);
/*
* @con->dropped is not protected in case of an unsafe hostile
* takeover. In that situation the update can be racy so
* annotate it accordingly.
*/
con_dropped = data_race(READ_ONCE(con->dropped));
dropped = con_dropped + pmsg.dropped;
if (dropped && !is_extended)
console_prepend_dropped(&pmsg, dropped);
/*
* If the previous owner was assigned the same record, this context
* has taken over ownership and is replaying the record. Prepend a
* message to let the user know the record is replayed.
*/
ulseq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_prev_seq));
if (__ulseq_to_u64seq(prb, ulseq) == pmsg.seq) {
console_prepend_replay(&pmsg);
} else {
/*
* Ensure this context is still the owner before trying to
* update @nbcon_prev_seq. Otherwise the value in @ulseq may
* not be from the previous owner and instead be some later
* value from the context that took over ownership.
*/
nbcon_state_read(con, &cur);
if (!nbcon_context_can_proceed(ctxt, &cur))
return false;
atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_prev_seq), &ulseq,
__u64seq_to_ulseq(pmsg.seq));
}
if (!nbcon_context_exit_unsafe(ctxt))
return false;
/* For skipped records just update seq/dropped in @con. */
if (pmsg.outbuf_len == 0)
goto update_con;
/* Initialize the write context for driver callbacks. */
nbcon_write_context_set_buf(wctxt, &pmsg.pbufs->outbuf[0], pmsg.outbuf_len);
if (use_atomic)
con->write_atomic(con, wctxt);
else
con->write_thread(con, wctxt);
if (!wctxt->outbuf) {
/*
* Ownership was lost and reacquired by the driver. Handle it
* as if ownership was lost.
*/
nbcon_context_release(ctxt);
return false;
}
/*
* Ownership may have been lost but _not_ reacquired by the driver.
* This case is detected and handled when entering unsafe to update
* dropped/seq values.
*/
/*
* Since any dropped message was successfully output, reset the
* dropped count for the console.
*/
dropped = 0;
update_con:
/*
* The dropped count and the sequence number are updated within an
* unsafe section. This limits update races to the panic context and
* allows the panic context to win.
*/
if (!nbcon_context_enter_unsafe(ctxt))
return false;
if (dropped != con_dropped) {
/* Counterpart to the READ_ONCE() above. */
WRITE_ONCE(con->dropped, dropped);
}
nbcon_seq_try_update(ctxt, pmsg.seq + 1);
return nbcon_context_exit_unsafe(ctxt);
}
/*
* nbcon_emit_one - Print one record for an nbcon console using the
* specified callback
* @wctxt: An initialized write context struct to use for this context
* @use_atomic: True if the write_atomic() callback is to be used
*
* Return: True, when a record has been printed and there are still
* pending records. The caller might want to continue flushing.
*
* False, when there is no pending record, or when the console
* context cannot be acquired, or the ownership has been lost.
* The caller should give up. Either the job is done, cannot be
* done, or will be handled by the owning context.
*
* This is an internal helper to handle the locking of the console before
* calling nbcon_emit_next_record().
*/
static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
struct console *con = ctxt->console;
unsigned long flags;
bool ret = false;
if (!use_atomic) {
con->device_lock(con, &flags);
/*
* Ensure this stays on the CPU to make handover and
* takeover possible.
*/
cant_migrate();
}
if (!nbcon_context_try_acquire(ctxt, false))
goto out;
/*
* nbcon_emit_next_record() returns false when the console was
* handed over or taken over. In both cases the context is no
* longer valid.
*
* The higher priority printing context takes over responsibility
* to print the pending records.
*/
if (!nbcon_emit_next_record(wctxt, use_atomic))
goto out;
nbcon_context_release(ctxt);
ret = ctxt->backlog;
out:
if (!use_atomic)
con->device_unlock(con, flags);
return ret;
}
/**
* nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
* @con: Console to operate on
* @ctxt: The nbcon context from nbcon_context_try_acquire()
*
* Return: True if the thread should shutdown or if the console is
* allowed to print and a record is available. False otherwise.
*
* After the thread wakes up, it must first check if it should shutdown before
* attempting any printing.
*/
static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
{
bool ret = false;
short flags;
int cookie;
if (kthread_should_stop())
return true;
cookie = console_srcu_read_lock();
flags = console_srcu_read_flags(con);
if (console_is_usable(con, flags, false)) {
/* Bring the sequence in @ctxt up to date */
ctxt->seq = nbcon_seq_read(con);
ret = prb_read_valid(prb, ctxt->seq, NULL);
}
console_srcu_read_unlock(cookie);
return ret;
}
/**
* nbcon_kthread_func - The printer thread function
* @__console: Console to operate on
*
* Return: 0
*/
static int nbcon_kthread_func(void *__console)
{
struct console *con = __console;
struct nbcon_write_context wctxt = {
.ctxt.console = con,
.ctxt.prio = NBCON_PRIO_NORMAL,
};
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
short con_flags;
bool backlog;
int cookie;
wait_for_event:
/*
* Guarantee this task is visible on the rcuwait before
* checking the wake condition.
*
* The full memory barrier within set_current_state() of
* ___rcuwait_wait_event() pairs with the full memory
* barrier within rcuwait_has_sleeper().
*
* This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
*/
rcuwait_wait_event(&con->rcuwait,
nbcon_kthread_should_wakeup(con, ctxt),
TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */
do {
if (kthread_should_stop())
return 0;
backlog = false;
/*
* Keep the srcu read lock around the entire operation so that
* synchronize_srcu() can guarantee that the kthread stopped
* or suspended printing.
*/
cookie = console_srcu_read_lock();
con_flags = console_srcu_read_flags(con);
if (console_is_usable(con, con_flags, false))
backlog = nbcon_emit_one(&wctxt, false);
console_srcu_read_unlock(cookie);
cond_resched();
} while (backlog);
goto wait_for_event;
}
/**
* nbcon_irq_work - irq work to wake console printer thread
* @irq_work: The irq work to operate on
*/
static void nbcon_irq_work(struct irq_work *irq_work)
{
struct console *con = container_of(irq_work, struct console, irq_work);
nbcon_kthread_wake(con);
}
static inline bool rcuwait_has_sleeper(struct rcuwait *w)
{
/*
* Guarantee any new records can be seen by tasks preparing to wait
* before this context checks if the rcuwait is empty.
*
* This full memory barrier pairs with the full memory barrier within
* set_current_state() of ___rcuwait_wait_event(), which is called
* after prepare_to_rcuwait() adds the waiter but before it has
* checked the wait condition.
*
* This pairs with nbcon_kthread_func:A.
*/
smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
return rcuwait_active(w);
}
/**
* nbcon_kthreads_wake - Wake up printing threads using irq_work
*/
void nbcon_kthreads_wake(void)
{
struct console *con;
int cookie;
if (!printk_kthreads_running)
return;
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
if (!(console_srcu_read_flags(con) & CON_NBCON))
continue;
/*
* Only schedule irq_work if the printing thread is
* actively waiting. If not waiting, the thread will
* notice by itself that it has work to do.
*/
if (rcuwait_has_sleeper(&con->rcuwait))
irq_work_queue(&con->irq_work);
}
console_srcu_read_unlock(cookie);
}
/*
* nbcon_kthread_stop - Stop a console printer thread
* @con: Console to operate on
*/
void nbcon_kthread_stop(struct console *con)
{
lockdep_assert_console_list_lock_held();
if (!con->kthread)
return;
kthread_stop(con->kthread);
con->kthread = NULL;
}
/**
* nbcon_kthread_create - Create a console printer thread
* @con: Console to operate on
*
* Return: True if the kthread was started or already exists.
* Otherwise false and @con must not be registered.
*
* This function is called when it is expected that nbcon consoles are
* flushed using the kthread. The messages printed with NBCON_PRIO_NORMAL
* will no longer be flushed by the legacy loop. This is why failure must
* be fatal for console registration.
*
* If @con was already registered and this function fails, @con must be
* unregistered before the global state variable @printk_kthreads_running
* can be set.
*/
bool nbcon_kthread_create(struct console *con)
{
struct task_struct *kt;
lockdep_assert_console_list_lock_held();
if (con->kthread)
return true;
kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
if (WARN_ON(IS_ERR(kt))) {
con_printk(KERN_ERR, con, "failed to start printing thread\n");
return false;
}
con->kthread = kt;
/*
* It is important that console printing threads are scheduled
* shortly after a printk call and with generous runtime budgets.
*/
sched_set_normal(con->kthread, -20);
return true;
}
/* Track the nbcon emergency nesting per CPU. */
static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
/**
* nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
*
* Context: For reading, any context. For writing, any context that cannot
* be migrated to another CPU.
* Return: Either a pointer to the per CPU emergency nesting counter of
* the current CPU or to the init data during early boot.
*
* The function is safe for reading per-CPU variables in any context because
* preemption is disabled if the current CPU is in the emergency state. See
* also nbcon_cpu_emergency_enter().
*/
static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
{
/*
* The value of __printk_percpu_data_ready gets set in normal
* context and before SMP initialization. As a result it could
* never change while inside an nbcon emergency section.
*/
if (!printk_percpu_data_ready())
return &early_nbcon_pcpu_emergency_nesting;
return raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
}
/**
* nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
* printing on the current CPU
*
* Context: Any context.
* Return: The nbcon_prio to use for acquiring an nbcon console in this
* context for printing.
*
* The function is safe for reading per-CPU data in any context because
* preemption is disabled if the current CPU is in the emergency or panic
* state.
*/
enum nbcon_prio nbcon_get_default_prio(void)
{
unsigned int *cpu_emergency_nesting;
if (panic_on_this_cpu())
return NBCON_PRIO_PANIC;
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
if (*cpu_emergency_nesting)
return NBCON_PRIO_EMERGENCY;
return NBCON_PRIO_NORMAL;
}
/**
* nbcon_legacy_emit_next_record - Print one record for an nbcon console
* in legacy contexts
* @con: The console to print on
* @handover: Will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding
* both the console_lock and the SRCU read lock. Otherwise it
* is set to false.
* @cookie: The cookie from the SRCU read lock.
* @use_atomic: Set true when called in an atomic or unknown context.
* It affects which nbcon callback will be used: write_atomic()
* or write_thread().
*
* When false, the write_thread() callback is used and would be
* called in a preemptible context unless disabled by the
* device_lock. The legacy handover is not allowed in this mode.
*
* Context: Any context except NMI.
* Return: True, when a record has been printed and there are still
* pending records. The caller might want to continue flushing.
*
* False, when there is no pending record, or when the console
* context cannot be acquired, or the ownership has been lost.
* The caller should give up. Either the job is done, cannot be
* done, or will be handled by the owning context.
*
* This function is meant to be called by console_flush_all() to print records
* on nbcon consoles from legacy context (printing via console unlocking).
* Essentially it is the nbcon version of console_emit_next_record().
*/
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie, bool use_atomic)
{
struct nbcon_write_context wctxt = { };
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
unsigned long flags;
bool progress;
ctxt->console = con;
ctxt->prio = nbcon_get_default_prio();
if (use_atomic) {
/*
* In an atomic or unknown context, use the same procedure as
* in console_emit_next_record(). It allows to handover.
*/
printk_safe_enter_irqsave(flags);
console_lock_spinning_enable();
stop_critical_timings();
}
progress = nbcon_emit_one(&wctxt, use_atomic);
if (use_atomic) {
start_critical_timings();
*handover = console_lock_spinning_disable_and_check(cookie);
printk_safe_exit_irqrestore(flags);
} else {
/* Non-atomic does not perform legacy spinning handovers. */
*handover = false;
}
return progress;
}
/**
* __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
* write_atomic() callback
* @con: The nbcon console to flush
* @stop_seq: Flush up until this record
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*
* Return: 0 if @con was flushed up to @stop_seq. Otherwise, error code on
* failure.
*
* Errors:
*
* -EPERM: Unable to acquire console ownership.
*
* -EAGAIN: Another context took over ownership while printing.
*
* -ENOENT: A record before @stop_seq is not available.
*
* If flushing up to @stop_seq was not successful, it only makes sense for the
* caller to try again when -EAGAIN was returned. When -EPERM is returned,
* this context is not allowed to acquire the console. When -ENOENT is
* returned, it cannot be expected that the unfinalized record will become
* available.
*/
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
bool allow_unsafe_takeover)
{
struct nbcon_write_context wctxt = { };
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
int err = 0;
ctxt->console = con;
ctxt->spinwait_max_us = 2000;
ctxt->prio = nbcon_get_default_prio();
ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
if (!nbcon_context_try_acquire(ctxt, false))
return -EPERM;
while (nbcon_seq_read(con) < stop_seq) {
/*
* nbcon_emit_next_record() returns false when the console was
* handed over or taken over. In both cases the context is no
* longer valid.
*/
if (!nbcon_emit_next_record(&wctxt, true))
return -EAGAIN;
if (!ctxt->backlog) {
/* Are there reserved but not yet finalized records? */
if (nbcon_seq_read(con) < stop_seq)
err = -ENOENT;
break;
}
}
nbcon_context_release(ctxt);
return err;
}
/**
* nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
* write_atomic() callback
* @con: The nbcon console to flush
* @stop_seq: Flush up until this record
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*
* This will stop flushing before @stop_seq if another context has ownership.
* That context is then responsible for the flushing. Likewise, if new records
* are added while this context was flushing and there is no other context
* to handle the printing, this context must also flush those records.
*/
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
bool allow_unsafe_takeover)
{
struct console_flush_type ft;
unsigned long flags;
int err;
again:
/*
* Atomic flushing does not use console driver synchronization (i.e.
* it does not hold the port lock for uart consoles). Therefore IRQs
* must be disabled to avoid being interrupted and then calling into
* a driver that will deadlock trying to acquire console ownership.
*/
local_irq_save(flags);
err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
local_irq_restore(flags);
/*
* If there was a new owner (-EPERM, -EAGAIN), that context is
* responsible for completing.
*
* Do not wait for records not yet finalized (-ENOENT) to avoid a
* possible deadlock. They will either get flushed by the writer or
* eventually skipped on panic CPU.
*/
if (err)
return;
/*
* If flushing was successful but more records are available, this
* context must flush those remaining records if the printer thread
* is not available to do it.
*/
printk_get_console_flush_type(&ft);
if (!ft.nbcon_offload &&
prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
stop_seq = prb_next_reserve_seq(prb);
goto again;
}
}
/**
* __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
* write_atomic() callback
* @stop_seq: Flush up until this record
* @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
*/
static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
{
struct console *con;
int cookie;
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
if (!(flags & CON_NBCON))
continue;
if (!console_is_usable(con, flags, true))
continue;
if (nbcon_seq_read(con) >= stop_seq)
continue;
nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
}
console_srcu_read_unlock(cookie);
}
/**
* nbcon_atomic_flush_pending - Flush all nbcon consoles using their
* write_atomic() callback
*
* Flush the backlog up through the currently newest record. Any new
* records added while flushing will not be flushed if there is another
* context available to handle the flushing. This is to avoid one CPU
* printing unbounded because other CPUs continue to add records.
*/
void nbcon_atomic_flush_pending(void)
{
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
}
/**
* nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
* write_atomic() callback and allowing unsafe hostile takeovers
*
* Flush the backlog up through the currently newest record. Unsafe hostile
* takeovers will be performed, if necessary.
*/
void nbcon_atomic_flush_unsafe(void)
{
__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
}
/**
* nbcon_cpu_emergency_enter - Enter an emergency section where printk()
* messages for that CPU are flushed directly
*
* Context: Any context. Disables preemption.
*
* When within an emergency section, printk() calls will attempt to flush any
* pending messages in the ringbuffer.
*/
void nbcon_cpu_emergency_enter(void)
{
unsigned int *cpu_emergency_nesting;
preempt_disable();
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
(*cpu_emergency_nesting)++;
}
/**
* nbcon_cpu_emergency_exit - Exit an emergency section
*
* Context: Within an emergency section. Enables preemption.
*/
void nbcon_cpu_emergency_exit(void)
{
unsigned int *cpu_emergency_nesting;
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
(*cpu_emergency_nesting)--;
preempt_enable();
}
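/*
* Example (illustrative only, not part of the original source): a warning or
* crash-dump path can bracket its printk() output so it is flushed directly
* to the nbcon consoles; dump_foo_state() is hypothetical.
*
*	nbcon_cpu_emergency_enter();
*	dump_foo_state();
*	nbcon_cpu_emergency_exit();
*/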
/**
* nbcon_alloc - Allocate and init the nbcon console specific data
* @con: Console to initialize
*
* Return: True if the console was fully allocated and initialized.
* Otherwise @con must not be registered.
*
* When allocation and init were successful, the console must be properly
* freed using nbcon_free() once it is no longer needed.
*/
bool nbcon_alloc(struct console *con)
{
struct nbcon_state state = { };
/* Synchronize the kthread start. */
lockdep_assert_console_list_lock_held();
/* The write_thread() callback is mandatory. */
if (WARN_ON(!con->write_thread))
return false;
rcuwait_init(&con->rcuwait);
init_irq_work(&con->irq_work, nbcon_irq_work);
atomic_long_set(&ACCESS_PRIVATE(con, nbcon_prev_seq), -1UL);
nbcon_state_set(con, &state);
/*
* Initialize @nbcon_seq to the highest possible sequence number so
* that practically speaking it will have nothing to print until a
* desired initial sequence number has been set via nbcon_seq_force().
*/
atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), ULSEQ_MAX(prb));
if (con->flags & CON_BOOT) {
/*
* Boot console printing is synchronized with legacy console
* printing, so boot consoles can share the same global printk
* buffers.
*/
con->pbufs = &printk_shared_pbufs;
} else {
con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
if (!con->pbufs) {
con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
return false;
}
if (printk_kthreads_ready && !have_boot_console) {
if (!nbcon_kthread_create(con)) {
kfree(con->pbufs);
con->pbufs = NULL;
return false;
}
/* Might be the first kthread. */
printk_kthreads_running = true;
}
}
return true;
}
/**
* nbcon_free - Free and cleanup the nbcon console specific data
* @con: Console to free/cleanup nbcon data
*
* Important: @have_nbcon_console must be updated before calling
* this function. In particular, it can be set only when there
* is still another nbcon console registered.
*/
void nbcon_free(struct console *con)
{
struct nbcon_state state = { };
/* Synchronize the kthread stop. */
lockdep_assert_console_list_lock_held();
if (printk_kthreads_running) {
nbcon_kthread_stop(con);
/*
* Might be the last nbcon console.
*
* Do not rely on printk_kthreads_check_locked(). It is not
* called in some code paths, see nbcon_free() callers.
*/
if (!have_nbcon_console)
printk_kthreads_running = false;
}
nbcon_state_set(con, &state);
/* Boot consoles share global printk buffers. */
if (!(con->flags & CON_BOOT))
kfree(con->pbufs);
con->pbufs = NULL;
}
/**
* nbcon_device_try_acquire - Try to acquire nbcon console and enter unsafe
* section
* @con: The nbcon console to acquire
*
* Context: Under the locking mechanism implemented in
* @con->device_lock() including disabling migration.
* Return: True if the console was acquired. False otherwise.
*
* Console drivers will usually use their own internal synchronization
* mechanism to synchronize between console printing and non-printing
* activities (such as setting baud rates). However, nbcon console drivers
* supporting atomic consoles may also want to mark unsafe sections when
* performing non-printing activities in order to synchronize against their
* write_atomic() callback.
*
* This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
* and marks it unsafe for handover/takeover.
*/
bool nbcon_device_try_acquire(struct console *con)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
cant_migrate();
memset(ctxt, 0, sizeof(*ctxt));
ctxt->console = con;
ctxt->prio = NBCON_PRIO_NORMAL;
if (!nbcon_context_try_acquire(ctxt, false))
return false;
if (!nbcon_context_enter_unsafe(ctxt))
return false;
return true;
}
EXPORT_SYMBOL_GPL(nbcon_device_try_acquire);
/**
* nbcon_device_release - Exit unsafe section and release the nbcon console
* @con: The nbcon console acquired in nbcon_device_try_acquire()
*/
void nbcon_device_release(struct console *con)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
struct console_flush_type ft;
int cookie;
if (!nbcon_context_exit_unsafe(ctxt))
return;
nbcon_context_release(ctxt);
/*
* This context must flush any new records added while the console
* was locked if the printer thread is not available to do it. The
* console_srcu_read_lock must be taken to ensure the console is
* usable throughout flushing.
*/
cookie = console_srcu_read_lock();
printk_get_console_flush_type(&ft);
if (console_is_usable(con, console_srcu_read_flags(con), true) &&
!ft.nbcon_offload &&
prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
/*
* If nbcon_atomic flushing is not available, fallback to
* using the legacy loop.
*/
if (ft.nbcon_atomic) {
__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
} else if (ft.legacy_direct) {
if (console_trylock())
console_unlock();
} else if (ft.legacy_offload) {
printk_trigger_flush();
}
}
console_srcu_read_unlock(cookie);
}
EXPORT_SYMBOL_GPL(nbcon_device_release);
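/*
* Example (illustrative only, not part of the original source): a sketch of
* how a driver might guard a non-printing hardware change such as a baud
* rate update; foo_uart_program_divisor() is hypothetical, error handling is
* elided, and the caller is assumed to hold the lock that serves as
* @con->device_lock().
*
*	static void foo_set_baud(struct console *con, unsigned int baud)
*	{
*		if (!nbcon_device_try_acquire(con))
*			return;
*		foo_uart_program_divisor(con, baud);
*		nbcon_device_release(con);
*	}
*/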
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
*/
/* A devmap's primary use is as a backend map for the XDP BPF helper call
* bpf_redirect_map(). Because XDP is mostly concerned with performance we
* spent some effort to ensure the datapath with redirect maps does not use
* any locking. This is a quick note on the details.
*
* We have three possible paths to get into the devmap control plane bpf
* syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
* will invoke an update, delete, or lookup operation. To ensure updates and
* deletes appear atomic from the datapath side xchg() is used to modify the
* netdev_map array. Then because the datapath does a lookup into the netdev_map
* array (read-only) from an RCU critical section we use call_rcu() to wait for
* an rcu grace period before free'ing the old data structures. This ensures the
* datapath always has a valid copy. However, the datapath does a "flush"
* operation that pushes any pending packets in the driver outside the RCU
* critical section. Each bpf_dtab_netdev tracks these pending operations using
* a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
* this list is empty, indicating outstanding flush operations have completed.
*
* BPF syscalls may race with BPF program calls on any of the update, delete
* or lookup operations. As noted above, the xchg() operation also keeps the
* netdev_map consistent in this case. From the devmap side BPF programs
* calling into these operations are the same as multiple user space threads
* making system calls.
*
* Finally, any of the above may race with a netdev_unregister notifier. The
* unregister notifier must search for net devices in the map structure that
* contain a reference to the net device and remove them. This is a two step
* process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
* check to see if the ifindex is the same as the net_device being removed.
* When removing the dev a cmpxchg() is used to ensure the correct dev is
* removed, in the case of a concurrent update or delete operation it is
* possible that the initially referenced dev is no longer in the map. As the
* notifier hook walks the map we know that new dev references can not be
* added by the user because core infrastructure ensures dev_get_by_index()
* calls will fail at this point.
*
* The devmap_hash type is a map type which interprets keys as ifindexes and
* indexes these using a hashmap. This allows maps that use ifindex as key to be
* densely packed instead of having holes in the lookup array for unused
* ifindexes. The setup and packet enqueue/send code is shared between the two
* types of devmap; only the lookup and insertion is different.
*/
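/*
* Example (illustrative only, not part of the original file): the typical
* BPF-side consumer of a devmap, sketched with libbpf's BTF map syntax;
* the map name, its size and the fixed key are made up.
*
*	struct {
*		__uint(type, BPF_MAP_TYPE_DEVMAP);
*		__uint(max_entries, 64);
*		__type(key, __u32);
*		__type(value, struct bpf_devmap_val);
*	} tx_ports SEC(".maps");
*
*	SEC("xdp")
*	int xdp_redirect_egress(struct xdp_md *ctx)
*	{
*		__u32 key = 0;
*
*		return bpf_redirect_map(&tx_ports, key, 0);
*	}
*/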
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>
#define DEV_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
struct xdp_dev_bulk_queue {
struct xdp_frame *q[DEV_MAP_BULK_SIZE];
struct list_head flush_node;
struct net_device *dev;
struct net_device *dev_rx;
struct bpf_prog *xdp_prog;
unsigned int count;
};
struct bpf_dtab_netdev {
struct net_device *dev; /* must be first member, due to tracepoint */
struct hlist_node index_hlist;
struct bpf_prog *xdp_prog;
struct rcu_head rcu;
unsigned int idx;
struct bpf_devmap_val val;
};
struct bpf_dtab {
struct bpf_map map;
struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
struct list_head list;
/* these are only used for DEVMAP_HASH type maps */
struct hlist_head *dev_index_head;
spinlock_t index_lock;
unsigned int items;
u32 n_buckets;
};
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
static struct hlist_head *dev_map_create_hash(unsigned int entries,
int numa_node)
{
int i;
struct hlist_head *hash;
hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
if (hash != NULL)
for (i = 0; i < entries; i++)
INIT_HLIST_HEAD(&hash[i]);
return hash;
}
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
int idx)
{
return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
static int dev_map_alloc_check(union bpf_attr *attr)
{
u32 valsize = attr->value_size;
/* check sanity of attributes. 2 value sizes supported:
* 4 bytes: ifindex
* 8 bytes: ifindex + prog fd
*/
if (attr->max_entries == 0 || attr->key_size != 4 ||
(valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
attr->map_flags & ~DEV_CREATE_FLAG_MASK)
return -EINVAL;
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
/* Hash table size must be power of 2; roundup_pow_of_two()
* can overflow into UB on 32-bit arches
*/
if (attr->max_entries > 1UL << 31)
return -EINVAL;
}
return 0;
}
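/*
* Example (illustrative only, not part of the original file): from user space
* the 8-byte value layout allows attaching a BPF_XDP_DEVMAP program to the
* slot in the same update; map_fd, egress_ifindex and devmap_prog_fd are
* assumed to come from elsewhere.
*
*	struct bpf_devmap_val val = {
*		.ifindex = egress_ifindex,
*		.bpf_prog.fd = devmap_prog_fd,
*	};
*	__u32 key = 0;
*
*	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
*/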
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
/* Lookup returns a pointer straight to dev->ifindex, so make sure the
* verifier prevents writes from the BPF side
*/
attr->map_flags |= BPF_F_RDONLY_PROG;
bpf_map_init_from_attr(&dtab->map, attr);
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
/* Hash table size must be power of 2 */
dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
dtab->map.numa_node);
if (!dtab->dev_index_head)
return -ENOMEM;
spin_lock_init(&dtab->index_lock);
} else {
dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)
return -ENOMEM;
}
return 0;
}
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
struct bpf_dtab *dtab;
int err;
dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
if (!dtab)
return ERR_PTR(-ENOMEM);
err = dev_map_init_map(dtab, attr);
if (err) {
bpf_map_area_free(dtab);
return ERR_PTR(err);
}
spin_lock(&dev_map_lock);
list_add_tail_rcu(&dtab->list, &dev_map_list);
spin_unlock(&dev_map_lock);
return &dtab->map;
}
static void dev_map_free(struct bpf_map *map)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
u32 i;
/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
* so the programs (can be more than one that used this map) were
* disconnected from events. The following synchronize_rcu() guarantees
* both rcu read critical sections complete and waits for
* preempt-disable regions (NAPI being the relevant context here) so we
* are certain there will be no further reads against the netdev_map and
* all flush operations are complete. Flush operations can only be done
* from NAPI context for this reason.
*/
spin_lock(&dev_map_lock);
list_del_rcu(&dtab->list);
spin_unlock(&dev_map_lock);
/* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
* during NAPI callback and cleared after the XDP redirect. There is no
* explicit RCU read section which protects bpf_redirect_info->map but
* local_bh_disable() also marks the beginning of an RCU section. This
* makes the complete softirq callback RCU protected. Thus after the
* following synchronize_rcu() there is no bpf_redirect_info->map == map
* assignment.
*/
synchronize_rcu();
/* Make sure prior __dev_map_entry_free() have completed. */
rcu_barrier();
if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
for (i = 0; i < dtab->n_buckets; i++) {
struct bpf_dtab_netdev *dev;
struct hlist_head *head;
struct hlist_node *next;
head = dev_map_index_hash(dtab, i);
hlist_for_each_entry_safe(dev, next, head, index_hlist) {
hlist_del_rcu(&dev->index_hlist);
if (dev->xdp_prog)
bpf_prog_put(dev->xdp_prog);
dev_put(dev->dev);
kfree(dev);
}
}
bpf_map_area_free(dtab->dev_index_head);
} else {
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
dev = rcu_dereference_raw(dtab->netdev_map[i]);
if (!dev)
continue;
if (dev->xdp_prog)
bpf_prog_put(dev->xdp_prog);
dev_put(dev->dev);
kfree(dev);
}
bpf_map_area_free(dtab->netdev_map);
}
bpf_map_area_free(dtab);
}
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
u32 index = key ? *(u32 *)key : U32_MAX;
u32 *next = next_key;
if (index >= dtab->map.max_entries) {
*next = 0;
return 0;
}
if (index == dtab->map.max_entries - 1)
return -ENOENT;
*next = index + 1;
return 0;
}
/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
* by local_bh_disable() (from XDP calls inside NAPI). The
* rcu_read_lock_bh_held() below makes lockdep accept both.
*/
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct hlist_head *head = dev_map_index_hash(dtab, key);
struct bpf_dtab_netdev *dev;
hlist_for_each_entry_rcu(dev, head, index_hlist,
lockdep_is_held(&dtab->index_lock))
if (dev->idx == key)
return dev;
return NULL;
}
static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
void *next_key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
u32 idx, *next = next_key;
struct bpf_dtab_netdev *dev, *next_dev;
struct hlist_head *head;
int i = 0;
if (!key)
goto find_first;
idx = *(u32 *)key;
dev = __dev_map_hash_lookup_elem(map, idx);
if (!dev)
goto find_first;
next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
struct bpf_dtab_netdev, index_hlist);
if (next_dev) {
*next = next_dev->idx;
return 0;
}
i = idx & (dtab->n_buckets - 1);
i++;
find_first:
for (; i < dtab->n_buckets; i++) {
head = dev_map_index_hash(dtab, i);
next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
struct bpf_dtab_netdev,
index_hlist);
if (next_dev) {
*next = next_dev->idx;
return 0;
}
}
return -ENOENT;
}
static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
struct xdp_frame **frames, int n,
struct net_device *tx_dev,
struct net_device *rx_dev)
{
struct xdp_txq_info txq = { .dev = tx_dev };
struct xdp_rxq_info rxq = { .dev = rx_dev };
struct xdp_buff xdp;
int i, nframes = 0;
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
u32 act;
int err;
xdp_convert_frame_to_buff(xdpf, &xdp);
xdp.txq = &txq;
xdp.rxq = &rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
switch (act) {
case XDP_PASS:
err = xdp_update_frame_from_buff(&xdp, xdpf);
if (unlikely(err < 0))
xdp_return_frame_rx_napi(xdpf);
else
frames[nframes++] = xdpf;
break;
default:
bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(tx_dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
xdp_return_frame_rx_napi(xdpf);
break;
}
}
return nframes; /* sent frames count */
}
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
struct net_device *dev = bq->dev;
unsigned int cnt = bq->count;
int sent = 0, err = 0;
int to_send = cnt;
int i;
if (unlikely(!cnt))
return;
for (i = 0; i < cnt; i++) {
struct xdp_frame *xdpf = bq->q[i];
prefetch(xdpf);
}
if (bq->xdp_prog) {
to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
if (!to_send)
goto out;
}
sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
if (sent < 0) {
/* If ndo_xdp_xmit fails with an errno, no frames have
* been xmit'ed.
*/
err = sent;
sent = 0;
}
/* If not all frames have been transmitted, it is our
* responsibility to free them
*/
for (i = sent; unlikely(i < to_send); i++)
xdp_return_frame_rx_napi(bq->q[i]);
out:
bq->count = 0;
trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}
/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
* driver before returning from its napi->poll() routine. See the comment above
* xdp_do_flush() in filter.c.
*/
void __dev_flush(struct list_head *flush_list)
{
struct xdp_dev_bulk_queue *bq, *tmp;
list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
bq_xmit_all(bq, XDP_XMIT_FLUSH);
bq->dev_rx = NULL;
bq->xdp_prog = NULL;
__list_del_clearprev(&bq->flush_node);
}
}
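/*
* Example (illustrative only, not part of the original file): the contract
* described above as seen from a hypothetical driver's NAPI poll routine;
* foo_process_rx() stands in for the RX loop that may call xdp_do_redirect().
*
*	static int foo_napi_poll(struct napi_struct *napi, int budget)
*	{
*		int done = foo_process_rx(napi, budget);
*
*		xdp_do_flush();
*		if (done < budget)
*			napi_complete_done(napi, done);
*		return done;
*	}
*/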
/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
* by local_bh_disable() (from XDP calls inside NAPI). The
* rcu_read_lock_bh_held() below makes lockdep accept both.
*/
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *obj;
if (key >= map->max_entries)
return NULL;
obj = rcu_dereference_check(dtab->netdev_map[key],
rcu_read_lock_bh_held());
return obj;
}
/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
* variable access, and map elements stick around. See comment above
* xdp_do_flush() in filter.c.
*/
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
bq_xmit_all(bq, 0);
/* Ingress dev_rx will be the same for all xdp_frames in
* bulk_queue, because bq is stored per-CPU and must be flushed
* at the end of the net_device driver's NAPI function.
*
* Do the same with xdp_prog and flush_list since these fields
* are only ever modified together.
*/
if (!bq->dev_rx) {
struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
bq->dev_rx = dev_rx;
bq->xdp_prog = xdp_prog;
list_add(&bq->flush_node, flush_list);
}
bq->q[bq->count++] = xdpf;
}
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx,
struct bpf_prog *xdp_prog)
{
int err;
if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
return -EOPNOTSUPP;
if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
xdp_frame_has_frags(xdpf)))
return -EOPNOTSUPP;
err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
if (unlikely(err))
return err;
bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
return 0;
}
static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
struct xdp_txq_info txq = { .dev = dst->dev };
struct xdp_buff xdp;
u32 act;
if (!dst->xdp_prog)
return XDP_PASS;
__skb_pull(skb, skb->mac_len);
xdp.txq = &txq;
act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
switch (act) {
case XDP_PASS:
__skb_push(skb, skb->mac_len);
break;
default:
bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dst->dev, dst->xdp_prog, act);
fallthrough;
case XDP_DROP:
kfree_skb(skb);
break;
}
return act;
}
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
struct net_device *dev = dst->dev;
return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}
static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
if (!obj)
return false;
if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
return false;
if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
xdp_frame_has_frags(xdpf)))
return false;
if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
return false;
return true;
}
static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
struct net_device *dev_rx,
struct xdp_frame *xdpf)
{
struct xdp_frame *nxdpf;
nxdpf = xdpf_clone(xdpf);
if (!nxdpf)
return -ENOMEM;
bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
return 0;
}
static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
while (num_excluded--) {
if (ifindex == excluded[num_excluded])
return true;
}
return false;
}
/* Get ifindex of each upper device. 'indexes' must be able to hold at
* least MAX_NEST_DEV elements.
* Returns the number of ifindexes added.
*/
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
struct net_device *upper;
struct list_head *iter;
int n = 0;
netdev_for_each_upper_dev_rcu(dev, upper, iter) {
indexes[n++] = upper->ifindex;
}
return n;
}
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
struct bpf_map *map, bool exclude_ingress)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dst, *last_dst = NULL;
int excluded_devices[1+MAX_NEST_DEV];
struct hlist_head *head;
int num_excluded = 0;
unsigned int i;
int err;
if (exclude_ingress) {
num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
excluded_devices[num_excluded++] = dev_rx->ifindex;
}
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
for (i = 0; i < map->max_entries; i++) {
dst = rcu_dereference_check(dtab->netdev_map[i],
rcu_read_lock_bh_held());
if (!is_valid_dst(dst, xdpf))
continue;
if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
continue;
/* we only need n-1 clones; last_dst enqueued below */
if (!last_dst) {
last_dst = dst;
continue;
}
err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
if (err)
return err;
last_dst = dst;
}
} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
for (i = 0; i < dtab->n_buckets; i++) {
head = dev_map_index_hash(dtab, i);
hlist_for_each_entry_rcu(dst, head, index_hlist,
lockdep_is_held(&dtab->index_lock)) {
if (!is_valid_dst(dst, xdpf))
continue;
if (is_ifindex_excluded(excluded_devices, num_excluded,
dst->dev->ifindex))
continue;
/* we only need n-1 clones; last_dst enqueued below */
if (!last_dst) {
last_dst = dst;
continue;
}
err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
if (err)
return err;
last_dst = dst;
}
}
}
/* consume the last copy of the frame */
if (last_dst)
bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
else
xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
return 0;
}
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
const struct bpf_prog *xdp_prog)
{
int err;
err = xdp_ok_fwd_dev(dst->dev, skb->len);
if (unlikely(err))
return err;
/* Redirect has already succeeded semantically at this point, so we just
* return 0 even if packet is dropped. Helper below takes care of
* freeing skb.
*/
if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
return 0;
skb->dev = dst->dev;
generic_xdp_tx(skb, xdp_prog);
return 0;
}
static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
struct sk_buff *skb,
const struct bpf_prog *xdp_prog)
{
struct sk_buff *nskb;
int err;
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
err = dev_map_generic_redirect(dst, nskb, xdp_prog);
if (unlikely(err)) {
consume_skb(nskb);
return err;
}
return 0;
}
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
const struct bpf_prog *xdp_prog,
struct bpf_map *map, bool exclude_ingress)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dst, *last_dst = NULL;
int excluded_devices[1+MAX_NEST_DEV];
struct hlist_head *head;
struct hlist_node *next;
int num_excluded = 0;
unsigned int i;
int err;
if (exclude_ingress) {
num_excluded = get_upper_ifindexes(dev, excluded_devices);
excluded_devices[num_excluded++] = dev->ifindex;
}
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
for (i = 0; i < map->max_entries; i++) {
dst = rcu_dereference_check(dtab->netdev_map[i],
rcu_read_lock_bh_held());
if (!dst)
continue;
if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
continue;
/* we only need n-1 clones; last_dst enqueued below */
if (!last_dst) {
last_dst = dst;
continue;
}
err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
if (err)
return err;
last_dst = dst;
}
} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
for (i = 0; i < dtab->n_buckets; i++) {
head = dev_map_index_hash(dtab, i);
hlist_for_each_entry_safe(dst, next, head, index_hlist) {
if (is_ifindex_excluded(excluded_devices, num_excluded,
dst->dev->ifindex))
continue;
/* we only need n-1 clones; last_dst enqueued below */
if (!last_dst) {
last_dst = dst;
continue;
}
err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
if (err)
return err;
last_dst = dst;
}
}
}
/* consume the first skb and return */
if (last_dst)
return dev_map_generic_redirect(last_dst, skb, xdp_prog);
/* dtab is empty */
consume_skb(skb);
return 0;
}
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
return obj ? &obj->val : NULL;
}
static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
*(u32 *)key);
return obj ? &obj->val : NULL;
}
static void __dev_map_entry_free(struct rcu_head *rcu)
{
struct bpf_dtab_netdev *dev;
dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
if (dev->xdp_prog)
bpf_prog_put(dev->xdp_prog);
dev_put(dev->dev);
kfree(dev);
}
static long dev_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *old_dev;
u32 k = *(u32 *)key;
if (k >= map->max_entries)
return -EINVAL;
old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
if (old_dev) {
call_rcu(&old_dev->rcu, __dev_map_entry_free);
atomic_dec((atomic_t *)&dtab->items);
}
return 0;
}
static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *old_dev;
u32 k = *(u32 *)key;
unsigned long flags;
int ret = -ENOENT;
spin_lock_irqsave(&dtab->index_lock, flags);
old_dev = __dev_map_hash_lookup_elem(map, k);
if (old_dev) {
dtab->items--;
hlist_del_init_rcu(&old_dev->index_hlist);
call_rcu(&old_dev->rcu, __dev_map_entry_free);
ret = 0;
}
spin_unlock_irqrestore(&dtab->index_lock, flags);
return ret;
}
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
struct bpf_dtab *dtab,
struct bpf_devmap_val *val,
unsigned int idx)
{
struct bpf_prog *prog = NULL;
struct bpf_dtab_netdev *dev;
dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
GFP_NOWAIT,
dtab->map.numa_node);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->dev = dev_get_by_index(net, val->ifindex);
if (!dev->dev)
goto err_out;
if (val->bpf_prog.fd > 0) {
prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
BPF_PROG_TYPE_XDP, false);
if (IS_ERR(prog))
goto err_put_dev;
if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
!bpf_prog_map_compatible(&dtab->map, prog))
goto err_put_prog;
}
dev->idx = idx;
if (prog) {
dev->xdp_prog = prog;
dev->val.bpf_prog.id = prog->aux->id;
} else {
dev->xdp_prog = NULL;
dev->val.bpf_prog.id = 0;
}
dev->val.ifindex = val->ifindex;
return dev;
err_put_prog:
bpf_prog_put(prog);
err_put_dev:
dev_put(dev->dev);
err_out:
kfree(dev);
return ERR_PTR(-EINVAL);
}
static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
void *key, void *value, u64 map_flags)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dev, *old_dev;
struct bpf_devmap_val val = {};
u32 i = *(u32 *)key;
if (unlikely(map_flags > BPF_EXIST))
return -EINVAL;
if (unlikely(i >= dtab->map.max_entries))
return -E2BIG;
if (unlikely(map_flags == BPF_NOEXIST))
return -EEXIST;
/* already verified value_size <= sizeof val */
memcpy(&val, value, map->value_size);
if (!val.ifindex) {
dev = NULL;
/* can not specify fd if ifindex is 0 */
if (val.bpf_prog.fd > 0)
return -EINVAL;
} else {
dev = __dev_map_alloc_node(net, dtab, &val, i);
if (IS_ERR(dev))
return PTR_ERR(dev);
}
/* Use call_rcu() here to ensure rcu critical sections have completed.
* Remembering the driver side flush operation will happen before the
* net device is removed.
*/
old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
if (old_dev)
call_rcu(&old_dev->rcu, __dev_map_entry_free);
else
atomic_inc((atomic_t *)&dtab->items);
return 0;
}
static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{
return __dev_map_update_elem(current->nsproxy->net_ns,
map, key, value, map_flags);
}
static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
void *key, void *value, u64 map_flags)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dev, *old_dev;
struct bpf_devmap_val val = {};
u32 idx = *(u32 *)key;
unsigned long flags;
int err = -EEXIST;
/* already verified value_size <= sizeof val */
memcpy(&val, value, map->value_size);
if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
return -EINVAL;
spin_lock_irqsave(&dtab->index_lock, flags);
old_dev = __dev_map_hash_lookup_elem(map, idx);
if (old_dev && (map_flags & BPF_NOEXIST))
goto out_err;
dev = __dev_map_alloc_node(net, dtab, &val, idx);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
goto out_err;
}
if (old_dev) {
hlist_del_rcu(&old_dev->index_hlist);
} else {
if (dtab->items >= dtab->map.max_entries) {
spin_unlock_irqrestore(&dtab->index_lock, flags);
call_rcu(&dev->rcu, __dev_map_entry_free);
return -E2BIG;
}
dtab->items++;
}
hlist_add_head_rcu(&dev->index_hlist,
dev_map_index_hash(dtab, idx));
spin_unlock_irqrestore(&dtab->index_lock, flags);
if (old_dev)
call_rcu(&old_dev->rcu, __dev_map_entry_free);
return 0;
out_err:
spin_unlock_irqrestore(&dtab->index_lock, flags);
return err;
}
static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{
return __dev_map_hash_update_elem(current->nsproxy->net_ns,
map, key, value, map_flags);
}
static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
return __bpf_xdp_redirect_map(map, ifindex, flags,
BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
__dev_map_lookup_elem);
}
static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
return __bpf_xdp_redirect_map(map, ifindex, flags,
BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
__dev_map_hash_lookup_elem);
}
static u64 dev_map_mem_usage(const struct bpf_map *map)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
u64 usage = sizeof(struct bpf_dtab);
if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
else
usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
usage += atomic_read((atomic_t *)&dtab->items) *
(u64)sizeof(struct bpf_dtab_netdev);
return usage;
}
BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = dev_map_alloc_check,
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
.map_get_next_key = dev_map_get_next_key,
.map_lookup_elem = dev_map_lookup_elem,
.map_update_elem = dev_map_update_elem,
.map_delete_elem = dev_map_delete_elem,
.map_check_btf = map_check_no_btf,
.map_mem_usage = dev_map_mem_usage,
.map_btf_id = &dev_map_btf_ids[0],
.map_redirect = dev_map_redirect,
};
const struct bpf_map_ops dev_map_hash_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = dev_map_alloc_check,
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
.map_get_next_key = dev_map_hash_get_next_key,
.map_lookup_elem = dev_map_hash_lookup_elem,
.map_update_elem = dev_map_hash_update_elem,
.map_delete_elem = dev_map_hash_delete_elem,
.map_check_btf = map_check_no_btf,
.map_mem_usage = dev_map_mem_usage,
.map_btf_id = &dev_map_btf_ids[0],
.map_redirect = dev_hash_map_redirect,
};
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
struct net_device *netdev)
{
unsigned long flags;
u32 i;
spin_lock_irqsave(&dtab->index_lock, flags);
for (i = 0; i < dtab->n_buckets; i++) {
struct bpf_dtab_netdev *dev;
struct hlist_head *head;
struct hlist_node *next;
head = dev_map_index_hash(dtab, i);
hlist_for_each_entry_safe(dev, next, head, index_hlist) {
if (netdev != dev->dev)
continue;
dtab->items--;
hlist_del_rcu(&dev->index_hlist);
call_rcu(&dev->rcu, __dev_map_entry_free);
}
}
spin_unlock_irqrestore(&dtab->index_lock, flags);
}
static int dev_map_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct bpf_dtab *dtab;
int i, cpu;
switch (event) {
case NETDEV_REGISTER:
if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
break;
/* will be freed in free_netdev() */
netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
if (!netdev->xdp_bulkq)
return NOTIFY_BAD;
for_each_possible_cpu(cpu)
per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
break;
case NETDEV_UNREGISTER:
/* This rcu_read_lock/unlock pair is needed because
* dev_map_list is an RCU list AND to ensure a delete
* operation does not free a netdev_map entry while we
* are comparing it against the netdev being unregistered.
*/
rcu_read_lock();
list_for_each_entry_rcu(dtab, &dev_map_list, list) {
if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dev_map_hash_remove_netdev(dtab, netdev);
continue;
}
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev, *odev;
dev = rcu_dereference(dtab->netdev_map[i]);
if (!dev || netdev != dev->dev)
continue;
odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
if (dev == odev) {
call_rcu(&dev->rcu,
__dev_map_entry_free);
atomic_dec((atomic_t *)&dtab->items);
}
}
}
rcu_read_unlock();
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block dev_map_notifier = {
.notifier_call = dev_map_notification,
};
static int __init dev_map_init(void)
{
/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
offsetof(struct _bpf_dtab_netdev, dev));
register_netdevice_notifier(&dev_map_notifier);
return 0;
}
subsys_initcall(dev_map_init);
// SPDX-License-Identifier: GPL-2.0-only
/*
* Simple NUMA memory policy for the Linux kernel.
*
* Copyright 2003,2004 Andi Kleen, SuSE Labs.
* (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
*
* NUMA policy allows the user to give hints in which node(s) memory should
* be allocated.
*
* Support six policies per VMA and per process:
*
* The VMA policy has priority over the process policy for a page fault.
*
* interleave Allocate memory interleaved over a set of nodes,
* with normal fallback if it fails.
* For VMA based allocations this interleaves based on the
* offset into the backing object or offset into the mapping
* for anonymous memory. For process policy a process counter
* is used.
*
* weighted interleave
* Allocate memory interleaved over a set of nodes based on
* a set of weights (per-node), with normal fallback if it
* fails. Otherwise operates the same as interleave.
* Example: nodeset(0,1) & weights (2,1) - 2 pages allocated
* on node 0 for every 1 page allocated on node 1.
*
* bind Only allocate memory on a specific set of nodes,
* no fallback.
* FIXME: memory is allocated starting with the first node
* to the last. It would be better if bind would truly restrict
* the allocation to memory nodes instead
*
* preferred Try a specific node first before normal fallback.
* As a special case NUMA_NO_NODE here means do the allocation
* on the local CPU. This is normally identical to default,
* but useful to set in a VMA when you have a non default
* process policy.
*
* preferred many Try a set of nodes first before normal fallback. This is
* similar to preferred without the special case.
*
* default Allocate on the local node first, or when on a VMA
* use the process policy. This is what Linux always did
* in a NUMA aware kernel and still does by, ahem, default.
*
* The process policy is applied for most non interrupt memory allocations
* in that process' context. Interrupts ignore the policies and always
* try to allocate on the local CPU. The VMA policy is only applied for memory
* allocations for a VMA in the VM.
*
* Currently there are a few corner cases in swapping where the policy
* is not applied, but the majority should be handled. When process policy
* is used it is not remembered over swap outs/swap ins.
*
* Only the highest zone in the zone hierarchy gets policied. Allocations
* requesting a lower zone just use default policy. This implies that
* on systems with highmem, kernel lowmem allocations don't get policied.
* Same with GFP_DMA allocations.
*
* For shmem/tmpfs shared memory the policy is shared between
* all users and remembered even when nobody has memory mapped.
*/
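/*
* Expanding on the weighted interleave example above: with nodeset(0,1)
* and weights (2,1), pages land on nodes in the repeating pattern
* 0, 0, 1, 0, 0, 1, ... A minimal, purely illustrative model of that
* round-robin (hypothetical helper, not part of the kernel API; it only
* assumes a plain per-node weight array with all weights >= 1) is:
*
*	static int wi_pick_node(const unsigned int *weights, int nr, long page)
*	{
*		long total = 0, target;
*		int nid;
*
*		for (nid = 0; nid < nr; nid++)
*			total += weights[nid];
*		target = page % total;		// offset within one full round
*		for (nid = 0; nid < nr; nid++) {
*			if (target < weights[nid])
*				return nid;	// falls in this node's share
*			target -= weights[nid];
*		}
*		return 0;			// not reached while total > 0
*	}
*
* With weights {2, 1} this returns node 0 for pages 0 and 1 and node 1 for
* page 2 of every group of three, i.e. the 2:1 split described above.
*/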
/* Notebook:
fix mmap readahead to honour policy and enable policy for any page cache
object
statistics for bigpages
global policy for page cache? currently it uses process policy. Requires
first item above.
handle mremap for shared memory (currently ignored for the policy)
grows down?
make bind policy root only? It can trigger oom much faster and the
kernel is not always grateful with that.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>
#include <linux/gcd.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include "internal.h"
/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
#define MPOL_MF_WRLOCK (MPOL_MF_INTERNAL << 2) /* Write-lock walked vmas */
static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
/* Highest zone. A specific allocation for a zone below that is not
policied. */
enum zone_type policy_zone = 0;
/*
* run-time system-wide default policy => local allocation
*/
static struct mempolicy default_policy = {
.refcnt = ATOMIC_INIT(1), /* never free it */
.mode = MPOL_LOCAL,
};
static struct mempolicy preferred_node_policy[MAX_NUMNODES];
/*
* weightiness balances the tradeoff between small weights (cycles through nodes
* faster, more fair/even distribution) and large weights (smaller errors
* between actual bandwidth ratios and weight ratios). 32 is a number that has
* been found to perform at a reasonable compromise between the two goals.
*/
static const int weightiness = 32;
/*
* A null weighted_interleave_state is interpreted as having .mode="auto",
* and .iw_table is interpreted as an array of 1s with length nr_node_ids.
*/
struct weighted_interleave_state {
bool mode_auto;
u8 iw_table[];
};
static struct weighted_interleave_state __rcu *wi_state;
static unsigned int *node_bw_table;
/*
* wi_state_lock protects both wi_state and node_bw_table.
* node_bw_table is only used by writers to update wi_state.
*/
static DEFINE_MUTEX(wi_state_lock);
static u8 get_il_weight(int node)
{
struct weighted_interleave_state *state;
u8 weight = 1;
rcu_read_lock();
state = rcu_dereference(wi_state);
if (state)
weight = state->iw_table[node];
rcu_read_unlock();
return weight;
}
/*
* Convert bandwidth values into weighted interleave weights.
* Call with wi_state_lock.
*/
static void reduce_interleave_weights(unsigned int *bw, u8 *new_iw)
{
u64 sum_bw = 0;
unsigned int cast_sum_bw, scaling_factor = 1, iw_gcd = 0;
int nid;
for_each_node_state(nid, N_MEMORY)
sum_bw += bw[nid];
/* Scale bandwidths to whole numbers in the range [1, weightiness] */
for_each_node_state(nid, N_MEMORY) {
/*
* Try not to perform 64-bit division.
* If sum_bw < scaling_factor, then sum_bw < U32_MAX.
* If sum_bw > scaling_factor, then round the weight up to 1.
*/
scaling_factor = weightiness * bw[nid];
if (bw[nid] && sum_bw < scaling_factor) {
cast_sum_bw = (unsigned int)sum_bw;
new_iw[nid] = scaling_factor / cast_sum_bw;
} else {
new_iw[nid] = 1;
}
if (!iw_gcd)
iw_gcd = new_iw[nid];
iw_gcd = gcd(iw_gcd, new_iw[nid]);
}
/* 1:2 is strictly better than 16:32. Reduce by the weights' GCD. */
for_each_node_state(nid, N_MEMORY)
new_iw[nid] /= iw_gcd;
}
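/*
* Worked example for reduce_interleave_weights() (illustrative numbers,
* not measured data): with two memory nodes reporting bandwidths
* bw = { 300, 100 }, sum_bw = 400 and weightiness = 32:
*
*	node 0: scaling_factor = 32 * 300 = 9600, 9600 / 400 = 24
*	node 1: scaling_factor = 32 * 100 = 3200, 3200 / 400 =  8
*
* The GCD of {24, 8} is 8, so the final interleave weights are 3:1,
* matching the 3:1 bandwidth ratio with the smallest possible cycle length.
*/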
int mempolicy_set_node_perf(unsigned int node, struct access_coordinate *coords)
{
struct weighted_interleave_state *new_wi_state, *old_wi_state = NULL;
unsigned int *old_bw, *new_bw;
unsigned int bw_val;
int i;
bw_val = min(coords->read_bandwidth, coords->write_bandwidth);
new_bw = kcalloc(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);
if (!new_bw)
return -ENOMEM;
new_wi_state = kmalloc(struct_size(new_wi_state, iw_table, nr_node_ids),
GFP_KERNEL);
if (!new_wi_state) {
kfree(new_bw);
return -ENOMEM;
}
new_wi_state->mode_auto = true;
for (i = 0; i < nr_node_ids; i++)
new_wi_state->iw_table[i] = 1;
/*
* Update bandwidth info, even in manual mode. That way, when switching
* to auto mode in the future, iw_table can be overwritten using
* accurate bw data.
*/
mutex_lock(&wi_state_lock);
old_bw = node_bw_table;
if (old_bw)
memcpy(new_bw, old_bw, nr_node_ids * sizeof(*old_bw));
new_bw[node] = bw_val;
node_bw_table = new_bw;
old_wi_state = rcu_dereference_protected(wi_state,
lockdep_is_held(&wi_state_lock));
if (old_wi_state && !old_wi_state->mode_auto) {
/* Manual mode; skip reducing weights and updating wi_state */
mutex_unlock(&wi_state_lock);
kfree(new_wi_state);
goto out;
}
/* NULL wi_state assumes auto=true; reduce weights and update wi_state */
reduce_interleave_weights(new_bw, new_wi_state->iw_table);
rcu_assign_pointer(wi_state, new_wi_state);
mutex_unlock(&wi_state_lock);
if (old_wi_state) {
synchronize_rcu();
kfree(old_wi_state);
}
out:
kfree(old_bw);
return 0;
}
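/*
* Hedged usage sketch for mempolicy_set_node_perf(): it is expected to be
* called by whichever subsystem learns a node's memory performance. The
* caller and the numbers below are hypothetical; only the field names come
* from struct access_coordinate as used above:
*
*	struct access_coordinate c = {
*		.read_bandwidth  = 300,
*		.write_bandwidth = 100,
*	};
*
*	ret = mempolicy_set_node_perf(nid, &c);	// records min(read, write)
*
* In auto mode the weighted-interleave table is then recomputed from the
* new bandwidth value; in manual mode only the bandwidth table is updated,
* so a later switch back to auto mode can still use accurate data.
*/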
/**
* numa_nearest_node - Find nearest node by state
* @node: Node id to start the search
* @state: State to filter the search
*
* Lookup the closest node by distance if @node is not in state.
*
* Return: this @node if it is in state, otherwise the closest node by distance
*/
int numa_nearest_node(int node, unsigned int state)
{
int min_dist = INT_MAX, dist, n, min_node;
if (state >= NR_NODE_STATES)
return -EINVAL;
if (node == NUMA_NO_NODE || node_state(node, state))
return node;
min_node = node;
for_each_node_state(n, state) {
dist = node_distance(node, n);
if (dist < min_dist) {
min_dist = dist;
min_node = n;
}
}
return min_node;
}
EXPORT_SYMBOL_GPL(numa_nearest_node);
/**
* nearest_node_nodemask - Find the node in @mask at the nearest distance
* from @node.
*
* @node: a valid node ID to start the search from.
* @mask: a pointer to a nodemask representing the allowed nodes.
*
* This function iterates over all nodes in @mask and calculates the
* distance from the starting @node, then it returns the node ID that is
* the closest to @node, or MAX_NUMNODES if no node is found.
*
* Note that @node must be a valid node ID usable with node_distance(),
* providing an invalid node ID (e.g., NUMA_NO_NODE) may result in crashes
* or unexpected behavior.
*/
int nearest_node_nodemask(int node, nodemask_t *mask)
{
int dist, n, min_dist = INT_MAX, min_node = MAX_NUMNODES;
for_each_node_mask(n, *mask) {
dist = node_distance(node, n);
if (dist < min_dist) {
min_dist = dist;
min_node = n;
}
}
return min_node;
}
EXPORT_SYMBOL_GPL(nearest_node_nodemask);
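/*
* Illustrative use of the two exported helpers above (the calling code is
* hypothetical): a caller that wants a node with memory close to a device
* whose own node may be memoryless, or that is limited to a mask, might do:
*
*	int nid = numa_nearest_node(dev_to_node(dev), N_MEMORY);
*
*	// or, when only a subset of nodes is acceptable:
*	int nid2 = nearest_node_nodemask(dev_to_node(dev), &allowed);
*	if (nid2 == MAX_NUMNODES)
*		nid2 = NUMA_NO_NODE;	// nothing usable in the mask
*
* numa_nearest_node() returns the starting node unchanged when it already
* has the requested state, so it is safe to call unconditionally.
*/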
struct mempolicy *get_task_policy(struct task_struct *p)
{
struct mempolicy *pol = p->mempolicy;
int node;
if (pol)
return pol;
node = numa_node_id();
if (node != NUMA_NO_NODE) {
pol = &preferred_node_policy[node];
/* preferred_node_policy is not initialised early in boot */
if (pol->mode)
return pol;
}
return &default_policy;
}
static const struct mempolicy_operations {
int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
return pol->flags & MPOL_MODE_FLAGS;
}
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
const nodemask_t *rel)
{
nodemask_t tmp;
nodes_fold(tmp, *orig, nodes_weight(*rel));
nodes_onto(*ret, tmp, *rel);
}
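/*
* Worked example for mpol_relative_nodemask() (MPOL_F_RELATIVE_NODES):
* with a user-supplied relative mask orig = {0,2} and an allowed mask
* rel = {4,5,6}, nodes_fold() folds orig modulo nodes_weight(rel) = 3,
* leaving {0,2}, and nodes_onto() maps those bit positions onto the set
* bits of rel, giving ret = {4,6}: the 1st and 3rd allowed nodes.
*/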
static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
pol->nodes = *nodes;
return 0;
}
static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
nodes_clear(pol->nodes);
node_set(first_node(*nodes), pol->nodes);
return 0;
}
/*
* mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
* any, for the new policy. mpol_new() has already validated the nodes
* parameter with respect to the policy mode and flags.
*
* Must be called holding task's alloc_lock to protect task's mems_allowed
* and mempolicy. May also be called holding the mmap_lock for write.
*/
static int mpol_set_nodemask(struct mempolicy *pol,
const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
int ret;
/*
* Default (pol==NULL) resp. local memory policies are not a
* subject of any remapping. They also do not need any special
* constructor.
*/
if (!pol || pol->mode == MPOL_LOCAL)
return 0;
/* Check N_MEMORY */
nodes_and(nsc->mask1,
cpuset_current_mems_allowed, node_states[N_MEMORY]);
VM_BUG_ON(!nodes);
if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
else
nodes_and(nsc->mask2, *nodes, nsc->mask1);
if (mpol_store_user_nodemask(pol))
pol->w.user_nodemask = *nodes;
else
pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
return ret;
}
/*
* This function just creates a new policy, does some check and simple
* initialization. You must invoke mpol_set_nodemask() to set nodes.
*/
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
nodemask_t *nodes)
{
struct mempolicy *policy;
if (mode == MPOL_DEFAULT) {
if (nodes && !nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
return NULL;
}
VM_BUG_ON(!nodes);
/*
* MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
* MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
* All other modes require a valid pointer to a non-empty nodemask.
*/
if (mode == MPOL_PREFERRED) {
if (nodes_empty(*nodes)) {
if (((flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES)))
return ERR_PTR(-EINVAL);
mode = MPOL_LOCAL;
}
} else if (mode == MPOL_LOCAL) {
if (!nodes_empty(*nodes) ||
(flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES))
return ERR_PTR(-EINVAL);
} else if (nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!policy)
return ERR_PTR(-ENOMEM);
atomic_set(&policy->refcnt, 1);
policy->mode = mode;
policy->flags = flags;
policy->home_node = NUMA_NO_NODE;
return policy;
}
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *pol)
{
if (!atomic_dec_and_test(&pol->refcnt))
return;
kmem_cache_free(policy_cache, pol);
}
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
nodemask_t tmp;
if (pol->flags & MPOL_F_STATIC_NODES)
nodes_and(tmp, pol->w.user_nodemask, *nodes);
else if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
else {
nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
*nodes);
pol->w.cpuset_mems_allowed = *nodes;
}
if (nodes_empty(tmp))
tmp = *nodes;
pol->nodes = tmp;
}
static void mpol_rebind_preferred(struct mempolicy *pol,
const nodemask_t *nodes)
{
pol->w.cpuset_mems_allowed = *nodes;
}
/*
* mpol_rebind_policy - Migrate a policy to a different set of nodes
*
* Per-vma policies are protected by mmap_lock. Allocations using per-task
* policies are protected by task->mems_allowed_seq to prevent a premature
* OOM/allocation failure due to parallel nodemask modification.
*/
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
if (!pol || pol->mode == MPOL_LOCAL)
return;
if (!mpol_store_user_nodemask(pol) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
mpol_ops[pol->mode].rebind(pol, newmask);
}
/*
* Wrapper for mpol_rebind_policy() that just requires task
* pointer, and updates task mempolicy.
*
* Called with task's alloc_lock held.
*/
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
mpol_rebind_policy(tsk->mempolicy, new);
}
/*
* Rebind each vma in mm to new nodemask.
*
* Call holding a reference to mm. Takes mm->mmap_lock during call.
*/
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
mmap_write_lock(mm);
for_each_vma(vmi, vma) {
vma_start_write(vma);
mpol_rebind_policy(vma->vm_policy, new);
}
mmap_write_unlock(mm);
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
[MPOL_DEFAULT] = {
.rebind = mpol_rebind_default,
},
[MPOL_INTERLEAVE] = {
.create = mpol_new_nodemask,
.rebind = mpol_rebind_nodemask,
},
[MPOL_PREFERRED] = {
.create = mpol_new_preferred,
.rebind = mpol_rebind_preferred,
},
[MPOL_BIND] = {
.create = mpol_new_nodemask,
.rebind = mpol_rebind_nodemask,
},
[MPOL_LOCAL] = {
.rebind = mpol_rebind_default,
},
[MPOL_PREFERRED_MANY] = {
.create = mpol_new_nodemask,
.rebind = mpol_rebind_preferred,
},
[MPOL_WEIGHTED_INTERLEAVE] = {
.create = mpol_new_nodemask,
.rebind = mpol_rebind_nodemask,
},
};
static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
unsigned long flags);
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
pgoff_t ilx, int *nid);
static bool strictly_unmovable(unsigned long flags)
{
/*
* STRICT without MOVE flags lets do_mbind() fail immediately with -EIO
* if any misplaced page is found.
*/
return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ==
MPOL_MF_STRICT;
}
struct migration_mpol { /* for alloc_migration_target_by_mpol() */
struct mempolicy *pol;
pgoff_t ilx;
};
struct queue_pages {
struct list_head *pagelist;
unsigned long flags;
nodemask_t *nmask;
unsigned long start;
unsigned long end;
struct vm_area_struct *first;
struct folio *large; /* note last large folio encountered */
long nr_failed; /* could not be isolated at this time */
};
/*
* Check if the folio's nid is in qp->nmask.
*
* If MPOL_MF_INVERT is set in qp->flags, check if the nid is
* in the invert of qp->nmask.
*/
static inline bool queue_folio_required(struct folio *folio,
struct queue_pages *qp)
{
int nid = folio_nid(folio);
unsigned long flags = qp->flags;
return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
{
struct folio *folio;
struct queue_pages *qp = walk->private;
if (unlikely(is_pmd_migration_entry(*pmd))) {
qp->nr_failed++;
return;
}
folio = pmd_folio(*pmd);
if (is_huge_zero_folio(folio)) {
walk->action = ACTION_CONTINUE;
return;
}
if (!queue_folio_required(folio, qp))
return;
if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
!vma_migratable(walk->vma) ||
!migrate_folio_add(folio, qp->pagelist, qp->flags))
qp->nr_failed++;
}
/*
* Scan through folios, checking if they satisfy the required conditions,
* moving them from the LRU to the local pagelist for migration if they do.
*
* queue_folios_pte_range() has two possible return values:
* 0 - continue walking to scan for more, even if an existing folio on the
* wrong node could not be isolated and queued for migration.
* -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL,
* and an existing folio was on a node that does not follow the policy.
*/
static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct folio *folio;
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
pte_t *pte, *mapped_pte;
pte_t ptent;
spinlock_t *ptl;
int max_nr, nr;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
queue_folios_pmd(pmd, walk);
spin_unlock(ptl);
goto out;
}
mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
if (!pte) {
walk->action = ACTION_AGAIN;
return 0;
}
for (; addr != end; pte += nr, addr += nr * PAGE_SIZE) {
max_nr = (end - addr) >> PAGE_SHIFT;
nr = 1;
ptent = ptep_get(pte);
if (pte_none(ptent))
continue;
if (!pte_present(ptent)) {
if (is_migration_entry(pte_to_swp_entry(ptent)))
qp->nr_failed++;
continue;
}
folio = vm_normal_folio(vma, addr, ptent);
if (!folio || folio_is_zone_device(folio))
continue;
if (folio_test_large(folio) && max_nr != 1)
nr = folio_pte_batch(folio, pte, ptent, max_nr);
/*
* vm_normal_folio() filters out zero pages, but there might
* still be reserved folios to skip, perhaps in a VDSO.
*/
if (folio_test_reserved(folio))
continue;
if (!queue_folio_required(folio, qp))
continue;
if (folio_test_large(folio)) {
/*
* A large folio can only be isolated from LRU once,
* but may be mapped by many PTEs (and Copy-On-Write may
* intersperse PTEs of other, order 0, folios). This is
* a common case, so don't mistake it for failure (but
* there can be other cases of multi-mapped pages which
* this quick check does not help to filter out - and a
* search of the pagelist might grow to be prohibitive).
*
* migrate_pages(&pagelist) returns nr_failed folios, so
* check "large" now so that queue_pages_range() returns
* a comparable nr_failed folios. This does imply that
* if folio could not be isolated for some racy reason
* at its first PTE, later PTEs will not give it another
* chance of isolation; but keeps the accounting simple.
*/
if (folio == qp->large)
continue;
qp->large = folio;
}
if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
!vma_migratable(vma) ||
!migrate_folio_add(folio, qp->pagelist, flags)) {
qp->nr_failed += nr;
if (strictly_unmovable(flags))
break;
}
}
pte_unmap_unlock(mapped_pte, ptl);
cond_resched();
out:
if (qp->nr_failed && strictly_unmovable(flags))
return -EIO;
return 0;
}
static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
struct folio *folio;
spinlock_t *ptl;
pte_t entry;
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
entry = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(entry)) {
if (unlikely(is_hugetlb_entry_migration(entry)))
qp->nr_failed++;
goto unlock;
}
folio = pfn_folio(pte_pfn(entry));
if (!queue_folio_required(folio, qp))
goto unlock;
if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
!vma_migratable(walk->vma)) {
qp->nr_failed++;
goto unlock;
}
/*
* Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
* Choosing not to migrate a shared folio is not counted as a failure.
*
* See folio_maybe_mapped_shared() on possible imprecision when we
* cannot easily detect if a folio is shared.
*/
if ((flags & MPOL_MF_MOVE_ALL) ||
(!folio_maybe_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
if (!folio_isolate_hugetlb(folio, qp->pagelist))
qp->nr_failed++;
unlock:
spin_unlock(ptl);
if (qp->nr_failed && strictly_unmovable(flags))
return -EIO;
#endif
return 0;
}
#ifdef CONFIG_NUMA_BALANCING
/*
* This is used to mark a range of virtual addresses to be inaccessible.
* These are later cleared by a NUMA hinting fault. Depending on these
* faults, pages may be migrated for better NUMA placement.
*
* This is assuming that NUMA faults are handled using PROT_NONE. If
* an architecture makes a different choice, it will need further
* changes to the core.
*/
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
struct mmu_gather tlb;
long nr_updated;
tlb_gather_mmu(&tlb, vma->vm_mm);
nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
if (nr_updated > 0) {
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
count_memcg_events_mm(vma->vm_mm, NUMA_PTE_UPDATES, nr_updated);
}
tlb_finish_mmu(&tlb);
return nr_updated;
}
#endif /* CONFIG_NUMA_BALANCING */
static int queue_pages_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *next, *vma = walk->vma;
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
/* range check first */
VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
if (!qp->first) {
qp->first = vma;
if (!(flags & MPOL_MF_DISCONTIG_OK) &&
(qp->start < vma->vm_start))
/* hole at head side of range */
return -EFAULT;
}
next = find_vma(vma->vm_mm, vma->vm_end);
if (!(flags & MPOL_MF_DISCONTIG_OK) &&
((vma->vm_end < qp->end) &&
(!next || vma->vm_end < next->vm_start)))
/* hole at middle or tail of range */
return -EFAULT;
/*
* We need to check MPOL_MF_STRICT so that -EIO can be returned if
* necessary, regardless of vma_migratable()
*/
if (!vma_migratable(vma) &&
!(flags & MPOL_MF_STRICT))
return 1;
/*
* Check page nodes, and queue pages to move, in the current vma.
* But if no moving, and no strict checking, the scan can be skipped.
*/
if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
return 0;
return 1;
}
static const struct mm_walk_ops queue_pages_walk_ops = {
.hugetlb_entry = queue_folios_hugetlb,
.pmd_entry = queue_folios_pte_range,
.test_walk = queue_pages_test_walk,
.walk_lock = PGWALK_RDLOCK,
};
static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
.hugetlb_entry = queue_folios_hugetlb,
.pmd_entry = queue_folios_pte_range,
.test_walk = queue_pages_test_walk,
.walk_lock = PGWALK_WRLOCK,
};
/*
* Walk through page tables and collect pages to be migrated.
*
* If pages found in a given range are not on the required set of @nodes,
* and migration is allowed, they are isolated and queued to @pagelist.
*
* queue_pages_range() may return:
* 0 - all pages already on the right node, or successfully queued for moving
* (or neither strict checking nor moving requested: only range checking).
* >0 - this number of misplaced folios could not be queued for moving
* (a hugetlbfs page or a transparent huge page being counted as 1).
* -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs.
* -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified.
*/
static long
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
nodemask_t *nodes, unsigned long flags,
struct list_head *pagelist)
{
int err;
struct queue_pages qp = {
.pagelist = pagelist,
.flags = flags,
.nmask = nodes,
.start = start,
.end = end,
.first = NULL,
};
const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ?
&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
err = walk_page_range(mm, start, end, ops, &qp);
if (!qp.first)
/* whole range in hole */
err = -EFAULT;
return err ? : qp.nr_failed;
}
/*
* Apply policy to a single VMA
* This must be called with the mmap_lock held for writing.
*/
static int vma_replace_policy(struct vm_area_struct *vma,
struct mempolicy *pol)
{
int err;
struct mempolicy *old;
struct mempolicy *new;
vma_assert_write_locked(vma);
new = mpol_dup(pol);
if (IS_ERR(new))
return PTR_ERR(new);
if (vma->vm_ops && vma->vm_ops->set_policy) {
err = vma->vm_ops->set_policy(vma, new);
if (err)
goto err_out;
}
old = vma->vm_policy;
vma->vm_policy = new; /* protected by mmap_lock */
mpol_put(old);
return 0;
err_out:
mpol_put(new);
return err;
}
/* Split or merge the VMA (if required) and apply the new policy */
static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct vm_area_struct **prev, unsigned long start,
unsigned long end, struct mempolicy *new_pol)
{
unsigned long vmstart, vmend;
vmend = min(end, vma->vm_end);
if (start > vma->vm_start) {
*prev = vma;
vmstart = start;
} else {
vmstart = vma->vm_start;
}
if (mpol_equal(vma->vm_policy, new_pol)) {
*prev = vma;
return 0;
}
vma = vma_modify_policy(vmi, *prev, vma, vmstart, vmend, new_pol);
if (IS_ERR(vma))
return PTR_ERR(vma);
*prev = vma;
return vma_replace_policy(vma, new_pol);
}
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
nodemask_t *nodes)
{
struct mempolicy *new, *old;
NODEMASK_SCRATCH(scratch);
int ret;
if (!scratch)
return -ENOMEM;
new = mpol_new(mode, flags, nodes);
if (IS_ERR(new)) {
ret = PTR_ERR(new);
goto out;
}
task_lock(current);
ret = mpol_set_nodemask(new, nodes, scratch);
if (ret) {
task_unlock(current);
mpol_put(new);
goto out;
}
old = current->mempolicy;
current->mempolicy = new;
if (new && (new->mode == MPOL_INTERLEAVE ||
new->mode == MPOL_WEIGHTED_INTERLEAVE)) {
current->il_prev = MAX_NUMNODES-1;
current->il_weight = 0;
}
task_unlock(current);
mpol_put(old);
ret = 0;
out:
NODEMASK_SCRATCH_FREE(scratch);
return ret;
}
/*
* Return nodemask for policy for get_mempolicy() query
*
* Called with task's alloc_lock held
*/
static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
{
nodes_clear(*nodes);
if (pol == &default_policy)
return;
switch (pol->mode) {
case MPOL_BIND:
case MPOL_INTERLEAVE:
case MPOL_PREFERRED:
case MPOL_PREFERRED_MANY:
case MPOL_WEIGHTED_INTERLEAVE:
*nodes = pol->nodes;
break;
case MPOL_LOCAL:
/* return empty node mask for local allocation */
break;
default:
BUG();
}
}
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
struct page *p = NULL;
int ret;
ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
if (ret > 0) {
ret = page_to_nid(p);
put_page(p);
}
return ret;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
unsigned long addr, unsigned long flags)
{
int err;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
if (flags &
~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
return -EINVAL;
if (flags & MPOL_F_MEMS_ALLOWED) {
if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
return -EINVAL;
*policy = 0; /* just so it's initialized */
task_lock(current);
*nmask = cpuset_current_mems_allowed;
task_unlock(current);
return 0;
}
if (flags & MPOL_F_ADDR) {
pgoff_t ilx; /* ignored here */
/*
* Do NOT fall back to task policy if the
* vma/shared policy at addr is NULL. We
* want to return MPOL_DEFAULT in this case.
*/
mmap_read_lock(mm);
vma = vma_lookup(mm, addr);
if (!vma) {
mmap_read_unlock(mm);
return -EFAULT;
}
pol = __get_vma_policy(vma, addr, &ilx);
} else if (addr)
return -EINVAL;
if (!pol)
pol = &default_policy; /* indicates default behavior */
if (flags & MPOL_F_NODE) {
if (flags & MPOL_F_ADDR) {
/*
* Take a refcount on the mpol, because we are about to
* drop the mmap_lock, after which only "pol" remains
* valid, "vma" is stale.
*/
pol_refcount = pol;
vma = NULL;
mpol_get(pol);
mmap_read_unlock(mm);
err = lookup_node(mm, addr);
if (err < 0)
goto out;
*policy = err;
} else if (pol == current->mempolicy &&
pol->mode == MPOL_INTERLEAVE) {
*policy = next_node_in(current->il_prev, pol->nodes);
} else if (pol == current->mempolicy &&
pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
if (current->il_weight)
*policy = current->il_prev;
else
*policy = next_node_in(current->il_prev,
pol->nodes);
} else {
err = -EINVAL;
goto out;
}
} else {
*policy = pol == &default_policy ? MPOL_DEFAULT :
pol->mode;
/*
* Internal mempolicy flags must be masked off before exposing
* the policy to userspace.
*/
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
*nmask = pol->w.user_nodemask;
} else {
task_lock(current);
get_policy_nodemask(pol, nmask);
task_unlock(current);
}
}
out:
mpol_cond_put(pol);
if (vma)
mmap_read_unlock(mm);
if (pol_refcount)
mpol_put(pol_refcount);
return err;
}
#ifdef CONFIG_MIGRATION
static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
unsigned long flags)
{
/*
* Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
* Choosing not to migrate a shared folio is not counted as a failure.
*
* See folio_maybe_mapped_shared() on possible imprecision when we
* cannot easily detect if a folio is shared.
*/
if ((flags & MPOL_MF_MOVE_ALL) || !folio_maybe_mapped_shared(folio)) {
if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, foliolist);
node_stat_mod_folio(folio,
NR_ISOLATED_ANON + folio_is_file_lru(folio),
folio_nr_pages(folio));
} else {
/*
* Non-movable folio may reach here. And, there may be
* temporary off LRU folios or non-LRU movable folios.
* Treat them as unmovable folios since they can't be
* isolated, so they can't be moved at the moment.
*/
return false;
}
}
return true;
}
/*
* Migrate pages from one node to a target node.
* Returns error or the number of pages not migrated.
*/
static long migrate_to_node(struct mm_struct *mm, int source, int dest,
int flags)
{
nodemask_t nmask;
struct vm_area_struct *vma;
LIST_HEAD(pagelist);
long nr_failed;
long err = 0;
struct migration_target_control mtc = {
.nid = dest,
.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
.reason = MR_SYSCALL,
};
nodes_clear(nmask);
node_set(source, nmask);
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
mmap_read_lock(mm);
vma = find_vma(mm, 0);
if (unlikely(!vma)) {
mmap_read_unlock(mm);
return 0;
}
/*
* This does not migrate the range, but isolates all pages that
* need migration. Between passing in the full user address
* space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
* but passes back the count of pages which could not be isolated.
*/
nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
mmap_read_unlock(mm);
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
if (err)
putback_movable_pages(&pagelist);
}
if (err >= 0)
err += nr_failed;
return err;
}
/*
* Move pages between the two nodesets so as to preserve the physical
* layout as much as possible.
*
* Returns the number of pages that could not be moved.
*/
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
long nr_failed = 0;
long err = 0;
nodemask_t tmp;
lru_cache_disable();
/*
* Find a 'source' bit set in 'tmp' whose corresponding 'dest'
* bit in 'to' is not also set in 'tmp'. Clear the found 'source'
* bit in 'tmp', and return that <source, dest> pair for migration.
* The pair of nodemasks 'to' and 'from' define the map.
*
* If no pair of bits is found that way, fallback to picking some
* pair of 'source' and 'dest' bits that are not the same. If the
* 'source' and 'dest' bits are the same, this represents a node
* that will be migrating to itself, so no pages need move.
*
* If no bits are left in 'tmp', or if all remaining bits left
* in 'tmp' correspond to the same bit in 'to', return false
* (nothing left to migrate).
*
* This lets us pick a pair of nodes to migrate between, such that
* if possible the dest node is not already occupied by some other
* source node, minimizing the risk of overloading the memory on a
* node that would happen if we migrated incoming memory to a node
* before migrating the outgoing memory off of that same node.
*
* A single scan of tmp is sufficient. As we go, we remember the
* most recent <s, d> pair that moved (s != d). If we find a pair
* that not only moved, but what's better, moved to an empty slot
* (d is not set in tmp), then we break out then, with that pair.
* Otherwise when we finish scanning tmp, we at least have the
* most recent <s, d> pair that moved. If we get all the way through
* the scan of tmp without finding any node that moved, much less
* moved to an empty node, then there is nothing left worth migrating.
*/
tmp = *from;
while (!nodes_empty(tmp)) {
int s, d;
int source = NUMA_NO_NODE;
int dest = 0;
for_each_node_mask(s, tmp) {
/*
* do_migrate_pages() tries to maintain the relative
* node relationship of the pages established between
* threads and memory areas.
*
* However if the number of source nodes is not equal to
* the number of destination nodes we can not preserve
* this node relative relationship. In that case, skip
* copying memory from a node that is in the destination
* mask.
*
* Example: [2,3,4] -> [3,4,5] moves everything.
* [0-7] - > [3,4,5] moves only 0,1,2,6,7.
*/
if ((nodes_weight(*from) != nodes_weight(*to)) &&
(node_isset(s, *to)))
continue;
d = node_remap(s, *from, *to);
if (s == d)
continue;
source = s; /* Node moved. Memorize */
dest = d;
/* dest not in remaining from nodes? */
if (!node_isset(dest, tmp))
break;
}
if (source == NUMA_NO_NODE)
break;
node_clear(source, tmp);
err = migrate_to_node(mm, source, dest, flags);
if (err > 0)
nr_failed += err;
if (err < 0)
break;
}
lru_cache_enable();
if (err < 0)
return err;
return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
}
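/*
* Worked trace of the pairing loop above (illustrative): for
* do_migrate_pages(mm, from = {2,3,4}, to = {3,4,5}, flags), the scan of
* tmp prefers a <source, dest> pair whose dest is not itself waiting to be
* drained, so the migrations happen in the order
*
*	4 -> 5	(5 is outside tmp, an "empty slot")
*	3 -> 4	(4 has just been vacated)
*	2 -> 3	(3 has just been vacated)
*
* i.e. nodes are drained into already-vacated destinations first, which is
* the overload avoidance described in the comment inside the function.
*/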
/*
* Allocate a new folio for page migration, according to NUMA mempolicy.
*/
static struct folio *alloc_migration_target_by_mpol(struct folio *src,
unsigned long private)
{
struct migration_mpol *mmpol = (struct migration_mpol *)private;
struct mempolicy *pol = mmpol->pol;
pgoff_t ilx = mmpol->ilx;
unsigned int order;
int nid = numa_node_id();
gfp_t gfp;
order = folio_order(src);
ilx += src->index >> order;
if (folio_test_hugetlb(src)) {
nodemask_t *nodemask;
struct hstate *h;
h = folio_hstate(src);
gfp = htlb_alloc_mask(h);
nodemask = policy_nodemask(gfp, pol, ilx, &nid);
return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp,
htlb_allow_alloc_fallback(MR_MEMPOLICY_MBIND));
}
if (folio_test_large(src))
gfp = GFP_TRANSHUGE;
else
gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
return folio_alloc_mpol(gfp, order, pol, ilx, nid);
}
#else
static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
unsigned long flags)
{
return false;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
return -ENOSYS;
}
static struct folio *alloc_migration_target_by_mpol(struct folio *src,
unsigned long private)
{
return NULL;
}
#endif
static long do_mbind(unsigned long start, unsigned long len,
unsigned short mode, unsigned short mode_flags,
nodemask_t *nmask, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct vma_iterator vmi;
struct migration_mpol mmpol;
struct mempolicy *new;
unsigned long end;
long err;
long nr_failed;
LIST_HEAD(pagelist);
if (flags & ~(unsigned long)MPOL_MF_VALID)
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
if (start & ~PAGE_MASK)
return -EINVAL;
if (mode == MPOL_DEFAULT)
flags &= ~MPOL_MF_STRICT;
len = PAGE_ALIGN(len);
end = start + len;
if (end < start)
return -EINVAL;
if (end == start)
return 0;
new = mpol_new(mode, mode_flags, nmask);
if (IS_ERR(new))
return PTR_ERR(new);
/*
* If we are using the default policy then operation
* on discontinuous address spaces is okay after all
*/
if (!new)
flags |= MPOL_MF_DISCONTIG_OK;
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
lru_cache_disable();
{
NODEMASK_SCRATCH(scratch);
if (scratch) {
mmap_write_lock(mm);
err = mpol_set_nodemask(new, nmask, scratch);
if (err)
mmap_write_unlock(mm);
} else
err = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
}
if (err)
goto mpol_out;
/*
* Lock the VMAs before scanning for pages to migrate,
* to ensure we don't miss a concurrently inserted page.
*/
nr_failed = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist);
if (nr_failed < 0) {
err = nr_failed;
nr_failed = 0;
} else {
vma_iter_init(&vmi, mm, start);
prev = vma_prev(&vmi);
for_each_vma_range(vmi, vma, end) {
err = mbind_range(&vmi, vma, &prev, start, end, new);
if (err)
break;
}
}
if (!err && !list_empty(&pagelist)) {
/* Convert MPOL_DEFAULT's NULL to task or default policy */
if (!new) {
new = get_task_policy(current);
mpol_get(new);
}
mmpol.pol = new;
mmpol.ilx = 0;
/*
* In the interleaved case, attempt to allocate on exactly the
* targeted nodes, for the first VMA to be migrated; for later
* VMAs, the nodes will still be interleaved from the targeted
* nodemask, but one by one may be selected differently.
*/
if (new->mode == MPOL_INTERLEAVE ||
new->mode == MPOL_WEIGHTED_INTERLEAVE) {
struct folio *folio;
unsigned int order;
unsigned long addr = -EFAULT;
list_for_each_entry(folio, &pagelist, lru) {
if (!folio_test_ksm(folio))
break;
}
if (!list_entry_is_head(folio, &pagelist, lru)) {
vma_iter_init(&vmi, mm, start);
for_each_vma_range(vmi, vma, end) {
addr = page_address_in_vma(folio,
folio_page(folio, 0), vma);
if (addr != -EFAULT)
break;
}
}
if (addr != -EFAULT) {
order = folio_order(folio);
/* We already know the pol, but not the ilx */
mpol_cond_put(get_vma_policy(vma, addr, order,
&mmpol.ilx));
/* Set base from which to increment by index */
mmpol.ilx -= folio->index >> order;
}
}
}
mmap_write_unlock(mm);
if (!err && !list_empty(&pagelist)) {
nr_failed |= migrate_pages(&pagelist,
alloc_migration_target_by_mpol, NULL,
(unsigned long)&mmpol, MIGRATE_SYNC,
MR_MEMPOLICY_MBIND, NULL);
}
if (nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
if (!list_empty(&pagelist))
putback_movable_pages(&pagelist);
mpol_out:
mpol_put(new);
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
lru_cache_enable();
return err;
}
/*
* User space interface with variable sized bitmaps for nodelists.
*/
static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
unsigned long maxnode)
{
unsigned long nlongs = BITS_TO_LONGS(maxnode);
int ret;
if (in_compat_syscall())
ret = compat_get_bitmap(mask,
(const compat_ulong_t __user *)nmask,
maxnode);
else
ret = copy_from_user(mask, nmask,
nlongs * sizeof(unsigned long));
if (ret)
return -EFAULT;
if (maxnode % BITS_PER_LONG)
mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
return 0;
}
/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
unsigned long maxnode)
{
--maxnode;
nodes_clear(*nodes);
if (maxnode == 0 || !nmask)
return 0;
if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
return -EINVAL;
/*
* When the user specified more nodes than supported just check
* if the non supported part is all zero, one word at a time,
* starting at the end.
*/
while (maxnode > MAX_NUMNODES) {
unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
unsigned long t;
if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
return -EFAULT;
if (maxnode - bits >= MAX_NUMNODES) {
maxnode -= bits;
} else {
maxnode = MAX_NUMNODES;
t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
}
if (t)
return -EINVAL;
}
return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
}
/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
nodemask_t *nodes)
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
bool compat = in_compat_syscall();
if (compat)
nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
return -EINVAL;
if (clear_user((char __user *)mask + nbytes, copy - nbytes))
return -EFAULT;
copy = nbytes;
maxnode = nr_node_ids;
}
if (compat)
return compat_put_bitmap((compat_ulong_t __user *)mask,
nodes_addr(*nodes), maxnode);
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
{
*flags = *mode & MPOL_MODE_FLAGS;
*mode &= ~MPOL_MODE_FLAGS;
if ((unsigned int)(*mode) >= MPOL_MAX)
return -EINVAL;
if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
return -EINVAL;
if (*flags & MPOL_F_NUMA_BALANCING) {
if (*mode == MPOL_BIND || *mode == MPOL_PREFERRED_MANY)
*flags |= (MPOL_F_MOF | MPOL_F_MORON);
else
return -EINVAL;
}
return 0;
}
static long kernel_mbind(unsigned long start, unsigned long len,
unsigned long mode, const unsigned long __user *nmask,
unsigned long maxnode, unsigned int flags)
{
unsigned short mode_flags;
nodemask_t nodes;
int lmode = mode;
int err;
start = untagged_addr(start);
err = sanitize_mpol_flags(&lmode, &mode_flags);
if (err)
return err;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
}
SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
unsigned long, home_node, unsigned long, flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct mempolicy *new, *old;
unsigned long end;
int err = -ENOENT;
VMA_ITERATOR(vmi, mm, start);
start = untagged_addr(start);
if (start & ~PAGE_MASK)
return -EINVAL;
/*
* flags is used for future extension if any.
*/
if (flags != 0)
return -EINVAL;
/*
* Check home_node is online to avoid accessing uninitialized
* NODE_DATA.
*/
if (home_node >= MAX_NUMNODES || !node_online(home_node))
return -EINVAL;
len = PAGE_ALIGN(len);
end = start + len;
if (end < start)
return -EINVAL;
if (end == start)
return 0;
mmap_write_lock(mm);
prev = vma_prev(&vmi);
for_each_vma_range(vmi, vma, end) {
/*
* If any vma in the range has a policy other than MPOL_BIND
* or MPOL_PREFERRED_MANY we return an error. We don't reset
* the home node for vmas we already updated before.
*/
old = vma_policy(vma);
if (!old) {
prev = vma;
continue;
}
if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
err = -EOPNOTSUPP;
break;
}
new = mpol_dup(old);
if (IS_ERR(new)) {
err = PTR_ERR(new);
break;
}
vma_start_write(vma);
new->home_node = home_node;
err = mbind_range(&vmi, vma, &prev, start, end, new);
mpol_put(new);
if (err)
break;
}
mmap_write_unlock(mm);
return err;
}
SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
unsigned long, mode, const unsigned long __user *, nmask,
unsigned long, maxnode, unsigned int, flags)
{
return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}
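/*
* Userspace usage sketch for mbind() (illustrative only; assumes the
* mbind() wrapper and MPOL_* constants from libnuma's <numaif.h>, and an
* arbitrary mapping size and node choice):
*
*	#include <numaif.h>
*	#include <sys/mman.h>
*
*	size_t len = 4UL << 20;
*	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
*		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
*	unsigned long nodemask = 1UL << 1;	// node 1 only
*
*	// bind this mapping to node 1, migrating pages already present
*	if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
*		  MPOL_MF_MOVE | MPOL_MF_STRICT) < 0)
*		perror("mbind");
*
* Without MPOL_MF_MOVE, mbind() only affects pages faulted in afterwards.
*/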
/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
unsigned long maxnode)
{
unsigned short mode_flags;
nodemask_t nodes;
int lmode = mode;
int err;
err = sanitize_mpol_flags(&lmode, &mode_flags);
if (err)
return err;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
return do_set_mempolicy(lmode, mode_flags, &nodes);
}
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
unsigned long, maxnode)
{
return kernel_set_mempolicy(mode, nmask, maxnode);
}
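/*
* Userspace usage sketch for set_mempolicy() (illustrative only; assumes
* libnuma's <numaif.h> wrapper): interleave all future allocations of the
* calling task across nodes 0 and 1:
*
*	#include <numaif.h>
*
*	unsigned long nodemask = (1UL << 0) | (1UL << 1);
*
*	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
*			  8 * sizeof(nodemask)) < 0)
*		perror("set_mempolicy");
*
* Unlike mbind() above, this changes the task policy, so it only affects
* allocations made after the call and is inherited by children on fork().
*/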
static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
const unsigned long __user *old_nodes,
const unsigned long __user *new_nodes)
{
struct mm_struct *mm = NULL;
struct task_struct *task;
nodemask_t task_nodes;
int err;
nodemask_t *old;
nodemask_t *new;
NODEMASK_SCRATCH(scratch);
if (!scratch)
return -ENOMEM;
old = &scratch->mask1;
new = &scratch->mask2;
err = get_nodes(old, old_nodes, maxnode);
if (err)
goto out;
err = get_nodes(new, new_nodes, maxnode);
if (err)
goto out;
/* Find the mm_struct */
rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
rcu_read_unlock();
err = -ESRCH;
goto out;
}
get_task_struct(task);
err = -EINVAL;
/*
* Check if this process has the right to modify the specified process.
* Use the regular "ptrace_may_access()" checks.
*/
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out_put;
}
rcu_read_unlock();
task_nodes = cpuset_mems_allowed(task);
/* Is the user allowed to access the target nodes? */
if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
err = -EPERM;
goto out_put;
}
task_nodes = cpuset_mems_allowed(current);
nodes_and(*new, *new, task_nodes);
if (nodes_empty(*new))
goto out_put;
err = security_task_movememory(task);
if (err)
goto out_put;
mm = get_task_mm(task);
put_task_struct(task);
if (!mm) {
err = -EINVAL;
goto out;
}
err = do_migrate_pages(mm, old, new,
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
mmput(mm);
out:
NODEMASK_SCRATCH_FREE(scratch);
return err;
out_put:
put_task_struct(task);
goto out;
}
SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
const unsigned long __user *, old_nodes,
const unsigned long __user *, new_nodes)
{
return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}
/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
unsigned long __user *nmask,
unsigned long maxnode,
unsigned long addr,
unsigned long flags)
{
int err;
int pval;
nodemask_t nodes;
if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
addr = untagged_addr(addr);
err = do_get_mempolicy(&pval, &nodes, addr, flags);
if (err)
return err;
if (policy && put_user(pval, policy))
return -EFAULT;
if (nmask)
err = copy_nodes_to_user(nmask, maxnode, &nodes);
return err;
}
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
unsigned long __user *, nmask, unsigned long, maxnode,
unsigned long, addr, unsigned long, flags)
{
return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}
bool vma_migratable(struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
return false;
/*
* DAX device mappings require predictable access latency, so avoid
* incurring periodic faults.
*/
if (vma_is_dax(vma))
return false;
if (is_vm_hugetlb_page(vma) &&
!hugepage_migration_supported(hstate_vma(vma)))
return false;
/*
* Migration allocates pages in the highest zone. If we cannot
* do so then migration (at least from node to node) is not
* possible.
*/
if (vma->vm_file &&
gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
< policy_zone)
return false;
return true;
}
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr, pgoff_t *ilx)
{
*ilx = 0;
return (vma->vm_ops && vma->vm_ops->get_policy) ?
vma->vm_ops->get_policy(vma, addr, ilx) : vma->vm_policy;
}
/*
* get_vma_policy(@vma, @addr, @order, @ilx)
* @vma: virtual memory area whose policy is sought
* @addr: address in @vma for shared policy lookup
* @order: 0, or appropriate huge_page_order for interleaving
* @ilx: interleave index (output), for use only when MPOL_INTERLEAVE or
* MPOL_WEIGHTED_INTERLEAVE
*
* Returns effective policy for a VMA at specified address.
* Falls back to current->mempolicy or system default policy, as necessary.
* Shared policies [those marked as MPOL_F_SHARED] require an extra reference
* count--added by the get_policy() vm_op, as appropriate--to protect against
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies.
*/
struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr, int order, pgoff_t *ilx)
{
struct mempolicy *pol;
pol = __get_vma_policy(vma, addr, ilx);
if (!pol)
pol = get_task_policy(current);
if (pol->mode == MPOL_INTERLEAVE ||
pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
*ilx += vma->vm_pgoff >> order;
*ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
}
return pol;
}
bool vma_policy_mof(struct vm_area_struct *vma)
{
struct mempolicy *pol;
if (vma->vm_ops && vma->vm_ops->get_policy) {
bool ret = false;
pgoff_t ilx; /* ignored here */
pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);
if (pol && (pol->flags & MPOL_F_MOF))
ret = true;
mpol_cond_put(pol);
return ret;
}
pol = vma->vm_policy;
if (!pol)
pol = get_task_policy(current);
return pol->flags & MPOL_F_MOF;
}
bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
enum zone_type dynamic_policy_zone = policy_zone;
BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
/*
* if policy->nodes has movable memory only,
* we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
*
* policy->nodes is intersected with node_states[N_MEMORY], so if the
* following test fails, it implies that policy->nodes has movable
* memory only.
*/
if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
dynamic_policy_zone = ZONE_MOVABLE;
return zone >= dynamic_policy_zone;
}
static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
{
unsigned int node;
unsigned int cpuset_mems_cookie;
retry:
/* to prevent miscount use tsk->mems_allowed_seq to detect rebind */
cpuset_mems_cookie = read_mems_allowed_begin();
node = current->il_prev;
if (!current->il_weight || !node_isset(node, policy->nodes)) {
node = next_node_in(node, policy->nodes);
if (read_mems_allowed_retry(cpuset_mems_cookie))
goto retry;
if (node == MAX_NUMNODES)
return node;
current->il_prev = node;
current->il_weight = get_il_weight(node);
}
current->il_weight--;
return node;
}
/* Do dynamic interleaving for a process */
static unsigned int interleave_nodes(struct mempolicy *policy)
{
unsigned int nid;
unsigned int cpuset_mems_cookie;
/* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
do {
cpuset_mems_cookie = read_mems_allowed_begin();
nid = next_node_in(current->il_prev, policy->nodes);
} while (read_mems_allowed_retry(cpuset_mems_cookie));
if (nid < MAX_NUMNODES)
current->il_prev = nid;
return nid;
}
/*
* Depending on the memory policy provide a node from which to allocate the
* next slab entry.
*/
unsigned int mempolicy_slab_node(void)
{
struct mempolicy *policy;
int node = numa_mem_id();
if (!in_task())
return node;
policy = current->mempolicy;
if (!policy)
return node;
switch (policy->mode) {
case MPOL_PREFERRED:
return first_node(policy->nodes);
case MPOL_INTERLEAVE:
return interleave_nodes(policy);
case MPOL_WEIGHTED_INTERLEAVE:
return weighted_interleave_nodes(policy);
case MPOL_BIND:
case MPOL_PREFERRED_MANY:
{
struct zoneref *z;
/*
* Follow bind policy behavior and start allocation at the
* first node.
*/
struct zonelist *zonelist;
enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, highest_zoneidx,
&policy->nodes);
return zonelist_zone(z) ? zonelist_node_idx(z) : node;
}
case MPOL_LOCAL:
return node;
default:
BUG();
}
}
static unsigned int read_once_policy_nodemask(struct mempolicy *pol,
nodemask_t *mask)
{
/*
* barrier stabilizes the nodemask locally so that it can be iterated
* over safely without concern for changes. Allocators validate node
* selection does not violate mems_allowed, so this is safe.
*/
barrier();
memcpy(mask, &pol->nodes, sizeof(nodemask_t));
barrier();
return nodes_weight(*mask);
}
static unsigned int weighted_interleave_nid(struct mempolicy *pol, pgoff_t ilx)
{
struct weighted_interleave_state *state;
nodemask_t nodemask;
unsigned int target, nr_nodes;
u8 *table = NULL;
unsigned int weight_total = 0;
u8 weight;
int nid = 0;
nr_nodes = read_once_policy_nodemask(pol, &nodemask);
if (!nr_nodes)
return numa_node_id();
rcu_read_lock();
state = rcu_dereference(wi_state);
/* Uninitialized wi_state means we should assume all weights are 1 */
if (state)
table = state->iw_table;
/* calculate the total weight */
for_each_node_mask(nid, nodemask)
weight_total += table ? table[nid] : 1;
/* Calculate the node offset based on totals */
target = ilx % weight_total;
nid = first_node(nodemask);
while (target) {
/* detect system default usage */
weight = table ? table[nid] : 1;
if (target < weight)
break;
target -= weight;
nid = next_node_in(nid, nodemask);
}
rcu_read_unlock();
return nid;
}
/*
* Do static interleaving for interleave index @ilx. Returns the ilx'th
* node in pol->nodes (starting from ilx=0), wrapping around if ilx
* exceeds the number of present nodes.
*/
static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
{
nodemask_t nodemask;
unsigned int target, nnodes;
int i;
int nid;
nnodes = read_once_policy_nodemask(pol, &nodemask);
if (!nnodes)
return numa_node_id();
target = ilx % nnodes;
nid = first_node(nodemask);
for (i = 0; i < target; i++)
nid = next_node(nid, nodemask);
return nid;
}
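/*
* Worked example for interleave_nid() (illustrative): with
* pol->nodes = {0,2,5} and ilx = 4, nnodes = 3 and target = 4 % 3 = 1,
* so the walk starts at node 0 and advances once, returning node 2.
* Successive ilx values therefore cycle through 0, 2, 5, 0, 2, 5, ...
*/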
/*
* Return a nodemask representing a mempolicy for filtering nodes for
* page allocation, together with preferred node id (or the input node id).
*/
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
pgoff_t ilx, int *nid)
{
nodemask_t *nodemask = NULL;
switch (pol->mode) {
case MPOL_PREFERRED:
/* Override input node id */
*nid = first_node(pol->nodes);
break;
case MPOL_PREFERRED_MANY:
nodemask = &pol->nodes;
if (pol->home_node != NUMA_NO_NODE)
*nid = pol->home_node;
break;
case MPOL_BIND:
/* Restrict to nodemask (but not on lower zones) */
if (apply_policy_zone(pol, gfp_zone(gfp)) &&
cpuset_nodemask_valid_mems_allowed(&pol->nodes))
nodemask = &pol->nodes;
if (pol->home_node != NUMA_NO_NODE)
*nid = pol->home_node;
/*
* __GFP_THISNODE shouldn't even be used with the bind policy
* because we might easily break the expectation to stay on the
* requested node and not break the policy.
*/
WARN_ON_ONCE(gfp & __GFP_THISNODE);
break;
case MPOL_INTERLEAVE:
/* Override input node id */
*nid = (ilx == NO_INTERLEAVE_INDEX) ?
interleave_nodes(pol) : interleave_nid(pol, ilx);
break;
case MPOL_WEIGHTED_INTERLEAVE:
*nid = (ilx == NO_INTERLEAVE_INDEX) ? weighted_interleave_nodes(pol) :
weighted_interleave_nid(pol, ilx);
break;
}
return nodemask;
}
#ifdef CONFIG_HUGETLBFS
/*
* huge_node(@vma, @addr, @gfp_flags, @mpol)
* @vma: virtual memory area whose policy is sought
* @addr: address in @vma for shared policy lookup and interleave policy
* @gfp_flags: for requested zone
* @mpol: pointer to mempolicy pointer for reference counted mempolicy
* @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
*
* Returns a nid suitable for a huge page allocation and a pointer
* to the struct mempolicy for conditional unref after allocation.
* If the effective policy is 'bind' or 'prefer-many', returns a pointer
* to the mempolicy's @nodemask for filtering the zonelist.
*/
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask)
{
pgoff_t ilx;
int nid;
nid = numa_node_id();
*mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
*nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);
return nid;
}
/*
* init_nodemask_of_mempolicy
*
* If the current task's mempolicy is "default" [NULL], return 'false'
* to indicate default policy. Otherwise, extract the policy nodemask
* for 'bind' or 'interleave' policy into the argument nodemask, or
* initialize the argument nodemask to contain the single node for
* 'preferred' or 'local' policy and return 'true' to indicate presence
* of non-default mempolicy.
*
* We don't bother with reference counting the mempolicy [mpol_get/put]
* because the current task is examining its own mempolicy and a task's
* mempolicy is only ever changed by the task itself.
*
* N.B., it is the caller's responsibility to free a returned nodemask.
*/
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
struct mempolicy *mempolicy;
if (!(mask && current->mempolicy))
return false;
task_lock(current);
mempolicy = current->mempolicy;
switch (mempolicy->mode) {
case MPOL_PREFERRED:
case MPOL_PREFERRED_MANY:
case MPOL_BIND:
case MPOL_INTERLEAVE:
case MPOL_WEIGHTED_INTERLEAVE:
*mask = mempolicy->nodes;
break;
case MPOL_LOCAL:
init_nodemask_of_node(mask, numa_node_id());
break;
default:
BUG();
}
task_unlock(current);
return true;
}
#endif
/*
* mempolicy_in_oom_domain
*
* If tsk's mempolicy is "bind", check for intersection between mask and
* the policy nodemask. Otherwise, return true for all other policies
* including "interleave", as a tsk with "interleave" policy may have
* memory allocated from all nodes in system.
*
* Takes task_lock(tsk) to prevent freeing of its mempolicy.
*/
bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask)
{
struct mempolicy *mempolicy;
bool ret = true;
if (!mask)
return ret;
task_lock(tsk);
mempolicy = tsk->mempolicy;
if (mempolicy && mempolicy->mode == MPOL_BIND)
ret = nodes_intersects(mempolicy->nodes, *mask);
task_unlock(tsk);
return ret;
}
static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
int nid, nodemask_t *nodemask)
{
struct page *page;
gfp_t preferred_gfp;
/*
* This is a two pass approach. The first pass will only try the
* preferred nodes but skip the direct reclaim and allow the
* allocation to fail, while the second pass will try all the
* nodes in the system.
*/
preferred_gfp = gfp | __GFP_NOWARN;
preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
page = __alloc_frozen_pages_noprof(preferred_gfp, order, nid, nodemask);
if (!page)
page = __alloc_frozen_pages_noprof(gfp, order, nid, NULL);
return page;
}
/**
* alloc_pages_mpol - Allocate pages according to NUMA mempolicy.
* @gfp: GFP flags.
* @order: Order of the page allocation.
* @pol: Pointer to the NUMA mempolicy.
* @ilx: Index for interleave mempolicy (also distinguishes alloc_pages()).
* @nid: Preferred node (usually numa_node_id() but @mpol may override it).
*
* Return: The page on success or NULL if allocation fails.
*/
static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
struct mempolicy *pol, pgoff_t ilx, int nid)
{
nodemask_t *nodemask;
struct page *page;
nodemask = policy_nodemask(gfp, pol, ilx, &nid);
if (pol->mode == MPOL_PREFERRED_MANY)
return alloc_pages_preferred_many(gfp, order, nid, nodemask);
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
/* filter "hugepage" allocation, unless from alloc_pages() */
order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
/*
* For hugepage allocation and non-interleave policy which
* allows the current node (or other explicitly preferred
* node) we only try to allocate from the current/preferred
* node and don't fall back to other nodes, as the cost of
* remote accesses would likely offset THP benefits.
*
* If the policy is interleave or does not allow the current
* node in its nodemask, we allocate the standard way.
*/
if (pol->mode != MPOL_INTERLEAVE &&
pol->mode != MPOL_WEIGHTED_INTERLEAVE &&
(!nodemask || node_isset(nid, *nodemask))) {
/*
* First, try to allocate THP only on local node, but
* don't reclaim unnecessarily, just compact.
*/
page = __alloc_frozen_pages_noprof(
gfp | __GFP_THISNODE | __GFP_NORETRY, order,
nid, NULL);
if (page || !(gfp & __GFP_DIRECT_RECLAIM))
return page;
/*
* If hugepage allocations are configured to always
* synchronous compact or the vma has been madvised
* to prefer hugepage backing, retry allowing remote
* memory with both reclaim and compact as well.
*/
}
}
page = __alloc_frozen_pages_noprof(gfp, order, nid, nodemask);
if (unlikely(pol->mode == MPOL_INTERLEAVE ||
pol->mode == MPOL_WEIGHTED_INTERLEAVE) && page) {
/* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
if (static_branch_likely(&vm_numa_stat_key) &&
page_to_nid(page) == nid) {
preempt_disable();
__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
preempt_enable();
}
}
return page;
}
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *pol, pgoff_t ilx, int nid)
{
struct page *page = alloc_pages_mpol(gfp | __GFP_COMP, order, pol,
ilx, nid);
if (!page)
return NULL;
set_page_refcounted(page);
return page_rmappable_folio(page);
}
/**
* vma_alloc_folio - Allocate a folio for a VMA.
* @gfp: GFP flags.
* @order: Order of the folio.
* @vma: Pointer to VMA.
* @addr: Virtual address of the allocation. Must be inside @vma.
*
* Allocate a folio for a specific address in @vma, using the appropriate
* NUMA policy. The caller must hold the mmap_lock of the mm_struct of the
* VMA to prevent it from going away. Should be used for all allocations
* for folios that will be mapped into user space, excepting hugetlbfs, and
* excepting where direct use of folio_alloc_mpol() is more appropriate.
*
* Return: The folio on success or NULL if allocation fails.
*/
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol;
pgoff_t ilx;
struct folio *folio;
if (vma->vm_flags & VM_DROPPABLE)
gfp |= __GFP_NOWARN;
pol = get_vma_policy(vma, addr, order, &ilx);
folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
mpol_cond_put(pol);
return folio;
}
EXPORT_SYMBOL(vma_alloc_folio_noprof);
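/*
 * Illustrative sketch (not part of this file): a typical anonymous fault
 * path allocates an order-0 folio for the faulting address while holding
 * the mmap_lock, and reports OOM on failure.
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */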
struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned order)
{
struct mempolicy *pol = &default_policy;
/*
* No reference counting needed for current->mempolicy
* nor system default_policy
*/
if (!in_interrupt() && !(gfp & __GFP_THISNODE))
pol = get_task_policy(current);
return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX,
numa_node_id());
}
/**
* alloc_pages - Allocate pages.
* @gfp: GFP flags.
* @order: Power of two of number of pages to allocate.
*
* Allocate 1 << @order contiguous pages. The physical address of the
* first page is naturally aligned (eg an order-3 allocation will be aligned
* to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
* process is honoured when in process context.
*
* Context: Can be called from any context, providing the appropriate GFP
* flags are used.
* Return: The page on success or NULL if allocation fails.
*/
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
{
struct page *page = alloc_frozen_pages_noprof(gfp, order);
if (page)
set_page_refcounted(page);
return page;
}
EXPORT_SYMBOL(alloc_pages_noprof);
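/*
 * Illustrative sketch (not part of this file): an order-2 allocation
 * returns four physically contiguous, naturally aligned pages, which are
 * later returned with __free_pages() at the same order.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */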
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order));
}
EXPORT_SYMBOL(folio_alloc_noprof);
static unsigned long alloc_pages_bulk_interleave(gfp_t gfp,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
int nodes;
unsigned long nr_pages_per_node;
int delta;
int i;
unsigned long nr_allocated;
unsigned long total_allocated = 0;
nodes = nodes_weight(pol->nodes);
nr_pages_per_node = nr_pages / nodes;
delta = nr_pages - nodes * nr_pages_per_node;
for (i = 0; i < nodes; i++) {
if (delta) {
nr_allocated = alloc_pages_bulk_noprof(gfp,
interleave_nodes(pol), NULL,
nr_pages_per_node + 1,
page_array);
delta--;
} else {
nr_allocated = alloc_pages_bulk_noprof(gfp,
interleave_nodes(pol), NULL,
nr_pages_per_node, page_array);
}
page_array += nr_allocated;
total_allocated += nr_allocated;
}
return total_allocated;
}
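/*
 * Worked example for the split in alloc_pages_bulk_interleave() above
 * (illustrative): with nr_pages == 10 across 3 interleave nodes,
 * nr_pages_per_node == 3 and delta == 1, so the first node visited gets
 * 3 + 1 = 4 pages and the remaining two nodes get 3 pages each, 10 total.
 */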
static unsigned long alloc_pages_bulk_weighted_interleave(gfp_t gfp,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
struct weighted_interleave_state *state;
struct task_struct *me = current;
unsigned int cpuset_mems_cookie;
unsigned long total_allocated = 0;
unsigned long nr_allocated = 0;
unsigned long rounds;
unsigned long node_pages, delta;
u8 *weights, weight;
unsigned int weight_total = 0;
unsigned long rem_pages = nr_pages;
nodemask_t nodes;
int nnodes, node;
int resume_node = MAX_NUMNODES - 1;
u8 resume_weight = 0;
int prev_node;
int i;
if (!nr_pages)
return 0;
/* read the nodes onto the stack, retry if done during rebind */
do {
cpuset_mems_cookie = read_mems_allowed_begin();
nnodes = read_once_policy_nodemask(pol, &nodes);
} while (read_mems_allowed_retry(cpuset_mems_cookie));
/* if the nodemask has become invalid, we cannot do anything */
if (!nnodes)
return 0;
/* Continue allocating from most recent node and adjust the nr_pages */
node = me->il_prev;
weight = me->il_weight;
if (weight && node_isset(node, nodes)) {
node_pages = min(rem_pages, weight);
nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
page_array);
page_array += nr_allocated;
total_allocated += nr_allocated;
/* if that's all the pages, no need to interleave */
if (rem_pages <= weight) {
me->il_weight -= rem_pages;
return total_allocated;
}
/* Otherwise we adjust remaining pages, continue from there */
rem_pages -= weight;
}
/* clear active weight in case of an allocation failure */
me->il_weight = 0;
prev_node = node;
/* create a local copy of node weights to operate on outside rcu */
weights = kzalloc(nr_node_ids, GFP_KERNEL);
if (!weights)
return total_allocated;
rcu_read_lock();
state = rcu_dereference(wi_state);
if (state) {
memcpy(weights, state->iw_table, nr_node_ids * sizeof(u8));
rcu_read_unlock();
} else {
rcu_read_unlock();
for (i = 0; i < nr_node_ids; i++)
weights[i] = 1;
}
/* calculate total, detect system default usage */
for_each_node_mask(node, nodes)
weight_total += weights[node];
/*
* Calculate rounds/partial rounds to minimize __alloc_pages_bulk calls.
* Track which node weighted interleave should resume from.
*
* if (rounds > 0) and (delta == 0), resume_node will always be
* the node following prev_node and its weight.
*/
rounds = rem_pages / weight_total;
delta = rem_pages % weight_total;
resume_node = next_node_in(prev_node, nodes);
resume_weight = weights[resume_node];
for (i = 0; i < nnodes; i++) {
node = next_node_in(prev_node, nodes);
weight = weights[node];
node_pages = weight * rounds;
/* If a delta exists, add this node's portion of the delta */
if (delta > weight) {
node_pages += weight;
delta -= weight;
} else if (delta) {
/* when delta is depleted, resume from that node */
node_pages += delta;
resume_node = node;
resume_weight = weight - delta;
delta = 0;
}
/* node_pages can be 0 if an allocation fails and rounds == 0 */
if (!node_pages)
break;
nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
page_array);
page_array += nr_allocated;
total_allocated += nr_allocated;
if (total_allocated == nr_pages)
break;
prev_node = node;
}
me->il_prev = resume_node;
me->il_weight = resume_weight;
kfree(weights);
return total_allocated;
}
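/*
 * Worked example for the rounds/delta logic above (illustrative), assuming
 * the walk starts at node 0: with nodes {0,1}, weights {3,1} (weight_total
 * == 4) and rem_pages == 10, rounds == 2 and delta == 2. Node 0 receives
 * 3 * 2 + 2 = 8 pages and becomes the resume node with a residual weight of
 * 3 - 2 = 1; node 1 receives 1 * 2 = 2 pages, for 10 in total.
 */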
static unsigned long alloc_pages_bulk_preferred_many(gfp_t gfp, int nid,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
gfp_t preferred_gfp;
unsigned long nr_allocated = 0;
preferred_gfp = gfp | __GFP_NOWARN;
preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
nr_pages, page_array);
if (nr_allocated < nr_pages)
nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
nr_pages - nr_allocated,
page_array + nr_allocated);
return nr_allocated;
}
/*
 * Bulk page allocation and mempolicy need to be considered at the
 * same time in some situations, such as vmalloc.
 *
 * Doing so can speed up memory allocation considerably, especially
 * for interleaved allocations.
 */
unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
unsigned long nr_pages, struct page **page_array)
{
struct mempolicy *pol = &default_policy;
nodemask_t *nodemask;
int nid;
if (!in_interrupt() && !(gfp & __GFP_THISNODE))
pol = get_task_policy(current);
if (pol->mode == MPOL_INTERLEAVE)
return alloc_pages_bulk_interleave(gfp, pol,
nr_pages, page_array);
if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
return alloc_pages_bulk_weighted_interleave(
gfp, pol, nr_pages, page_array);
if (pol->mode == MPOL_PREFERRED_MANY)
return alloc_pages_bulk_preferred_many(gfp,
numa_node_id(), pol, nr_pages, page_array);
nid = numa_node_id();
nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
return alloc_pages_bulk_noprof(gfp, nid, nodemask,
nr_pages, page_array);
}
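/*
 * Illustrative sketch (not part of this file): vmalloc-style users pass a
 * pre-allocated page array and top up with single-page allocations for
 * anything the bulk path could not provide.
 *
 *	nr = alloc_pages_bulk_mempolicy_noprof(GFP_KERNEL, nr_pages, pages);
 *	while (nr < nr_pages) {
 *		pages[nr] = alloc_page(GFP_KERNEL);
 *		if (!pages[nr])
 *			break;
 *		nr++;
 *	}
 */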
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
struct mempolicy *pol = mpol_dup(src->vm_policy);
if (IS_ERR(pol))
return PTR_ERR(pol);
dst->vm_policy = pol;
return 0;
}
/*
* If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
* rebinds the mempolicy it is copying by calling mpol_rebind_policy()
* with the mems_allowed returned by cpuset_mems_allowed(). This
* keeps mempolicies cpuset relative after its cpuset moves. See
* further kernel/cpuset.c update_nodemask().
*
* current's mempolicy may be rebound by another task (the task that changes
* the cpuset's mems), so we needn't do rebind work for the current task.
*/
/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
/* task's mempolicy is protected by alloc_lock */
if (old == current->mempolicy) {
task_lock(current);
*new = *old;
task_unlock(current);
} else
*new = *old;
if (current_cpuset_is_being_rebound()) {
nodemask_t mems = cpuset_mems_allowed(current);
mpol_rebind_policy(new, &mems);
}
atomic_set(&new->refcnt, 1);
return new;
}
/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
if (!a || !b)
return false;
if (a->mode != b->mode)
return false;
if (a->flags != b->flags)
return false;
if (a->home_node != b->home_node)
return false;
if (mpol_store_user_nodemask(a))
if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
return false;
switch (a->mode) {
case MPOL_BIND:
case MPOL_INTERLEAVE:
case MPOL_PREFERRED:
case MPOL_PREFERRED_MANY:
case MPOL_WEIGHTED_INTERLEAVE:
return !!nodes_equal(a->nodes, b->nodes);
case MPOL_LOCAL:
return true;
default:
BUG();
return false;
}
}
/*
* Shared memory backing store policy support.
*
* Remember policies even when nobody has shared memory mapped.
* The policies are kept in Red-Black tree linked from the inode.
* They are protected by the sp->lock rwlock, which should be held
* for any accesses to the tree.
*/
/*
* lookup first element intersecting start-end. Caller holds sp->lock for
* reading or for writing
*/
static struct sp_node *sp_lookup(struct shared_policy *sp,
pgoff_t start, pgoff_t end)
{
struct rb_node *n = sp->root.rb_node;
while (n) {
struct sp_node *p = rb_entry(n, struct sp_node, nd);
if (start >= p->end)
n = n->rb_right;
else if (end <= p->start)
n = n->rb_left;
else
break;
}
if (!n)
return NULL;
for (;;) {
struct sp_node *w = NULL;
struct rb_node *prev = rb_prev(n);
if (!prev)
break;
w = rb_entry(prev, struct sp_node, nd);
if (w->end <= start)
break;
n = prev;
}
return rb_entry(n, struct sp_node, nd);
}
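/*
 * Worked example for sp_lookup() above (illustrative): with shared policy
 * nodes covering [2,5) and [7,9), sp_lookup(sp, 4, 8) returns the [2,5)
 * node, i.e. the lowest-starting element intersecting the range; callers
 * then walk forward with rb_next() to visit the rest of the overlap.
 */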
/*
* Insert a new shared policy into the list. Caller holds sp->lock for
* writing.
*/
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
struct rb_node **p = &sp->root.rb_node;
struct rb_node *parent = NULL;
struct sp_node *nd;
while (*p) {
parent = *p;
nd = rb_entry(parent, struct sp_node, nd);
if (new->start < nd->start)
p = &(*p)->rb_left;
else if (new->end > nd->end)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new->nd, parent, p);
rb_insert_color(&new->nd, &sp->root);
}
/* Find shared policy intersecting idx */
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
pgoff_t idx)
{
struct mempolicy *pol = NULL;
struct sp_node *sn;
if (!sp->root.rb_node)
return NULL;
read_lock(&sp->lock);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
read_unlock(&sp->lock);
return pol;
}
static void sp_free(struct sp_node *n)
{
mpol_put(n->policy);
kmem_cache_free(sn_cache, n);
}
/**
* mpol_misplaced - check whether current folio node is valid in policy
*
* @folio: folio to be checked
* @vmf: structure describing the fault
* @addr: virtual address in @vma for shared policy lookup and interleave policy
*
* Lookup current policy node id for vma,addr and "compare to" folio's
* node id. Policy determination "mimics" alloc_page_vma().
* Called from fault path where we know the vma and faulting address.
*
* Return: NUMA_NO_NODE if the page is in a node that is valid for this
* policy, or a suitable node ID to allocate a replacement folio from.
*/
int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
unsigned long addr)
{
struct mempolicy *pol;
pgoff_t ilx;
struct zoneref *z;
int curnid = folio_nid(folio);
struct vm_area_struct *vma = vmf->vma;
int thiscpu = raw_smp_processor_id();
int thisnid = numa_node_id();
int polnid = NUMA_NO_NODE;
int ret = NUMA_NO_NODE;
/*
* Make sure ptl is held so that we don't preempt and we
* have a stable smp processor id
*/
lockdep_assert_held(vmf->ptl);
pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);
if (!(pol->flags & MPOL_F_MOF))
goto out;
switch (pol->mode) {
case MPOL_INTERLEAVE:
polnid = interleave_nid(pol, ilx);
break;
case MPOL_WEIGHTED_INTERLEAVE:
polnid = weighted_interleave_nid(pol, ilx);
break;
case MPOL_PREFERRED:
if (node_isset(curnid, pol->nodes))
goto out;
polnid = first_node(pol->nodes);
break;
case MPOL_LOCAL:
polnid = numa_node_id();
break;
case MPOL_BIND:
case MPOL_PREFERRED_MANY:
/*
* Even though MPOL_PREFERRED_MANY can allocate pages outside
* policy nodemask we don't allow numa migration to nodes
* outside policy nodemask for now. This is done so that if we
* want demotion to slow memory to happen, before allocating
* from some DRAM node say 'x', we will end up using a
* MPOL_PREFERRED_MANY mask excluding node 'x'. In such scenario
* we should not promote to node 'x' from slow memory node.
*/
if (pol->flags & MPOL_F_MORON) {
/*
* Optimize placement among multiple nodes
* via NUMA balancing
*/
if (node_isset(thisnid, pol->nodes))
break;
goto out;
}
/*
* use current page if in policy nodemask,
* else select nearest allowed node, if any.
* If no allowed nodes, use current [!misplaced].
*/
if (node_isset(curnid, pol->nodes))
goto out;
z = first_zones_zonelist(
node_zonelist(thisnid, GFP_HIGHUSER),
gfp_zone(GFP_HIGHUSER),
&pol->nodes);
polnid = zonelist_node_idx(z);
break;
default:
BUG();
}
/* Migrate the folio towards the node whose CPU is referencing it */
if (pol->flags & MPOL_F_MORON) {
polnid = thisnid;
if (!should_numa_migrate_memory(current, folio, curnid,
thiscpu))
goto out;
}
if (curnid != polnid)
ret = polnid;
out:
mpol_cond_put(pol);
return ret;
}
/*
* Drop the (possibly final) reference to task->mempolicy. It needs to be
* dropped after task->mempolicy is set to NULL so that any allocation done as
* part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
* policy.
*/
void mpol_put_task_policy(struct task_struct *task)
{
struct mempolicy *pol;
task_lock(task);
pol = task->mempolicy;
task->mempolicy = NULL;
task_unlock(task);
mpol_put(pol);
}
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
rb_erase(&n->nd, &sp->root);
sp_free(n);
}
static void sp_node_init(struct sp_node *node, unsigned long start,
unsigned long end, struct mempolicy *pol)
{
node->start = start;
node->end = end;
node->policy = pol;
}
static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
struct mempolicy *pol)
{
struct sp_node *n;
struct mempolicy *newpol;
n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n)
return NULL;
newpol = mpol_dup(pol);
if (IS_ERR(newpol)) {
kmem_cache_free(sn_cache, n);
return NULL;
}
newpol->flags |= MPOL_F_SHARED;
sp_node_init(n, start, end, newpol);
return n;
}
/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, pgoff_t start,
pgoff_t end, struct sp_node *new)
{
struct sp_node *n;
struct sp_node *n_new = NULL;
struct mempolicy *mpol_new = NULL;
int ret = 0;
restart:
write_lock(&sp->lock);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
struct rb_node *next = rb_next(&n->nd);
if (n->start >= start) {
if (n->end <= end)
sp_delete(sp, n);
else
n->start = end;
} else {
/* Old policy spanning whole new range. */
if (n->end > end) {
if (!n_new)
goto alloc_new;
*mpol_new = *n->policy;
atomic_set(&mpol_new->refcnt, 1);
sp_node_init(n_new, end, n->end, mpol_new);
n->end = start;
sp_insert(sp, n_new);
n_new = NULL;
mpol_new = NULL;
break;
} else
n->end = start;
}
if (!next)
break;
n = rb_entry(next, struct sp_node, nd);
}
if (new)
sp_insert(sp, new);
write_unlock(&sp->lock);
ret = 0;
err_out:
if (mpol_new)
mpol_put(mpol_new);
if (n_new)
kmem_cache_free(sn_cache, n_new);
return ret;
alloc_new:
write_unlock(&sp->lock);
ret = -ENOMEM;
n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n_new)
goto err_out;
mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!mpol_new)
goto err_out;
atomic_set(&mpol_new->refcnt, 1);
goto restart;
}
/**
* mpol_shared_policy_init - initialize shared policy for inode
* @sp: pointer to inode shared policy
* @mpol: struct mempolicy to install
*
* Install non-NULL @mpol in inode's shared policy rb-tree.
* On entry, the current task has a reference on a non-NULL @mpol.
* This must be released on exit.
* This is called at get_inode() time, so we can use GFP_KERNEL.
*/
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
int ret;
sp->root = RB_ROOT; /* empty tree == default mempolicy */
rwlock_init(&sp->lock);
if (mpol) {
struct sp_node *sn;
struct mempolicy *npol;
NODEMASK_SCRATCH(scratch);
if (!scratch)
goto put_mpol;
/* contextualize the tmpfs mount point mempolicy to this file */
npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
if (IS_ERR(npol))
goto free_scratch; /* no valid nodemask intersection */
task_lock(current);
ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch);
task_unlock(current);
if (ret)
goto put_npol;
/* alloc node covering entire file; adds ref to file's npol */
sn = sp_alloc(0, MAX_LFS_FILESIZE >> PAGE_SHIFT, npol);
if (sn)
sp_insert(sp, sn);
put_npol:
mpol_put(npol); /* drop initial ref on file's npol */
free_scratch:
NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
mpol_put(mpol); /* drop our incoming ref on sb mpol */
}
}
int mpol_set_shared_policy(struct shared_policy *sp,
struct vm_area_struct *vma, struct mempolicy *pol)
{
int err;
struct sp_node *new = NULL;
unsigned long sz = vma_pages(vma);
if (pol) {
new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
if (!new)
return -ENOMEM;
}
err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
if (err && new)
sp_free(new);
return err;
}
/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *sp)
{
struct sp_node *n;
struct rb_node *next;
if (!sp->root.rb_node)
return;
write_lock(&sp->lock);
next = rb_first(&sp->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
sp_delete(sp, n);
}
write_unlock(&sp->lock);
}
#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;
static void __init check_numabalancing_enable(void)
{
bool numabalancing_default = false;
if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
numabalancing_default = true;
/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
if (numabalancing_override)
set_numabalancing_state(numabalancing_override == 1);
if (num_online_nodes() > 1 && !numabalancing_override) {
pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
numabalancing_default ? "Enabling" : "Disabling");
set_numabalancing_state(numabalancing_default);
}
}
static int __init setup_numabalancing(char *str)
{
int ret = 0;
if (!str)
goto out;
if (!strcmp(str, "enable")) {
numabalancing_override = 1;
ret = 1;
} else if (!strcmp(str, "disable")) {
numabalancing_override = -1;
ret = 1;
}
out:
if (!ret)
pr_warn("Unable to parse numa_balancing=\n");
return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */
void __init numa_policy_init(void)
{
nodemask_t interleave_nodes;
unsigned long largest = 0;
int nid, prefer = 0;
policy_cache = kmem_cache_create("numa_policy",
sizeof(struct mempolicy),
0, SLAB_PANIC, NULL);
sn_cache = kmem_cache_create("shared_policy_node",
sizeof(struct sp_node),
0, SLAB_PANIC, NULL);
for_each_node(nid) {
preferred_node_policy[nid] = (struct mempolicy) {
.refcnt = ATOMIC_INIT(1),
.mode = MPOL_PREFERRED,
.flags = MPOL_F_MOF | MPOL_F_MORON,
.nodes = nodemask_of_node(nid),
};
}
/*
* Set interleaving policy for system init. Interleaving is only
* enabled across suitably sized nodes (default is >= 16MB), or
* fall back to the largest node if they're all smaller.
*/
nodes_clear(interleave_nodes);
for_each_node_state(nid, N_MEMORY) {
unsigned long total_pages = node_present_pages(nid);
/* Preserve the largest node */
if (largest < total_pages) {
largest = total_pages;
prefer = nid;
}
/* Interleave this node? */
if ((total_pages << PAGE_SHIFT) >= (16 << 20))
node_set(nid, interleave_nodes);
}
/* All too small, use the largest */
if (unlikely(nodes_empty(interleave_nodes)))
node_set(prefer, interleave_nodes);
if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
pr_err("%s: interleaving failed\n", __func__);
check_numabalancing_enable();
}
/* Reset policy of current process to default */
void numa_default_policy(void)
{
do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}
/*
* Parse and format mempolicy from/to strings
*/
static const char * const policy_modes[] =
{
[MPOL_DEFAULT] = "default",
[MPOL_PREFERRED] = "prefer",
[MPOL_BIND] = "bind",
[MPOL_INTERLEAVE] = "interleave",
[MPOL_WEIGHTED_INTERLEAVE] = "weighted interleave",
[MPOL_LOCAL] = "local",
[MPOL_PREFERRED_MANY] = "prefer (many)",
};
#ifdef CONFIG_TMPFS
/**
* mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
* @str: string containing mempolicy to parse
* @mpol: pointer to struct mempolicy pointer, returned on success.
*
* Format of input:
* <mode>[=<flags>][:<nodelist>]
*
* Return: %0 on success, else %1
*/
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
struct mempolicy *new = NULL;
unsigned short mode_flags;
nodemask_t nodes;
char *nodelist = strchr(str, ':');
char *flags = strchr(str, '=');
int err = 1, mode;
if (flags)
*flags++ = '\0'; /* terminate mode string */
if (nodelist) {
/* NUL-terminate mode or flags string */
*nodelist++ = '\0';
if (nodelist_parse(nodelist, nodes))
goto out;
if (!nodes_subset(nodes, node_states[N_MEMORY]))
goto out;
} else
nodes_clear(nodes);
mode = match_string(policy_modes, MPOL_MAX, str);
if (mode < 0)
goto out;
switch (mode) {
case MPOL_PREFERRED:
/*
* Insist on a nodelist of one node only, although later
* we use first_node(nodes) to grab a single node, so here
* nodelist (or nodes) cannot be empty.
*/
if (nodelist) {
char *rest = nodelist;
while (isdigit(*rest))
rest++;
if (*rest)
goto out;
if (nodes_empty(nodes))
goto out;
}
break;
case MPOL_INTERLEAVE:
case MPOL_WEIGHTED_INTERLEAVE:
/*
* Default to online nodes with memory if no nodelist
*/
if (!nodelist)
nodes = node_states[N_MEMORY];
break;
case MPOL_LOCAL:
/*
* Don't allow a nodelist; mpol_new() checks flags
*/
if (nodelist)
goto out;
break;
case MPOL_DEFAULT:
/*
* Insist on an empty nodelist
*/
if (!nodelist)
err = 0;
goto out;
case MPOL_PREFERRED_MANY:
case MPOL_BIND:
/*
* Insist on a nodelist
*/
if (!nodelist)
goto out;
}
mode_flags = 0;
if (flags) {
/*
* Currently, we only support two mutually exclusive
* mode flags.
*/
if (!strcmp(flags, "static"))
mode_flags |= MPOL_F_STATIC_NODES;
else if (!strcmp(flags, "relative"))
mode_flags |= MPOL_F_RELATIVE_NODES;
else
goto out;
}
new = mpol_new(mode, mode_flags, &nodes);
if (IS_ERR(new))
goto out;
/*
* Save nodes for mpol_to_str() to show the tmpfs mount options
* for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
*/
if (mode != MPOL_PREFERRED) {
new->nodes = nodes;
} else if (nodelist) {
nodes_clear(new->nodes);
node_set(first_node(nodes), new->nodes);
} else {
new->mode = MPOL_LOCAL;
}
/*
* Save nodes for contextualization: this will be used to "clone"
* the mempolicy in a specific context [cpuset] at a later time.
*/
new->w.user_nodemask = nodes;
err = 0;
out:
/* Restore string for error message */
if (nodelist)
*--nodelist = ':';
if (flags)
*--flags = '=';
if (!err)
*mpol = new;
return err;
}
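/*
 * Illustrative examples of strings accepted by mpol_parse_str() above
 * (tmpfs "mpol=" mount option values):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"bind=static:0,2"	MPOL_BIND with MPOL_F_STATIC_NODES on nodes 0,2
 *	"prefer:1"		MPOL_PREFERRED, preferring node 1
 *	"local"			MPOL_LOCAL, no nodelist allowed
 */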
#endif /* CONFIG_TMPFS */
/**
* mpol_to_str - format a mempolicy structure for printing
* @buffer: to contain formatted mempolicy string
* @maxlen: length of @buffer
* @pol: pointer to mempolicy to be formatted
*
* Convert @pol into a string. If @buffer is too short, truncate the string.
* Recommend a @maxlen of at least 51 for the longest mode, "weighted
* interleave", plus the longest flag flags, "relative|balancing", and to
* display at least a few node ids.
*/
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
char *p = buffer;
nodemask_t nodes = NODE_MASK_NONE;
unsigned short mode = MPOL_DEFAULT;
unsigned short flags = 0;
if (pol &&
pol != &default_policy &&
!(pol >= &preferred_node_policy[0] &&
pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) {
mode = pol->mode;
flags = pol->flags;
}
switch (mode) {
case MPOL_DEFAULT:
case MPOL_LOCAL:
break;
case MPOL_PREFERRED:
case MPOL_PREFERRED_MANY:
case MPOL_BIND:
case MPOL_INTERLEAVE:
case MPOL_WEIGHTED_INTERLEAVE:
nodes = pol->nodes;
break;
default:
WARN_ON_ONCE(1);
snprintf(p, maxlen, "unknown");
return;
}
p += snprintf(p, maxlen, "%s", policy_modes[mode]);
if (flags & MPOL_MODE_FLAGS) {
p += snprintf(p, buffer + maxlen - p, "=");
/*
* Static and relative are mutually exclusive.
*/
if (flags & MPOL_F_STATIC_NODES)
p += snprintf(p, buffer + maxlen - p, "static");
else if (flags & MPOL_F_RELATIVE_NODES)
p += snprintf(p, buffer + maxlen - p, "relative");
if (flags & MPOL_F_NUMA_BALANCING) {
if (!is_power_of_2(flags & MPOL_MODE_FLAGS))
p += snprintf(p, buffer + maxlen - p, "|");
p += snprintf(p, buffer + maxlen - p, "balancing");
}
}
if (!nodes_empty(nodes))
p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
nodemask_pr_args(&nodes));
}
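/*
 * Illustrative outputs of mpol_to_str() above, mirroring the parse format:
 * "default", "prefer=static:1", "bind:0,2", "interleave:0-3". If @buffer
 * is too short the string is simply truncated.
 */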
#ifdef CONFIG_SYSFS
struct iw_node_attr {
struct kobj_attribute kobj_attr;
int nid;
};
struct sysfs_wi_group {
struct kobject wi_kobj;
struct mutex kobj_lock;
struct iw_node_attr *nattrs[];
};
static struct sysfs_wi_group *wi_group;
static ssize_t node_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct iw_node_attr *node_attr;
u8 weight;
node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
weight = get_il_weight(node_attr->nid);
return sysfs_emit(buf, "%d\n", weight);
}
static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct weighted_interleave_state *new_wi_state, *old_wi_state = NULL;
struct iw_node_attr *node_attr;
u8 weight = 0;
int i;
node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
if (count == 0 || sysfs_streq(buf, "") ||
kstrtou8(buf, 0, &weight) || weight == 0)
return -EINVAL;
new_wi_state = kzalloc(struct_size(new_wi_state, iw_table, nr_node_ids),
GFP_KERNEL);
if (!new_wi_state)
return -ENOMEM;
mutex_lock(&wi_state_lock);
old_wi_state = rcu_dereference_protected(wi_state,
lockdep_is_held(&wi_state_lock));
if (old_wi_state) {
memcpy(new_wi_state->iw_table, old_wi_state->iw_table,
nr_node_ids * sizeof(u8));
} else {
for (i = 0; i < nr_node_ids; i++)
new_wi_state->iw_table[i] = 1;
}
new_wi_state->iw_table[node_attr->nid] = weight;
new_wi_state->mode_auto = false;
rcu_assign_pointer(wi_state, new_wi_state);
mutex_unlock(&wi_state_lock);
if (old_wi_state) {
synchronize_rcu();
kfree(old_wi_state);
}
return count;
}
static ssize_t weighted_interleave_auto_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct weighted_interleave_state *state;
bool wi_auto = true;
rcu_read_lock();
state = rcu_dereference(wi_state);
if (state)
wi_auto = state->mode_auto;
rcu_read_unlock();
return sysfs_emit(buf, "%s\n", str_true_false(wi_auto));
}
static ssize_t weighted_interleave_auto_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
struct weighted_interleave_state *new_wi_state, *old_wi_state = NULL;
unsigned int *bw;
bool input;
int i;
if (kstrtobool(buf, &input))
return -EINVAL;
new_wi_state = kzalloc(struct_size(new_wi_state, iw_table, nr_node_ids),
GFP_KERNEL);
if (!new_wi_state)
return -ENOMEM;
for (i = 0; i < nr_node_ids; i++)
new_wi_state->iw_table[i] = 1;
mutex_lock(&wi_state_lock);
if (!input) {
old_wi_state = rcu_dereference_protected(wi_state,
lockdep_is_held(&wi_state_lock));
if (!old_wi_state)
goto update_wi_state;
if (input == old_wi_state->mode_auto) {
mutex_unlock(&wi_state_lock);
return count;
}
memcpy(new_wi_state->iw_table, old_wi_state->iw_table,
nr_node_ids * sizeof(u8));
goto update_wi_state;
}
bw = node_bw_table;
if (!bw) {
mutex_unlock(&wi_state_lock);
kfree(new_wi_state);
return -ENODEV;
}
new_wi_state->mode_auto = true;
reduce_interleave_weights(bw, new_wi_state->iw_table);
update_wi_state:
rcu_assign_pointer(wi_state, new_wi_state);
mutex_unlock(&wi_state_lock);
if (old_wi_state) {
synchronize_rcu();
kfree(old_wi_state);
}
return count;
}
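/*
 * Illustrative sysfs usage (paths assume the "mempolicy" kobject created
 * in mempolicy_sysfs_init() below):
 *
 *	# set a manual weight of 4 for node 0
 *	echo 4 > /sys/kernel/mm/mempolicy/weighted_interleave/node0
 *
 *	# return to bandwidth-derived ("auto") weights
 *	echo 1 > /sys/kernel/mm/mempolicy/weighted_interleave/auto
 */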
static void sysfs_wi_node_delete(int nid)
{
struct iw_node_attr *attr;
if (nid < 0 || nid >= nr_node_ids)
return;
mutex_lock(&wi_group->kobj_lock);
attr = wi_group->nattrs[nid];
if (!attr) {
mutex_unlock(&wi_group->kobj_lock);
return;
}
wi_group->nattrs[nid] = NULL;
mutex_unlock(&wi_group->kobj_lock);
sysfs_remove_file(&wi_group->wi_kobj, &attr->kobj_attr.attr);
kfree(attr->kobj_attr.attr.name);
kfree(attr);
}
static void sysfs_wi_node_delete_all(void)
{
int nid;
for (nid = 0; nid < nr_node_ids; nid++)
sysfs_wi_node_delete(nid);
}
static void wi_state_free(void)
{
struct weighted_interleave_state *old_wi_state;
mutex_lock(&wi_state_lock);
old_wi_state = rcu_dereference_protected(wi_state,
lockdep_is_held(&wi_state_lock));
rcu_assign_pointer(wi_state, NULL);
mutex_unlock(&wi_state_lock);
if (old_wi_state) {
synchronize_rcu();
kfree(old_wi_state);
}
}
static struct kobj_attribute wi_auto_attr =
__ATTR(auto, 0664, weighted_interleave_auto_show,
weighted_interleave_auto_store);
static void wi_cleanup(void)
{
sysfs_remove_file(&wi_group->wi_kobj, &wi_auto_attr.attr);
sysfs_wi_node_delete_all();
wi_state_free();
}
static void wi_kobj_release(struct kobject *wi_kobj)
{
kfree(wi_group);
}
static const struct kobj_type wi_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = wi_kobj_release,
};
static int sysfs_wi_node_add(int nid)
{
int ret;
char *name;
struct iw_node_attr *new_attr;
if (nid < 0 || nid >= nr_node_ids) {
pr_err("invalid node id: %d\n", nid);
return -EINVAL;
}
new_attr = kzalloc(sizeof(*new_attr), GFP_KERNEL);
if (!new_attr)
return -ENOMEM;
name = kasprintf(GFP_KERNEL, "node%d", nid);
if (!name) {
kfree(new_attr);
return -ENOMEM;
}
sysfs_attr_init(&new_attr->kobj_attr.attr);
new_attr->kobj_attr.attr.name = name;
new_attr->kobj_attr.attr.mode = 0644;
new_attr->kobj_attr.show = node_show;
new_attr->kobj_attr.store = node_store;
new_attr->nid = nid;
mutex_lock(&wi_group->kobj_lock);
if (wi_group->nattrs[nid]) {
mutex_unlock(&wi_group->kobj_lock);
ret = -EEXIST;
goto out;
}
ret = sysfs_create_file(&wi_group->wi_kobj, &new_attr->kobj_attr.attr);
if (ret) {
mutex_unlock(&wi_group->kobj_lock);
goto out;
}
wi_group->nattrs[nid] = new_attr;
mutex_unlock(&wi_group->kobj_lock);
return 0;
out:
kfree(new_attr->kobj_attr.attr.name);
kfree(new_attr);
return ret;
}
static int wi_node_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
int err;
struct node_notify *nn = data;
int nid = nn->nid;
switch (action) {
case NODE_ADDED_FIRST_MEMORY:
err = sysfs_wi_node_add(nid);
if (err)
pr_err("failed to add sysfs for node%d during hotplug: %d\n",
nid, err);
break;
case NODE_REMOVED_LAST_MEMORY:
sysfs_wi_node_delete(nid);
break;
}
return NOTIFY_OK;
}
static int __init add_weighted_interleave_group(struct kobject *mempolicy_kobj)
{
int nid, err;
wi_group = kzalloc(struct_size(wi_group, nattrs, nr_node_ids),
GFP_KERNEL);
if (!wi_group)
return -ENOMEM;
mutex_init(&wi_group->kobj_lock);
err = kobject_init_and_add(&wi_group->wi_kobj, &wi_ktype, mempolicy_kobj,
"weighted_interleave");
if (err)
goto err_put_kobj;
err = sysfs_create_file(&wi_group->wi_kobj, &wi_auto_attr.attr);
if (err)
goto err_put_kobj;
for_each_online_node(nid) {
if (!node_state(nid, N_MEMORY))
continue;
err = sysfs_wi_node_add(nid);
if (err) {
pr_err("failed to add sysfs for node%d during init: %d\n",
nid, err);
goto err_cleanup_kobj;
}
}
hotplug_node_notifier(wi_node_notifier, DEFAULT_CALLBACK_PRI);
return 0;
err_cleanup_kobj:
wi_cleanup();
kobject_del(&wi_group->wi_kobj);
err_put_kobj:
kobject_put(&wi_group->wi_kobj);
return err;
}
static int __init mempolicy_sysfs_init(void)
{
int err;
static struct kobject *mempolicy_kobj;
mempolicy_kobj = kobject_create_and_add("mempolicy", mm_kobj);
if (!mempolicy_kobj)
return -ENOMEM;
err = add_weighted_interleave_group(mempolicy_kobj);
if (err)
goto err_kobj;
return 0;
err_kobj:
kobject_del(mempolicy_kobj);
kobject_put(mempolicy_kobj);
return err;
}
late_initcall(mempolicy_sysfs_init);
#endif /* CONFIG_SYSFS */
// SPDX-License-Identifier: GPL-2.0
/*
* mm/pgtable-generic.c
*
* Generic pgtable methods declared in linux/pgtable.h
*
* Copyright (C) 2010 Linus Torvalds
*/
#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
/*
* If a p?d_bad entry is found while walking page tables, report
* the error, before resetting entry to p?d_none. Usually (but
* very seldom) called out from the p?d_none_or_clear_bad macros.
*/
void pgd_clear_bad(pgd_t *pgd)
{
pgd_ERROR(*pgd);
pgd_clear(pgd);
}
#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
p4d_ERROR(*p4d);
p4d_clear(p4d);
}
#endif
#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
pud_ERROR(*pud);
pud_clear(pud);
}
#endif
/*
* Note that the pmd variant below can't be stubbed out like the p4d/pud
* variants above: pmd folding is special, and pmd_* macros typically refer
* to the upper level even when folded.
*/
void pmd_clear_bad(pmd_t *pmd)
{
pmd_ERROR(*pmd);
pmd_clear(pmd);
}
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Only sets the access flags (dirty, accessed), as well as write
* permission. Furthermore, we know it always gets set to a "more
* permissive" setting, which allows most architectures to optimize
* this. We return whether the PTE actually changed, which in turn
* instructs the caller to do things like update_mmu_cache(). This
* used to be done in the caller, but sparc needs minor faults to
* force that call on sun4c, so we changed this macro slightly.
*/
int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
int changed = !pte_same(ptep_get(ptep), entry);
if (changed) {
set_pte_at(vma->vm_mm, address, ptep, entry);
flush_tlb_fix_spurious_fault(vma, address, ptep);
}
return changed;
}
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
int young;
young = ptep_test_and_clear_young(vma, address, ptep);
if (young)
flush_tlb_page(vma, address);
return young;
}
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
struct mm_struct *mm = (vma)->vm_mm;
pte_t pte;
pte = ptep_get_and_clear(mm, address, ptep);
if (pte_accessible(mm, pte))
flush_tlb_page(vma, address);
return pte;
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
int changed = !pmd_same(*pmdp, entry);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
if (changed) {
set_pmd_at(vma->vm_mm, address, pmdp, entry);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}
#endif
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
int young;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
young = pmdp_test_and_clear_young(vma, address, pmdp);
if (young)
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return young;
}
#endif
#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_t pmd;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
pud_t *pudp)
{
pud_t pud;
VM_BUG_ON(address & ~HPAGE_PUD_MASK);
VM_BUG_ON(!pud_trans_huge(*pudp));
pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
return pud;
}
#endif
#endif
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable)
{
assert_spin_locked(pmd_lockptr(mm, pmdp));
/* FIFO */
if (!pmd_huge_pte(mm, pmdp))
INIT_LIST_HEAD(&pgtable->lru);
else
list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif
#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
pgtable_t pgtable;
assert_spin_locked(pmd_lockptr(mm, pmdp));
/* FIFO */
pgtable = pmd_huge_pte(mm, pmdp);
pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
struct page, lru);
if (pmd_huge_pte(mm, pmdp))
list_del(&pgtable->lru);
return pgtable;
}
#endif
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
VM_WARN_ON_ONCE(!pmd_present(*pmdp));
pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return old;
}
#endif
#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
VM_WARN_ON_ONCE(!pmd_present(*pmdp));
return pmdp_invalidate(vma, address, pmdp);
}
#endif
#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
/*
* pmd and hugepage pte format are same. So we could
* use the same function.
*/
pmd_t pmd;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
/* collapse entails shooting down ptes not pmd */
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#endif
/* arch define pte_free_defer in asm/pgalloc.h for its own implementation */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
struct page *page;
page = container_of(head, struct page, rcu_head);
pte_free(NULL /* mm not passed and not used */, (pgtable_t)page);
}
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
struct page *page;
page = pgtable;
call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
(defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU))
/*
* See the comment above ptep_get_lockless() in include/linux/pgtable.h:
* the barriers in pmdp_get_lockless() cannot guarantee that the value in
* pmd_high actually belongs with the value in pmd_low; but holding interrupts
* off blocks the TLB flush between present updates, which guarantees that a
* successful __pte_offset_map() points to a page from matched halves.
*/
static unsigned long pmdp_get_lockless_start(void)
{
unsigned long irqflags;
local_irq_save(irqflags);
return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif
pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
unsigned long irqflags;
pmd_t pmdval;
rcu_read_lock();
irqflags = pmdp_get_lockless_start();
pmdval = pmdp_get_lockless(pmd);
pmdp_get_lockless_end(irqflags);
if (pmdvalp)
*pmdvalp = pmdval;
if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
goto nomap;
if (unlikely(pmd_trans_huge(pmdval)))
goto nomap;
if (unlikely(pmd_bad(pmdval))) {
pmd_clear_bad(pmd);
goto nomap;
}
return __pte_map(&pmdval, addr);
nomap:
rcu_read_unlock();
return NULL;
}
pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp)
{
pmd_t pmdval;
pte_t *pte;
pte = __pte_offset_map(pmd, addr, &pmdval);
if (likely(pte))
*ptlp = pte_lockptr(mm, &pmdval);
return pte;
}
pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, pmd_t *pmdvalp,
spinlock_t **ptlp)
{
pte_t *pte;
VM_WARN_ON_ONCE(!pmdvalp);
pte = __pte_offset_map(pmd, addr, pmdvalp);
if (likely(pte))
*ptlp = pte_lockptr(mm, pmdvalp);
return pte;
}
/*
* pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
* __pte_offset_map_lock() below, is usually called with the pmd pointer for
* addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
* holding mmap_lock or vma lock for read or for write; or in truncate or rmap
* context, while holding file's i_mmap_lock or anon_vma lock for read (or for
* write). In a few cases, it may be used with pmd pointing to a pmd_t already
* copied to or constructed on the stack.
*
* When successful, it returns the pte pointer for addr, with its page table
* kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent
* modification by software, with a pointer to that spinlock in ptlp (in some
* configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in table's
* struct page). pte_unmap_unlock(pte, ptl) to unlock and unmap afterwards.
*
* But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no
* page table at *pmd: if, for example, the page table has just been removed,
* or replaced by the huge pmd of a THP. (When successful, *pmd is rechecked
* after acquiring the ptlock, and retried internally if it changed: so that a
* page table can be safely removed or replaced by THP while holding its lock.)
*
* pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above,
* just returns the pte pointer for addr, its page table kmapped if necessary;
* or NULL if there is no page table at *pmd. It does not attempt to lock the
* page table, so cannot normally be used when the page table is to be updated,
* or when entries read must be stable. But it does take rcu_read_lock(): so
* that even when page table is racily removed, it remains a valid though empty
* and disconnected table. Until pte_unmap(pte) unmaps and rcu_read_unlock()s
* afterwards.
*
* pte_offset_map_ro_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
* but when successful, it also outputs a pointer to the spinlock in ptlp - as
* pte_offset_map_lock() does, but in this case without locking it. This helps
* the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
* act on a changed *pmd: pte_offset_map_ro_nolock() provides the correct spinlock
* pointer for the page table that it returns. Even after grabbing the spinlock,
* we might be looking either at a page table that is still mapped or one that
* was unmapped and is about to get freed. But for R/O access this is sufficient.
* So it is only applicable for read-only cases where any modification operations
* to the page table are not allowed even if the corresponding spinlock is held
* afterwards.
*
* pte_offset_map_rw_nolock(mm, pmd, addr, pmdvalp, ptlp), above, is like
* pte_offset_map_ro_nolock(); but when successful, it also outputs the pmdval.
* It is applicable for may-write cases where modifications to the page table
* may happen after the corresponding spinlock is taken. But users must make
* sure the page table is stable, e.g. by checking pte_same() or pmd_same()
* against the output pmdval, before performing any write operations.
*
* Note: "RO" / "RW" expresses the intended semantics, not that the *kmap* will
* be read-only/read-write protected.
*
* Note that free_pgtables(), used after unmapping detached vmas, or when
* exiting the whole mm, does not take page table lock before freeing a page
* table, and may not use RCU at all: "outsiders" like khugepaged should avoid
* pte_offset_map() and co once the vma is detached from mm or mm_users is zero.
*/
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp)
{
spinlock_t *ptl;
pmd_t pmdval;
pte_t *pte;
again:
pte = __pte_offset_map(pmd, addr, &pmdval);
if (unlikely(!pte))
return pte;
ptl = pte_lockptr(mm, &pmdval);
spin_lock(ptl);
if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
*ptlp = ptl;
return pte;
}
pte_unmap_unlock(pte, ptl);
goto again;
}
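/*
 * Illustrative sketch (not part of this file) of the usual lock/unlock
 * pairing described in the comment above; NULL means there is no page
 * table at *pmd (e.g. a huge pmd was installed) and the caller bails out.
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return 0;
 *	entry = ptep_get(pte);
 *	...
 *	pte_unmap_unlock(pte, ptl);
 */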
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
/*
* Define 'struct task_struct' and provide the main scheduler
* APIs (schedule(), wakeup variants, etc.)
*/
#include <uapi/linux/sched.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <linux/thread_info.h>
#include <linux/preempt.h>
#include <linux/cpumask_types.h>
#include <linux/cache.h>
#include <linux/irqflags_types.h>
#include <linux/smp_types.h>
#include <linux/pid_types.h>
#include <linux/sem_types.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
#include <linux/mutex_types.h>
#include <linux/plist_types.h>
#include <linux/hrtimer_types.h>
#include <linux/timer_types.h>
#include <linux/seccomp_types.h>
#include <linux/nodemask_types.h>
#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/spinlock.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/netdevice_xmit.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
#include <uapi/linux/rseq.h>
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <linux/uidgid_types.h>
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
#ifndef COMPILE_OFFSETS
#include <generated/rq-offsets.h>
#endif
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
struct bpf_net_context;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct perf_ctx_data;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct user_event_mm;
#include <linux/sched/ext.h>
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
* We have two separate sets of flags: task->__state
* is about runnability, while task->exit_state is
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
/* Used in tsk->__state: */
#define TASK_RUNNING 0x00000000
#define TASK_INTERRUPTIBLE 0x00000001
#define TASK_UNINTERRUPTIBLE 0x00000002
#define __TASK_STOPPED 0x00000004
#define __TASK_TRACED 0x00000008
/* Used in tsk->exit_state: */
#define EXIT_DEAD 0x00000010
#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->__state again: */
#define TASK_PARKED 0x00000040
#define TASK_DEAD 0x00000080
#define TASK_WAKEKILL 0x00000100
#define TASK_WAKING 0x00000200
#define TASK_NOLOAD 0x00000400
#define TASK_NEW 0x00000800
#define TASK_RTLOCK_WAIT 0x00001000
#define TASK_FREEZABLE 0x00002000
#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
#define TASK_FROZEN 0x00008000
#define TASK_STATE_MAX 0x00010000
#define TASK_ANY (TASK_STATE_MAX-1)
/*
* DO NOT ADD ANY NEW USERS !
*/
#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED __TASK_TRACED
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
/*
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
*/
#define is_special_task_state(state) \
((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
TASK_DEAD | TASK_FROZEN))
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value) \
do { \
WARN_ON_ONCE(is_special_task_state(state_value)); \
current->task_state_change = _THIS_IP_; \
} while (0)
# define debug_special_state_change(state_value) \
do { \
WARN_ON_ONCE(!is_special_task_state(state_value)); \
current->task_state_change = _THIS_IP_; \
} while (0)
# define debug_rtlock_wait_set_state() \
do { \
current->saved_state_change = current->task_state_change;\
current->task_state_change = _THIS_IP_; \
} while (0)
# define debug_rtlock_wait_restore_state() \
do { \
current->task_state_change = current->saved_state_change;\
} while (0)
#else
# define debug_normal_state_change(cond) do { } while (0)
# define debug_special_state_change(cond) do { } while (0)
# define debug_rtlock_wait_set_state() do { } while (0)
# define debug_rtlock_wait_restore_state() do { } while (0)
#endif
#define trace_set_current_state(state_value) \
do { \
if (tracepoint_enabled(sched_set_state_tp)) \
__trace_set_current_state(state_value); \
} while (0)
/*
* set_current_state() includes a barrier so that the write of current->__state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
* for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
* if (CONDITION)
* break;
*
* schedule();
* }
* __set_current_state(TASK_RUNNING);
*
* If the caller does not need such serialisation (because, for instance, the
* CONDITION test and condition change and wakeup are under the same lock) then
* use __set_current_state().
*
* The above is typically ordered against the wakeup, which does:
*
* CONDITION = 1;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
* where wake_up_state()/try_to_wake_up() executes a full memory barrier before
* accessing p->__state.
*
* Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
* However, with slightly different timing the wakeup TASK_RUNNING store can
* also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
* a problem either because that will result in one extra go around the loop
* and our @cond test will save the day.
*
* Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
} while (0)
#define set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
trace_set_current_state(state_value); \
smp_store_mb(current->__state, (state_value)); \
} while (0)
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
* serialize against wakeups such that any possible in-flight TASK_RUNNING
* stores will not collide with our state change.
*/
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
\
raw_spin_lock_irqsave(¤t->pi_lock, flags); \
debug_special_state_change((state_value)); \
trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \
} while (0)
/*
* PREEMPT_RT specific variants for "sleeping" spin/rwlocks
*
* RT's spin/rwlock substitutions are state preserving. The state of the
* task when blocking on the lock is saved in task_struct::saved_state and
* restored after the lock has been acquired. These operations are
* serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
* lock related wakeups while the task is blocked on the lock are
* redirected to operate on task_struct::saved_state to ensure that these
* are not dropped. On restore task_struct::saved_state is set to
* TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
*
* The lock operation looks like this:
*
* current_save_and_set_rtlock_wait_state();
* for (;;) {
* if (try_lock())
* break;
* raw_spin_unlock_irq(&lock->wait_lock);
* schedule_rtlock();
* raw_spin_lock_irq(&lock->wait_lock);
* set_current_state(TASK_RTLOCK_WAIT);
* }
* current_restore_rtlock_saved_state();
*/
#define current_save_and_set_rtlock_wait_state() \
do { \
lockdep_assert_irqs_disabled(); \
raw_spin_lock(&current->pi_lock); \
current->saved_state = current->__state; \
debug_rtlock_wait_set_state(); \
trace_set_current_state(TASK_RTLOCK_WAIT); \
WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
raw_spin_unlock(&current->pi_lock); \
} while (0);
#define current_restore_rtlock_saved_state() \
do { \
lockdep_assert_irqs_disabled(); \
raw_spin_lock(&current->pi_lock); \
debug_rtlock_wait_restore_state(); \
trace_set_current_state(current->saved_state); \
WRITE_ONCE(current->__state, current->saved_state); \
current->saved_state = TASK_RUNNING; \
raw_spin_unlock(&current->pi_lock); \
} while (0);
#define get_current_state() READ_ONCE(current->__state)
/*
* Define the task command name length as an enum so that it is visible to
* BPF programs.
*/
enum {
TASK_COMM_LEN = 16,
};
extern void sched_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
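/*
 * Example: sleeping for up to 100ms unless woken earlier. The caller sets
 * the task state first and schedule_timeout() returns the remaining jiffies
 * (a sketch; 'remaining' is just a local variable here):
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * The schedule_timeout_*() variants set the corresponding state themselves.
 */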
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
#ifdef CONFIG_PREEMPT_RT
extern void schedule_rtlock(void);
#endif
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
/* wrapper functions to trace from this header file */
DECLARE_TRACEPOINT(sched_set_state_tp);
extern void __trace_set_current_state(int state_value);
DECLARE_TRACEPOINT(sched_set_need_resched_tp);
extern void __trace_set_need_resched(struct task_struct *curr, int tif);
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
* @stime: time spent in system mode
* @lock: protects the above two fields
*
* Stores previous user/system time values such that we can guarantee
* monotonicity.
*/
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
u64 utime;
u64 stime;
raw_spinlock_t lock;
#endif
};
enum vtime_state {
/* Task is sleeping or running in a CPU with VTIME inactive: */
VTIME_INACTIVE = 0,
/* Task is idle */
VTIME_IDLE,
/* Task runs in kernelspace in a CPU with VTIME active: */
VTIME_SYS,
/* Task runs in userspace in a CPU with VTIME active: */
VTIME_USER,
/* Task runs as guests in a CPU with VTIME active: */
VTIME_GUEST,
};
struct vtime {
seqcount_t seqcount;
unsigned long long starttime;
enum vtime_state state;
unsigned int cpu;
u64 utime;
u64 stime;
u64 gtime;
};
/*
* Utilization clamp constraints.
* @UCLAMP_MIN: Minimum utilization
* @UCLAMP_MAX: Maximum utilization
* @UCLAMP_CNT: Utilization clamp constraints count
*/
enum uclamp_id {
UCLAMP_MIN = 0,
UCLAMP_MAX,
UCLAMP_CNT
};
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
extern void sched_domains_mutex_lock(void);
extern void sched_domains_mutex_unlock(void);
struct sched_param {
int sched_priority;
};
struct sched_info {
#ifdef CONFIG_SCHED_INFO
/* Cumulative counters: */
/* # of times we have run on this CPU: */
unsigned long pcount;
/* Time spent waiting on a runqueue: */
unsigned long long run_delay;
/* Max time spent waiting on a runqueue: */
unsigned long long max_run_delay;
/* Min time spent waiting on a runqueue: */
unsigned long long min_run_delay;
/* Timestamps: */
/* When did we last run on a CPU? */
unsigned long long last_arrival;
/* When were we last queued to run? */
unsigned long long last_queued;
#endif /* CONFIG_SCHED_INFO */
};
/*
* Integer metrics need fixed point arithmetic, e.g., sched/fair
* has a few: load, load_avg, util_avg, freq, and capacity.
*
* We define a basic fixed point arithmetic range, and then formalize
* all these metrics based on that basic range.
*/
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
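/*
 * With SCHED_FIXEDPOINT_SHIFT == 10, a value of SCHED_CAPACITY_SCALE (1024)
 * represents 1.0, so e.g. a utilization of 512 corresponds to roughly 50%
 * of a CPU's capacity.
 */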
struct load_weight {
unsigned long weight;
u32 inv_weight;
};
/*
* The load/runnable/util_avg accumulates an infinite geometric series
* (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
*
* [load_avg definition]
*
* load_avg = runnable% * scale_load_down(load)
*
* [runnable_avg definition]
*
* runnable_avg = runnable% * SCHED_CAPACITY_SCALE
*
* [util_avg definition]
*
* util_avg = running% * SCHED_CAPACITY_SCALE
*
* where runnable% is the time ratio that a sched_entity is runnable and
* running% the time ratio that a sched_entity is running.
*
* For cfs_rq, they are the aggregated values of all runnable and blocked
* sched_entities.
*
* The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
* capacity scaling. The scaling is done through the rq_clock_pelt that is used
* for computing those signals (see update_rq_clock_pelt())
*
* N.B., the above ratios (runnable% and running%) themselves are in the
* range of [0, 1]. To do fixed point arithmetics, we therefore scale them
* to as large a range as necessary. This is for example reflected by
* util_avg's SCHED_CAPACITY_SCALE.
*
* [Overflow issue]
*
* The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
* with the highest load (=88761), always runnable on a single cfs_rq,
* and should not overflow as the number already hits PID_MAX_LIMIT.
*
* For all other cases (including 32-bit kernels), struct load_weight's
* weight will overflow first before we do, because:
*
* Max(load_avg) <= Max(load.weight)
*
* Then it is the load_weight's responsibility to consider overflow
* issues.
*/
struct sched_avg {
u64 last_update_time;
u64 load_sum;
u64 runnable_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
unsigned int util_est;
} ____cacheline_aligned;
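/*
 * Worked example of the definitions above: a task that has been runnable
 * about half the time and actually running a quarter of the time settles,
 * with SCHED_CAPACITY_SCALE == 1024, at roughly runnable_avg ~= 512 and
 * util_avg ~= 256 (ignoring frequency/CPU capacity scaling via
 * rq_clock_pelt).
 */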
/*
* The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
* updates. When a task is dequeued, its util_est should not be updated if its
* util_avg has not been updated in the meantime.
* This information is mapped into the MSB bit of util_est at dequeue time.
* Since the max value of util_est for a task is 1024 (the PELT util_avg for
* a task), it is safe to use the MSB.
*/
#define UTIL_EST_WEIGHT_SHIFT 2
#define UTIL_AVG_UNCHANGED 0x80000000
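/*
 * Readers of util_est therefore have to mask the flag out. A sketch of how
 * a raw estimate would be extracted (the exact helpers live in
 * kernel/sched/fair.c):
 *
 *	unsigned int est = READ_ONCE(p->se.avg.util_est);
 *
 *	est &= ~UTIL_AVG_UNCHANGED;
 */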
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
u64 wait_count;
u64 wait_sum;
u64 iowait_count;
u64 iowait_sum;
u64 sleep_start;
u64 sleep_max;
s64 sum_sleep_runtime;
u64 block_start;
u64 block_max;
s64 sum_block_runtime;
s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
u64 nr_wakeups_migrate;
u64 nr_wakeups_local;
u64 nr_wakeups_remote;
u64 nr_wakeups_affine;
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
#ifdef CONFIG_SCHED_CORE
u64 core_forceidle_sum;
#endif
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
struct rb_node run_node;
u64 deadline;
u64 min_vruntime;
u64 min_slice;
struct list_head group_node;
unsigned char on_rq;
unsigned char sched_delayed;
unsigned char rel_deadline;
unsigned char custom_slice;
/* hole */
u64 exec_start;
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
union {
/*
* When !@on_rq this field is vlag.
* When cfs_rq->curr == se (which implies @on_rq)
* this field is vprot. See protect_slice().
*/
s64 vlag;
u64 vprot;
};
u64 slice;
u64 nr_migrations;
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
/* rq on which this entity is (to be) queued: */
struct cfs_rq *cfs_rq;
/* rq "owned" by this entity/group: */
struct cfs_rq *my_q;
/* cached value of my_q->h_nr_running */
unsigned long runnable_weight;
#endif
/*
* Per entity load average tracking.
*
* Put into separate cache line so it does not
* collide with read-mostly values above.
*/
struct sched_avg avg;
};
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice;
unsigned short on_rq;
unsigned short on_list;
struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;
/* rq on which this entity is (to be) queued: */
struct rt_rq *rt_rq;
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
} __randomize_layout;
typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
struct sched_dl_entity {
struct rb_node rb_node;
/*
* Original scheduling parameters. Copied here from sched_attr
* during sched_setattr(), they will remain the same until
* the next sched_setattr().
*/
u64 dl_runtime; /* Maximum runtime for each instance */
u64 dl_deadline; /* Relative deadline of each instance */
u64 dl_period; /* Separation of two instances (period) */
u64 dl_bw; /* dl_runtime / dl_period */
u64 dl_density; /* dl_runtime / dl_deadline */
/*
* Actual scheduling parameters. Initialized with the values above,
* they are continuously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime; /* Remaining runtime for this instance */
u64 deadline; /* Absolute deadline for this instance */
unsigned int flags; /* Specifying the scheduler behaviour */
/*
* Some bool flags:
*
* @dl_throttled tells if we exhausted the runtime. If so, the
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
* @dl_non_contending tells if the task is inactive while still
* contributing to the active utilization. In other words, it
* indicates if the inactive timer has been armed and its handler
* has not been executed yet. This flag is useful to avoid race
* conditions between the inactive timer handler and the wakeup
* code.
*
* @dl_overrun tells if the task asked to be informed about runtime
* overruns.
*
* @dl_server tells if this is a server entity.
*
* @dl_defer tells if this is a deferred or regular server. For
* now only defer server exists.
*
* @dl_defer_armed tells if the deferrable server is waiting
* for the replenishment timer to activate it.
*
* @dl_server_active tells if the dlserver is active (started).
* dlserver is started on the first cfs enqueue on an idle runqueue
* and is stopped when a dequeue results in 0 cfs tasks on the
* runqueue. In other words, dlserver is active only when the CPU's
* runqueue has at least one cfs task.
*
* @dl_defer_running tells if the deferrable server is actually
* running, skipping the defer phase.
*/
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
unsigned int dl_server : 1;
unsigned int dl_server_active : 1;
unsigned int dl_defer : 1;
unsigned int dl_defer_armed : 1;
unsigned int dl_defer_running : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
* own bandwidth to be enforced, thus we need one timer per task.
*/
struct hrtimer dl_timer;
/*
* Inactive timer, responsible for decreasing the active utilization
* at the "0-lag time". When a -deadline task blocks, it contributes
* to GRUB's active utilization until the "0-lag time", hence a
* timer is needed to decrease the active utilization at the correct
* time.
*/
struct hrtimer inactive_timer;
/*
* Bits for DL-server functionality. Also see the comment near
* dl_server_update().
*
* @rq the runqueue this server is for
*
* @server_has_tasks() returns true if @server_pick returns a
* runnable task.
*/
struct rq *rq;
dl_server_pick_f server_pick_task;
#ifdef CONFIG_RT_MUTEXES
/*
* Priority Inheritance. When a DEADLINE scheduling entity is boosted
* pi_se points to the donor, otherwise points to the dl_se it belongs
* to (the original one/itself).
*/
struct sched_dl_entity *pi_se;
#endif
};
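/*
 * Worked example for the bandwidth parameters above: a task admitted with
 * dl_runtime = 10ms and dl_period = 100ms reserves 10% of a CPU; dl_bw
 * stores that ratio in fixed point so admission control can sum
 * reservations without floating point.
 */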
#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
/*
* Utilization clamp for a scheduling entity
* @value: clamp value "assigned" to a se
* @bucket_id: bucket index corresponding to the "assigned" value
* @active: the se is currently refcounted in a rq's bucket
* @user_defined: the requested clamp value comes from user-space
*
* The bucket_id is the index of the clamp bucket matching the clamp value
* which is pre-computed and stored to avoid expensive integer divisions from
* the fast path.
*
* The active bit is set whenever a task has got an "effective" value assigned,
* which can be different from the clamp value "requested" from user-space.
* This makes it possible to know that a task is refcounted in the rq's
* bucket corresponding to the "effective" bucket_id.
*
* The user_defined bit is set whenever a task has got a task-specific clamp
* value requested from user-space, i.e. the system defaults apply to this
* task only as a restriction. This allows default clamps to be relaxed when
* a less restrictive task-specific value has been requested, implementing a
* "nice" semantic. For example, a task running with a 20% default boost can
* still drop its own boosting to 0%.
*/
struct uclamp_se {
unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
unsigned int active : 1;
unsigned int user_defined : 1;
};
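/*
 * Example of the pre-computed mapping (a sketch; the precise rounding is
 * done by uclamp_bucket_id() in kernel/sched/core.c): with
 * SCHED_CAPACITY_SCALE == 1024 and 5 buckets, each bucket spans roughly
 * 205 units, so a requested value of 300 maps to bucket_id 1.
 */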
#endif /* CONFIG_UCLAMP_TASK */
union rcu_special {
struct {
u8 blocked;
u8 need_qs;
u8 exp_hint; /* Hint for performance. */
u8 need_mb; /* Readers need smp_mb(). */
} b; /* Bits. */
u32 s; /* Set of bits. */
};
enum perf_event_task_context {
perf_invalid_context = -1,
perf_hw_context = 0,
perf_sw_context,
perf_nr_task_contexts,
};
/*
* Number of contexts where an event can trigger:
* task, softirq, hardirq, nmi.
*/
#define PERF_NR_CONTEXTS 4
struct wake_q_node {
struct wake_q_node *next;
};
struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
int idx;
pte_t pteval[KM_MAX_IDX];
#endif
};
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
* For reasons of header soup (see current_thread_info()), this
* must be the first element of task_struct.
*/
struct thread_info thread_info;
#endif
unsigned int __state;
/* saved state for "spinlock sleepers" */
unsigned int saved_state;
/*
* This begins the randomizable portion of task_struct. Only
* scheduling-critical items should be added above here.
*/
randomized_struct_fields_start
void *stack;
refcount_t usage;
/* Per task flags (PF_*), defined further below: */
unsigned int flags;
unsigned int ptrace;
#ifdef CONFIG_MEM_ALLOC_PROFILING
struct alloc_tag *alloc_tag;
#endif
int on_cpu;
struct __call_single_node wake_entry;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
/*
* recent_used_cpu is initially set as the last CPU used by a task
* that wakes affine another task. Waker/wakee relationships can
* push tasks around a CPU where each wakeup moves to the next one.
* Tracking a recently used CPU allows a quick search for a recently
* used CPU that may be idle.
*/
int recent_used_cpu;
int wake_cpu;
int on_rq;
int prio;
int static_prio;
int normal_prio;
unsigned int rt_priority;
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
struct sched_dl_entity *dl_server;
#ifdef CONFIG_SCHED_CLASS_EXT
struct sched_ext_entity scx;
#endif
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
struct rb_node core_node;
unsigned long core_cookie;
unsigned int core_occupation;
#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#ifdef CONFIG_CFS_BANDWIDTH
struct callback_head sched_throttle_work;
struct list_head throttle_node;
bool throttled;
#endif
#endif
#ifdef CONFIG_UCLAMP_TASK
/*
* Clamp values requested for a scheduling entity.
* Must be updated with task_rq_lock() held.
*/
struct uclamp_se uclamp_req[UCLAMP_CNT];
/*
* Effective clamp values used for a scheduling entity.
* Must be updated with task_rq_lock() held.
*/
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
struct sched_statistics stats;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
#endif
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
unsigned int policy;
unsigned long max_allowed_capacity;
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
void *migration_pending;
unsigned short migration_disabled;
unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
u8 rcu_tasks_holdout;
u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
struct list_head rcu_tasks_holdout_list;
int rcu_tasks_exit_cpu;
struct list_head rcu_tasks_exit_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
int trc_reader_nesting;
int trc_ipi_to_cpu;
union rcu_special trc_reader_special;
struct list_head trc_holdout_list;
struct list_head trc_blkd_node;
int trc_blkd_cpu;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
struct sched_info sched_info;
struct list_head tasks;
struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;
struct mm_struct *mm;
struct mm_struct *active_mm;
struct address_space *faults_disabled_mapping;
int exit_state;
int exit_code;
int exit_signal;
/* The signal sent when the parent dies: */
int pdeath_signal;
/* JOBCTL_*, siglock protected: */
unsigned long jobctl;
/* Used for emulating ABI behavior of previous Linux versions: */
unsigned int personality;
/* Scheduler bits, serialized by scheduler locks: */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
unsigned sched_task_hot:1;
/* Force alignment to the next boundary: */
unsigned :0;
/* Unserialized, strictly 'current' */
/*
* This field must not be in the scheduler word above due to wakelist
* queueing no longer being serialized by p->on_cpu. However:
*
* p->XXX = X; ttwu()
* schedule() if (p->on_rq && ..) // false
* smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
* deactivate_task() ttwu_queue_wakelist())
* p->on_rq = 0; p->sched_remote_wakeup = Y;
*
* guarantees all stores of 'current' are visible before
* ->sched_remote_wakeup gets used, so it can be in this word.
*/
unsigned sched_remote_wakeup:1;
#ifdef CONFIG_RT_MUTEXES
unsigned sched_rt_mutex:1;
#endif
/* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG_V1
unsigned in_user_fault:1;
#endif
#ifdef CONFIG_LRU_GEN
/* whether the LRU algorithm may apply to this access */
unsigned in_lru_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
/* task is frozen/stopped (used by the cgroup freezer) */
unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
unsigned use_memdelay:1;
#endif
#ifdef CONFIG_PSI
/* Stalled due to lack of memory */
unsigned in_memstall:1;
#endif
#ifdef CONFIG_PAGE_OWNER
/* Used by page_owner=on to detect recursion in page tracking. */
unsigned in_page_owner:1;
#endif
#ifdef CONFIG_EVENTFD
/* Recursion prevention for eventfd_signal() */
unsigned in_eventfd:1;
#endif
#ifdef CONFIG_ARCH_HAS_CPU_PASID
unsigned pasid_activated:1;
#endif
#ifdef CONFIG_X86_BUS_LOCK_DETECT
unsigned reported_split_lock:1;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
/* delay due to memory thrashing */
unsigned in_thrashing:1;
#endif
unsigned in_nf_duplicate:1;
#ifdef CONFIG_PREEMPT_RT
struct netdev_xmit net_xmit;
#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
pid_t pid;
pid_t tgid;
#ifdef CONFIG_STACKPROTECTOR
/* Canary value for the -fstack-protector GCC feature: */
unsigned long stack_canary;
#endif
/*
* Pointers to the (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
* p->real_parent->pid)
*/
/* Real parent process: */
struct task_struct __rcu *real_parent;
/* Recipient of SIGCHLD, wait4() reports: */
struct task_struct __rcu *parent;
/*
* Children/sibling form the list of natural children:
*/
struct list_head children;
struct list_head sibling;
struct task_struct *group_leader;
/*
* 'ptraced' is the list of tasks this task is using ptrace() on.
*
* This includes both natural children and PTRACE_ATTACH targets.
* 'ptrace_entry' is this task's link on the p->parent->ptraced list.
*/
struct list_head ptraced;
struct list_head ptrace_entry;
/* PID/PID hash table linkage. */
struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
struct list_head thread_node;
struct completion *vfork_done;
/* CLONE_CHILD_SETTID: */
int __user *set_child_tid;
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
/* PF_KTHREAD | PF_IO_WORKER */
void *worker_private;
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
u64 utimescaled;
u64 stimescaled;
#endif
u64 gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
struct vtime vtime;
#endif
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
#endif
/* Context switch counts: */
unsigned long nvcsw;
unsigned long nivcsw;
/* Monotonic time in nsecs: */
u64 start_time;
/* Boot based time in nsecs: */
u64 start_boottime;
/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
unsigned long min_flt;
unsigned long maj_flt;
/* Empty if CONFIG_POSIX_CPUTIMERS=n */
struct posix_cputimers posix_cputimers;
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
struct posix_cputimers_work posix_cputimers_work;
#endif
/* Process credentials: */
/* Tracer's credentials at attach: */
const struct cred __rcu *ptracer_cred;
/* Objective and real subjective task credentials (COW): */
const struct cred __rcu *real_cred;
/* Effective (overridable) subjective task credentials (COW): */
const struct cred __rcu *cred;
#ifdef CONFIG_KEYS
/* Cached requested key. */
struct key *cached_requested_key;
#endif
/*
* executable name, excluding path.
*
* - normally initialized by begin_new_exec()
* - set it with set_task_comm()
* - strscpy_pad() to ensure it is always NUL-terminated and
* zero-padded
* - task_lock() to ensure the operation is atomic and the name is
* fully updated.
*/
char comm[TASK_COMM_LEN];
struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
struct sysv_sem sysvsem;
struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
unsigned long last_switch_count;
unsigned long last_switch_time;
#endif
/* Filesystem information: */
struct fs_struct *fs;
/* Open file information: */
struct files_struct *files;
#ifdef CONFIG_IO_URING
struct io_uring_task *io_uring;
#endif
/* Namespaces: */
struct nsproxy *nsproxy;
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
unsigned int sas_ss_flags;
struct callback_head *task_works;
#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
struct audit_context *audit_context;
#endif
kuid_t loginuid;
unsigned int sessionid;
#endif
struct seccomp seccomp;
struct syscall_user_dispatch syscall_dispatch;
/* Thread group tracking: */
u64 parent_exec_id;
u64 self_exec_id;
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
struct rb_root_cached pi_waiters;
/* Updated under owner's pi_lock and rq lock */
struct task_struct *pi_top_task;
/* Deadlock detection and priority inheritance handling: */
struct rt_mutex_waiter *pi_blocked_on;
#endif
struct mutex *blocked_on; /* lock we're blocked on */
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
/*
* Encoded lock address causing task block (lower 2 bits = type from
* <linux/hung_task.h>). Accessed via hung_task_*() helpers.
*/
unsigned long blocker;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
int non_block_count;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
struct irqtrace_events irqtrace;
unsigned int hardirq_threaded;
u64 hardirq_chain_key;
int softirqs_enabled;
int softirq_context;
int irq_config;
#endif
#ifdef CONFIG_PREEMPT_RT
int softirq_disable_cnt;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
unsigned int in_ubsan;
#endif
/* Journalling filesystem info: */
void *journal_info;
/* Stacked block device info: */
struct bio_list *bio_list;
/* Stack plugging: */
struct blk_plug *plug;
/* VM state: */
struct reclaim_state *reclaim_state;
struct io_context *io_context;
#ifdef CONFIG_COMPACTION
struct capture_control *capture_control;
#endif
/* Ptrace state: */
unsigned long ptrace_message;
kernel_siginfo_t *last_siginfo;
struct task_io_accounting ioac;
#ifdef CONFIG_PSI
/* Pressure stall state */
unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
/* Accumulated RSS usage: */
u64 acct_rss_mem1;
/* Accumulated virtual memory usage: */
u64 acct_vm_mem1;
/* stime + utime since last update: */
u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
/* Protected by ->alloc_lock: */
nodemask_t mems_allowed;
/* Sequence number to catch updates: */
seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock: */
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
u32 closid;
u32 rmid;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
struct mutex futex_exit_mutex;
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
u8 perf_recursion[PERF_NR_CONTEXTS];
struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
struct perf_ctx_data __rcu *perf_ctx_data;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
/* Protected by alloc_lock: */
struct mempolicy *mempolicy;
short il_prev;
u8 il_weight;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
int numa_scan_seq;
unsigned int numa_scan_period;
unsigned int numa_scan_period_max;
int numa_preferred_nid;
unsigned long numa_migrate_retry;
/* Migration stamp: */
u64 node_stamp;
u64 last_task_numa_placement;
u64 last_sum_exec_runtime;
struct callback_head numa_work;
/*
* This pointer is only modified for current in syscall and
* pagefault context (and for tasks being destroyed), so it can be read
* from any of the following contexts:
* - RCU read-side critical section
* - current->numa_group from everywhere
* - task's runqueue locked, task not running
*/
struct numa_group __rcu *numa_group;
/*
* numa_faults is an array split into four regions:
* faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
* in this precise order.
*
* faults_memory: Exponential decaying average of faults on a per-node
* basis. Scheduling placement decisions are made based on these
* counts. The values remain static for the duration of a PTE scan.
* faults_cpu: Track the nodes the process was running on when a NUMA
* hinting fault was incurred.
* faults_memory_buffer and faults_cpu_buffer: Record faults per node
* during the current scan window. When the scan completes, the counts
* in faults_memory and faults_cpu decay and these values are copied.
*/
unsigned long *numa_faults;
unsigned long total_numa_faults;
/*
* numa_faults_locality tracks if faults recorded during the last
* scan window were remote/local or failed to migrate. The task scan
* period is adapted based on the locality of the faults with different
* weights depending on whether they were shared or private faults
*/
unsigned long numa_faults_locality[3];
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_RSEQ
struct rseq __user *rseq;
u32 rseq_len;
u32 rseq_sig;
/*
* RmW on rseq_event_mask must be performed atomically
* with respect to preemption.
*/
unsigned long rseq_event_mask;
# ifdef CONFIG_DEBUG_RSEQ
/*
* This is a placeholder to save a copy of the rseq fields for
* validation of read-only fields. The struct rseq has a
* variable-length array at the end, so it cannot be used
* directly. Reserve a size large enough for the known fields.
*/
char rseq_fields[sizeof(struct rseq)];
# endif
#endif
#ifdef CONFIG_SCHED_MM_CID
int mm_cid; /* Current cid in mm */
int last_mm_cid; /* Most recent cid in mm */
int migrate_from_cpu;
int mm_cid_active; /* Whether cid bitmap is active */
struct callback_head cid_work;
#endif
struct tlbflush_unmap_batch tlb_ubc;
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
struct page_frag task_frag;
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
unsigned int fail_nth;
#endif
/*
* When (nr_dirtied >= nr_dirtied_pause), it's time to call
* balance_dirty_pages() for a dirty throttling pause:
*/
int nr_dirtied;
int nr_dirtied_pause;
/* Start of a write-and-pause period: */
unsigned long dirty_paused_when;
#ifdef CONFIG_LATENCYTOP
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
#endif
/*
* Time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
u64 timer_slack_ns;
u64 default_timer_slack_ns;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
unsigned int kasan_depth;
#endif
#ifdef CONFIG_KCSAN
struct kcsan_ctx kcsan_ctx;
#ifdef CONFIG_TRACE_IRQFLAGS
struct irqtrace_events kcsan_save_irqtrace;
#endif
#ifdef CONFIG_KCSAN_WEAK_MEMORY
int kcsan_stack_depth;
#endif
#endif
#ifdef CONFIG_KMSAN
struct kmsan_ctx kmsan_ctx;
#endif
#if IS_ENABLED(CONFIG_KUNIT)
struct kunit *kunit_test;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
unsigned long *ret_stack;
/* Timestamp for last schedule: */
unsigned long long ftrace_timestamp;
unsigned long long ftrace_sleeptime;
/*
* Number of functions that haven't been traced
* because of depth overrun:
*/
atomic_t trace_overrun;
/* Pause tracing: */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
/* Bitmask and counter of trace recursion: */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
/* See kernel/kcov.c for more details. */
/* Coverage collection mode enabled for this task (0 if disabled): */
unsigned int kcov_mode;
/* Size of the kcov_area: */
unsigned int kcov_size;
/* Buffer for coverage collection: */
void *kcov_area;
/* KCOV descriptor wired with this task or NULL: */
struct kcov *kcov;
/* KCOV common handle for remote coverage collection: */
u64 kcov_handle;
/* KCOV sequence number: */
int kcov_sequence;
/* Collect coverage from softirq context: */
unsigned int kcov_softirq;
#endif
#ifdef CONFIG_MEMCG_V1
struct mem_cgroup *memcg_in_oom;
#endif
#ifdef CONFIG_MEMCG
/* Number of pages to reclaim on returning to userland: */
unsigned int memcg_nr_pages_over_high;
/* Used by memcontrol for targeted memcg charge: */
struct mem_cgroup *active_memcg;
/* Cache for current->cgroups->memcg->objcg lookups: */
struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_BLK_CGROUP
struct gendisk *throttle_disk;
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
# ifdef CONFIG_PREEMPT_RT
unsigned long saved_state_change;
# endif
#endif
struct rcu_head rcu;
refcount_t rcu_users;
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
struct timer_list oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* A live task holds one reference: */
refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
int patch_state;
#endif
#ifdef CONFIG_SECURITY
/* Used by LSM modules for access restriction: */
void *security;
#endif
#ifdef CONFIG_BPF_SYSCALL
/* Used by BPF task local storage */
struct bpf_local_storage __rcu *bpf_storage;
/* Used for BPF run context */
struct bpf_run_ctx *bpf_ctx;
#endif
/* Used by BPF for per-TASK xdp storage */
struct bpf_net_context *bpf_net_context;
#ifdef CONFIG_KSTACK_ERASE
unsigned long lowest_stack;
#endif
#ifdef CONFIG_KSTACK_ERASE_METRICS
unsigned long prev_lowest_stack;
#endif
#ifdef CONFIG_X86_MCE
void __user *mce_vaddr;
__u64 mce_kflags;
u64 mce_addr;
__u64 mce_ripv : 1,
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
int mce_count;
#endif
#ifdef CONFIG_KRETPROBES
struct llist_head kretprobe_instances;
#endif
#ifdef CONFIG_RETHOOK
struct llist_head rethooks;
#endif
#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
/*
* If L1D flush is supported on mm context switch,
* then we use this callback head to queue kill work
* to kill tasks that are not running on SMT-disabled
* cores.
*/
struct callback_head l1d_flush_kill;
#endif
#ifdef CONFIG_RV
/*
* Per-task RV monitor, fixed in CONFIG_RV_PER_TASK_MONITORS.
* If memory becomes a concern, we can think about a dynamic method.
*/
union rv_task_monitor rv[CONFIG_RV_PER_TASK_MONITORS];
#endif
#ifdef CONFIG_USER_EVENTS
struct user_event_mm *user_event_mm;
#endif
#ifdef CONFIG_UNWIND_USER
struct unwind_task_info unwind_info;
#endif
/* CPU-specific state of this task: */
struct thread_struct thread;
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
*/
randomized_struct_fields_end
} __attribute__ ((aligned (64)));
#ifdef CONFIG_SCHED_PROXY_EXEC
DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec);
static inline bool sched_proxy_exec(void)
{
return static_branch_likely(&__sched_proxy_exec);
}
#else
static inline bool sched_proxy_exec(void)
{
return false;
}
#endif
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
static inline unsigned int __task_state_index(unsigned int tsk_state,
unsigned int tsk_exit_state)
{
unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
/*
* We're lying here, but rather than expose a completely new task state
* to userspace, we can make this appear as if the task has gone through
* a regular rt_mutex_lock() call.
* Report frozen tasks as uninterruptible.
*/
if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
state = TASK_UNINTERRUPTIBLE;
return fls(state);
}
static inline unsigned int task_state_index(struct task_struct *tsk)
{
return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
}
static inline char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
return state_char[state];
}
static inline char task_state_to_char(struct task_struct *tsk)
{
return task_index_to_char(task_state_index(tsk));
}
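/*
 * For example, a task blocked in an interruptible sleep reports 'S' via
 * task_state_to_char(), an uninterruptible sleep 'D', and an idle kthread
 * (TASK_IDLE) 'I', matching the single-letter states shown by ps(1) and in
 * /proc/<pid>/stat.
 */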
extern struct pid *cad_pid;
/*
* Per process flags
*/
#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* Dumped core */
#define PF_SIGNALED 0x00000400 /* Killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nofs_save() */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
* I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF__HOLE__00800000 0x00800000
#define PF__HOLE__01000000 0x01000000
#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning.
* See memalloc_pin_save() */
#define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */
#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
/*
* Only the _current_ task can read/write to tsk->flags, but other
* tasks can access tsk->flags in readonly mode for example
* with tsk_used_math (like during threaded core dumping).
* There is however an exception to this rule during ptrace
* or during fork: the ptracer task is allowed to write to the
* child->flags of its traced child (same goes for fork, the parent
* can write to the child->flags), because we're guaranteed the
* child is not running and in turn not changing child->flags
* at the same time the parent does it.
*/
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
static __always_inline bool is_percpu_thread(void)
{
return (current->flags & PF_NO_SETAFFINITY) &&
(current->nr_cpus_allowed == 1);
}
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func) \
static inline void task_set_##func(struct task_struct *p) \
{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func) \
static inline void task_clear_##func(struct task_struct *p) \
{ clear_bit(PFA_##name, &p->atomic_flags); }
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
current->flags &= ~flags;
current->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
/**
* set_cpus_allowed_ptr - set CPU affinity mask of a task
* @p: the task
* @new_mask: CPU affinity mask
*
* Return: zero if successful, or a negative error code
*/
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
*
* Return: The nice value [ -20 ... 0 ... 19 ].
*/
static inline int task_nice(const struct task_struct *p)
{
return PRIO_TO_NICE((p)->static_prio);
}
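/*
 * E.g. with the default priority layout (static_prio 120 == nice 0), a
 * static_prio of 130 yields a nice value of 10 and 100 yields -20.
 */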
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
*
* Return: 1 if @p is an idle task. 0 otherwise.
*/
static __always_inline bool is_idle_task(const struct task_struct *p)
{
return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
struct task_struct task;
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define task_thread_info(task) (&(task)->thread_info)
#else
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
/*
* find a task by one of its numerical ids
*
* find_task_by_pid_ns():
* finds a task by its pid in the specified namespace
* find_task_by_vpid():
* finds a task by its virtual pid
*
* see also find_vpid() etc in include/linux/pid.h
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
/*
* find a task by its virtual pid and get the task struct
*/
extern struct task_struct *find_get_task_by_vpid(pid_t nr);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
extern void kick_process(struct task_struct *tsk);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
#define set_task_comm(tsk, from) ({ \
BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN); \
__set_task_comm(tsk, from, false); \
})
/*
* - Why not use task_lock()?
* User space can randomly change their names anyway, so locking for readers
* doesn't make sense. For writers, locking is probably necessary, as a race
* condition could lead to long-term mixed results.
* The strscpy_pad() in __set_task_comm() can ensure that the task comm is
* always NUL-terminated and zero-padded. Therefore the race condition between
* reader and writer is not an issue.
*
* - BUILD_BUG_ON() can help prevent the buf from being truncated.
* Since the callers don't perform any return value checks, this safeguard is
* necessary.
*/
#define get_task_comm(buf, tsk) ({ \
BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN); \
strscpy_pad(buf, (tsk)->comm); \
buf; \
})
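/*
 * Typical usage (a sketch; 'tsk' is any task_struct pointer the caller
 * already holds a reference to):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_info("comm=%s\n", comm);
 */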
static __always_inline void scheduler_ipi(void)
{
/*
* Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
* TIF_NEED_RESCHED remotely (for the first time) will also send
* this IPI.
*/
preempt_fold_need_resched();
}
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
/*
* Set thread flags in other task's structures.
* See asm/thread_info.h for TIF_xxxx flags available:
*/
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
bool value)
{
update_ti_thread_flag(task_thread_info(tsk), flag, value);
}
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
if (tracepoint_enabled(sched_set_need_resched_tp) &&
!test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
__trace_set_need_resched(tsk, TIF_NEED_RESCHED);
set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
(atomic_long_t *)&task_thread_info(tsk)->flags);
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling and
* reacquire it afterwards.
*/
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
{
return static_call_mod(cond_resched)();
}
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
extern int dynamic_cond_resched(void);
static __always_inline int _cond_resched(void)
{
return dynamic_cond_resched();
}
#else /* !CONFIG_PREEMPTION */
static inline int _cond_resched(void)
{
return __cond_resched();
}
#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
static inline int _cond_resched(void)
{
return 0;
}
#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
#define cond_resched() ({ \
__might_resched(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
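/*
 * Example: a long-running loop in process context that is not otherwise
 * preemptible can bound its latency impact like this (illustrative;
 * 'item', 'list' and 'process()' are placeholders):
 *
 *	list_for_each_entry(item, &list, node) {
 *		process(item);
 *		cond_resched();
 *	}
 */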
extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
#ifndef CONFIG_PREEMPT_RT
/*
* Non RT kernels have an elevated preempt count due to the held lock,
* but are not allowed to be inside a RCU read side critical section
*/
# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
#else
/*
* spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
* cond_resched*lock() has to take that into account because it checks for
* preempt_count() and rcu_preempt_depth().
*/
# define PREEMPT_LOCK_RESCHED_OFFSETS \
(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
#endif
#define cond_resched_lock(lock) ({ \
__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
__cond_resched_lock(lock); \
})
#define cond_resched_rwlock_read(lock) ({ \
__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
__cond_resched_rwlock_read(lock); \
})
#define cond_resched_rwlock_write(lock) ({ \
__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
__cond_resched_rwlock_write(lock); \
})
#ifndef CONFIG_PREEMPT_RT
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
struct mutex *m = p->blocked_on;
if (m)
lockdep_assert_held_once(&m->wait_lock);
return m;
}
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
struct mutex *blocked_on = READ_ONCE(p->blocked_on);
WARN_ON_ONCE(!m);
/* The task should only be setting itself as blocked */
WARN_ON_ONCE(p != current);
/* Currently we serialize blocked_on under the mutex::wait_lock */
lockdep_assert_held_once(&m->wait_lock);
/*
* Check to ensure we don't overwrite an existing mutex value
* with a different mutex. Note, setting it to the same
* lock repeatedly is ok.
*/
WARN_ON_ONCE(blocked_on && blocked_on != m);
WRITE_ONCE(p->blocked_on, m);
}
static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
guard(raw_spinlock_irqsave)(&m->wait_lock);
__set_task_blocked_on(p, m);
}
static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
if (m) {
struct mutex *blocked_on = READ_ONCE(p->blocked_on);
/* Currently we serialize blocked_on under the mutex::wait_lock */
lockdep_assert_held_once(&m->wait_lock);
/*
* There may be cases where we re-clear already cleared
* blocked_on relationships, but make sure we are not
* clearing the relationship with a different lock.
*/
WARN_ON_ONCE(blocked_on && blocked_on != m);
}
WRITE_ONCE(p->blocked_on, NULL);
}
static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
guard(raw_spinlock_irqsave)(&m->wait_lock);
__clear_task_blocked_on(p, m);
}
#else
static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
}
static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
}
#endif /* !CONFIG_PREEMPT_RT */
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
}
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
*/
#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{
return READ_ONCE(task_thread_info(p)->cpu);
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
static inline unsigned int task_cpu(const struct task_struct *p)
{
return 0;
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}
#endif /* CONFIG_SMP */
static inline bool task_is_runnable(struct task_struct *p)
{
return p->on_rq && !p->se.sched_delayed;
}
extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
*
* This allows us to terminate optimistic spin loops and block, analogous to
* the native optimistic spin heuristic of testing if the lock owner task is
* running or not.
*/
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
return false;
}
#endif
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
static inline bool owner_on_cpu(struct task_struct *owner)
{
/*
* Due to the lock holder preemption issue, we skip spinning if the
* task is not on a CPU or its CPU is preempted.
*/
return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}
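/*
 * Sketch of the optimistic spin pattern this supports (simplified from the
 * mutex/rwsem spinners, which also recheck lock ownership on each pass):
 *
 *	while (owner_on_cpu(owner))
 *		cpu_relax();
 */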
/* Returns effective CPU energy utilization, as seen by the scheduler */
unsigned long sched_cpu_util(int cpu);
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
unsigned long uaddr);
extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
#ifdef CONFIG_MEM_ALLOC_PROFILING
static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
swap(current->alloc_tag, tag);
return tag;
}
static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
#endif
current->alloc_tag = old;
}
#else
#define alloc_tag_save(_tag) NULL
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
#ifndef MODULE
#ifndef COMPILE_OFFSETS
extern void ___migrate_enable(void);
struct rq;
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
/*
* The "struct rq" is not available here, so we can't access the
* "runqueues" with this_cpu_ptr(), as the compilation will fail in
* this_cpu_ptr() -> raw_cpu_ptr() -> __verify_pcpu_ptr():
* typeof((ptr) + 0)
*
* So use arch_raw_cpu_ptr()/PERCPU_PTR() directly here.
*/
#ifdef CONFIG_SMP
#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
#else
#define this_rq_raw() PERCPU_PTR(&runqueues)
#endif
#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))
static inline void __migrate_enable(void)
{
struct task_struct *p = current;
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Check both overflow from migrate_disable() and superfluous
* migrate_enable().
*/
if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
return;
#endif
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
}
/*
* Ensure stop_task runs either before or after this, and that
* __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
*/
guard(preempt)();
if (unlikely(p->cpus_ptr != &p->cpus_mask))
___migrate_enable();
/*
* Mustn't clear migration_disabled() until cpus_ptr points back at the
* regular cpus_mask, otherwise things that race (eg.
* select_fallback_rq) get confused.
*/
barrier();
p->migration_disabled = 0;
this_rq_pinned()--;
}
static inline void __migrate_disable(void)
{
struct task_struct *p = current;
if (p->migration_disabled) {
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Warn about overflow half-way through the range.
*/
WARN_ON_ONCE((s16)p->migration_disabled < 0);
#endif
p->migration_disabled++;
return;
}
guard(preempt)();
this_rq_pinned()++;
p->migration_disabled = 1;
}
#else /* !COMPILE_OFFSETS */
static inline void __migrate_disable(void) { }
static inline void __migrate_enable(void) { }
#endif /* !COMPILE_OFFSETS */
/*
* To avoid exporting the runqueues variable, migrate_enable() and
* migrate_disable() are also defined and exported in kernel/sched/core.c,
* and modules use those versions. The macro
* "INSTANTIATE_EXPORTED_MIGRATE_DISABLE" is defined in kernel/sched/core.c.
*/
#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
static __always_inline void migrate_disable(void)
{
__migrate_disable();
}
static __always_inline void migrate_enable(void)
{
__migrate_enable();
}
#else /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
extern void migrate_disable(void);
extern void migrate_enable(void);
#endif /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
#else /* MODULE */
extern void migrate_disable(void);
extern void migrate_enable(void);
#endif /* MODULE */
DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
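/*
 * Illustrative sketch (an assumption, not kernel code): with the guard class
 * defined above, a scope can keep the current task on its CPU without pairing
 * migrate_disable()/migrate_enable() by hand; nested use simply increments
 * p->migration_disabled as described earlier.
 */
static inline void example_migration_pinned_section(void)
{
	guard(migrate)();	/* migrate_disable() now, migrate_enable() at scope exit */

	/* ... code that relies on staying on the current CPU goes here ... */
}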
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H
#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <linux/zswap.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
#define MAX_PAGE_ORDER 10
#else
#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
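/*
 * Worked example (assuming the default MAX_PAGE_ORDER of 10 and 4 KiB pages):
 * the largest buddy allocation is MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages,
 * i.e. 4 MiB, and there are NR_PAGE_ORDERS = 11 free lists, for orders 0-10.
 */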
/* Defines the order for the number of pages that have a migrate type. */
#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER
#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER
#else
#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER
#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */
/*
* MAX_PAGE_ORDER, the maximum order of pages the buddy allocator can
* allocate, must be greater than or equal to PAGE_BLOCK_MAX_ORDER, the
* order of the page blocks that carry a migrate type.
*/
#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER)
#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER
#endif
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
* costly to service. That is between allocation orders which should
* coalesce naturally under reasonable reclaim pressure and those which
* will not.
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* The MIGRATE_CMA migration type is designed to mimic the way
* ZONE_MOVABLE works. Only movable pages can be allocated
* from MIGRATE_CMA pageblocks, and the page allocator never
* implicitly changes the migration type of a MIGRATE_CMA pageblock.
*
* To use it, change the migratetype of a range of pageblocks to
* MIGRATE_CMA, which can be done with the __free_pageblock_cma()
* function.
*/
MIGRATE_CMA,
__MIGRATE_TYPE_END = MIGRATE_CMA,
#else
__MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
MIGRATE_TYPES
};
/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
/*
* __dump_folio() in mm/debug.c passes a folio pointer to an on-stack struct folio,
* so folio_pfn() cannot be used and pfn is needed.
*/
# define is_migrate_cma_folio(folio, pfn) \
(get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
# define is_migrate_cma_folio(folio, pfn) false
#endif
static inline bool is_migrate_movable(int mt)
{
return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}
/*
* Check whether a migratetype can be merged with another migratetype.
*
* It is only mergeable when it can fall back to other migratetypes for
* allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
*/
static inline bool migratetype_is_mergeable(int mt)
{
return mt < MIGRATE_PCPTYPES;
}
#define for_each_migratetype_order(order, type) \
for (order = 0; order < NR_PAGE_ORDERS; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
#define get_pageblock_migratetype(page) \
get_pfnblock_migratetype(page, page_to_pfn(page))
#define folio_migratetype(folio) \
get_pageblock_migratetype(&folio->page)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
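/*
 * Illustrative usage sketch (an assumption, not kernel code): walk every
 * (order, migratetype) pair with for_each_migratetype_order() and count the
 * free blocks in an array of free areas such as zone->free_area.
 * example_count_free_blocks() is a hypothetical helper.
 */
static inline unsigned long example_count_free_blocks(struct free_area *areas)
{
	unsigned int order;
	int type;
	unsigned long blocks = 0;
	struct list_head *pos;

	for_each_migratetype_order(order, type)
		list_for_each(pos, &areas[order].free_list[type])
			blocks++;
	return blocks;
}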
struct pglist_data;
#ifdef CONFIG_NUMA
enum numa_stat_item {
NUMA_HIT, /* allocated in intended node */
NUMA_MISS, /* allocated in non intended node */
NUMA_FOREIGN, /* was intended here, hit elsewhere */
NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
NR_VM_NUMA_EVENT_ITEMS
};
#else
#define NR_VM_NUMA_EVENT_ITEMS 0
#endif
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
NR_FREE_PAGES_BLOCKS,
NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
NR_ZONE_ACTIVE_ANON,
NR_ZONE_INACTIVE_FILE,
NR_ZONE_ACTIVE_FILE,
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
/* Second 128 byte cacheline */
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
NR_FREE_CMA_PAGES,
#ifdef CONFIG_UNACCEPTED_MEMORY
NR_UNACCEPTED,
#endif
NR_VM_ZONE_STAT_ITEMS };
enum node_stat_item {
NR_LRU_BASE,
NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
NR_ACTIVE_ANON, /* " " " " " */
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
NR_SLAB_RECLAIMABLE_B,
NR_SLAB_UNRECLAIMABLE_B,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
WORKINGSET_REFAULT_BASE,
WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
WORKINGSET_REFAULT_FILE,
WORKINGSET_ACTIVATE_BASE,
WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
WORKINGSET_ACTIVATE_FILE,
WORKINGSET_RESTORE_BASE,
WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
WORKINGSET_RESTORE_FILE,
WORKINGSET_NODERECLAIM,
NR_ANON_MAPPED, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
only modified from process context */
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
NR_FILE_THPS,
NR_FILE_PMDMAPPED,
NR_ANON_THPS,
NR_VMSCAN_WRITE,
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
NR_KERNEL_STACK_KB, /* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
NR_PAGETABLE, /* used for pagetables */
NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */
#ifdef CONFIG_IOMMU_SUPPORT
NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */
#endif
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
PGPROMOTE_SUCCESS, /* promote successfully */
/**
* Candidate pages for promotion based on hint fault latency. This
* counter is used to control the promotion rate and adjust the hot
* threshold.
*/
PGPROMOTE_CANDIDATE,
/**
* Not rate-limited (NRL) candidate pages: pages that can be promoted
* without considering the hot threshold because the fast-tier node has
* enough free pages. These promotions bypass the regular hotness checks
* and do NOT influence the promotion rate-limiter or
* threshold-adjustment logic.
* This is for statistics/monitoring purposes.
*/
PGPROMOTE_CANDIDATE_NRL,
#endif
/* PGDEMOTE_*: pages demoted */
PGDEMOTE_KSWAPD,
PGDEMOTE_DIRECT,
PGDEMOTE_KHUGEPAGED,
PGDEMOTE_PROACTIVE,
#ifdef CONFIG_HUGETLB_PAGE
NR_HUGETLB,
#endif
NR_BALLOON_PAGES,
NR_KERNEL_FILE_PAGES,
NR_VM_NODE_STAT_ITEMS
};
/*
* Returns true if the item should be printed as a number of THPs
* (/proc/vmstat currently prints the number of anon, file and shmem
* THPs, but the item is accounted in pages).
*/
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return false;
return item == NR_ANON_THPS ||
item == NR_FILE_THPS ||
item == NR_SHMEM_THPS ||
item == NR_SHMEM_PMDMAPPED ||
item == NR_FILE_PMDMAPPED;
}
/*
* Returns true if the value is measured in bytes (most vmstat values are
* measured in pages). This defines the API part, the internal representation
* might be different.
*/
static __always_inline bool vmstat_item_in_bytes(int idx)
{
/*
* Global and per-node slab counters track slab pages.
* It's expected that changes are multiples of PAGE_SIZE.
* Internally values are stored in pages.
*
* Per-memcg and per-lruvec counters track memory, consumed
* by individual slab objects. These counters are actually
* byte-precise.
*/
return (idx == NR_SLAB_RECLAIMABLE_B ||
idx == NR_SLAB_UNRECLAIMABLE_B);
}
/*
* We do arithmetic on the LRU lists in various places in the code,
* so it is important to keep the active lists LRU_ACTIVE higher in
* the array than the corresponding inactive lists, and to keep
* the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
*
* This has to be kept in sync with the statistics in zone_stat_item
* above and the descriptions in vmstat_text in mm/vmstat.c
*/
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2
enum lru_list {
LRU_INACTIVE_ANON = LRU_BASE,
LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
LRU_UNEVICTABLE,
NR_LRU_LISTS
};
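/*
 * Illustrative sketch (an assumption, not kernel code) of the arithmetic the
 * comment above refers to: the matching LRU list index is composed from the
 * LRU_FILE and LRU_ACTIVE offsets. example_lru_index() is a hypothetical
 * helper; the real lookup lives in include/linux/mm_inline.h.
 */
static inline enum lru_list example_lru_index(bool file, bool active)
{
	return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
}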
enum vmscan_throttle_state {
VMSCAN_THROTTLE_WRITEBACK,
VMSCAN_THROTTLE_ISOLATED,
VMSCAN_THROTTLE_NOPROGRESS,
VMSCAN_THROTTLE_CONGESTED,
NR_VMSCAN_THROTTLE,
};
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
static inline bool is_file_lru(enum lru_list lru)
{
return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
static inline bool is_active_lru(enum lru_list lru)
{
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
#define WORKINGSET_ANON 0
#define WORKINGSET_FILE 1
#define ANON_AND_FILE 2
enum lruvec_flags {
/*
* An lruvec has many dirty pages backed by a congested BDI:
* 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
* It can be cleared by cgroup reclaim or kswapd.
* 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
* It can only be cleared by kswapd.
*
* Essentially, kswapd can unthrottle an lruvec throttled by cgroup
* reclaim, but not vice versa. This only applies to the root cgroup.
* The goal is to prevent cgroup reclaim on the root cgroup (e.g.
* memory.reclaim) to unthrottle an unbalanced node (that was throttled
* by kswapd).
*/
LRUVEC_CGROUP_CONGESTED,
LRUVEC_NODE_CONGESTED,
};
#endif /* !__GENERATING_BOUNDS_H */
/*
* Evictable folios are divided into multiple generations. The youngest and the
* oldest generation numbers, max_seq and min_seq, are monotonically increasing.
* They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
* offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
* corresponding generation. The gen counter in folio->flags stores gen+1 while
* a folio is on one of lrugen->folios[]. Otherwise it stores 0.
*
* After a folio is faulted in, the aging needs to check the accessed bit at
* least twice before handing this folio over to the eviction. The first check
* clears the accessed bit from the initial fault; the second check makes sure
* this folio hasn't been used since then. This process, AKA second chance,
* requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI
* compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two
* generations are considered active; the rest of generations, if they exist,
* are considered inactive. See lru_gen_is_active().
*
* PG_active is always cleared while a folio is on one of lrugen->folios[] so
* that the sliding window does not need to worry about it. It is set again when
* a folio considered active is isolated for non-reclaiming purposes, e.g.,
* migration. See lru_gen_add_folio() and lru_gen_del_folio().
*
* MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
* number of categories of the active/inactive LRU when keeping track of
* accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
* in folio->flags, masked by LRU_GEN_MASK.
*/
#define MIN_NR_GENS 2U
#define MAX_NR_GENS 4U
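/*
 * Worked example of the gen+1 encoding described above (an illustration, not
 * a definition): a folio whose LRU_GEN_MASK bits decode to 0 is not on any
 * lrugen->folios[] list, while a value of 3 there means the folio sits in
 * generation 2, i.e. gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1.
 */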
/*
* Each generation is divided into multiple tiers. A folio accessed N times
* through file descriptors is in tier order_base_2(N). A folio in the first
* tier (N=0,1) is marked by PG_referenced unless it was faulted in through page
* tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
* PG_workingset. A folio in any other tier (1<N<5) between the first and last
* is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
*
* In contrast to moving across generations which requires the LRU lock, moving
* across tiers only involves atomic operations on folio->flags and therefore
* has a negligible cost in the buffered access path. In the eviction path,
* comparisons of refaulted/(evicted+protected) from the first tier and the rest
* infer whether folios accessed multiple times through file descriptors are
* statistically hot and thus worth protecting.
*
* MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
* number of categories of the active/inactive LRU when keeping track of
* accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
* folio->flags, masked by LRU_REFS_MASK.
*/
#define MAX_NR_TIERS 4U
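/*
 * Worked example of the tier mapping above (an illustration, assuming the
 * access count saturates at the last tier): N = 0 or 1 accesses puts a folio
 * in tier 0, N = 2 in tier 1, N = 3 or 4 in tier 2, and N >= 5 in tier 3,
 * the last tier, which is marked by PG_workingset.
 */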
#ifndef __GENERATING_BOUNDS_H
#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
/*
* For folios accessed multiple times through file descriptors,
* lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
* after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its
* bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily
* promoted into the second oldest generation in the eviction path. And when
* folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that
* lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is
* only valid when PG_referenced is set.
*
* For folios accessed multiple times through page tables, folio_update_gen()
* from a page table walk or lru_gen_set_refs() from a rmap walk sets
* PG_referenced after the accessed bit is cleared for the first time.
* Thereafter, those two paths set PG_workingset and promote folios to the
* youngest generation. Like folio_inc_gen(), folio_update_gen() also clears
* PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
*
* For both cases above, after PG_workingset is set on a folio, it remains until
* this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It
* can be set again if lru_gen_test_recent() returns true upon a refault.
*/
#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced))
struct lruvec;
struct page_vma_mapped_walk;
#ifdef CONFIG_LRU_GEN
enum {
LRU_GEN_ANON,
LRU_GEN_FILE,
};
enum {
LRU_GEN_CORE,
LRU_GEN_MM_WALK,
LRU_GEN_NONLEAF_YOUNG,
NR_LRU_GEN_CAPS
};
#define MIN_LRU_BATCH BITS_PER_LONG
#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
/* whether to keep historical stats from evicted generations */
#ifdef CONFIG_LRU_GEN_STATS
#define NR_HIST_GENS MAX_NR_GENS
#else
#define NR_HIST_GENS 1U
#endif
/*
* The youngest generation number is stored in max_seq for both anon and file
* types as they are aged on an equal footing. The oldest generation numbers are
* stored in min_seq[] separately for anon and file types so that they can be
* incremented independently. Ideally min_seq[] are kept in sync when both anon
* and file types are evictable. However, to adapt to situations like extreme
* swappiness, they are allowed to be out of sync by at most
* MAX_NR_GENS-MIN_NR_GENS-1.
*
* The number of pages in each generation is eventually consistent and therefore
* can be transiently negative when reset_batch_size() is pending.
*/
struct lru_gen_folio {
/* the aging increments the youngest generation number */
unsigned long max_seq;
/* the eviction increments the oldest generation numbers */
unsigned long min_seq[ANON_AND_FILE];
/* the birth time of each generation in jiffies */
unsigned long timestamps[MAX_NR_GENS];
/* the multi-gen LRU lists, lazily sorted on eviction */
struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the multi-gen LRU sizes, eventually consistent */
long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the exponential moving average of refaulted */
unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
/* the exponential moving average of evicted+protected */
unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
/* can only be modified under the LRU lock */
unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* can be modified without holding the LRU lock */
atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
/* the memcg generation this lru_gen_folio belongs to */
u8 gen;
/* the list segment this lru_gen_folio belongs to */
u8 seg;
/* per-node lru_gen_folio list for global reclaim */
struct hlist_nulls_node list;
};
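/*
 * Illustrative sketch (an assumption, not necessarily the kernel's helper):
 * a sequence number selects a slot in folios[]/nr_pages[] by its remainder
 * modulo MAX_NR_GENS, and the number of generations currently in use for a
 * type is max_seq - min_seq[type] + 1.
 */
static inline int example_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}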
enum {
MM_LEAF_TOTAL, /* total leaf entries */
MM_LEAF_YOUNG, /* young leaf entries */
MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
NR_MM_STATS
};
/* double-buffering Bloom filters */
#define NR_BLOOM_FILTERS 2
struct lru_gen_mm_state {
/* synced with max_seq after each iteration */
unsigned long seq;
/* where the current iteration continues after */
struct list_head *head;
/* where the last iteration ended before */
struct list_head *tail;
/* Bloom filters flip after each iteration */
unsigned long *filters[NR_BLOOM_FILTERS];
/* the mm stats for debugging */
unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
};
struct lru_gen_mm_walk {
/* the lruvec under reclaim */
struct lruvec *lruvec;
/* max_seq from lru_gen_folio: can be out of date */
unsigned long seq;
/* the next address within an mm to scan */
unsigned long next_addr;
/* to batch promoted pages */
int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* to batch the mm stats */
int mm_stats[NR_MM_STATS];
/* total batched items */
int batched;
int swappiness;
bool force_scan;
};
/*
* For each node, memcgs are divided into two generations: the old and the
* young. For each generation, memcgs are randomly sharded into multiple bins
* to improve scalability. For each bin, the hlist_nulls is virtually divided
* into three segments: the head, the tail and the default.
*
* An onlining memcg is added to the tail of a random bin in the old generation.
* The eviction starts at the head of a random bin in the old generation. The
* per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
* the old generation, is incremented when all its bins become empty.
*
* There are four operations:
* 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
* current generation (old or young) and updates its "seg" to "head";
* 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
* current generation (old or young) and updates its "seg" to "tail";
* 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
* generation, updates its "gen" to "old" and resets its "seg" to "default";
* 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
* young generation, updates its "gen" to "young" and resets its "seg" to
* "default".
*
* The events that trigger the above operations are:
* 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
* 2. The first attempt to reclaim a memcg below low, which triggers
* MEMCG_LRU_TAIL;
* 3. The first attempt to reclaim a memcg offlined or below reclaimable size
* threshold, which triggers MEMCG_LRU_TAIL;
* 4. The second attempt to reclaim a memcg offlined or below reclaimable size
* threshold, which triggers MEMCG_LRU_YOUNG;
* 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
* 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
* 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
*
* Notes:
* 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
* of their max_seq counters ensures the eventual fairness to all eligible
* memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
* 2. There are only two valid generations: old (seq) and young (seq+1).
* MEMCG_NR_GENS is set to three so that when reading the generation counter
* locklessly, a stale value (seq-1) does not wrap around to young.
*/
#define MEMCG_NR_GENS 3
#define MEMCG_NR_BINS 8
struct lru_gen_memcg {
/* the per-node memcg generation counter */
unsigned long seq;
/* each memcg has one lru_gen_folio per node */
unsigned long nr_memcgs[MEMCG_NR_GENS];
/* per-node lru_gen_folio list for global reclaim */
struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
/* protects the above */
spinlock_t lock;
};
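/*
 * Illustrative sketch (an assumption, not necessarily the kernel's helper):
 * per the description above, the old memcg generation is indexed by
 * seq % MEMCG_NR_GENS and the young one by (seq + 1) % MEMCG_NR_GENS;
 * bins within a generation are chosen at random below MEMCG_NR_BINS.
 */
static inline int example_memcg_old_gen(const struct lru_gen_memcg *memcg_lru)
{
	return memcg_lru->seq % MEMCG_NR_GENS;
}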
void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_init_lruvec(struct lruvec *lruvec);
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
void lru_gen_online_memcg(struct mem_cgroup *memcg);
void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
#else /* !CONFIG_LRU_GEN */
static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
{
}
static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
return false;
}
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}
static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
}
static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
{
}
static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
{
}
static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}
#endif /* CONFIG_LRU_GEN */
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
/* per lruvec lru_lock for memcg */
spinlock_t lru_lock;
/*
* These track the cost of reclaiming one LRU - file or anon -
* over the other. As the observed cost of reclaiming one LRU
* increases, the reclaim scan balance tips toward the other.
*/
unsigned long anon_cost;
unsigned long file_cost;
/* Non-resident age, driven by LRU movement */
atomic_long_t nonresident_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
struct lru_gen_folio lrugen;
#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
struct zswap_lruvec_state zswap_lruvec_state;
};
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;
enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
WMARK_PROMO,
NR_WMARK
};
/*
* One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
* are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list
* is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 2
#else
#define NR_PCP_THP 0
#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
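/*
 * Worked example: with MIGRATE_PCPTYPES == 3 and PAGE_ALLOC_COSTLY_ORDER == 3,
 * NR_LOWORDER_PCP_LISTS is 3 * 4 = 12; the two extra THP lists bring
 * NR_PCP_LISTS to 14 when CONFIG_TRANSPARENT_HUGEPAGE is enabled.
 */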
/*
* Flags used in pcp->flags field.
*
* PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the
* previous page freeing. Used to avoid draining the PCP for an
* occasional high-order page freeing.
*
* PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
* draining it for consecutive high-order page freeing without
* allocation, if the CPU's data cache slice is large enough. This
* reduces zone lock contention and keeps cache-hot pages reusable.
*/
#define PCPF_PREV_FREE_HIGH_ORDER BIT(0)
#define PCPF_FREE_HIGH_BATCH BIT(1)
struct per_cpu_pages {
spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
int high_min; /* min high watermark */
int high_max; /* max high watermark */
int batch; /* chunk size for buddy add/remove */
u8 flags; /* protected by pcp->lock */
u8 alloc_factor; /* batch scaling factor during allocate */
#ifdef CONFIG_NUMA
u8 expire; /* When 0, remote pagesets are drained */
#endif
short free_count; /* consecutive free count */
/* Lists of pages, one per migrate type stored on the pcp-lists */
struct list_head lists[NR_PCP_LISTS];
} ____cacheline_aligned_in_smp;
struct per_cpu_zonestat {
#ifdef CONFIG_SMP
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
s8 stat_threshold;
#endif
#ifdef CONFIG_NUMA
/*
* Low priority inaccurate counters that are only folded
* on demand. Use a large type to avoid the overhead of
* folding during refresh_cpu_vm_stats.
*/
unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};
struct per_cpu_nodestat {
s8 stat_threshold;
s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};
#endif /* !__GENERATING_BOUNDS_H */
enum zone_type {
/*
* ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
* to DMA to all of the addressable memory (ZONE_NORMAL).
* On architectures where this area covers the whole 32 bit address
* space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
* DMA addressing constraints. This distinction is important as a 32bit
* DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
* platforms may need both zones as they support peripherals with
* different DMA addressing limitations.
*/
#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
ZONE_DMA32,
#endif
/*
* Normal addressable memory is in ZONE_NORMAL. DMA operations can be
* performed on pages in ZONE_NORMAL if the DMA devices support
* transfers to all addressable memory.
*/
ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
/*
* A memory area that is only addressable by the kernel through
* mapping portions into its own address space. This is for example
* used by i386 to allow the kernel to address the memory beyond
* 900MB. The kernel will set up special mappings (page
* table entries on i386) for each page that the kernel needs to
* access.
*/
ZONE_HIGHMEM,
#endif
/*
* ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
* movable pages with few exceptional cases described below. Main use
* cases for ZONE_MOVABLE are to make memory offlining/unplug more
* likely to succeed, and to locally limit unmovable allocations - e.g.,
* to increase the number of THP/huge pages. Notable special cases are:
*
* 1. Pinned pages: (long-term) pinning of movable pages might
* essentially turn such pages unmovable. Therefore, we do not allow
* pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
* faulted, they come from the right zone right away. However, it is
* still possible that the address space already has pages in
* ZONE_MOVABLE at the time the pages are pinned (i.e. the user has
* touched that memory before pinning). In that case we migrate them
* to a different zone. When migration fails, pinning fails.
* 2. memblock allocations: kernelcore/movablecore setups might create
* situations where ZONE_MOVABLE contains unmovable allocations
* after boot. Memory offlining and allocations fail early.
* 3. Memory holes: kernelcore/movablecore setups might create very rare
* situations where ZONE_MOVABLE contains memory holes after boot,
* for example, if we have sections that are only partially
* populated. Memory offlining and allocations fail early.
* 4. PG_hwpoison pages: while poisoned pages can be skipped during
* memory offlining, such pages cannot be allocated.
* 5. Unmovable PG_offline pages: in paravirtualized environments,
* hotplugged memory blocks might only partially be managed by the
* buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
* parts not managed by the buddy are unmovable PG_offline pages. In
* some cases (virtio-mem), such pages can be skipped during
* memory offlining, however, cannot be moved/allocated. These
* techniques might use alloc_contig_range() to hide previously
* exposed pages from the buddy again (e.g., to implement some sort
* of memory unplug in virtio-mem).
* 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
* situations where ZERO_PAGE(0), which is allocated differently
* on different platforms, may end up in a movable zone. ZERO_PAGE(0)
* cannot be migrated.
* 7. Memory-hotplug: when using memmap_on_memory and onlining the
* memory to the MOVABLE zone, the vmemmap pages are also placed in
* such zone. Such pages cannot be really moved around as they are
* self-stored in the range, but they are treated as movable when
* the range they describe is about to be offlined.
*
* In general, no unmovable allocations that degrade memory offlining
* should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
* have to expect that migrating pages in ZONE_MOVABLE can fail (even
* if has_unmovable_pages() states that there are no unmovable pages,
* there can be false negatives).
*/
ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
ZONE_DEVICE,
#endif
__MAX_NR_ZONES
};
#ifndef __GENERATING_BOUNDS_H
#define ASYNC_AND_SYNC 2
struct zone {
/* Read-mostly fields */
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long _watermark[NR_WMARK];
unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
unsigned long nr_free_highatomic;
/*
* We don't know whether the memory that we're going to allocate will be
* freeable and/or released eventually, so to avoid wasting several GB
* of RAM we must reserve some of the lower zone memory (otherwise we
* risk running OOM on the lower zones despite there being tons of
* freeable RAM in the higher zones). This array is
* recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
* changes.
*/
long lowmem_reserve[MAX_NR_ZONES];
#ifdef CONFIG_NUMA
int node;
#endif
struct pglist_data *zone_pgdat;
struct per_cpu_pages __percpu *per_cpu_pageset;
struct per_cpu_zonestat __percpu *per_cpu_zonestats;
/*
* the high and batch values are copied to individual pagesets for
* faster access
*/
int pageset_high_min;
int pageset_high_max;
int pageset_batch;
#ifndef CONFIG_SPARSEMEM
/*
* Flags for a pageblock_nr_pages block. See pageblock-flags.h.
* In SPARSEMEM, this map is stored in struct mem_section
*/
unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
unsigned long zone_start_pfn;
/*
* spanned_pages is the total pages spanned by the zone, including
* holes, which is calculated as:
* spanned_pages = zone_end_pfn - zone_start_pfn;
*
* present_pages is physical pages existing within the zone, which
* is calculated as:
* present_pages = spanned_pages - absent_pages(pages in holes);
*
* present_early_pages is present pages existing within the zone
* located on memory available since early boot, excluding hotplugged
* memory.
*
* managed_pages is present pages managed by the buddy system, which
* is calculated as (reserved_pages includes pages allocated by the
* bootmem allocator):
* managed_pages = present_pages - reserved_pages;
*
* cma pages is present pages that are assigned for CMA use
* (MIGRATE_CMA).
*
* So present_pages may be used by memory hotplug or memory power
* management logic to figure out unmanaged pages by checking
* (present_pages - managed_pages). And managed_pages should be used
* by page allocator and vm scanner to calculate all kinds of watermarks
* and thresholds.
*
* Locking rules:
*
* zone_start_pfn and spanned_pages are protected by span_seqlock.
* It is a seqlock because it has to be read outside of zone->lock,
* and it is done in the main allocator path. But, it is written
* quite infrequently.
*
* The span_seq lock is declared along with zone->lock because it is
* frequently read in proximity to zone->lock. It's good to
* give them a chance of being in the same cacheline.
*
* Write access to present_pages at runtime should be protected by
* mem_hotplug_begin/done(). Any reader who can't tolerate drift of
* present_pages should use get_online_mems() to get a stable value.
*/
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
#if defined(CONFIG_MEMORY_HOTPLUG)
unsigned long present_early_pages;
#endif
#ifdef CONFIG_CMA
unsigned long cma_pages;
#endif
const char *name;
#ifdef CONFIG_MEMORY_ISOLATION
/*
* Number of isolated pageblocks. Used to solve the incorrect
* freepage counting problem caused by racily retrieving the
* migratetype of a pageblock. Protected by zone->lock.
*/
unsigned long nr_isolate_pageblock;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
#endif
int initialized;
/* Write-intensive fields used from the page allocator */
CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
struct free_area free_area[NR_PAGE_ORDERS];
#ifdef CONFIG_UNACCEPTED_MEMORY
/* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
struct list_head unaccepted_pages;
/* To be called once the last page in the zone is accepted */
struct work_struct unaccepted_cleanup;
#endif
/* zone flags, see below */
unsigned long flags;
/* Primarily protects free_area */
spinlock_t lock;
/* Pages to be freed when next trylock succeeds */
struct llist_head trylock_free_pages;
/* Write-intensive fields used by compaction and vmstats. */
CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
* when reading the number of free pages to avoid per-cpu counter
* drift allowing watermarks to be breached
*/
unsigned long percpu_drift_mark;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* pfn where compaction free scanner should start */
unsigned long compact_cached_free_pfn;
/* pfn where compaction migration scanner should start */
unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
unsigned long compact_init_migrate_pfn;
unsigned long compact_init_free_pfn;
#endif
#ifdef CONFIG_COMPACTION
/*
* On compaction failure, 1<<compact_defer_shift compactions
* are skipped before trying again. The number attempted since
* last failure is tracked with compact_considered.
* compact_order_failed is the minimum compaction failed order.
*/
unsigned int compact_considered;
unsigned int compact_defer_shift;
int compact_order_failed;
#endif
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* Set to true when the PG_migrate_skip bits should be cleared */
bool compact_blockskip_flush;
#endif
bool contiguous;
CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
*/
PGDAT_WRITEBACK, /* reclaim scanning has recently found
* many pages under writeback
*/
PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
};
enum zone_flags {
ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
* Cleared when kswapd is woken.
*/
ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
static inline unsigned long wmark_pages(const struct zone *z,
enum zone_watermarks w)
{
return z->_watermark[w] + z->watermark_boost;
}
static inline unsigned long min_wmark_pages(const struct zone *z)
{
return wmark_pages(z, WMARK_MIN);
}
static inline unsigned long low_wmark_pages(const struct zone *z)
{
return wmark_pages(z, WMARK_LOW);
}
static inline unsigned long high_wmark_pages(const struct zone *z)
{
return wmark_pages(z, WMARK_HIGH);
}
static inline unsigned long promo_wmark_pages(const struct zone *z)
{
return wmark_pages(z, WMARK_PROMO);
}
static inline unsigned long zone_managed_pages(const struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
return zone->cma_pages;
#else
return 0;
#endif
}
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
}
static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
static inline bool zone_is_initialized(const struct zone *zone)
{
return zone->initialized;
}
static inline bool zone_is_empty(const struct zone *zone)
{
return zone->spanned_pages == 0;
}
#ifndef BUILD_VDSO32_64
/*
* The zone field is never updated after free_area_init_core()
* sets it, so none of the operations on it need to be atomic.
*/
/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
/*
* Define the bit shifts to access each section. For non-existent
* sections we define the shift as 0; that plus a 0 mask ensures
* the compiler will optimise away references to them.
*/
#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \
SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \
NODES_PGOFF : ZONES_PGOFF)
#endif
#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type memdesc_zonenum(memdesc_flags_t flags)
{
ASSERT_EXCLUSIVE_BITS(flags.f, ZONES_MASK << ZONES_PGSHIFT);
return (flags.f >> ZONES_PGSHIFT) & ZONES_MASK;
}
static inline enum zone_type page_zonenum(const struct page *page)
{
return memdesc_zonenum(page->flags);
}
static inline enum zone_type folio_zonenum(const struct folio *folio)
{
return memdesc_zonenum(folio->flags);
}
#ifdef CONFIG_ZONE_DEVICE
static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
return memdesc_zonenum(mdf) == ZONE_DEVICE;
}
static inline struct dev_pagemap *page_pgmap(const struct page *page)
{
VM_WARN_ON_ONCE_PAGE(!memdesc_is_zone_device(page->flags), page);
return page_folio(page)->pgmap;
}
/*
* Consecutive zone device pages should not be merged into the same sgl
* or bvec segment with other types of pages or if they belong to different
* pgmaps. Otherwise getting the pgmap of a given segment is not possible
* without scanning the entire segment. This helper returns true either if
* both pages are not zone device pages or both pages are zone device pages
* with the same pgmap.
*/
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
const struct page *b)
{
if (memdesc_is_zone_device(a->flags) != memdesc_is_zone_device(b->flags))
return false;
if (!memdesc_is_zone_device(a->flags))
return true;
return page_pgmap(a) == page_pgmap(b);
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
unsigned long, struct dev_pagemap *);
#else
static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
return false;
}
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
const struct page *b)
{
return true;
}
static inline struct dev_pagemap *page_pgmap(const struct page *page)
{
return NULL;
}
#endif
static inline bool is_zone_device_page(const struct page *page)
{
return memdesc_is_zone_device(page->flags);
}
static inline bool folio_is_zone_device(const struct folio *folio)
{
return memdesc_is_zone_device(folio->flags);
}
static inline bool is_zone_movable_page(const struct page *page)
{
return page_zonenum(page) == ZONE_MOVABLE;
}
static inline bool folio_is_zone_movable(const struct folio *folio)
{
return folio_zonenum(folio) == ZONE_MOVABLE;
}
#endif
/*
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
*/
static inline bool zone_intersects(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
return false;
if (start_pfn >= zone_end_pfn(zone) ||
start_pfn + nr_pages <= zone->zone_start_pfn)
return false;
return true;
}
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
* queues ("queue_length >> 12") during an aging round.
*/
#define DEF_PRIORITY 12
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
enum {
ZONELIST_FALLBACK, /* zonelist with fallback */
#ifdef CONFIG_NUMA
/*
* The NUMA zonelists are doubled because we need zonelists that
* restrict the allocations to a single node for __GFP_THISNODE.
*/
ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */
#endif
MAX_ZONELISTS
};
/*
* This struct contains information about a zone in a zonelist. It is stored
* here to avoid dereferences into large structures and lookups of tables
*/
struct zoneref {
struct zone *zone; /* Pointer to actual zone */
int zone_idx; /* zone_idx(zoneref->zone) */
};
/*
* One allocation request operates on a zonelist. A zonelist
* is a list of zones, the first one is the 'goal' of the
* allocation, the other zones are fallback zones, in decreasing
* priority.
*
* To speed the reading of the zonelist, the zonerefs contain the zone index
* of the entry being read. Helper functions to access information given
* a struct zoneref are
*
* zonelist_zone() - Return the struct zone * for an entry in _zonerefs
* zonelist_zone_idx() - Return the index of the zone for an entry
* zonelist_node_idx() - Return the index of the node for an entry
*/
struct zonelist {
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
/*
* The array of struct pages for flatmem.
* It must be declared for SPARSEMEM as well because there are configurations
* that rely on that.
*/
extern struct page *mem_map;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
spinlock_t split_queue_lock;
struct list_head split_queue;
unsigned long split_queue_len;
};
#endif
#ifdef CONFIG_MEMORY_FAILURE
/*
* Per NUMA node memory failure handling statistics.
*/
struct memory_failure_stats {
/*
* Number of raw pages poisoned.
* Cases not accounted: memory outside kernel control, offline page,
* arch-specific memory_failure (SGX), hwpoison_filter() filtered
* error events, and unpoison actions from hwpoison_unpoison.
*/
unsigned long total;
/*
* Recovery results of poisoned raw pages handled by memory_failure,
* in sync with mf_result.
* total = ignored + failed + delayed + recovered.
* total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
*/
unsigned long ignored;
unsigned long failed;
unsigned long delayed;
unsigned long recovered;
};
#endif
/*
* On NUMA machines, each NUMA node has a pg_data_t to describe
* its memory layout. On UMA machines there is a single pglist_data which
* describes the whole memory.
*
* Memory statistics and page replacement data structures are maintained on a
* per-zone basis.
*/
typedef struct pglist_data {
/*
* node_zones contains just the zones for THIS node. Not all of the
* zones may be populated, but it is the full list. It is referenced by
* this node's node_zonelists as well as other node's node_zonelists.
*/
struct zone node_zones[MAX_NR_ZONES];
/*
* node_zonelists contains references to all zones in all nodes.
* Generally the first zones will be references to this node's
* node_zones.
*/
struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* Must be held any time you expect node_start_pfn,
* node_present_pages, node_spanned_pages or nr_zones to stay constant.
* Also synchronizes pgdat->first_deferred_pfn during deferred page
* init.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
* or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
*
* Nests above zone->lock and zone->span_seqlock
*/
spinlock_t node_size_lock;
#endif
unsigned long node_start_pfn;
unsigned long node_present_pages; /* total number of physical pages */
unsigned long node_spanned_pages; /* total size of physical page
range, including holes */
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
/* workqueues for throttling reclaim for different reasons. */
wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
unsigned long nr_reclaim_start; /* nr pages written while throttled
* when throttling started. */
#ifdef CONFIG_MEMORY_HOTPLUG
struct mutex kswapd_lock;
#endif
struct task_struct *kswapd; /* Protected by kswapd_lock */
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
atomic_t kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
bool proactive_compact_trigger;
#endif
/*
* This is a per-node reserve of pages that are not available
* to userspace allocations.
*/
unsigned long totalreserve_pages;
#ifdef CONFIG_NUMA
/*
* node reclaim becomes active if more unmapped pages or reclaimable
* slab pages than these thresholds exist.
*/
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
* If memory initialisation on large machines is deferred then this
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split deferred_split_queue;
#endif
#ifdef CONFIG_NUMA_BALANCING
/* start time in ms of current promote rate limit period */
unsigned int nbp_rl_start;
/* number of promote candidate pages at start time of current rate limit period */
unsigned long nbp_rl_nr_cand;
/* promote threshold in ms */
unsigned int nbp_threshold;
/* start time in ms of current promote threshold adjustment period */
unsigned int nbp_th_start;
/*
* number of promote candidate pages at start time of current promote
* threshold adjustment period
*/
unsigned long nbp_th_nr_cand;
#endif
/* Fields commonly accessed by the page reclaim scanner */
/*
* NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
*
* Use mem_cgroup_lruvec() to look up lruvecs.
*/
struct lruvec __lruvec;
unsigned long flags;
#ifdef CONFIG_LRU_GEN
/* kswap mm walk data */
struct lru_gen_mm_walk mm_walk;
/* lru_gen_folio list */
struct lru_gen_memcg memcg_lru;
#endif
CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
#ifdef CONFIG_NUMA
struct memory_tier __rcu *memtier;
#endif
#ifdef CONFIG_MEMORY_FAILURE
struct memory_failure_stats mf_stats;
#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
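/*
 * Illustrative sketch (an assumption, not kernel code): a minimal check of
 * whether a zone could satisfy an order-0 allocation without dropping below
 * its low watermark, ignoring ALLOC_* flags and capping fallback at
 * ZONE_NORMAL. example_zone_has_headroom() is a hypothetical helper.
 */
static inline bool example_zone_has_headroom(struct zone *z)
{
	return zone_watermark_ok(z, 0, low_wmark_pages(z), ZONE_NORMAL, 0);
}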
/*
* Memory initialization context, use to differentiate memory added by
* the platform statically or via memory hotplug interface.
*/
enum meminit_context {
MEMINIT_EARLY,
MEMINIT_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);
extern void lruvec_init(struct lruvec *lruvec);
static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
return lruvec->pgdat;
#else
return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif
/*
* zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(const struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(const struct zone *zone)
{
return false;
}
#endif
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
* populated_zone(). If the whole zone is reserved then we can easily
* end up with populated_zone() && !managed_zone().
*/
static inline bool managed_zone(const struct zone *zone)
{
return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
static inline bool populated_zone(const struct zone *zone)
{
return zone->present_pages;
}
#ifdef CONFIG_NUMA
static inline int zone_to_nid(const struct zone *zone)
{
return zone->node;
}
static inline void zone_set_nid(struct zone *zone, int nid)
{
zone->node = nid;
}
#else
static inline int zone_to_nid(const struct zone *zone)
{
return 0;
}
static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif
extern int movable_zone;
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
return (idx == ZONE_HIGHMEM ||
(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
return 0;
#endif
}
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone: pointer to struct zone variable
* Return: 1 for a highmem zone, 0 otherwise
*/
static inline int is_highmem(const struct zone *zone)
{
return is_highmem_idx(zone_idx(zone));
}
#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void);
#else
static inline bool has_managed_dma(void)
{
return false;
}
#endif
#ifndef CONFIG_NUMA
extern struct pglist_data contig_page_data;
static inline struct pglist_data *NODE_DATA(int nid)
{
return &contig_page_data;
}
#else /* CONFIG_NUMA */
#include <asm/mmzone.h>
#endif /* !CONFIG_NUMA */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
/**
* for_each_online_pgdat - helper macro to iterate over all online nodes
* @pgdat: pointer to a pg_data_t variable
*/
#define for_each_online_pgdat(pgdat) \
for (pgdat = first_online_pgdat(); \
pgdat; \
pgdat = next_online_pgdat(pgdat))
/**
* for_each_zone - helper macro to iterate over all memory zones
* @zone: pointer to struct zone variable
*
* The user only needs to declare the zone variable, for_each_zone
* fills it in.
*/
#define for_each_zone(zone) \
for (zone = (first_online_pgdat())->node_zones; \
zone; \
zone = next_zone(zone))
#define for_each_populated_zone(zone) \
for (zone = (first_online_pgdat())->node_zones; \
zone; \
zone = next_zone(zone)) \
if (!populated_zone(zone)) \
; /* do nothing */ \
else
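/*
 * Illustrative usage sketch (an assumption, not kernel code): walk every
 * populated zone in the system and sum its present pages, the typical
 * pattern for these iterators. example_total_present_pages() is a
 * hypothetical helper.
 */
static inline unsigned long example_total_present_pages(void)
{
	struct zone *zone;
	unsigned long pages = 0;

	for_each_populated_zone(zone)
		pages += zone->present_pages;
	return pages;
}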
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
return zoneref->zone;
}
static inline int zonelist_zone_idx(const struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
static inline int zonelist_node_idx(const struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}
struct zoneref *__next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes);
/**
* next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
* @z: The cursor used as a starting point for the search
* @highest_zoneidx: The zone index of the highest zone to return
* @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
* search. The zoneref returned is a cursor that represents the current zone
* being examined. It should be advanced by one before calling
* next_zones_zonelist again.
*
* Return: the next zone at or below highest_zoneidx within the allowed
* nodemask using a cursor within a zonelist as a starting point
*/
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes)
{
if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
return z;
return __next_zones_zonelist(z, highest_zoneidx, nodes);
}
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
* @zonelist: The zonelist to search for a suitable zone
* @highest_zoneidx: The zone index of the highest zone to return
* @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
* used to iterate the zonelist with next_zones_zonelist by advancing it by
* one before calling.
*
* When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
* never NULL). This may happen either genuinely, or due to concurrent nodemask
* update due to cpuset modification.
*
* Return: Zoneref pointer for the first suitable zone found
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
nodemask_t *nodes)
{
return next_zones_zonelist(zonelist->_zonerefs,
highest_zoneidx, nodes);
}
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
* @zone: The current zone in the iterator
* @z: The current pointer within zonelist->_zonerefs being iterated
* @zlist: The zonelist being iterated
* @highidx: The zone index of the highest zone to return
* @nodemask: Nodemask allowed by the allocator
*
* This iterator iterates through all zones at or below a given zone index and
* within a given nodemask.
*/
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
for (zone = zonelist_zone(z); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
* @zone: The current zone in the iterator
* @z: The current pointer within zonelist->zones being iterated
* @zlist: The zonelist being iterated
* @highidx: The zone index of the highest zone to return
*
* This iterator iterates through all zones at or below a given zone index.
*/
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
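/*
 * Example usage (illustrative sketch): walking a node's fallback zonelist for
 * zones usable at or below a ceiling index, optionally filtered by a
 * nodemask. The helper name is hypothetical; the zonelist lookup mirrors the
 * one used by movable_only_nodes() below.
 *
 *	static struct zone *example_first_usable_zone(int nid,
 *						      enum zone_type highidx,
 *						      nodemask_t *nodes)
 *	{
 *		struct zonelist *zl = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 *		struct zoneref *z;
 *		struct zone *zone;
 *
 *		for_each_zone_zonelist_nodemask(zone, z, zl, highidx, nodes)
 *			if (populated_zone(zone))
 *				return zone;
 *		return NULL;
 *	}
 */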
/* Whether the 'nodes' are all movable nodes */
static inline bool movable_only_nodes(nodemask_t *nodes)
{
struct zonelist *zonelist;
struct zoneref *z;
int nid;
if (nodes_empty(*nodes))
return false;
/*
* We can choose an arbitrary node from the nodemask to get a
* zonelist, as the zonelists are interlinked. We just need to find
* at least one zone that can satisfy kernel allocations.
*/
nid = first_node(*nodes);
zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
return !zonelist_zone(z);
}
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif
#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn) (0)
#endif
#ifdef CONFIG_SPARSEMEM
/*
* PA_SECTION_SHIFT physical address to/from section number
* PFN_SECTION_SHIFT pfn to/from section number
*/
#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
return sec << PFN_SECTION_SHIFT;
}
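/*
 * Worked example (illustrative; the values depend on the architecture): with
 * 4K pages (PAGE_SHIFT = 12) and SECTION_SIZE_BITS = 27 (128M sections, as
 * on x86_64), PFN_SECTION_SHIFT = 15 and PAGES_PER_SECTION = 32768, so
 * pfn_to_section_nr(0x12345) = 0x2 and section_nr_to_pfn(0x2) = 0x10000.
 */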
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif
#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
struct rcu_head rcu;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
/* See declaration of similar field in struct zone */
unsigned long pageblock_flags[0];
};
void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
struct page;
struct page_ext;
struct mem_section {
/*
* This is, logically, a pointer to an array of struct
* pages. However, it is stored with some other magic.
* (see sparse.c::sparse_init_one_section())
*
* Additionally during early boot we encode node id of
* the location of the section here to guide allocation.
* (see sparse.c::memory_present())
*
* Making it a UL at least makes someone do a cast
* before using it wrong.
*/
unsigned long section_mem_map;
struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
/*
* If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
* section. (see page_ext.h about this.)
*/
struct page_ext *page_ext;
unsigned long pad;
#endif
/*
* WARNING: mem_section must be a power-of-2 in size for the
* calculation and use of SECTION_ROOT_MASK to make sense.
*/
};
#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT 1
#endif
#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
return ms->usage->pageblock_flags;
}
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
unsigned long root = SECTION_NR_TO_ROOT(nr);
if (unlikely(root >= NR_SECTION_ROOTS))
return NULL;
#ifdef CONFIG_SPARSEMEM_EXTREME
if (!mem_section || !mem_section[root])
return NULL;
#endif
return &mem_section[root][nr & SECTION_ROOT_MASK];
}
extern size_t mem_section_usage_size(void);
/*
* We use the lower bits of the mem_map pointer to store
* a little bit of information. The pointer is calculated
* as mem_map - section_nr_to_pfn(pnum). The result is
* aligned to the minimum alignment of the two values:
* 1. All mem_map arrays are page-aligned.
* 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
* lowest bits. PFN_SECTION_SHIFT is arch-specific
* (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
* worst combination is powerpc with 256k pages,
* which results in PFN_SECTION_SHIFT equal to 6.
* To sum it up, at least 6 bits are available on all architectures.
* However, we can use more than 6 bits on architectures other than
* powerpc (e.g. 15 bits are available on x86_64, 13 bits are available
* with the worst case of 64K pages on arm64) as long as the
* extra bits are not used on powerpc.
*/
enum {
SECTION_MARKED_PRESENT_BIT,
SECTION_HAS_MEM_MAP_BIT,
SECTION_IS_ONLINE_BIT,
SECTION_IS_EARLY_BIT,
#ifdef CONFIG_ZONE_DEVICE
SECTION_TAINT_ZONE_DEVICE_BIT,
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
SECTION_IS_VMEMMAP_PREINIT_BIT,
#endif
SECTION_MAP_LAST_BIT,
};
#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT)
#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT)
#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT)
#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT)
#ifdef CONFIG_ZONE_DEVICE
#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT)
#endif
#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
unsigned long map = section->section_mem_map;
map &= SECTION_MAP_MASK;
return (struct page *)map;
}
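/*
 * Illustrative sketch (not part of the original header) of how the encoded
 * mem_map is consumed: because section_mem_map stores "mem_map -
 * section_nr_to_pfn(pnum)" in its upper bits, translating a pfn to its
 * struct page reduces to masking off the flag bits and indexing by the pfn
 * itself, roughly:
 *
 *	struct page *example_pfn_to_page(unsigned long pfn)
 *	{
 *		struct mem_section *ms = __pfn_to_section(pfn);
 *
 *		return __section_mem_map_addr(ms) + pfn;
 *	}
 */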
static inline int present_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
static inline int present_section_nr(unsigned long nr)
{
return present_section(__nr_to_section(nr));
}
static inline int valid_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
static inline int early_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
static inline int valid_section_nr(unsigned long nr)
{
return valid_section(__nr_to_section(nr));
}
static inline int online_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
#ifdef CONFIG_ZONE_DEVICE
static inline int online_device_section(const struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
#else
static inline int online_device_section(const struct mem_section *section)
{
return 0;
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return (section &&
(section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
}
void sparse_vmemmap_init_nid_early(int nid);
void sparse_vmemmap_init_nid_late(int nid);
#else
static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return 0;
}
static inline void sparse_vmemmap_init_nid_early(int nid)
{
}
static inline void sparse_vmemmap_init_nid_late(int nid)
{
}
#endif
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
}
#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
}
extern unsigned long __highest_present_section_nr;
static inline int subsection_map_index(unsigned long pfn)
{
return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
struct mem_section_usage *usage = READ_ONCE(ms->usage);
return usage ? test_bit(idx, usage->subsection_map) : 0;
}
static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
{
struct mem_section_usage *usage = READ_ONCE(ms->usage);
int idx = subsection_map_index(*pfn);
unsigned long bit;
if (!usage)
return false;
if (test_bit(idx, usage->subsection_map))
return true;
/* Find the next subsection that exists */
bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx);
if (bit == SUBSECTIONS_PER_SECTION)
return false;
*pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
return true;
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
return 1;
}
static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
{
return true;
}
#endif
void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
unsigned long flags);
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
* pfn_valid - check if there is a valid memory map entry for a PFN
* @pfn: the page frame number to check
*
* Check if there is a valid memory map entry aka struct page for the @pfn.
* Note, that availability of the memory map entry does not imply that
* there is actual usable memory at that @pfn. The struct page may
* represent a hole or an unusable page frame.
*
* Return: 1 for PFNs that have memory map entries and 0 otherwise
*/
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
int ret;
/*
* Ensure the upper PAGE_SHIFT bits are clear in the
* pfn. Else it might lead to false positives when
* some of the upper bits are set, but the lower bits
* match a valid pfn.
*/
if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
return 0;
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __pfn_to_section(pfn);
rcu_read_lock_sched();
if (!valid_section(ms)) {
rcu_read_unlock_sched();
return 0;
}
/*
* Traditionally early sections always returned pfn_valid() for
* the entire section-sized span.
*/
ret = early_section(ms) || pfn_section_valid(ms, pfn);
rcu_read_unlock_sched();
return ret;
}
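/*
 * Example usage (illustrative): pfn_valid() only guarantees that a memory map
 * entry exists, so callers typically guard pfn_to_page() with it.
 *
 *	if (pfn_valid(pfn))
 *		page = pfn_to_page(pfn);
 */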
/* Returns end_pfn or higher if no valid PFN remaining in range */
static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn)
{
unsigned long nr = pfn_to_section_nr(pfn);
rcu_read_lock_sched();
while (nr <= __highest_present_section_nr && pfn < end_pfn) {
struct mem_section *ms = __pfn_to_section(pfn);
if (valid_section(ms) &&
(early_section(ms) || pfn_section_first_valid(ms, &pfn))) {
rcu_read_unlock_sched();
return pfn;
}
/* Nothing left in this section? Skip to next section */
nr++;
pfn = section_nr_to_pfn(nr);
}
rcu_read_unlock_sched();
return end_pfn;
}
static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn)
{
pfn++;
if (pfn >= end_pfn)
return end_pfn;
/*
* Either every PFN within the section (or subsection for VMEMMAP) is
* valid, or none of them are. So there's no point repeating the check
* for every PFN; only call first_valid_pfn() again when crossing a
* (sub)section boundary (i.e. !(pfn & ~PAGE_{SUB,}SECTION_MASK)).
*/
if (pfn & ~(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
PAGE_SUBSECTION_MASK : PAGE_SECTION_MASK))
return pfn;
return first_valid_pfn(pfn, end_pfn);
}
#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
for ((_pfn) = first_valid_pfn((_start_pfn), (_end_pfn)); \
(_pfn) < (_end_pfn); \
(_pfn) = next_valid_pfn((_pfn), (_end_pfn)))
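/*
 * Example usage (illustrative sketch; the helper name is hypothetical):
 * counting the PFNs in a range that actually have a memory map entry.
 *
 *	static unsigned long example_count_valid_pfns(unsigned long start_pfn,
 *						      unsigned long end_pfn)
 *	{
 *		unsigned long pfn, count = 0;
 *
 *		for_each_valid_pfn(pfn, start_pfn, end_pfn)
 *			count++;
 *		return count;
 *	}
 */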
#endif
static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
return present_section(__pfn_to_section(pfn));
}
static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
while (++section_nr <= __highest_present_section_nr) {
if (present_section_nr(section_nr))
return section_nr;
}
return -1;
}
#define for_each_present_section_nr(start, section_nr) \
for (section_nr = next_present_section_nr(start - 1); \
section_nr != -1; \
section_nr = next_present_section_nr(section_nr))
/*
* These are _only_ used during initialisation, therefore they
* can use __initdata ... They could have names to indicate
* this restriction.
*/
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn) \
({ \
unsigned long __pfn_to_nid_pfn = (pfn); \
page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
})
#else
#define pfn_to_nid(pfn) (0)
#endif
void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
/*
* Fallback case for when the architecture provides its own pfn_valid() but
* not a corresponding for_each_valid_pfn().
*/
#ifndef for_each_valid_pfn
#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
for ((_pfn) = (_start_pfn); (_pfn) < (_end_pfn); (_pfn)++) \
if (pfn_valid(_pfn))
#endif
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM printk
#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PRINTK_H
#include <linux/tracepoint.h>
TRACE_EVENT(console,
TP_PROTO(const char *text, size_t len),
TP_ARGS(text, len),
TP_STRUCT__entry(
__dynamic_array(char, msg, len + 1)
),
TP_fast_assign(
/*
* Each trace entry is printed in a new line.
* If the msg finishes with '\n', cut it off
* to avoid blank lines in the trace.
*/
if ((len > 0) && (text[len-1] == '\n'))
len -= 1;
memcpy(__get_str(msg), text, len);
__get_str(msg)[len] = 0;
),
TP_printk("%s", __get_str(msg))
);
#endif /* _TRACE_PRINTK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 1997 Linus Torvalds
* (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
*/
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <linux/rw_hint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <trace/events/writeback.h>
#define CREATE_TRACE_POINTS
#include <trace/events/timestamp.h>
#include "internal.h"
/*
* Inode locking rules:
*
* inode->i_lock protects:
* inode->i_state, inode->i_hash, __iget(), inode->i_io_list
* Inode LRU list locks protect:
* inode->i_sb->s_inode_lru, inode->i_lru
* inode->i_sb->s_inode_list_lock protects:
* inode->i_sb->s_inodes, inode->i_sb_list
* bdi->wb.list_lock protects:
* bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
* inode_hash_lock protects:
* inode_hashtable, inode->i_hash
*
* Lock ordering:
*
* inode->i_sb->s_inode_list_lock
* inode->i_lock
* Inode LRU list locks
*
* bdi->wb.list_lock
* inode->i_lock
*
* inode_hash_lock
* inode->i_sb->s_inode_list_lock
* inode->i_lock
*
* iunique_lock
* inode_hash_lock
*/
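/*
 * Illustrative sketch of the ordering above: code that needs both the
 * superblock's inode list lock and an inode's i_lock must take them in this
 * order (as evict_inodes() below does).
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	spin_lock(&inode->i_lock);
 *	...
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&sb->s_inode_list_lock);
 */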
static unsigned int i_hash_mask __ro_after_init;
static unsigned int i_hash_shift __ro_after_init;
static struct hlist_head *inode_hashtable __ro_after_init;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
/*
* Empty aops. Can be used for the cases where the user does not
* define any of the address_space operations.
*/
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);
static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);
static struct kmem_cache *inode_cachep __ro_after_init;
static long get_nr_inodes(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_inodes, i);
return sum < 0 ? 0 : sum;
}
static inline long get_nr_inodes_unused(void)
{
int i;
long sum = 0;
for_each_possible_cpu(i)
sum += per_cpu(nr_unused, i);
return sum < 0 ? 0 : sum;
}
long get_nr_dirty_inodes(void)
{
/* not actually dirty inodes, but a wild approximation */
long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
return nr_dirty > 0 ? nr_dirty : 0;
}
#ifdef CONFIG_DEBUG_FS
static DEFINE_PER_CPU(long, mg_ctime_updates);
static DEFINE_PER_CPU(long, mg_fine_stamps);
static DEFINE_PER_CPU(long, mg_ctime_swaps);
static unsigned long get_mg_ctime_updates(void)
{
unsigned long sum = 0;
int i;
for_each_possible_cpu(i)
sum += data_race(per_cpu(mg_ctime_updates, i));
return sum;
}
static unsigned long get_mg_fine_stamps(void)
{
unsigned long sum = 0;
int i;
for_each_possible_cpu(i)
sum += data_race(per_cpu(mg_fine_stamps, i));
return sum;
}
static unsigned long get_mg_ctime_swaps(void)
{
unsigned long sum = 0;
int i;
for_each_possible_cpu(i)
sum += data_race(per_cpu(mg_ctime_swaps, i));
return sum;
}
#define mgtime_counter_inc(__var) this_cpu_inc(__var)
static int mgts_show(struct seq_file *s, void *p)
{
unsigned long ctime_updates = get_mg_ctime_updates();
unsigned long ctime_swaps = get_mg_ctime_swaps();
unsigned long fine_stamps = get_mg_fine_stamps();
unsigned long floor_swaps = timekeeping_get_mg_floor_swaps();
seq_printf(s, "%lu %lu %lu %lu\n",
ctime_updates, ctime_swaps, fine_stamps, floor_swaps);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mgts);
static int __init mg_debugfs_init(void)
{
debugfs_create_file("multigrain_timestamps", S_IFREG | S_IRUGO, NULL, NULL, &mgts_fops);
return 0;
}
late_initcall(mg_debugfs_init);
#else /* ! CONFIG_DEBUG_FS */
#define mgtime_counter_inc(__var) do { } while (0)
#endif /* CONFIG_DEBUG_FS */
/*
* Handle nr_inode sysctl
*/
#ifdef CONFIG_SYSCTL
/*
* Statistics gathering..
*/
static struct inodes_stat_t inodes_stat;
static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
inodes_stat.nr_inodes = get_nr_inodes();
inodes_stat.nr_unused = get_nr_inodes_unused();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static const struct ctl_table inodes_sysctls[] = {
{
.procname = "inode-nr",
.data = &inodes_stat,
.maxlen = 2*sizeof(long),
.mode = 0444,
.proc_handler = proc_nr_inodes,
},
{
.procname = "inode-state",
.data = &inodes_stat,
.maxlen = 7*sizeof(long),
.mode = 0444,
.proc_handler = proc_nr_inodes,
},
};
static int __init init_fs_inode_sysctls(void)
{
register_sysctl_init("fs", inodes_sysctls);
return 0;
}
early_initcall(init_fs_inode_sysctls);
#endif
static int no_open(struct inode *inode, struct file *file)
{
return -ENXIO;
}
/**
* inode_init_always_gfp - perform inode structure initialisation
* @sb: superblock inode belongs to
* @inode: inode to initialise
* @gfp: allocation flags
*
* These are initializations that need to be done on every inode
* allocation as the fields are not initialised by slab allocation.
* If there are additional allocations required @gfp is used.
*/
int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp)
{
static const struct inode_operations empty_iops;
static const struct file_operations no_open_fops = {.open = no_open};
struct address_space *const mapping = &inode->i_data;
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
inode->i_state = 0;
atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
inode->i_fop = &no_open_fops;
inode->i_ino = 0;
inode->__i_nlink = 1;
inode->i_opflags = 0;
if (sb->s_xattr)
inode->i_opflags |= IOP_XATTR;
if (sb->s_type->fs_flags & FS_MGTIME)
inode->i_opflags |= IOP_MGTIME;
i_uid_write(inode, 0);
i_gid_write(inode, 0);
atomic_set(&inode->i_writecount, 0);
inode->i_size = 0;
inode->i_write_hint = WRITE_LIFE_NOT_SET;
inode->i_blocks = 0;
inode->i_bytes = 0;
inode->i_generation = 0;
inode->i_pipe = NULL;
inode->i_cdev = NULL;
inode->i_link = NULL;
inode->i_dir_seq = 0;
inode->i_rdev = 0;
inode->dirtied_when = 0;
#ifdef CONFIG_CGROUP_WRITEBACK
inode->i_wb_frn_winner = 0;
inode->i_wb_frn_avg_time = 0;
inode->i_wb_frn_history = 0;
#endif
spin_lock_init(&inode->i_lock);
lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
init_rwsem(&inode->i_rwsem);
lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
atomic_set(&inode->i_dio_count, 0);
mapping->a_ops = &empty_aops;
mapping->host = inode;
mapping->flags = 0;
mapping->wb_err = 0;
atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
atomic_set(&mapping->nr_thps, 0);
#endif
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
mapping->i_private_data = NULL;
mapping->writeback_index = 0;
init_rwsem(&mapping->invalidate_lock);
lockdep_set_class_and_name(&mapping->invalidate_lock,
&sb->s_type->invalidate_lock_key,
"mapping.invalidate_lock");
if (sb->s_iflags & SB_I_STABLE_WRITES)
mapping_set_stable_writes(mapping);
inode->i_private = NULL;
inode->i_mapping = mapping;
INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif
#ifdef CONFIG_FSNOTIFY
inode->i_fsnotify_mask = 0;
#endif
inode->i_flctx = NULL;
if (unlikely(security_inode_alloc(inode, gfp)))
return -ENOMEM;
this_cpu_inc(nr_inodes);
return 0;
}
EXPORT_SYMBOL(inode_init_always_gfp);
void free_inode_nonrcu(struct inode *inode)
{
kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);
static void i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
if (inode->free_inode)
inode->free_inode(inode);
else
free_inode_nonrcu(inode);
}
/**
* alloc_inode - obtain an inode
* @sb: superblock
*
* Allocates a new inode for the given superblock.
* The inode won't be chained into the superblock's s_inodes list.
* This means:
* - the fs can't be unmounted
* - quotas, fsnotify and writeback can't work
*/
struct inode *alloc_inode(struct super_block *sb)
{
const struct super_operations *ops = sb->s_op;
struct inode *inode;
if (ops->alloc_inode)
inode = ops->alloc_inode(sb);
else
inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL);
if (!inode)
return NULL;
if (unlikely(inode_init_always(sb, inode))) {
if (ops->destroy_inode) {
ops->destroy_inode(inode);
if (!ops->free_inode)
return NULL;
}
inode->free_inode = ops->free_inode;
i_callback(&inode->i_rcu);
return NULL;
}
return inode;
}
void __destroy_inode(struct inode *inode)
{
BUG_ON(inode_has_buffers(inode));
inode_detach_wb(inode);
security_inode_free(inode);
fsnotify_inode_delete(inode);
locks_free_lock_context(inode);
if (!inode->i_nlink) {
WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
atomic_long_dec(&inode->i_sb->s_remove_count);
}
#ifdef CONFIG_FS_POSIX_ACL
if (inode->i_acl && !is_uncached_acl(inode->i_acl))
posix_acl_release(inode->i_acl);
if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
posix_acl_release(inode->i_default_acl);
#endif
this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);
static void destroy_inode(struct inode *inode)
{
const struct super_operations *ops = inode->i_sb->s_op;
BUG_ON(!list_empty(&inode->i_lru));
__destroy_inode(inode);
if (ops->destroy_inode) {
ops->destroy_inode(inode);
if (!ops->free_inode)
return;
}
inode->free_inode = ops->free_inode;
call_rcu(&inode->i_rcu, i_callback);
}
/**
* drop_nlink - directly drop an inode's link count
* @inode: inode
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink. In cases
* where we are attempting to track writes to the
* filesystem, a decrement to zero means an imminent
* write when the file is truncated and actually unlinked
* on the filesystem.
*/
void drop_nlink(struct inode *inode)
{
WARN_ON(inode->i_nlink == 0);
inode->__i_nlink--;
if (!inode->i_nlink)
atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);
/**
* clear_nlink - directly zero an inode's link count
* @inode: inode
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink. See
* drop_nlink() for why we care about i_nlink hitting zero.
*/
void clear_nlink(struct inode *inode)
{
if (inode->i_nlink) {
inode->__i_nlink = 0;
atomic_long_inc(&inode->i_sb->s_remove_count);
}
}
EXPORT_SYMBOL(clear_nlink);
/**
* set_nlink - directly set an inode's link count
* @inode: inode
* @nlink: new nlink (should be non-zero)
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink.
*/
void set_nlink(struct inode *inode, unsigned int nlink)
{
if (!nlink) {
clear_nlink(inode);
} else {
/* Yes, some filesystems do change nlink from zero to one */
if (inode->i_nlink == 0)
atomic_long_dec(&inode->i_sb->s_remove_count);
inode->__i_nlink = nlink;
}
}
EXPORT_SYMBOL(set_nlink);
/**
* inc_nlink - directly increment an inode's link count
* @inode: inode
*
* This is a low-level filesystem helper to replace any
* direct filesystem manipulation of i_nlink. Currently,
* it is only here for parity with dec_nlink().
*/
void inc_nlink(struct inode *inode)
{
if (unlikely(inode->i_nlink == 0)) {
WARN_ON(!(inode->i_state & I_LINKABLE));
atomic_long_dec(&inode->i_sb->s_remove_count);
}
inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
static void __address_space_init_once(struct address_space *mapping)
{
xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
init_rwsem(&mapping->i_mmap_rwsem);
INIT_LIST_HEAD(&mapping->i_private_list);
spin_lock_init(&mapping->i_private_lock);
mapping->i_mmap = RB_ROOT_CACHED;
}
void address_space_init_once(struct address_space *mapping)
{
memset(mapping, 0, sizeof(*mapping));
__address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);
/*
* These are initializations that only need to be done
* once, because the fields are idempotent across use
* of the inode, so let the slab allocator be aware of that.
*/
void inode_init_once(struct inode *inode)
{
memset(inode, 0, sizeof(*inode));
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_devices);
INIT_LIST_HEAD(&inode->i_io_list);
INIT_LIST_HEAD(&inode->i_wb_list);
INIT_LIST_HEAD(&inode->i_lru);
INIT_LIST_HEAD(&inode->i_sb_list);
__address_space_init_once(&inode->i_data);
i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);
static void init_once(void *foo)
{
struct inode *inode = (struct inode *) foo;
inode_init_once(inode);
}
/*
* get additional reference to inode; caller must already hold one.
*/
void ihold(struct inode *inode)
{
WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);
static void __inode_add_lru(struct inode *inode, bool rotate)
{
if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
return;
if (icount_read(inode))
return;
if (!(inode->i_sb->s_flags & SB_ACTIVE))
return;
if (!mapping_shrinkable(&inode->i_data))
return;
if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_inc(nr_unused);
else if (rotate)
inode->i_state |= I_REFERENCED;
}
struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
struct inode *inode, u32 bit)
{
void *bit_address;
bit_address = inode_state_wait_address(inode, bit);
init_wait_var_entry(wqe, bit_address, 0);
return __var_waitqueue(bit_address);
}
EXPORT_SYMBOL(inode_bit_waitqueue);
/*
* Add inode to LRU if needed (inode is unused and clean).
*
* Needs inode->i_lock held.
*/
void inode_add_lru(struct inode *inode)
{
__inode_add_lru(inode, false);
}
static void inode_lru_list_del(struct inode *inode)
{
if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_dec(nr_unused);
}
static void inode_pin_lru_isolating(struct inode *inode)
{
lockdep_assert_held(&inode->i_lock);
WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
inode->i_state |= I_LRU_ISOLATING;
}
static void inode_unpin_lru_isolating(struct inode *inode)
{
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
inode->i_state &= ~I_LRU_ISOLATING;
/* Called with inode->i_lock which ensures memory ordering. */
inode_wake_up_bit(inode, __I_LRU_ISOLATING);
spin_unlock(&inode->i_lock);
}
static void inode_wait_for_lru_isolating(struct inode *inode)
{
struct wait_bit_queue_entry wqe;
struct wait_queue_head *wq_head;
lockdep_assert_held(&inode->i_lock);
if (!(inode->i_state & I_LRU_ISOLATING))
return;
wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
for (;;) {
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
/*
* Checking I_LRU_ISOLATING with inode->i_lock guarantees
* memory ordering.
*/
if (!(inode->i_state & I_LRU_ISOLATING))
break;
spin_unlock(&inode->i_lock);
schedule();
spin_lock(&inode->i_lock);
}
finish_wait(wq_head, &wqe.wq_entry);
WARN_ON(inode->i_state & I_LRU_ISOLATING);
}
/**
* inode_sb_list_add - add inode to the superblock list of inodes
* @inode: inode to add
*/
void inode_sb_list_add(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
spin_lock(&sb->s_inode_list_lock);
list_add(&inode->i_sb_list, &sb->s_inodes);
spin_unlock(&sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void inode_sb_list_del(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
if (!list_empty(&inode->i_sb_list)) {
spin_lock(&sb->s_inode_list_lock);
list_del_init(&inode->i_sb_list);
spin_unlock(&sb->s_inode_list_lock);
}
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
unsigned long tmp;
tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
L1_CACHE_BYTES;
tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
return tmp & i_hash_mask;
}
/**
* __insert_inode_hash - hash an inode
* @inode: unhashed inode
* @hashval: unsigned long value used to locate this object in the
* inode_hashtable.
*
* Add an inode to the inode hash for this superblock.
*/
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
hlist_add_head_rcu(&inode->i_hash, b);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
/**
* __remove_inode_hash - remove an inode from the hash
* @inode: inode to unhash
*
* Remove an inode from the superblock.
*/
void __remove_inode_hash(struct inode *inode)
{
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
hlist_del_init_rcu(&inode->i_hash);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);
void dump_mapping(const struct address_space *mapping)
{
struct inode *host;
const struct address_space_operations *a_ops;
struct hlist_node *dentry_first;
struct dentry *dentry_ptr;
struct dentry dentry;
char fname[64] = {};
unsigned long ino;
/*
* If mapping is an invalid pointer, we don't want to crash
* accessing it, so probe everything depending on it carefully.
*/
if (get_kernel_nofault(host, &mapping->host) ||
get_kernel_nofault(a_ops, &mapping->a_ops)) {
pr_warn("invalid mapping:%px\n", mapping);
return;
}
if (!host) {
pr_warn("aops:%ps\n", a_ops);
return;
}
if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
get_kernel_nofault(ino, &host->i_ino)) {
pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
return;
}
if (!dentry_first) {
pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
return;
}
dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
if (get_kernel_nofault(dentry, dentry_ptr) ||
!dentry.d_parent || !dentry.d_name.name) {
pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
a_ops, ino, dentry_ptr);
return;
}
if (strncpy_from_kernel_nofault(fname, dentry.d_name.name, 63) < 0)
strscpy(fname, "<invalid>");
/*
* Even if strncpy_from_kernel_nofault() succeeded,
* the fname could be unreliable
*/
pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n",
a_ops, ino, fname);
}
void clear_inode(struct inode *inode)
{
/*
* We have to cycle the i_pages lock here because reclaim can be in the
* process of removing the last page (in __filemap_remove_folio())
* and we must not free the mapping under it.
*/
xa_lock_irq(&inode->i_data.i_pages);
BUG_ON(inode->i_data.nrpages);
/*
* Almost always, mapping_empty(&inode->i_data) here; but there are
* two known and long-standing ways in which nodes may get left behind
* (when deep radix-tree node allocation failed partway; or when THP
* collapse_file() failed). Until those two known cases are cleaned up,
* or a cleanup function is called here, do not BUG_ON(!mapping_empty),
* nor even WARN_ON(!mapping_empty).
*/
xa_unlock_irq(&inode->i_data.i_pages);
BUG_ON(!list_empty(&inode->i_data.i_private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
BUG_ON(!list_empty(&inode->i_wb_list));
/* don't need i_lock here, no concurrent mods to i_state */
inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
/*
* Free the inode passed in, removing it from the lists it is still connected
* to. We remove any pages still attached to the inode and wait for any IO that
* is still in progress before finally destroying the inode.
*
* An inode must already be marked I_FREEING so that we avoid the inode being
* moved back onto lists if we race with other code that manipulates the lists
* (e.g. writeback_single_inode). The caller is responsible for setting this.
*
* An inode must already be removed from the LRU list before being evicted from
* the cache. This should occur atomically with setting the I_FREEING state
* flag, so no inodes here should ever be on the LRU when being evicted.
*/
static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
if (!list_empty(&inode->i_io_list))
inode_io_list_del(inode);
inode_sb_list_del(inode);
spin_lock(&inode->i_lock);
inode_wait_for_lru_isolating(inode);
/*
* Wait for flusher thread to be done with the inode so that filesystem
* does not start destroying it while writeback is still running. Since
* the inode has I_FREEING set, flusher thread won't start new work on
* the inode. We just have to wait for running writeback to finish.
*/
inode_wait_for_writeback(inode);
spin_unlock(&inode->i_lock);
if (op->evict_inode) {
op->evict_inode(inode);
} else {
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
}
if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
remove_inode_hash(inode);
/*
* Wake up waiters in __wait_on_freeing_inode().
*
* It is an invariant that any thread we need to wake up is already
* accounted for before remove_inode_hash() acquires ->i_lock -- both
* sides take the lock and sleep is aborted if the inode is found
* unhashed. Thus either the sleeper wins and goes off CPU, or removal
* wins and the sleeper aborts after testing with the lock.
*
* This also means we don't need any fences for the call below.
*/
inode_wake_up_bit(inode, __I_NEW);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
destroy_inode(inode);
}
/*
* dispose_list - dispose of the contents of a local list
* @head: the head of the list to free
*
* Dispose-list gets a local list with local inodes in it, so it doesn't
* need to worry about list corruption and SMP locks.
*/
static void dispose_list(struct list_head *head)
{
while (!list_empty(head)) {
struct inode *inode;
inode = list_first_entry(head, struct inode, i_lru);
list_del_init(&inode->i_lru);
evict(inode);
cond_resched();
}
}
/**
* evict_inodes - evict all evictable inodes for a superblock
* @sb: superblock to operate on
*
* Make sure that no inodes with zero refcount are retained. This is
* called by superblock shutdown after having SB_ACTIVE flag removed,
* so any inode reaching zero refcount during or after that call will
* be immediately evicted.
*/
void evict_inodes(struct super_block *sb)
{
struct inode *inode;
LIST_HEAD(dispose);
again:
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
if (icount_read(inode))
continue;
spin_lock(&inode->i_lock);
if (icount_read(inode)) {
spin_unlock(&inode->i_lock);
continue;
}
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
spin_unlock(&inode->i_lock);
continue;
}
inode->i_state |= I_FREEING;
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose);
/*
* We can have a ton of inodes to evict at unmount time given
* enough memory, check to see if we need to go to sleep for a
* bit so we don't livelock.
*/
if (need_resched()) {
spin_unlock(&sb->s_inode_list_lock);
cond_resched();
dispose_list(&dispose);
goto again;
}
}
spin_unlock(&sb->s_inode_list_lock);
dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);
/*
* Isolate the inode from the LRU in preparation for freeing it.
*
* If the inode has the I_REFERENCED flag set, then it means that it has been
* used recently - the flag is set in iput_final(). When we encounter such an
* inode, clear the flag and move it to the back of the LRU so it gets another
* pass through the LRU before it gets reclaimed. This is necessary because of
* the fact we are doing lazy LRU updates to minimise lock contention so the
* LRU does not have strict ordering. Hence we don't want to reclaim inodes
* with this flag set because they are the inodes that are out of order.
*/
static enum lru_status inode_lru_isolate(struct list_head *item,
struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct inode *inode = container_of(item, struct inode, i_lru);
/*
* We are inverting the lru lock/inode->i_lock here, so use a
* trylock. If we fail to get the lock, just skip it.
*/
if (!spin_trylock(&inode->i_lock))
return LRU_SKIP;
/*
* Inodes can get referenced, redirtied, or repopulated while
* they're already on the LRU, and this can make them
* unreclaimable for a while. Remove them lazily here; iput,
* sync, or the last page cache deletion will requeue them.
*/
if (icount_read(inode) ||
(inode->i_state & ~I_REFERENCED) ||
!mapping_shrinkable(&inode->i_data)) {
list_lru_isolate(lru, &inode->i_lru);
spin_unlock(&inode->i_lock);
this_cpu_dec(nr_unused);
return LRU_REMOVED;
}
/* Recently referenced inodes get one more pass */
if (inode->i_state & I_REFERENCED) {
inode->i_state &= ~I_REFERENCED;
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
}
/*
* On highmem systems, mapping_shrinkable() permits dropping
* page cache in order to free up struct inodes: lowmem might
* be under pressure before the cache inside the highmem zone.
*/
if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
inode_pin_lru_isolating(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&lru->lock);
if (remove_inode_buffers(inode)) {
unsigned long reap;
reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
if (current_is_kswapd())
__count_vm_events(KSWAPD_INODESTEAL, reap);
else
__count_vm_events(PGINODESTEAL, reap);
mm_account_reclaimed_pages(reap);
}
inode_unpin_lru_isolating(inode);
return LRU_RETRY;
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
list_lru_isolate_move(lru, &inode->i_lru, freeable);
spin_unlock(&inode->i_lock);
this_cpu_dec(nr_unused);
return LRU_REMOVED;
}
/*
* Walk the superblock inode LRU for freeable inodes and attempt to free them.
* This is called from the superblock shrinker function with a number of inodes
* to trim from the LRU. Inodes to be freed are moved to a temporary list and
* then are freed outside inode_lock by dispose_list().
*/
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
LIST_HEAD(freeable);
long freed;
freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
inode_lru_isolate, &freeable);
dispose_list(&freeable);
return freed;
}
static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
/*
* Called with the inode lock held.
*/
static struct inode *find_inode(struct super_block *sb,
struct hlist_head *head,
int (*test)(struct inode *, void *),
void *data, bool is_inode_hash_locked)
{
struct inode *inode = NULL;
if (is_inode_hash_locked)
lockdep_assert_held(&inode_hash_lock);
else
lockdep_assert_not_held(&inode_hash_lock);
rcu_read_lock();
repeat:
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_sb != sb)
continue;
if (!test(inode, data))
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return inode;
}
rcu_read_unlock();
return NULL;
}
/*
* find_inode_fast is the fast path version of find_inode, see the comment at
* iget_locked for details.
*/
static struct inode *find_inode_fast(struct super_block *sb,
struct hlist_head *head, unsigned long ino,
bool is_inode_hash_locked)
{
struct inode *inode = NULL;
if (is_inode_hash_locked)
lockdep_assert_held(&inode_hash_lock);
else
lockdep_assert_not_held(&inode_hash_lock);
rcu_read_lock();
repeat:
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_ino != ino)
continue;
if (inode->i_sb != sb)
continue;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode, is_inode_hash_locked);
goto repeat;
}
if (unlikely(inode->i_state & I_CREATING)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return ERR_PTR(-ESTALE);
}
__iget(inode);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
return inode;
}
rcu_read_unlock();
return NULL;
}
/*
* Each cpu owns a range of LAST_INO_BATCH numbers.
* 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
* to renew the exhausted range.
*
* This does not significantly increase overflow rate because every CPU can
* consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
* NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
* 2^32 range, and is a worst-case. Even a 50% wastage would only increase
* overflow rate by 2x, which does not seem too significant.
*
* On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
* error if st_ino won't fit in target struct field. Use 32bit counter
* here to attempt to avoid that.
*/
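/*
 * Worked example of the wastage bound above (illustrative): with NR_CPUS =
 * 4096 and LAST_INO_BATCH = 1024, at most 4096 * 1023 (about 4.2 million)
 * inode numbers can sit unused in per-cpu ranges, i.e. roughly 0.1% of the
 * 2^32 space.
 */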
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);
unsigned int get_next_ino(void)
{
unsigned int *p = &get_cpu_var(last_ino);
unsigned int res = *p;
#ifdef CONFIG_SMP
if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
static atomic_t shared_last_ino;
int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
res = next - LAST_INO_BATCH;
}
#endif
res++;
/* get_next_ino should not provide a 0 inode number */
if (unlikely(!res))
res++;
*p = res;
put_cpu_var(last_ino);
return res;
}
EXPORT_SYMBOL(get_next_ino);
/**
* new_inode - obtain an inode
* @sb: superblock
*
* Allocates a new inode for given superblock. The default gfp_mask
* for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
* If HIGHMEM pages are unsuitable or it is known that pages allocated
* for the page cache are not reclaimable or migratable,
* mapping_set_gfp_mask() must be called with suitable flags on the
* newly created inode's mapping.
*
*/
struct inode *new_inode(struct super_block *sb)
{
struct inode *inode;
inode = alloc_inode(sb);
if (inode)
inode_sb_list_add(inode);
return inode;
}
EXPORT_SYMBOL(new_inode);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
if (S_ISDIR(inode->i_mode)) {
struct file_system_type *type = inode->i_sb->s_type;
/* Set new key only if filesystem hasn't already changed it */
if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
/*
* ensure nobody is actually holding i_rwsem
*/
init_rwsem(&inode->i_rwsem);
lockdep_set_class(&inode->i_rwsem,
&type->i_mutex_dir_key);
}
}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif
/**
* unlock_new_inode - clear the I_NEW state and wake up any waiters
* @inode: new inode to unlock
*
* Called when the inode is fully initialised to clear the new state of the
* inode and wake up anyone waiting for the inode to finish initialisation.
*/
void unlock_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW & ~I_CREATING;
/*
* Pairs with the barrier in prepare_to_wait_event() to make sure
* ___wait_var_event() either sees the bit cleared or
* waitqueue_active() check in wake_up_var() sees the waiter.
*/
smp_mb();
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
void discard_new_inode(struct inode *inode)
{
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW;
/*
* Pairs with the barrier in prepare_to_wait_event() to make sure
* ___wait_var_event() either sees the bit cleared or
* waitqueue_active() check in wake_up_var() sees the waiter.
*/
smp_mb();
inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);
/**
* lock_two_nondirectories - take two i_mutexes on non-directory objects
*
* Lock any non-NULL argument. Passed objects must not be directories.
* Zero, one or two objects may be locked by this function.
*
* @inode1: first inode to lock
* @inode2: second inode to lock
*/
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
if (inode1)
WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
if (inode2)
WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
if (inode1 > inode2)
swap(inode1, inode2);
if (inode1)
inode_lock(inode1);
if (inode2 && inode2 != inode1)
inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
/**
* unlock_two_nondirectories - release locks from lock_two_nondirectories()
* @inode1: first inode to unlock
* @inode2: second inode to unlock
*/
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
if (inode1) {
WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
inode_unlock(inode1);
}
if (inode2 && inode2 != inode1) {
WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
inode_unlock(inode2);
}
}
EXPORT_SYMBOL(unlock_two_nondirectories);
/**
* inode_insert5 - obtain an inode from a mounted file system
* @inode: pre-allocated inode to use for insert to cache
* @hashval: hash value (usually inode number) to get
* @test: callback used for comparisons between inodes
* @set: callback used to initialize a new struct inode
* @data: opaque data pointer to pass to @test and @set
*
* Search for the inode specified by @hashval and @data in the inode cache,
* and if present return it with an increased reference count. This is a
* variant of iget5_locked() that doesn't allocate an inode.
*
* If the inode is not present in the cache, insert the pre-allocated inode and
* return it locked, hashed, and with the I_NEW flag set. The file system gets
* to fill it in before unlocking it via unlock_new_inode().
*
* Note that both @test and @set are called with the inode_hash_lock held, so
* they can't sleep.
*/
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *), void *data)
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
might_sleep();
again:
spin_lock(&inode_hash_lock);
old = find_inode(inode->i_sb, head, test, data, true);
if (unlikely(old)) {
/*
* Uhhuh, somebody else created the same inode under us.
* Use the old inode instead of the preallocated one.
*/
spin_unlock(&inode_hash_lock);
if (IS_ERR(old))
return NULL;
wait_on_inode(old);
if (unlikely(inode_unhashed(old))) {
iput(old);
goto again;
}
return old;
}
if (set && unlikely(set(inode, data))) {
spin_unlock(&inode_hash_lock);
return NULL;
}
/*
* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
*/
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW;
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
/*
* Add inode to the sb list if it's not already. It has I_NEW at this
* point, so it should be safe to test i_sb_list locklessly.
*/
if (list_empty(&inode->i_sb_list))
inode_sb_list_add(inode);
return inode;
}
EXPORT_SYMBOL(inode_insert5);
/**
* iget5_locked - obtain an inode from a mounted file system
* @sb: super block of file system
* @hashval: hash value (usually inode number) to get
* @test: callback used for comparisons between inodes
* @set: callback used to initialize a new struct inode
* @data: opaque data pointer to pass to @test and @set
*
* Search for the inode specified by @hashval and @data in the inode cache,
* and if present return it with an increased reference count. This is a
* generalized version of iget_locked() for file systems where the inode
* number is not sufficient for unique identification of an inode.
*
* If the inode is not present in the cache, allocate and insert a new inode
* and return it locked, hashed, and with the I_NEW flag set. The file system
* gets to fill it in before unlocking it via unlock_new_inode().
*
* Note that both @test and @set are called with the inode_hash_lock held, so
* they can't sleep.
*/
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *), void *data)
{
struct inode *inode = ilookup5(sb, hashval, test, data);
if (!inode) {
struct inode *new = alloc_inode(sb);
if (new) {
inode = inode_insert5(new, hashval, test, set, data);
if (unlikely(inode != new))
destroy_inode(new);
}
}
return inode;
}
EXPORT_SYMBOL(iget5_locked);
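/*
 * Example usage (illustrative sketch): a filesystem whose inodes are keyed by
 * something other than a plain inode number. EXAMPLE_I(), example_test() and
 * example_set() are hypothetical.
 *
 *	static int example_test(struct inode *inode, void *data)
 *	{
 *		return EXAMPLE_I(inode)->key == *(u64 *)data;
 *	}
 *
 *	static int example_set(struct inode *inode, void *data)
 *	{
 *		EXAMPLE_I(inode)->key = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hashval, example_test, example_set, &key);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... fill in the inode from the backing store ...
 *		unlock_new_inode(inode);
 *	}
 */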
/**
* iget5_locked_rcu - obtain an inode from a mounted file system
* @sb: super block of file system
* @hashval: hash value (usually inode number) to get
* @test: callback used for comparisons between inodes
* @set: callback used to initialize a new struct inode
* @data: opaque data pointer to pass to @test and @set
*
* This is equivalent to iget5_locked, except the @test callback must
* tolerate the inode not being stable, including being mid-teardown.
*/
struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *), void *data)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode, *new;
might_sleep();
again:
inode = find_inode(sb, head, test, data, false);
if (inode) {
if (IS_ERR(inode))
return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
}
return inode;
}
new = alloc_inode(sb);
if (new) {
inode = inode_insert5(new, hashval, test, set, data);
if (unlikely(inode != new))
destroy_inode(new);
}
return inode;
}
EXPORT_SYMBOL_GPL(iget5_locked_rcu);
/**
* iget_locked - obtain an inode from a mounted file system
* @sb: super block of file system
* @ino: inode number to get
*
* Search for the inode specified by @ino in the inode cache and if present
* return it with an increased reference count. This is for file systems
* where the inode number is sufficient for unique identification of an inode.
*
* If the inode is not in cache, allocate a new inode and return it locked,
* hashed, and with the I_NEW flag set. The file system gets to fill it in
* before unlocking it via unlock_new_inode().
*/
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
might_sleep();
again:
inode = find_inode_fast(sb, head, ino, false);
if (inode) {
if (IS_ERR(inode))
return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
}
return inode;
}
inode = alloc_inode(sb);
if (inode) {
struct inode *old;
spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
old = find_inode_fast(sb, head, ino, true);
if (!old) {
inode->i_ino = ino;
spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
inode_sb_list_add(inode);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
*/
return inode;
}
/*
* Uhhuh, somebody else created the same inode under
* us. Use the old inode instead of the one we just
* allocated.
*/
spin_unlock(&inode_hash_lock);
destroy_inode(inode);
if (IS_ERR(old))
return NULL;
inode = old;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
}
}
return inode;
}
EXPORT_SYMBOL(iget_locked);
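/*
 * Example usage (illustrative sketch): the common pattern for filesystems
 * that identify inodes by number alone. A cached inode is returned without
 * I_NEW set; a freshly allocated one must be filled in and then unlocked.
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;
 *	... read the on-disk inode and initialise the fields ...
 *	unlock_new_inode(inode);
 *	return inode;
 */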
/*
* search the inode cache for a matching inode number.
* If we find one, then the inode number we are trying to
* allocate is not unique and so we should not use it.
*
* Returns 1 if the inode number is unique, 0 if it is not.
*/
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
struct hlist_head *b = inode_hashtable + hash(sb, ino);
struct inode *inode;
hlist_for_each_entry_rcu(inode, b, i_hash) {
if (inode->i_ino == ino && inode->i_sb == sb)
return 0;
}
return 1;
}
/**
* iunique - get a unique inode number
* @sb: superblock
* @max_reserved: highest reserved inode number
*
* Obtain an inode number that is unique on the system for a given
* superblock. This is used by file systems that have no natural
* permanent inode numbering system. An inode number is returned that
* is higher than the reserved limit but unique.
*
* BUGS:
* With a large number of inodes live on the file system this function
* currently becomes quite slow.
*/
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
/*
* On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
* error if st_ino won't fit in target struct field. Use 32bit counter
* here to attempt to avoid that.
*/
static DEFINE_SPINLOCK(iunique_lock);
static unsigned int counter;
ino_t res;
rcu_read_lock();
spin_lock(&iunique_lock);
do {
if (counter <= max_reserved)
counter = max_reserved + 1;
res = counter++;
} while (!test_inode_iunique(sb, res));
spin_unlock(&iunique_lock);
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL(iunique);
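/*
 * Illustrative sketch only (not part of the original file): how a
 * pseudo-filesystem with no natural inode numbering might number a freshly
 * allocated inode. example_new_inode() is a hypothetical name.
 */
#if 0
static struct inode *example_new_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		/* Pick a number above the filesystem's reserved range. */
		inode->i_ino = iunique(sb, 1);
	return inode;
}
#endif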
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode->i_lock);
if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
__iget(inode);
spin_unlock(&inode->i_lock);
} else {
spin_unlock(&inode->i_lock);
/*
* Handle the case where s_op->clear_inode has not been
* called yet, and somebody is calling igrab
* while the inode is getting freed.
*/
inode = NULL;
}
return inode;
}
EXPORT_SYMBOL(igrab);
/**
* ilookup5_nowait - search for an inode in the inode cache
* @sb: super block of file system to search
* @hashval: hash value (usually inode number) to search for
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
*
* Search for the inode specified by @hashval and @data in the inode cache.
* If the inode is in the cache, the inode is returned with an incremented
* reference count.
*
* Note: I_NEW is not waited upon so you have to be very careful what you do
* with the returned inode. You probably should be using ilookup5() instead.
*
* Note2: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
spin_lock(&inode_hash_lock);
inode = find_inode(sb, head, test, data, true);
spin_unlock(&inode_hash_lock);
return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
/**
* ilookup5 - search for an inode in the inode cache
* @sb: super block of file system to search
* @hashval: hash value (usually inode number) to search for
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
*
* Search for the inode specified by @hashval and @data in the inode cache,
* and if the inode is in the cache, return the inode with an incremented
* reference count. Waits on I_NEW before returning the inode.
*
* This is a generalized version of ilookup() for file systems where the
* inode number is not sufficient for unique identification of an inode.
*
* Note: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct inode *inode;
might_sleep();
again:
inode = ilookup5_nowait(sb, hashval, test, data);
if (inode) {
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
}
}
return inode;
}
EXPORT_SYMBOL(ilookup5);
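/*
 * Illustrative sketch only (not part of the original file): looking up an
 * inode by a key other than the inode number. The example stores the key in
 * i_private and hashes the pointer value, both of which are assumptions made
 * purely for illustration; note that @test runs under inode_hash_lock and
 * must not sleep.
 */
#if 0
static int example_test(struct inode *inode, void *data)
{
	return inode->i_private == data;
}

static struct inode *example_lookup(struct super_block *sb, void *key)
{
	return ilookup5(sb, (unsigned long)key, example_test, key);
}
#endif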
/**
* ilookup - search for an inode in the inode cache
* @sb: super block of file system to search
* @ino: inode number to search for
*
* Search for the inode @ino in the inode cache, and if the inode is in the
* cache, the inode is returned with an incremented reference count.
*/
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
might_sleep();
again:
inode = find_inode_fast(sb, head, ino, false);
if (inode) {
if (IS_ERR(inode))
return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
goto again;
}
}
return inode;
}
EXPORT_SYMBOL(ilookup);
/**
* find_inode_nowait - find an inode in the inode cache
* @sb: super block of file system to search
* @hashval: hash value (usually inode number) to search for
* @match: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @match
*
* Search for the inode specified by @hashval and @data in the inode
* cache, where the helper function @match will return 0 if the inode
* does not match, 1 if the inode does match, and -1 if the search
* should be stopped. The @match function must be responsible for
* taking the i_lock spin_lock and checking i_state for an inode being
* freed or being initialized, and incrementing the reference count
* before returning 1. It also must not sleep, since it is called with
* the inode_hash_lock spinlock held.
*
* This is an even more generalized version of ilookup5() when the
* function must never block --- find_inode() can block in
* __wait_on_freeing_inode() --- or when the caller can not increment
* the reference count because the resulting iput() might cause an
* inode eviction. The tradeoff is that the @match function must be
* very carefully implemented.
*/
struct inode *find_inode_nowait(struct super_block *sb,
unsigned long hashval,
int (*match)(struct inode *, unsigned long,
void *),
void *data)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode, *ret_inode = NULL;
int mval;
spin_lock(&inode_hash_lock);
hlist_for_each_entry(inode, head, i_hash) {
if (inode->i_sb != sb)
continue;
mval = match(inode, hashval, data);
if (mval == 0)
continue;
if (mval == 1)
ret_inode = inode;
goto out;
}
out:
spin_unlock(&inode_hash_lock);
return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);
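/*
 * Illustrative sketch only (not part of the original file): a @match
 * callback obeying the rules above - it takes i_lock itself, skips inodes
 * that are being freed or initialized, takes a reference before returning 1,
 * and never sleeps. example_match()/example_find() are hypothetical and use
 * i_ino as the key only for illustration.
 */
#if 0
static int example_match(struct inode *inode, unsigned long hashval,
			 void *data)
{
	int ret = 0;

	if (inode->i_ino != hashval)
		return 0;	/* not a match, keep scanning the chain */
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW))) {
		__iget(inode);
		ret = 1;	/* found it and pinned it */
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

static struct inode *example_find(struct super_block *sb, unsigned long ino)
{
	return find_inode_nowait(sb, ino, example_match, NULL);
}
#endif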
/**
* find_inode_rcu - find an inode in the inode cache
* @sb: Super block of file system to search
* @hashval: Key to hash
* @test: Function to test match on an inode
* @data: Data for test function
*
* Search for the inode specified by @hashval and @data in the inode cache,
* where the helper function @test will return 0 if the inode does not match
* and 1 if it does. The @test function must be responsible for taking the
* i_lock spin_lock and checking i_state for an inode being freed or being
* initialized.
*
* If successful, this will return the inode for which the @test function
* returned 1 and NULL otherwise.
*
* The @test function is not permitted to take a ref on any inode presented.
* It is also not permitted to sleep.
*
* The caller must hold the RCU read lock.
*/
struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"suspicious find_inode_rcu() usage");
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_sb == sb &&
!(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
test(inode, data))
return inode;
}
return NULL;
}
EXPORT_SYMBOL(find_inode_rcu);
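/*
 * Illustrative sketch only (not part of the original file): a lockless
 * lookup that upgrades to a proper reference only once a candidate is found.
 * example_test() is the same kind of hypothetical comparison helper as the
 * one sketched after ilookup5() above.
 */
#if 0
static struct inode *example_lookup_rcu(struct super_block *sb,
					unsigned long hashval, void *key)
{
	struct inode *inode;

	rcu_read_lock();
	inode = find_inode_rcu(sb, hashval, example_test, key);
	/* igrab() rechecks I_FREEING/I_WILL_FREE under i_lock. */
	if (inode && !igrab(inode))
		inode = NULL;
	rcu_read_unlock();
	return inode;
}
#endif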
/**
* find_inode_by_ino_rcu - Find an inode in the inode cache
* @sb: Super block of file system to search
* @ino: The inode number to match
*
* Search the inode cache for an inode on @sb with inode number @ino,
* skipping any inode that is being freed.
*
* If successful, this will return the matching inode and NULL otherwise.
*
* No reference is taken on the returned inode and the search does not sleep.
*
* The caller must hold the RCU read lock.
*/
struct inode *find_inode_by_ino_rcu(struct super_block *sb,
unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"suspicious find_inode_by_ino_rcu() usage");
hlist_for_each_entry_rcu(inode, head, i_hash) {
if (inode->i_ino == ino &&
inode->i_sb == sb &&
!(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
return inode;
}
return NULL;
}
EXPORT_SYMBOL(find_inode_by_ino_rcu);
int insert_inode_locked(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
might_sleep();
while (1) {
struct inode *old = NULL;
spin_lock(&inode_hash_lock);
hlist_for_each_entry(old, head, i_hash) {
if (old->i_ino != ino)
continue;
if (old->i_sb != sb)
continue;
spin_lock(&old->i_lock);
if (old->i_state & (I_FREEING|I_WILL_FREE)) {
spin_unlock(&old->i_lock);
continue;
}
break;
}
if (likely(!old)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW | I_CREATING;
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
}
if (unlikely(old->i_state & I_CREATING)) {
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
return -EBUSY;
}
__iget(old);
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
wait_on_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
return -EBUSY;
}
iput(old);
}
}
EXPORT_SYMBOL(insert_inode_locked);
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct inode *old;
might_sleep();
inode->i_state |= I_CREATING;
old = inode_insert5(inode, hashval, test, NULL, data);
if (old != inode) {
iput(old);
return -EBUSY;
}
return 0;
}
EXPORT_SYMBOL(insert_inode_locked4);
int inode_just_drop(struct inode *inode)
{
return 1;
}
EXPORT_SYMBOL(inode_just_drop);
/*
* Called when we're dropping the last reference
* to an inode.
*
* Call the FS "drop_inode()" function, defaulting to
* the legacy UNIX filesystem behaviour. If it tells
* us to evict inode, do so. Otherwise, retain inode
* in cache if fs is alive, sync and evict if fs is
* shutting down.
*/
static void iput_final(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const struct super_operations *op = inode->i_sb->s_op;
unsigned long state;
int drop;
WARN_ON(inode->i_state & I_NEW);
if (op->drop_inode)
drop = op->drop_inode(inode);
else
drop = inode_generic_drop(inode);
if (!drop && !(inode->i_state & I_DONTCACHE) &&
(sb->s_flags & SB_ACTIVE)) {
__inode_add_lru(inode, true);
spin_unlock(&inode->i_lock);
return;
}
state = inode->i_state;
if (!drop) {
WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
spin_lock(&inode->i_lock);
state = inode->i_state;
WARN_ON(state & I_NEW);
state &= ~I_WILL_FREE;
}
WRITE_ONCE(inode->i_state, state | I_FREEING);
if (!list_empty(&inode->i_lru))
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
evict(inode);
}
/**
* iput - put an inode
* @inode: inode to put
*
* Puts an inode, dropping its usage count. If the inode use count hits
* zero, the inode is then freed and may also be destroyed.
*
* Consequently, iput() can sleep.
*/
void iput(struct inode *inode)
{
might_sleep();
if (unlikely(!inode))
return;
retry:
lockdep_assert_not_held(&inode->i_lock);
VFS_BUG_ON_INODE(inode->i_state & I_CLEAR, inode);
/*
* Note this assert is technically racy as if the count is bogusly
* equal to one, then two CPUs racing to further drop it can both
* conclude it's fine.
*/
VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 1, inode);
if (atomic_add_unless(&inode->i_count, -1, 1))
return;
if ((inode->i_state & I_DIRTY_TIME) && inode->i_nlink) {
trace_writeback_lazytime_iput(inode);
mark_inode_dirty_sync(inode);
goto retry;
}
spin_lock(&inode->i_lock);
if (unlikely((inode->i_state & I_DIRTY_TIME) && inode->i_nlink)) {
spin_unlock(&inode->i_lock);
goto retry;
}
if (!atomic_dec_and_test(&inode->i_count)) {
spin_unlock(&inode->i_lock);
return;
}
/*
* iput_final() drops ->i_lock, we can't assert on it as the inode may
* be deallocated by the time the call returns.
*/
iput_final(inode);
}
EXPORT_SYMBOL(iput);
/**
* iput_not_last - put an inode assuming this is not the last reference
* @inode: inode to put
*/
void iput_not_last(struct inode *inode)
{
VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 2, inode);
WARN_ON(atomic_sub_return(1, &inode->i_count) == 0);
}
EXPORT_SYMBOL(iput_not_last);
#ifdef CONFIG_BLOCK
/**
* bmap - find a block number in a file
* @inode: inode owning the block number being requested
* @block: pointer containing the block to find
*
* Replaces the value in ``*block`` with the block number on the backing
* device that corresponds to the requested block number in the file.
* That is, when asked for block 4 of inode 1, the function will replace the
* 4 in ``*block`` with the disk block, relative to the start of the disk,
* that holds that block of the file.
*
* Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
* hole, returns 0 and ``*block`` is also set to 0.
*/
int bmap(struct inode *inode, sector_t *block)
{
if (!inode->i_mapping->a_ops->bmap)
return -EINVAL;
*block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
return 0;
}
EXPORT_SYMBOL(bmap);
#endif
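/*
 * Illustrative sketch only (not part of the original file): mapping logical
 * block 4 of a file to a block on the backing device. A value of 0 in
 * *block can mean either "hole" or "no ->bmap support", so callers that care
 * must check the return code of bmap() itself.
 */
#if 0
static sector_t example_block_to_sector(struct inode *inode)
{
	sector_t blk = 4;	/* logical block number within the file */

	if (bmap(inode, &blk))
		return 0;	/* filesystem has no ->bmap */
	return blk;
}
#endif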
/*
* With relative atime, only update atime if the previous atime is
* earlier than or equal to either the ctime or mtime,
* or if at least a day has passed since the last atime update.
*/
static bool relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
{
struct timespec64 atime, mtime, ctime;
if (!(mnt->mnt_flags & MNT_RELATIME))
return true;
/*
* Is mtime younger than or equal to atime? If yes, update atime:
*/
atime = inode_get_atime(inode);
mtime = inode_get_mtime(inode);
if (timespec64_compare(&mtime, &atime) >= 0)
return true;
/*
* Is ctime younger than or equal to atime? If yes, update atime:
*/
ctime = inode_get_ctime(inode);
if (timespec64_compare(&ctime, &atime) >= 0)
return true;
/*
* Is the previous atime value older than a day? If yes,
* update atime:
*/
if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
return true;
/*
* Good, we can skip the atime update:
*/
return false;
}
/**
* inode_update_timestamps - update the timestamps on the inode
* @inode: inode to be updated
* @flags: S_* flags that needed to be updated
*
* The update_time function is called when an inode's timestamps need to be
* updated for a read or write operation. This function handles updating the
* actual timestamps. It's up to the caller to ensure that the inode is marked
* dirty appropriately.
*
* In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
* attempt to update all three of them. S_ATIME updates can be handled
* independently of the rest.
*
* Returns a set of S_* flags indicating which values changed.
*/
int inode_update_timestamps(struct inode *inode, int flags)
{
int updated = 0;
struct timespec64 now;
if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
struct timespec64 ctime = inode_get_ctime(inode);
struct timespec64 mtime = inode_get_mtime(inode);
now = inode_set_ctime_current(inode);
if (!timespec64_equal(&now, &ctime))
updated |= S_CTIME;
if (!timespec64_equal(&now, &mtime)) {
inode_set_mtime_to_ts(inode, now);
updated |= S_MTIME;
}
if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
updated |= S_VERSION;
} else {
now = current_time(inode);
}
if (flags & S_ATIME) {
struct timespec64 atime = inode_get_atime(inode);
if (!timespec64_equal(&now, &atime)) {
inode_set_atime_to_ts(inode, now);
updated |= S_ATIME;
}
}
return updated;
}
EXPORT_SYMBOL(inode_update_timestamps);
/**
* generic_update_time - update the timestamps on the inode
* @inode: inode to be updated
* @flags: S_* flags that needed to be updated
*
* The update_time function is called when an inode's timestamps need to be
* updated for a read or write operation. In the case where any of S_MTIME, S_CTIME,
* or S_VERSION need to be updated we attempt to update all three of them. S_ATIME
* updates can be done independently of the rest.
*
* Returns a S_* mask indicating which fields were updated.
*/
int generic_update_time(struct inode *inode, int flags)
{
int updated = inode_update_timestamps(inode, flags);
int dirty_flags = 0;
if (updated & (S_ATIME|S_MTIME|S_CTIME))
dirty_flags = inode->i_sb->s_flags & SB_LAZYTIME ? I_DIRTY_TIME : I_DIRTY_SYNC;
if (updated & S_VERSION)
dirty_flags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, dirty_flags);
return updated;
}
EXPORT_SYMBOL(generic_update_time);
/*
* This does the actual work of updating an inode's time or version. The
* caller must have called mnt_want_write() before calling this.
*/
int inode_update_time(struct inode *inode, int flags)
{
if (inode->i_op->update_time)
return inode->i_op->update_time(inode, flags);
generic_update_time(inode, flags);
return 0;
}
EXPORT_SYMBOL(inode_update_time);
/**
* atime_needs_update - check whether the access time needs updating
* @path: the &struct path being accessed
* @inode: inode to check
*
* Determine whether the access time on an inode should be updated, taking
* read only file systems and media into account, as well as the "noatime"
* mount flag and inode specific "noatime" markers.
*/
bool atime_needs_update(const struct path *path, struct inode *inode)
{
struct vfsmount *mnt = path->mnt;
struct timespec64 now, atime;
if (inode->i_flags & S_NOATIME)
return false;
/*
* Atime updates will likely cause i_uid and i_gid to be written
* back improperly if their true value is unknown to the vfs.
*/
if (HAS_UNMAPPED_ID(mnt_idmap(mnt), inode))
return false;
if (IS_NOATIME(inode))
return false;
if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return false;
if (mnt->mnt_flags & MNT_NOATIME)
return false;
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
return false;
now = current_time(inode);
if (!relatime_need_update(mnt, inode, now))
return false;
atime = inode_get_atime(inode);
if (timespec64_equal(&atime, &now))
return false;
return true;
}
void touch_atime(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
struct inode *inode = d_inode(path->dentry);
if (!atime_needs_update(path, inode))
return;
if (!sb_start_write_trylock(inode->i_sb))
return;
if (mnt_get_write_access(mnt) != 0)
goto skip_update;
/*
* File systems can error out when updating inodes if they need to
* allocate new space to modify an inode (such is the case for
* Btrfs), but since we touch atime while walking down the path we
* really don't care if we failed to update the atime of the file,
* so just ignore the return value.
* We may also fail on filesystems that have the ability to make parts
* of the fs read only, e.g. subvolumes in Btrfs.
*/
inode_update_time(inode, S_ATIME);
mnt_put_write_access(mnt);
skip_update:
sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
/*
* Return mask of changes for notify_change() that need to be done as a
* response to write or truncate. Return 0 if nothing has to be changed.
* Negative value on error (change should be denied).
*/
int dentry_needs_remove_privs(struct mnt_idmap *idmap,
struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
int mask = 0;
int ret;
if (IS_NOSEC(inode))
return 0;
mask = setattr_should_drop_suidgid(idmap, inode);
ret = security_inode_need_killpriv(dentry);
if (ret < 0)
return ret;
if (ret)
mask |= ATTR_KILL_PRIV;
return mask;
}
static int __remove_privs(struct mnt_idmap *idmap,
struct dentry *dentry, int kill)
{
struct iattr newattrs;
newattrs.ia_valid = ATTR_FORCE | kill;
/*
* Note we call this on write, so notify_change will not
* encounter any conflicting delegations:
*/
return notify_change(idmap, dentry, &newattrs, NULL);
}
static int file_remove_privs_flags(struct file *file, unsigned int flags)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = file_inode(file);
int error = 0;
int kill;
if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
return 0;
kill = dentry_needs_remove_privs(file_mnt_idmap(file), dentry);
if (kill < 0)
return kill;
if (kill) {
if (flags & IOCB_NOWAIT)
return -EAGAIN;
error = __remove_privs(file_mnt_idmap(file), dentry, kill);
}
if (!error)
inode_has_no_xattr(inode);
return error;
}
/**
* file_remove_privs - remove special file privileges (suid, capabilities)
* @file: file to remove privileges from
*
* When file is modified by a write or truncation ensure that special
* file privileges are removed.
*
* Return: 0 on success, negative errno on failure.
*/
int file_remove_privs(struct file *file)
{
return file_remove_privs_flags(file, 0);
}
EXPORT_SYMBOL(file_remove_privs);
/**
* current_time - Return FS time (possibly fine-grained)
* @inode: inode.
*
* Return the current time truncated to the time granularity supported by
* the fs, as suitable for a ctime/mtime change. If the ctime is flagged
* as having been QUERIED, get a fine-grained timestamp, but don't update
* the floor.
*
* For a multigrain inode, this is effectively an estimate of the timestamp
* that a file would receive. An actual update must go through
* inode_set_ctime_current().
*/
struct timespec64 current_time(struct inode *inode)
{
struct timespec64 now;
u32 cns;
ktime_get_coarse_real_ts64_mg(&now);
if (!is_mgtime(inode))
goto out;
/* If nothing has queried it, then coarse time is fine */
cns = smp_load_acquire(&inode->i_ctime_nsec);
if (cns & I_CTIME_QUERIED) {
/*
* If there is no apparent change, then get a fine-grained
* timestamp.
*/
if (now.tv_nsec == (cns & ~I_CTIME_QUERIED))
ktime_get_real_ts64(&now);
}
out:
return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);
static int inode_needs_update_time(struct inode *inode)
{
struct timespec64 now, ts;
int sync_it = 0;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
now = current_time(inode);
ts = inode_get_mtime(inode);
if (!timespec64_equal(&ts, &now))
sync_it |= S_MTIME;
ts = inode_get_ctime(inode);
if (!timespec64_equal(&ts, &now))
sync_it |= S_CTIME;
if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
sync_it |= S_VERSION;
return sync_it;
}
static int __file_update_time(struct file *file, int sync_mode)
{
int ret = 0;
struct inode *inode = file_inode(file);
/* try to update time settings */
if (!mnt_get_write_access_file(file)) {
ret = inode_update_time(inode, sync_mode);
mnt_put_write_access_file(file);
}
return ret;
}
/**
* file_update_time - update mtime and ctime time
* @file: file accessed
*
* Update the mtime and ctime members of an inode and mark the inode for
* writeback. Note that this function is meant exclusively for usage in
* the file write path of filesystems, and filesystems may choose to
* explicitly ignore updates via this function with the S_NOCMTIME inode
* flag, e.g. for network filesystems where these timestamps are handled
* by the server. This can return an error for file systems that need to
* allocate space in order to update an inode.
*
* Return: 0 on success, negative errno on failure.
*/
int file_update_time(struct file *file)
{
int ret;
struct inode *inode = file_inode(file);
ret = inode_needs_update_time(inode);
if (ret <= 0)
return ret;
return __file_update_time(file, ret);
}
EXPORT_SYMBOL(file_update_time);
/**
* file_modified_flags - handle mandated vfs changes when modifying a file
* @file: file that was modified
* @flags: kiocb flags
*
* When file has been modified ensure that special
* file privileges are removed and time settings are updated.
*
* If IOCB_NOWAIT is set, special file privileges will not be removed and
* time settings will not be updated. It will return -EAGAIN.
*
* Context: Caller must hold the file's inode lock.
*
* Return: 0 on success, negative errno on failure.
*/
static int file_modified_flags(struct file *file, int flags)
{
int ret;
struct inode *inode = file_inode(file);
/*
* Clear the security bits if the process is not being run by root.
* This keeps people from modifying setuid and setgid binaries.
*/
ret = file_remove_privs_flags(file, flags);
if (ret)
return ret;
if (unlikely(file->f_mode & FMODE_NOCMTIME))
return 0;
ret = inode_needs_update_time(inode);
if (ret <= 0)
return ret;
if (flags & IOCB_NOWAIT)
return -EAGAIN;
return __file_update_time(file, ret);
}
/**
* file_modified - handle mandated vfs changes when modifying a file
* @file: file that was modified
*
* When file has been modified ensure that special
* file privileges are removed and time settings are updated.
*
* Context: Caller must hold the file's inode lock.
*
* Return: 0 on success, negative errno on failure.
*/
int file_modified(struct file *file)
{
return file_modified_flags(file, 0);
}
EXPORT_SYMBOL(file_modified);
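/*
 * Illustrative sketch only (not part of the original file): the usual shape
 * of a ->write_iter implementation calling file_modified() with the inode
 * lock held before doing the actual write. example_write_iter() and
 * example_do_write() are hypothetical.
 */
#if 0
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	/* Strip suid/sgid and update mtime/ctime as mandated by the VFS. */
	ret = file_modified(iocb->ki_filp);
	if (ret)
		goto out;
	ret = example_do_write(iocb, from);	/* hypothetical */
out:
	inode_unlock(inode);
	return ret;
}
#endif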
/**
* kiocb_modified - handle mandated vfs changes when modifying a file
* @iocb: iocb that was modified
*
* When file has been modified ensure that special
* file privileges are removed and time settings are updated.
*
* Context: Caller must hold the file's inode lock.
*
* Return: 0 on success, negative errno on failure.
*/
int kiocb_modified(struct kiocb *iocb)
{
return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
}
EXPORT_SYMBOL_GPL(kiocb_modified);
int inode_needs_sync(struct inode *inode)
{
if (IS_SYNC(inode))
return 1;
if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
return 1;
return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
/*
* If we try to find an inode in the inode hash while it is being
* deleted, we have to wait until the filesystem completes its
* deletion before reporting that it isn't found. This function waits
* until the deletion _might_ have completed. Callers are responsible
* to recheck inode state.
*
* It doesn't matter if I_NEW is not set initially, a call to
* wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
* will DTRT.
*/
static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
{
struct wait_bit_queue_entry wqe;
struct wait_queue_head *wq_head;
/*
* Handle racing against evict(), see that routine for more details.
*/
if (unlikely(inode_unhashed(inode))) {
WARN_ON(is_inode_hash_locked);
spin_unlock(&inode->i_lock);
return;
}
wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
if (is_inode_hash_locked)
spin_unlock(&inode_hash_lock);
schedule();
finish_wait(wq_head, &wqe.wq_entry);
if (is_inode_hash_locked)
spin_lock(&inode_hash_lock);
rcu_read_lock();
}
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
if (!str)
return 0;
ihash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("ihash_entries=", set_ihash_entries);
/*
* Initialize the waitqueues and inode hash table.
*/
void __init inode_init_early(void)
{
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
if (hashdist)
return;
inode_hashtable =
alloc_large_system_hash("Inode-cache",
sizeof(struct hlist_head),
ihash_entries,
14,
HASH_EARLY | HASH_ZERO,
&i_hash_shift,
&i_hash_mask,
0,
0);
}
void __init inode_init(void)
{
/* inode slab cache */
inode_cachep = kmem_cache_create("inode_cache",
sizeof(struct inode),
0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_ACCOUNT),
init_once);
/* Hash may have been set up in inode_init_early */
if (!hashdist)
return;
inode_hashtable =
alloc_large_system_hash("Inode-cache",
sizeof(struct hlist_head),
ihash_entries,
14,
HASH_ZERO,
&i_hash_shift,
&i_hash_mask,
0,
0);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
inode->i_mode = mode;
switch (inode->i_mode & S_IFMT) {
case S_IFCHR:
inode->i_fop = &def_chr_fops;
inode->i_rdev = rdev;
break;
case S_IFBLK:
if (IS_ENABLED(CONFIG_BLOCK))
inode->i_fop = &def_blk_fops;
inode->i_rdev = rdev;
break;
case S_IFIFO:
inode->i_fop = &pipefifo_fops;
break;
case S_IFSOCK:
/* leave it no_open_fops */
break;
default:
printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
" inode %s:%lu\n", mode, inode->i_sb->s_id,
inode->i_ino);
break;
}
}
EXPORT_SYMBOL(init_special_inode);
/**
* inode_init_owner - Init uid,gid,mode for new inode according to posix standards
* @idmap: idmap of the mount the inode was created from
* @inode: New inode
* @dir: Directory inode
* @mode: mode of the new inode
*
* If the inode has been created through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions
* and initializing i_uid and i_gid. On non-idmapped mounts or if permission
* checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
*/
void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
const struct inode *dir, umode_t mode)
{
inode_fsuid_set(inode, idmap);
if (dir && dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
/* Directories are special, and always inherit S_ISGID */
if (S_ISDIR(mode))
mode |= S_ISGID;
} else
inode_fsgid_set(inode, idmap);
inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
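/*
 * Illustrative sketch only (not part of the original file): typical use in
 * an inode allocation path - let inode_init_owner() set i_uid, i_gid and
 * i_mode, honouring the parent's setgid bit and the mount's idmapping.
 * example_create_inode() is a hypothetical name.
 */
#if 0
static struct inode *example_create_inode(struct mnt_idmap *idmap,
					  struct inode *dir, umode_t mode)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (inode)
		inode_init_owner(idmap, inode, dir, mode);
	return inode;
}
#endif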
/**
* inode_owner_or_capable - check current task permissions to inode
* @idmap: idmap of the mount the inode was found from
* @inode: inode being checked
*
* Return true if current either has CAP_FOWNER in a namespace with the
* inode owner uid mapped, or owns the file.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
bool inode_owner_or_capable(struct mnt_idmap *idmap,
const struct inode *inode)
{
vfsuid_t vfsuid;
struct user_namespace *ns;
vfsuid = i_uid_into_vfsuid(idmap, inode);
if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
return true;
ns = current_user_ns();
if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
return true;
return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
/*
* Direct i/o helper functions
*/
bool inode_dio_finished(const struct inode *inode)
{
return atomic_read(&inode->i_dio_count) == 0;
}
EXPORT_SYMBOL(inode_dio_finished);
/**
* inode_dio_wait - wait for outstanding DIO requests to finish
* @inode: inode to wait for
*
* Waits for all pending direct I/O requests to finish so that we can
* proceed with a truncate or equivalent operation.
*
* Must be called under a lock that serializes taking new references
* to i_dio_count, usually by inode->i_rwsem.
*/
void inode_dio_wait(struct inode *inode)
{
wait_var_event(&inode->i_dio_count, inode_dio_finished(inode));
}
EXPORT_SYMBOL(inode_dio_wait);
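/*
 * Illustrative sketch only (not part of the original file): the usual
 * ordering in a truncate-like path - take i_rwsem so that no new direct I/O
 * references can be taken, then drain the in-flight requests before changing
 * the file size.
 */
#if 0
static void example_truncate(struct inode *inode, loff_t newsize)
{
	inode_lock(inode);	/* serializes new i_dio_count references */
	inode_dio_wait(inode);	/* wait for outstanding direct I/O */
	truncate_setsize(inode, newsize);
	inode_unlock(inode);
}
#endif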
void inode_dio_wait_interruptible(struct inode *inode)
{
wait_var_event_interruptible(&inode->i_dio_count,
inode_dio_finished(inode));
}
EXPORT_SYMBOL(inode_dio_wait_interruptible);
/*
* inode_set_flags - atomically set some inode flags
*
* Note: the caller should be holding i_rwsem exclusively, or else be sure that
* they have exclusive access to the inode structure (i.e., while the
* inode is being instantiated). The reason for the cmpxchg() loop
* --- which wouldn't be necessary if all code paths which modify
* i_flags actually followed this rule, is that there is at least one
* code path which doesn't today so we use cmpxchg() out of an abundance
* of caution.
*
* In the long run, i_rwsem is overkill, and we should probably look
* at using the i_lock spinlock to protect i_flags, and then make sure
* it is so documented in include/linux/fs.h and that all code follows
* the locking convention!!
*/
void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask)
{
WARN_ON_ONCE(flags & ~mask);
set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);
void inode_nohighmem(struct inode *inode)
{
mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);
struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts)
{
trace_inode_set_ctime_to_ts(inode, &ts);
set_normalized_timespec64(&ts, ts.tv_sec, ts.tv_nsec);
inode->i_ctime_sec = ts.tv_sec;
inode->i_ctime_nsec = ts.tv_nsec;
return ts;
}
EXPORT_SYMBOL(inode_set_ctime_to_ts);
/**
* timestamp_truncate - Truncate timespec to a granularity
* @t: Timespec
* @inode: inode being updated
*
* Truncate a timespec to the granularity supported by the fs
* containing the inode. Always rounds down. gran must
* not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
*/
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
struct super_block *sb = inode->i_sb;
unsigned int gran = sb->s_time_gran;
t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
t.tv_nsec = 0;
/* Avoid division in the common cases 1 ns and 1 s. */
if (gran == 1)
; /* nothing */
else if (gran == NSEC_PER_SEC)
t.tv_nsec = 0;
else if (gran > 1 && gran < NSEC_PER_SEC)
t.tv_nsec -= t.tv_nsec % gran;
else
WARN(1, "invalid file time granularity: %u", gran);
return t;
}
EXPORT_SYMBOL(timestamp_truncate);
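/*
 * Illustrative sketch only (not part of the original file): with a
 * superblock granularity of 1 ms (s_time_gran == 1000000), a timestamp of
 * 1.234567890 s is rounded down to 1.234000000 s.
 */
#if 0
static void example_truncate_timestamp(struct inode *inode)
{
	struct timespec64 ts = { .tv_sec = 1, .tv_nsec = 234567890 };

	ts = timestamp_truncate(ts, inode);
	/* Assuming s_time_gran == 1000000, ts.tv_nsec is now 234000000. */
}
#endif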
/**
* inode_set_ctime_current - set the ctime to current_time
* @inode: inode
*
* Set the inode's ctime to the current value for the inode. Returns the
* current value that was assigned. If this is not a multigrain inode, then we
* set it to the later of the coarse time and floor value.
*
* If it is multigrain, then we first see if the coarse-grained timestamp is
* distinct from what is already there. If so, then use that. Otherwise, get a
* fine-grained timestamp.
*
* After that, try to swap the new value into i_ctime_nsec. Accept the
* resulting ctime, regardless of the outcome of the swap. If it has
* already been replaced, then that timestamp is later than the earlier
* unacceptable one, and is thus acceptable.
*/
struct timespec64 inode_set_ctime_current(struct inode *inode)
{
struct timespec64 now;
u32 cns, cur;
ktime_get_coarse_real_ts64_mg(&now);
now = timestamp_truncate(now, inode);
/* Just return that if this is not a multigrain fs */
if (!is_mgtime(inode)) {
inode_set_ctime_to_ts(inode, now);
goto out;
}
/*
* A fine-grained time is only needed if someone has queried
* for timestamps, and the current coarse grained time isn't
* later than what's already there.
*/
cns = smp_load_acquire(&inode->i_ctime_nsec);
if (cns & I_CTIME_QUERIED) {
struct timespec64 ctime = { .tv_sec = inode->i_ctime_sec,
.tv_nsec = cns & ~I_CTIME_QUERIED };
if (timespec64_compare(&now, &ctime) <= 0) {
ktime_get_real_ts64_mg(&now);
now = timestamp_truncate(now, inode);
mgtime_counter_inc(mg_fine_stamps);
}
}
mgtime_counter_inc(mg_ctime_updates);
/* No need to cmpxchg if it's exactly the same */
if (cns == now.tv_nsec && inode->i_ctime_sec == now.tv_sec) {
trace_ctime_xchg_skip(inode, &now);
goto out;
}
cur = cns;
retry:
/* Try to swap the nsec value into place. */
if (try_cmpxchg(&inode->i_ctime_nsec, &cur, now.tv_nsec)) {
/* If swap occurred, then we're (mostly) done */
inode->i_ctime_sec = now.tv_sec;
trace_ctime_ns_xchg(inode, cns, now.tv_nsec, cur);
mgtime_counter_inc(mg_ctime_swaps);
} else {
/*
* Was the change due to someone marking the old ctime QUERIED?
* If so then retry the swap. This can only happen once since
* the only way to clear I_CTIME_QUERIED is to stamp the inode
* with a new ctime.
*/
if (!(cns & I_CTIME_QUERIED) && (cns | I_CTIME_QUERIED) == cur) {
cns = cur;
goto retry;
}
/* Otherwise, keep the existing ctime */
now.tv_sec = inode->i_ctime_sec;
now.tv_nsec = cur & ~I_CTIME_QUERIED;
}
out:
return now;
}
EXPORT_SYMBOL(inode_set_ctime_current);
/**
* inode_set_ctime_deleg - try to update the ctime on a delegated inode
* @inode: inode to update
* @update: timespec64 to set the ctime
*
* Attempt to atomically update the ctime on behalf of a delegation holder.
*
* The nfs server can call back the holder of a delegation to get updated
* inode attributes, including the mtime. When updating the mtime, update
* the ctime to a value at least equal to that.
*
* This can race with concurrent updates to the inode, in which
* case the update is skipped.
*
* Note that this works even when multigrain timestamps are not enabled,
* so it is used in either case.
*/
struct timespec64 inode_set_ctime_deleg(struct inode *inode, struct timespec64 update)
{
struct timespec64 now, cur_ts;
u32 cur, old;
/* pairs with try_cmpxchg below */
cur = smp_load_acquire(&inode->i_ctime_nsec);
cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
cur_ts.tv_sec = inode->i_ctime_sec;
/* If the update is older than the existing value, skip it. */
if (timespec64_compare(&update, &cur_ts) <= 0)
return cur_ts;
ktime_get_coarse_real_ts64_mg(&now);
/* Clamp the update to "now" if it's in the future */
if (timespec64_compare(&update, &now) > 0)
update = now;
update = timestamp_truncate(update, inode);
/* No need to update if the values are already the same */
if (timespec64_equal(&update, &cur_ts))
return cur_ts;
/*
* Try to swap the nsec value into place. If it fails, that means
* it raced with an update due to a write or similar activity. That
* stamp takes precedence, so just skip the update.
*/
retry:
old = cur;
if (try_cmpxchg(&inode->i_ctime_nsec, &cur, update.tv_nsec)) {
inode->i_ctime_sec = update.tv_sec;
mgtime_counter_inc(mg_ctime_swaps);
return update;
}
/*
* Was the change due to another task marking the old ctime QUERIED?
*
* If so, then retry the swap. This can only happen once since
* the only way to clear I_CTIME_QUERIED is to stamp the inode
* with a new ctime.
*/
if (!(old & I_CTIME_QUERIED) && (cur == (old | I_CTIME_QUERIED)))
goto retry;
/* Otherwise, it was a new timestamp. */
cur_ts.tv_sec = inode->i_ctime_sec;
cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
return cur_ts;
}
EXPORT_SYMBOL(inode_set_ctime_deleg);
/**
* in_group_or_capable - check whether caller is CAP_FSETID privileged
* @idmap: idmap of the mount @inode was found from
* @inode: inode to check
* @vfsgid: the new/current vfsgid of @inode
*
* Check whether @vfsgid is in the caller's group list or if the caller is
* privileged with CAP_FSETID over @inode. This can be used to determine
* whether the setgid bit can be kept or must be dropped.
*
* Return: true if the caller is sufficiently privileged, false if not.
*/
bool in_group_or_capable(struct mnt_idmap *idmap,
const struct inode *inode, vfsgid_t vfsgid)
{
if (vfsgid_in_group_p(vfsgid))
return true;
if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
return true;
return false;
}
EXPORT_SYMBOL(in_group_or_capable);
/**
* mode_strip_sgid - handle the sgid bit for non-directories
* @idmap: idmap of the mount the inode was created from
* @dir: parent directory inode
* @mode: mode of the file to be created in @dir
*
* If the @mode of the new file has both the S_ISGID and S_IXGRP bit
* raised and @dir has the S_ISGID bit raised ensure that the caller is
* either in the group of the parent directory or they have CAP_FSETID
* in their user namespace and are privileged over the parent directory.
* In all other cases, strip the S_ISGID bit from @mode.
*
* Return: the new mode to use for the file
*/
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
const struct inode *dir, umode_t mode)
{
if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
return mode;
if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
return mode;
if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
return mode;
return mode & ~S_ISGID;
}
EXPORT_SYMBOL(mode_strip_sgid);
#ifdef CONFIG_DEBUG_VFS
/*
* Dump an inode.
*
* TODO: add a proper inode dumping routine, this is a stub to get debug off the
* ground.
*
* TODO: handle getting to fs type with get_kernel_nofault()?
* See dump_mapping() above.
*/
void dump_inode(struct inode *inode, const char *reason)
{
struct super_block *sb = inode->i_sb;
pr_warn("%s encountered for inode %px\n"
"fs %s mode %ho opflags 0x%hx flags 0x%x state 0x%x count %d\n",
reason, inode, sb->s_type->name, inode->i_mode, inode->i_opflags,
inode->i_flags, inode->i_state, atomic_read(&inode->i_count));
}
EXPORT_SYMBOL(dump_inode);
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Descending-priority-sorted double-linked list
*
* (C) 2002-2003 Intel Corp
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
*
* 2001-2005 (c) MontaVista Software, Inc.
* Daniel Walker <dwalker@mvista.com>
*
* (C) 2005 Thomas Gleixner <tglx@linutronix.de>
*
* Simplifications of the original code by
* Oleg Nesterov <oleg@tv-sign.ru>
*
* Based on simple lists (include/linux/list.h).
*
* This is a priority-sorted list of nodes; each node has a
* priority from INT_MIN (highest) to INT_MAX (lowest).
*
* Addition is O(K), removal is O(1), change of priority of a node is
* O(K) and K is the number of RT priority levels used in the system.
* (1 <= K <= 99)
*
* This list is really a list of lists:
*
* - The tier 1 list is the prio_list, different priority nodes.
*
* - The tier 2 list is the node_list, serialized nodes.
*
* Simple ASCII art explanation:
*
* pl:prio_list (only for plist_node)
* nl:node_list
* HEAD| NODE(S)
* |
* ||------------------------------------|
* ||->|pl|<->|pl|<--------------->|pl|<-|
* | |10| |21| |21| |21| |40| (prio)
* | | | | | | | | | | |
* | | | | | | | | | | |
* |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
* |-------------------------------------------|
*
* The nodes on the prio_list list are sorted by priority to simplify
* the insertion of new nodes. There are no nodes with duplicate
* priorities on the list.
*
* The nodes on the node_list are ordered by priority and can contain
* entries which have the same priority. Those entries are ordered
* FIFO.
*
* Addition means: look for the prio_list node in the prio_list
* for the priority of the node and insert it before the node_list
* entry of the next prio_list node. If it is the first node of
* that priority, add it to the prio_list in the right position and
* insert it into the serialized node_list list.
*
* Removal means remove it from the node_list and remove it from
* the prio_list if the node_list list_head is non empty. In case
* of removal from the prio_list it must be checked whether other
* entries of the same priority are on the list or not. If there
* is another entry of the same priority then this entry has to
* replace the removed entry on the prio_list. If the entry which
* is removed is the only entry of this priority then a simple
* remove from both list is sufficient.
*
* INT_MIN is the highest priority, 0 is the medium highest, INT_MAX
* is lowest priority.
*
* No locking is done, up to the caller.
*/
#ifndef _LINUX_PLIST_H_
#define _LINUX_PLIST_H_
#include <linux/container_of.h>
#include <linux/list.h>
#include <linux/plist_types.h>
#include <asm/bug.h>
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name
*/
#define PLIST_HEAD_INIT(head) \
{ \
.node_list = LIST_HEAD_INIT((head).node_list) \
}
/**
* PLIST_HEAD - declare and init plist_head
* @head: name for struct plist_head variable
*/
#define PLIST_HEAD(head) \
struct plist_head head = PLIST_HEAD_INIT(head)
/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
*/
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
.prio_list = LIST_HEAD_INIT((node).prio_list), \
.node_list = LIST_HEAD_INIT((node).node_list), \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
*/
static inline void
plist_head_init(struct plist_head *head)
{
INIT_LIST_HEAD(&head->node_list);
}
/**
* plist_node_init - Dynamic struct plist_node initializer
* @node: &struct plist_node pointer
* @prio: initial node priority
*/
static inline void plist_node_init(struct plist_node *node, int prio)
{
node->prio = prio;
INIT_LIST_HEAD(&node->prio_list);
INIT_LIST_HEAD(&node->node_list);
}
extern void plist_add(struct plist_node *node, struct plist_head *head);
extern void plist_del(struct plist_node *node, struct plist_head *head);
extern void plist_requeue(struct plist_node *node, struct plist_head *head);
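/*
 * Illustrative sketch only (not part of the original file): basic plist
 * usage - initialize a head and two nodes, add them, and observe that the
 * lowest numeric priority sorts first.
 */
#if 0
static void example_plist_usage(void)
{
	struct plist_head head;
	struct plist_node a, b;

	plist_head_init(&head);
	plist_node_init(&a, 10);
	plist_node_init(&b, 5);

	plist_add(&a, &head);
	plist_add(&b, &head);	/* b sorts before a: prio 5 < prio 10 */

	/* plist_first(&head) would now return &b. */
	plist_del(&b, &head);
	plist_del(&a, &head);
}
#endif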
/**
* plist_for_each - iterate over the plist
* @pos: the type * to use as a loop counter
* @head: the head for your list
*/
#define plist_for_each(pos, head) \
list_for_each_entry(pos, &(head)->node_list, node_list)
/**
* plist_for_each_continue - continue iteration over the plist
* @pos: the type * to use as a loop cursor
* @head: the head for your list
*
* Continue to iterate over plist, continuing after the current position.
*/
#define plist_for_each_continue(pos, head) \
list_for_each_entry_continue(pos, &(head)->node_list, node_list)
/**
* plist_for_each_safe - iterate safely over a plist of given type
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
* @head: the head for your list
*
* Iterate over a plist of given type, safe against removal of list entry.
*/
#define plist_for_each_safe(pos, n, head) \
list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
/**
* plist_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop counter
* @head: the head for your list
* @mem: the name of the list_head within the struct
*/
#define plist_for_each_entry(pos, head, mem) \
list_for_each_entry(pos, &(head)->node_list, mem.node_list)
/**
* plist_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor
* @head: the head for your list
* @m: the name of the list_head within the struct
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
#define plist_for_each_entry_continue(pos, head, m) \
list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
/**
* plist_for_each_entry_safe - iterate safely over list of given type
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
* @head: the head for your list
* @m: the name of the list_head within the struct
*
* Iterate over list of given type, safe against removal of list entry.
*/
#define plist_for_each_entry_safe(pos, n, head, m) \
list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
/**
* plist_head_empty - return !0 if a plist_head is empty
* @head: &struct plist_head pointer
*/
static inline int plist_head_empty(const struct plist_head *head)
{
return list_empty(&head->node_list);
}
/**
* plist_node_empty - return !0 if plist_node is not on a list
* @node: &struct plist_node pointer
*/
static inline int plist_node_empty(const struct plist_node *node)
{
return list_empty(&node->node_list);
}
/* All functions below assume the plist_head is not empty. */
/**
* plist_first_entry - get the struct for the first entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
* @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PLIST
# define plist_first_entry(head, type, member) \
({ \
WARN_ON(plist_head_empty(head)); \
container_of(plist_first(head), type, member); \
})
#else
# define plist_first_entry(head, type, member) \
container_of(plist_first(head), type, member)
#endif
/**
* plist_last_entry - get the struct for the last entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
* @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PLIST
# define plist_last_entry(head, type, member) \
({ \
WARN_ON(plist_head_empty(head)); \
container_of(plist_last(head), type, member); \
})
#else
# define plist_last_entry(head, type, member) \
container_of(plist_last(head), type, member)
#endif
/**
* plist_next - get the next entry in list
* @pos: the type * to cursor
*/
#define plist_next(pos) \
list_next_entry(pos, node_list)
/**
* plist_prev - get the prev entry in list
* @pos: the type * to cursor
*/
#define plist_prev(pos) \
list_prev_entry(pos, node_list)
/**
* plist_first - return the first node (and thus, highest priority)
* @head: the &struct plist_head pointer
*
* Assumes the plist is _not_ empty.
*/
static inline struct plist_node *plist_first(const struct plist_head *head)
{
return list_entry(head->node_list.next,
struct plist_node, node_list);
}
/**
* plist_last - return the last node (and thus, lowest priority)
* @head: the &struct plist_head pointer
*
* Assumes the plist is _not_ empty.
*/
static inline struct plist_node *plist_last(const struct plist_head *head)
{
return list_entry(head->node_list.prev,
struct plist_node, node_list);
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_NETLINK_H
#define __NET_NETLINK_H
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
/* ========================================================================
* Netlink Messages and Attributes Interface (As Seen On TV)
* ------------------------------------------------------------------------
* Messages Interface
* ------------------------------------------------------------------------
*
* Message Format:
* <--- nlmsg_total_size(payload) --->
* <-- nlmsg_msg_size(payload) ->
* +----------+- - -+-------------+- - -+-------- - -
* | nlmsghdr | Pad | Payload | Pad | nlmsghdr
* +----------+- - -+-------------+- - -+-------- - -
* nlmsg_data(nlh)---^ ^
* nlmsg_next(nlh)-----------------------+
*
* Payload Format:
* <---------------------- nlmsg_len(nlh) --------------------->
* <------ hdrlen ------> <- nlmsg_attrlen(nlh, hdrlen) ->
* +----------------------+- - -+--------------------------------+
* | Family Header | Pad | Attributes |
* +----------------------+- - -+--------------------------------+
* nlmsg_attrdata(nlh, hdrlen)---^
*
* Data Structures:
* struct nlmsghdr netlink message header
*
* Message Construction:
* nlmsg_new() create a new netlink message
* nlmsg_put() add a netlink message to an skb
* nlmsg_put_answer() callback based nlmsg_put()
* nlmsg_end() finalize netlink message
* nlmsg_get_pos() return current position in message
* nlmsg_trim() trim part of message
* nlmsg_cancel() cancel message construction
* nlmsg_consume() free a netlink message (expected)
* nlmsg_free() free a netlink message (drop)
*
* Message Sending:
* nlmsg_multicast() multicast message to several groups
* nlmsg_unicast() unicast a message to a single socket
* nlmsg_notify() send notification message
*
* Message Length Calculations:
* nlmsg_msg_size(payload) length of message w/o padding
* nlmsg_total_size(payload) length of message w/ padding
* nlmsg_padlen(payload) length of padding at tail
*
* Message Payload Access:
* nlmsg_data(nlh) head of message payload
* nlmsg_len(nlh) length of message payload
* nlmsg_attrdata(nlh, hdrlen) head of attributes data
* nlmsg_attrlen(nlh, hdrlen) length of attributes data
*
* Message Parsing:
* nlmsg_ok(nlh, remaining) does nlh fit into remaining bytes?
* nlmsg_next(nlh, remaining) get next netlink message
* nlmsg_parse() parse attributes of a message
* nlmsg_find_attr() find an attribute in a message
* nlmsg_for_each_msg() loop over all messages
* nlmsg_validate() validate netlink message incl. attrs
* nlmsg_for_each_attr() loop over all attributes
* nlmsg_for_each_attr_type() loop over all attributes with the
* given type
*
* Misc:
* nlmsg_report() report back to application?
*
* ------------------------------------------------------------------------
* Attributes Interface
* ------------------------------------------------------------------------
*
* Attribute Format:
* <------- nla_total_size(payload) ------->
* <---- nla_attr_size(payload) ----->
* +----------+- - -+- - - - - - - - - +- - -+-------- - -
* | Header | Pad | Payload | Pad | Header
* +----------+- - -+- - - - - - - - - +- - -+-------- - -
* <- nla_len(nla) -> ^
* nla_data(nla)----^ |
* nla_next(nla)-----------------------------'
*
* Data Structures:
* struct nlattr netlink attribute header
*
* Attribute Construction:
* nla_reserve(skb, type, len) reserve room for an attribute
* nla_reserve_nohdr(skb, len) reserve room for an attribute w/o hdr
* nla_put(skb, type, len, data) add attribute to skb
* nla_put_nohdr(skb, len, data) add attribute w/o hdr
* nla_append(skb, len, data) append data to skb
*
* Attribute Construction for Basic Types:
* nla_put_u8(skb, type, value) add u8 attribute to skb
* nla_put_u16(skb, type, value) add u16 attribute to skb
* nla_put_u32(skb, type, value) add u32 attribute to skb
* nla_put_u64_64bit(skb, type,
* value, padattr) add u64 attribute to skb
* nla_put_s8(skb, type, value) add s8 attribute to skb
* nla_put_s16(skb, type, value) add s16 attribute to skb
* nla_put_s32(skb, type, value) add s32 attribute to skb
* nla_put_s64(skb, type, value,
* padattr) add s64 attribute to skb
* nla_put_string(skb, type, str) add string attribute to skb
* nla_put_flag(skb, type) add flag attribute to skb
* nla_put_msecs(skb, type, jiffies,
* padattr) add msecs attribute to skb
* nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
* nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
*
* Nested Attributes Construction:
* nla_nest_start(skb, type) start a nested attribute
* nla_nest_end(skb, nla) finalize a nested attribute
* nla_nest_cancel(skb, nla) cancel nested attribute construction
* nla_put_empty_nest(skb, type) create an empty nest
*
* Attribute Length Calculations:
* nla_attr_size(payload) length of attribute w/o padding
* nla_total_size(payload) length of attribute w/ padding
* nla_padlen(payload) length of padding
*
* Attribute Payload Access:
* nla_data(nla) head of attribute payload
* nla_len(nla) length of attribute payload
*
* Attribute Payload Access for Basic Types:
* nla_get_uint(nla) get payload for a uint attribute
* nla_get_sint(nla) get payload for a sint attribute
* nla_get_u8(nla) get payload for a u8 attribute
* nla_get_u16(nla) get payload for a u16 attribute
* nla_get_u32(nla) get payload for a u32 attribute
* nla_get_u64(nla) get payload for a u64 attribute
* nla_get_s8(nla) get payload for a s8 attribute
* nla_get_s16(nla) get payload for a s16 attribute
* nla_get_s32(nla) get payload for a s32 attribute
* nla_get_s64(nla) get payload for a s64 attribute
* nla_get_flag(nla) return 1 if flag is true
* nla_get_msecs(nla) get payload for a msecs attribute
*
* The same functions also exist with _default().
*
* Attribute Misc:
* nla_memcpy(dest, nla, count) copy attribute into memory
* nla_memcmp(nla, data, size) compare attribute with memory area
* nla_strscpy(dst, nla, size) copy attribute to a sized string
* nla_strcmp(nla, str) compare attribute with string
*
* Attribute Parsing:
* nla_ok(nla, remaining) does nla fit into remaining bytes?
* nla_next(nla, remaining) get next netlink attribute
* nla_validate() validate a stream of attributes
* nla_validate_nested() validate a stream of nested attributes
* nla_find() find attribute in stream of attributes
* nla_find_nested() find attribute in nested attributes
* nla_parse() parse and validate stream of attrs
* nla_parse_nested() parse nested attributes
* nla_for_each_attr() loop over all attributes
* nla_for_each_attr_type() loop over all attributes with the
* given type
* nla_for_each_nested() loop over the nested attributes
* nla_for_each_nested_type() loop over the nested attributes with
* the given type
*=========================================================================
*/
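/*
 * Illustrative sketch only (not part of the original file): building and
 * sending a unicast message carrying a single u32 attribute with the
 * interfaces listed above. EXAMPLE_MSG_TYPE and EXAMPLE_ATTR_VALUE are
 * hypothetical placeholders for whatever the protocol family defines.
 */
#if 0
static int example_send_value(struct sock *sk, u32 portid, u32 seq, u32 value)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, portid, seq, EXAMPLE_MSG_TYPE, 0, 0);
	if (!nlh)
		goto out_free;
	if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, value))
		goto out_free;
	nlmsg_end(skb, nlh);

	return nlmsg_unicast(sk, skb, portid);

out_free:
	nlmsg_free(skb);
	return -EMSGSIZE;
}
#endif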
/**
* Standard attribute types to specify validation policy
*/
enum {
NLA_UNSPEC,
NLA_U8,
NLA_U16,
NLA_U32,
NLA_U64,
NLA_STRING,
NLA_FLAG,
NLA_MSECS,
NLA_NESTED,
NLA_NESTED_ARRAY,
NLA_NUL_STRING,
NLA_BINARY,
NLA_S8,
NLA_S16,
NLA_S32,
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
NLA_BE16,
NLA_BE32,
NLA_SINT,
NLA_UINT,
__NLA_TYPE_MAX,
};
#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
struct netlink_range_validation {
u64 min, max;
};
struct netlink_range_validation_signed {
s64 min, max;
};
enum nla_policy_validation {
NLA_VALIDATE_NONE,
NLA_VALIDATE_RANGE,
NLA_VALIDATE_RANGE_WARN_TOO_LONG,
NLA_VALIDATE_MIN,
NLA_VALIDATE_MAX,
NLA_VALIDATE_MASK,
NLA_VALIDATE_RANGE_PTR,
NLA_VALIDATE_FUNCTION,
};
/**
* struct nla_policy - attribute validation policy
* @type: Type of attribute or NLA_UNSPEC
* @validation_type: type of attribute validation done in addition to
* type-specific validation (e.g. range, function call), see
* &enum nla_policy_validation
* @len: Type specific length of payload
*
* Policies are defined as arrays of this struct, the array must be
* accessible by attribute type up to the highest identifier to be expected.
*
* Meaning of `len' field:
* NLA_STRING Maximum length of string
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
* (but see also below with the validation type)
* NLA_NESTED,
* NLA_NESTED_ARRAY Length verification is done by checking len of
* nested header (or empty); len field is used if
* nested_policy is also used, for the max attr
* number in the nested policy.
* NLA_SINT, NLA_UINT,
* NLA_U8, NLA_U16,
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
* NLA_S32, NLA_S64,
* NLA_BE16, NLA_BE32,
* NLA_MSECS Leaving the length field zero will verify the
* given type fits, using it verifies minimum length
* just like "All other"
* NLA_BITFIELD32 Unused
* NLA_REJECT Unused
* All other Minimum length of attribute payload
*
* Meaning of validation union:
* NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and
* `bitfield32_valid' is the u32 value of valid flags
* NLA_REJECT This attribute is always rejected and `reject_message'
* may point to a string to report as the error instead
* of the generic one in extended ACK.
* NLA_NESTED `nested_policy' to a nested policy to validate, must
* also set `len' to the max attribute number. Use the
* provided NLA_POLICY_NESTED() macro.
* Note that nla_parse() will validate, but of course not
* parse, the nested sub-policies.
* NLA_NESTED_ARRAY `nested_policy' points to a nested policy to validate,
* must also set `len' to the max attribute number. Use
* the provided NLA_POLICY_NESTED_ARRAY() macro.
* The difference to NLA_NESTED is the structure:
* NLA_NESTED has the nested attributes directly inside
* while an array has the nested attributes at another
* level down and the attribute types directly in the
* nesting don't matter.
* NLA_UINT,
* NLA_U8,
* NLA_U16,
* NLA_U32,
* NLA_U64,
* NLA_BE16,
* NLA_BE32,
* NLA_SINT,
* NLA_S8,
* NLA_S16,
* NLA_S32,
* NLA_S64 The `min' and `max' fields are used depending on the
* validation_type field, if that is min/max/range then
* the min, max or both are used (respectively) to check
* the value of the integer attribute.
* Note that in the interest of code simplicity and
* struct size both limits are s16, so you cannot
* enforce a range that doesn't fall within the range
* of s16 - do that using the NLA_POLICY_FULL_RANGE()
* or NLA_POLICY_FULL_RANGE_SIGNED() macros instead.
* Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and
* NLA_POLICY_RANGE() macros.
* NLA_UINT,
* NLA_U8,
* NLA_U16,
* NLA_U32,
* NLA_U64 If the validation_type field instead is set to
* NLA_VALIDATE_RANGE_PTR, `range' must be a pointer
* to a struct netlink_range_validation that indicates
* the min/max values.
* Use NLA_POLICY_FULL_RANGE().
* NLA_SINT,
* NLA_S8,
* NLA_S16,
* NLA_S32,
* NLA_S64 If the validation_type field instead is set to
* NLA_VALIDATE_RANGE_PTR, `range_signed' must be a
* pointer to a struct netlink_range_validation_signed
* that indicates the min/max values.
* Use NLA_POLICY_FULL_RANGE_SIGNED().
*
* NLA_BINARY If the validation type is like the ones for integers
* above, then the min/max length (not value like for
* integers) of the attribute is enforced.
*
* All other Unused - but note that it's a union
*
* Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
* NLA_U8, NLA_U16,
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
* NLA_S32, NLA_S64,
* NLA_MSECS,
* NLA_BINARY Validation function called for the attribute.
*
* All other Unused - but note that it's a union
*
* Example:
*
* static const u32 myvalidflags = 0xff231023;
*
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
* [ATTR_BAZ] = NLA_POLICY_EXACT_LEN(sizeof(struct mystruct)),
* [ATTR_GOO] = NLA_POLICY_BITFIELD32(myvalidflags),
* };
*/
struct nla_policy {
u8 type;
u8 validation_type;
u16 len;
union {
/**
* @strict_start_type: first attribute to validate strictly
*
* This entry is special, and used for the attribute at index 0
* only, and specifies special data about the policy, namely it
* specifies the "boundary type" where strict length validation
 * starts for any attribute types >= this value; strict
 * nesting validation also starts here.
*
* Additionally, it means that NLA_UNSPEC is actually NLA_REJECT
* for any types >= this, so need to use NLA_POLICY_MIN_LEN() to
* get the previous pure { .len = xyz } behaviour. The advantage
* of this is that types not specified in the policy will be
* rejected.
*
* For completely new families it should be set to 1 so that the
* validation is enforced for all attributes. For existing ones
* it should be set at least when new attributes are added to
* the enum used by the policy, and be set to the new value that
* was added to enforce strict validation from thereon.
*/
u16 strict_start_type;
/* private: use NLA_POLICY_*() to set */
const u32 bitfield32_valid;
const u32 mask;
const char *reject_message;
const struct nla_policy *nested_policy;
const struct netlink_range_validation *range;
const struct netlink_range_validation_signed *range_signed;
struct {
s16 min, max;
};
int (*validate)(const struct nlattr *attr,
struct netlink_ext_ack *extack);
};
};
#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
#define _NLA_POLICY_NESTED(maxattr, policy) \
{ .type = NLA_NESTED, .nested_policy = policy, .len = maxattr }
#define _NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
{ .type = NLA_NESTED_ARRAY, .nested_policy = policy, .len = maxattr }
#define NLA_POLICY_NESTED(policy) \
_NLA_POLICY_NESTED(ARRAY_SIZE(policy) - 1, policy)
#define NLA_POLICY_NESTED_ARRAY(policy) \
_NLA_POLICY_NESTED_ARRAY(ARRAY_SIZE(policy) - 1, policy)
#define NLA_POLICY_BITFIELD32(valid) \
{ .type = NLA_BITFIELD32, .bitfield32_valid = valid }
#define __NLA_IS_UINT_TYPE(tp) \
(tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || \
tp == NLA_U64 || tp == NLA_UINT || \
tp == NLA_BE16 || tp == NLA_BE32)
#define __NLA_IS_SINT_TYPE(tp) \
(tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64 || \
tp == NLA_SINT)
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_UINT_TYPE(tp) \
(__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp)) + tp)
#define NLA_ENSURE_UINT_OR_BINARY_TYPE(tp) \
(__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
tp == NLA_MSECS || \
tp == NLA_BINARY) + tp)
#define NLA_ENSURE_SINT_TYPE(tp) \
(__NLA_ENSURE(__NLA_IS_SINT_TYPE(tp)) + tp)
#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
(__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
__NLA_IS_SINT_TYPE(tp) || \
tp == NLA_MSECS || \
tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
(__NLA_ENSURE(tp != NLA_BITFIELD32 && \
tp != NLA_REJECT && \
tp != NLA_NESTED && \
tp != NLA_NESTED_ARRAY) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE, \
.min = _min, \
.max = _max \
}
#define NLA_POLICY_FULL_RANGE(tp, _range) { \
.type = NLA_ENSURE_UINT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE_PTR, \
.range = _range, \
}
#define NLA_POLICY_FULL_RANGE_SIGNED(tp, _range) { \
.type = NLA_ENSURE_SINT_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE_PTR, \
.range_signed = _range, \
}
#define NLA_POLICY_MIN(tp, _min) { \
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MIN, \
.min = _min, \
}
#define NLA_POLICY_MAX(tp, _max) { \
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
}
#define NLA_POLICY_MASK(tp, _mask) { \
.type = NLA_ENSURE_UINT_TYPE(tp), \
.validation_type = NLA_VALIDATE_MASK, \
.mask = _mask, \
}
#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
.type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
.validation_type = NLA_VALIDATE_FUNCTION, \
.validate = fn, \
.len = __VA_ARGS__ + 0, \
}
#define NLA_POLICY_EXACT_LEN(_len) NLA_POLICY_RANGE(NLA_BINARY, _len, _len)
#define NLA_POLICY_EXACT_LEN_WARN(_len) { \
.type = NLA_BINARY, \
.validation_type = NLA_VALIDATE_RANGE_WARN_TOO_LONG, \
.min = _len, \
.max = _len \
}
#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
#define NLA_POLICY_MAX_LEN(_len) NLA_POLICY_MAX(NLA_BINARY, _len)
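/*
 * Example use of the helper macros above. This is an illustrative sketch
 * only: the MYATTR_* constants, my_range and my_nested_policy are
 * hypothetical names, not part of this header.
 *
 *	static const struct netlink_range_validation my_range = {
 *		.min = 1,
 *		.max = 200000,
 *	};
 *
 *	static const struct nla_policy my_nested_policy[MYATTR_NESTED_MAX + 1] = {
 *		[MYATTR_NESTED_VAL] = NLA_POLICY_RANGE(NLA_U8, 0, 15),
 *	};
 *
 *	static const struct nla_policy my_policy[MYATTR_MAX + 1] = {
 *		[0]		  = { .strict_start_type = MYATTR_COUNT },
 *		[MYATTR_PORT]	  = NLA_POLICY_RANGE(NLA_U16, 1, 1024),
 *		[MYATTR_COUNT]	  = NLA_POLICY_FULL_RANGE(NLA_U32, &my_range),
 *		[MYATTR_FLAGS]	  = NLA_POLICY_MASK(NLA_U32, 0x0f),
 *		[MYATTR_NAME]	  = { .type = NLA_NUL_STRING, .len = 31 },
 *		[MYATTR_NESTED]	  = NLA_POLICY_NESTED(my_nested_policy),
 *		[MYATTR_OBSOLETE] = { .type = NLA_REJECT,
 *				      .reject_message = "obsolete attribute" },
 *	};
 */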
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
* @nl_net: Network namespace
* @portid: Netlink PORTID of requesting application
* @skip_notify: Skip netlink notifications to user space
* @skip_notify_kernel: Skip selected in-kernel notifications
*/
struct nl_info {
struct nlmsghdr *nlh;
struct net *nl_net;
u32 portid;
u8 skip_notify:1,
skip_notify_kernel:1;
};
/**
* enum netlink_validation - netlink message/attribute validation levels
* @NL_VALIDATE_LIBERAL: Old-style "be liberal" validation, not caring about
* extra data at the end of the message, attributes being longer than
* they should be, or unknown attributes being present.
* @NL_VALIDATE_TRAILING: Reject junk data encountered after attribute parsing.
* @NL_VALIDATE_MAXTYPE: Reject attributes > max type; Together with _TRAILING
* this is equivalent to the old nla_parse_strict()/nlmsg_parse_strict().
* @NL_VALIDATE_UNSPEC: Reject attributes with NLA_UNSPEC in the policy.
* This can safely be set by the kernel when the given policy has no
* NLA_UNSPEC anymore, and can thus be used to ensure policy entries
* are enforced going forward.
* @NL_VALIDATE_STRICT_ATTRS: strict attribute policy parsing (e.g.
* U8, U16, U32 must have exact size, etc.)
* @NL_VALIDATE_NESTED: Check that NLA_F_NESTED is set for NLA_NESTED(_ARRAY)
* and unset for other policies.
*/
enum netlink_validation {
NL_VALIDATE_LIBERAL = 0,
NL_VALIDATE_TRAILING = BIT(0),
NL_VALIDATE_MAXTYPE = BIT(1),
NL_VALIDATE_UNSPEC = BIT(2),
NL_VALIDATE_STRICT_ATTRS = BIT(3),
NL_VALIDATE_NESTED = BIT(4),
};
#define NL_VALIDATE_DEPRECATED_STRICT (NL_VALIDATE_TRAILING |\
NL_VALIDATE_MAXTYPE)
#define NL_VALIDATE_STRICT (NL_VALIDATE_TRAILING |\
NL_VALIDATE_MAXTYPE |\
NL_VALIDATE_UNSPEC |\
NL_VALIDATE_STRICT_ATTRS |\
NL_VALIDATE_NESTED)
int netlink_rcv_skb(struct sk_buff *skb,
int (*cb)(struct sk_buff *, struct nlmsghdr *,
struct netlink_ext_ack *));
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
unsigned int group, int report, gfp_t flags);
int __nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack);
int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
int len, const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize);
char *nla_strdup(const struct nlattr *nla, gfp_t flags);
int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
int nla_strcmp(const struct nlattr *nla, const char *str);
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr);
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr);
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data);
void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr);
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr);
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_append(struct sk_buff *skb, int attrlen, const void *data);
/**************************************************************************
* Netlink Messages
**************************************************************************/
/**
* nlmsg_msg_size - length of netlink message not including padding
* @payload: length of message payload
*/
static inline int nlmsg_msg_size(int payload)
{
return NLMSG_HDRLEN + payload;
}
/**
* nlmsg_total_size - length of netlink message including padding
* @payload: length of message payload
*/
static inline int nlmsg_total_size(int payload)
{
return NLMSG_ALIGN(nlmsg_msg_size(payload));
}
/**
* nlmsg_padlen - length of padding at the message's tail
* @payload: length of message payload
*/
static inline int nlmsg_padlen(int payload)
{
return nlmsg_total_size(payload) - nlmsg_msg_size(payload);
}
/**
* nlmsg_data - head of message payload
* @nlh: netlink message header
*/
static inline void *nlmsg_data(const struct nlmsghdr *nlh)
{
return (unsigned char *) nlh + NLMSG_HDRLEN;
}
/**
* nlmsg_len - length of message payload
* @nlh: netlink message header
*/
static inline int nlmsg_len(const struct nlmsghdr *nlh)
{
return nlh->nlmsg_len - NLMSG_HDRLEN;
}
/**
* nlmsg_payload - message payload if the data fits in the len
* @nlh: netlink message header
* @len: struct length
*
* Returns: The netlink message payload/data if the length is sufficient,
* otherwise NULL.
*/
static inline void *nlmsg_payload(const struct nlmsghdr *nlh, size_t len)
{
if (nlh->nlmsg_len < nlmsg_msg_size(len))
return NULL;
return nlmsg_data(nlh);
}
/**
* nlmsg_attrdata - head of attributes data
* @nlh: netlink message header
* @hdrlen: length of family specific header
*/
static inline struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
int hdrlen)
{
unsigned char *data = nlmsg_data(nlh);
return (struct nlattr *) (data + NLMSG_ALIGN(hdrlen));
}
/**
* nlmsg_attrlen - length of attributes data
* @nlh: netlink message header
* @hdrlen: length of family specific header
*/
static inline int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
{
return nlmsg_len(nlh) - NLMSG_ALIGN(hdrlen);
}
/**
* nlmsg_ok - check if the netlink message fits into the remaining bytes
* @nlh: netlink message header
* @remaining: number of bytes remaining in message stream
*/
static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
{
return (remaining >= (int) sizeof(struct nlmsghdr) &&
nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
nlh->nlmsg_len <= remaining);
}
/**
* nlmsg_next - next netlink message in message stream
* @nlh: netlink message header
* @remaining: number of bytes remaining in message stream
*
* Returns: the next netlink message in the message stream and
* decrements remaining by the size of the current message.
*/
static inline struct nlmsghdr *
nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
{
int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
*remaining -= totlen;
return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
}
/**
* nla_parse - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
* @extack: extended ACK pointer
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessible via the attribute type. Attributes with a type
 * exceeding maxtype will be rejected, a policy must be specified, and
 * attributes will be validated in the strictest way possible.
*
* Returns: 0 on success or a negative error code.
*/
static inline int nla_parse(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, head, len, policy,
NL_VALIDATE_STRICT, extack);
}
/**
* nla_parse_deprecated - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
* @extack: extended ACK pointer
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessible via the attribute type. Attributes with a type
* exceeding maxtype will be ignored and attributes from the policy are not
* always strictly validated (only for new attributes).
*
* Returns: 0 on success or a negative error code.
*/
static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, head, len, policy,
NL_VALIDATE_LIBERAL, extack);
}
/**
* nla_parse_deprecated_strict - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
* @extack: extended ACK pointer
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessible via the attribute type. Attributes with a type
* exceeding maxtype will be rejected as well as trailing data, but the
* policy is not completely strictly validated (only for new attributes).
*
* Returns: 0 on success or a negative error code.
*/
static inline int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype,
const struct nlattr *head,
int len,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, head, len, policy,
NL_VALIDATE_DEPRECATED_STRICT, extack);
}
/**
* __nlmsg_parse - parse attributes of a netlink message
* @nlh: netlink message header
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @validate: validation strictness
* @extack: extended ACK report struct
*
* See nla_parse()
*/
static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack)
{
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
		NL_SET_ERR_MSG(extack, "Invalid header length");
		return -EINVAL;
	}
return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), policy, validate,
extack);
}
/**
* nlmsg_parse - parse attributes of a netlink message
* @nlh: netlink message header
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse()
*/
static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
NL_VALIDATE_STRICT, extack);
}
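/*
 * Example of a request handler using nlmsg_parse(). This is an illustrative
 * sketch only: my_doit, struct my_family_hdr, my_policy, my_configure and
 * the MYATTR_* constants are hypothetical names, not part of this header.
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		struct nlattr *tb[MYATTR_MAX + 1];
 *		u32 count;
 *		int err;
 *
 *		err = nlmsg_parse(nlh, sizeof(struct my_family_hdr), tb,
 *				  MYATTR_MAX, my_policy, extack);
 *		if (err < 0)
 *			return err;
 *		if (!tb[MYATTR_PORT]) {
 *			NL_SET_ERR_MSG(extack, "missing port attribute");
 *			return -EINVAL;
 *		}
 *		count = nla_get_u32_default(tb[MYATTR_COUNT], 1);
 *		return my_configure(nla_get_u16(tb[MYATTR_PORT]), count);
 *	}
 */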
/**
* nlmsg_parse_deprecated - parse attributes of a netlink message
* @nlh: netlink message header
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated()
*/
static inline int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
NL_VALIDATE_LIBERAL, extack);
}
/**
* nlmsg_parse_deprecated_strict - parse attributes of a netlink message
* @nlh: netlink message header
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated_strict()
*/
static inline int
nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
NL_VALIDATE_DEPRECATED_STRICT, extack);
}
/**
* nlmsg_find_attr - find a specific attribute in a netlink message
* @nlh: netlink message header
* @hdrlen: length of family specific header
* @attrtype: type of attribute to look for
*
 * Returns: the first attribute which matches the specified type, or NULL
 * if no matching attribute is found.
*/
static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
int hdrlen, int attrtype)
{
return nla_find(nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), attrtype);
}
/**
* nla_validate_deprecated - Validate a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in liberal mode.
* See documentation of struct nla_policy for more details.
*
* Returns: 0 on success or a negative error code.
*/
static inline int nla_validate_deprecated(const struct nlattr *head, int len,
int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_LIBERAL,
extack);
}
/**
* nla_validate - Validate a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in strict mode.
* See documentation of struct nla_policy for more details.
*
* Returns: 0 on success or a negative error code.
*/
static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_STRICT,
extack);
}
/**
* nlmsg_validate_deprecated - validate a netlink message including attributes
 * @nlh: netlink message header
* @hdrlen: length of family specific header
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*/
static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
int hdrlen, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
return __nla_validate(nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), maxtype,
policy, NL_VALIDATE_LIBERAL, extack);
}
/**
* nlmsg_report - need to report back to application?
* @nlh: netlink message header
*
* Returns: 1 if a report back to the application is requested.
*/
static inline int nlmsg_report(const struct nlmsghdr *nlh)
{
return nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0;
}
/**
* nlmsg_seq - return the seq number of netlink message
* @nlh: netlink message header
*
* Returns: 0 if netlink message is NULL
*/
static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
{
return nlh ? nlh->nlmsg_seq : 0;
}
/**
* nlmsg_for_each_attr - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @nlh: netlink message header
* @hdrlen: length of family specific header
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
nlmsg_attrlen(nlh, hdrlen), rem)
/**
* nlmsg_for_each_attr_type - iterate over a stream of attributes
* @pos: loop counter, set to the current attribute
* @type: required attribute type for @pos
* @nlh: netlink message header
* @hdrlen: length of the family specific header
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nlmsg_for_each_attr_type(pos, type, nlh, hdrlen, rem) \
nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
if (nla_type(pos) == type)
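/*
 * Example of iterating over the attributes of a message. This is an
 * illustrative sketch only: hdrlen, MYATTR_STAT and total are hypothetical
 * names, not part of this header.
 *
 *	struct nlattr *attr;
 *	u64 total = 0;
 *	int rem;
 *
 *	nlmsg_for_each_attr_type(attr, MYATTR_STAT, nlh, hdrlen, rem)
 *		total += nla_get_u32(attr);
 */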
/**
* nlmsg_put - Add a new netlink message to an skb
* @skb: socket buffer to store message in
* @portid: netlink PORTID of requesting application
* @seq: sequence number of message
* @type: message type
* @payload: length of message payload
* @flags: message flags
*
 * Returns: the message header, or NULL if the tailroom of the skb is
 * insufficient to store the message header and payload.
*/
static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
int type, int payload, int flags)
{
if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
return NULL;
return __nlmsg_put(skb, portid, seq, type, payload, flags);
}
/**
* nlmsg_append - Add more data to a nlmsg in a skb
* @skb: socket buffer to store message in
* @size: length of message payload
*
* Append data to an existing nlmsg, used when constructing a message
* with multiple fixed-format headers (which is rare).
 * Returns: a pointer to the appended payload area, or NULL if the tailroom
 * of the skb is insufficient to store the extra payload.
*/
static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
{
if (unlikely(skb_tailroom(skb) < NLMSG_ALIGN(size)))
return NULL;
	if (NLMSG_ALIGN(size) - size)
		memset(skb_tail_pointer(skb) + size, 0,
		       NLMSG_ALIGN(size) - size);
return __skb_put(skb, NLMSG_ALIGN(size));
}
/**
* nlmsg_put_answer - Add a new callback based netlink message to an skb
* @skb: socket buffer to store message in
* @cb: netlink callback
* @type: message type
* @payload: length of message payload
* @flags: message flags
*
* Returns: NULL if the tailroom of the skb is insufficient to store
* the message header and payload.
*/
static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
struct netlink_callback *cb,
int type, int payload,
int flags)
{
return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
type, payload, flags);
}
/**
* nlmsg_new - Allocate a new netlink message
* @payload: size of the message payload
* @flags: the type of memory to allocate.
*
* Use NLMSG_DEFAULT_SIZE if the size of the payload isn't known
* and a good default is needed.
*/
static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
{
return alloc_skb(nlmsg_total_size(payload), flags);
}
/**
* nlmsg_new_large - Allocate a new netlink message with non-contiguous
* physical memory
* @payload: size of the message payload
*
 * The allocated skb must not carry frag pages in shinfo->frags*,
 * because netlink_skb_destructor() sets skb->head to NULL and thereby
 * bypasses most of the handling in skb_release_data().
*/
static inline struct sk_buff *nlmsg_new_large(size_t payload)
{
return netlink_alloc_large_skb(nlmsg_total_size(payload), 0);
}
/**
* nlmsg_end - Finalize a netlink message
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
*
* Corrects the netlink message header to include the appended
* attributes. Only necessary if attributes have been added to
* the message.
*/
static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
}
/**
* nlmsg_get_pos - return current position in netlink message
* @skb: socket buffer the message is stored in
*
* Returns: a pointer to the current tail of the message.
*/
static inline void *nlmsg_get_pos(struct sk_buff *skb)
{
return skb_tail_pointer(skb);
}
/**
* nlmsg_trim - Trim message to a mark
* @skb: socket buffer the message is stored in
* @mark: mark to trim to
*
* Trims the message to the provided mark.
*/
static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
{
	if (mark) {
		WARN_ON((unsigned char *) mark < skb->data);
		skb_trim(skb, (unsigned char *) mark - skb->data);
	}
}
/**
* nlmsg_cancel - Cancel construction of a netlink message
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
*
* Removes the complete netlink message including all
* attributes from the socket buffer again.
*/
static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlmsg_trim(skb, nlh);
}
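/*
 * Example of filling one message into an skb. This is an illustrative
 * sketch only: MY_MSG_TYPE, MYATTR_NAME, MYATTR_COUNT and the local
 * variables are hypothetical names, not part of this header.
 *
 *	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 0, 0);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	if (nla_put_string(skb, MYATTR_NAME, name) ||
 *	    nla_put_u32(skb, MYATTR_COUNT, count)) {
 *		nlmsg_cancel(skb, nlh);
 *		return -EMSGSIZE;
 *	}
 *	nlmsg_end(skb, nlh);
 *	return 0;
 */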
/**
* nlmsg_free - drop a netlink message
* @skb: socket buffer of netlink message
*/
static inline void nlmsg_free(struct sk_buff *skb)
{
	kfree_skb(skb);
}
/**
* nlmsg_consume - free a netlink message
* @skb: socket buffer of netlink message
*/
static inline void nlmsg_consume(struct sk_buff *skb)
{
consume_skb(skb);
}
/**
* nlmsg_multicast_filtered - multicast a netlink message with filter function
* @sk: netlink socket to spread messages to
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: multicast group id
* @flags: allocation flags
* @filter: filter function
* @filter_data: filter function private data
*
* Return: 0 on success, negative error code for failure.
*/
static inline int nlmsg_multicast_filtered(struct sock *sk, struct sk_buff *skb,
u32 portid, unsigned int group,
gfp_t flags,
netlink_filter_fn filter,
void *filter_data)
{
int err;
NETLINK_CB(skb).dst_group = group;
err = netlink_broadcast_filtered(sk, skb, portid, group, flags,
filter, filter_data);
if (err > 0)
err = 0;
return err;
}
/**
* nlmsg_multicast - multicast a netlink message
* @sk: netlink socket to spread messages to
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: multicast group id
* @flags: allocation flags
*/
static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags)
{
return nlmsg_multicast_filtered(sk, skb, portid, group, flags,
NULL, NULL);
}
/**
* nlmsg_unicast - unicast a netlink message
* @sk: netlink socket to spread message to
* @skb: netlink message as socket buffer
* @portid: netlink portid of the destination socket
*/
static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
{
int err;
err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
if (err > 0)
err = 0;
return err;
}
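/*
 * Example of sending a notification. This is an illustrative sketch only:
 * my_sk, MY_MCGRP and my_fill_msg() are hypothetical names, not part of
 * this header.
 *
 *	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	err = my_fill_msg(skb, portid, seq);
 *	if (err < 0) {
 *		nlmsg_free(skb);
 *		return err;
 *	}
 *	return nlmsg_multicast(my_sk, skb, 0, MY_MCGRP, GFP_KERNEL);
 *
 * nlmsg_multicast() consumes the skb even when delivery fails, so the
 * caller only frees the skb when building the message fails.
 */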
/**
* nlmsg_for_each_msg - iterate over a stream of messages
* @pos: loop counter, set to current message
* @head: head of message stream
* @len: length of message stream
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nlmsg_for_each_msg(pos, head, len, rem) \
for (pos = head, rem = len; \
nlmsg_ok(pos, rem); \
pos = nlmsg_next(pos, &(rem)))
/**
* nl_dump_check_consistent - check if sequence is consistent and advertise if not
* @cb: netlink callback structure that stores the sequence number
* @nlh: netlink message header to write the flag to
*
* This function checks if the sequence (generation) number changed during dump
* and if it did, advertises it in the netlink message header.
*
* The correct way to use it is to set cb->seq to the generation counter when
* all locks for dumping have been acquired, and then call this function for
* each message that is generated.
*
* Note that due to initialisation concerns, 0 is an invalid sequence number
* and must not be used by code that uses this functionality.
*/
static inline void
nl_dump_check_consistent(struct netlink_callback *cb,
struct nlmsghdr *nlh)
{
if (cb->prev_seq && cb->seq != cb->prev_seq)
nlh->nlmsg_flags |= NLM_F_DUMP_INTR;
cb->prev_seq = cb->seq;
}
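/*
 * Example of a dump callback using nl_dump_check_consistent(). This is an
 * illustrative sketch only: my_table, its lock and gen_seq counter,
 * MY_MSG_TYPE and MYATTR_COUNT are hypothetical names, not part of this
 * header.
 *
 *	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		struct nlmsghdr *nlh;
 *
 *		mutex_lock(&my_table->lock);
 *		cb->seq = my_table->gen_seq;
 *		nlh = nlmsg_put_answer(skb, cb, MY_MSG_TYPE, 0, NLM_F_MULTI);
 *		if (!nlh)
 *			goto out;
 *		nl_dump_check_consistent(cb, nlh);
 *		if (nla_put_u32(skb, MYATTR_COUNT, my_table->count)) {
 *			nlmsg_cancel(skb, nlh);
 *			goto out;
 *		}
 *		nlmsg_end(skb, nlh);
 * out:
 *		mutex_unlock(&my_table->lock);
 *		return skb->len;
 *	}
 */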
/**************************************************************************
* Netlink Attributes
**************************************************************************/
/**
* nla_attr_size - length of attribute not including padding
* @payload: length of payload
*/
static inline int nla_attr_size(int payload)
{
return NLA_HDRLEN + payload;
}
/**
* nla_total_size - total length of attribute including padding
* @payload: length of payload
*/
static inline int nla_total_size(int payload)
{
return NLA_ALIGN(nla_attr_size(payload));
}
/**
* nla_padlen - length of padding at the tail of attribute
* @payload: length of payload
*/
static inline int nla_padlen(int payload)
{
return nla_total_size(payload) - nla_attr_size(payload);
}
/**
* nla_type - attribute type
* @nla: netlink attribute
*/
static inline int nla_type(const struct nlattr *nla)
{
return nla->nla_type & NLA_TYPE_MASK;
}
/**
* nla_data - head of payload
* @nla: netlink attribute
*/
static inline void *nla_data(const struct nlattr *nla)
{
return (char *) nla + NLA_HDRLEN;
}
/**
* nla_len - length of payload
* @nla: netlink attribute
*/
static inline u16 nla_len(const struct nlattr *nla)
{
return nla->nla_len - NLA_HDRLEN;
}
/**
* nla_ok - check if the netlink attribute fits into the remaining bytes
* @nla: netlink attribute
* @remaining: number of bytes remaining in attribute stream
*/
static inline int nla_ok(const struct nlattr *nla, int remaining)
{
return remaining >= (int) sizeof(*nla) && nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
}
/**
* nla_next - next netlink attribute in attribute stream
* @nla: netlink attribute
* @remaining: number of bytes remaining in attribute stream
*
* Returns: the next netlink attribute in the attribute stream and
* decrements remaining by the size of the current attribute.
*/
static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
{
unsigned int totlen = NLA_ALIGN(nla->nla_len);
*remaining -= totlen;
return (struct nlattr *) ((char *) nla + totlen);
}
/**
* nla_find_nested - find attribute in a set of nested attributes
* @nla: attribute containing the nested attributes
* @attrtype: type of attribute to look for
*
 * Returns: the first attribute which matches the specified type, or NULL
 * if no matching attribute is found.
*/
static inline struct nlattr *
nla_find_nested(const struct nlattr *nla, int attrtype)
{
return nla_find(nla_data(nla), nla_len(nla), attrtype);
}
/**
* nla_parse_nested - parse nested attributes
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @nla: attribute containing the nested attributes
* @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse()
*/
static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
const struct nlattr *nla,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
if (!(nla->nla_type & NLA_F_NESTED)) {
NL_SET_ERR_MSG_ATTR(extack, nla, "NLA_F_NESTED is missing");
return -EINVAL;
}
return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
NL_VALIDATE_STRICT, extack);
}
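/*
 * Example of parsing a nest of attributes. This is an illustrative sketch
 * only: tb[], MYATTR_NESTED, MYATTR_NESTED_MAX and my_nested_policy are
 * hypothetical names, not part of this header.
 *
 *	struct nlattr *nest[MYATTR_NESTED_MAX + 1];
 *	int err;
 *
 *	if (tb[MYATTR_NESTED]) {
 *		err = nla_parse_nested(nest, MYATTR_NESTED_MAX,
 *				       tb[MYATTR_NESTED], my_nested_policy,
 *				       extack);
 *		if (err < 0)
 *			return err;
 *	}
 */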
/**
* nla_parse_nested_deprecated - parse nested attributes
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @nla: attribute containing the nested attributes
* @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated()
*/
static inline int nla_parse_nested_deprecated(struct nlattr *tb[], int maxtype,
const struct nlattr *nla,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
NL_VALIDATE_LIBERAL, extack);
}
/**
* nla_put_u8 - Add a u8 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{
/* temporary variables to work around GCC PR81715 with asan-stack=1 */
	u8 tmp = value;
	return nla_put(skb, attrtype, sizeof(u8), &tmp);
}
/**
* nla_put_u16 - Add a u16 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
u16 tmp = value;
return nla_put(skb, attrtype, sizeof(u16), &tmp);
}
/**
* nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
{
__be16 tmp = value;
return nla_put(skb, attrtype, sizeof(__be16), &tmp);
}
/**
* nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
{
__be16 tmp = value;
return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
* nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
{
__le16 tmp = value;
return nla_put(skb, attrtype, sizeof(__le16), &tmp);
}
/**
* nla_put_u32 - Add a u32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
u32 tmp = value;
return nla_put(skb, attrtype, sizeof(u32), &tmp);
}
/**
* nla_put_uint - Add a variable-size unsigned int to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_uint(struct sk_buff *skb, int attrtype, u64 value)
{
u64 tmp64 = value;
u32 tmp32 = value;
	if (tmp64 == tmp32)
		return nla_put_u32(skb, attrtype, tmp32);
	return nla_put(skb, attrtype, sizeof(u64), &tmp64);
}
/**
* nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
{
__be32 tmp = value;
return nla_put(skb, attrtype, sizeof(__be32), &tmp);
}
/**
* nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
{
__be32 tmp = value;
return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
* nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
{
__le32 tmp = value;
return nla_put(skb, attrtype, sizeof(__le32), &tmp);
}
/**
* nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
* @padattr: attribute type for the padding
*/
static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
u64 value, int padattr)
{
u64 tmp = value;
return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
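/*
 * Example of emitting a 64-bit attribute with explicit padding. This is an
 * illustrative sketch only: MYATTR_BYTES, MYATTR_PAD and stats are
 * hypothetical; MYATTR_PAD is an attribute type reserved solely for the
 * padding attribute and ignored by the receiver.
 *
 *	if (nla_put_u64_64bit(skb, MYATTR_BYTES, stats.bytes, MYATTR_PAD))
 *		return -EMSGSIZE;
 */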
/**
* nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
* @padattr: attribute type for the padding
*/
static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
__be64 tmp = value;
return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
}
/**
* nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
* @padattr: attribute type for the padding
*/
static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
__be64 tmp = value;
return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
padattr);
}
/**
* nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
* @padattr: attribute type for the padding
*/
static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
int padattr)
{
__le64 tmp = value;
return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
}
/**
* nla_put_s8 - Add a s8 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
{
s8 tmp = value;
return nla_put(skb, attrtype, sizeof(s8), &tmp);
}
/**
* nla_put_s16 - Add a s16 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
{
s16 tmp = value;
return nla_put(skb, attrtype, sizeof(s16), &tmp);
}
/**
* nla_put_s32 - Add a s32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
{
s32 tmp = value;
return nla_put(skb, attrtype, sizeof(s32), &tmp);
}
/**
* nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
* @padattr: attribute type for the padding
*/
static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
int padattr)
{
s64 tmp = value;
return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
}
/**
* nla_put_sint - Add a variable-size signed int to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
*/
static inline int nla_put_sint(struct sk_buff *skb, int attrtype, s64 value)
{
s64 tmp64 = value;
s32 tmp32 = value;
if (tmp64 == tmp32)
return nla_put_s32(skb, attrtype, tmp32);
return nla_put(skb, attrtype, sizeof(s64), &tmp64);
}
/**
* nla_put_string - Add a string netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @str: NUL terminated string
*/
static inline int nla_put_string(struct sk_buff *skb, int attrtype,
const char *str)
{
return nla_put(skb, attrtype, strlen(str) + 1, str);
}
/**
* nla_put_flag - Add a flag netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
*/
static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
{
return nla_put(skb, attrtype, 0, NULL);
}
/**
* nla_put_msecs - Add a msecs netlink attribute to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @njiffies: number of jiffies to convert to msecs
* @padattr: attribute type for the padding
*/
static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
unsigned long njiffies, int padattr)
{
u64 tmp = jiffies_to_msecs(njiffies);
return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
/**
* nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
* buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @addr: IPv4 address
*/
static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
__be32 addr)
{
__be32 tmp = addr;
return nla_put_be32(skb, attrtype, tmp);
}
/**
* nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
* buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @addr: IPv6 address
*/
static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
const struct in6_addr *addr)
{
return nla_put(skb, attrtype, sizeof(*addr), addr);
}
/**
* nla_put_bitfield32 - Add a bitfield32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: value carrying bits
* @selector: selector of valid bits
*/
static inline int nla_put_bitfield32(struct sk_buff *skb, int attrtype,
__u32 value, __u32 selector)
{
struct nla_bitfield32 tmp = { value, selector, };
return nla_put(skb, attrtype, sizeof(tmp), &tmp);
}
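/*
 * Example of a bitfield32 attribute. This is an illustrative sketch only:
 * MYATTR_FLAGS, MY_FLAG_A and MY_FLAG_B are hypothetical. The selector
 * names the bits the sender wants to change and the value carries their
 * new state, so the call below sets MY_FLAG_A and clears MY_FLAG_B while
 * leaving all other bits untouched:
 *
 *	err = nla_put_bitfield32(skb, MYATTR_FLAGS,
 *				 MY_FLAG_A, MY_FLAG_A | MY_FLAG_B);
 *
 * The receiver reads the pair back with nla_get_bitfield32() and applies
 * only the bits present in the selector.
 */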
/**
* nla_get_u32 - return payload of u32 attribute
* @nla: u32 netlink attribute
*/
static inline u32 nla_get_u32(const struct nlattr *nla)
{
return *(u32 *) nla_data(nla);
}
/**
* nla_get_u32_default - return payload of u32 attribute or default
* @nla: u32 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline u32 nla_get_u32_default(const struct nlattr *nla, u32 defvalue)
{
if (!nla)
return defvalue;
return nla_get_u32(nla);
}
/**
* nla_get_be32 - return payload of __be32 attribute
* @nla: __be32 netlink attribute
*/
static inline __be32 nla_get_be32(const struct nlattr *nla)
{
return *(__be32 *) nla_data(nla);
}
/**
* nla_get_be32_default - return payload of be32 attribute or default
* @nla: __be32 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline __be32 nla_get_be32_default(const struct nlattr *nla,
__be32 defvalue)
{
if (!nla)
return defvalue;
return nla_get_be32(nla);
}
/**
* nla_get_le32 - return payload of __le32 attribute
* @nla: __le32 netlink attribute
*/
static inline __le32 nla_get_le32(const struct nlattr *nla)
{
return *(__le32 *) nla_data(nla);
}
/**
* nla_get_le32_default - return payload of le32 attribute or default
* @nla: __le32 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline __le32 nla_get_le32_default(const struct nlattr *nla,
__le32 defvalue)
{
if (!nla)
return defvalue;
return nla_get_le32(nla);
}
/**
* nla_get_u16 - return payload of u16 attribute
* @nla: u16 netlink attribute
*/
static inline u16 nla_get_u16(const struct nlattr *nla)
{
return *(u16 *) nla_data(nla);
}
/**
* nla_get_u16_default - return payload of u16 attribute or default
* @nla: u16 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline u16 nla_get_u16_default(const struct nlattr *nla, u16 defvalue)
{
if (!nla)
return defvalue;
return nla_get_u16(nla);
}
/**
* nla_get_be16 - return payload of __be16 attribute
* @nla: __be16 netlink attribute
*/
static inline __be16 nla_get_be16(const struct nlattr *nla)
{
return *(__be16 *) nla_data(nla);
}
/**
* nla_get_be16_default - return payload of be16 attribute or default
* @nla: __be16 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline __be16 nla_get_be16_default(const struct nlattr *nla,
__be16 defvalue)
{
if (!nla)
return defvalue;
return nla_get_be16(nla);
}
/**
* nla_get_le16 - return payload of __le16 attribute
* @nla: __le16 netlink attribute
*/
static inline __le16 nla_get_le16(const struct nlattr *nla)
{
return *(__le16 *) nla_data(nla);
}
/**
* nla_get_le16_default - return payload of le16 attribute or default
* @nla: __le16 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline __le16 nla_get_le16_default(const struct nlattr *nla,
__le16 defvalue)
{
if (!nla)
return defvalue;
return nla_get_le16(nla);
}
/**
* nla_get_u8 - return payload of u8 attribute
* @nla: u8 netlink attribute
*/
static inline u8 nla_get_u8(const struct nlattr *nla)
{
return *(u8 *) nla_data(nla);
}
/**
* nla_get_u8_default - return payload of u8 attribute or default
* @nla: u8 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline u8 nla_get_u8_default(const struct nlattr *nla, u8 defvalue)
{
if (!nla)
return defvalue;
return nla_get_u8(nla);
}
/**
* nla_get_u64 - return payload of u64 attribute
* @nla: u64 netlink attribute
*/
static inline u64 nla_get_u64(const struct nlattr *nla)
{
u64 tmp;
nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}
/**
* nla_get_u64_default - return payload of u64 attribute or default
* @nla: u64 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline u64 nla_get_u64_default(const struct nlattr *nla, u64 defvalue)
{
if (!nla)
return defvalue;
return nla_get_u64(nla);
}
/**
* nla_get_uint - return payload of uint attribute
* @nla: uint netlink attribute
*/
static inline u64 nla_get_uint(const struct nlattr *nla)
{
	if (nla_len(nla) == sizeof(u32))
		return nla_get_u32(nla);
return nla_get_u64(nla);
}
/**
* nla_get_uint_default - return payload of uint attribute or default
* @nla: uint netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline u64 nla_get_uint_default(const struct nlattr *nla, u64 defvalue)
{
if (!nla)
return defvalue;
return nla_get_uint(nla);
}
/**
* nla_get_be64 - return payload of __be64 attribute
* @nla: __be64 netlink attribute
*/
static inline __be64 nla_get_be64(const struct nlattr *nla)
{
__be64 tmp;
nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}
/**
* nla_get_be64_default - return payload of be64 attribute or default
* @nla: __be64 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline __be64 nla_get_be64_default(const struct nlattr *nla,
__be64 defvalue)
{
if (!nla)
return defvalue;
return nla_get_be64(nla);
}
/**
* nla_get_le64 - return payload of __le64 attribute
* @nla: __le64 netlink attribute
*/
static inline __le64 nla_get_le64(const struct nlattr *nla)
{
return *(__le64 *) nla_data(nla);
}
/**
* nla_get_le64_default - return payload of le64 attribute or default
* @nla: __le64 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline __le64 nla_get_le64_default(const struct nlattr *nla,
__le64 defvalue)
{
if (!nla)
return defvalue;
return nla_get_le64(nla);
}
/**
* nla_get_s32 - return payload of s32 attribute
* @nla: s32 netlink attribute
*/
static inline s32 nla_get_s32(const struct nlattr *nla)
{
return *(s32 *) nla_data(nla);
}
/**
* nla_get_s32_default - return payload of s32 attribute or default
* @nla: s32 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline s32 nla_get_s32_default(const struct nlattr *nla, s32 defvalue)
{
if (!nla)
return defvalue;
return nla_get_s32(nla);
}
/**
* nla_get_s16 - return payload of s16 attribute
* @nla: s16 netlink attribute
*/
static inline s16 nla_get_s16(const struct nlattr *nla)
{
return *(s16 *) nla_data(nla);
}
/**
* nla_get_s16_default - return payload of s16 attribute or default
* @nla: s16 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline s16 nla_get_s16_default(const struct nlattr *nla, s16 defvalue)
{
if (!nla)
return defvalue;
return nla_get_s16(nla);
}
/**
* nla_get_s8 - return payload of s8 attribute
* @nla: s8 netlink attribute
*/
static inline s8 nla_get_s8(const struct nlattr *nla)
{
return *(s8 *) nla_data(nla);
}
/**
* nla_get_s8_default - return payload of s8 attribute or default
* @nla: s8 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline s8 nla_get_s8_default(const struct nlattr *nla, s8 defvalue)
{
if (!nla)
return defvalue;
return nla_get_s8(nla);
}
/**
* nla_get_s64 - return payload of s64 attribute
* @nla: s64 netlink attribute
*/
static inline s64 nla_get_s64(const struct nlattr *nla)
{
s64 tmp;
nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}
/**
* nla_get_s64_default - return payload of s64 attribute or default
* @nla: s64 netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline s64 nla_get_s64_default(const struct nlattr *nla, s64 defvalue)
{
if (!nla)
return defvalue;
return nla_get_s64(nla);
}
/**
 * nla_get_sint - return payload of sint attribute
 * @nla: sint netlink attribute
*/
static inline s64 nla_get_sint(const struct nlattr *nla)
{
	if (nla_len(nla) == sizeof(s32))
		return nla_get_s32(nla);
return nla_get_s64(nla);
}
/**
* nla_get_sint_default - return payload of sint attribute or default
* @nla: sint netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline s64 nla_get_sint_default(const struct nlattr *nla, s64 defvalue)
{
if (!nla)
return defvalue;
return nla_get_sint(nla);
}
/**
* nla_get_flag - return payload of flag attribute
* @nla: flag netlink attribute
*/
static inline int nla_get_flag(const struct nlattr *nla)
{
return !!nla;
}
/**
* nla_get_msecs - return payload of msecs attribute
* @nla: msecs netlink attribute
*
 * Returns: the payload, interpreted as milliseconds, converted to jiffies.
*/
static inline unsigned long nla_get_msecs(const struct nlattr *nla)
{
u64 msecs = nla_get_u64(nla);
return msecs_to_jiffies((unsigned long) msecs);
}
/**
* nla_get_msecs_default - return payload of msecs attribute or default
* @nla: msecs netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline unsigned long nla_get_msecs_default(const struct nlattr *nla,
unsigned long defvalue)
{
if (!nla)
return defvalue;
return nla_get_msecs(nla);
}
/**
* nla_get_in_addr - return payload of IPv4 address attribute
* @nla: IPv4 address netlink attribute
*/
static inline __be32 nla_get_in_addr(const struct nlattr *nla)
{
return *(__be32 *) nla_data(nla);
}
/**
* nla_get_in_addr_default - return payload of be32 attribute or default
* @nla: IPv4 address netlink attribute, may be %NULL
* @defvalue: default value to use if @nla is %NULL
*
* Return: the value of the attribute, or the default value if not present
*/
static inline __be32 nla_get_in_addr_default(const struct nlattr *nla,
__be32 defvalue)
{
if (!nla)
return defvalue;
return nla_get_in_addr(nla);
}
/**
* nla_get_in6_addr - return payload of IPv6 address attribute
* @nla: IPv6 address netlink attribute
*/
static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
{
struct in6_addr tmp;
nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}
/**
 * nla_get_bitfield32 - return payload of a 32-bit bitfield attribute
* @nla: nla_bitfield32 attribute
*/
static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla)
{
struct nla_bitfield32 tmp;
nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}
/**
* nla_memdup - duplicate attribute memory (kmemdup)
* @src: netlink attribute to duplicate from
* @gfp: GFP mask
*/
static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp)
{
return kmemdup_noprof(nla_data(src), nla_len(src), gfp);
}
#define nla_memdup(...) alloc_hooks(nla_memdup_noprof(__VA_ARGS__))
/**
* nla_nest_start_noflag - Start a new level of nested attributes
* @skb: socket buffer to add attributes to
* @attrtype: attribute type of container
*
 * This function exists for backward compatibility with APIs which never
 * marked their nest attributes with the NLA_F_NESTED flag. New APIs should
 * use nla_nest_start(), which sets the flag.
*
* Returns: the container attribute or NULL on error
*/
static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
int attrtype)
{
	struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
	if (nla_put(skb, attrtype, 0, NULL) < 0)
		return NULL;
return start;
}
/**
* nla_nest_start - Start a new level of nested attributes, with NLA_F_NESTED
* @skb: socket buffer to add attributes to
* @attrtype: attribute type of container
*
* Unlike nla_nest_start_noflag(), mark the nest attribute with NLA_F_NESTED
* flag. This is the preferred function to use in new code.
*
* Returns: the container attribute or NULL on error
*/
static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
{
return nla_nest_start_noflag(skb, attrtype | NLA_F_NESTED);
}
/**
* nla_nest_end - Finalize nesting of attributes
* @skb: socket buffer the attributes are stored in
* @start: container attribute
*
 * Corrects the container attribute header to include all the
 * appended attributes.
*
* Returns: the total data length of the skb.
*/
static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
{
	start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
	return skb->len;
}
/**
* nla_nest_cancel - Cancel nesting of attributes
* @skb: socket buffer the message is stored in
* @start: container attribute
*
 * Removes the container attribute including all nested
 * attributes from the socket buffer again.
*/
static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
{
nlmsg_trim(skb, start);
}
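/*
 * Example of emitting a nest of attributes. This is an illustrative sketch
 * only: MYATTR_STATS, MYATTR_STATS_BYTES, MYATTR_STATS_PKTS, MYATTR_PAD and
 * the local variables are hypothetical names, not part of this header.
 *
 *	struct nlattr *nest;
 *
 *	nest = nla_nest_start(skb, MYATTR_STATS);
 *	if (!nest)
 *		return -EMSGSIZE;
 *	if (nla_put_u64_64bit(skb, MYATTR_STATS_BYTES, bytes, MYATTR_PAD) ||
 *	    nla_put_u32(skb, MYATTR_STATS_PKTS, pkts)) {
 *		nla_nest_cancel(skb, nest);
 *		return -EMSGSIZE;
 *	}
 *	nla_nest_end(skb, nest);
 */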
/**
* nla_put_empty_nest - Create an empty nest
* @skb: socket buffer the message is stored in
* @attrtype: attribute type of the container
*
* This function is a helper for creating empty nests.
*
* Returns: 0 when successful or -EMSGSIZE on failure.
*/
static inline int nla_put_empty_nest(struct sk_buff *skb, int attrtype)
{
return nla_nest_start(skb, attrtype) ? 0 : -EMSGSIZE;
}
/**
* __nla_validate_nested - Validate a stream of nested attributes
* @start: container attribute
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the nested attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
* ignored. See documentation of struct nla_policy for more details.
*
* Returns: 0 on success or a negative error code.
*/
static inline int __nla_validate_nested(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack)
{
return __nla_validate(nla_data(start), nla_len(start), maxtype, policy,
validate, extack);
}
static inline int
nla_validate_nested(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate_nested(start, maxtype, policy,
NL_VALIDATE_STRICT, extack);
}
static inline int
nla_validate_nested_deprecated(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate_nested(start, maxtype, policy,
NL_VALIDATE_LIBERAL, extack);
}
/**
* nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
* @skb: socket buffer the message is stored in
*
* Return: true if padding is needed to align the next attribute (nla_data()) to
* a 64-bit aligned area.
*/
static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* The nlattr header is 4 bytes in size, that's why we test
* if the skb->data _is_ aligned. A NOP attribute, plus
* nlattr header for next attribute, will make nla_data()
* 8-byte aligned.
*/
if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
return true;
#endif
return false;
}
/**
* nla_align_64bit - 64-bit align the nla_data() of next attribute
* @skb: socket buffer the message is stored in
* @padattr: attribute type for the padding
*
* Conditionally emit a padding netlink attribute in order to make
* the next attribute we emit have a 64-bit aligned nla_data() area.
* This will only be done in architectures which do not have
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
*
* Returns: zero on success or a negative error code.
*/
static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
{
if (nla_need_padding_for_64bit(skb) &&
!nla_reserve(skb, padattr, 0))
return -EMSGSIZE;
return 0;
}
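/*
* Example (illustrative sketch, not part of the original header): emitting an
* attribute whose payload should be readable as an aligned u64. MYATTR_STAT
* and MYATTR_PAD are hypothetical attribute types; the pad attribute is only
* emitted on architectures without efficient unaligned access.
*
* if (nla_align_64bit(skb, MYATTR_PAD))
* return -EMSGSIZE;
* if (nla_put(skb, MYATTR_STAT, sizeof(u64), &stat))
* return -EMSGSIZE;
*/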
/**
* nla_total_size_64bit - total length of attribute including padding
* @payload: length of payload
*/
static inline int nla_total_size_64bit(int payload)
{
return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ NLA_ALIGN(nla_attr_size(0))
#endif
;
}
/**
* nla_for_each_attr - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @head: head of attribute stream
* @len: length of attribute stream
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nla_for_each_attr(pos, head, len, rem) \
for (pos = head, rem = len; \
nla_ok(pos, rem); \
pos = nla_next(pos, &(rem)))
/**
* nla_for_each_attr_type - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @type: required attribute type for @pos
* @head: head of attribute stream
* @len: length of attribute stream
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nla_for_each_attr_type(pos, type, head, len, rem) \
nla_for_each_attr(pos, head, len, rem) \
if (nla_type(pos) == type)
/**
* nla_for_each_nested - iterate over nested attributes
* @pos: loop counter, set to current attribute
* @nla: attribute containing the nested attributes
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nla_for_each_nested(pos, nla, rem) \
nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
/**
* nla_for_each_nested_type - iterate over nested attributes
* @pos: loop counter, set to current attribute
* @type: required attribute type for @pos
* @nla: attribute containing the nested attributes
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nla_for_each_nested_type(pos, type, nla, rem) \
nla_for_each_nested(pos, nla, rem) \
if (nla_type(pos) == type)
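/*
* Example (illustrative sketch, not part of the original header): walking the
* attributes nested inside a container attribute. nest_attr, MYATTR_ENTRY and
* handle_entry() are hypothetical.
*
* struct nlattr *pos;
* int rem;
*
* nla_for_each_nested_type(pos, MYATTR_ENTRY, nest_attr, rem)
* handle_entry(nla_data(pos), nla_len(pos));
*
* On exit, rem is left holding any trailing bytes that did not form a
* complete attribute.
*/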
/**
* nla_is_last - Test if attribute is last in stream
* @nla: attribute to test
* @rem: bytes remaining in stream
*/
static inline bool nla_is_last(const struct nlattr *nla, int rem)
{
return nla->nla_len == rem;
}
void nla_get_range_unsigned(const struct nla_policy *pt,
struct netlink_range_validation *range);
void nla_get_range_signed(const struct nla_policy *pt,
struct netlink_range_validation_signed *range);
struct netlink_policy_dump_state;
int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
const struct nla_policy *policy,
unsigned int maxtype);
int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
const struct nla_policy *policy,
unsigned int maxtype);
bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state);
int netlink_policy_dump_write(struct sk_buff *skb,
struct netlink_policy_dump_state *state);
int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt);
int netlink_policy_dump_write_attr(struct sk_buff *skb,
const struct nla_policy *pt,
int nestattr);
void netlink_policy_dump_free(struct netlink_policy_dump_state *state);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>
#include <uapi/linux/kernel.h>
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
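/*
* For example, BITS_TO_LONGS(65) is 2 on a 64-bit architecture (and 3 on a
* 32-bit one), BITS_TO_BYTES(10) is 2, and BYTES_TO_BITS(2) is 16.
*/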
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
* Defined here because those may be needed by architecture-specific static
* inlines.
*/
#include <asm-generic/bitops/generic-non-atomic.h>
/*
* Many architecture-specific non-atomic bitops contain inline asm code and due
* to that the compiler can't optimize them to compile-time expressions or
* constants. In contrast, generic_*() helpers are defined in pure C and
* compilers optimize them just as well.
* Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
* equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
* the arguments can be resolved at compile time. That expression itself is a
* constant and doesn't bring any functional changes to the rest of the cases.
* The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
* passing a bitmap from .bss or .data (-> `!!addr` is always true).
*/
#define bitop(op, nr, addr) \
((__builtin_constant_p(nr) && \
__builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
(uintptr_t)(addr) != (uintptr_t)NULL && \
__builtin_constant_p(*(const unsigned long *)(addr))) ? \
const##op(nr, addr) : op(nr, addr))
/*
* The following macros are non-atomic versions of their non-underscored
* counterparts.
*/
#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
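/*
* For example (illustrative), given the folding described above, the sequence
*
* unsigned long foo = 0;
* __set_bit(5, &foo);
*
* can be evaluated entirely at compile time: both the bit number and the
* contents of foo are constants, so the const___set_bit() variant is chosen
* and the result is equivalent to writing unsigned long foo = BIT(5);.
*/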
/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name) \
static_assert(__same_type(arch_##name, generic_##name) && \
__same_type(const_##name, generic_##name) && \
__same_type(_##name, generic_##name))
__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);
__check_bitop_pr(test_bit_acquire);
#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count)
{
int order;
order = fls(count);
return order; /* We could be slightly more clever with -1 here... */
}
static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
* rol64 - rotate a 64-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u64 rol64(__u64 word, unsigned int shift)
{
return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
/**
* ror64 - rotate a 64-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u64 ror64(__u64 word, unsigned int shift)
{
return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
* ror32 - rotate a 32-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u32 ror32(__u32 word, unsigned int shift)
{
return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
/**
* rol16 - rotate a 16-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u16 rol16(__u16 word, unsigned int shift)
{
return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
* ror16 - rotate a 16-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u16 ror16(__u16 word, unsigned int shift)
{
return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
* rol8 - rotate an 8-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u8 rol8(__u8 word, unsigned int shift)
{
return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
* ror8 - rotate an 8-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u8 ror8(__u8 word, unsigned int shift)
{
return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
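/*
* Worked examples for the rotate helpers (illustrative):
*
* rol32(0x12345678, 8) == 0x34567812
* ror32(0x12345678, 8) == 0x78123456
* rol8(0x81, 1) == 0x03
*
* Masking the shift count (e.g. "& 31" for the 32-bit variants) keeps the
* shift amounts inside the type's width, so rotating by 0 or by the full
* width does not trigger an undefined out-of-range shift.
*/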
/**
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
*
* This is safe to use for 16- and 8-bit types as well.
*/
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
}
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
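/*
* Worked example (illustrative): sign_extend32(0xa5, 7) treats bit 7 as the
* sign bit of an 8-bit quantity. shift = 31 - 7 = 24, the value becomes
* 0xa5000000 after the left shift, and the arithmetic right shift brings it
* back down as 0xffffffa5, i.e. -91.
*/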
static inline unsigned int fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
return fls64(l);
}
static inline int get_count_order(unsigned int count)
{
if (count == 0)
return -1;
return fls(--count);
}
/**
* get_count_order_long - get order after rounding @l up to power of 2
* @l: parameter
*
* It is the same as get_count_order() but with a long type parameter.
*/
static inline int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
return (int)fls_long(--l);
}
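/*
* Worked examples (illustrative): get_count_order(16) is 4 because 16 is
* already a power of two, while get_count_order(17) is 5 because 17 rounds up
* to 32. The decrement before fls() is what makes exact powers of two map to
* their own order.
*/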
/**
* parity8 - get the parity of an u8 value
* @value: the value to be examined
*
* Determine the parity of the u8 argument.
*
* Returns:
* 0 for even parity, 1 for odd parity
*
* Note: This function informs you about the current parity. Example to bail
* out when parity is odd:
*
* if (parity8(val) == 1)
* return -EBADMSG;
*
* If you need to calculate a parity bit, you need to draw the conclusion from
* this result yourself. Example to enforce odd parity, parity bit is bit 7:
*
* if (parity8(val) == 0)
* val ^= BIT(7);
*/
static inline int parity8(u8 val)
{
/*
* One explanation of this algorithm:
* https://funloop.org/codex/problem/parity/README.html
*/
val ^= val >> 4;
return (0x6996 >> (val & 0xf)) & 1;
}
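/*
* Worked example (illustrative): for val = 0xb2 (bits 1, 4, 5 and 7 set, so
* four set bits in total), val ^= val >> 4 folds the two nibbles into
* 0xb ^ 0x2 = 0x9 in the low nibble. Bit 9 of the constant 0x6996 is 0, so
* parity8(0xb2) returns 0 (even parity). 0x6996 acts as a 16-entry lookup
* table whose bit n is the parity of the nibble n.
*/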
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
* On 64-bit arches this is a synonym for __ffs().
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
static inline __attribute_const__ unsigned int __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
if (((u32)word) == 0UL)
return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
return __ffs((unsigned long)word);
}
/**
* fns - find N'th set bit in a word
* @word: The word to search
* @n: Bit to find
*/
static inline unsigned int fns(unsigned long word, unsigned int n)
{
while (word && n--)
word &= word - 1;
return word ? __ffs(word) : BITS_PER_LONG;
}
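/*
* Worked example (illustrative): for word = 0b10110 the set bits are at
* positions 1, 2 and 4, so fns(word, 0) == 1, fns(word, 2) == 4 and
* fns(word, 3) == BITS_PER_LONG because there is no fourth set bit.
*/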
/**
* assign_bit - Assign value to a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
* @value: the value to assign
*/
#define assign_bit(nr, addr, value) \
((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))
#define __assign_bit(nr, addr, value) \
((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))
/**
* __ptr_set_bit - Set bit in a pointer's value
* @nr: the bit to set
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* __ptr_set_bit(bit, &p);
*/
#define __ptr_set_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
__set_bit(nr, (unsigned long *)(addr)); \
})
/**
* __ptr_clear_bit - Clear bit in a pointer's value
* @nr: the bit to clear
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* __ptr_clear_bit(bit, &p);
*/
#define __ptr_clear_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
__clear_bit(nr, (unsigned long *)(addr)); \
})
/**
* __ptr_test_bit - Test bit in a pointer's value
* @nr: the bit to test
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* if (__ptr_test_bit(bit, &p)) {
* ...
* } else {
* ...
* }
*/
#define __ptr_test_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
test_bit(nr, (unsigned long *)(addr)); \
})
#ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits) \
({ \
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
old__ = READ_ONCE(*(ptr)); \
do { \
new__ = (old__ & ~mask__) | bits__; \
} while (!try_cmpxchg(ptr, &old__, new__)); \
\
old__; \
})
#endif
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test) \
({ \
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
old__ = READ_ONCE(*(ptr)); \
do { \
if (old__ & test__) \
break; \
new__ = old__ & ~clear__; \
} while (!try_cmpxchg(ptr, &old__, new__)); \
\
!(old__ & test__); \
})
#endif
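/*
* Example (illustrative sketch): set_mask_bits() atomically replaces the
* masked part of a word and returns the previous value, while
* bit_clear_unless() clears bits only if none of the test bits are set.
* obj, old and cleared below are hypothetical.
*
* old = set_mask_bits(&obj->flags, GENMASK(3, 0), 0x5);
* cleared = bit_clear_unless(&obj->flags, BIT(0), BIT(4));
*
* The second call clears bit 0 and evaluates to true unless bit 4 was set,
* in which case obj->flags is left untouched and the expression is false.
*/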
#endif /* __KERNEL__ */
#endif
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
* & Swedish University of Agricultural Sciences.
*
* Jens Laas <jens.laas@data.slu.se> Swedish University of
* Agricultural Sciences.
*
* Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
*
* This work is based on the LPC-trie which is originally described in:
*
* An experimental study of compression methods for dynamic tries
* Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
* https://www.csc.kth.se/~snilsson/software/dyntrie2/
*
* IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
* IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
*
* Code from fib_hash has been reused which includes the following header:
*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IPv4 FIB: lookup engine and maintenance routines.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Substantial contributions to this work come from:
*
* David S. Miller, <davem@davemloft.net>
* Stephen Hemminger <shemminger@osdl.org>
* Paul E. McKenney <paulmck@us.ibm.com>
* Patrick McHardy <kaber@trash.net>
*/
#include <linux/cache.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_wait.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/fib_notifier.h>
#include <trace/events/fib.h>
#include "fib_lookup.h"
static int call_fib_entry_notifier(struct notifier_block *nb,
enum fib_event_type event_type, u32 dst,
int dst_len, struct fib_alias *fa,
struct netlink_ext_ack *extack)
{
struct fib_entry_notifier_info info = {
.info.extack = extack,
.dst = dst,
.dst_len = dst_len,
.fi = fa->fa_info,
.dscp = fa->fa_dscp,
.type = fa->fa_type,
.tb_id = fa->tb_id,
};
return call_fib4_notifier(nb, event_type, &info.info);
}
static int call_fib_entry_notifiers(struct net *net,
enum fib_event_type event_type, u32 dst,
int dst_len, struct fib_alias *fa,
struct netlink_ext_ack *extack)
{
struct fib_entry_notifier_info info = {
.info.extack = extack,
.dst = dst,
.dst_len = dst_len,
.fi = fa->fa_info,
.dscp = fa->fa_dscp,
.type = fa->fa_type,
.tb_id = fa->tb_id,
};
return call_fib4_notifiers(net, event_type, &info.info);
}
#define MAX_STAT_DEPTH 32
#define KEYLENGTH (8*sizeof(t_key))
#define KEY_MAX ((t_key)~0)
typedef unsigned int t_key;
#define IS_TRIE(n) ((n)->pos >= KEYLENGTH)
#define IS_TNODE(n) ((n)->bits)
#define IS_LEAF(n) (!(n)->bits)
struct key_vector {
t_key key;
unsigned char pos; /* 2log(KEYLENGTH) bits needed */
unsigned char bits; /* 2log(KEYLENGTH) bits needed */
unsigned char slen;
union {
/* This list pointer is valid if (pos | bits) == 0 (LEAF) */
struct hlist_head leaf;
/* This array is valid if (pos | bits) > 0 (TNODE) */
DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode);
};
};
struct tnode {
struct rcu_head rcu;
t_key empty_children; /* KEYLENGTH bits needed */
t_key full_children; /* KEYLENGTH bits needed */
struct key_vector __rcu *parent;
struct key_vector kv[1];
#define tn_bits kv[0].bits
};
#define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n])
#define LEAF_SIZE TNODE_SIZE(1)
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
unsigned int gets;
unsigned int backtrack;
unsigned int semantic_match_passed;
unsigned int semantic_match_miss;
unsigned int null_node_hit;
unsigned int resize_node_skipped;
};
#endif
struct trie_stat {
unsigned int totdepth;
unsigned int maxdepth;
unsigned int tnodes;
unsigned int leaves;
unsigned int nullpointers;
unsigned int prefixes;
unsigned int nodesizes[MAX_STAT_DEPTH];
};
struct trie {
struct key_vector kv[1];
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats;
#endif
};
static struct key_vector *resize(struct trie *t, struct key_vector *tn);
static unsigned int tnode_free_size;
/*
* synchronize_rcu after call_rcu for outstanding dirty memory; it should be
* especially useful before resizing the root node with PREEMPT_NONE configs;
* the value was obtained experimentally, aiming to avoid visible slowdown.
*/
unsigned int sysctl_fib_sync_mem = 512 * 1024;
unsigned int sysctl_fib_sync_mem_min = 64 * 1024;
unsigned int sysctl_fib_sync_mem_max = 64 * 1024 * 1024;
static struct kmem_cache *fn_alias_kmem __ro_after_init;
static struct kmem_cache *trie_leaf_kmem __ro_after_init;
static inline struct tnode *tn_info(struct key_vector *kv)
{
return container_of(kv, struct tnode, kv[0]);
}
/* caller must hold RTNL */
#define node_parent(tn) rtnl_dereference(tn_info(tn)->parent)
#define get_child(tn, i) rtnl_dereference((tn)->tnode[i])
/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent)
#define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])
/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
{
if (n)
rcu_assign_pointer(tn_info(n)->parent, tp);
}
#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)
/* This provides us with the number of children in this node; in the case of
* a leaf this will return 0, meaning none of the children are accessible.
*/
static inline unsigned long child_length(const struct key_vector *tn)
{
return (1ul << tn->bits) & ~(1ul);
}
#define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)
static inline unsigned long get_index(t_key key, struct key_vector *kv)
{
unsigned long index = key ^ kv->key;
if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
return 0;
return index >> kv->pos;
}
/* To understand this stuff, an understanding of keys and all their bits is
* necessary. Every node in the trie has a key associated with it, but not
* all of the bits in that key are significant.
*
* Consider a node 'n' and its parent 'tp'.
*
* If n is a leaf, every bit in its key is significant. Its presence is
* necessitated by path compression, since during a tree traversal (when
* searching for a leaf - unless we are doing an insertion) we will completely
* ignore all skipped bits we encounter. Thus we need to verify, at the end of
* a potentially successful search, that we have indeed been walking the
* correct key path.
*
* Note that we can never "miss" the correct key in the tree if present by
* following the wrong path. Path compression ensures that segments of the key
* that are the same for all keys with a given prefix are skipped, but the
* skipped part *is* identical for each node in the subtrie below the skipped
* bit! trie_insert() in this implementation takes care of that.
*
* if n is an internal node - a 'tnode' here, the various parts of its key
* have many different meanings.
*
* Example:
* _________________________________________________________________
* | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
* -----------------------------------------------------------------
* 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
*
* _________________________________________________________________
* | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
* -----------------------------------------------------------------
* 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
*
* tp->pos = 22
* tp->bits = 3
* n->pos = 13
* n->bits = 4
*
* First, let's just ignore the bits that come before the parent tp, that is
* the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
* point we do not use them for anything.
*
* The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
* index into the parent's child array. That is, they will be used to find
* 'n' among tp's children.
*
* The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
* for the node n.
*
* All the bits we have seen so far are significant to the node n. The rest
* of the bits are really not needed or indeed known in n->key.
*
* The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
* n's child array, and will of course be different for each child.
*
* The rest of the bits, from 0 to (n->pos -1) - "u" - are completely unknown
* at this point.
*/
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;
static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
kfree_rcu(fa, rcu);
}
#define TNODE_VMALLOC_MAX \
ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
static void __node_free_rcu(struct rcu_head *head)
{
struct tnode *n = container_of(head, struct tnode, rcu);
if (!n->tn_bits)
kmem_cache_free(trie_leaf_kmem, n);
else
kvfree(n);
}
#define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)
static struct tnode *tnode_alloc(int bits)
{
size_t size;
/* verify bits is within bounds */
if (bits > TNODE_VMALLOC_MAX)
return NULL;
/* determine size and verify it is non-zero and didn't overflow */
size = TNODE_SIZE(1ul << bits);
if (size <= PAGE_SIZE)
return kzalloc(size, GFP_KERNEL);
else
return vzalloc(size);
}
static inline void empty_child_inc(struct key_vector *n)
{
tn_info(n)->empty_children++;
if (!tn_info(n)->empty_children)
tn_info(n)->full_children++;
}
static inline void empty_child_dec(struct key_vector *n)
{
if (!tn_info(n)->empty_children)
tn_info(n)->full_children--;
tn_info(n)->empty_children--;
}
static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
{
struct key_vector *l;
struct tnode *kv;
kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
if (!kv)
return NULL;
/* initialize key vector */
l = kv->kv;
l->key = key;
l->pos = 0;
l->bits = 0;
l->slen = fa->fa_slen;
/* link leaf to fib alias */
INIT_HLIST_HEAD(&l->leaf);
hlist_add_head(&fa->fa_list, &l->leaf);
return l;
}
static struct key_vector *tnode_new(t_key key, int pos, int bits)
{
unsigned int shift = pos + bits;
struct key_vector *tn;
struct tnode *tnode;
/* verify that bits and pos have their msb bits clear and that the values are valid */
BUG_ON(!bits || (shift > KEYLENGTH));
tnode = tnode_alloc(bits);
if (!tnode)
return NULL;
pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
sizeof(struct key_vector *) << bits);
if (bits == KEYLENGTH)
tnode->full_children = 1;
else
tnode->empty_children = 1ul << bits;
tn = tnode->kv;
tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
tn->pos = pos;
tn->bits = bits;
tn->slen = pos;
return tn;
}
/* Check whether a tnode 'n' is "full", i.e. it is an internal node
* and no bits are skipped. See discussion in dyntree paper p. 6
*/
static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
{
return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}
/* Add a child at position i overwriting the old value.
* Update the value of full_children and empty_children.
*/
static void put_child(struct key_vector *tn, unsigned long i,
struct key_vector *n)
{
struct key_vector *chi = get_child(tn, i);
int isfull, wasfull;
BUG_ON(i >= child_length(tn));
/* update emptyChildren, overflow into fullChildren */
if (!n && chi)
empty_child_inc(tn);
if (n && !chi)
empty_child_dec(tn);
/* update fullChildren */
wasfull = tnode_full(tn, chi);
isfull = tnode_full(tn, n);
if (wasfull && !isfull)
tn_info(tn)->full_children--;
else if (!wasfull && isfull)
tn_info(tn)->full_children++;
if (n && (tn->slen < n->slen))
tn->slen = n->slen;
rcu_assign_pointer(tn->tnode[i], n);
}
static void update_children(struct key_vector *tn)
{
unsigned long i;
/* update all of the child parent pointers */
for (i = child_length(tn); i;) {
struct key_vector *inode = get_child(tn, --i);
if (!inode)
continue;
/* Either update the children of a tnode that
* already belongs to us or update the child
* to point to ourselves.
*/
if (node_parent(inode) == tn)
update_children(inode);
else
node_set_parent(inode, tn);
}
}
static inline void put_child_root(struct key_vector *tp, t_key key,
struct key_vector *n)
{
if (IS_TRIE(tp))
rcu_assign_pointer(tp->tnode[0], n);
else
put_child(tp, get_index(key, tp), n);
}
static inline void tnode_free_init(struct key_vector *tn)
{
tn_info(tn)->rcu.next = NULL;
}
static inline void tnode_free_append(struct key_vector *tn,
struct key_vector *n)
{
tn_info(n)->rcu.next = tn_info(tn)->rcu.next;
tn_info(tn)->rcu.next = &tn_info(n)->rcu;
}
static void tnode_free(struct key_vector *tn)
{
struct callback_head *head = &tn_info(tn)->rcu;
while (head) {
head = head->next;
tnode_free_size += TNODE_SIZE(1ul << tn->bits);
node_free(tn);
tn = container_of(head, struct tnode, rcu)->kv;
}
if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
tnode_free_size = 0;
synchronize_net();
}
}
static struct key_vector *replace(struct trie *t,
struct key_vector *oldtnode,
struct key_vector *tn)
{
struct key_vector *tp = node_parent(oldtnode);
unsigned long i;
/* setup the parent pointer out of and back into this node */
NODE_INIT_PARENT(tn, tp);
put_child_root(tp, tn->key, tn);
/* update all of the child parent pointers */
update_children(tn);
/* all pointers should be clean so we are done */
tnode_free(oldtnode);
/* resize children now that oldtnode is freed */
for (i = child_length(tn); i;) {
struct key_vector *inode = get_child(tn, --i);
/* resize child node */
if (tnode_full(tn, inode))
tn = resize(t, inode);
}
return tp;
}
static struct key_vector *inflate(struct trie *t,
struct key_vector *oldtnode)
{
struct key_vector *tn;
unsigned long i;
t_key m;
pr_debug("In inflate\n");
tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
if (!tn)
goto notnode;
/* prepare oldtnode to be freed */
tnode_free_init(oldtnode);
/* Assemble all of the pointers in our cluster, in this case that
* represents all of the pointers out of our allocated nodes that
* point to existing tnodes and the links between our allocated
* nodes.
*/
for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
struct key_vector *inode = get_child(oldtnode, --i);
struct key_vector *node0, *node1;
unsigned long j, k;
/* An empty child */
if (!inode)
continue;
/* A leaf or an internal node with skipped bits */
if (!tnode_full(oldtnode, inode)) {
put_child(tn, get_index(inode->key, tn), inode);
continue;
}
/* drop the node in the old tnode free list */
tnode_free_append(oldtnode, inode);
/* An internal node with two children */
if (inode->bits == 1) {
put_child(tn, 2 * i + 1, get_child(inode, 1));
put_child(tn, 2 * i, get_child(inode, 0));
continue;
}
/* We will replace this node 'inode' with two new
* ones, 'node0' and 'node1', each with half of the
* original children. The two new nodes will have
* a position one bit further down the key and this
* means that the "significant" part of their keys
* (see the discussion near the top of this file)
* will differ by one bit, which will be "0" in
* node0's key and "1" in node1's key. Since we are
* moving the key position by one step, the bit that
* we are moving away from - the bit at position
* (tn->pos) - is the one that will differ between
* node0 and node1. So... we synthesize that bit in the
* two new keys.
*/
node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
if (!node1)
goto nomem;
node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);
tnode_free_append(tn, node1);
if (!node0)
goto nomem;
tnode_free_append(tn, node0);
/* populate child pointers in new nodes */
for (k = child_length(inode), j = k / 2; j;) {
put_child(node1, --j, get_child(inode, --k));
put_child(node0, j, get_child(inode, j));
put_child(node1, --j, get_child(inode, --k));
put_child(node0, j, get_child(inode, j));
}
/* link new nodes to parent */
NODE_INIT_PARENT(node1, tn);
NODE_INIT_PARENT(node0, tn);
/* link parent to nodes */
put_child(tn, 2 * i + 1, node1);
put_child(tn, 2 * i, node0);
}
/* setup the parent pointers into and out of this node */
return replace(t, oldtnode, tn);
nomem:
/* all pointers should be clean so we are done */
tnode_free(tn);
notnode:
return NULL;
}
static struct key_vector *halve(struct trie *t,
struct key_vector *oldtnode)
{
struct key_vector *tn;
unsigned long i;
pr_debug("In halve\n");
tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
if (!tn)
goto notnode;
/* prepare oldtnode to be freed */
tnode_free_init(oldtnode);
/* Assemble all of the pointers in our cluster, in this case that
* represents all of the pointers out of our allocated nodes that
* point to existing tnodes and the links between our allocated
* nodes.
*/
for (i = child_length(oldtnode); i;) {
struct key_vector *node1 = get_child(oldtnode, --i);
struct key_vector *node0 = get_child(oldtnode, --i);
struct key_vector *inode;
/* At least one of the children is empty */
if (!node1 || !node0) {
put_child(tn, i / 2, node1 ? : node0);
continue;
}
/* Two nonempty children */
inode = tnode_new(node0->key, oldtnode->pos, 1);
if (!inode)
goto nomem;
tnode_free_append(tn, inode);
/* initialize pointers out of node */
put_child(inode, 1, node1);
put_child(inode, 0, node0);
NODE_INIT_PARENT(inode, tn);
/* link parent to node */
put_child(tn, i / 2, inode);
}
/* setup the parent pointers into and out of this node */
return replace(t, oldtnode, tn);
nomem:
/* all pointers should be clean so we are done */
tnode_free(tn);
notnode:
return NULL;
}
static struct key_vector *collapse(struct trie *t,
struct key_vector *oldtnode)
{
struct key_vector *n, *tp;
unsigned long i;
/* scan the tnode looking for that one child that might still exist */
for (n = NULL, i = child_length(oldtnode); !n && i;)
n = get_child(oldtnode, --i);
/* compress one level */
tp = node_parent(oldtnode);
put_child_root(tp, oldtnode->key, n);
node_set_parent(n, tp);
/* drop dead node */
node_free(oldtnode);
return tp;
}
static unsigned char update_suffix(struct key_vector *tn)
{
unsigned char slen = tn->pos;
unsigned long stride, i;
unsigned char slen_max;
/* only vector 0 can have a suffix length greater than or equal to
* tn->pos + tn->bits, the second highest node will have a suffix
* length at most of tn->pos + tn->bits - 1
*/
slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen);
/* search through the list of children looking for nodes that might
* have a suffix greater than the one we currently have. This is
* why we start with a stride of 2 since a stride of 1 would
* represent the nodes with suffix length equal to tn->pos
*/
for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) {
struct key_vector *n = get_child(tn, i);
if (!n || (n->slen <= slen))
continue;
/* update stride and slen based on new value */
stride <<= (n->slen - slen);
slen = n->slen;
i &= ~(stride - 1);
/* stop searching if we have hit the maximum possible value */
if (slen >= slen_max)
break;
}
tn->slen = slen;
return slen;
}
/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
* the Helsinki University of Technology and Matti Tikkanen of Nokia
* Telecommunications, page 6:
* "A node is doubled if the ratio of non-empty children to all
* children in the *doubled* node is at least 'high'."
*
* 'high' in this instance is the variable 'inflate_threshold'. It
* is expressed as a percentage, so we multiply it with
* child_length() and instead of multiplying by 2 (since the
* child array will be doubled by inflate()) and multiplying
* the left-hand side by 100 (to handle the percentage thing) we
* multiply the left-hand side by 50.
*
* The left-hand side may look a bit weird: child_length(tn)
* - tn->empty_children is of course the number of non-null children
* in the current node. tn->full_children is the number of "full"
* children, that is non-null tnodes with a skip value of 0.
* All of those will be doubled in the resulting inflated tnode, so
* we just count them one extra time here.
*
* A clearer way to write this would be:
*
* to_be_doubled = tn->full_children;
* not_to_be_doubled = child_length(tn) - tn->empty_children -
* tn->full_children;
*
* new_child_length = child_length(tn) * 2;
*
* new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
* new_child_length;
* if (new_fill_factor >= inflate_threshold)
*
* ...and so on, though it would mess up the while () loop.
*
* anyway,
* 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
* inflate_threshold
*
* avoid a division:
* 100 * (not_to_be_doubled + 2*to_be_doubled) >=
* inflate_threshold * new_child_length
*
* expand not_to_be_doubled and to_be_doubled, and shorten:
* 100 * (child_length(tn) - tn->empty_children +
* tn->full_children) >= inflate_threshold * new_child_length
*
* expand new_child_length:
* 100 * (child_length(tn) - tn->empty_children +
* tn->full_children) >=
* inflate_threshold * child_length(tn) * 2
*
* shorten again:
* 50 * (tn->full_children + child_length(tn) -
* tn->empty_children) >= inflate_threshold *
* child_length(tn)
*
*/
static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
{
unsigned long used = child_length(tn);
unsigned long threshold = used;
/* Keep root node larger */
threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold;
used -= tn_info(tn)->empty_children;
used += tn_info(tn)->full_children;
/* if bits == KEYLENGTH then pos = 0, and will fail below */
return (used > 1) && tn->pos && ((50 * used) >= threshold);
}
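/* Worked example (illustrative): a non-root tnode with bits = 4 has 16 child
* slots. With 2 empty and 4 full children, used = 16 - 2 + 4 = 18 and
* threshold = 16 * 50 = 800, so 50 * used = 900 >= 800 and the node is
* doubled. With 4 empty and only 2 full children, used = 14 and
* 50 * 14 = 700 < 800, so the node is left as is.
*/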
static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
{
unsigned long used = child_length(tn);
unsigned long threshold = used;
/* Keep root node larger */
threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold;
used -= tn_info(tn)->empty_children;
/* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
}
static inline bool should_collapse(struct key_vector *tn)
{
unsigned long used = child_length(tn);
used -= tn_info(tn)->empty_children;
/* account for bits == KEYLENGTH case */
if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children)
used -= KEY_MAX;
/* One child or none, time to drop us from the trie */
return used < 2;
}
#define MAX_WORK 10
static struct key_vector *resize(struct trie *t, struct key_vector *tn)
{
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats = t->stats;
#endif
struct key_vector *tp = node_parent(tn);
unsigned long cindex = get_index(tn->key, tp);
int max_work = MAX_WORK;
pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
tn, inflate_threshold, halve_threshold);
/* track the tnode via the pointer from the parent instead of
* doing it ourselves. This way we can let RCU fully do its
* thing without us interfering
*/
BUG_ON(tn != get_child(tp, cindex));
/* Double as long as the resulting node has a number of
* nonempty nodes that are above the threshold.
*/
while (should_inflate(tp, tn) && max_work) {
tp = inflate(t, tn);
if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->resize_node_skipped);
#endif
break;
}
max_work--;
tn = get_child(tp, cindex);
}
/* update parent in case inflate failed */
tp = node_parent(tn);
/* Return if at least one inflate was run */
if (max_work != MAX_WORK)
return tp;
/* Halve as long as the number of empty children in this
* node is above threshold.
*/
while (should_halve(tp, tn) && max_work) {
tp = halve(t, tn);
if (!tp) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->resize_node_skipped);
#endif
break;
}
max_work--;
tn = get_child(tp, cindex);
}
/* Only one child remains */
if (should_collapse(tn))
return collapse(t, tn);
/* update parent in case halve failed */
return node_parent(tn);
}
static void node_pull_suffix(struct key_vector *tn, unsigned char slen)
{
unsigned char node_slen = tn->slen;
while ((node_slen > tn->pos) && (node_slen > slen)) {
slen = update_suffix(tn);
if (node_slen == slen)
break;
tn = node_parent(tn);
node_slen = tn->slen;
}
}
static void node_push_suffix(struct key_vector *tn, unsigned char slen)
{
while (tn->slen < slen) {
tn->slen = slen;
tn = node_parent(tn);
}
}
/* rcu_read_lock needs to be held by the caller on the read side */
static struct key_vector *fib_find_node(struct trie *t,
struct key_vector **tp, u32 key)
{
struct key_vector *pn, *n = t->kv;
unsigned long index = 0;
do {
pn = n;
n = get_child_rcu(n, index);
if (!n)
break;
index = get_cindex(key, n);
/* This bit of code is a bit tricky but it combines multiple
* checks into a single check. The prefix consists of the
* prefix plus zeros for the bits in the cindex. The index
* is the difference between the key and this value. From
* this we can actually derive several pieces of data.
* if (index >= (1ul << bits))
* we have a mismatch in skip bits and failed
* else
* we know the value is cindex
*
* This check is safe even if bits == KEYLENGTH due to the
* fact that we can only allocate a node with 32 bits if a
* long is greater than 32 bits.
*/
if (index >= (1ul << n->bits)) {
n = NULL;
break;
}
/* keep searching until we find a perfect match leaf or NULL */
} while (IS_TNODE(n));
*tp = pn;
return n;
}
/* Return the first fib alias matching DSCP with
* priority less than or equal to PRIO.
* If 'find_first' is set, return the first matching
* fib alias, regardless of DSCP and priority.
*/
static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
dscp_t dscp, u32 prio, u32 tb_id,
bool find_first)
{
struct fib_alias *fa;
if (!fah)
return NULL;
hlist_for_each_entry(fa, fah, fa_list) {
/* Avoid Sparse warning when using dscp_t in inequalities */
u8 __fa_dscp = inet_dscp_to_dsfield(fa->fa_dscp);
u8 __dscp = inet_dscp_to_dsfield(dscp);
if (fa->fa_slen < slen)
continue;
if (fa->fa_slen != slen)
break;
if (fa->tb_id > tb_id)
continue;
if (fa->tb_id != tb_id)
break;
if (find_first)
return fa;
if (__fa_dscp > __dscp)
continue;
if (fa->fa_info->fib_priority >= prio || __fa_dscp < __dscp)
return fa;
}
return NULL;
}
static struct fib_alias *
fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
{
u8 slen = KEYLENGTH - fri->dst_len;
struct key_vector *l, *tp;
struct fib_table *tb;
struct fib_alias *fa;
struct trie *t;
tb = fib_get_table(net, fri->tb_id);
if (!tb)
return NULL;
t = (struct trie *)tb->tb_data;
l = fib_find_node(t, &tp, be32_to_cpu(fri->dst));
if (!l)
return NULL;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
if (fa->fa_slen == slen && fa->tb_id == fri->tb_id &&
fa->fa_dscp == fri->dscp && fa->fa_info == fri->fi &&
fa->fa_type == fri->type)
return fa;
}
return NULL;
}
void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
{
u8 fib_notify_on_flag_change;
struct fib_alias *fa_match;
struct sk_buff *skb;
int err;
rcu_read_lock();
fa_match = fib_find_matching_alias(net, fri);
if (!fa_match)
goto out;
/* These are paired with the WRITE_ONCE() happening in this function.
* The reason is that we are only protected by RCU at this point.
*/
if (READ_ONCE(fa_match->offload) == fri->offload &&
READ_ONCE(fa_match->trap) == fri->trap &&
READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
WRITE_ONCE(fa_match->offload, fri->offload);
WRITE_ONCE(fa_match->trap, fri->trap);
fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);
/* 2 means send notifications only if offload_failed was changed. */
if (fib_notify_on_flag_change == 2 &&
READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
if (!fib_notify_on_flag_change)
goto out;
skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
if (!skb) {
err = -ENOBUFS;
goto errout;
}
err = fib_dump_info(skb, 0, 0, RTM_NEWROUTE, fri, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_IPV4_ROUTE, NULL, GFP_ATOMIC);
goto out;
errout:
rtnl_set_sk_err(net, RTNLGRP_IPV4_ROUTE, err);
out:
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(fib_alias_hw_flags_set);
static void trie_rebalance(struct trie *t, struct key_vector *tn)
{
while (!IS_TRIE(tn))
tn = resize(t, tn);
}
static int fib_insert_node(struct trie *t, struct key_vector *tp,
struct fib_alias *new, t_key key)
{
struct key_vector *n, *l;
l = leaf_new(key, new);
if (!l)
goto noleaf;
/* retrieve child from parent node */
n = get_child(tp, get_index(key, tp));
/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
*
* Add a new tnode here
* the first tnode needs some special handling
* leaves us in position for handling as case 3
*/
if (n) {
struct key_vector *tn;
tn = tnode_new(key, __fls(key ^ n->key), 1);
if (!tn)
goto notnode;
/* initialize routes out of node */
NODE_INIT_PARENT(tn, tp);
put_child(tn, get_index(key, tn) ^ 1, n);
/* start adding routes into the node */
put_child_root(tp, key, tn);
node_set_parent(n, tn);
/* parent now has a NULL spot where the leaf can go */
tp = tn;
}
/* Case 3: n is NULL, and will just insert a new leaf */
node_push_suffix(tp, new->fa_slen);
NODE_INIT_PARENT(l, tp);
put_child_root(tp, key, l);
trie_rebalance(t, tp);
return 0;
notnode:
node_free(l);
noleaf:
return -ENOMEM;
}
static int fib_insert_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *new,
struct fib_alias *fa, t_key key)
{
if (!l)
return fib_insert_node(t, tp, new, key);
if (fa) {
hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
} else {
struct fib_alias *last;
hlist_for_each_entry(last, &l->leaf, fa_list) {
if (new->fa_slen < last->fa_slen)
break;
if ((new->fa_slen == last->fa_slen) &&
(new->tb_id > last->tb_id))
break;
fa = last;
}
if (fa)
hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
else
hlist_add_head_rcu(&new->fa_list, &l->leaf);
}
/* if we added to the tail node then we need to update slen */
if (l->slen < new->fa_slen) {
l->slen = new->fa_slen;
node_push_suffix(tp, new->fa_slen);
}
return 0;
}
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old);
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *)tb->tb_data;
struct fib_alias *fa, *new_fa;
struct key_vector *l, *tp;
u16 nlflags = NLM_F_EXCL;
struct fib_info *fi;
u8 plen = cfg->fc_dst_len;
u8 slen = KEYLENGTH - plen;
dscp_t dscp;
u32 key;
int err;
key = ntohl(cfg->fc_dst);
pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
fi = fib_create_info(cfg, extack);
if (IS_ERR(fi)) {
err = PTR_ERR(fi);
goto err;
}
dscp = cfg->fc_dscp;
l = fib_find_node(t, &tp, key);
fa = l ? fib_find_alias(&l->leaf, slen, dscp, fi->fib_priority,
tb->tb_id, false) : NULL;
/* Now fa, if non-NULL, points to the first fib alias
* with the same keys [prefix,dscp,priority], if such key already
* exists or to the node before which we will insert new one.
*
* If fa is NULL, we will need to allocate a new one and
* insert to the tail of the section matching the suffix length
* of the new alias.
*/
if (fa && fa->fa_dscp == dscp &&
fa->fa_info->fib_priority == fi->fib_priority) {
struct fib_alias *fa_first, *fa_match;
err = -EEXIST;
if (cfg->fc_nlflags & NLM_F_EXCL)
goto out;
nlflags &= ~NLM_F_EXCL;
/* We have 2 goals:
* 1. Find exact match for type, scope, fib_info to avoid
* duplicate routes
* 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
*/
fa_match = NULL;
fa_first = fa;
hlist_for_each_entry_from(fa, fa_list) {
if ((fa->fa_slen != slen) ||
(fa->tb_id != tb->tb_id) ||
(fa->fa_dscp != dscp))
break;
if (fa->fa_info->fib_priority != fi->fib_priority)
break;
if (fa->fa_type == cfg->fc_type &&
fa->fa_info == fi) {
fa_match = fa;
break;
}
}
if (cfg->fc_nlflags & NLM_F_REPLACE) {
struct fib_info *fi_drop;
u8 state;
nlflags |= NLM_F_REPLACE;
fa = fa_first;
if (fa_match) {
if (fa == fa_match)
err = 0;
goto out;
}
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
goto out;
fi_drop = fa->fa_info;
new_fa->fa_dscp = fa->fa_dscp;
new_fa->fa_info = fi;
new_fa->fa_type = cfg->fc_type;
state = fa->fa_state;
new_fa->fa_state = state & ~FA_S_ACCESSED;
new_fa->fa_slen = fa->fa_slen;
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
new_fa->offload = 0;
new_fa->trap = 0;
new_fa->offload_failed = 0;
hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
if (fib_find_alias(&l->leaf, fa->fa_slen, 0, 0,
tb->tb_id, true) == new_fa) {
enum fib_event_type fib_event;
fib_event = FIB_EVENT_ENTRY_REPLACE;
err = call_fib_entry_notifiers(net, fib_event,
key, plen,
new_fa, extack);
if (err) {
hlist_replace_rcu(&new_fa->fa_list,
&fa->fa_list);
goto out_free_new_fa;
}
}
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
tb->tb_id, &cfg->fc_nlinfo, nlflags);
alias_free_mem_rcu(fa);
fib_release_info(fi_drop);
if (state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
goto succeeded;
}
/* Error if we find a perfect match which
* uses the same scope, type, and nexthop
* information.
*/
if (fa_match)
goto out;
if (cfg->fc_nlflags & NLM_F_APPEND)
nlflags |= NLM_F_APPEND;
else
fa = fa_first;
}
err = -ENOENT;
if (!(cfg->fc_nlflags & NLM_F_CREATE))
goto out;
nlflags |= NLM_F_CREATE;
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
goto out;
new_fa->fa_info = fi;
new_fa->fa_dscp = dscp;
new_fa->fa_type = cfg->fc_type;
new_fa->fa_state = 0;
new_fa->fa_slen = slen;
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
new_fa->offload = 0;
new_fa->trap = 0;
new_fa->offload_failed = 0;
/* Insert new entry to the list. */
err = fib_insert_alias(t, tp, l, new_fa, fa, key);
if (err)
goto out_free_new_fa;
/* The alias was already inserted, so the node must exist. */
l = l ? l : fib_find_node(t, &tp, key);
if (WARN_ON_ONCE(!l)) {
err = -ENOENT;
goto out_free_new_fa;
}
if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) ==
new_fa) {
enum fib_event_type fib_event;
fib_event = FIB_EVENT_ENTRY_REPLACE;
err = call_fib_entry_notifiers(net, fib_event, key, plen,
new_fa, extack);
if (err)
goto out_remove_new_fa;
}
if (!plen)
tb->tb_num_default++;
rt_cache_flush(cfg->fc_nlinfo.nl_net);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
return 0;
out_remove_new_fa:
fib_remove_alias(t, tp, l, new_fa);
out_free_new_fa:
kmem_cache_free(fn_alias_kmem, new_fa);
out:
fib_release_info(fi);
err:
return err;
}
static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
{
t_key prefix = n->key;
return (key ^ prefix) & (prefix | -prefix);
}
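/* Worked example (illustrative): with key = 0xc0a80105 and n->key = 0xc0a80100,
* the lowest set bit of the prefix is bit 8, so (prefix | -prefix) is a mask
* covering bits 8..31. key ^ prefix is 0x05, which the mask clears to zero,
* so prefix_mismatch() reports no mismatch in the bits at or above the
* prefix's lowest set bit.
*/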
bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
const struct flowi4 *flp)
{
if (nhc->nhc_flags & RTNH_F_DEAD)
return false;
if (ip_ignore_linkdown(nhc->nhc_dev) &&
nhc->nhc_flags & RTNH_F_LINKDOWN &&
!(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
return false;
if (flp->flowi4_oif && flp->flowi4_oif != nhc->nhc_oif)
return false;
return true;
}
/* should be called with rcu_read_lock */
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags)
{
struct trie *t = (struct trie *) tb->tb_data;
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats __percpu *stats = t->stats;
#endif
const t_key key = ntohl(flp->daddr);
struct key_vector *n, *pn;
struct fib_alias *fa;
unsigned long index;
t_key cindex;
pn = t->kv;
cindex = 0;
n = get_child_rcu(pn, cindex);
if (!n) {
trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN);
return -EAGAIN;
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->gets);
#endif
/* Step 1: Travel to the longest prefix match in the trie */
for (;;) {
index = get_cindex(key, n);
/* This bit of code is a bit tricky but it combines multiple
* checks into a single check. The prefix consists of the
* prefix plus zeros for the "bits" in the prefix. The index
* is the difference between the key and this value. From
* this we can actually derive several pieces of data.
* if (index >= (1ul << bits))
* we have a mismatch in skip bits and failed
* else
* we know the value is cindex
*
* This check is safe even if bits == KEYLENGTH due to the
* fact that we can only allocate a node with 32 bits if a
* long is greater than 32 bits.
*/
if (index >= (1ul << n->bits))
break;
/* we have found a leaf. Prefixes have already been compared */
if (IS_LEAF(n))
goto found;
/* only record pn and cindex if we are going to be chopping
* bits later. Otherwise we are just wasting cycles.
*/
if (n->slen > n->pos) {
pn = n;
cindex = index;
}
n = get_child_rcu(n, index);
if (unlikely(!n))
goto backtrace;
}
/* Step 2: Sort out leaves and begin backtracing for longest prefix */
for (;;) {
/* record the pointer where our next node pointer is stored */
struct key_vector __rcu **cptr = n->tnode;
/* This test verifies that none of the bits that differ
* between the key and the prefix exist in the region of
* the lsb and higher in the prefix.
*/
if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
goto backtrace;
/* exit out and process leaf */
if (unlikely(IS_LEAF(n)))
break;
/* Don't bother recording parent info. Since we are in
* prefix match mode we will have to come back to wherever
* we started this traversal anyway
*/
while ((n = rcu_dereference(*cptr)) == NULL) {
backtrace:
#ifdef CONFIG_IP_FIB_TRIE_STATS
if (!n)
this_cpu_inc(stats->null_node_hit);
#endif
/* If we are at cindex 0 there are no more bits for
* us to strip at this level so we must ascend back
* up one level to see if there are any more bits to
* be stripped there.
*/
while (!cindex) {
t_key pkey = pn->key;
/* If we don't have a parent then there is
* nothing for us to do as we do not have any
* further nodes to parse.
*/
if (IS_TRIE(pn)) {
trace_fib_table_lookup(tb->tb_id, flp,
NULL, -EAGAIN);
return -EAGAIN;
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->backtrack);
#endif
/* Get Child's index */
pn = node_parent_rcu(pn);
cindex = get_index(pkey, pn);
}
/* strip the least significant bit from the cindex */
cindex &= cindex - 1;
/* grab pointer for next child node */
cptr = &pn->tnode[cindex];
}
}
found:
/* this line carries forward the xor from earlier in the function */
index = key ^ n->key;
/* Step 3: Process the leaf, if that fails fall back to backtracing */
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
struct fib_nh_common *nhc;
int nhsel, err;
if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
if (index >= (1ul << fa->fa_slen))
continue;
}
if (fa->fa_dscp && !fib_dscp_masked_match(fa->fa_dscp, flp))
continue;
/* Paired with WRITE_ONCE() in fib_release_info() */
if (READ_ONCE(fi->fib_dead))
continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
fib_alias_accessed(fa);
err = fib_props[fa->fa_type].error;
if (unlikely(err < 0)) {
out_reject:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_passed);
#endif
trace_fib_table_lookup(tb->tb_id, flp, NULL, err);
return err;
}
if (fi->fib_flags & RTNH_F_DEAD)
continue;
if (unlikely(fi->nh)) {
if (nexthop_is_blackhole(fi->nh)) {
err = fib_props[RTN_BLACKHOLE].error;
goto out_reject;
}
nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
&nhsel);
if (nhc)
goto set_result;
goto miss;
}
for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
nhc = fib_info_nhc(fi, nhsel);
if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
continue;
set_result:
if (!(fib_flags & FIB_LOOKUP_NOREF))
refcount_inc(&fi->fib_clntref);
res->prefix = htonl(n->key);
res->prefixlen = KEYLENGTH - fa->fa_slen;
res->nh_sel = nhsel;
res->nhc = nhc;
res->type = fa->fa_type;
res->scope = fi->fib_scope;
res->dscp = fa->fa_dscp;
res->fi = fi;
res->table = tb;
res->fa_head = &n->leaf;
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_passed);
#endif
trace_fib_table_lookup(tb->tb_id, flp, nhc, err);
return err;
}
}
miss:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_miss);
#endif
goto backtrace;
}
EXPORT_SYMBOL_GPL(fib_table_lookup);
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old)
{
/* record the location of the previous list_info entry */
struct hlist_node **pprev = old->fa_list.pprev;
struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);
/* remove the fib_alias from the list */
hlist_del_rcu(&old->fa_list);
/* if we emptied the list this leaf will be freed and we can sort
* out parent suffix lengths as a part of trie_rebalance
*/
if (hlist_empty(&l->leaf)) {
if (tp->slen == l->slen)
node_pull_suffix(tp, tp->pos);
put_child_root(tp, l->key, NULL);
node_free(l);
trie_rebalance(t, tp);
return;
}
/* only access fa if it is pointing at the last valid hlist_node */
if (*pprev)
return;
/* update the trie with the latest suffix length */
l->slen = fa->fa_slen;
node_pull_suffix(tp, fa->fa_slen);
}
static void fib_notify_alias_delete(struct net *net, u32 key,
struct hlist_head *fah,
struct fib_alias *fa_to_delete,
struct netlink_ext_ack *extack)
{
struct fib_alias *fa_next, *fa_to_notify;
u32 tb_id = fa_to_delete->tb_id;
u8 slen = fa_to_delete->fa_slen;
enum fib_event_type fib_event;
/* Do not notify if we do not care about the route. */
if (fib_find_alias(fah, slen, 0, 0, tb_id, true) != fa_to_delete)
return;
/* Determine if the route should be replaced by the next route in the
* list.
*/
fa_next = hlist_entry_safe(fa_to_delete->fa_list.next,
struct fib_alias, fa_list);
if (fa_next && fa_next->fa_slen == slen && fa_next->tb_id == tb_id) {
fib_event = FIB_EVENT_ENTRY_REPLACE;
fa_to_notify = fa_next;
} else {
fib_event = FIB_EVENT_ENTRY_DEL;
fa_to_notify = fa_to_delete;
}
call_fib_entry_notifiers(net, fib_event, key, KEYLENGTH - slen,
fa_to_notify, extack);
}
/* Caller must hold RTNL. */
int fib_table_delete(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *) tb->tb_data;
struct fib_alias *fa, *fa_to_delete;
struct key_vector *l, *tp;
u8 plen = cfg->fc_dst_len;
u8 slen = KEYLENGTH - plen;
dscp_t dscp;
u32 key;
key = ntohl(cfg->fc_dst);
l = fib_find_node(t, &tp, key);
if (!l)
return -ESRCH;
dscp = cfg->fc_dscp;
fa = fib_find_alias(&l->leaf, slen, dscp, 0, tb->tb_id, false);
if (!fa)
return -ESRCH;
pr_debug("Deleting %08x/%d dsfield=0x%02x t=%p\n", key, plen,
inet_dscp_to_dsfield(dscp), t);
fa_to_delete = NULL;
hlist_for_each_entry_from(fa, fa_list) {
struct fib_info *fi = fa->fa_info;
if ((fa->fa_slen != slen) ||
(fa->tb_id != tb->tb_id) ||
(fa->fa_dscp != dscp))
break;
if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
(cfg->fc_scope == RT_SCOPE_NOWHERE ||
fa->fa_info->fib_scope == cfg->fc_scope) &&
(!cfg->fc_prefsrc ||
fi->fib_prefsrc == cfg->fc_prefsrc) &&
(!cfg->fc_protocol ||
fi->fib_protocol == cfg->fc_protocol) &&
fib_nh_match(net, cfg, fi, extack) == 0 &&
fib_metrics_match(cfg, fi)) {
fa_to_delete = fa;
break;
}
}
if (!fa_to_delete)
return -ESRCH;
fib_notify_alias_delete(net, key, &l->leaf, fa_to_delete, extack);
rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
&cfg->fc_nlinfo, 0);
if (!plen)
tb->tb_num_default--;
fib_remove_alias(t, tp, l, fa_to_delete);
if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
fib_release_info(fa_to_delete->fa_info);
alias_free_mem_rcu(fa_to_delete);
return 0;
}
/* Scan for the next leaf starting at the provided key value */
static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
{
struct key_vector *pn, *n = *tn;
unsigned long cindex;
/* this loop is meant to try and find the key in the trie */
do {
/* record parent and next child index */
pn = n;
cindex = (key > pn->key) ? get_index(key, pn) : 0;
if (cindex >> pn->bits)
break;
/* descend into the next child */
n = get_child_rcu(pn, cindex++);
if (!n)
break;
/* guarantee forward progress on the keys */
if (IS_LEAF(n) && (n->key >= key))
goto found;
} while (IS_TNODE(n));
/* this loop will search for the next leaf with a greater key */
while (!IS_TRIE(pn)) {
/* if we exhausted the parent node we will need to climb */
if (cindex >= (1ul << pn->bits)) {
t_key pkey = pn->key;
pn = node_parent_rcu(pn);
cindex = get_index(pkey, pn) + 1;
continue;
}
/* grab the next available node */
n = get_child_rcu(pn, cindex++);
if (!n)
continue;
/* no need to compare keys since we bumped the index */
if (IS_LEAF(n))
goto found;
/* restart scanning in the new node */
pn = n;
cindex = 0;
}
*tn = pn;
return NULL; /* Root of trie */
found:
/* if we are at the limit for keys just return NULL for the tnode */
*tn = pn;
return n;
}
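/*
 * Illustrative sketch (not part of the original source): callers drive
 * leaf_walk_rcu() in a loop, restarting from the key just past the leaf that
 * was returned; when that key wraps back below the previous one the whole
 * keyspace has been covered. fib_trie_unmerge() and fib_table_notify() below
 * follow exactly this pattern:
 *
 *	t_key key = 0;
 *	struct key_vector *l, *tp = t->kv;
 *
 *	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
 *		... process the fib_alias entries on l->leaf ...
 *		key = l->key + 1;
 *		if (key < l->key)
 *			break;
 *	}
 */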
static void fib_trie_free(struct fib_table *tb)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct hlist_node *tmp;
struct fib_alias *fa;
/* walk trie in reverse order and free everything */
for (;;) {
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
if (IS_TRIE(pn))
break;
n = pn;
pn = node_parent(pn);
/* drop emptied tnode */
put_child_root(pn, n->key, NULL);
node_free(n);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
hlist_del_rcu(&fa->fa_list);
alias_free_mem_rcu(fa);
}
put_child_root(pn, n->key, NULL);
node_free(n);
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
free_percpu(t->stats);
#endif
kfree(tb);
}
struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
{
struct trie *ot = (struct trie *)oldtb->tb_data;
struct key_vector *l, *tp = ot->kv;
struct fib_table *local_tb;
struct fib_alias *fa;
struct trie *lt;
t_key key = 0;
if (oldtb->tb_data == oldtb->__data)
return oldtb;
local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL);
if (!local_tb)
return NULL;
lt = (struct trie *)local_tb->tb_data;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
struct key_vector *local_l = NULL, *local_tp;
hlist_for_each_entry(fa, &l->leaf, fa_list) {
struct fib_alias *new_fa;
if (local_tb->tb_id != fa->tb_id)
continue;
/* clone fa for new local table */
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
goto out;
memcpy(new_fa, fa, sizeof(*fa));
/* insert clone into table */
if (!local_l)
local_l = fib_find_node(lt, &local_tp, l->key);
if (fib_insert_alias(lt, local_tp, local_l, new_fa,
NULL, l->key)) {
kmem_cache_free(fn_alias_kmem, new_fa);
goto out;
}
}
/* stop loop if key wrapped back to 0 */
key = l->key + 1;
if (key < l->key)
break;
}
return local_tb;
out:
fib_trie_free(local_tb);
return NULL;
}
/* Caller must hold RTNL */
void fib_table_flush_external(struct fib_table *tb)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct hlist_node *tmp;
struct fib_alias *fa;
/* walk trie in reverse order */
for (;;) {
unsigned char slen = 0;
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
/* cannot resize the trie vector */
if (IS_TRIE(pn))
break;
/* update the suffix to address pulled leaves */
if (pn->slen > pn->pos)
update_suffix(pn);
/* resize completed node */
pn = resize(t, pn);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
/* if alias was cloned to local then we just
* need to remove the local copy from main
*/
if (tb->tb_id != fa->tb_id) {
hlist_del_rcu(&fa->fa_list);
alias_free_mem_rcu(fa);
continue;
}
/* record local slen */
slen = fa->fa_slen;
}
/* update leaf slen */
n->slen = slen;
if (hlist_empty(&n->leaf)) {
put_child_root(pn, n->key, NULL);
node_free(n);
}
}
}
/* Caller must hold RTNL. */
int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
{
struct trie *t = (struct trie *)tb->tb_data;
struct nl_info info = { .nl_net = net };
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct hlist_node *tmp;
struct fib_alias *fa;
int found = 0;
/* walk trie in reverse order */
for (;;) {
unsigned char slen = 0;
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
/* cannot resize the trie vector */
if (IS_TRIE(pn))
break;
/* update the suffix to address pulled leaves */
if (pn->slen > pn->pos)
update_suffix(pn);
/* resize completed node */
pn = resize(t, pn);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (!fi || tb->tb_id != fa->tb_id ||
(!(fi->fib_flags & RTNH_F_DEAD) &&
!fib_props[fa->fa_type].error)) {
slen = fa->fa_slen;
continue;
}
/* Do not flush error routes if network namespace is
* not being dismantled
*/
if (!flush_all && fib_props[fa->fa_type].error) {
slen = fa->fa_slen;
continue;
}
fib_notify_alias_delete(net, n->key, &n->leaf, fa,
NULL);
if (fi->pfsrc_removed)
rtmsg_fib(RTM_DELROUTE, htonl(n->key), fa,
KEYLENGTH - fa->fa_slen, tb->tb_id, &info, 0);
hlist_del_rcu(&fa->fa_list);
fib_release_info(fa->fa_info);
alias_free_mem_rcu(fa);
found++;
}
/* update leaf slen */
n->slen = slen;
if (hlist_empty(&n->leaf)) {
put_child_root(pn, n->key, NULL);
node_free(n);
}
}
pr_debug("trie_flush found=%d\n", found);
return found;
}
/* derived from fib_trie_free */
static void __fib_info_notify_update(struct net *net, struct fib_table *tb,
struct nl_info *info)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct fib_alias *fa;
for (;;) {
struct key_vector *n;
if (!(cindex--)) {
t_key pkey = pn->key;
if (IS_TRIE(pn))
break;
pn = node_parent(pn);
cindex = get_index(pkey, pn);
continue;
}
/* grab the next available node */
n = get_child(pn, cindex);
if (!n)
continue;
if (IS_TNODE(n)) {
/* record pn and cindex for leaf walking */
pn = n;
cindex = 1ul << n->bits;
continue;
}
hlist_for_each_entry(fa, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (!fi || !fi->nh_updated || fa->tb_id != tb->tb_id)
continue;
rtmsg_fib(RTM_NEWROUTE, htonl(n->key), fa,
KEYLENGTH - fa->fa_slen, tb->tb_id,
info, NLM_F_REPLACE);
}
}
}
void fib_info_notify_update(struct net *net, struct nl_info *info)
{
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist,
lockdep_rtnl_is_held())
__fib_info_notify_update(net, tb, info);
}
}
static int fib_leaf_notify(struct key_vector *l, struct fib_table *tb,
struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
struct fib_alias *fa;
int last_slen = -1;
int err;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (!fi)
continue;
/* local and main table can share the same trie,
* so don't notify twice for the same entry.
*/
if (tb->tb_id != fa->tb_id)
continue;
if (fa->fa_slen == last_slen)
continue;
last_slen = fa->fa_slen;
err = call_fib_entry_notifier(nb, FIB_EVENT_ENTRY_REPLACE,
l->key, KEYLENGTH - fa->fa_slen,
fa, extack);
if (err)
return err;
}
return 0;
}
static int fib_table_notify(struct fib_table *tb, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *l, *tp = t->kv;
t_key key = 0;
int err;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
err = fib_leaf_notify(l, tb, nb, extack);
if (err)
return err;
key = l->key + 1;
/* stop in case of wrap around */
if (key < l->key)
break;
}
return 0;
}
int fib_notify(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
unsigned int h;
int err;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
err = fib_table_notify(tb, nb, extack);
if (err)
return err;
}
}
return 0;
}
static void __trie_free_rcu(struct rcu_head *head)
{
struct fib_table *tb = container_of(head, struct fib_table, rcu);
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie *t = (struct trie *)tb->tb_data;
if (tb->tb_data == tb->__data)
free_percpu(t->stats);
#endif /* CONFIG_IP_FIB_TRIE_STATS */
kfree(tb);
}
void fib_free_table(struct fib_table *tb)
{
call_rcu(&tb->rcu, __trie_free_rcu);
}
static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
struct sk_buff *skb, struct netlink_callback *cb,
struct fib_dump_filter *filter)
{
unsigned int flags = NLM_F_MULTI;
__be32 xkey = htonl(l->key);
int i, s_i, i_fa, s_fa, err;
struct fib_alias *fa;
if (filter->filter_set ||
!filter->dump_exceptions || !filter->dump_routes)
flags |= NLM_F_DUMP_FILTERED;
s_i = cb->args[4];
s_fa = cb->args[5];
i = 0;
/* rcu_read_lock is held by the caller */
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
if (i < s_i)
goto next;
i_fa = 0;
if (tb->tb_id != fa->tb_id)
goto next;
if (filter->filter_set) {
if (filter->rt_type && fa->fa_type != filter->rt_type)
goto next;
if ((filter->protocol &&
fi->fib_protocol != filter->protocol))
goto next;
if (filter->dev &&
!fib_info_nh_uses_dev(fi, filter->dev))
goto next;
}
if (filter->dump_routes) {
if (!s_fa) {
struct fib_rt_info fri;
fri.fi = fi;
fri.tb_id = tb->tb_id;
fri.dst = xkey;
fri.dst_len = KEYLENGTH - fa->fa_slen;
fri.dscp = fa->fa_dscp;
fri.type = fa->fa_type;
fri.offload = READ_ONCE(fa->offload);
fri.trap = READ_ONCE(fa->trap);
fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWROUTE, &fri, flags);
if (err < 0)
goto stop;
}
i_fa++;
}
if (filter->dump_exceptions) {
err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
&i_fa, s_fa, flags);
if (err < 0)
goto stop;
}
next:
i++;
}
cb->args[4] = i;
return skb->len;
stop:
cb->args[4] = i;
cb->args[5] = i_fa;
return err;
}
/* rcu_read_lock needs to be held by the caller from the read side */
int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
struct netlink_callback *cb, struct fib_dump_filter *filter)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *l, *tp = t->kv;
/* Dump starting at last key.
* Note: 0.0.0.0/0 (ie default) is first key.
*/
int count = cb->args[2];
t_key key = cb->args[3];
/* First time here, count and key are both always 0. Count > 0
* and key == 0 means the dump has wrapped around and we are done.
*/
if (count && !key)
return 0;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
err = fn_trie_dump_leaf(l, tb, skb, cb, filter);
if (err < 0) {
cb->args[3] = key;
cb->args[2] = count;
return err;
}
++count;
key = l->key + 1;
memset(&cb->args[4], 0,
sizeof(cb->args) - 4*sizeof(cb->args[0]));
/* stop loop if key wrapped back to 0 */
if (key < l->key)
break;
}
cb->args[3] = key;
cb->args[2] = count;
return 0;
}
void __init fib_trie_init(void)
{
fn_alias_kmem = kmem_cache_create("ip_fib_alias",
sizeof(struct fib_alias),
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
LEAF_SIZE,
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
}
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
struct trie *t;
size_t sz = sizeof(*tb);
if (!alias)
sz += sizeof(struct trie);
tb = kzalloc(sz, GFP_KERNEL);
if (!tb)
return NULL;
tb->tb_id = id;
tb->tb_num_default = 0;
tb->tb_data = (alias ? alias->__data : tb->__data);
if (alias)
return tb;
t = (struct trie *) tb->tb_data;
t->kv[0].pos = KEYLENGTH;
t->kv[0].slen = KEYLENGTH;
#ifdef CONFIG_IP_FIB_TRIE_STATS
t->stats = alloc_percpu(struct trie_use_stats);
if (!t->stats) {
kfree(tb);
tb = NULL;
}
#endif
return tb;
}
#ifdef CONFIG_PROC_FS
/* Depth first Trie walk iterator */
struct fib_trie_iter {
struct seq_net_private p;
struct fib_table *tb;
struct key_vector *tnode;
unsigned int index;
unsigned int depth;
};
static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
{
unsigned long cindex = iter->index;
struct key_vector *pn = iter->tnode;
t_key pkey;
pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
iter->tnode, iter->index, iter->depth);
while (!IS_TRIE(pn)) {
while (cindex < child_length(pn)) {
struct key_vector *n = get_child_rcu(pn, cindex++);
if (!n)
continue;
if (IS_LEAF(n)) {
iter->tnode = pn;
iter->index = cindex;
} else {
/* push down one level */
iter->tnode = n;
iter->index = 0;
++iter->depth;
}
return n;
}
/* Current node exhausted, pop back up */
pkey = pn->key;
pn = node_parent_rcu(pn);
cindex = get_index(pkey, pn) + 1;
--iter->depth;
}
/* record root node so further searches know we are done */
iter->tnode = pn;
iter->index = 0;
return NULL;
}
static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
struct trie *t)
{
struct key_vector *n, *pn;
if (!t)
return NULL;
pn = t->kv;
n = rcu_dereference(pn->tnode[0]);
if (!n)
return NULL;
if (IS_TNODE(n)) {
iter->tnode = n;
iter->index = 0;
iter->depth = 1;
} else {
iter->tnode = pn;
iter->index = 0;
iter->depth = 0;
}
return n;
}
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
struct key_vector *n;
struct fib_trie_iter iter;
memset(s, 0, sizeof(*s));
rcu_read_lock();
for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
if (IS_LEAF(n)) {
struct fib_alias *fa;
s->leaves++;
s->totdepth += iter.depth;
if (iter.depth > s->maxdepth)
s->maxdepth = iter.depth;
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list)
++s->prefixes;
} else {
s->tnodes++;
if (n->bits < MAX_STAT_DEPTH)
s->nodesizes[n->bits]++;
s->nullpointers += tn_info(n)->empty_children;
}
}
rcu_read_unlock();
}
/*
* This outputs /proc/net/fib_triestats
*/
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
unsigned int i, max, pointers, bytes, avdepth;
if (stat->leaves)
avdepth = stat->totdepth*100 / stat->leaves;
else
avdepth = 0;
seq_printf(seq, "\tAver depth: %u.%02d\n",
avdepth / 100, avdepth % 100);
seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
bytes = LEAF_SIZE * stat->leaves;
seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
bytes += sizeof(struct fib_alias) * stat->prefixes;
seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
bytes += TNODE_SIZE(0) * stat->tnodes;
max = MAX_STAT_DEPTH;
while (max > 0 && stat->nodesizes[max-1] == 0)
max--;
pointers = 0;
for (i = 1; i < max; i++)
if (stat->nodesizes[i] != 0) {
seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
pointers += (1<<i) * stat->nodesizes[i];
}
seq_putc(seq, '\n');
seq_printf(seq, "\tPointers: %u\n", pointers);
bytes += sizeof(struct key_vector *) * pointers;
seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
const struct trie_use_stats __percpu *stats)
{
struct trie_use_stats s = { 0 };
int cpu;
/* loop through all of the CPUs and gather up the stats */
for_each_possible_cpu(cpu) {
const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
s.gets += pcpu->gets;
s.backtrack += pcpu->backtrack;
s.semantic_match_passed += pcpu->semantic_match_passed;
s.semantic_match_miss += pcpu->semantic_match_miss;
s.null_node_hit += pcpu->null_node_hit;
s.resize_node_skipped += pcpu->resize_node_skipped;
}
seq_printf(seq, "\nCounters:\n---------\n");
seq_printf(seq, "gets = %u\n", s.gets);
seq_printf(seq, "backtracks = %u\n", s.backtrack);
seq_printf(seq, "semantic match passed = %u\n",
s.semantic_match_passed);
seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
}
#endif /* CONFIG_IP_FIB_TRIE_STATS */
static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
{
if (tb->tb_id == RT_TABLE_LOCAL)
seq_puts(seq, "Local:\n");
else if (tb->tb_id == RT_TABLE_MAIN)
seq_puts(seq, "Main:\n");
else
seq_printf(seq, "Id %d:\n", tb->tb_id);
}
static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
unsigned int h;
seq_printf(seq,
"Basic info: size of leaf:"
" %zd bytes, size of tnode: %zd bytes.\n",
LEAF_SIZE, TNODE_SIZE(0));
rcu_read_lock();
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
struct trie *t = (struct trie *) tb->tb_data;
struct trie_stat stat;
if (!t)
continue;
fib_table_print(seq, tb);
trie_collect_stats(t, &stat);
trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
trie_show_usage(seq, t->stats);
#endif
}
cond_resched_rcu();
}
rcu_read_unlock();
return 0;
}
static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
loff_t idx = 0;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
struct key_vector *n;
for (n = fib_trie_get_first(iter,
(struct trie *) tb->tb_data);
n; n = fib_trie_get_next(iter))
if (pos == idx++) {
iter->tb = tb;
return n;
}
}
}
return NULL;
}
static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return fib_trie_get_idx(seq, *pos);
}
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct fib_table *tb = iter->tb;
struct hlist_node *tb_node;
unsigned int h;
struct key_vector *n;
++*pos;
/* next node in same table */
n = fib_trie_get_next(iter);
if (n)
return n;
/* walk rest of this hash chain */
h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
if (n)
goto found;
}
/* new hash chain */
while (++h < FIB_TABLE_HASHSZ) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
if (n)
goto found;
}
}
return NULL;
found:
iter->tb = tb;
return n;
}
static void fib_trie_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static void seq_indent(struct seq_file *seq, int n)
{
while (n-- > 0)
seq_puts(seq, " ");
}
static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
switch (s) {
case RT_SCOPE_UNIVERSE: return "universe";
case RT_SCOPE_SITE: return "site";
case RT_SCOPE_LINK: return "link";
case RT_SCOPE_HOST: return "host";
case RT_SCOPE_NOWHERE: return "nowhere";
default:
snprintf(buf, len, "scope=%d", s);
return buf;
}
}
static const char *const rtn_type_names[__RTN_MAX] = {
[RTN_UNSPEC] = "UNSPEC",
[RTN_UNICAST] = "UNICAST",
[RTN_LOCAL] = "LOCAL",
[RTN_BROADCAST] = "BROADCAST",
[RTN_ANYCAST] = "ANYCAST",
[RTN_MULTICAST] = "MULTICAST",
[RTN_BLACKHOLE] = "BLACKHOLE",
[RTN_UNREACHABLE] = "UNREACHABLE",
[RTN_PROHIBIT] = "PROHIBIT",
[RTN_THROW] = "THROW",
[RTN_NAT] = "NAT",
[RTN_XRESOLVE] = "XRESOLVE",
};
static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
if (t < __RTN_MAX && rtn_type_names[t])
return rtn_type_names[t];
snprintf(buf, len, "type %u", t);
return buf;
}
/* Pretty print the trie */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
struct key_vector *n = v;
if (IS_TRIE(node_parent_rcu(n)))
fib_table_print(seq, iter->tb);
if (IS_TNODE(n)) {
__be32 prf = htonl(n->key);
seq_indent(seq, iter->depth-1);
seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
&prf, KEYLENGTH - n->pos - n->bits, n->bits,
tn_info(n)->full_children,
tn_info(n)->empty_children);
} else {
__be32 val = htonl(n->key);
struct fib_alias *fa;
seq_indent(seq, iter->depth);
seq_printf(seq, " |-- %pI4\n", &val);
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
char buf1[32], buf2[32];
seq_indent(seq, iter->depth + 1);
seq_printf(seq, " /%zu %s %s",
KEYLENGTH - fa->fa_slen,
rtn_scope(buf1, sizeof(buf1),
fa->fa_info->fib_scope),
rtn_type(buf2, sizeof(buf2),
fa->fa_type));
if (fa->fa_dscp)
seq_printf(seq, " tos=%d",
inet_dscp_to_dsfield(fa->fa_dscp));
seq_putc(seq, '\n');
}
}
return 0;
}
static const struct seq_operations fib_trie_seq_ops = {
.start = fib_trie_seq_start,
.next = fib_trie_seq_next,
.stop = fib_trie_seq_stop,
.show = fib_trie_seq_show,
};
struct fib_route_iter {
struct seq_net_private p;
struct fib_table *main_tb;
struct key_vector *tnode;
loff_t pos;
t_key key;
};
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
loff_t pos)
{
struct key_vector *l, **tp = &iter->tnode;
t_key key;
/* use cached location of previously found key */
if (iter->pos > 0 && pos >= iter->pos) {
key = iter->key;
} else {
iter->pos = 1;
key = 0;
}
pos -= iter->pos;
while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
key = l->key + 1;
iter->pos++;
l = NULL;
/* handle unlikely case of a key wrap */
if (!key)
break;
}
if (l)
iter->key = l->key; /* remember it */
else
iter->pos = 0; /* forget it */
return l;
}
static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct fib_route_iter *iter = seq->private;
struct fib_table *tb;
struct trie *t;
rcu_read_lock();
tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
if (!tb)
return NULL;
iter->main_tb = tb;
t = (struct trie *)tb->tb_data;
iter->tnode = t->kv;
if (*pos != 0)
return fib_route_get_idx(iter, *pos);
iter->pos = 0;
iter->key = KEY_MAX;
return SEQ_START_TOKEN;
}
static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct fib_route_iter *iter = seq->private;
struct key_vector *l = NULL;
t_key key = iter->key + 1;
++*pos;
/* only allow key of 0 for start of sequence */
if ((v == SEQ_START_TOKEN) || key)
l = leaf_walk_rcu(&iter->tnode, key);
if (l) {
iter->key = l->key;
iter->pos++;
} else {
iter->pos = 0;
}
return l;
}
static void fib_route_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static unsigned int fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
{
unsigned int flags = 0;
if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
flags = RTF_REJECT;
if (fi) {
const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
if (nhc->nhc_gw.ipv4)
flags |= RTF_GATEWAY;
}
if (mask == htonl(0xFFFFFFFF))
flags |= RTF_HOST;
flags |= RTF_UP;
return flags;
}
/*
* This outputs /proc/net/route.
* The format of the file is not supposed to be changed
* and needs to be the same as the fib_hash output to avoid breaking
* legacy utilities.
*/
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
struct fib_route_iter *iter = seq->private;
struct fib_table *tb = iter->main_tb;
struct fib_alias *fa;
struct key_vector *l = v;
__be32 prefix;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
"\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
"\tWindow\tIRTT");
return 0;
}
prefix = htonl(l->key);
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
__be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen);
unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
if ((fa->fa_type == RTN_BROADCAST) ||
(fa->fa_type == RTN_MULTICAST))
continue;
if (fa->tb_id != tb->tb_id)
continue;
seq_setwidth(seq, 127);
if (fi) {
struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
__be32 gw = 0;
if (nhc->nhc_gw_family == AF_INET)
gw = nhc->nhc_gw.ipv4;
seq_printf(seq,
"%s\t%08X\t%08X\t%04X\t%d\t%u\t"
"%u\t%08X\t%d\t%u\t%u",
nhc->nhc_dev ? nhc->nhc_dev->name : "*",
prefix, gw, flags, 0, 0,
fi->fib_priority,
mask,
(fi->fib_advmss ?
fi->fib_advmss + 40 : 0),
fi->fib_window,
fi->fib_rtt >> 3);
} else {
seq_printf(seq,
"*\t%08X\t%08X\t%04X\t%d\t%u\t"
"%u\t%08X\t%d\t%u\t%u",
prefix, 0, flags, 0, 0, 0,
mask, 0, 0, 0);
}
seq_pad(seq, '\n');
}
return 0;
}
static const struct seq_operations fib_route_seq_ops = {
.start = fib_route_seq_start,
.next = fib_route_seq_next,
.stop = fib_route_seq_stop,
.show = fib_route_seq_show,
};
int __net_init fib_proc_init(struct net *net)
{
if (!proc_create_net("fib_trie", 0444, net->proc_net, &fib_trie_seq_ops,
sizeof(struct fib_trie_iter)))
goto out1;
if (!proc_create_net_single("fib_triestat", 0444, net->proc_net,
fib_triestat_seq_show, NULL))
goto out2;
if (!proc_create_net("route", 0444, net->proc_net, &fib_route_seq_ops,
sizeof(struct fib_route_iter)))
goto out3;
return 0;
out3:
remove_proc_entry("fib_triestat", net->proc_net);
out2:
remove_proc_entry("fib_trie", net->proc_net);
out1:
return -ENOMEM;
}
void __net_exit fib_proc_exit(struct net *net)
{
remove_proc_entry("fib_trie", net->proc_net);
remove_proc_entry("fib_triestat", net->proc_net);
remove_proc_entry("route", net->proc_net);
}
#endif /* CONFIG_PROC_FS */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H
#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>
#ifdef CONFIG_MMU
#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif /* CONFIG_SWAP */
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
* the low-order bits.
*
* We arrange the `type' and `offset' fields so that `type' is at the six
* high-order bits of the swp_entry_t and `offset' is right-aligned in the
* remaining bits. Although `type' itself needs only five bits, we allow for
* shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
*
* swp_entry_t's are *never* stored anywhere in their arch-dependent format.
*/
#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
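/*
 * Worked example (informational): MAX_SWAPFILES_SHIFT is 5, so on a 64-bit
 * build BITS_PER_XA_VALUE is 63 and SWP_TYPE_SHIFT works out to 58, leaving
 * bits 0..57 for the offset (SWP_OFFSET_MASK == (1UL << 58) - 1). A 32-bit
 * build gets SWP_TYPE_SHIFT == 26 by the same arithmetic.
 */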
/*
* Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
* store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
* can use the extra bits to store other information besides PFN.
*/
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS min_t(int, \
sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
SWP_TYPE_SHIFT)
#endif /* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
/**
* Migration swap entry specific bitfield definitions. Layout:
*
* |----------+--------------------|
* | swp_type | swp_offset |
* |----------+--------+-+-+-------|
* | | resv |D|A| PFN |
* |----------+--------+-+-+-------|
*
* @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
* @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
*
* Note: A/D bits will be stored in migration entries iff there're enough
* free bits in arch specific swp offset. By default we'll ignore A/D bits
* when migrating a page. Please refer to migration_entry_supports_ad()
* for more information. If there're more bits besides PFN and A/D bits,
* they should be reserved and always be zeros.
*/
#define SWP_MIG_YOUNG_BIT (SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT (SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS (SWP_PFN_BITS + 2)
#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
static inline bool is_pfn_swap_entry(swp_entry_t entry);
/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
if (pte_swp_exclusive(pte))
pte = pte_swp_clear_exclusive(pte);
if (pte_swp_soft_dirty(pte))
pte = pte_swp_clear_soft_dirty(pte);
if (pte_swp_uffd_wp(pte))
pte = pte_swp_clear_uffd_wp(pte);
return pte;
}
/*
* Store a type+offset into a swp_entry_t in an arch-independent format
*/
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
swp_entry_t ret;
ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
return ret;
}
/*
* Extract the `type' field from a swp_entry_t. The swp_entry_t is in
* arch-independent format
*/
static inline unsigned swp_type(swp_entry_t entry)
{
return (entry.val >> SWP_TYPE_SHIFT);
}
/*
* Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
* arch-independent format
*/
static inline pgoff_t swp_offset(swp_entry_t entry)
{
return entry.val & SWP_OFFSET_MASK;
}
/*
* This should only be called upon a pfn swap entry to get the PFN stored
* in the swap entry. Please refer to is_pfn_swap_entry() for the definition
* of a pfn swap entry.
*/
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
VM_BUG_ON(!is_pfn_swap_entry(entry));
return swp_offset(entry) & SWP_PFN_MASK;
}
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
return !pte_none(pte) && !pte_present(pte);
}
/*
* Convert the arch-dependent pte representation of a swp_entry_t into an
* arch-independent swp_entry_t.
*/
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
pte = pte_swp_clear_flags(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
/*
* Convert the arch-independent representation of a swp_entry_t into the
* arch-dependent pte representation.
*/
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
swp_entry_t arch_entry;
arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
return __swp_entry_to_pte(arch_entry);
}
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
swp_entry_t entry;
entry.val = xa_to_value(arg);
return entry;
}
static inline void *swp_to_radix_entry(swp_entry_t entry)
{
return xa_mk_value(entry.val);
}
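/*
 * Illustrative sketch (not part of the original header), using a hypothetical
 * type/offset pair: the helpers above round-trip cleanly both through the
 * arch-independent encoding and through the radix/xarray form:
 *
 *	swp_entry_t entry = swp_entry(1, 0x2a);
 *	void *slot = swp_to_radix_entry(entry);
 *
 *	swp_type(entry) == 1, swp_offset(entry) == 0x2a, and
 *	radix_to_swp_entry(slot).val == entry.val all hold.
 */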
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
return swp_entry(SWP_DEVICE_READ, offset);
}
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
return swp_entry(SWP_DEVICE_WRITE, offset);
}
static inline bool is_device_private_entry(swp_entry_t entry)
{
int type = swp_type(entry);
return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}
static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}
static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
static inline bool is_device_private_entry(swp_entry_t entry)
{
return false;
}
static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
return false;
}
static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
swp_type(entry) == SWP_MIGRATION_WRITE);
}
static inline int is_writable_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}
static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
return swp_entry(SWP_MIGRATION_READ, offset);
}
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
return swp_entry(SWP_MIGRATION_WRITE, offset);
}
/*
* Returns whether the host has large enough swap offset field to support
* carrying over pgtable A/D bits for page migrations. The result is
* pretty much arch specific.
*/
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
return swap_migration_ad_supported;
#else /* CONFIG_SWAP */
return false;
#endif /* CONFIG_SWAP */
}
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
if (migration_entry_supports_ad())
return swp_entry(swp_type(entry),
swp_offset(entry) | SWP_MIG_YOUNG);
return entry;
}
static inline bool is_migration_entry_young(swp_entry_t entry)
{
if (migration_entry_supports_ad())
return swp_offset(entry) & SWP_MIG_YOUNG;
/* Keep the old behavior of aging page after migration */
return false;
}
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
if (migration_entry_supports_ad())
return swp_entry(swp_type(entry),
swp_offset(entry) | SWP_MIG_DIRTY);
return entry;
}
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
if (migration_entry_supports_ad())
return swp_offset(entry) & SWP_MIG_DIRTY;
/* Keep the old behavior of clean page after migration */
return false;
}
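/*
 * Illustrative sketch (not part of the original header), for a hypothetical
 * pfn: composing a migration entry and carrying the page's A/D state across
 * the migration. The young/dirty bits only stick when
 * migration_entry_supports_ad() says the arch swp offset has room for them:
 *
 *	swp_entry_t entry = make_writable_migration_entry(pfn);
 *
 *	entry = make_migration_entry_young(entry);
 *	entry = make_migration_entry_dirty(entry);
 *	is_writable_migration_entry(entry) is always true here, while
 *	is_migration_entry_young()/is_migration_entry_dirty() report the bits
 *	only on architectures that can store them.
 */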
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
static inline int is_migration_entry(swp_entry_t swp)
{
return 0;
}
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
return 0;
}
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
return entry;
}
static inline bool is_migration_entry_young(swp_entry_t entry)
{
return false;
}
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
return entry;
}
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
return false;
}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
/*
* Support for hardware poisoned pages
*/
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
BUG_ON(!PageLocked(page));
return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}
static inline int is_hwpoison_entry(swp_entry_t entry)
{
return swp_type(entry) == SWP_HWPOISON;
}
#else
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
return swp_entry(0, 0);
}
static inline int is_hwpoison_entry(swp_entry_t swp)
{
return 0;
}
#endif
typedef unsigned long pte_marker;
#define PTE_MARKER_UFFD_WP BIT(0)
/*
* "Poisoned" here is meant in the very general sense of "future accesses are
* invalid", instead of referring very specifically to hardware memory errors.
* This marker is meant to represent any of various different causes of this.
*
* Note that, when encountered by the faulting logic, PTEs with this marker will
* result in VM_FAULT_HWPOISON and thus regardless trigger hardware memory error
* logic.
*/
#define PTE_MARKER_POISONED BIT(1)
/*
* Indicates that, on fault, this PTE will cause a SIGSEGV signal to be
* sent. This means guard markers behave in effect as if the region were mapped
* PROT_NONE, rather than as if they were a memory hole or equivalent.
*/
#define PTE_MARKER_GUARD BIT(2)
#define PTE_MARKER_MASK (BIT(3) - 1)
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
return swp_entry(SWP_PTE_MARKER, marker);
}
static inline bool is_pte_marker_entry(swp_entry_t entry)
{
return swp_type(entry) == SWP_PTE_MARKER;
}
static inline pte_marker pte_marker_get(swp_entry_t entry)
{
return swp_offset(entry) & PTE_MARKER_MASK;
}
static inline bool is_pte_marker(pte_t pte)
{
return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}
static inline pte_t make_pte_marker(pte_marker marker)
{
return swp_entry_to_pte(make_pte_marker_entry(marker));
}
static inline swp_entry_t make_poisoned_swp_entry(void)
{
return make_pte_marker_entry(PTE_MARKER_POISONED);
}
static inline int is_poisoned_swp_entry(swp_entry_t entry)
{
return is_pte_marker_entry(entry) &&
(pte_marker_get(entry) & PTE_MARKER_POISONED);
}
static inline swp_entry_t make_guard_swp_entry(void)
{
return make_pte_marker_entry(PTE_MARKER_GUARD);
}
static inline int is_guard_swp_entry(swp_entry_t entry)
{
return is_pte_marker_entry(entry) &&
(pte_marker_get(entry) & PTE_MARKER_GUARD);
}
/*
* This is a special version to check pte_none() just to cover the case when
* the pte is a pte marker. It existed because in many cases the pte marker
* should be seen as a none pte; it's just that we have stored some information
* onto the none pte so it becomes not-none any more.
*
* It should be used when the pte is file-backed, ram-based and backing
* userspace pages, like shmem. It is not needed upon pgtables that do not
* support pte markers at all. For example, it's not needed on anonymous
* memory, kernel-only memory (including when the system is during-boot),
* non-ram based generic file-system. It's fine to be used even there, but the
* extra pte marker check will be pure overhead.
*/
static inline int pte_none_mostly(pte_t pte)
{
return pte_none(pte) || is_pte_marker(pte);
}
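/*
 * Illustrative sketch (not from the original header): a typical shape for a
 * file-backed fault path, where a marker PTE must still be treated as "no
 * page present" but its marker bits may need to be consulted afterwards:
 *
 *	pte_t pte = ptep_get(ptep);
 *
 *	if (pte_none_mostly(pte)) {
 *		handle the not-present case; if is_pte_marker(pte), the
 *		stored bits are pte_marker_get(pte_to_swp_entry(pte))
 *	}
 */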
static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
struct page *p = pfn_to_page(swp_offset_pfn(entry));
/*
* Any use of migration entries may only occur while the
* corresponding page is locked
*/
BUG_ON(is_migration_entry(entry) && !PageLocked(p));
return p;
}
static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
{
struct folio *folio = pfn_folio(swp_offset_pfn(entry));
/*
* Any use of migration entries may only occur while the
* corresponding folio is locked
*/
BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
return folio;
}
/*
* A pfn swap entry is a special type of swap entry that always has a pfn stored
* in the swap offset. They can either be used to represent unaddressable device
* memory, to restrict access to a page undergoing migration or to represent a
* pfn which has been hwpoisoned and unmapped.
*/
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
/* Make sure the swp offset can always store the needed fields */
BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
return is_migration_entry(entry) || is_device_private_entry(entry) ||
is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page);
extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
struct page *new);
extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
swp_entry_t arch_entry;
if (pmd_swp_soft_dirty(pmd))
pmd = pmd_swp_clear_soft_dirty(pmd);
if (pmd_swp_uffd_wp(pmd))
pmd = pmd_swp_clear_uffd_wp(pmd);
arch_entry = __pmd_to_swp_entry(pmd);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
swp_entry_t arch_entry;
arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
return __swp_entry_to_pmd(arch_entry);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
BUILD_BUG();
}
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
struct page *new)
{
BUILD_BUG();
}
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
return swp_entry(0, 0);
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
return __pmd(0);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
}
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGTABLE_INVERT_H
#define _ASM_PGTABLE_INVERT_H 1
#ifndef __ASSEMBLER__
/*
* A clear pte value is special, and doesn't get inverted.
*
* Note that even users that only pass a pgprot_t (rather
* than a full pte) won't trigger the special zero case,
* because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED
* set. So the all zero case really is limited to just the
* cleared page table entry case.
*/
static inline bool __pte_needs_invert(u64 val)
{
return val && !(val & _PAGE_PRESENT);
}
/* Get a mask to xor with the page table entry to get the correct pfn. */
static inline u64 protnone_mask(u64 val)
{
return __pte_needs_invert(val) ? ~0ull : 0;
}
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
/*
* When a PTE transitions from NONE to !NONE or vice-versa
* invert the PFN part to stop speculation.
* pte_pfn undoes this when needed.
*/
if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
val = (val & ~mask) | (~val & mask);
return val;
}
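/*
 * Illustrative sketch (not from the original header; _PAGE_PRESENT and
 * PTE_PFN_MASK come from the arch pgtable headers that include this file):
 * when a raw PTE value goes from present to not-present, the PFN bits are
 * stored inverted, and protnone_mask() on the stored value supplies the xor
 * needed to read the real PFN back:
 *
 *	u64 mask = PTE_PFN_MASK;
 *	u64 stored = flip_protnone_guard(old_present_val, new_protnone_val, mask);
 *
 *	((stored ^ protnone_mask(stored)) & mask) == (new_protnone_val & mask)
 */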
#endif /* __ASSEMBLER__ */
#endif
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Sleepable Read-Copy Update mechanism for mutual exclusion,
* tree variant.
*
* Copyright (C) IBM Corporation, 2017
*
* Author: Paul McKenney <paulmck@linux.ibm.com>
*/
#ifndef _LINUX_SRCU_TREE_H
#define _LINUX_SRCU_TREE_H
#include <linux/rcu_node_tree.h>
#include <linux/completion.h>
struct srcu_node;
struct srcu_struct;
/* One element of the srcu_data srcu_ctrs array. */
struct srcu_ctr {
atomic_long_t srcu_locks; /* Locks per CPU. */
atomic_long_t srcu_unlocks; /* Unlocks per CPU. */
};
/*
* Per-CPU structure feeding into leaf srcu_node, similar in function
* to rcu_node.
*/
struct srcu_data {
/* Read-side state. */
struct srcu_ctr srcu_ctrs[2]; /* Locks and unlocks per CPU. */
int srcu_reader_flavor; /* Reader flavor for srcu_struct structure? */
/* Values: SRCU_READ_FLAVOR_.* */
/* Update-side state. */
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
bool srcu_cblist_invoking; /* Invoking these CBs? */
struct timer_list delay_work; /* Delay for CB invoking */
struct work_struct work; /* Context for CB invoking. */
struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
struct srcu_node *mynode; /* Leaf srcu_node. */
unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */
int cpu;
struct srcu_struct *ssp;
};
/*
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children having CBs, but only */
/* if greater than ->srcu_gp_seq. */
unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs have CBs for given GP? */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
struct srcu_node *srcu_parent; /* Next up in tree. */
int grplo; /* Least CPU for node. */
int grphi; /* Biggest CPU for node. */
};
/*
* Per-SRCU-domain structure, update-side data linked from srcu_struct.
*/
struct srcu_usage {
struct srcu_node *node; /* Combining tree. */
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
int srcu_size_state; /* Small-to-big transition state. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
spinlock_t __private lock; /* Protect counters and size state. */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
unsigned long srcu_gp_start; /* Last GP start timestamp (jiffies) */
unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */
unsigned long srcu_size_jiffies; /* Current contention-measurement interval. */
unsigned long srcu_n_lock_retries; /* Contention events in current interval. */
unsigned long srcu_n_exp_nodelay; /* # expedited no-delays in current GP phase. */
bool sda_is_static; /* May ->sda be passed to free_percpu()? */
unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */
struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */
struct completion srcu_barrier_completion;
/* Awaken barrier rq at end. */
atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */
/* callback for the barrier */
/* operation. */
unsigned long reschedule_jiffies;
unsigned long reschedule_count;
struct delayed_work work;
struct srcu_struct *srcu_ssp;
};
/*
* Per-SRCU-domain structure, similar in function to rcu_state.
*/
struct srcu_struct {
struct srcu_ctr __percpu *srcu_ctrp;
struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
struct lockdep_map dep_map;
struct srcu_usage *srcu_sup; /* Update-side data. */
};
// Values for size state variable (->srcu_size_state). Once the state
// has been set to SRCU_SIZE_ALLOC, the grace-period code advances through
// this state machine one step per grace period until the SRCU_SIZE_BIG state
// is reached. Otherwise, the state machine remains in the SRCU_SIZE_SMALL
// state indefinitely.
#define SRCU_SIZE_SMALL 0 // No srcu_node combining tree, ->node == NULL
#define SRCU_SIZE_ALLOC 1 // An srcu_node tree is being allocated, initialized,
// and then referenced by ->node. It will not be used.
#define SRCU_SIZE_WAIT_BARRIER 2 // The srcu_node tree starts being used by everything
// except call_srcu(), especially by srcu_barrier().
// By the end of this state, all CPUs and threads
// are aware of this tree's existence.
#define SRCU_SIZE_WAIT_CALL 3 // The srcu_node tree starts being used by call_srcu().
// By the end of this state, all of the call_srcu()
// invocations that were running on a non-boot CPU
// and using the boot CPU's callback queue will have
// completed.
#define SRCU_SIZE_WAIT_CBS1 4 // Don't trust the ->srcu_have_cbs[] grace-period
#define SRCU_SIZE_WAIT_CBS2 5 // sequence elements or the ->srcu_data_have_cbs[]
#define SRCU_SIZE_WAIT_CBS3 6 // CPU-bitmask elements until all four elements of
#define SRCU_SIZE_WAIT_CBS4 7 // each array have been initialized.
#define SRCU_SIZE_BIG 8 // The srcu_node combining tree is fully initialized
// and all aspects of it are being put to use.
/* Values for state variable (bottom bits of ->srcu_gp_seq). */
#define SRCU_STATE_IDLE 0
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
/*
* Values for initializing gp sequence fields. Higher values allow wrap arounds to
* occur earlier.
* The second value with state is useful in the case of static initialization of
* srcu_usage where srcu_gp_seq_needed is expected to have some state value in its
* lower bits (or else it will appear to be already initialized within
* the call check_init_srcu_struct()).
*/
#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
#define __SRCU_USAGE_INIT(name) \
{ \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \
.srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \
.srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \
.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
}
#define __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
.srcu_sup = &usage_name, \
__SRCU_DEP_MAP_INIT(name)
#define __SRCU_STRUCT_INIT_MODULE(name, usage_name) \
{ \
__SRCU_STRUCT_INIT_COMMON(name, usage_name) \
}
#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name) \
{ \
.sda = &pcpu_name, \
.srcu_ctrp = &pcpu_name.srcu_ctrs[0], \
__SRCU_STRUCT_INIT_COMMON(name, usage_name) \
}
/*
* Define and initialize a srcu struct at build time.
* Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
*
* Note that although DEFINE_STATIC_SRCU() hides the name from other
* files, the per-CPU variable rules nevertheless require that the
* chosen name be globally unique. These rules also prohibit use of
* DEFINE_STATIC_SRCU() within a function. If these rules are too
* restrictive, declare the srcu_struct manually. For example, in
* each file:
*
* static struct srcu_struct my_srcu;
*
* Then, before the first use of each my_srcu, manually initialize it:
*
* init_srcu_struct(&my_srcu);
*
* See include/linux/percpu-defs.h for the rules on per-CPU variables.
*/
#ifdef MODULE
# define __DEFINE_SRCU(name, is_static) \
static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage); \
extern struct srcu_struct * const __srcu_struct_##name; \
struct srcu_struct * const __srcu_struct_##name \
__section("___srcu_struct_ptrs") = &name
#else
# define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \
static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
is_static struct srcu_struct name = \
__SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data)
#endif
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
// Converts a per-CPU pointer to an ->srcu_ctrs[] array element to that
// element's index.
static inline bool __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
{
return scpp - &ssp->sda->srcu_ctrs[0];
}
// Converts an integer to a per-CPU pointer to the corresponding
// ->srcu_ctrs[] array element.
static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
{
return &ssp->sda->srcu_ctrs[idx];
}
/*
* Counts the new reader in the appropriate per-CPU element of the
* srcu_struct. Returns a pointer that must be passed to the matching
* srcu_read_unlock_fast().
*
* Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
* critical sections either because they disable interrupts, because
* they are a single instruction, or because they are read-modify-write
* atomic operations, depending on the whims of the architecture.
* This matters because the SRCU-fast grace-period mechanism uses either
* synchronize_rcu() or synchronize_rcu_expedited(), that is, RCU,
* *not* SRCU, in order to eliminate the need for the read-side smp_mb()
* invocations that are used by srcu_read_lock() and srcu_read_unlock().
* The __srcu_read_unlock_fast() function also relies on this same RCU
* (again, *not* SRCU) trick to eliminate the need for smp_mb().
*
* The key point behind this RCU trick is that if any part of a given
* RCU reader precedes the beginning of a given RCU grace period, then
* the entirety of that RCU reader and everything preceding it happens
* before the end of that same RCU grace period. Similarly, if any part
* of a given RCU reader follows the end of a given RCU grace period,
* then the entirety of that RCU reader and everything following it
* happens after the beginning of that same RCU grace period. Therefore,
* the operations labeled Y in __srcu_read_lock_fast() and those labeled Z
* in __srcu_read_unlock_fast() are ordered against the corresponding SRCU
* read-side critical section from the viewpoint of the SRCU grace period.
* This is all the ordering that is required, hence no calls to smp_mb().
*
* This means that __srcu_read_lock_fast() is not all that fast
* on architectures that support NMIs but do not supply NMI-safe
* implementations of this_cpu_inc().
*/
static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
return scp;
}
/*
* Removes the count for the old reader from the appropriate
* per-CPU element of the srcu_struct. Note that this may well be a
* different CPU than that which was incremented by the corresponding
* srcu_read_lock_fast(), but it must be within the same task.
*
* Please see the __srcu_read_lock_fast() function's header comment for
* information on implicit RCU readers and NMI safety.
*/
static inline void notrace
__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
{
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
}
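/*
 * Illustrative usage sketch (not from the original header): the fast-path
 * primitives pair up through the returned per-CPU pointer; the public
 * srcu_read_lock_fast()/srcu_read_unlock_fast() wrappers (declared elsewhere)
 * follow the same shape:
 *
 *	struct srcu_ctr __percpu *scp;
 *
 *	scp = __srcu_read_lock_fast(ssp);
 *	... read-side critical section, no smp_mb() required ...
 *	__srcu_read_unlock_fast(ssp, scp);
 */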
void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
// Record reader usage even for CONFIG_PROVE_RCU=n kernels. This is
// needed only for flavors that require grace-period smp_mb() calls to be
// promoted to synchronize_rcu().
static inline void srcu_check_read_flavor_force(struct srcu_struct *ssp, int read_flavor)
{
struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
if (likely(READ_ONCE(sdp->srcu_reader_flavor) & read_flavor))
return;
// Note that the cmpxchg() in __srcu_check_read_flavor() is fully ordered.
__srcu_check_read_flavor(ssp, read_flavor);
}
// Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels.
static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
{
if (IS_ENABLED(CONFIG_PROVE_RCU))
__srcu_check_read_flavor(ssp, read_flavor);
}
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* SHA-1 and HMAC-SHA1 library functions
*/
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/unaligned.h>
#include <linux/wordpart.h>
static const struct sha1_block_state sha1_iv = {
.h = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
/*
* If you have 32 registers or more, the compiler can (and should)
* try to change the array[] accesses into registers. However, on
* machines with fewer than ~25 registers, that won't really work,
* and at least gcc will make an unholy mess of it.
*
* So to avoid that mess, which just slows things down, we force
* the stores to memory to actually happen (we might be better off
* with a 'W(t)=(val); asm("" : "+m" (W(t)))' there instead, as
* suggested by Artur Skawina - that will also make gcc unable to
* try to do the silly "optimize away loads" part because it won't
* see what the value will be).
*
* Ben Herrenschmidt reports that on PPC, the C version comes close
* to the optimized asm with this (i.e. on PPC you don't want that
* 'volatile', since there are lots of registers).
*
* On ARM we get the best code generation by forcing a full memory barrier
* between each SHA_ROUND, otherwise gcc happily gets wild with spilling,
* the stack frame size simply explodes, and performance goes down the drain.
*/
#ifdef CONFIG_X86
#define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
#elif defined(CONFIG_ARM)
#define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
#else
#define setW(x, val) (W(x) = (val))
#endif
/* This "rolls" over the 512-bit array */
#define W(x) (array[(x)&15])
/*
* Where do we get the source from? The first 16 iterations get it from
* the input data; the remaining iterations mix it from the 512-bit array.
*/
#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
__u32 TEMP = input(t); setW(t, TEMP); \
E += TEMP + rol32(A,5) + (fn) + (constant); \
B = ror32(B, 2); \
TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
/**
* sha1_transform - single block SHA1 transform (deprecated)
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
* @array: 16 words of workspace (see note)
*
* This function executes SHA-1's internal compression function. It updates the
* 160-bit internal state (@digest) with a single 512-bit data block (@data).
*
* Don't use this function. SHA-1 is no longer considered secure. And even if
* you do have to use SHA-1, this isn't the correct way to hash something with
* SHA-1 as this doesn't handle padding and finalization.
*
* Note: If the hash is security sensitive, the caller should be sure
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
void sha1_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
unsigned int i = 0;
A = digest[0];
B = digest[1];
C = digest[2];
D = digest[3];
E = digest[4];
/* Round 1 - iterations 0-15 take their input from 'data' */
for (; i < 16; ++i)
T_0_15(i, A, B, C, D, E);
/* Round 1 - tail. Input from 512-bit mixing array */
for (; i < 20; ++i)
T_16_19(i, A, B, C, D, E);
/* Round 2 */
for (; i < 40; ++i)
T_20_39(i, A, B, C, D, E);
/* Round 3 */
for (; i < 60; ++i)
T_40_59(i, A, B, C, D, E);
/* Round 4 */
for (; i < 80; ++i)
T_60_79(i, A, B, C, D, E);
digest[0] += A;
digest[1] += B;
digest[2] += C;
digest[3] += D;
digest[4] += E;
}
EXPORT_SYMBOL(sha1_transform);
/**
* sha1_init_raw - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
void sha1_init_raw(__u32 *buf)
{
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
buf[2] = 0x98badcfe;
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
EXPORT_SYMBOL(sha1_init_raw);
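/*
* Illustrative sketch of the deprecated low-level interface above (not part
* of the original file): the caller owns both the running digest and the
* 16-word workspace, and clears the workspace afterwards when the data is
* security sensitive.  `block' stands for one full 64-byte input block.
*
*	__u32 digest[SHA1_DIGEST_WORDS];
*	__u32 ws[SHA1_WORKSPACE_WORDS];
*
*	sha1_init_raw(digest);
*	sha1_transform(digest, block, ws);
*	memzero_explicit(ws, sizeof(ws));
*/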
static void __maybe_unused sha1_blocks_generic(struct sha1_block_state *state,
const u8 *data, size_t nblocks)
{
u32 workspace[SHA1_WORKSPACE_WORDS];
do {
sha1_transform(state->h, data, workspace);
data += SHA1_BLOCK_SIZE;
} while (--nblocks);
memzero_explicit(workspace, sizeof(workspace));
}
#ifdef CONFIG_CRYPTO_LIB_SHA1_ARCH
#include "sha1.h" /* $(SRCARCH)/sha1.h */
#else
#define sha1_blocks sha1_blocks_generic
#endif
void sha1_init(struct sha1_ctx *ctx)
{
ctx->state = sha1_iv;
ctx->bytecount = 0;
}
EXPORT_SYMBOL_GPL(sha1_init);
void sha1_update(struct sha1_ctx *ctx, const u8 *data, size_t len)
{
size_t partial = ctx->bytecount % SHA1_BLOCK_SIZE;
ctx->bytecount += len;
if (partial + len >= SHA1_BLOCK_SIZE) {
size_t nblocks;
if (partial) {
size_t l = SHA1_BLOCK_SIZE - partial;
memcpy(&ctx->buf[partial], data, l);
data += l;
len -= l;
sha1_blocks(&ctx->state, ctx->buf, 1);
}
nblocks = len / SHA1_BLOCK_SIZE;
len %= SHA1_BLOCK_SIZE;
if (nblocks) {
sha1_blocks(&ctx->state, data, nblocks);
data += nblocks * SHA1_BLOCK_SIZE;
}
partial = 0;
}
if (len)
memcpy(&ctx->buf[partial], data, len);
}
EXPORT_SYMBOL_GPL(sha1_update);
static void __sha1_final(struct sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE])
{
u64 bitcount = ctx->bytecount << 3;
size_t partial = ctx->bytecount % SHA1_BLOCK_SIZE;
ctx->buf[partial++] = 0x80;
if (partial > SHA1_BLOCK_SIZE - 8) {
memset(&ctx->buf[partial], 0, SHA1_BLOCK_SIZE - partial);
sha1_blocks(&ctx->state, ctx->buf, 1);
partial = 0;
}
memset(&ctx->buf[partial], 0, SHA1_BLOCK_SIZE - 8 - partial);
*(__be64 *)&ctx->buf[SHA1_BLOCK_SIZE - 8] = cpu_to_be64(bitcount);
sha1_blocks(&ctx->state, ctx->buf, 1);
for (size_t i = 0; i < SHA1_DIGEST_SIZE; i += 4)
put_unaligned_be32(ctx->state.h[i / 4], out + i);
}
void sha1_final(struct sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE])
{
__sha1_final(ctx, out);
memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL_GPL(sha1_final);
void sha1(const u8 *data, size_t len, u8 out[SHA1_DIGEST_SIZE])
{
struct sha1_ctx ctx;
sha1_init(&ctx);
sha1_update(&ctx, data, len);
sha1_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(sha1);
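/*
* Usage sketch (illustrative): the incremental interface produces the same
* digest as the one-shot sha1() helper above.  `msg' and `msg_len' are
* caller-supplied placeholders.
*
*	struct sha1_ctx ctx;
*	u8 digest[SHA1_DIGEST_SIZE];
*
*	sha1_init(&ctx);
*	sha1_update(&ctx, msg, msg_len);
*	sha1_final(&ctx, digest);	// same result as sha1(msg, msg_len, digest)
*/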
static void __hmac_sha1_preparekey(struct sha1_block_state *istate,
struct sha1_block_state *ostate,
const u8 *raw_key, size_t raw_key_len)
{
union {
u8 b[SHA1_BLOCK_SIZE];
unsigned long w[SHA1_BLOCK_SIZE / sizeof(unsigned long)];
} derived_key = { 0 };
if (unlikely(raw_key_len > SHA1_BLOCK_SIZE))
sha1(raw_key, raw_key_len, derived_key.b);
else
memcpy(derived_key.b, raw_key, raw_key_len);
for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)
derived_key.w[i] ^= REPEAT_BYTE(HMAC_IPAD_VALUE);
*istate = sha1_iv;
sha1_blocks(istate, derived_key.b, 1);
for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)
derived_key.w[i] ^= REPEAT_BYTE(HMAC_OPAD_VALUE ^
HMAC_IPAD_VALUE);
*ostate = sha1_iv;
sha1_blocks(ostate, derived_key.b, 1);
memzero_explicit(&derived_key, sizeof(derived_key));
}
void hmac_sha1_preparekey(struct hmac_sha1_key *key,
const u8 *raw_key, size_t raw_key_len)
{
__hmac_sha1_preparekey(&key->istate, &key->ostate,
raw_key, raw_key_len);
}
EXPORT_SYMBOL_GPL(hmac_sha1_preparekey);
void hmac_sha1_init(struct hmac_sha1_ctx *ctx, const struct hmac_sha1_key *key)
{
ctx->sha_ctx.state = key->istate;
ctx->sha_ctx.bytecount = SHA1_BLOCK_SIZE;
ctx->ostate = key->ostate;
}
EXPORT_SYMBOL_GPL(hmac_sha1_init);
void hmac_sha1_init_usingrawkey(struct hmac_sha1_ctx *ctx,
const u8 *raw_key, size_t raw_key_len)
{
__hmac_sha1_preparekey(&ctx->sha_ctx.state, &ctx->ostate,
raw_key, raw_key_len);
ctx->sha_ctx.bytecount = SHA1_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(hmac_sha1_init_usingrawkey);
void hmac_sha1_final(struct hmac_sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE])
{
/* Generate the padded input for the outer hash in ctx->sha_ctx.buf. */
__sha1_final(&ctx->sha_ctx, ctx->sha_ctx.buf);
memset(&ctx->sha_ctx.buf[SHA1_DIGEST_SIZE], 0,
SHA1_BLOCK_SIZE - SHA1_DIGEST_SIZE);
ctx->sha_ctx.buf[SHA1_DIGEST_SIZE] = 0x80;
*(__be32 *)&ctx->sha_ctx.buf[SHA1_BLOCK_SIZE - 4] =
cpu_to_be32(8 * (SHA1_BLOCK_SIZE + SHA1_DIGEST_SIZE));
/* Compute the outer hash, which gives the HMAC value. */
sha1_blocks(&ctx->ostate, ctx->sha_ctx.buf, 1);
for (size_t i = 0; i < SHA1_DIGEST_SIZE; i += 4)
put_unaligned_be32(ctx->ostate.h[i / 4], out + i);
memzero_explicit(ctx, sizeof(*ctx));
}
EXPORT_SYMBOL_GPL(hmac_sha1_final);
void hmac_sha1(const struct hmac_sha1_key *key,
const u8 *data, size_t data_len, u8 out[SHA1_DIGEST_SIZE])
{
struct hmac_sha1_ctx ctx;
hmac_sha1_init(&ctx, key);
hmac_sha1_update(&ctx, data, data_len);
hmac_sha1_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_sha1);
void hmac_sha1_usingrawkey(const u8 *raw_key, size_t raw_key_len,
const u8 *data, size_t data_len,
u8 out[SHA1_DIGEST_SIZE])
{
struct hmac_sha1_ctx ctx;
hmac_sha1_init_usingrawkey(&ctx, raw_key, raw_key_len);
hmac_sha1_update(&ctx, data, data_len);
hmac_sha1_final(&ctx, out);
}
EXPORT_SYMBOL_GPL(hmac_sha1_usingrawkey);
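/*
* Usage sketch (illustrative): a key may be prepared once with
* hmac_sha1_preparekey() and reused, or passed raw to the _usingrawkey
* helpers.  `raw_key', `raw_key_len', `msg' and `msg_len' are placeholders.
*
*	struct hmac_sha1_key key;
*	u8 mac[SHA1_DIGEST_SIZE];
*
*	hmac_sha1_preparekey(&key, raw_key, raw_key_len);
*	hmac_sha1(&key, msg, msg_len, mac);
*
* or, without a prepared key:
*
*	hmac_sha1_usingrawkey(raw_key, raw_key_len, msg, msg_len, mac);
*/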
#ifdef sha1_mod_init_arch
static int __init sha1_mod_init(void)
{
sha1_mod_init_arch();
return 0;
}
subsys_initcall(sha1_mod_init);
static void __exit sha1_mod_exit(void)
{
}
module_exit(sha1_mod_exit);
#endif
MODULE_DESCRIPTION("SHA-1 and HMAC-SHA1 library functions");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
// Generated by scripts/atomic/gen-atomic-long.sh
// DO NOT MODIFY THIS FILE DIRECTLY
#ifndef _LINUX_ATOMIC_LONG_H
#define _LINUX_ATOMIC_LONG_H
#include <linux/compiler.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
#else
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
#define atomic_long_cond_read_acquire atomic_cond_read_acquire
#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
#endif
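/*
* Usage sketch (illustrative only; this file is generated, so any real change
* belongs in the gen-atomic-long.sh template): atomic_long_t follows the
* machine word size, so the same code compiles to atomic64_t operations on
* 64-bit kernels and atomic_t operations on 32-bit kernels.  `nr_events' and
* `total' are placeholder names.
*
*	static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);
*
*	atomic_long_inc(&nr_events);
*	total = atomic_long_read(&nr_events);
*/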
/**
* raw_atomic_long_read() - atomic load with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically loads the value of @v with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_read() elsewhere.
*
* Return: The value loaded from @v.
*/
static __always_inline long
raw_atomic_long_read(const atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_read(v);
#else
return raw_atomic_read(v);
#endif
}
/**
* raw_atomic_long_read_acquire() - atomic load with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically loads the value of @v with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_read_acquire() elsewhere.
*
* Return: The value loaded from @v.
*/
static __always_inline long
raw_atomic_long_read_acquire(const atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_read_acquire(v);
#else
return raw_atomic_read_acquire(v);
#endif
}
/**
* raw_atomic_long_set() - atomic set with relaxed ordering
* @v: pointer to atomic_long_t
* @i: long value to assign
*
* Atomically sets @v to @i with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_set() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_set(atomic_long_t *v, long i)
{
#ifdef CONFIG_64BIT
raw_atomic64_set(v, i);
#else
raw_atomic_set(v, i);
#endif
}
/**
* raw_atomic_long_set_release() - atomic set with release ordering
* @v: pointer to atomic_long_t
* @i: long value to assign
*
* Atomically sets @v to @i with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_set_release() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_set_release(atomic_long_t *v, long i)
{
#ifdef CONFIG_64BIT
raw_atomic64_set_release(v, i);
#else
raw_atomic_set_release(v, i);
#endif
}
/**
* raw_atomic_long_add() - atomic add with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_add(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_add(i, v);
#else
raw_atomic_add(i, v);
#endif
}
/**
* raw_atomic_long_add_return() - atomic add with full ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_return() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_add_return(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_return(i, v);
#else
return raw_atomic_add_return(i, v);
#endif
}
/**
* raw_atomic_long_add_return_acquire() - atomic add with acquire ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_return_acquire() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_return_acquire(i, v);
#else
return raw_atomic_add_return_acquire(i, v);
#endif
}
/**
* raw_atomic_long_add_return_release() - atomic add with release ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_return_release() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_add_return_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_return_release(i, v);
#else
return raw_atomic_add_return_release(i, v);
#endif
}
/**
* raw_atomic_long_add_return_relaxed() - atomic add with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_return_relaxed() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_return_relaxed(i, v);
#else
return raw_atomic_add_return_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_fetch_add() - atomic add with full ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_add() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_add(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_add(i, v);
#else
return raw_atomic_fetch_add(i, v);
#endif
}
/**
* raw_atomic_long_fetch_add_acquire() - atomic add with acquire ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_add_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_add_acquire(i, v);
#else
return raw_atomic_fetch_add_acquire(i, v);
#endif
}
/**
* raw_atomic_long_fetch_add_release() - atomic add with release ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_add_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_add_release(i, v);
#else
return raw_atomic_fetch_add_release(i, v);
#endif
}
/**
* raw_atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_add_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_add_relaxed(i, v);
#else
return raw_atomic_fetch_add_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_sub() - atomic subtract with relaxed ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_sub() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_sub(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_sub(i, v);
#else
raw_atomic_sub(i, v);
#endif
}
/**
* raw_atomic_long_sub_return() - atomic subtract with full ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_sub_return() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_sub_return(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_sub_return(i, v);
#else
return raw_atomic_sub_return(i, v);
#endif
}
/**
* raw_atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_sub_return_acquire() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_sub_return_acquire(i, v);
#else
return raw_atomic_sub_return_acquire(i, v);
#endif
}
/**
* raw_atomic_long_sub_return_release() - atomic subtract with release ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_sub_return_release() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_sub_return_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_sub_return_release(i, v);
#else
return raw_atomic_sub_return_release(i, v);
#endif
}
/**
* raw_atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_sub_return_relaxed() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_sub_return_relaxed(i, v);
#else
return raw_atomic_sub_return_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_fetch_sub() - atomic subtract with full ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_sub() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_sub(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_sub(i, v);
#else
return raw_atomic_fetch_sub(i, v);
#endif
}
/**
* raw_atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_sub_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_sub_acquire(i, v);
#else
return raw_atomic_fetch_sub_acquire(i, v);
#endif
}
/**
* raw_atomic_long_fetch_sub_release() - atomic subtract with release ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_sub_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_sub_release(i, v);
#else
return raw_atomic_fetch_sub_release(i, v);
#endif
}
/**
* raw_atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_sub_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_sub_relaxed(i, v);
#else
return raw_atomic_fetch_sub_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_inc() - atomic increment with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_inc() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_inc(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_inc(v);
#else
raw_atomic_inc(v);
#endif
}
/**
* raw_atomic_long_inc_return() - atomic increment with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_inc_return() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_inc_return(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_inc_return(v);
#else
return raw_atomic_inc_return(v);
#endif
}
/**
* raw_atomic_long_inc_return_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_inc_return_acquire() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_inc_return_acquire(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_inc_return_acquire(v);
#else
return raw_atomic_inc_return_acquire(v);
#endif
}
/**
* raw_atomic_long_inc_return_release() - atomic increment with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_inc_return_release() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_inc_return_release(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_inc_return_release(v);
#else
return raw_atomic_inc_return_release(v);
#endif
}
/**
* raw_atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_inc_return_relaxed() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_inc_return_relaxed(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_inc_return_relaxed(v);
#else
return raw_atomic_inc_return_relaxed(v);
#endif
}
/**
* raw_atomic_long_fetch_inc() - atomic increment with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_inc() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_inc(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_inc(v);
#else
return raw_atomic_fetch_inc(v);
#endif
}
/**
* raw_atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_inc_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_inc_acquire(v);
#else
return raw_atomic_fetch_inc_acquire(v);
#endif
}
/**
* raw_atomic_long_fetch_inc_release() - atomic increment with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_inc_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_inc_release(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_inc_release(v);
#else
return raw_atomic_fetch_inc_release(v);
#endif
}
/**
* raw_atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_inc_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_inc_relaxed(v);
#else
return raw_atomic_fetch_inc_relaxed(v);
#endif
}
/**
* raw_atomic_long_dec() - atomic decrement with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_dec() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_dec(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_dec(v);
#else
raw_atomic_dec(v);
#endif
}
/**
* raw_atomic_long_dec_return() - atomic decrement with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_dec_return() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_dec_return(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_dec_return(v);
#else
return raw_atomic_dec_return(v);
#endif
}
/**
* raw_atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_dec_return_acquire() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_dec_return_acquire(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_dec_return_acquire(v);
#else
return raw_atomic_dec_return_acquire(v);
#endif
}
/**
* raw_atomic_long_dec_return_release() - atomic decrement with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_dec_return_release() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_dec_return_release(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_dec_return_release(v);
#else
return raw_atomic_dec_return_release(v);
#endif
}
/**
* raw_atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_dec_return_relaxed() elsewhere.
*
* Return: The updated value of @v.
*/
static __always_inline long
raw_atomic_long_dec_return_relaxed(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_dec_return_relaxed(v);
#else
return raw_atomic_dec_return_relaxed(v);
#endif
}
/**
* raw_atomic_long_fetch_dec() - atomic decrement with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_dec() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_dec(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_dec(v);
#else
return raw_atomic_fetch_dec(v);
#endif
}
/**
* raw_atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_dec_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_dec_acquire(v);
#else
return raw_atomic_fetch_dec_acquire(v);
#endif
}
/**
* raw_atomic_long_fetch_dec_release() - atomic decrement with release ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_dec_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_dec_release(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_dec_release(v);
#else
return raw_atomic_fetch_dec_release(v);
#endif
}
/**
* raw_atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_dec_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_dec_relaxed(v);
#else
return raw_atomic_fetch_dec_relaxed(v);
#endif
}
/**
* raw_atomic_long_and() - atomic bitwise AND with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_and() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_and(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_and(i, v);
#else
raw_atomic_and(i, v);
#endif
}
/**
* raw_atomic_long_fetch_and() - atomic bitwise AND with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_and() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_and(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_and(i, v);
#else
return raw_atomic_fetch_and(i, v);
#endif
}
/**
* raw_atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_and_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_and_acquire(i, v);
#else
return raw_atomic_fetch_and_acquire(i, v);
#endif
}
/**
* raw_atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_and_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_and_release(i, v);
#else
return raw_atomic_fetch_and_release(i, v);
#endif
}
/**
* raw_atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_and_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_and_relaxed(i, v);
#else
return raw_atomic_fetch_and_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_andnot() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_andnot(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_andnot(i, v);
#else
raw_atomic_andnot(i, v);
#endif
}
/**
* raw_atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_andnot() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_andnot(i, v);
#else
return raw_atomic_fetch_andnot(i, v);
#endif
}
/**
* raw_atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_andnot_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_andnot_acquire(i, v);
#else
return raw_atomic_fetch_andnot_acquire(i, v);
#endif
}
/**
* raw_atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_andnot_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_andnot_release(i, v);
#else
return raw_atomic_fetch_andnot_release(i, v);
#endif
}
/**
* raw_atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_andnot_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_andnot_relaxed(i, v);
#else
return raw_atomic_fetch_andnot_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_or() - atomic bitwise OR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_or() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_or(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_or(i, v);
#else
raw_atomic_or(i, v);
#endif
}
/**
* raw_atomic_long_fetch_or() - atomic bitwise OR with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_or() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_or(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_or(i, v);
#else
return raw_atomic_fetch_or(i, v);
#endif
}
/**
* raw_atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_or_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_or_acquire(i, v);
#else
return raw_atomic_fetch_or_acquire(i, v);
#endif
}
/**
* raw_atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_or_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_or_release(i, v);
#else
return raw_atomic_fetch_or_release(i, v);
#endif
}
/**
* raw_atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v | @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_or_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_or_relaxed(i, v);
#else
return raw_atomic_fetch_or_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_xor() - atomic bitwise XOR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_xor() elsewhere.
*
* Return: Nothing.
*/
static __always_inline void
raw_atomic_long_xor(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
raw_atomic64_xor(i, v);
#else
raw_atomic_xor(i, v);
#endif
}
/**
* raw_atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_xor() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_xor(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_xor(i, v);
#else
return raw_atomic_fetch_xor(i, v);
#endif
}
/**
* raw_atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_xor_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_xor_acquire(i, v);
#else
return raw_atomic_fetch_xor_acquire(i, v);
#endif
}
/**
* raw_atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_xor_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_xor_release(i, v);
#else
return raw_atomic_fetch_xor_release(i, v);
#endif
}
/**
* raw_atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
* @i: long value
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_xor_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_xor_relaxed(i, v);
#else
return raw_atomic_fetch_xor_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_xchg() - atomic exchange with full ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_xchg() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_xchg(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_xchg(v, new);
#else
return raw_atomic_xchg(v, new);
#endif
}
/**
* raw_atomic_long_xchg_acquire() - atomic exchange with acquire ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_xchg_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_xchg_acquire(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_xchg_acquire(v, new);
#else
return raw_atomic_xchg_acquire(v, new);
#endif
}
/**
* raw_atomic_long_xchg_release() - atomic exchange with release ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_xchg_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_xchg_release(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_xchg_release(v, new);
#else
return raw_atomic_xchg_release(v, new);
#endif
}
/**
* raw_atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
* @v: pointer to atomic_long_t
* @new: long value to assign
*
* Atomically updates @v to @new with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_xchg_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_xchg_relaxed(v, new);
#else
return raw_atomic_xchg_relaxed(v, new);
#endif
}
/**
* raw_atomic_long_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_cmpxchg(v, old, new);
#else
return raw_atomic_cmpxchg(v, old, new);
#endif
}
/**
* raw_atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_cmpxchg_acquire(v, old, new);
#else
return raw_atomic_cmpxchg_acquire(v, old, new);
#endif
}
/**
* raw_atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_cmpxchg_release(v, old, new);
#else
return raw_atomic_cmpxchg_release(v, old, new);
#endif
}
/**
* raw_atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic_long_t
* @old: long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_cmpxchg_relaxed(v, old, new);
#else
return raw_atomic_cmpxchg_relaxed(v, old, new);
#endif
}
/**
* raw_atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_try_cmpxchg(v, (s64 *)old, new);
#else
return raw_atomic_try_cmpxchg(v, (int *)old, new);
#endif
}
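/*
* Usage sketch (illustrative): the try_cmpxchg() form rewrites @old with the
* observed value on failure, which keeps compare-and-swap loops short.  `v'
* is a placeholder atomic_long_t.
*
*	long old = raw_atomic_long_read(&v);
*	long new;
*
*	do {
*		new = old + 1;	// recompute from the freshly observed value
*	} while (!raw_atomic_long_try_cmpxchg(&v, &old, new));
*/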
/**
* raw_atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
#else
return raw_atomic_try_cmpxchg_acquire(v, (int *)old, new);
#endif
}
/**
* raw_atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
#else
return raw_atomic_try_cmpxchg_release(v, (int *)old, new);
#endif
}
/**
* raw_atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
* @v: pointer to atomic_long_t
* @old: pointer to long value to compare with
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v,
* and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
*
* Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
#ifdef CONFIG_64BIT
return raw_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
#else
return raw_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
#endif
}
/**
* raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
* @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_sub_and_test() elsewhere.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_sub_and_test(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_sub_and_test(i, v);
#else
return raw_atomic_sub_and_test(i, v);
#endif
}
/**
* raw_atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - 1) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_dec_and_test() elsewhere.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_dec_and_test(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_dec_and_test(v);
#else
return raw_atomic_dec_and_test(v);
#endif
}
/**
* raw_atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + 1) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_inc_and_test() elsewhere.
*
* Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_inc_and_test(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_inc_and_test(v);
#else
return raw_atomic_inc_and_test(v);
#endif
}
/**
* raw_atomic_long_add_negative() - atomic add and test if negative with full ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with full ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_negative() elsewhere.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_add_negative(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_negative(i, v);
#else
return raw_atomic_add_negative(i, v);
#endif
}
/**
* raw_atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with acquire ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_negative_acquire() elsewhere.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_negative_acquire(i, v);
#else
return raw_atomic_add_negative_acquire(i, v);
#endif
}
/**
* raw_atomic_long_add_negative_release() - atomic add and test if negative with release ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with release ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_negative_release() elsewhere.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_add_negative_release(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_negative_release(i, v);
#else
return raw_atomic_add_negative_release(i, v);
#endif
}
/**
* raw_atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
* @i: long value to add
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v + @i) with relaxed ordering.
*
* Safe to use in noinstr code; prefer atomic_long_add_negative_relaxed() elsewhere.
*
* Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_negative_relaxed(i, v);
#else
return raw_atomic_add_negative_relaxed(i, v);
#endif
}
/**
* raw_atomic_long_fetch_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic_long_t
* @a: long value to add
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere.
*
* Return: The original value of @v.
*/
static __always_inline long
raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
#ifdef CONFIG_64BIT
return raw_atomic64_fetch_add_unless(v, a, u);
#else
return raw_atomic_fetch_add_unless(v, a, u);
#endif
}
/**
* raw_atomic_long_add_unless() - atomic add unless value with full ordering
* @v: pointer to atomic_long_t
* @a: long value to add
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
#ifdef CONFIG_64BIT
return raw_atomic64_add_unless(v, a, u);
#else
return raw_atomic_add_unless(v, a, u);
#endif
}
/**
* raw_atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
* @v: pointer to atomic_long_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_inc_not_zero(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_inc_not_zero(v);
#else
return raw_atomic_inc_not_zero(v);
#endif
}
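/*
* Usage sketch (illustrative): inc_not_zero() is the usual building block for
* "take a reference unless the object is already on its way out" patterns.
* `obj' and its `refs' member are placeholders.
*
*	if (!raw_atomic_long_inc_not_zero(&obj->refs))
*		return NULL;	// already released, do not touch the object
*	return obj;
*/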
/**
* raw_atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
* @v: pointer to atomic_long_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_inc_unless_negative(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_inc_unless_negative(v);
#else
return raw_atomic_inc_unless_negative(v);
#endif
}
/**
* raw_atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
* @v: pointer to atomic_long_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere.
*
* Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_dec_unless_positive(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_dec_unless_positive(v);
#else
return raw_atomic_dec_unless_positive(v);
#endif
}
/**
* raw_atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
* @v: pointer to atomic_long_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
* Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere.
*
* Return: The old value of (@v - 1), regardless of whether @v was updated.
*/
static __always_inline long
raw_atomic_long_dec_if_positive(atomic_long_t *v)
{
#ifdef CONFIG_64BIT
return raw_atomic64_dec_if_positive(v);
#else
return raw_atomic_dec_if_positive(v);
#endif
}
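/*
* Usage sketch (illustrative): dec_if_positive() only consumes a slot when one
* is available; the return value is negative when @v was not decremented.
* `free_slots' is a placeholder counter.
*
*	if (raw_atomic_long_dec_if_positive(&free_slots) < 0)
*		return -EBUSY;	// nothing left to hand out
*/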
#endif /* _LINUX_ATOMIC_LONG_H */
// eadf183c3600b8b92b91839dd3be6bcc560c752d
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com)
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
*/
#ifndef _CRYPTO_INTERNAL_CIPHER_H
#define _CRYPTO_INTERNAL_CIPHER_H
#include <crypto/algapi.h>
struct crypto_cipher {
struct crypto_tfm base;
};
/**
* DOC: Single Block Cipher API
*
* The single block cipher API is used with the ciphers of type
* CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
*
* Using the single block cipher API calls, operations with the basic cipher
* primitive can be implemented. These cipher primitives exclude any block
* chaining operations including IV handling.
*
* The purpose of this single block cipher API is to support the implementation
* of templates or other concepts that only need to perform the cipher operation
* on one block at a time. Templates invoke the underlying cipher primitive
* block-wise and process either the input or the output data of these cipher
* operations.
*/
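/*
* Usage sketch (illustrative): allocate a handle, set a key, and encrypt a
* single block in place.  Error handling is reduced to a minimum and `key'
* is a caller-supplied placeholder; the functions used are declared below
* in this header.
*
*	struct crypto_cipher *tfm;
*	u8 block[16];	// one AES block, encrypted in place
*
*	tfm = crypto_alloc_cipher("aes", 0, 0);
*	if (IS_ERR(tfm))
*		return PTR_ERR(tfm);
*	crypto_cipher_setkey(tfm, key, 16);	// a 16 byte key selects AES-128
*	crypto_cipher_encrypt_one(tfm, block, block);
*	crypto_free_cipher(tfm);
*/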
static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
return (struct crypto_cipher *)tfm;
}
/**
* crypto_alloc_cipher() - allocate single block cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* single block cipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for a single block cipher. The returned struct
* crypto_cipher is the cipher handle that is required for any subsequent API
* invocation for that single block cipher.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_CIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
return &tfm->base;
}
/**
* crypto_free_cipher() - zeroize and free the single block cipher handle
* @tfm: cipher handle to be freed
*/
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
crypto_free_tfm(crypto_cipher_tfm(tfm));
}
/**
* crypto_has_cipher() - Search for the availability of a single block cipher
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* single block cipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Return: true when the single block cipher is known to the kernel crypto API;
* false otherwise
*/
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_CIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return crypto_has_alg(alg_name, type, mask);
}
/**
* crypto_cipher_blocksize() - obtain block size for cipher
* @tfm: cipher handle
*
* The block size for the single block cipher referenced with the cipher handle
* tfm is returned. The caller may use that information to allocate appropriate
* memory for the data returned by the encryption or decryption operation
*
* Return: block size of cipher
*/
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}
static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}
static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}
static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
u32 flags)
{
crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}
static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
u32 flags)
{
crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}
/**
* crypto_cipher_setkey() - set key for cipher
* @tfm: cipher handle
* @key: buffer holding the key
* @keylen: length of the key in bytes
*
* The caller provided key is set for the single block cipher referenced by the
* cipher handle.
*
* Note, the key length determines the cipher variant. Many block ciphers support
* different variants depending on the key size, such as AES-128 vs AES-192
* vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
* is performed.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_cipher_setkey(struct crypto_cipher *tfm,
const u8 *key, unsigned int keylen);
/**
* crypto_cipher_encrypt_one() - encrypt one block of plaintext
* @tfm: cipher handle
* @dst: points to the buffer that will be filled with the ciphertext
* @src: buffer holding the plaintext to be encrypted
*
* Invoke the encryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src);
/**
* crypto_cipher_decrypt_one() - decrypt one block of ciphertext
* @tfm: cipher handle
* @dst: points to the buffer that will be filled with the plaintext
* @src: buffer holding the ciphertext to be decrypted
*
* Invoke the decryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src);
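/*
 * Illustrative sketch only, not part of this header's API: how a caller might
 * drive the single block cipher interface end to end. The helper name
 * example_encrypt_one_block() is hypothetical, "aes" is just one possible
 * cra_name, and error handling is abbreviated.
 */
static inline int example_encrypt_one_block(const u8 *key, unsigned int keylen,
const u8 *src, u8 *dst)
{
struct crypto_cipher *tfm;
int err;
tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
err = crypto_cipher_setkey(tfm, key, keylen);
if (!err)
/* src and dst must each be at least crypto_cipher_blocksize(tfm) bytes */
crypto_cipher_encrypt_one(tfm, dst, src);
crypto_free_cipher(tfm);
return err;
}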
struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher);
struct crypto_cipher_spawn {
struct crypto_spawn base;
};
static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_CIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
static inline struct crypto_alg *crypto_spawn_cipher_alg(
struct crypto_cipher_spawn *spawn)
{
return spawn->base.alg;
}
static inline struct crypto_cipher *crypto_spawn_cipher(
struct crypto_cipher_spawn *spawn)
{
u32 type = CRYPTO_ALG_TYPE_CIPHER;
u32 mask = CRYPTO_ALG_TYPE_MASK;
return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
}
static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* mm/percpu.c - percpu memory allocator
*
* Copyright (C) 2009 SUSE Linux Products GmbH
* Copyright (C) 2009 Tejun Heo <tj@kernel.org>
*
* Copyright (C) 2017 Facebook Inc.
* Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
*
* The percpu allocator handles both static and dynamic areas. Percpu
* areas are allocated in chunks which are divided into units. There is
* a 1-to-1 mapping for units to possible cpus. These units are grouped
* based on NUMA properties of the machine.
*
* c0 c1 c2
* ------------------- ------------------- ------------
* | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
* ------------------- ...... ------------------- .... ------------
*
* Allocation is done by offsets into a unit's address space. I.e., an
* area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
* c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
* and even sparse. Access is handled by configuring percpu base
* registers according to the cpu to unit mappings and offsetting the
* base address using pcpu_unit_size.
*
* There is special consideration for the first chunk which must handle
* the static percpu variables in the kernel image as allocation services
* are not online yet. In short, the first chunk is structured like so:
*
* <Static | [Reserved] | Dynamic>
*
* The static data is copied from the original section managed by the
* linker. The reserved section, if non-zero, primarily manages static
* percpu variables from kernel modules. Finally, the dynamic section
* takes care of normal allocations.
*
* The allocator organizes chunks into lists according to free size and
* memcg-awareness. To make a percpu allocation memcg-aware the __GFP_ACCOUNT
* flag should be passed. All memcg-aware allocations share one set of
* chunks, while unaccounted allocations and allocations performed by
* processes belonging to the root memory cgroup use the second set.
*
* The allocator tries to allocate from the fullest chunk first. Each chunk
* is managed by a bitmap with metadata blocks. The allocation map is updated
* on every allocation and free to reflect the current state while the boundary
* map is only updated on allocation. Each metadata block contains
* information to help mitigate the need to iterate over large portions
* of the bitmap. The reverse mapping from page to chunk is stored in
* the page's index. Lastly, units are lazily backed and grow in unison.
*
* There is a unique conversion that goes on here between bytes and bits.
* Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
* tracks the number of pages it is responsible for in nr_pages. Helper
* functions are used to convert from between the bytes, bits, and blocks.
* All hints are managed in bits unless explicitly stated.
*
* To use this allocator, arch code should do the following:
*
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
* regular address to percpu pointer and back if they need to be
* different from the default
*
* - use pcpu_setup_first_chunk() during percpu area initialization to
* setup the first chunk containing the kernel static percpu area
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>
#include "percpu-internal.h"
/*
* The slots are sorted by the size of the biggest contiguous free area.
* 1-31 bytes share the same slot.
*/
#define PCPU_SLOT_BASE_SHIFT 5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD 3
#define PCPU_EMPTY_POP_PAGES_LOW 2
#define PCPU_EMPTY_POP_PAGES_HIGH 4
#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr) \
(void __percpu *)((unsigned long)(addr) - \
(unsigned long)pcpu_base_addr + \
(unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr) \
(void __force *)((unsigned long)(ptr) + \
(unsigned long)pcpu_base_addr - \
(unsigned long)__per_cpu_start)
#endif
#else /* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
#endif /* CONFIG_SMP */
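/*
 * Note (illustrative): the default translation above is a straight rebase, so
 * __pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr)) == addr. The resulting percpu
 * pointer is still relative to unit 0; per-cpu accessors add the appropriate
 * unit offset on top (see pcpu_chunk_addr_search() later in this file).
 */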
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static int pcpu_free_slot __ro_after_init;
int pcpu_sidelined_slot __ro_after_init;
int pcpu_to_depopulate_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;
/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;
/*
* The first chunk which always exists. Note that unlike other
* chunks, this one can be allocated and mapped in several different
* ways and thus often doesn't live in the vmalloc area.
*/
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
/*
* Optional reserved chunk. This chunk reserves part of the first
* chunk and serves it for reserved allocations. When the reserved
* region doesn't exist, the following variable is NULL.
*/
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
/*
* The number of empty populated pages, protected by pcpu_lock.
* The reserved chunk doesn't contribute to the count.
*/
int pcpu_nr_empty_pop_pages;
/*
* The number of populated pages in use by the allocator, protected by
* pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
* allocated/deallocated, it is allocated/deallocated in all units of a chunk
* and increments/decrements this count by 1).
*/
static unsigned long pcpu_nr_populated;
/*
* Balance work is used to populate or destroy chunks asynchronously. We
* try to keep the number of populated free pages between
* PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
* empty chunk.
*/
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;
static void pcpu_schedule_balance_work(void)
{
if (pcpu_async_enabled)
schedule_work(&pcpu_balance_work);
}
/**
* pcpu_addr_in_chunk - check if the address is served from this chunk
* @chunk: chunk of interest
* @addr: percpu address
*
* RETURNS:
* True if the address is served from this chunk.
*/
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
void *start_addr, *end_addr;
if (!chunk)
return false;
start_addr = chunk->base_addr + chunk->start_offset;
end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
chunk->end_offset;
return addr >= start_addr && addr < end_addr;
}
static int __pcpu_size_to_slot(int size)
{
int highbit = fls(size); /* size is in bytes */
return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
static int pcpu_size_to_slot(int size)
{
if (size == pcpu_unit_size)
return pcpu_free_slot;
return __pcpu_size_to_slot(size);
}
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk_md->contig_hint == 0)
return 0;
return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
page->private = (unsigned long)pcpu;
}
/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
return (struct pcpu_chunk *)page->private;
}
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}
static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
unsigned int cpu, int page_idx)
{
return (unsigned long)chunk->base_addr +
pcpu_unit_page_offset(cpu, page_idx);
}
/*
* The following are helper functions to help access bitmaps and convert
* between bitmap offsets to address offsets.
*/
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
return chunk->alloc_map +
(index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}
static unsigned long pcpu_off_to_block_index(int off)
{
return off / PCPU_BITMAP_BLOCK_BITS;
}
static unsigned long pcpu_off_to_block_off(int off)
{
return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}
static unsigned long pcpu_block_off_to_off(int index, int off)
{
return index * PCPU_BITMAP_BLOCK_BITS + off;
}
/**
* pcpu_check_block_hint - check against the contig hint
* @block: block of interest
* @bits: size of allocation
* @align: alignment of area (max PAGE_SIZE)
*
* Check to see if the allocation can fit in the block's contig hint.
* Note, a chunk uses the same hints as a block so this can also check against
* the chunk's contig hint.
*/
static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
size_t align)
{
int bit_off = ALIGN(block->contig_hint_start, align) -
block->contig_hint_start;
return bit_off + bits <= block->contig_hint;
}
/*
* pcpu_next_hint - determine which hint to use
* @block: block of interest
* @alloc_bits: size of allocation
*
* This determines if we should scan based on the scan_hint or first_free.
* In general, we want to scan from first_free to fulfill allocations by
* first fit. However, if we know a scan_hint at position scan_hint_start
* cannot fulfill an allocation, we can begin scanning from there knowing
* the contig_hint will be our fallback.
*/
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
/*
* The three conditions below determine if we can skip past the
* scan_hint. First, does the scan hint exist. Second, is the
* contig_hint after the scan_hint (possibly not true iff
* contig_hint == scan_hint). Third, is the allocation request
* larger than the scan_hint.
*/
if (block->scan_hint && block->contig_hint_start > block->scan_hint_start &&
alloc_bits > block->scan_hint)
return block->scan_hint_start + block->scan_hint;
return block->first_free;
}
/**
* pcpu_next_md_free_region - finds the next hint free area
* @chunk: chunk of interest
* @bit_off: chunk offset
* @bits: size of free area
*
* Helper function for pcpu_for_each_md_free_region. It checks
* block->contig_hint and performs aggregation across blocks to find the
* next hint. It modifies bit_off and bits in-place to be consumed in the
* loop.
*/
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
int *bits)
{
int i = pcpu_off_to_block_index(*bit_off);
int block_off = pcpu_off_to_block_off(*bit_off);
struct pcpu_block_md *block;
*bits = 0;
for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
block++, i++) {
/* handles contig area across blocks */
if (*bits) {
*bits += block->left_free;
if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
continue;
return;
}
/*
* This checks three things. First is there a contig_hint to
* check. Second, have we checked this hint before by
* comparing the block_off. Third, is this the same as the
* right contig hint. In the last case, it spills over into
* the next block and should be handled by the contig area
* across blocks code.
*/
*bits = block->contig_hint;
if (*bits && block->contig_hint_start >= block_off &&
*bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
*bit_off = pcpu_block_off_to_off(i,
block->contig_hint_start);
return;
}
/* reset to satisfy the second predicate above */
block_off = 0;
*bits = block->right_free;
*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
}
}
/**
* pcpu_next_fit_region - finds fit areas for a given allocation request
* @chunk: chunk of interest
* @alloc_bits: size of allocation
* @align: alignment of area (max PAGE_SIZE)
* @bit_off: chunk offset
* @bits: size of free area
*
* Finds the next free region that is viable for use with a given size and
* alignment. This only returns if there is a valid area to be used for this
* allocation. block->first_free is returned if the allocation request fits
* within the block to see if the request can be fulfilled prior to the contig
* hint.
*/
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
int align, int *bit_off, int *bits)
{
int i = pcpu_off_to_block_index(*bit_off);
int block_off = pcpu_off_to_block_off(*bit_off);
struct pcpu_block_md *block;
*bits = 0;
for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); block++, i++) {
/* handles contig area across blocks */
if (*bits) {
*bits += block->left_free;
if (*bits >= alloc_bits)
return;
if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
continue;
}
/* check block->contig_hint */
*bits = ALIGN(block->contig_hint_start, align) -
block->contig_hint_start;
/*
* This uses the block offset to determine if this has been
* checked in the prior iteration.
*/
if (block->contig_hint &&
block->contig_hint_start >= block_off &&
block->contig_hint >= *bits + alloc_bits) {
int start = pcpu_next_hint(block, alloc_bits);
*bits += alloc_bits + block->contig_hint_start -
start;
*bit_off = pcpu_block_off_to_off(i, start);
return;
}
/* reset to satisfy the second predicate above */
block_off = 0;
*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
align);
*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
*bit_off = pcpu_block_off_to_off(i, *bit_off);
if (*bits >= alloc_bits)
return;
}
/* no valid offsets were found - fail condition */
*bit_off = pcpu_chunk_map_bits(chunk);
}
/*
* Metadata free area iterators. These perform aggregation of free areas
* based on the metadata blocks and return the offset @bit_off and size in
* bits of the free area @bits. pcpu_for_each_fit_region only returns when
* a fit is found for the allocation request.
*/
#define pcpu_for_each_md_free_region(chunk, bit_off, bits) \
for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
(bit_off) < pcpu_chunk_map_bits((chunk)); \
(bit_off) += (bits) + 1, \
pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \
for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
&(bits)); \
(bit_off) < pcpu_chunk_map_bits((chunk)); \
(bit_off) += (bits), \
pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
&(bits)))
/**
* pcpu_mem_zalloc - allocate memory
* @size: bytes to allocate
* @gfp: allocation flags
*
* Allocate @size bytes. If @size is smaller than PAGE_SIZE,
* kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
* This is to facilitate passing through whitelisted flags. The
* returned memory is always zeroed.
*
* RETURNS:
* Pointer to the allocated area on success, NULL on failure.
*/
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
if (WARN_ON_ONCE(!slab_is_available()))
return NULL;
if (size <= PAGE_SIZE)
return kzalloc(size, gfp);
else
return __vmalloc(size, gfp | __GFP_ZERO);
}
/**
* pcpu_mem_free - free memory
* @ptr: memory to free
*
* Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
*/
static void pcpu_mem_free(void *ptr)
{
kvfree(ptr);
}
static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
bool move_front)
{
if (chunk != pcpu_reserved_chunk) {
if (move_front)
list_move(&chunk->list, &pcpu_chunk_lists[slot]);
else
list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
}
}
static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
__pcpu_chunk_move(chunk, slot, true);
}
/**
* pcpu_chunk_relocate - put chunk in the appropriate chunk slot
* @chunk: chunk of interest
* @oslot: the previous slot it was on
*
* This function is called after an allocation or free changed @chunk.
* New slot according to the changed state is determined and @chunk is
* moved to the slot. Note that the reserved chunk is never put on
* chunk slots.
*
* CONTEXT:
* pcpu_lock.
*/
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
int nslot = pcpu_chunk_slot(chunk);
/* leave isolated chunks in-place */
if (chunk->isolated)
return;
if (oslot != nslot)
__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}
static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
{
lockdep_assert_held(&pcpu_lock);
if (!chunk->isolated) {
chunk->isolated = true;
pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
}
list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
}
static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
{
lockdep_assert_held(&pcpu_lock);
if (chunk->isolated) {
chunk->isolated = false;
pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
pcpu_chunk_relocate(chunk, -1);
}
}
/*
* pcpu_update_empty_pages - update empty page counters
* @chunk: chunk of interest
* @nr: nr of empty pages
*
* This is used to keep track of the empty pages now based on the premise
* a md_block covers a page. The hint update functions recognize if a block
* is made full or broken to calculate deltas for keeping track of free pages.
*/
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
chunk->nr_empty_pop_pages += nr;
if (chunk != pcpu_reserved_chunk && !chunk->isolated)
pcpu_nr_empty_pop_pages += nr;
}
/*
* pcpu_region_overlap - determines if two regions overlap
* @a: start of first region, inclusive
* @b: end of first region, exclusive
* @x: start of second region, inclusive
* @y: end of second region, exclusive
*
* This is used to determine if the hint region [a, b) overlaps with the
* allocated region [x, y).
*/
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
return (a < y) && (x < b);
}
/**
* pcpu_block_update - updates a block given a free area
* @block: block of interest
* @start: start offset in block
* @end: end offset in block
*
* Updates a block given a known free area. The region [start, end) is
* expected to be the entirety of the free area within a block. Chooses
* the best starting offset if the contig hints are equal.
*/
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
int contig = end - start;
block->first_free = min(block->first_free, start);
if (start == 0)
block->left_free = contig;
if (end == block->nr_bits)
block->right_free = contig;
if (contig > block->contig_hint) {
/* promote the old contig_hint to be the new scan_hint */
if (start > block->contig_hint_start) {
if (block->contig_hint > block->scan_hint) {
block->scan_hint_start =
block->contig_hint_start;
block->scan_hint = block->contig_hint;
} else if (start < block->scan_hint_start) {
/*
* The old contig_hint == scan_hint. But, the
* new contig is larger so hold the invariant
* scan_hint_start < contig_hint_start.
*/
block->scan_hint = 0;
}
} else {
block->scan_hint = 0;
}
block->contig_hint_start = start;
block->contig_hint = contig;
} else if (contig == block->contig_hint) {
if (block->contig_hint_start && (!start ||
__ffs(start) > __ffs(block->contig_hint_start))) {
/* start has a better alignment so use it */
block->contig_hint_start = start;
if (start < block->scan_hint_start &&
block->contig_hint > block->scan_hint)
block->scan_hint = 0;
} else if (start > block->scan_hint_start ||
block->contig_hint > block->scan_hint) {
/*
* Knowing contig == contig_hint, update the scan_hint
* if it is farther than or larger than the current
* scan_hint.
*/
block->scan_hint_start = start;
block->scan_hint = contig;
}
} else {
/*
* The region is smaller than the contig_hint. So only update
* the scan_hint if it is larger than or equal and farther than
* the current scan_hint.
*/
if ((start < block->contig_hint_start &&
(contig > block->scan_hint || (contig == block->scan_hint &&
start > block->scan_hint_start)))) {
block->scan_hint_start = start;
block->scan_hint = contig;
}
}
}
/*
* pcpu_block_update_scan - update a block given a free area from a scan
* @chunk: chunk of interest
* @bit_off: chunk offset
* @bits: size of free area
*
* Finding the final allocation spot first goes through pcpu_find_block_fit()
* to find a block that can hold the allocation and then pcpu_alloc_area()
* where a scan is used. When allocations require specific alignments,
* we can inadvertently create holes which will not be seen in the alloc
* or free paths.
*
* This takes a given free area hole and updates a block as it may change the
* scan_hint. We need to scan backwards to ensure we don't miss free bits
* from alignment.
*/
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
int bits)
{
int s_off = pcpu_off_to_block_off(bit_off);
int e_off = s_off + bits;
int s_index, l_bit;
struct pcpu_block_md *block;
if (e_off > PCPU_BITMAP_BLOCK_BITS)
return;
s_index = pcpu_off_to_block_index(bit_off);
block = chunk->md_blocks + s_index;
/* scan backwards in case of alignment skipping free bits */
l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
s_off = (s_off == l_bit) ? 0 : l_bit + 1;
pcpu_block_update(block, s_off, e_off);
}
/**
* pcpu_chunk_refresh_hint - updates metadata about a chunk
* @chunk: chunk of interest
* @full_scan: if we should scan from the beginning
*
* Iterates over the metadata blocks to find the largest contig area.
* A full scan can be avoided on the allocation path as this is triggered
* if we broke the contig_hint. In doing so, the scan_hint will be before
* the contig_hint or after if the scan_hint == contig_hint. This cannot
* be prevented on freeing as we want to find the largest area possibly
* spanning blocks.
*/
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
struct pcpu_block_md *chunk_md = &chunk->chunk_md;
int bit_off, bits;
/* promote scan_hint to contig_hint */
if (!full_scan && chunk_md->scan_hint) {
bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
chunk_md->contig_hint_start = chunk_md->scan_hint_start;
chunk_md->contig_hint = chunk_md->scan_hint;
chunk_md->scan_hint = 0;
} else {
bit_off = chunk_md->first_free;
chunk_md->contig_hint = 0;
}
bits = 0;
pcpu_for_each_md_free_region(chunk, bit_off, bits)
pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}
/**
* pcpu_block_refresh_hint - refresh metadata about a block
* @chunk: chunk of interest
* @index: index of the metadata block
*
* Scans over the block beginning at first_free and updates the block
* metadata accordingly.
*/
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
struct pcpu_block_md *block = chunk->md_blocks + index;
unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
unsigned int start, end; /* region start, region end */
/* promote scan_hint to contig_hint */
if (block->scan_hint) {
start = block->scan_hint_start + block->scan_hint;
block->contig_hint_start = block->scan_hint_start;
block->contig_hint = block->scan_hint;
block->scan_hint = 0;
} else {
start = block->first_free;
block->contig_hint = 0;
}
block->right_free = 0;
/* iterate over free areas and update the contig hints */
for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
pcpu_block_update(block, start, end);
}
/**
* pcpu_block_update_hint_alloc - update hint on allocation path
* @chunk: chunk of interest
* @bit_off: chunk offset
* @bits: size of request
*
* Updates metadata for the allocation path. The metadata only has to be
* refreshed by a full scan iff the chunk's contig hint is broken. Block level
* scans are required if the block's contig hint is broken.
*/
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
int bits)
{
struct pcpu_block_md *chunk_md = &chunk->chunk_md;
int nr_empty_pages = 0;
struct pcpu_block_md *s_block, *e_block, *block;
int s_index, e_index; /* block indexes of the freed allocation */
int s_off, e_off; /* block offsets of the freed allocation */
/*
* Calculate per block offsets.
* The calculation uses an inclusive range, but the resulting offsets
* are [start, end). e_index always points to the last block in the
* range.
*/
s_index = pcpu_off_to_block_index(bit_off);
e_index = pcpu_off_to_block_index(bit_off + bits - 1);
s_off = pcpu_off_to_block_off(bit_off);
e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
s_block = chunk->md_blocks + s_index;
e_block = chunk->md_blocks + e_index;
/*
* Update s_block.
*/
if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
nr_empty_pages++;
/*
* block->first_free must be updated if the allocation takes its place.
* If the allocation breaks the contig_hint, a scan is required to
* restore this hint.
*/
if (s_off == s_block->first_free)
s_block->first_free = find_next_zero_bit(
pcpu_index_alloc_map(chunk, s_index),
PCPU_BITMAP_BLOCK_BITS,
s_off + bits);
if (pcpu_region_overlap(s_block->scan_hint_start,
s_block->scan_hint_start + s_block->scan_hint,
s_off,
s_off + bits))
s_block->scan_hint = 0;
if (pcpu_region_overlap(s_block->contig_hint_start,
s_block->contig_hint_start + s_block->contig_hint,
s_off,
s_off + bits)) {
/* block contig hint is broken - scan to fix it */
if (!s_off)
s_block->left_free = 0;
pcpu_block_refresh_hint(chunk, s_index);
} else {
/* update left and right contig manually */
s_block->left_free = min(s_block->left_free, s_off);
if (s_index == e_index)
s_block->right_free = min_t(int, s_block->right_free,
PCPU_BITMAP_BLOCK_BITS - e_off);
else
s_block->right_free = 0;
}
/*
* Update e_block.
*/
if (s_index != e_index) {
if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
nr_empty_pages++;
/*
* When the allocation is across blocks, the end is along
* the left part of the e_block.
*/
e_block->first_free = find_next_zero_bit(
pcpu_index_alloc_map(chunk, e_index),
PCPU_BITMAP_BLOCK_BITS, e_off);
if (e_off == PCPU_BITMAP_BLOCK_BITS) {
/* reset the block */
e_block++;
} else {
if (e_off > e_block->scan_hint_start)
e_block->scan_hint = 0;
e_block->left_free = 0;
if (e_off > e_block->contig_hint_start) {
/* contig hint is broken - scan to fix it */
pcpu_block_refresh_hint(chunk, e_index);
} else {
e_block->right_free =
min_t(int, e_block->right_free,
PCPU_BITMAP_BLOCK_BITS - e_off);
}
}
/* update in-between md_blocks */
nr_empty_pages += (e_index - s_index - 1);
for (block = s_block + 1; block < e_block; block++) {
block->scan_hint = 0;
block->contig_hint = 0;
block->left_free = 0;
block->right_free = 0;
}
}
/*
* If the allocation is not atomic, some blocks may not be
* populated with pages, while we account it here. The number
* of pages will be added back with pcpu_chunk_populated()
* when populating pages.
*/
if (nr_empty_pages)
pcpu_update_empty_pages(chunk, -nr_empty_pages);
if (pcpu_region_overlap(chunk_md->scan_hint_start,
chunk_md->scan_hint_start +
chunk_md->scan_hint,
bit_off,
bit_off + bits))
chunk_md->scan_hint = 0;
/*
* The only time a full chunk scan is required is if the chunk
* contig hint is broken. Otherwise, it means a smaller space
* was used and therefore the chunk contig hint is still correct.
*/
if (pcpu_region_overlap(chunk_md->contig_hint_start,
chunk_md->contig_hint_start +
chunk_md->contig_hint,
bit_off,
bit_off + bits))
pcpu_chunk_refresh_hint(chunk, false);
}
/**
* pcpu_block_update_hint_free - updates the block hints on the free path
* @chunk: chunk of interest
* @bit_off: chunk offset
* @bits: size of request
*
* Updates metadata for the free path. This avoids a blind block
* refresh by making use of the block contig hints. If this fails, it scans
* forward and backward to determine the extent of the free area. This is
* capped at the boundary of blocks.
*
* A chunk update is triggered if a page becomes free, a block becomes free,
* or the free spans across blocks. This tradeoff is to minimize iterating
* over the block metadata to update chunk_md->contig_hint.
* chunk_md->contig_hint may be off by up to a page, but it will never be more
* than the available space. If the contig hint is contained in one block, it
* will be accurate.
*/
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
int bits)
{
int nr_empty_pages = 0;
struct pcpu_block_md *s_block, *e_block, *block;
int s_index, e_index; /* block indexes of the freed allocation */
int s_off, e_off; /* block offsets of the freed allocation */
int start, end; /* start and end of the whole free area */
/*
* Calculate per block offsets.
* The calculation uses an inclusive range, but the resulting offsets
* are [start, end). e_index always points to the last block in the
* range.
*/
s_index = pcpu_off_to_block_index(bit_off);
e_index = pcpu_off_to_block_index(bit_off + bits - 1);
s_off = pcpu_off_to_block_off(bit_off);
e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
s_block = chunk->md_blocks + s_index;
e_block = chunk->md_blocks + e_index;
/*
* Check if the freed area aligns with the block->contig_hint.
* If it does, then the scan to find the beginning/end of the
* larger free area can be avoided.
*
* start and end refer to beginning and end of the free area
* within their respective blocks. This is not necessarily
* the entire free area as it may span blocks past the beginning
* or end of the block.
*/
start = s_off;
if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
start = s_block->contig_hint_start;
} else {
/*
* Scan backwards to find the extent of the free area.
* find_last_bit returns the starting bit, so if the start bit
* is returned, that means there was no last bit and the
* remainder of the chunk is free.
*/
int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
start);
start = (start == l_bit) ? 0 : l_bit + 1;
}
end = e_off;
if (e_off == e_block->contig_hint_start)
end = e_block->contig_hint_start + e_block->contig_hint;
else
end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
PCPU_BITMAP_BLOCK_BITS, end);
/* update s_block */
e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
nr_empty_pages++;
pcpu_block_update(s_block, start, e_off);
/* the free spans more than one block */
if (s_index != e_index) {
/* update e_block */
if (end == PCPU_BITMAP_BLOCK_BITS)
nr_empty_pages++;
pcpu_block_update(e_block, 0, end);
/* reset md_blocks in the middle */
nr_empty_pages += (e_index - s_index - 1);
for (block = s_block + 1; block < e_block; block++) {
block->first_free = 0;
block->scan_hint = 0;
block->contig_hint_start = 0;
block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
block->left_free = PCPU_BITMAP_BLOCK_BITS;
block->right_free = PCPU_BITMAP_BLOCK_BITS;
}
}
if (nr_empty_pages)
pcpu_update_empty_pages(chunk, nr_empty_pages);
/*
* Refresh chunk metadata when the free makes a block free or spans
* across blocks. The contig_hint may be off by up to a page, but if
* the contig_hint is contained in a block, it will be accurate with
* the else condition below.
*/
if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
pcpu_chunk_refresh_hint(chunk, true);
else
pcpu_block_update(&chunk->chunk_md,
pcpu_block_off_to_off(s_index, start),
end);
}
/**
* pcpu_is_populated - determines if the region is populated
* @chunk: chunk of interest
* @bit_off: chunk offset
* @bits: size of area
* @next_off: return value for the next offset to start searching
*
* For atomic allocations, check if the backing pages are populated.
*
* RETURNS:
* Bool if the backing pages are populated.
* next_off is to skip over unpopulated blocks in pcpu_find_block_fit.
*/
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
int *next_off)
{
unsigned int start, end;
start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
start = find_next_zero_bit(chunk->populated, end, start);
if (start >= end)
return true;
end = find_next_bit(chunk->populated, end, start + 1);
*next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
return false;
}
/**
* pcpu_find_block_fit - finds the block index to start searching
* @chunk: chunk of interest
* @alloc_bits: size of request in allocation units
* @align: alignment of area (max PAGE_SIZE bytes)
* @pop_only: use populated regions only
*
* Given a chunk and an allocation spec, find the offset to begin searching
* for a free region. This iterates over the bitmap metadata blocks to
* find an offset that will be guaranteed to fit the requirements. It is
* not quite first fit: if the allocation does not fit in the contig hint
* of a block or chunk, it is skipped. This errs on the side of caution
* to prevent excess iteration. Poor alignment can cause the allocator to
* skip over blocks and chunks that have valid free areas.
*
* RETURNS:
* The offset in the bitmap to begin searching.
* -1 if no offset is found.
*/
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
size_t align, bool pop_only)
{
struct pcpu_block_md *chunk_md = &chunk->chunk_md;
int bit_off, bits, next_off;
/*
* This is an optimization to prevent scanning by assuming if the
* allocation cannot fit in the global hint, there is memory pressure
* and creating a new chunk would happen soon.
*/
if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
return -1;
bit_off = pcpu_next_hint(chunk_md, alloc_bits);
bits = 0;
pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
&next_off))
break;
bit_off = next_off;
bits = 0;
}
if (bit_off == pcpu_chunk_map_bits(chunk))
return -1;
return bit_off;
}
/*
* pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
* @map: the address to base the search on
* @size: the bitmap size in bits
* @start: the bitnumber to start searching at
* @nr: the number of zeroed bits we're looking for
* @align_mask: alignment mask for zero area
* @largest_off: offset of the largest area skipped
* @largest_bits: size of the largest area skipped
*
* The @align_mask should be one less than a power of 2.
*
* This is a modified version of bitmap_find_next_zero_area_off() to remember
* the largest area that was skipped. This is imperfect, but in general is
* good enough. The largest remembered region is the largest failed region
* seen. This does not include anything we possibly skipped due to alignment.
* pcpu_block_update_scan() does scan backwards to try and recover what was
* lost to alignment. While this can cause scanning to miss earlier possible
* free areas, smaller allocations will eventually fill those holes.
*/
static unsigned long pcpu_find_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned long nr,
unsigned long align_mask,
unsigned long *largest_off,
unsigned long *largest_bits)
{
unsigned long index, end, i, area_off, area_bits;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = __ALIGN_MASK(index, align_mask);
area_off = index;
end = index + nr;
if (end > size)
return end;
i = find_next_bit(map, end, index);
if (i < end) {
area_bits = i - area_off;
/* remember largest unused area with best alignment */
if (area_bits > *largest_bits ||
(area_bits == *largest_bits && *largest_off &&
(!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
*largest_off = area_off;
*largest_bits = area_bits;
}
start = i + 1;
goto again;
}
return index;
}
/**
* pcpu_alloc_area - allocates an area from a pcpu_chunk
* @chunk: chunk of interest
* @alloc_bits: size of request in allocation units
* @align: alignment of area (max PAGE_SIZE)
* @start: bit_off to start searching
*
* This function takes in a @start offset to begin searching to fit an
* allocation of @alloc_bits with alignment @align. It needs to scan
* the allocation map because if it fits within the block's contig hint,
* @start will be block->first_free. This is an attempt to fill the
* allocation prior to breaking the contig hint. The allocation and
* boundary maps are updated accordingly if it confirms a valid
* free area.
*
* RETURNS:
* Allocated addr offset in @chunk on success.
* -1 if no matching area is found.
*/
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
size_t align, int start)
{
struct pcpu_block_md *chunk_md = &chunk->chunk_md;
size_t align_mask = (align) ? (align - 1) : 0;
unsigned long area_off = 0, area_bits = 0;
int bit_off, end, oslot;
lockdep_assert_held(&pcpu_lock);
oslot = pcpu_chunk_slot(chunk);
/*
* Search to find a fit.
*/
end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
pcpu_chunk_map_bits(chunk));
bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
align_mask, &area_off, &area_bits);
if (bit_off >= end)
return -1;
if (area_bits)
pcpu_block_update_scan(chunk, area_off, area_bits);
/* update alloc map */
bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
/* update boundary map */
set_bit(bit_off, chunk->bound_map);
bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
set_bit(bit_off + alloc_bits, chunk->bound_map);
chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
/* update first free bit */
if (bit_off == chunk_md->first_free)
chunk_md->first_free = find_next_zero_bit(
chunk->alloc_map,
pcpu_chunk_map_bits(chunk),
bit_off + alloc_bits);
pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
pcpu_chunk_relocate(chunk, oslot);
return bit_off * PCPU_MIN_ALLOC_SIZE;
}
/**
* pcpu_free_area - frees the corresponding offset
* @chunk: chunk of interest
* @off: addr offset into chunk
*
* This function determines the size of an allocation to free using
* the boundary bitmap and clears the allocation map.
*
* RETURNS:
* Number of freed bytes.
*/
static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
struct pcpu_block_md *chunk_md = &chunk->chunk_md;
int bit_off, bits, end, oslot, freed;
lockdep_assert_held(&pcpu_lock);
pcpu_stats_area_dealloc(chunk);
oslot = pcpu_chunk_slot(chunk);
bit_off = off / PCPU_MIN_ALLOC_SIZE;
/* find end index */
end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
bit_off + 1);
bits = end - bit_off;
bitmap_clear(chunk->alloc_map, bit_off, bits);
freed = bits * PCPU_MIN_ALLOC_SIZE;
/* update metadata */
chunk->free_bytes += freed;
/* update first free bit */
chunk_md->first_free = min(chunk_md->first_free, bit_off);
pcpu_block_update_hint_free(chunk, bit_off, bits);
pcpu_chunk_relocate(chunk, oslot);
return freed;
}
static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
{
block->scan_hint = 0;
block->contig_hint = nr_bits;
block->left_free = nr_bits;
block->right_free = nr_bits;
block->first_free = 0;
block->nr_bits = nr_bits;
}
static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
struct pcpu_block_md *md_block;
/* init the chunk's block */
pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
for (md_block = chunk->md_blocks;
md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
md_block++)
pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
}
/**
* pcpu_alloc_first_chunk - creates chunks that serve the first chunk
* @tmp_addr: the start of the region served
* @map_size: size of the region served
*
* This is responsible for creating the chunks that serve the first chunk. The
* base_addr is page aligned down of @tmp_addr while the region end is page
* aligned up. Offsets are kept track of to determine the region served. All
* this is done to appease the bitmap allocator in avoiding partial blocks.
*
* RETURNS:
* Chunk serving the region at @tmp_addr of @map_size.
*/
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
int map_size)
{
struct pcpu_chunk *chunk;
unsigned long aligned_addr;
int start_offset, offset_bits, region_size, region_bits;
size_t alloc_size;
/* region calculations */
aligned_addr = tmp_addr & PAGE_MASK;
start_offset = tmp_addr - aligned_addr;
region_size = ALIGN(start_offset + map_size, PAGE_SIZE);
/* allocate chunk */
alloc_size = struct_size(chunk, populated,
BITS_TO_LONGS(region_size >> PAGE_SHIFT));
chunk = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
INIT_LIST_HEAD(&chunk->list);
chunk->base_addr = (void *)aligned_addr;
chunk->start_offset = start_offset;
chunk->end_offset = region_size - chunk->start_offset - map_size;
chunk->nr_pages = region_size >> PAGE_SHIFT;
region_bits = pcpu_chunk_map_bits(chunk);
alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
chunk->alloc_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size =
BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
chunk->bound_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
chunk->md_blocks = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
#ifdef NEED_PCPUOBJ_EXT
/* first chunk is free to use */
chunk->obj_exts = NULL;
#endif
pcpu_init_md_blocks(chunk);
/* manage populated page bitmap */
chunk->immutable = true;
bitmap_fill(chunk->populated, chunk->nr_pages);
chunk->nr_populated = chunk->nr_pages;
chunk->nr_empty_pop_pages = chunk->nr_pages;
chunk->free_bytes = map_size;
if (chunk->start_offset) {
/* hide the beginning of the bitmap */
offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
bitmap_set(chunk->alloc_map, 0, offset_bits);
set_bit(0, chunk->bound_map);
set_bit(offset_bits, chunk->bound_map);
chunk->chunk_md.first_free = offset_bits;
pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
}
if (chunk->end_offset) {
/* hide the end of the bitmap */
offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
bitmap_set(chunk->alloc_map,
pcpu_chunk_map_bits(chunk) - offset_bits,
offset_bits);
set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
chunk->bound_map);
set_bit(region_bits, chunk->bound_map);
pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
- offset_bits, offset_bits);
}
return chunk;
}
static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
{
struct pcpu_chunk *chunk;
int region_bits;
chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
if (!chunk)
return NULL;
INIT_LIST_HEAD(&chunk->list);
chunk->nr_pages = pcpu_unit_pages;
region_bits = pcpu_chunk_map_bits(chunk);
chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
sizeof(chunk->alloc_map[0]), gfp);
if (!chunk->alloc_map)
goto alloc_map_fail;
chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
sizeof(chunk->bound_map[0]), gfp);
if (!chunk->bound_map)
goto bound_map_fail;
chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
sizeof(chunk->md_blocks[0]), gfp);
if (!chunk->md_blocks)
goto md_blocks_fail;
#ifdef NEED_PCPUOBJ_EXT
if (need_pcpuobj_ext()) {
chunk->obj_exts =
pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
sizeof(struct pcpuobj_ext), gfp);
if (!chunk->obj_exts)
goto objcg_fail;
}
#endif
pcpu_init_md_blocks(chunk);
/* init metadata */
chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
return chunk;
#ifdef NEED_PCPUOBJ_EXT
objcg_fail:
pcpu_mem_free(chunk->md_blocks);
#endif
md_blocks_fail:
pcpu_mem_free(chunk->bound_map);
bound_map_fail:
pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
pcpu_mem_free(chunk);
return NULL;
}
static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
if (!chunk)
return;
#ifdef NEED_PCPUOBJ_EXT
pcpu_mem_free(chunk->obj_exts);
#endif
pcpu_mem_free(chunk->md_blocks);
pcpu_mem_free(chunk->bound_map);
pcpu_mem_free(chunk->alloc_map);
pcpu_mem_free(chunk);
}
/**
* pcpu_chunk_populated - post-population bookkeeping
* @chunk: pcpu_chunk which got populated
* @page_start: the start page
* @page_end: the end page
*
* Pages in [@page_start,@page_end) have been populated to @chunk. Update
* the bookkeeping information accordingly. Must be called after each
* successful population.
*/
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
int page_end)
{
int nr = page_end - page_start;
lockdep_assert_held(&pcpu_lock);
bitmap_set(chunk->populated, page_start, nr);
chunk->nr_populated += nr;
pcpu_nr_populated += nr;
pcpu_update_empty_pages(chunk, nr);
}
/**
* pcpu_chunk_depopulated - post-depopulation bookkeeping
* @chunk: pcpu_chunk which got depopulated
* @page_start: the start page
* @page_end: the end page
*
* Pages in [@page_start,@page_end) have been depopulated from @chunk.
* Update the bookkeeping information accordingly. Must be called after
* each successful depopulation.
*/
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
int nr = page_end - page_start;
lockdep_assert_held(&pcpu_lock);
bitmap_clear(chunk->populated, page_start, nr);
chunk->nr_populated -= nr;
pcpu_nr_populated -= nr;
pcpu_update_empty_pages(chunk, -nr);
}
/*
* Chunk management implementation.
*
* To allow different implementations, chunk alloc/free and
* [de]population are implemented in a separate file which is pulled
* into this file and compiled together. The following functions
* should be implemented.
*
* pcpu_populate_chunk - populate the specified range of a chunk
* pcpu_depopulate_chunk - depopulate the specified range of a chunk
* pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
* pcpu_create_chunk - create a new chunk
* pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
* pcpu_addr_to_page - translate address to the corresponding struct page
* pcpu_verify_alloc_info - check alloc_info is acceptable during init
*/
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
int page_start, int page_end, gfp_t gfp);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
int page_start, int page_end);
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
int page_start, int page_end);
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif
/**
* pcpu_chunk_addr_search - determine chunk containing specified address
* @addr: address for which the chunk needs to be determined.
*
* This is an internal function that handles all but static allocations.
* Static percpu address values should never be passed into the allocator.
*
* RETURNS:
* The address of the found chunk.
*/
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
/* is it in the dynamic region (first chunk)? */
if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
return pcpu_first_chunk;
/* is it in the reserved region? */
if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
return pcpu_reserved_chunk;
/*
* The address is relative to unit0 which might be unused and
* thus unmapped. Offset the address to the unit space of the
* current processor before looking it up in the vmalloc
* space. Note that any possible cpu id can be used here, so
* there's no need to worry about preemption or cpu hotplug.
*/
addr += pcpu_unit_offsets[raw_smp_processor_id()];
return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}
#ifdef CONFIG_MEMCG
static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
struct obj_cgroup **objcgp)
{
struct obj_cgroup *objcg;
if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
return true;
objcg = current_obj_cgroup();
if (!objcg)
return true;
if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size)))
return false;
*objcgp = objcg;
return true;
}
static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
struct pcpu_chunk *chunk, int off,
size_t size)
{
if (!objcg)
return;
if (likely(chunk && chunk->obj_exts)) {
obj_cgroup_get(objcg);
chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg;
rcu_read_lock();
mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
pcpu_obj_full_size(size));
rcu_read_unlock();
} else {
obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
}
}
static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
struct obj_cgroup *objcg;
if (unlikely(!chunk->obj_exts))
return;
objcg = chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup;
if (!objcg)
return;
chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = NULL;
obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
rcu_read_lock();
mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
-pcpu_obj_full_size(size));
rcu_read_unlock();
obj_cgroup_put(objcg);
}
#else /* CONFIG_MEMCG */
static bool
pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
{
return true;
}
static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
struct pcpu_chunk *chunk, int off,
size_t size)
{
}
static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
}
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MEM_ALLOC_PROFILING
static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
size_t size)
{
if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts)) {
alloc_tag_add(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag,
current->alloc_tag, size);
}
}
static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts))
alloc_tag_sub(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag, size);
}
#else
static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
size_t size)
{
}
static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
}
#endif
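/*
 * Illustrative sketch only (never called): the caller-facing side of this
 * allocator is alloc_percpu()/free_percpu() plus the this_cpu_*() and
 * per_cpu_ptr() accessors. example_percpu_usage() is a hypothetical helper.
 */
static int __maybe_unused example_percpu_usage(void)
{
int __percpu *counters;
counters = alloc_percpu(int); /* GFP_KERNEL, zero-initialized */
if (!counters)
return -ENOMEM;
this_cpu_inc(*counters); /* bump this CPU's copy */
free_percpu(counters);
return 0;
}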
/**
* pcpu_alloc - the percpu allocator
* @size: size of area to allocate in bytes
* @align: alignment of area (max PAGE_SIZE)
* @reserved: allocate from the reserved chunk if available
* @gfp: allocation flags
*
* Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
* contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
* then no warning will be triggered on invalid or failed allocation
* requests.
*
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
gfp_t gfp)
{
gfp_t pcpu_gfp;
bool is_atomic;
bool do_warn;
struct obj_cgroup *objcg = NULL;
static atomic_t warn_limit = ATOMIC_INIT(10);
struct pcpu_chunk *chunk, *next;
const char *err;
int slot, off, cpu, ret;
unsigned long flags;
void __percpu *ptr;
size_t bits, bit_align;
gfp = current_gfp_context(gfp);
/* whitelisted flags that can be passed to the backing allocators */
pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
is_atomic = !gfpflags_allow_blocking(gfp);
do_warn = !(gfp & __GFP_NOWARN);
/*
* There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
* therefore alignment must be a minimum of that many bytes.
* An allocation may have internal fragmentation from rounding up
* of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
*/
if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
align = PCPU_MIN_ALLOC_SIZE;
size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
bits = size >> PCPU_MIN_ALLOC_SHIFT;
bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
!is_power_of_2(align))) {
WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
size, align);
return NULL;
}
if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
return NULL;
if (!is_atomic) {
/*
* pcpu_balance_workfn() allocates memory under this mutex,
* and it may wait for memory reclaim. Allow current task
* to become OOM victim, in case of memory pressure.
*/
if (gfp & __GFP_NOFAIL) {
mutex_lock(&pcpu_alloc_mutex);
} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
return NULL;
}
}
spin_lock_irqsave(&pcpu_lock, flags);
/* serve reserved allocations from the reserved chunk if available */
if (reserved && pcpu_reserved_chunk) {
chunk = pcpu_reserved_chunk;
off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
if (off < 0) {
err = "alloc from reserved chunk failed";
goto fail_unlock;
}
off = pcpu_alloc_area(chunk, bits, bit_align, off);
if (off >= 0)
goto area_found;
err = "alloc from reserved chunk failed";
goto fail_unlock;
}
restart:
/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
		list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
					 list) {
off = pcpu_find_block_fit(chunk, bits, bit_align,
is_atomic);
if (off < 0) {
				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
					pcpu_chunk_move(chunk, 0);
				continue;
}
off = pcpu_alloc_area(chunk, bits, bit_align, off);
if (off >= 0) {
pcpu_reintegrate_chunk(chunk);
goto area_found;
}
}
}
spin_unlock_irqrestore(&pcpu_lock, flags);
if (is_atomic) {
err = "atomic alloc failed, no space left";
goto fail;
}
/* No space left. Create a new chunk. */
if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
chunk = pcpu_create_chunk(pcpu_gfp);
		if (!chunk) {
			err = "failed to allocate new chunk";
goto fail;
}
spin_lock_irqsave(&pcpu_lock, flags);
pcpu_chunk_relocate(chunk, -1);
} else {
spin_lock_irqsave(&pcpu_lock, flags);
}
goto restart;
area_found:
pcpu_stats_area_alloc(chunk, size);
	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	spin_unlock_irqrestore(&pcpu_lock, flags);
/* populate if not all pages are already there */
if (!is_atomic) {
unsigned int page_end, rs, re;
rs = PFN_DOWN(off);
page_end = PFN_UP(off + size);
		for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
spin_lock_irqsave(&pcpu_lock, flags);
if (ret) {
pcpu_free_area(chunk, off);
err = "failed to populate";
goto fail_unlock;
}
pcpu_chunk_populated(chunk, rs, re);
spin_unlock_irqrestore(&pcpu_lock, flags);
}
mutex_unlock(&pcpu_alloc_mutex);
}
/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
kmemleak_alloc_percpu(ptr, size, gfp);
trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
chunk->base_addr, off, ptr,
pcpu_obj_full_size(size), gfp);
pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
pcpu_alloc_tag_alloc_hook(chunk, off, size);
return ptr;
fail_unlock:
spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (do_warn) {
		int remaining = atomic_dec_if_positive(&warn_limit);

		if (remaining >= 0) {
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
size, align, is_atomic, err);
if (!is_atomic)
dump_stack();
			if (remaining == 0)
				pr_info("limit reached, disable warning\n");
}
}
if (is_atomic) {
/* see the flag handling in pcpu_balance_workfn() */
pcpu_atomic_alloc_failed = true;
pcpu_schedule_balance_work();
} else {
mutex_unlock(&pcpu_alloc_mutex);
}
	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);

	return NULL;
}
EXPORT_SYMBOL_GPL(pcpu_alloc_noprof);
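/*
 * Illustrative sketch (not part of this file): the typical consumer-side
 * pattern that reaches pcpu_alloc_noprof() via the alloc_percpu() macro,
 * assuming <linux/percpu.h>. The structure and the init/exit helpers are
 * hypothetical and only show the allocate/iterate/free sequence.
 */
struct hypothetical_stats {
	u64 packets;
	u64 bytes;
};

static struct hypothetical_stats __percpu *hypothetical_stats;

static int __maybe_unused hypothetical_init(void)
{
	int cpu;

	/* GFP_KERNEL path: may sleep and may populate new chunks */
	hypothetical_stats = alloc_percpu(struct hypothetical_stats);
	if (!hypothetical_stats)
		return -ENOMEM;

	/* pcpu_alloc() returns zeroed memory; the loop is only illustrative */
	for_each_possible_cpu(cpu) {
		struct hypothetical_stats *s = per_cpu_ptr(hypothetical_stats, cpu);

		s->packets = 0;
		s->bytes = 0;
	}
	return 0;
}

static void __maybe_unused hypothetical_exit(void)
{
	free_percpu(hypothetical_stats);
}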
/**
* pcpu_balance_free - manage the amount of free chunks
* @empty_only: free chunks only if there are no populated pages
*
* If empty_only is %false, reclaim all fully free chunks regardless of the
* number of populated pages. Otherwise, only reclaim chunks that have no
* populated pages.
*
* CONTEXT:
* pcpu_lock (can be dropped temporarily)
*/
static void pcpu_balance_free(bool empty_only)
{
LIST_HEAD(to_free);
struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
struct pcpu_chunk *chunk, *next;
lockdep_assert_held(&pcpu_lock);
/*
* There's no reason to keep around multiple unused chunks and VM
* areas can be scarce. Destroy all free chunks except for one.
*/
list_for_each_entry_safe(chunk, next, free_head, list) {
WARN_ON(chunk->immutable);
/* spare the first one */
if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
continue;
if (!empty_only || chunk->nr_empty_pop_pages == 0)
list_move(&chunk->list, &to_free);
}
if (list_empty(&to_free))
return;
spin_unlock_irq(&pcpu_lock);
list_for_each_entry_safe(chunk, next, &to_free, list) {
unsigned int rs, re;
for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
pcpu_depopulate_chunk(chunk, rs, re);
spin_lock_irq(&pcpu_lock);
pcpu_chunk_depopulated(chunk, rs, re);
spin_unlock_irq(&pcpu_lock);
}
pcpu_destroy_chunk(chunk);
cond_resched();
}
spin_lock_irq(&pcpu_lock);
}
/**
* pcpu_balance_populated - manage the amount of populated pages
*
* Maintain a certain amount of populated pages to satisfy atomic allocations.
* It is possible that this is called when physical memory is scarce, causing
* the OOM killer to be triggered. We should avoid doing so until an actual
* allocation causes the failure as it is possible that requests can be
* serviced from already backed regions.
*
* CONTEXT:
* pcpu_lock (can be dropped temporarily)
*/
static void pcpu_balance_populated(void)
{
/* gfp flags passed to underlying allocators */
const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
struct pcpu_chunk *chunk;
int slot, nr_to_pop, ret;
lockdep_assert_held(&pcpu_lock);
/*
* Ensure there are a certain number of free populated pages for
* atomic allocs. Fill up from the most packed so that atomic
* allocs don't increase fragmentation. If atomic allocation
* failed previously, always populate the maximum amount. This
* should prevent atomic allocs larger than PAGE_SIZE from failing
* indefinitely; however, large atomic allocs are not
* something we support properly and can be highly unreliable and
* inefficient.
*/
retry_pop:
if (pcpu_atomic_alloc_failed) {
nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
/* best effort anyway, don't worry about synchronization */
pcpu_atomic_alloc_failed = false;
} else {
nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
pcpu_nr_empty_pop_pages,
0, PCPU_EMPTY_POP_PAGES_HIGH);
}
for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
unsigned int nr_unpop = 0, rs, re;
if (!nr_to_pop)
break;
list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
nr_unpop = chunk->nr_pages - chunk->nr_populated;
if (nr_unpop)
break;
}
if (!nr_unpop)
continue;
/* @chunk can't go away while pcpu_alloc_mutex is held */
for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
int nr = min_t(int, re - rs, nr_to_pop);
spin_unlock_irq(&pcpu_lock);
ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
cond_resched();
spin_lock_irq(&pcpu_lock);
if (!ret) {
nr_to_pop -= nr;
pcpu_chunk_populated(chunk, rs, rs + nr);
} else {
nr_to_pop = 0;
}
if (!nr_to_pop)
break;
}
}
if (nr_to_pop) {
/* ran out of chunks to populate, create a new one and retry */
spin_unlock_irq(&pcpu_lock);
chunk = pcpu_create_chunk(gfp);
cond_resched();
spin_lock_irq(&pcpu_lock);
if (chunk) {
pcpu_chunk_relocate(chunk, -1);
goto retry_pop;
}
}
}
/**
* pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
*
* Scan over chunks in the depopulate list and try to release unused populated
* pages back to the system. Depopulated chunks are sidelined to prevent
* repopulating these pages unless required. Fully free chunks are reintegrated
* and freed accordingly (1 is kept around). If we drop below the empty
* populated pages threshold, reintegrate the chunk if it has empty free pages.
* Each chunk is scanned in the reverse order to keep populated pages close to
* the beginning of the chunk.
*
* CONTEXT:
* pcpu_lock (can be dropped temporarily)
*
*/
static void pcpu_reclaim_populated(void)
{
struct pcpu_chunk *chunk;
struct pcpu_block_md *block;
int freed_page_start, freed_page_end;
int i, end;
bool reintegrate;
lockdep_assert_held(&pcpu_lock);
/*
* Once a chunk is isolated to the to_depopulate list, the chunk is no
* longer discoverable to allocations which may populate pages. The only
* other accessor is the free path, which only returns the area back to the
* allocator without touching the populated bitmap.
*/
while ((chunk = list_first_entry_or_null(
&pcpu_chunk_lists[pcpu_to_depopulate_slot],
struct pcpu_chunk, list))) {
WARN_ON(chunk->immutable);
/*
* Scan chunk's pages in the reverse order to keep populated
* pages close to the beginning of the chunk.
*/
freed_page_start = chunk->nr_pages;
freed_page_end = 0;
reintegrate = false;
for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
/* no more work to do */
if (chunk->nr_empty_pop_pages == 0)
break;
/* reintegrate chunk to prevent atomic alloc failures */
if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
reintegrate = true;
break;
}
/*
* If the page is empty and populated, start or
* extend the (i, end) range. If i == 0, decrease
* i and perform the depopulation to cover the last
* (first) page in the chunk.
*/
block = chunk->md_blocks + i;
if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
test_bit(i, chunk->populated)) {
if (end == -1)
end = i;
if (i > 0)
continue;
i--;
}
/* depopulate if there is an active range */
if (end == -1)
continue;
spin_unlock_irq(&pcpu_lock);
pcpu_depopulate_chunk(chunk, i + 1, end + 1);
cond_resched();
spin_lock_irq(&pcpu_lock);
pcpu_chunk_depopulated(chunk, i + 1, end + 1);
freed_page_start = min(freed_page_start, i + 1);
freed_page_end = max(freed_page_end, end + 1);
/* reset the range and continue */
end = -1;
}
/* batch tlb flush per chunk to amortize cost */
if (freed_page_start < freed_page_end) {
spin_unlock_irq(&pcpu_lock);
pcpu_post_unmap_tlb_flush(chunk,
freed_page_start,
freed_page_end);
cond_resched();
spin_lock_irq(&pcpu_lock);
}
if (reintegrate || chunk->free_bytes == pcpu_unit_size)
pcpu_reintegrate_chunk(chunk);
else
list_move_tail(&chunk->list,
&pcpu_chunk_lists[pcpu_sidelined_slot]);
}
}
/**
* pcpu_balance_workfn - manage the amount of free chunks and populated pages
* @work: unused
*
* For each chunk type, manage the number of fully free chunks and the number of
* populated pages. An important thing to consider is when pages are freed and
* how they contribute to the global counts.
*/
static void pcpu_balance_workfn(struct work_struct *work)
{
/*
* pcpu_balance_free() is called twice because the first time we may
* trim pages in the active pcpu_nr_empty_pop_pages which may cause us
* to grow other chunks. This then gives pcpu_reclaim_populated() time
* to move fully free chunks to the active list to be freed if
* appropriate.
*
* Enforce GFP_NOIO allocations because we have pcpu_alloc users
* constrained to GFP_NOIO/NOFS contexts and they could form a lock
* dependency through pcpu_alloc_mutex.
*/
unsigned int flags = memalloc_noio_save();
mutex_lock(&pcpu_alloc_mutex);
spin_lock_irq(&pcpu_lock);
pcpu_balance_free(false);
pcpu_reclaim_populated();
pcpu_balance_populated();
pcpu_balance_free(true);
spin_unlock_irq(&pcpu_lock);
mutex_unlock(&pcpu_alloc_mutex);
memalloc_noio_restore(flags);
}
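/*
 * Illustrative sketch: the scoped-GFP pattern used above. Anything allocated
 * between save and restore implicitly behaves as GFP_NOIO, regardless of the
 * gfp mask passed at the individual call sites. The helper is hypothetical;
 * memalloc_noio_save()/memalloc_noio_restore() come from <linux/sched/mm.h>.
 */
static void __maybe_unused hypothetical_noio_section(void)
{
	unsigned int noio_flags = memalloc_noio_save();

	/* ... allocations here cannot recurse into the block I/O path ... */

	memalloc_noio_restore(noio_flags);
}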
/**
* free_percpu - free percpu area
* @ptr: pointer to area to free
*
* Free percpu area @ptr.
*
* CONTEXT:
* Can be called from atomic context.
*/
void free_percpu(void __percpu *ptr)
{
void *addr;
struct pcpu_chunk *chunk;
unsigned long flags;
int size, off;
bool need_balance = false;
if (!ptr)
return;
kmemleak_free_percpu(ptr);
addr = __pcpu_ptr_to_addr(ptr);
chunk = pcpu_chunk_addr_search(addr);
off = addr - chunk->base_addr;
spin_lock_irqsave(&pcpu_lock, flags);
size = pcpu_free_area(chunk, off);
pcpu_alloc_tag_free_hook(chunk, off, size);
pcpu_memcg_free_hook(chunk, off, size);
/*
* If there is more than one fully free chunk, wake up the grim reaper.
* If the chunk is isolated, it may be in the process of being
* reclaimed. Let reclaim manage cleaning up of that chunk.
*/
if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
struct pcpu_chunk *pos;
list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
if (pos != chunk) {
need_balance = true;
break;
}
} else if (pcpu_should_reclaim_chunk(chunk)) {
pcpu_isolate_chunk(chunk);
need_balance = true;
}
trace_percpu_free_percpu(chunk->base_addr, off, ptr);
spin_unlock_irqrestore(&pcpu_lock, flags);
if (need_balance)
pcpu_schedule_balance_work();
}
EXPORT_SYMBOL_GPL(free_percpu);
bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
const size_t static_size = __per_cpu_end - __per_cpu_start;
void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
unsigned int cpu;
	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
void *va = (void *)addr;
		if (va >= start && va < start + static_size) {
			if (can_addr) {
*can_addr = (unsigned long) (va - start);
*can_addr += (unsigned long)
per_cpu_ptr(base, get_boot_cpu_id());
}
return true;
}
}
#endif
/* on UP, can't distinguish from other static vars, always false */
	return false;
}
/**
* is_kernel_percpu_address - test whether address is from static percpu area
* @addr: address to test
*
* Test whether @addr belongs to in-kernel static percpu area. Module
* static percpu areas are not considered. For those, use
* is_module_percpu_address().
*
* RETURNS:
* %true if @addr is from in-kernel static percpu area, %false otherwise.
*/
bool is_kernel_percpu_address(unsigned long addr)
{
return __is_kernel_percpu_address(addr, NULL);
}
/**
* per_cpu_ptr_to_phys - convert translated percpu address to physical address
* @addr: the address to be converted to physical address
*
* Given @addr, which is a dereferenceable address obtained via one of
* the percpu access macros, this function translates it into its physical
* address. The caller is responsible for ensuring @addr stays valid
* until this function finishes.
*
* The percpu allocator has a special setup for the first chunk, which
* currently supports either embedding in the linear address space or a
* vmalloc mapping, and, from the second chunk on, the backing allocator
* (currently either vm or km) provides the translation.
*
* @addr could be translated simply without checking whether it falls into
* the first chunk. But the current code better reflects how the percpu
* allocator actually works, and the verification can discover bugs both in
* the percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we
* keep the current code.
*
* RETURNS:
* The physical address for @addr.
*/
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
bool in_first_chunk = false;
unsigned long first_low, first_high;
unsigned int cpu;
/*
* The following test on unit_low/high isn't strictly
* necessary but will speed up lookups of addresses which
* aren't in the first chunk.
*
* The address check is against full chunk sizes. pcpu_base_addr
* points to the beginning of the first chunk including the
* static region. Assumes good intent as the first chunk may
* not be full (i.e. < pcpu_unit_pages in size).
*/
first_low = (unsigned long)pcpu_base_addr +
pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
first_high = (unsigned long)pcpu_base_addr +
pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
if ((unsigned long)addr >= first_low &&
(unsigned long)addr < first_high) {
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(base, cpu);
if (addr >= start && addr < start + pcpu_unit_size) {
in_first_chunk = true;
break;
}
}
}
if (in_first_chunk) {
if (!is_vmalloc_addr(addr))
return __pa(addr);
else
return page_to_phys(vmalloc_to_page(addr)) +
offset_in_page(addr);
} else
return page_to_phys(pcpu_addr_to_page(addr)) +
offset_in_page(addr);
}
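/*
 * Illustrative sketch: translating each CPU's copy of a dynamic percpu
 * allocation into a physical address, e.g. for debug output. The helper is
 * hypothetical; per_cpu_ptr() is assumed from <linux/percpu.h> and %pa is
 * the printk format for phys_addr_t.
 */
static void __maybe_unused hypothetical_print_phys(u64 __percpu *counter)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(counter, cpu));

		pr_debug("cpu%d copy at %pa\n", cpu, &pa);
	}
}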
/**
* pcpu_alloc_alloc_info - allocate percpu allocation info
* @nr_groups: the number of groups
* @nr_units: the number of units
*
* Allocate ai which is large enough for @nr_groups groups containing
* @nr_units units. The returned ai's groups[0].cpu_map points to the
* cpu_map array which is long enough for @nr_units and filled with
* NR_CPUS. It's the caller's responsibility to initialize the cpu_map
* pointers of the other groups.
*
* RETURNS:
* Pointer to the allocated pcpu_alloc_info on success, NULL on
* failure.
*/
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
int nr_units)
{
struct pcpu_alloc_info *ai;
size_t base_size, ai_size;
void *ptr;
int unit;
base_size = ALIGN(struct_size(ai, groups, nr_groups),
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
if (!ptr)
return NULL;
ai = ptr;
ptr += base_size;
ai->groups[0].cpu_map = ptr;
for (unit = 0; unit < nr_units; unit++)
ai->groups[0].cpu_map[unit] = NR_CPUS;
ai->nr_groups = nr_groups;
ai->__ai_size = PFN_ALIGN(ai_size);
return ai;
}
/**
* pcpu_free_alloc_info - free percpu allocation info
* @ai: pcpu_alloc_info to free
*
* Free @ai which was allocated by pcpu_alloc_alloc_info().
*/
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
memblock_free(ai, ai->__ai_size);
}
/**
* pcpu_dump_alloc_info - print out information about pcpu_alloc_info
* @lvl: loglevel
* @ai: allocation info to dump
*
* Print out information about @ai using loglevel @lvl.
*/
static void pcpu_dump_alloc_info(const char *lvl,
const struct pcpu_alloc_info *ai)
{
int group_width = 1, cpu_width = 1, width;
char empty_str[] = "--------";
int alloc = 0, alloc_end = 0;
int group, v;
int upa, apl; /* units per alloc, allocs per line */
v = ai->nr_groups;
while (v /= 10)
group_width++;
v = num_possible_cpus();
while (v /= 10)
cpu_width++;
empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
upa = ai->alloc_size / ai->unit_size;
width = upa * (cpu_width + 1) + group_width + 3;
apl = rounddown_pow_of_two(max(60 / width, 1));
printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
for (group = 0; group < ai->nr_groups; group++) {
const struct pcpu_group_info *gi = &ai->groups[group];
int unit = 0, unit_end = 0;
BUG_ON(gi->nr_units % upa);
for (alloc_end += gi->nr_units / upa;
alloc < alloc_end; alloc++) {
if (!(alloc % apl)) {
pr_cont("\n");
printk("%spcpu-alloc: ", lvl);
}
pr_cont("[%0*d] ", group_width, group);
for (unit_end += upa; unit < unit_end; unit++)
if (gi->cpu_map[unit] != NR_CPUS)
pr_cont("%0*d ",
cpu_width, gi->cpu_map[unit]);
else
pr_cont("%s ", empty_str);
}
}
pr_cont("\n");
}
/**
* pcpu_setup_first_chunk - initialize the first percpu chunk
* @ai: pcpu_alloc_info describing how the percpu area is shaped
* @base_addr: mapped address
*
* Initialize the first percpu chunk which contains the kernel static
* percpu area. This function is to be called from arch percpu area
* setup path.
*
* @ai contains all information necessary to initialize the first
* chunk and prime the dynamic percpu allocator.
*
* @ai->static_size is the size of static percpu area.
*
* @ai->reserved_size, if non-zero, specifies the number of bytes to
* reserve after the static area in the first chunk. This reserves
* the first chunk such that it's available only through reserved
* percpu allocation. This is primarily used to serve module percpu
* static areas on architectures where the addressing model has
* limited offset range for symbol relocations to guarantee module
* percpu symbols fall inside the relocatable range.
*
* @ai->dyn_size determines the number of bytes available for dynamic
* allocation in the first chunk. The area between @ai->static_size +
* @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
*
* @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
* and equal to or larger than @ai->static_size + @ai->reserved_size +
* @ai->dyn_size.
*
* @ai->atom_size is the allocation atom size and used as alignment
* for vm areas.
*
* @ai->alloc_size is the allocation size and always multiple of
* @ai->atom_size. This is larger than @ai->atom_size if
* @ai->unit_size is larger than @ai->atom_size.
*
* @ai->nr_groups and @ai->groups describe virtual memory layout of
* percpu areas. Units which should be colocated are put into the
* same group. Dynamic VM areas will be allocated according to these
* groupings. If @ai->nr_groups is zero, a single group containing
* all units is assumed.
*
* The caller should have mapped the first chunk at @base_addr and
* copied static data to each unit.
*
* The first chunk will always contain a static and a dynamic region.
* However, the static region is not managed by any chunk. If the first
* chunk also contains a reserved region, it is served by two chunks -
* one for the reserved region and one for the dynamic region. They
* share the same vm, but use offset regions in the area allocation map.
* The chunk serving the dynamic region is circulated in the chunk slots
* and available for dynamic allocation like any other chunk.
*/
void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr)
{
size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
size_t static_size, dyn_size;
unsigned long *group_offsets;
size_t *group_sizes;
unsigned long *unit_off;
unsigned int cpu;
int *unit_map;
int group, unit, i;
unsigned long tmp_addr;
size_t alloc_size;
#define PCPU_SETUP_BUG_ON(cond) do { \
if (unlikely(cond)) { \
pr_emerg("failed to initialize, %s\n", #cond); \
pr_emerg("cpu_possible_mask=%*pb\n", \
cpumask_pr_args(cpu_possible_mask)); \
pcpu_dump_alloc_info(KERN_EMERG, ai); \
BUG(); \
} \
} while (0)
/* sanity checks */
PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
PCPU_SETUP_BUG_ON(!ai->static_size);
PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
PCPU_SETUP_BUG_ON(!base_addr);
PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
/* process group information and build config tables accordingly */
alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
group_offsets = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
group_sizes = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
unit_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
unit_off = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
pcpu_low_unit_cpu = NR_CPUS;
pcpu_high_unit_cpu = NR_CPUS;
for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
const struct pcpu_group_info *gi = &ai->groups[group];
group_offsets[group] = gi->base_offset;
group_sizes[group] = gi->nr_units * ai->unit_size;
for (i = 0; i < gi->nr_units; i++) {
cpu = gi->cpu_map[i];
if (cpu == NR_CPUS)
continue;
PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
unit_map[cpu] = unit + i;
unit_off[cpu] = gi->base_offset + i * ai->unit_size;
/* determine low/high unit_cpu */
if (pcpu_low_unit_cpu == NR_CPUS ||
unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
pcpu_low_unit_cpu = cpu;
if (pcpu_high_unit_cpu == NR_CPUS ||
unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
pcpu_high_unit_cpu = cpu;
}
}
pcpu_nr_units = unit;
for_each_possible_cpu(cpu)
PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
pcpu_dump_alloc_info(KERN_DEBUG, ai);
pcpu_nr_groups = ai->nr_groups;
pcpu_group_offsets = group_offsets;
pcpu_group_sizes = group_sizes;
pcpu_unit_map = unit_map;
pcpu_unit_offsets = unit_off;
/* determine basic parameters */
pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
pcpu_atom_size = ai->atom_size;
pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
BITS_TO_LONGS(pcpu_unit_pages));
pcpu_stats_save_ai(ai);
/*
* Allocate chunk slots. The slots after the active slots are:
* sidelined_slot - isolated, depopulated chunks
* free_slot - fully free chunks
* to_depopulate_slot - isolated, chunks to depopulate
*/
pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
pcpu_free_slot = pcpu_sidelined_slot + 1;
pcpu_to_depopulate_slot = pcpu_free_slot + 1;
pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
pcpu_chunk_lists = memblock_alloc_or_panic(pcpu_nr_slots *
sizeof(pcpu_chunk_lists[0]),
SMP_CACHE_BYTES);
for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
/*
* The end of the static region needs to be aligned with the
* minimum allocation size as this offsets the reserved and
* dynamic region. The first chunk ends page aligned by
* expanding the dynamic region, therefore the dynamic region
* can be shrunk to compensate while still staying above the
* configured sizes.
*/
static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
dyn_size = ai->dyn_size - (static_size - ai->static_size);
/*
* Initialize first chunk:
* This chunk is broken up into 3 parts:
* < static | [reserved] | dynamic >
* - static - there is no backing chunk because these allocations can
* never be freed.
* - reserved (pcpu_reserved_chunk) - exists primarily to serve
* allocations from module load.
* - dynamic (pcpu_first_chunk) - serves the dynamic part of the first
* chunk.
*/
tmp_addr = (unsigned long)base_addr + static_size;
if (ai->reserved_size)
pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
ai->reserved_size);
tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);
pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
pcpu_chunk_relocate(pcpu_first_chunk, -1);
/* include all regions of the first chunk */
pcpu_nr_populated += PFN_DOWN(size_sum);
pcpu_stats_chunk_alloc();
trace_percpu_create_chunk(base_addr);
/* we're done */
pcpu_base_addr = base_addr;
}
#ifdef CONFIG_SMP
const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
[PCPU_FC_AUTO] = "auto",
[PCPU_FC_EMBED] = "embed",
[PCPU_FC_PAGE] = "page",
};
enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
static int __init percpu_alloc_setup(char *str)
{
if (!str)
return -EINVAL;
if (0)
/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
else if (!strcmp(str, "embed"))
pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
else if (!strcmp(str, "page"))
pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
else
pr_warn("unknown allocator %s specified\n", str);
return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
/*
* pcpu_embed_first_chunk() is used by the generic percpu setup.
* Build it if needed by the arch config or the generic setup is going
* to be used.
*/
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif
/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif
/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
* pcpu_build_alloc_info - build alloc_info considering distances between CPUs
* @reserved_size: the size of reserved percpu area in bytes
* @dyn_size: minimum free size for dynamic allocation in bytes
* @atom_size: allocation atom size
* @cpu_distance_fn: callback to determine distance between cpus, optional
*
* This function determines grouping of units, their mappings to cpus
* and other parameters considering needed percpu size, allocation
* atom size and distances between CPUs.
*
* Groups are always multiples of atom size and CPUs which are of
* LOCAL_DISTANCE both ways are grouped together and share space for
* units in the same group. The returned configuration is guaranteed
* to have CPUs on different nodes in different groups and >=75% usage
* of allocated virtual address space.
*
* RETURNS:
* On success, pointer to the new allocation_info is returned. On
* failure, ERR_PTR value is returned.
*/
static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
static int group_map[NR_CPUS] __initdata;
static int group_cnt[NR_CPUS] __initdata;
static struct cpumask mask __initdata;
const size_t static_size = __per_cpu_end - __per_cpu_start;
int nr_groups = 1, nr_units = 0;
size_t size_sum, min_unit_size, alloc_size;
int upa, max_upa, best_upa; /* units_per_alloc */
int last_allocs, group, unit;
unsigned int cpu, tcpu;
struct pcpu_alloc_info *ai;
unsigned int *cpu_map;
/* this function may be called multiple times */
memset(group_map, 0, sizeof(group_map));
memset(group_cnt, 0, sizeof(group_cnt));
cpumask_clear(&mask);
/* calculate size_sum and ensure dyn_size is enough for early alloc */
size_sum = PFN_ALIGN(static_size + reserved_size +
max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
dyn_size = size_sum - static_size - reserved_size;
/*
* Determine min_unit_size, alloc_size and max_upa such that
* alloc_size is multiple of atom_size and is the smallest
* which can accommodate 4k aligned segments which are equal to
* or larger than min_unit_size.
*/
min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
/* determine the maximum # of units that can fit in an allocation */
alloc_size = roundup(min_unit_size, atom_size);
upa = alloc_size / min_unit_size;
while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
upa--;
max_upa = upa;
cpumask_copy(&mask, cpu_possible_mask);
/* group cpus according to their proximity */
for (group = 0; !cpumask_empty(&mask); group++) {
/* pop the group's first cpu */
cpu = cpumask_first(&mask);
group_map[cpu] = group;
group_cnt[group]++;
cpumask_clear_cpu(cpu, &mask);
for_each_cpu(tcpu, &mask) {
if (!cpu_distance_fn ||
(cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
group_map[tcpu] = group;
group_cnt[group]++;
cpumask_clear_cpu(tcpu, &mask);
}
}
}
nr_groups = group;
/*
* Wasted space is caused by a ratio imbalance of upa to group_cnt.
* Expand the unit_size until we use >= 75% of the units allocated.
* Related to atom_size, which could be much larger than the unit_size.
*/
last_allocs = INT_MAX;
best_upa = 0;
for (upa = max_upa; upa; upa--) {
int allocs = 0, wasted = 0;
if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
continue;
for (group = 0; group < nr_groups; group++) {
int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
allocs += this_allocs;
wasted += this_allocs * upa - group_cnt[group];
}
/*
* Don't accept if wastage is over 1/3. The
* greater-than comparison ensures upa==1 always
* passes the following check.
*/
if (wasted > num_possible_cpus() / 3)
continue;
/* and then don't consume more memory */
if (allocs > last_allocs)
break;
last_allocs = allocs;
best_upa = upa;
}
BUG_ON(!best_upa);
upa = best_upa;
/* allocate and fill alloc_info */
for (group = 0; group < nr_groups; group++)
nr_units += roundup(group_cnt[group], upa);
ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
if (!ai)
return ERR_PTR(-ENOMEM);
cpu_map = ai->groups[0].cpu_map;
for (group = 0; group < nr_groups; group++) {
ai->groups[group].cpu_map = cpu_map;
cpu_map += roundup(group_cnt[group], upa);
}
ai->static_size = static_size;
ai->reserved_size = reserved_size;
ai->dyn_size = dyn_size;
ai->unit_size = alloc_size / upa;
ai->atom_size = atom_size;
ai->alloc_size = alloc_size;
for (group = 0, unit = 0; group < nr_groups; group++) {
struct pcpu_group_info *gi = &ai->groups[group];
/*
* Initialize base_offset as if all groups are located
* back-to-back. The caller should update this to
* reflect actual allocation.
*/
gi->base_offset = unit * ai->unit_size;
for_each_possible_cpu(cpu)
if (group_map[cpu] == group)
gi->cpu_map[gi->nr_units++] = cpu;
gi->nr_units = roundup(gi->nr_units, upa);
unit += gi->nr_units;
}
BUG_ON(unit != nr_units);
return ai;
}
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NUMA
int node = NUMA_NO_NODE;
void *ptr;
if (cpu_to_nd_fn)
node = cpu_to_nd_fn(cpu);
if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
ptr = memblock_alloc_from(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
cpu, size, (u64)__pa(ptr));
} else {
ptr = memblock_alloc_try_nid(size, align, goal,
MEMBLOCK_ALLOC_ACCESSIBLE,
node);
pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
cpu, size, node, (u64)__pa(ptr));
}
return ptr;
#else
return memblock_alloc_from(size, align, goal);
#endif
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
memblock_free(ptr, size);
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
* pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
* @reserved_size: the size of reserved percpu area in bytes
* @dyn_size: minimum free size for dynamic allocation in bytes
* @atom_size: allocation atom size
* @cpu_distance_fn: callback to determine distance between cpus, optional
* @cpu_to_nd_fn: callback to convert a cpu to its node, optional
*
* This is a helper to ease setting up embedded first percpu chunk and
* can be called where pcpu_setup_first_chunk() is expected.
*
* If this function is used to set up the first chunk, it is allocated
* by calling pcpu_fc_alloc and used as-is without being mapped into the
* vmalloc area. Allocations are always whole multiples of @atom_size
* aligned to @atom_size.
*
* This enables the first chunk to piggy back on the linear physical
* mapping which often uses larger page size. Please note that this
* can result in very sparse cpu->unit mapping on NUMA machines thus
* requiring large vmalloc address space. Don't use this allocator if
* vmalloc space is not orders of magnitude larger than distances
* between node memory addresses (ie. 32bit NUMA machines).
*
* @dyn_size specifies the minimum dynamic area size.
*
* If the needed size is smaller than the minimum or specified unit
* size, the leftover is returned using pcpu_fc_free.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
{
void *base = (void *)ULONG_MAX;
void **areas = NULL;
struct pcpu_alloc_info *ai;
size_t size_sum, areas_size;
unsigned long max_distance;
int group, i, highest_group, rc = 0;
ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
cpu_distance_fn);
if (IS_ERR(ai))
return PTR_ERR(ai);
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
if (!areas) {
rc = -ENOMEM;
goto out_free;
}
/* allocate, copy and determine base address & max_distance */
highest_group = 0;
for (group = 0; group < ai->nr_groups; group++) {
struct pcpu_group_info *gi = &ai->groups[group];
unsigned int cpu = NR_CPUS;
void *ptr;
for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
cpu = gi->cpu_map[i];
BUG_ON(cpu == NR_CPUS);
/* allocate space for the whole group */
ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
if (!ptr) {
rc = -ENOMEM;
goto out_free_areas;
}
/* kmemleak tracks the percpu allocations separately */
kmemleak_ignore_phys(__pa(ptr));
areas[group] = ptr;
base = min(ptr, base);
if (ptr > areas[highest_group])
highest_group = group;
}
max_distance = areas[highest_group] - base;
max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
/* warn if maximum distance is further than 75% of vmalloc space */
if (max_distance > VMALLOC_TOTAL * 3 / 4) {
pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/* and fail if we have fallback */
rc = -EINVAL;
goto out_free_areas;
#endif
}
/*
* Copy data and free unused parts. This should happen after all
* allocations are complete; otherwise, we may end up with
* overlapping groups.
*/
for (group = 0; group < ai->nr_groups; group++) {
struct pcpu_group_info *gi = &ai->groups[group];
void *ptr = areas[group];
for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
if (gi->cpu_map[i] == NR_CPUS) {
/* unused unit, free whole */
pcpu_fc_free(ptr, ai->unit_size);
continue;
}
/* copy and return the unused part */
memcpy(ptr, __per_cpu_start, ai->static_size);
pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
}
}
/* base address is now known, determine group base offsets */
for (group = 0; group < ai->nr_groups; group++) {
ai->groups[group].base_offset = areas[group] - base;
}
pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
ai->dyn_size, ai->unit_size);
pcpu_setup_first_chunk(ai, base);
goto out_free;
out_free_areas:
for (group = 0; group < ai->nr_groups; group++)
if (areas[group])
pcpu_fc_free(areas[group],
ai->groups[group].nr_units * ai->unit_size);
out_free:
pcpu_free_alloc_info(ai);
if (areas)
memblock_free(areas, areas_size);
return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */
#ifdef BUILD_PAGE_FIRST_CHUNK
#include <linux/pgalloc.h>
#ifndef P4D_TABLE_SIZE
#define P4D_TABLE_SIZE PAGE_SIZE
#endif
#ifndef PUD_TABLE_SIZE
#define PUD_TABLE_SIZE PAGE_SIZE
#endif
#ifndef PMD_TABLE_SIZE
#define PMD_TABLE_SIZE PAGE_SIZE
#endif
#ifndef PTE_TABLE_SIZE
#define PTE_TABLE_SIZE PAGE_SIZE
#endif
void __init __weak pcpu_populate_pte(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
if (pgd_none(*pgd)) {
p4d = memblock_alloc_or_panic(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
pgd_populate_kernel(addr, pgd, p4d);
}
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d)) {
pud = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
p4d_populate_kernel(addr, p4d, pud);
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
pmd = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
pud_populate(&init_mm, pud, pmd);
}
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd)) {
pte_t *new;
new = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
pmd_populate_kernel(&init_mm, pmd, new);
}
return;
}
/**
* pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
* @reserved_size: the size of reserved percpu area in bytes
* @cpu_to_nd_fn: callback to convert a cpu to its node, optional
*
* This is a helper to ease setting up page-remapped first percpu
* chunk and can be called where pcpu_setup_first_chunk() is expected.
*
* This is the basic allocator. The static percpu area is allocated
* page-by-page into the vmalloc area.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
{
static struct vm_struct vm;
struct pcpu_alloc_info *ai;
char psize_str[16];
int unit_pages;
size_t pages_size;
struct page **pages;
int unit, i, j, rc = 0;
int upa;
int nr_g0_units;
snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
if (IS_ERR(ai))
return PTR_ERR(ai);
BUG_ON(ai->nr_groups != 1);
upa = ai->alloc_size/ai->unit_size;
nr_g0_units = roundup(num_possible_cpus(), upa);
if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
pcpu_free_alloc_info(ai);
return -EINVAL;
}
unit_pages = ai->unit_size >> PAGE_SHIFT;
/* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0]));
pages = memblock_alloc_or_panic(pages_size, SMP_CACHE_BYTES);
/* allocate pages */
j = 0;
for (unit = 0; unit < num_possible_cpus(); unit++) {
unsigned int cpu = ai->groups[0].cpu_map[unit];
for (i = 0; i < unit_pages; i++) {
void *ptr;
ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
if (!ptr) {
pr_warn("failed to allocate %s page for cpu%u\n",
psize_str, cpu);
goto enomem;
}
/* kmemleak tracks the percpu allocations separately */
kmemleak_ignore_phys(__pa(ptr));
pages[j++] = virt_to_page(ptr);
}
}
/* allocate vm area, map the pages and copy static data */
vm.flags = VM_ALLOC;
vm.size = num_possible_cpus() * ai->unit_size;
vm_area_register_early(&vm, PAGE_SIZE);
for (unit = 0; unit < num_possible_cpus(); unit++) {
unsigned long unit_addr =
(unsigned long)vm.addr + unit * ai->unit_size;
for (i = 0; i < unit_pages; i++)
pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
/* pte already populated, the following shouldn't fail */
rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
unit_pages);
if (rc < 0)
panic("failed to map percpu area, err=%d\n", rc);
flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
/* copy static data */
memcpy((void *)unit_addr, __per_cpu_start, ai->static_size);
}
/* we're ready, commit */
pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
unit_pages, psize_str, ai->static_size,
ai->reserved_size, ai->dyn_size);
pcpu_setup_first_chunk(ai, vm.addr);
goto out_free_ar;
enomem:
while (--j >= 0)
pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
rc = -ENOMEM;
out_free_ar:
memblock_free(pages, pages_size);
pcpu_free_alloc_info(ai);
return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
* Generic SMP percpu area setup.
*
* The embedding helper is used because its behavior closely resembles
* the original non-dynamic generic percpu area setup. This is
* important because many archs have addressing restrictions and might
* fail if the percpu area is located far away from the previous
* location. As an added bonus, in non-NUMA cases, embedding is
* generally a good idea TLB-wise because percpu area can piggy back
* on the physical linear memory mapping which uses large page
* mappings on applicable archs.
*/
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
unsigned int cpu;
int rc;
/*
* Always reserve area for module percpu variables. That's
* what the legacy allocator did.
*/
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
PAGE_SIZE, NULL, NULL);
if (rc < 0)
panic("Failed to initialize percpu areas.");
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
#else /* CONFIG_SMP */
/*
* UP percpu area setup.
*
* UP always uses km-based percpu allocator with identity mapping.
* Static percpu variables are indistinguishable from the usual static
* variables and don't require any special preparation.
*/
void __init setup_per_cpu_areas(void)
{
const size_t unit_size =
roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
PERCPU_DYNAMIC_RESERVE));
struct pcpu_alloc_info *ai;
void *fc;
ai = pcpu_alloc_alloc_info(1, 1);
fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
if (!ai || !fc)
panic("Failed to allocate memory for percpu areas.");
/* kmemleak tracks the percpu allocations separately */
kmemleak_ignore_phys(__pa(fc));
ai->dyn_size = unit_size;
ai->unit_size = unit_size;
ai->atom_size = unit_size;
ai->alloc_size = unit_size;
ai->groups[0].nr_units = 1;
ai->groups[0].cpu_map[0] = 0;
pcpu_setup_first_chunk(ai, fc);
pcpu_free_alloc_info(ai);
}
#endif /* CONFIG_SMP */
/*
* pcpu_nr_pages - calculate total number of populated backing pages
*
* This reflects the number of pages populated to back chunks. Metadata is
* excluded from the number exposed in meminfo as the number of backing pages
* scales with the number of cpus and can quickly outweigh the memory used for
* metadata. It also keeps this calculation nice and simple.
*
* RETURNS:
* Total number of populated backing pages in use by the allocator.
*/
unsigned long pcpu_nr_pages(void)
{
return data_race(READ_ONCE(pcpu_nr_populated)) * pcpu_nr_units;
}
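/*
 * Illustrative sketch: converting the backing-page count above into the kB
 * figure a meminfo-style consumer would report. The helper is hypothetical.
 */
static unsigned long __maybe_unused hypothetical_percpu_kb(void)
{
	return pcpu_nr_pages() << (PAGE_SHIFT - 10);
}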
/*
* The percpu allocator is initialized early during boot when neither slab nor
* workqueue is available. Plug async management until everything is up
* and running.
*/
static int __init percpu_enable_async(void)
{
pcpu_async_enabled = true;
return 0;
}
subsys_initcall(percpu_enable_async);
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Credentials management - see Documentation/security/credentials.rst
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#ifndef _LINUX_CRED_H
#define _LINUX_CRED_H
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/key.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
struct cred;
struct inode;
/*
* COW Supplementary groups list
*/
struct group_info {
refcount_t usage;
int ngroups;
kgid_t gid[];
} __randomize_layout;
/**
* get_group_info - Get a reference to a group info structure
* @group_info: The group info to reference
*
* This gets a reference to a set of supplementary groups.
*
* If the caller is accessing a task's credentials, they must hold the RCU read
* lock when reading.
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
refcount_inc(&gi->usage);
return gi;
}
/**
* put_group_info - Release a reference to a group info structure
* @group_info: The group info to release
*/
#define put_group_info(group_info) \
do { \
if (refcount_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
#ifdef CONFIG_MULTIUSER
extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
extern int groups_search(const struct group_info *, kgid_t);
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern bool may_setgroups(void);
extern void groups_sort(struct group_info *);
#else
static inline void groups_free(struct group_info *group_info)
{
}
static inline int in_group_p(kgid_t grp)
{
return 1;
}
static inline int in_egroup_p(kgid_t grp)
{
return 1;
}
static inline int groups_search(const struct group_info *group_info, kgid_t grp)
{
return 1;
}
#endif
/*
* The security context of a task
*
* The parts of the context break down into two categories:
*
* (1) The objective context of a task. These parts are used when some other
* task is attempting to affect this one.
*
* (2) The subjective context. These details are used when the task is acting
* upon another object, be that a file, a task, a key or whatever.
*
* Note that some members of this structure belong to both categories - the
* LSM security pointer for instance.
*
* A task has two security pointers. task->real_cred points to the objective
* context that defines that task's actual details. The objective part of this
* context is used whenever that task is acted upon.
*
* task->cred points to the subjective context that defines the details of how
* that task is going to act upon another object. This may be overridden
* temporarily to point to another security context, but normally points to the
* same context as task->real_cred.
*/
struct cred {
atomic_long_t usage;
kuid_t uid; /* real UID of the task */
kgid_t gid; /* real GID of the task */
kuid_t suid; /* saved UID of the task */
kgid_t sgid; /* saved GID of the task */
kuid_t euid; /* effective UID of the task */
kgid_t egid; /* effective GID of the task */
kuid_t fsuid; /* UID for VFS ops */
kgid_t fsgid; /* GID for VFS ops */
unsigned securebits; /* SUID-less security management */
kernel_cap_t cap_inheritable; /* caps our children can inherit */
kernel_cap_t cap_permitted; /* caps we're permitted */
kernel_cap_t cap_effective; /* caps we can actually use */
kernel_cap_t cap_bset; /* capability bounding set */
kernel_cap_t cap_ambient; /* Ambient capability set */
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
struct key *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
#endif
#ifdef CONFIG_SECURITY
void *security; /* LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct ucounts *ucounts;
struct group_info *group_info; /* supplementary groups for euid/fsgid */
/* RCU deletion */
union {
int non_rcu; /* Can we skip RCU deletion? */
struct rcu_head rcu; /* RCU deletion hook */
};
} __randomize_layout;
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, u64);
extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
extern int set_cred_ucounts(struct cred *);
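/*
 * Illustrative sketch (not part of this header): the prepare/modify/commit
 * pattern for changing the current task's credentials. The helper and the
 * uid value are hypothetical; on an error before committing, abort_creds()
 * would be used to drop the prepared copy instead.
 */
static inline int hypothetical_set_fsuid(kuid_t fsuid)
{
	struct cred *new = prepare_creds();	/* copy of current's creds */

	if (!new)
		return -ENOMEM;
	new->fsuid = fsuid;
	return commit_creds(new);		/* consumes @new */
}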
static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
return cap_issubset(cred->cap_ambient,
cap_intersect(cred->cap_permitted,
cred->cap_inheritable));
}
static inline const struct cred *override_creds(const struct cred *override_cred)
{
return rcu_replace_pointer(current->cred, override_cred, 1);
}
static inline const struct cred *revert_creds(const struct cred *revert_cred)
{
return rcu_replace_pointer(current->cred, revert_cred, 1);
}
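/*
 * Illustrative sketch: temporarily acting with another set of credentials.
 * The helper is hypothetical; note that override_creds() above does not take
 * a reference, so the caller must keep @new_cred pinned for the duration.
 */
static inline void hypothetical_act_as(const struct cred *new_cred,
				       void (*op)(void))
{
	const struct cred *old_cred = override_creds(new_cred);

	op();			/* runs with @new_cred as the subjective creds */
	revert_creds(old_cred);
}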
/**
* get_cred_many - Get references on a set of credentials
* @cred: The credentials to reference
* @nr: Number of references to acquire
*
* Get references on the specified set of credentials. The caller must release
* all acquired references. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
* usage count. The purpose of this is to attempt to catch at compile time the
* accidental alteration of a set of credentials that should be considered
* immutable.
*/
static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
nonconst_cred->non_rcu = 0;
atomic_long_add(nr, &nonconst_cred->usage);
return cred;
}
/*
* get_cred - Get a reference on a set of credentials
* @cred: The credentials to reference
*
* Get a reference on the specified set of credentials. The caller must
* release the reference. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials.
*/
static inline const struct cred *get_cred(const struct cred *cred)
{
return get_cred_many(cred, 1);
}
static inline const struct cred *get_cred_rcu(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return NULL;
if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
return NULL;
nonconst_cred->non_rcu = 0;
return cred;
}
/**
* put_cred_many - Release references to a set of credentials
* @cred: The credentials to release
* @nr: Number of references to release
*
* Release a reference to a set of credentials, deleting them when the last ref
* is released. If %NULL is passed, nothing is done.
*
* This takes a const pointer to a set of credentials because the credentials
* on task_struct are attached by const pointers to prevent accidental
* alteration of otherwise immutable credential sets.
*/
static inline void put_cred_many(const struct cred *_cred, int nr)
{
struct cred *cred = (struct cred *) _cred;
	if (cred) {
		if (atomic_long_sub_and_test(nr, &cred->usage))
			__put_cred(cred);
}
}
/*
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
*
* Release a reference to a set of credentials, deleting them when the last ref
* is released. If %NULL is passed, nothing is done.
*/
static inline void put_cred(const struct cred *cred)
{
	put_cred_many(cred, 1);
}
DEFINE_FREE(put_cred, struct cred *, if (!IS_ERR_OR_NULL(_T)) put_cred(_T))
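/*
 * Illustrative sketch: pinning another task's credentials beyond an RCU
 * read-side section. get_task_cred() is declared above and takes a
 * reference; the helper itself is hypothetical.
 */
static inline kuid_t hypothetical_task_uid(struct task_struct *task)
{
	const struct cred *cred = get_task_cred(task);
	kuid_t uid = cred->uid;

	put_cred(cred);		/* drop the reference taken above */
	return uid;
}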
/**
* current_cred - Access the current task's subjective credentials
*
* Access the subjective credentials of the current task. RCU-safe,
* since nobody else can modify it.
*/
#define current_cred() \
rcu_dereference_protected(current->cred, 1)
/**
* current_real_cred - Access the current task's objective credentials
*
* Access the objective credentials of the current task. RCU-safe,
* since nobody else can modify it.
*/
#define current_real_cred() \
rcu_dereference_protected(current->real_cred, 1)
/**
* __task_cred - Access a task's objective credentials
* @task: The task to query
*
* Access the objective credentials of a task. The caller must hold the RCU
* readlock.
*
* The result of this function should not be passed directly to get_cred();
* rather get_task_cred() should be used instead.
*/
#define __task_cred(task) \
rcu_dereference((task)->real_cred)
/**
* get_current_cred - Get the current task's subjective credentials
*
* Get the subjective credentials of the current task, pinning them so that
* they can't go away. Accessing the current task's credentials directly is
* not permitted.
*/
#define get_current_cred() \
(get_cred(current_cred()))
/**
* get_current_user - Get the current task's user_struct
*
* Get the user record of the current task, pinning it so that it can't go
* away.
*/
#define get_current_user() \
({ \
struct user_struct *__u; \
const struct cred *__cred; \
__cred = current_cred(); \
__u = get_uid(__cred->user); \
__u; \
})
/**
* get_current_groups - Get the current task's supplementary group list
*
* Get the supplementary group list of the current task, pinning it so that it
* can't go away.
*/
#define get_current_groups() \
({ \
struct group_info *__groups; \
const struct cred *__cred; \
__cred = current_cred(); \
__groups = get_group_info(__cred->group_info); \
__groups; \
})
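/*
 * Illustrative sketch: walking the current task's supplementary groups.
 * get_current_groups() above pins the list; put_group_info() releases it.
 * The helper is hypothetical; groups_search() is the real lookup helper for
 * a sorted list.
 */
static inline bool hypothetical_in_supplementary_group(kgid_t gid)
{
	struct group_info *gi = get_current_groups();
	bool found = false;
	int i;

	for (i = 0; i < gi->ngroups; i++) {
		if (gid_eq(gi->gid[i], gid)) {
			found = true;
			break;
		}
	}
	put_group_info(gi);
	return found;
}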
#define task_cred_xxx(task, xxx) \
({ \
__typeof__(((struct cred *)NULL)->xxx) ___val; \
rcu_read_lock(); \
___val = __task_cred((task))->xxx; \
rcu_read_unlock(); \
___val; \
})
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
#define task_ucounts(task) (task_cred_xxx((task), ucounts))
#define current_cred_xxx(xxx) \
({ \
current_cred()->xxx; \
})
#define current_uid() (current_cred_xxx(uid))
#define current_gid() (current_cred_xxx(gid))
#define current_euid() (current_cred_xxx(euid))
#define current_egid() (current_cred_xxx(egid))
#define current_suid() (current_cred_xxx(suid))
#define current_sgid() (current_cred_xxx(sgid))
#define current_fsuid() (current_cred_xxx(fsuid))
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
#define current_ucounts() (current_cred_xxx(ucounts))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
#define current_user_ns() (current_cred_xxx(user_ns))
#else
static inline struct user_namespace *current_user_ns(void)
{
return &init_user_ns;
}
#endif
#define current_uid_gid(_uid, _gid) \
do { \
const struct cred *__cred; \
__cred = current_cred(); \
*(_uid) = __cred->uid; \
*(_gid) = __cred->gid; \
} while(0)
#define current_euid_egid(_euid, _egid) \
do { \
const struct cred *__cred; \
__cred = current_cred(); \
*(_euid) = __cred->euid; \
*(_egid) = __cred->egid; \
} while(0)
#define current_fsuid_fsgid(_fsuid, _fsgid) \
do { \
const struct cred *__cred; \
__cred = current_cred(); \
*(_fsuid) = __cred->fsuid; \
*(_fsgid) = __cred->fsgid; \
} while(0)
#endif /* _LINUX_CRED_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
linux/include/linux/rbtree.h
To use rbtrees you'll have to implement your own insert and search cores.
This avoids the need for callbacks, which would drastically hurt performance.
It is not the cleanest approach, but it is the way to get both performance
and genericity in C (as opposed to C++).
See Documentation/core-api/rbtree.rst for documentation and samples.
*/
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
#include <linux/container_of.h>
#include <linux/rbtree_types.h>
#include <linux/stddef.h>
#include <linux/rcupdate.h>
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
((node)->__rb_parent_color = (unsigned long)(node))
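/*
 * Illustrative sketch (assumed caller, hypothetical 'obj'): marking a node
 * as "empty" at initialisation so that a teardown path can tell whether it
 * was ever inserted.
 *
 *	RB_CLEAR_NODE(&obj->node);
 *	...
 *	if (!RB_EMPTY_NODE(&obj->node)) {
 *		rb_erase(&obj->node, &tree);
 *		RB_CLEAR_NODE(&obj->node);
 *	}
 */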
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
*rb_link = node;
}
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
rcu_assign_pointer(*rb_link, node);
}
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
/**
* rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
* given type allowing the backing memory of @pos to be invalidated
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
*
* rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
* list_for_each_entry_safe() and allows the iteration to continue independent
* of changes to @pos by the body of the loop.
*
* Note, however, that it cannot handle other modifications that re-order the
* rbtree it is iterating over. This includes calling rb_erase() on @pos, as
* rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
typeof(*pos), field); 1; }); \
pos = n)
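/*
 * Illustrative sketch (hypothetical 'struct foo' embedding an rb_node named
 * 'node'): tearing down an entire tree without per-node rebalancing, the
 * typical use of the post-order iterator.
 *
 *	struct foo *pos, *n;
 *
 *	rbtree_postorder_for_each_entry_safe(pos, n, &root, node)
 *		kfree(pos);
 *	root = RB_ROOT;
 */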
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
static inline void rb_insert_color_cached(struct rb_node *node,
struct rb_root_cached *root,
bool leftmost)
{
if (leftmost)
root->rb_leftmost = node;
rb_insert_color(node, &root->rb_root);
}
static inline struct rb_node *
rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
struct rb_node *leftmost = NULL;
if (root->rb_leftmost == node)
leftmost = root->rb_leftmost = rb_next(node);
rb_erase(node, &root->rb_root);
return leftmost;
}
static inline void rb_replace_node_cached(struct rb_node *victim,
struct rb_node *new,
struct rb_root_cached *root)
{
if (root->rb_leftmost == victim)
root->rb_leftmost = new;
rb_replace_node(victim, new, &root->rb_root);
}
/*
* The below helper functions use 2 operators with 3 different
* calling conventions. The operators are related like:
*
* comp(a->key,b) < 0 := less(a,b)
* comp(a->key,b) > 0 := less(b,a)
* comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
*
* If these operators define a partial order on the elements we make no
* guarantee on which of the elements matching the key is found. See
* rb_find().
*
* The reason for this is to allow the find() interface without requiring an
* on-stack dummy object, which might not be feasible due to object size.
*/
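/*
 * Illustrative sketch of the two operator flavours for a hypothetical
 * struct foo { u64 key; struct rb_node node; }; the names below are
 * examples, not part of this header.
 *
 *	static bool foo_less(struct rb_node *a, const struct rb_node *b)
 *	{
 *		return rb_entry(a, struct foo, node)->key <
 *		       rb_entry(b, struct foo, node)->key;
 *	}
 *
 *	static int foo_cmp(const void *key, const struct rb_node *n)
 *	{
 *		u64 k = *(const u64 *)key;
 *		u64 nk = rb_entry(n, struct foo, node)->key;
 *
 *		return k < nk ? -1 : (k > nk ? 1 : 0);
 *	}
 *
 * foo_less() matches what rb_add()/rb_add_cached() expect below, while
 * foo_cmp() matches the rb_find*() helpers.
 */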
/**
* rb_add_cached() - insert @node into the leftmost cached tree @tree
* @node: node to insert
* @tree: leftmost cached tree to insert @node into
* @less: operator defining the (partial) node order
*
* Returns @node when it is the new leftmost, or NULL.
*/
static __always_inline struct rb_node *
rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
bool (*less)(struct rb_node *, const struct rb_node *))
{
struct rb_node **link = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
bool leftmost = true;
while (*link) {
parent = *link;
if (less(node, parent)) {
link = &parent->rb_left;
} else {
link = &parent->rb_right;
leftmost = false;
}
}
rb_link_node(node, parent, link);
rb_insert_color_cached(node, tree, leftmost);
return leftmost ? node : NULL;
}
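/*
 * Illustrative sketch (reusing the hypothetical foo_less() above): a
 * leftmost-cached tree gives O(1) access to the smallest element.
 *
 *	struct rb_root_cached root = RB_ROOT_CACHED;
 *
 *	rb_add_cached(&new->node, &root, foo_less);
 *	...
 *	first = rb_entry(rb_first_cached(&root), struct foo, node);
 */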
/**
* rb_add() - insert @node into @tree
* @node: node to insert
* @tree: tree to insert @node into
* @less: operator defining the (partial) node order
*/
static __always_inline void
rb_add(struct rb_node *node, struct rb_root *tree,
bool (*less)(struct rb_node *, const struct rb_node *))
{
struct rb_node **link = &tree->rb_node;
struct rb_node *parent = NULL;
while (*link) {
parent = *link;
if (less(node, parent))
link = &parent->rb_left;
else
link = &parent->rb_right;
}
rb_link_node(node, parent, link);
rb_insert_color(node, tree);
}
/**
* rb_find_add_cached() - find equivalent @node in @tree, or add @node
* @node: node to look-for / insert
* @tree: tree to search / modify
* @cmp: operator defining the node order
*
* Returns the rb_node matching @node, or NULL when no match is found and @node
* is inserted.
*/
static __always_inline struct rb_node *
rb_find_add_cached(struct rb_node *node, struct rb_root_cached *tree,
int (*cmp)(const struct rb_node *new, const struct rb_node *exist))
{
bool leftmost = true;
struct rb_node **link = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
int c;
while (*link) {
parent = *link;
c = cmp(node, parent);
if (c < 0) {
link = &parent->rb_left;
} else if (c > 0) {
link = &parent->rb_right;
leftmost = false;
} else {
return parent;
}
}
rb_link_node(node, parent, link);
rb_insert_color_cached(node, tree, leftmost);
return NULL;
}
/**
* rb_find_add() - find equivalent @node in @tree, or add @node
* @node: node to look-for / insert
* @tree: tree to search / modify
* @cmp: operator defining the node order
*
* Returns the rb_node matching @node, or NULL when no match is found and @node
* is inserted.
*/
static __always_inline struct rb_node *
rb_find_add(struct rb_node *node, struct rb_root *tree,
int (*cmp)(struct rb_node *, const struct rb_node *))
{
struct rb_node **link = &tree->rb_node;
struct rb_node *parent = NULL;
int c;
while (*link) {
parent = *link;
c = cmp(node, parent);
if (c < 0)
link = &parent->rb_left;
else if (c > 0)
link = &parent->rb_right;
else
return parent;
}
rb_link_node(node, parent, link);
rb_insert_color(node, tree);
return NULL;
}
/**
* rb_find_add_rcu() - find equivalent @node in @tree, or add @node
* @node: node to look-for / insert
* @tree: tree to search / modify
* @cmp: operator defining the node order
*
* Adds a Store-Release for link_node.
*
* Returns the rb_node matching @node, or NULL when no match is found and @node
* is inserted.
*/
static __always_inline struct rb_node *
rb_find_add_rcu(struct rb_node *node, struct rb_root *tree,
int (*cmp)(struct rb_node *, const struct rb_node *))
{
struct rb_node **link = &tree->rb_node;
struct rb_node *parent = NULL;
int c;
while (*link) {
parent = *link;
c = cmp(node, parent);
if (c < 0)
link = &parent->rb_left;
else if (c > 0)
link = &parent->rb_right;
else
return parent;
}
rb_link_node_rcu(node, parent, link);
rb_insert_color(node, tree);
return NULL;
}
/**
* rb_find() - find @key in tree @tree
* @key: key to match
* @tree: tree to search
* @cmp: operator defining the node order
*
* Returns the rb_node matching @key or NULL.
*/
static __always_inline struct rb_node *
rb_find(const void *key, const struct rb_root *tree,
int (*cmp)(const void *key, const struct rb_node *))
{
struct rb_node *node = tree->rb_node;
while (node) {
int c = cmp(key, node);
if (c < 0)
node = node->rb_left;
else if (c > 0)
node = node->rb_right;
else
return node;
}
return NULL;
}
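/*
 * Illustrative sketch (reusing the hypothetical foo_cmp() above): looking
 * up an element by key and unwrapping the result.
 *
 *	struct foo *found;
 *
 *	found = rb_entry_safe(rb_find(&key, &root, foo_cmp),
 *			      struct foo, node);
 */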
/**
* rb_find_rcu() - find @key in tree @tree
* @key: key to match
* @tree: tree to search
* @cmp: operator defining the node order
*
* Notably, tree descent vs concurrent tree rotations is unsound and can result
* in false-negatives.
*
* Returns the rb_node matching @key or NULL.
*/
static __always_inline struct rb_node *
rb_find_rcu(const void *key, const struct rb_root *tree,
int (*cmp)(const void *key, const struct rb_node *))
{
struct rb_node *node = tree->rb_node;
while (node) {
int c = cmp(key, node);
if (c < 0)
node = rcu_dereference_raw(node->rb_left);
else if (c > 0)
node = rcu_dereference_raw(node->rb_right);
else
return node;
}
return NULL;
}
/**
* rb_find_first() - find the first @key in @tree
* @key: key to match
* @tree: tree to search
* @cmp: operator defining node order
*
* Returns the leftmost node matching @key, or NULL.
*/
static __always_inline struct rb_node *
rb_find_first(const void *key, const struct rb_root *tree,
int (*cmp)(const void *key, const struct rb_node *))
{
struct rb_node *node = tree->rb_node;
struct rb_node *match = NULL;
while (node) {
int c = cmp(key, node);
if (c <= 0) {
if (!c)
match = node;
node = node->rb_left;
} else if (c > 0) {
node = node->rb_right;
}
}
return match;
}
/**
* rb_next_match() - find the next @key in @tree
* @key: key to match
* @tree: tree to search
* @cmp: operator defining node order
*
* Returns the next node matching @key, or NULL.
*/
static __always_inline struct rb_node *
rb_next_match(const void *key, struct rb_node *node,
int (*cmp)(const void *key, const struct rb_node *))
{
node = rb_next(node);
if (node && cmp(key, node))
node = NULL;
return node;
}
/**
* rb_for_each() - iterates a subtree matching @key
* @node: iterator
* @key: key to match
* @tree: tree to search
* @cmp: operator defining node order
*/
#define rb_for_each(node, key, tree, cmp) \
for ((node) = rb_find_first((key), (tree), (cmp)); \
(node); (node) = rb_next_match((key), (node), (cmp)))
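/*
 * Illustrative sketch (hypothetical names as above): visiting every node
 * that compares equal to @key, e.g. when duplicate keys are allowed;
 * process_foo() is a placeholder.
 *
 *	struct rb_node *n;
 *
 *	rb_for_each(n, &key, &root, foo_cmp)
 *		process_foo(rb_entry(n, struct foo, node));
 */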
#endif /* _LINUX_RBTREE_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/* audit.c -- Auditing support
* Gateway between the kernel (e.g., selinux) and the user-space audit daemon.
* System-call specific features have moved to auditsc.c
*
* Copyright 2003-2007 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
*
* Goals: 1) Integrate fully with Security Modules.
* 2) Minimal run-time overhead:
* a) Minimal when syscall auditing is disabled (audit_enable=0).
* b) Small when syscall auditing is enabled and no audit record
* is generated (defer as much work as possible to record
* generation time):
* i) context is allocated,
* ii) names from getname are stored without a copy, and
* iii) inode information stored from path_lookup.
* 3) Ability to disable syscall auditing at boot time (audit=0).
* 4) Usable by other parts of the kernel (if audit_log* is called,
* then a syscall record will be generated automatically for the
* current syscall).
* 5) Netlink interface to user-space.
* 6) Support low-overhead kernel-based filtering to minimize the
* information that must be passed to user-space.
*
* Audit userspace, documentation, tests, and bug/issue trackers:
* https://github.com/linux-audit
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/file.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/pid.h>
#include <linux/audit.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/security.h>
#include <linux/lsm_hooks.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <net/netns/generic.h>
#include "audit.h"
/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
* (Initialization happens after skb_init is called.) */
#define AUDIT_DISABLED -1
#define AUDIT_UNINITIALIZED 0
#define AUDIT_INITIALIZED 1
static int audit_initialized = AUDIT_UNINITIALIZED;
u32 audit_enabled = AUDIT_OFF;
bool audit_ever_enabled = !!AUDIT_OFF;
EXPORT_SYMBOL_GPL(audit_enabled);
/* Default state when kernel boots without any parameters. */
static u32 audit_default = AUDIT_OFF;
/* If auditing cannot proceed, audit_failure selects what happens. */
static u32 audit_failure = AUDIT_FAIL_PRINTK;
/* private audit network namespace index */
static unsigned int audit_net_id;
/* Counts and lists of the LSMs that provide a security context,
 * tracked separately for subjects and objects. */
static u32 audit_subj_secctx_cnt;
static u32 audit_obj_secctx_cnt;
static const struct lsm_id *audit_subj_lsms[MAX_LSM_COUNT];
static const struct lsm_id *audit_obj_lsms[MAX_LSM_COUNT];
/**
* struct audit_net - audit private network namespace data
* @sk: communication socket
*/
struct audit_net {
struct sock *sk;
};
/**
* struct auditd_connection - kernel/auditd connection state
* @pid: auditd PID
* @portid: netlink portid
* @net: the associated network namespace
* @rcu: RCU head
*
* Description:
* This struct is RCU protected; you must either hold the RCU lock for reading
* or the associated spinlock for writing.
*/
struct auditd_connection {
struct pid *pid;
u32 portid;
struct net *net;
struct rcu_head rcu;
};
static struct auditd_connection __rcu *auditd_conn;
static DEFINE_SPINLOCK(auditd_conn_lock);
/* If audit_rate_limit is non-zero, limit the rate of sending audit records
* to that number per second. This prevents DoS attacks, but results in
* audit records being dropped. */
static u32 audit_rate_limit;
/* Number of outstanding audit_buffers allowed.
* When set to zero, this means unlimited. */
static u32 audit_backlog_limit = 64;
#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
/* The identity of the user shutting down the audit system. */
static kuid_t audit_sig_uid = INVALID_UID;
static pid_t audit_sig_pid = -1;
static struct lsm_prop audit_sig_lsm;
/* Records can be lost in several ways:
0) [suppressed in audit_alloc]
1) out of memory in audit_log_start [kmalloc of struct audit_buffer]
2) out of memory in audit_log_move [alloc_skb]
3) suppressed due to audit_rate_limit
4) suppressed due to audit_backlog_limit
*/
static atomic_t audit_lost = ATOMIC_INIT(0);
/* Monotonically increasing sum of time the kernel has spent
* waiting while the backlog limit is exceeded.
*/
static atomic_t audit_backlog_wait_time_actual = ATOMIC_INIT(0);
/* Hash for inode-based rules */
struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
static struct kmem_cache *audit_buffer_cache;
/* queue msgs to send via kauditd_task */
static struct sk_buff_head audit_queue;
/* queue msgs due to temporary unicast send problems */
static struct sk_buff_head audit_retry_queue;
/* queue msgs waiting for new auditd connection */
static struct sk_buff_head audit_hold_queue;
/* queue servicing thread */
static struct task_struct *kauditd_task;
static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
/* waitqueue for callers who are blocked on the audit backlog */
static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION,
.mask = -1,
.features = 0,
.lock = 0,};
static char *audit_feature_names[2] = {
"only_unset_loginuid",
"loginuid_immutable",
};
/**
* struct audit_ctl_mutex - serialize requests from userspace
* @lock: the mutex used for locking
* @owner: the task which owns the lock
*
* Description:
* This is the lock struct used to ensure we only process userspace requests
* in an orderly fashion. We can't simply use a mutex/lock here because we
* need to track lock ownership so we don't end up blocking the lock owner in
* audit_log_start() or similar.
*/
static struct audit_ctl_mutex {
struct mutex lock;
void *owner;
} audit_cmd_mutex;
/* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting
* audit records. Since printk uses a 1024 byte buffer, this buffer
* should be at least that large. */
#define AUDIT_BUFSIZ 1024
/* The audit_buffer is used when formatting an audit record. The caller
* locks briefly to get the record off the freelist or to allocate the
* buffer, and locks briefly to send the buffer to the netlink layer or
* to place it on a transmit queue. Multiple audit_buffers can be in
* use simultaneously. */
struct audit_buffer {
struct sk_buff *skb; /* the skb for audit_log functions */
struct sk_buff_head skb_list; /* formatted skbs, ready to send */
struct audit_context *ctx; /* NULL or associated context */
struct audit_stamp stamp; /* audit stamp for these records */
gfp_t gfp_mask;
};
struct audit_reply {
__u32 portid;
struct net *net;
struct sk_buff *skb;
};
/**
* auditd_test_task - Check to see if a given task is an audit daemon
* @task: the task to check
*
* Description:
* Return 1 if the task is a registered audit daemon, 0 otherwise.
*/
int auditd_test_task(struct task_struct *task)
{
int rc;
struct auditd_connection *ac;
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
rc = (ac && ac->pid == task_tgid(task) ? 1 : 0);
rcu_read_unlock();
return rc;
}
/**
* audit_ctl_lock - Take the audit control lock
*/
void audit_ctl_lock(void)
{
mutex_lock(&audit_cmd_mutex.lock);
audit_cmd_mutex.owner = current;
}
/**
* audit_ctl_unlock - Drop the audit control lock
*/
void audit_ctl_unlock(void)
{
audit_cmd_mutex.owner = NULL;
mutex_unlock(&audit_cmd_mutex.lock);
}
/**
* audit_ctl_owner_current - Test to see if the current task owns the lock
*
* Description:
* Return true if the current task owns the audit control lock, false if it
* doesn't own the lock.
*/
static bool audit_ctl_owner_current(void)
{
return (current == audit_cmd_mutex.owner);
}
/**
* auditd_pid_vnr - Return the auditd PID relative to the namespace
*
* Description:
* Returns the PID in relation to the namespace, 0 on failure.
*/
static pid_t auditd_pid_vnr(void)
{
pid_t pid;
const struct auditd_connection *ac;
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
if (!ac || !ac->pid)
pid = 0;
else
pid = pid_vnr(ac->pid);
rcu_read_unlock();
return pid;
}
/**
* audit_cfg_lsm - Identify a security module as providing a secctx.
* @lsmid: LSM identity
* @flags: which contexts are provided
*
* Description:
* Increments the count of the security modules providing a secctx.
* If the LSM id is already in the list leave it alone.
*/
void audit_cfg_lsm(const struct lsm_id *lsmid, int flags)
{
int i;
if (flags & AUDIT_CFG_LSM_SECCTX_SUBJECT) {
for (i = 0 ; i < audit_subj_secctx_cnt; i++)
if (audit_subj_lsms[i] == lsmid)
return;
audit_subj_lsms[audit_subj_secctx_cnt++] = lsmid;
}
if (flags & AUDIT_CFG_LSM_SECCTX_OBJECT) {
for (i = 0 ; i < audit_obj_secctx_cnt; i++)
if (audit_obj_lsms[i] == lsmid)
return;
audit_obj_lsms[audit_obj_secctx_cnt++] = lsmid;
}
}
/**
* audit_get_sk - Return the audit socket for the given network namespace
* @net: the destination network namespace
*
* Description:
* Returns the sock pointer if valid, NULL otherwise. The caller must ensure
* that a reference is held for the network namespace while the sock is in use.
*/
static struct sock *audit_get_sk(const struct net *net)
{
struct audit_net *aunet;
if (!net)
return NULL;
aunet = net_generic(net, audit_net_id);
return aunet->sk;
}
void audit_panic(const char *message)
{
switch (audit_failure) {
case AUDIT_FAIL_SILENT:
break;
case AUDIT_FAIL_PRINTK:
if (printk_ratelimit())
pr_err("%s\n", message);
break;
case AUDIT_FAIL_PANIC:
panic("audit: %s\n", message);
break;
}
}
static inline int audit_rate_check(void)
{
static unsigned long last_check = 0;
static int messages = 0;
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned long now;
int retval = 0;
if (!audit_rate_limit)
return 1;
spin_lock_irqsave(&lock, flags);
if (++messages < audit_rate_limit) {
retval = 1;
} else {
now = jiffies;
if (time_after(now, last_check + HZ)) {
last_check = now;
messages = 0;
retval = 1;
}
}
spin_unlock_irqrestore(&lock, flags);
return retval;
}
/**
* audit_log_lost - conditionally log lost audit message event
* @message: the message stating reason for lost audit message
*
* Emit at least 1 message per second, even if audit_rate_check is
* throttling.
* Always increment the lost messages counter.
*/
void audit_log_lost(const char *message)
{
static unsigned long last_msg = 0;
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned long now;
int print;
atomic_inc(&audit_lost);
print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
if (!print) {
spin_lock_irqsave(&lock, flags);
now = jiffies;
if (time_after(now, last_msg + HZ)) {
print = 1;
last_msg = now;
}
spin_unlock_irqrestore(&lock, flags);
}
if (print) {
if (printk_ratelimit())
pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
atomic_read(&audit_lost),
audit_rate_limit,
audit_backlog_limit);
audit_panic(message);
}
}
static int audit_log_config_change(char *function_name, u32 new, u32 old,
int allow_changes)
{
struct audit_buffer *ab;
int rc = 0;
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_CONFIG_CHANGE);
if (unlikely(!ab))
return rc;
audit_log_format(ab, "op=set %s=%u old=%u ", function_name, new, old);
audit_log_session_info(ab);
rc = audit_log_task_context(ab);
if (rc)
allow_changes = 0; /* Something weird, deny request */
audit_log_format(ab, " res=%d", allow_changes);
audit_log_end(ab);
return rc;
}
static int audit_do_config_change(char *function_name, u32 *to_change, u32 new)
{
int allow_changes, rc = 0;
u32 old = *to_change;
/* check if we are locked */
if (audit_enabled == AUDIT_LOCKED)
allow_changes = 0;
else
allow_changes = 1;
if (audit_enabled != AUDIT_OFF) {
rc = audit_log_config_change(function_name, new, old, allow_changes);
if (rc)
allow_changes = 0;
}
/* If we are allowed, make the change */
if (allow_changes == 1)
*to_change = new;
/* Not allowed, update reason */
else if (rc == 0)
rc = -EPERM;
return rc;
}
static int audit_set_rate_limit(u32 limit)
{
return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
}
static int audit_set_backlog_limit(u32 limit)
{
return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
}
static int audit_set_backlog_wait_time(u32 timeout)
{
return audit_do_config_change("audit_backlog_wait_time",
&audit_backlog_wait_time, timeout);
}
static int audit_set_enabled(u32 state)
{
int rc;
if (state > AUDIT_LOCKED)
return -EINVAL;
rc = audit_do_config_change("audit_enabled", &audit_enabled, state);
if (!rc)
audit_ever_enabled |= !!state;
return rc;
}
static int audit_set_failure(u32 state)
{
if (state != AUDIT_FAIL_SILENT
&& state != AUDIT_FAIL_PRINTK
&& state != AUDIT_FAIL_PANIC)
return -EINVAL;
return audit_do_config_change("audit_failure", &audit_failure, state);
}
/**
* auditd_conn_free - RCU helper to release an auditd connection struct
* @rcu: RCU head
*
* Description:
* Drop any references inside the auditd connection tracking struct and free
* the memory.
*/
static void auditd_conn_free(struct rcu_head *rcu)
{
struct auditd_connection *ac;
ac = container_of(rcu, struct auditd_connection, rcu);
put_pid(ac->pid);
put_net(ac->net);
kfree(ac);
}
/**
* auditd_set - Set/Reset the auditd connection state
* @pid: auditd PID
* @portid: auditd netlink portid
* @net: auditd network namespace pointer
* @skb: the netlink command from the audit daemon
* @ack: netlink ack flag, cleared if ack'd here
*
* Description:
* This function will obtain and drop network namespace references as
* necessary. Returns zero on success, negative values on failure.
*/
static int auditd_set(struct pid *pid, u32 portid, struct net *net,
struct sk_buff *skb, bool *ack)
{
unsigned long flags;
struct auditd_connection *ac_old, *ac_new;
struct nlmsghdr *nlh;
if (!pid || !net)
return -EINVAL;
ac_new = kzalloc(sizeof(*ac_new), GFP_KERNEL);
if (!ac_new)
return -ENOMEM;
ac_new->pid = get_pid(pid);
ac_new->portid = portid;
ac_new->net = get_net(net);
/* send the ack now to avoid a race with the queue backlog */
if (*ack) {
nlh = nlmsg_hdr(skb);
netlink_ack(skb, nlh, 0, NULL);
*ack = false;
}
spin_lock_irqsave(&auditd_conn_lock, flags);
ac_old = rcu_dereference_protected(auditd_conn,
lockdep_is_held(&auditd_conn_lock));
rcu_assign_pointer(auditd_conn, ac_new);
spin_unlock_irqrestore(&auditd_conn_lock, flags);
if (ac_old)
call_rcu(&ac_old->rcu, auditd_conn_free);
return 0;
}
/**
* kauditd_printk_skb - Print the audit record to the ring buffer
* @skb: audit record
*
* Whatever the reason, this packet may not make it to the auditd connection
* so write it via printk to ensure the information isn't completely lost.
*/
static void kauditd_printk_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
char *data = nlmsg_data(nlh);
if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
}
/**
* kauditd_rehold_skb - Handle an audit record send failure in the hold queue
* @skb: audit record
* @error: error code (unused)
*
* Description:
* This should only be used by the kauditd_thread when it fails to flush the
* hold queue.
*/
static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error)
{
/* put the record back in the queue */
skb_queue_tail(&audit_hold_queue, skb);
}
/**
* kauditd_hold_skb - Queue an audit record, waiting for auditd
* @skb: audit record
* @error: error code
*
* Description:
* Queue the audit record, waiting for an instance of auditd. When this
* function is called we haven't given up yet on sending the record, but things
* are not looking good. The first thing we want to do is try to write the
* record via printk and then see if we want to try and hold on to the record
* and queue it, if we have room. If we want to hold on to the record, but we
* don't have room, record a record lost message.
*/
static void kauditd_hold_skb(struct sk_buff *skb, int error)
{
/* at this point it is uncertain if we will ever send this to auditd so
* try to send the message via printk before we go any further */
kauditd_printk_skb(skb);
/* can we just silently drop the message? */
if (!audit_default)
goto drop;
/* the hold queue is only for when the daemon goes away completely,
* not -EAGAIN failures; if we are in a -EAGAIN state requeue the
* record on the retry queue unless it's full, in which case drop it
*/
if (error == -EAGAIN) {
if (!audit_backlog_limit ||
skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
skb_queue_tail(&audit_retry_queue, skb);
return;
}
audit_log_lost("kauditd retry queue overflow");
goto drop;
}
/* if we have room in the hold queue, queue the message */
if (!audit_backlog_limit ||
skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
skb_queue_tail(&audit_hold_queue, skb);
return;
}
/* we have no other options - drop the message */
audit_log_lost("kauditd hold queue overflow");
drop:
kfree_skb(skb);
}
/**
* kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
* @skb: audit record
* @error: error code (unused)
*
* Description:
* Not as serious as kauditd_hold_skb() as we still have a connected auditd,
* but for some reason we are having problems sending it audit records so
* queue the given record and attempt to resend.
*/
static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error)
{
if (!audit_backlog_limit ||
skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
skb_queue_tail(&audit_retry_queue, skb);
return;
}
/* we have to drop the record, send it via printk as a last effort */
kauditd_printk_skb(skb);
audit_log_lost("kauditd retry queue overflow");
kfree_skb(skb);
}
/**
* auditd_reset - Disconnect the auditd connection
* @ac: auditd connection state
*
* Description:
* Break the auditd/kauditd connection and move all the queued records into the
* hold queue in case auditd reconnects. It is important to note that the @ac
* pointer should never be dereferenced inside this function as it may be NULL
* or invalid, you can only compare the memory address! If @ac is NULL then
* the connection will always be reset.
*/
static void auditd_reset(const struct auditd_connection *ac)
{
unsigned long flags;
struct sk_buff *skb;
struct auditd_connection *ac_old;
/* if it isn't already broken, break the connection */
spin_lock_irqsave(&auditd_conn_lock, flags);
ac_old = rcu_dereference_protected(auditd_conn,
lockdep_is_held(&auditd_conn_lock));
if (ac && ac != ac_old) {
/* someone already registered a new auditd connection */
spin_unlock_irqrestore(&auditd_conn_lock, flags);
return;
}
rcu_assign_pointer(auditd_conn, NULL);
spin_unlock_irqrestore(&auditd_conn_lock, flags);
if (ac_old)
call_rcu(&ac_old->rcu, auditd_conn_free);
/* flush the retry queue to the hold queue, but don't touch the main
* queue since we need to process that normally for multicast */
while ((skb = skb_dequeue(&audit_retry_queue)))
kauditd_hold_skb(skb, -ECONNREFUSED);
}
/**
* auditd_send_unicast_skb - Send a record via unicast to auditd
* @skb: audit record
*
* Description:
* Send a skb to the audit daemon, returns positive/zero values on success and
* negative values on failure; in all cases the skb will be consumed by this
* function. If the send results in -ECONNREFUSED the connection with auditd
* will be reset. This function may sleep so callers should not hold any locks
* where this would cause a problem.
*/
static int auditd_send_unicast_skb(struct sk_buff *skb)
{
int rc;
u32 portid;
struct net *net;
struct sock *sk;
struct auditd_connection *ac;
/* NOTE: we can't call netlink_unicast while in the RCU section so
* take a reference to the network namespace and grab local
* copies of the namespace, the sock, and the portid; the
* namespace and sock aren't going to go away while we hold a
* reference and if the portid does become invalid after the RCU
* section netlink_unicast() should safely return an error */
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
if (!ac) {
rcu_read_unlock();
kfree_skb(skb);
rc = -ECONNREFUSED;
goto err;
}
net = get_net(ac->net);
sk = audit_get_sk(net);
portid = ac->portid;
rcu_read_unlock();
rc = netlink_unicast(sk, skb, portid, 0);
put_net(net);
if (rc < 0)
goto err;
return rc;
err:
if (ac && rc == -ECONNREFUSED)
auditd_reset(ac);
return rc;
}
/**
* kauditd_send_queue - Helper for kauditd_thread to flush skb queues
* @sk: the sending sock
* @portid: the netlink destination
* @queue: the skb queue to process
* @retry_limit: limit on number of netlink unicast failures
* @skb_hook: per-skb hook for additional processing
* @err_hook: hook called if the skb fails the netlink unicast send
*
* Description:
* Run through the given queue and attempt to send the audit records to auditd,
* returns zero on success, negative values on failure. It is up to the caller
* to ensure that the @sk is valid for the duration of this function.
*
*/
static int kauditd_send_queue(struct sock *sk, u32 portid,
struct sk_buff_head *queue,
unsigned int retry_limit,
void (*skb_hook)(struct sk_buff *skb),
void (*err_hook)(struct sk_buff *skb, int error))
{
int rc = 0;
struct sk_buff *skb = NULL;
struct sk_buff *skb_tail;
unsigned int failed = 0;
/* NOTE: kauditd_thread takes care of all our locking, we just use
* the netlink info passed to us (e.g. sk and portid) */
skb_tail = skb_peek_tail(queue);
while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
/* call the skb_hook for each skb we touch */
if (skb_hook)
(*skb_hook)(skb);
/* can we send to anyone via unicast? */
if (!sk) {
if (err_hook)
(*err_hook)(skb, -ECONNREFUSED);
continue;
}
retry:
/* grab an extra skb reference in case of error */
skb_get(skb);
rc = netlink_unicast(sk, skb, portid, 0);
if (rc < 0) {
/* send failed - try a few times unless fatal error */
if (++failed >= retry_limit ||
rc == -ECONNREFUSED || rc == -EPERM) {
sk = NULL;
if (err_hook)
(*err_hook)(skb, rc);
if (rc == -EAGAIN)
rc = 0;
/* continue to drain the queue */
continue;
} else
goto retry;
} else {
/* skb sent - drop the extra reference and continue */
consume_skb(skb);
failed = 0;
}
}
return (rc >= 0 ? 0 : rc);
}
/*
* kauditd_send_multicast_skb - Send a record to any multicast listeners
* @skb: audit record
*
* Description:
* Write a multicast message to anyone listening in the initial network
* namespace. This function doesn't consume an skb as might be expected since
* it has to copy the skb anyway.
*/
static void kauditd_send_multicast_skb(struct sk_buff *skb)
{
struct sk_buff *copy;
struct sock *sock = audit_get_sk(&init_net);
struct nlmsghdr *nlh;
/* NOTE: we are not taking an additional reference for init_net since
* we don't have to worry about it going away */
if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
return;
/*
* The seemingly wasteful skb_copy() rather than bumping the refcount
* using skb_get() is necessary because non-standard mods are made to
* the skb by the original kaudit unicast socket send routine. The
* existing auditd daemon assumes this breakage. Fixing this would
* require co-ordinating a change in the established protocol between
* the kaudit kernel subsystem and the auditd userspace code. There is
* no reason for new multicast clients to continue with this
* non-compliance.
*/
copy = skb_copy(skb, GFP_KERNEL);
if (!copy)
return;
nlh = nlmsg_hdr(copy);
nlh->nlmsg_len = skb->len;
nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
}
/**
* kauditd_thread - Worker thread to send audit records to userspace
* @dummy: unused
*/
static int kauditd_thread(void *dummy)
{
int rc;
u32 portid = 0;
struct net *net = NULL;
struct sock *sk = NULL;
struct auditd_connection *ac;
#define UNICAST_RETRIES 5
set_freezable();
while (!kthread_should_stop()) {
/* NOTE: see the lock comments in auditd_send_unicast_skb() */
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
if (!ac) {
rcu_read_unlock();
goto main_queue;
}
net = get_net(ac->net);
sk = audit_get_sk(net);
portid = ac->portid;
rcu_read_unlock();
/* attempt to flush the hold queue */
rc = kauditd_send_queue(sk, portid,
&audit_hold_queue, UNICAST_RETRIES,
NULL, kauditd_rehold_skb);
if (rc < 0) {
sk = NULL;
auditd_reset(ac);
goto main_queue;
}
/* attempt to flush the retry queue */
rc = kauditd_send_queue(sk, portid,
&audit_retry_queue, UNICAST_RETRIES,
NULL, kauditd_hold_skb);
if (rc < 0) {
sk = NULL;
auditd_reset(ac);
goto main_queue;
}
main_queue:
/* process the main queue - do the multicast send and attempt
* unicast, dump failed record sends to the retry queue; if
* sk == NULL due to previous failures we will just do the
* multicast send and move the record to the hold queue */
rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
kauditd_send_multicast_skb,
(sk ?
kauditd_retry_skb : kauditd_hold_skb));
if (ac && rc < 0)
auditd_reset(ac);
sk = NULL;
/* drop our netns reference, no auditd sends past this line */
if (net) {
put_net(net);
net = NULL;
}
/* we have processed all the queues so wake everyone */
wake_up(&audit_backlog_wait);
/* NOTE: we want to wake up if there is anything on the queue,
* regardless of if an auditd is connected, as we need to
* do the multicast send and rotate records from the
* main queue to the retry/hold queues */
wait_event_freezable(kauditd_wait,
(skb_queue_len(&audit_queue) ? 1 : 0));
}
return 0;
}
int audit_send_list_thread(void *_dest)
{
struct audit_netlink_list *dest = _dest;
struct sk_buff *skb;
struct sock *sk = audit_get_sk(dest->net);
/* wait for parent to finish and send an ACK */
audit_ctl_lock();
audit_ctl_unlock();
while ((skb = __skb_dequeue(&dest->q)) != NULL)
netlink_unicast(sk, skb, dest->portid, 0);
put_net(dest->net);
kfree(dest);
return 0;
}
struct sk_buff *audit_make_reply(int seq, int type, int done,
int multi, const void *payload, int size)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
void *data;
int flags = multi ? NLM_F_MULTI : 0;
int t = done ? NLMSG_DONE : type;
skb = nlmsg_new(size, GFP_KERNEL);
if (!skb)
return NULL;
nlh = nlmsg_put(skb, 0, seq, t, size, flags);
if (!nlh)
goto out_kfree_skb;
data = nlmsg_data(nlh);
memcpy(data, payload, size);
return skb;
out_kfree_skb:
kfree_skb(skb);
return NULL;
}
static void audit_free_reply(struct audit_reply *reply)
{
if (!reply)
return;
kfree_skb(reply->skb);
if (reply->net)
put_net(reply->net);
kfree(reply);
}
static int audit_send_reply_thread(void *arg)
{
struct audit_reply *reply = (struct audit_reply *)arg;
audit_ctl_lock();
audit_ctl_unlock();
/* Ignore failure. It'll only happen if the sender goes away,
because our timeout is set to infinite. */
netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0);
reply->skb = NULL;
audit_free_reply(reply);
return 0;
}
/**
* audit_send_reply - send an audit reply message via netlink
* @request_skb: skb of request we are replying to (used to target the reply)
* @seq: sequence number
* @type: audit message type
* @done: done (last) flag
* @multi: multi-part message flag
* @payload: payload data
* @size: payload size
*
* Allocates a skb, builds the netlink message, and sends it to the port id.
*/
static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
int multi, const void *payload, int size)
{
struct task_struct *tsk;
struct audit_reply *reply;
reply = kzalloc(sizeof(*reply), GFP_KERNEL);
if (!reply)
return;
reply->skb = audit_make_reply(seq, type, done, multi, payload, size);
if (!reply->skb)
goto err;
reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
reply->portid = NETLINK_CB(request_skb).portid;
tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
if (IS_ERR(tsk))
goto err;
return;
err:
audit_free_reply(reply);
}
/*
* Check for appropriate CAP_AUDIT_ capabilities on incoming audit
* control messages.
*/
static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
{
int err = 0;
/* Only support initial user namespace for now. */
/*
* We return ECONNREFUSED because it tricks userspace into thinking
* that audit was not configured into the kernel. Lots of users
* configure their PAM stack (because that's what the distro does)
* to reject login if unable to send messages to audit. If we return
* ECONNREFUSED the PAM stack thinks the kernel does not have audit
* configured in and will let login proceed. If we return EPERM
* userspace will reject all logins. This should be removed when we
* support non init namespaces!!
*/
if (current_user_ns() != &init_user_ns)
return -ECONNREFUSED;
switch (msg_type) {
case AUDIT_LIST:
case AUDIT_ADD:
case AUDIT_DEL:
return -EOPNOTSUPP;
case AUDIT_GET:
case AUDIT_SET:
case AUDIT_GET_FEATURE:
case AUDIT_SET_FEATURE:
case AUDIT_LIST_RULES:
case AUDIT_ADD_RULE:
case AUDIT_DEL_RULE:
case AUDIT_SIGNAL_INFO:
case AUDIT_TTY_GET:
case AUDIT_TTY_SET:
case AUDIT_TRIM:
case AUDIT_MAKE_EQUIV:
/* Only support auditd and auditctl in initial pid namespace
* for now. */
if (task_active_pid_ns(current) != &init_pid_ns)
return -EPERM;
if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
err = -EPERM;
break;
case AUDIT_USER:
case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
if (!netlink_capable(skb, CAP_AUDIT_WRITE))
err = -EPERM;
break;
default: /* bad msg */
err = -EINVAL;
}
return err;
}
static void audit_log_common_recv_msg(struct audit_context *context,
struct audit_buffer **ab, u16 msg_type)
{
uid_t uid = from_kuid(&init_user_ns, current_uid());
pid_t pid = task_tgid_nr(current);
if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
*ab = NULL;
return;
}
*ab = audit_log_start(context, GFP_KERNEL, msg_type);
if (unlikely(!*ab))
return;
audit_log_format(*ab, "pid=%d uid=%u ", pid, uid);
audit_log_session_info(*ab);
audit_log_task_context(*ab);
}
static inline void audit_log_user_recv_msg(struct audit_buffer **ab,
u16 msg_type)
{
audit_log_common_recv_msg(NULL, ab, msg_type);
}
static int is_audit_feature_set(int i)
{
return af.features & AUDIT_FEATURE_TO_MASK(i);
}
static int audit_get_feature(struct sk_buff *skb)
{
u32 seq;
seq = nlmsg_hdr(skb)->nlmsg_seq;
audit_send_reply(skb, seq, AUDIT_GET_FEATURE, 0, 0, &af, sizeof(af));
return 0;
}
static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature,
u32 old_lock, u32 new_lock, int res)
{
struct audit_buffer *ab;
if (audit_enabled == AUDIT_OFF)
return;
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_FEATURE_CHANGE);
if (!ab)
return;
audit_log_task_info(ab);
audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
audit_feature_names[which], !!old_feature, !!new_feature,
!!old_lock, !!new_lock, res);
audit_log_end(ab);
}
static int audit_set_feature(struct audit_features *uaf)
{
int i;
BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
/* if there is ever a version 2 we should handle that here */
for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
u32 feature = AUDIT_FEATURE_TO_MASK(i);
u32 old_feature, new_feature, old_lock, new_lock;
/* if we are not changing this feature, move along */
if (!(feature & uaf->mask))
continue;
old_feature = af.features & feature;
new_feature = uaf->features & feature;
new_lock = (uaf->lock | af.lock) & feature;
old_lock = af.lock & feature;
/* are we changing a locked feature? */
if (old_lock && (new_feature != old_feature)) {
audit_log_feature_change(i, old_feature, new_feature,
old_lock, new_lock, 0);
return -EPERM;
}
}
/* nothing invalid, do the changes */
for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
u32 feature = AUDIT_FEATURE_TO_MASK(i);
u32 old_feature, new_feature, old_lock, new_lock;
/* if we are not changing this feature, move along */
if (!(feature & uaf->mask))
continue;
old_feature = af.features & feature;
new_feature = uaf->features & feature;
old_lock = af.lock & feature;
new_lock = (uaf->lock | af.lock) & feature;
if (new_feature != old_feature)
audit_log_feature_change(i, old_feature, new_feature,
old_lock, new_lock, 1);
if (new_feature)
af.features |= feature;
else
af.features &= ~feature;
af.lock |= new_lock;
}
return 0;
}
static int audit_replace(struct pid *pid)
{
pid_t pvnr;
struct sk_buff *skb;
pvnr = pid_vnr(pid);
skb = audit_make_reply(0, AUDIT_REPLACE, 0, 0, &pvnr, sizeof(pvnr));
if (!skb)
return -ENOMEM;
return auditd_send_unicast_skb(skb);
}
static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
bool *ack)
{
u32 seq;
void *data;
int data_len;
int err;
struct audit_buffer *ab;
u16 msg_type = nlh->nlmsg_type;
struct audit_sig_info *sig_data;
struct lsm_context lsmctx = { NULL, 0, 0 };
err = audit_netlink_ok(skb, msg_type);
if (err)
return err;
seq = nlh->nlmsg_seq;
data = nlmsg_data(nlh);
data_len = nlmsg_len(nlh);
switch (msg_type) {
case AUDIT_GET: {
struct audit_status s;
memset(&s, 0, sizeof(s));
s.enabled = audit_enabled;
s.failure = audit_failure;
/* NOTE: use pid_vnr() so the PID is relative to the current
* namespace */
s.pid = auditd_pid_vnr();
s.rate_limit = audit_rate_limit;
s.backlog_limit = audit_backlog_limit;
s.lost = atomic_read(&audit_lost);
s.backlog = skb_queue_len(&audit_queue);
s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
s.backlog_wait_time = audit_backlog_wait_time;
s.backlog_wait_time_actual = atomic_read(&audit_backlog_wait_time_actual);
audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
break;
}
case AUDIT_SET: {
struct audit_status s;
memset(&s, 0, sizeof(s));
/* guard against past and future API changes */
memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
if (s.mask & AUDIT_STATUS_ENABLED) {
err = audit_set_enabled(s.enabled);
if (err < 0)
return err;
}
if (s.mask & AUDIT_STATUS_FAILURE) {
err = audit_set_failure(s.failure);
if (err < 0)
return err;
}
if (s.mask & AUDIT_STATUS_PID) {
/* NOTE: we are using the vnr PID functions below
* because the s.pid value is relative to the
* namespace of the caller; at present this
* doesn't matter much since you can really only
* run auditd from the initial pid namespace, but
* something to keep in mind if this changes */
pid_t new_pid = s.pid;
pid_t auditd_pid;
struct pid *req_pid = task_tgid(current);
/* Sanity check - PID values must match. Setting
* pid to 0 is how auditd ends auditing. */
if (new_pid && (new_pid != pid_vnr(req_pid)))
return -EINVAL;
/* test the auditd connection */
audit_replace(req_pid);
auditd_pid = auditd_pid_vnr();
if (auditd_pid) {
/* replacing a healthy auditd is not allowed */
if (new_pid) {
audit_log_config_change("audit_pid",
new_pid, auditd_pid, 0);
return -EEXIST;
}
/* only current auditd can unregister itself */
if (pid_vnr(req_pid) != auditd_pid) {
audit_log_config_change("audit_pid",
new_pid, auditd_pid, 0);
return -EACCES;
}
}
if (new_pid) {
/* register a new auditd connection */
err = auditd_set(req_pid,
NETLINK_CB(skb).portid,
sock_net(NETLINK_CB(skb).sk),
skb, ack);
if (audit_enabled != AUDIT_OFF)
audit_log_config_change("audit_pid",
new_pid,
auditd_pid,
err ? 0 : 1);
if (err)
return err;
/* try to process any backlog */
wake_up_interruptible(&kauditd_wait);
} else {
if (audit_enabled != AUDIT_OFF)
audit_log_config_change("audit_pid",
new_pid,
auditd_pid, 1);
/* unregister the auditd connection */
auditd_reset(NULL);
}
}
if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
err = audit_set_rate_limit(s.rate_limit);
if (err < 0)
return err;
}
if (s.mask & AUDIT_STATUS_BACKLOG_LIMIT) {
err = audit_set_backlog_limit(s.backlog_limit);
if (err < 0)
return err;
}
if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) {
if (sizeof(s) > (size_t)nlh->nlmsg_len)
return -EINVAL;
if (s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME)
return -EINVAL;
err = audit_set_backlog_wait_time(s.backlog_wait_time);
if (err < 0)
return err;
}
if (s.mask == AUDIT_STATUS_LOST) {
u32 lost = atomic_xchg(&audit_lost, 0);
audit_log_config_change("lost", 0, lost, 1);
return lost;
}
if (s.mask == AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL) {
u32 actual = atomic_xchg(&audit_backlog_wait_time_actual, 0);
audit_log_config_change("backlog_wait_time_actual", 0, actual, 1);
return actual;
}
break;
}
case AUDIT_GET_FEATURE:
err = audit_get_feature(skb);
if (err)
return err;
break;
case AUDIT_SET_FEATURE:
if (data_len < sizeof(struct audit_features))
return -EINVAL;
err = audit_set_feature(data);
if (err)
return err;
break;
case AUDIT_USER:
case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
if (!audit_enabled && msg_type != AUDIT_USER_AVC)
return 0;
/* exit early if there isn't at least one character to print */
if (data_len < 2)
return -EINVAL;
err = audit_filter(msg_type, AUDIT_FILTER_USER);
if (err == 1) { /* match or error */
char *str = data;
err = 0;
if (msg_type == AUDIT_USER_TTY) {
err = tty_audit_push();
if (err)
break;
}
audit_log_user_recv_msg(&ab, msg_type);
if (msg_type != AUDIT_USER_TTY) {
/* ensure NULL termination */
str[data_len - 1] = '\0';
audit_log_format(ab, " msg='%.*s'",
AUDIT_MESSAGE_TEXT_MAX,
str);
} else {
audit_log_format(ab, " data=");
if (str[data_len - 1] == '\0')
data_len--;
audit_log_n_untrustedstring(ab, str, data_len);
}
audit_log_end(ab);
}
break;
case AUDIT_ADD_RULE:
case AUDIT_DEL_RULE:
if (data_len < sizeof(struct audit_rule_data))
return -EINVAL;
if (audit_enabled == AUDIT_LOCKED) {
audit_log_common_recv_msg(audit_context(), &ab,
AUDIT_CONFIG_CHANGE);
audit_log_format(ab, " op=%s audit_enabled=%d res=0",
msg_type == AUDIT_ADD_RULE ?
"add_rule" : "remove_rule",
audit_enabled);
audit_log_end(ab);
return -EPERM;
}
err = audit_rule_change(msg_type, seq, data, data_len);
break;
case AUDIT_LIST_RULES:
err = audit_list_rules_send(skb, seq);
break;
case AUDIT_TRIM:
audit_trim_trees();
audit_log_common_recv_msg(audit_context(), &ab,
AUDIT_CONFIG_CHANGE);
audit_log_format(ab, " op=trim res=1");
audit_log_end(ab);
break;
case AUDIT_MAKE_EQUIV: {
void *bufp = data;
u32 sizes[2];
size_t msglen = data_len;
char *old, *new;
err = -EINVAL;
if (msglen < 2 * sizeof(u32))
break;
memcpy(sizes, bufp, 2 * sizeof(u32));
bufp += 2 * sizeof(u32);
msglen -= 2 * sizeof(u32);
old = audit_unpack_string(&bufp, &msglen, sizes[0]);
if (IS_ERR(old)) {
err = PTR_ERR(old);
break;
}
new = audit_unpack_string(&bufp, &msglen, sizes[1]);
if (IS_ERR(new)) {
err = PTR_ERR(new);
kfree(old);
break;
}
/* OK, here comes... */
err = audit_tag_tree(old, new);
audit_log_common_recv_msg(audit_context(), &ab,
AUDIT_CONFIG_CHANGE);
audit_log_format(ab, " op=make_equiv old=");
audit_log_untrustedstring(ab, old);
audit_log_format(ab, " new=");
audit_log_untrustedstring(ab, new);
audit_log_format(ab, " res=%d", !err);
audit_log_end(ab);
kfree(old);
kfree(new);
break;
}
case AUDIT_SIGNAL_INFO:
if (lsmprop_is_set(&audit_sig_lsm)) {
err = security_lsmprop_to_secctx(&audit_sig_lsm,
&lsmctx, LSM_ID_UNDEF);
if (err < 0)
return err;
}
sig_data = kmalloc(struct_size(sig_data, ctx, lsmctx.len),
GFP_KERNEL);
if (!sig_data) {
if (lsmprop_is_set(&audit_sig_lsm))
security_release_secctx(&lsmctx);
return -ENOMEM;
}
sig_data->uid = from_kuid(&init_user_ns, audit_sig_uid);
sig_data->pid = audit_sig_pid;
if (lsmprop_is_set(&audit_sig_lsm)) {
memcpy(sig_data->ctx, lsmctx.context, lsmctx.len);
security_release_secctx(&lsmctx);
}
audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
sig_data, struct_size(sig_data, ctx,
lsmctx.len));
kfree(sig_data);
break;
case AUDIT_TTY_GET: {
struct audit_tty_status s;
unsigned int t;
t = READ_ONCE(current->signal->audit_tty);
s.enabled = t & AUDIT_TTY_ENABLE;
s.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
break;
}
case AUDIT_TTY_SET: {
struct audit_tty_status s, old;
struct audit_buffer *ab;
unsigned int t;
memset(&s, 0, sizeof(s));
/* guard against past and future API changes */
memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
/* check if new data is valid */
if ((s.enabled != 0 && s.enabled != 1) ||
(s.log_passwd != 0 && s.log_passwd != 1))
err = -EINVAL;
if (err)
t = READ_ONCE(current->signal->audit_tty);
else {
t = s.enabled | (-s.log_passwd & AUDIT_TTY_LOG_PASSWD);
t = xchg(&current->signal->audit_tty, t);
}
old.enabled = t & AUDIT_TTY_ENABLE;
old.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
audit_log_common_recv_msg(audit_context(), &ab,
AUDIT_CONFIG_CHANGE);
audit_log_format(ab, " op=tty_set old-enabled=%d new-enabled=%d"
" old-log_passwd=%d new-log_passwd=%d res=%d",
old.enabled, s.enabled, old.log_passwd,
s.log_passwd, !err);
audit_log_end(ab);
break;
}
default:
err = -EINVAL;
break;
}
return err < 0 ? err : 0;
}
/**
* audit_receive - receive messages from a netlink control socket
* @skb: the message buffer
*
* Parse the provided skb and deal with any messages that may be present,
* malformed skbs are discarded.
*/
static void audit_receive(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
bool ack;
/*
* len MUST be signed for nlmsg_next to be able to decrement it below 0
* if the nlmsg_len was not aligned
*/
int len;
int err;
nlh = nlmsg_hdr(skb);
len = skb->len;
audit_ctl_lock();
while (nlmsg_ok(nlh, len)) {
ack = nlh->nlmsg_flags & NLM_F_ACK;
err = audit_receive_msg(skb, nlh, &ack);
/* send an ack if the user asked for one and audit_receive_msg
* didn't already do it, or if there was an error. */
if (ack || err)
netlink_ack(skb, nlh, err, NULL);
nlh = nlmsg_next(nlh, &len);
}
audit_ctl_unlock();
/* can't block with the ctrl lock, so penalize the sender now */
if (audit_backlog_limit &&
(skb_queue_len(&audit_queue) > audit_backlog_limit)) {
DECLARE_WAITQUEUE(wait, current);
/* wake kauditd to try and flush the queue */
wake_up_interruptible(&kauditd_wait);
add_wait_queue_exclusive(&audit_backlog_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(audit_backlog_wait_time);
remove_wait_queue(&audit_backlog_wait, &wait);
}
}
/* Log information about who is connecting to the audit multicast socket */
static void audit_log_multicast(int group, const char *op, int err)
{
const struct cred *cred;
struct tty_struct *tty;
char comm[sizeof(current->comm)];
struct audit_buffer *ab;
if (!audit_enabled)
return;
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_EVENT_LISTENER);
if (!ab)
return;
cred = current_cred();
tty = audit_get_tty();
audit_log_format(ab, "pid=%u uid=%u auid=%u tty=%s ses=%u",
task_tgid_nr(current),
from_kuid(&init_user_ns, cred->uid),
from_kuid(&init_user_ns, audit_get_loginuid(current)),
tty ? tty_name(tty) : "(none)",
audit_get_sessionid(current));
audit_put_tty(tty);
audit_log_task_context(ab); /* subj= */
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, get_task_comm(comm, current));
audit_log_d_path_exe(ab, current->mm); /* exe= */
audit_log_format(ab, " nl-mcgrp=%d op=%s res=%d", group, op, !err);
audit_log_end(ab);
}
/* Run custom bind function on netlink socket group connect or bind requests. */
static int audit_multicast_bind(struct net *net, int group)
{
int err = 0;
if (!capable(CAP_AUDIT_READ))
err = -EPERM;
audit_log_multicast(group, "connect", err);
return err;
}
static void audit_multicast_unbind(struct net *net, int group)
{
audit_log_multicast(group, "disconnect", 0);
}
static int __net_init audit_net_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = audit_receive,
.bind = audit_multicast_bind,
.unbind = audit_multicast_unbind,
.flags = NL_CFG_F_NONROOT_RECV,
.groups = AUDIT_NLGRP_MAX,
};
struct audit_net *aunet = net_generic(net, audit_net_id);
aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
if (aunet->sk == NULL) {
audit_panic("cannot initialize netlink socket in namespace");
return -ENOMEM;
}
/* limit the timeout in case auditd is blocked/stopped */
aunet->sk->sk_sndtimeo = HZ / 10;
return 0;
}
static void __net_exit audit_net_exit(struct net *net)
{
struct audit_net *aunet = net_generic(net, audit_net_id);
/* NOTE: you would think that we would want to check the auditd
* connection and potentially reset it here if it lives in this
* namespace, but since the auditd connection tracking struct holds a
* reference to this namespace (see auditd_set()) we are only ever
* going to get here after that connection has been released */
netlink_kernel_release(aunet->sk);
}
static struct pernet_operations audit_net_ops __net_initdata = {
.init = audit_net_init,
.exit = audit_net_exit,
.id = &audit_net_id,
.size = sizeof(struct audit_net),
};
/* Initialize audit support at boot time. */
static int __init audit_init(void)
{
int i;
if (audit_initialized == AUDIT_DISABLED)
return 0;
audit_buffer_cache = KMEM_CACHE(audit_buffer, SLAB_PANIC);
skb_queue_head_init(&audit_queue);
skb_queue_head_init(&audit_retry_queue);
skb_queue_head_init(&audit_hold_queue);
for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
INIT_LIST_HEAD(&audit_inode_hash[i]);
mutex_init(&audit_cmd_mutex.lock);
audit_cmd_mutex.owner = NULL;
pr_info("initializing netlink subsys (%s)\n",
str_enabled_disabled(audit_default));
register_pernet_subsys(&audit_net_ops);
audit_initialized = AUDIT_INITIALIZED;
kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
if (IS_ERR(kauditd_task)) {
int err = PTR_ERR(kauditd_task);
panic("audit: failed to start the kauditd thread (%d)\n", err);
}
audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL,
"state=initialized audit_enabled=%u res=1",
audit_enabled);
return 0;
}
postcore_initcall(audit_init);
/*
* Process kernel command-line parameter at boot time.
* audit={0|off} or audit={1|on}.
*/
static int __init audit_enable(char *str)
{
if (!strcasecmp(str, "off") || !strcmp(str, "0"))
audit_default = AUDIT_OFF;
else if (!strcasecmp(str, "on") || !strcmp(str, "1"))
audit_default = AUDIT_ON;
else {
pr_err("audit: invalid 'audit' parameter value (%s)\n", str);
audit_default = AUDIT_ON;
}
if (audit_default == AUDIT_OFF)
audit_initialized = AUDIT_DISABLED;
if (audit_set_enabled(audit_default))
pr_err("audit: error setting audit state (%d)\n",
audit_default);
pr_info("%s\n", audit_default ?
"enabled (after initialization)" : "disabled (until reboot)");
return 1;
}
__setup("audit=", audit_enable);
/* Process kernel command-line parameter at boot time.
* audit_backlog_limit=<n> */
static int __init audit_backlog_limit_set(char *str)
{
u32 audit_backlog_limit_arg;
pr_info("audit_backlog_limit: ");
if (kstrtouint(str, 0, &audit_backlog_limit_arg)) {
pr_cont("using default of %u, unable to parse %s\n",
audit_backlog_limit, str);
return 1;
}
audit_backlog_limit = audit_backlog_limit_arg;
pr_cont("%d\n", audit_backlog_limit);
return 1;
}
__setup("audit_backlog_limit=", audit_backlog_limit_set);
static void audit_buffer_free(struct audit_buffer *ab)
{
struct sk_buff *skb;
if (!ab)
return;
while ((skb = skb_dequeue(&ab->skb_list)))
kfree_skb(skb);
kmem_cache_free(audit_buffer_cache, ab);
}
static struct audit_buffer *audit_buffer_alloc(struct audit_context *ctx,
gfp_t gfp_mask, int type)
{
struct audit_buffer *ab;
ab = kmem_cache_alloc(audit_buffer_cache, gfp_mask);
if (!ab)
return NULL;
skb_queue_head_init(&ab->skb_list);
ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask);
if (!ab->skb)
goto err;
skb_queue_tail(&ab->skb_list, ab->skb);
if (!nlmsg_put(ab->skb, 0, 0, type, 0, 0))
goto err;
ab->ctx = ctx;
ab->gfp_mask = gfp_mask;
return ab;
err:
audit_buffer_free(ab);
return NULL;
}
/**
* audit_serial - compute a serial number for the audit record
*
* Compute a serial number for the audit record. Audit records are
* written to user-space as soon as they are generated, so a complete
* audit record may be written in several pieces. The timestamp of the
* record and this serial number are used by the user-space tools to
* determine which pieces belong to the same audit record. The
* (timestamp,serial) tuple is unique for each syscall and is live from
* syscall entry to syscall exit.
*
* NOTE: Another possibility is to store the formatted records off the
* audit context (for those records that have a context), and emit them
* all at syscall exit. However, this could delay the reporting of
* significant errors until syscall exit (or never, if the system
* halts).
*/
unsigned int audit_serial(void)
{
static atomic_t serial = ATOMIC_INIT(0);
return atomic_inc_return(&serial);
}
static inline void audit_get_stamp(struct audit_context *ctx,
struct audit_stamp *stamp)
{
if (!ctx || !auditsc_get_stamp(ctx, stamp)) {
ktime_get_coarse_real_ts64(&stamp->ctime);
stamp->serial = audit_serial();
}
}
/**
* audit_log_start - obtain an audit buffer
* @ctx: audit_context (may be NULL)
* @gfp_mask: type of allocation
* @type: audit message type
*
* Returns audit_buffer pointer on success or NULL on error.
*
* Obtain an audit buffer. This routine does locking to obtain the
* audit buffer, but then no locking is required for calls to
* audit_log_*format. If the task (ctx) is currently in a
* syscall, the syscall is marked as auditable and an audit record
* will be written at syscall exit. If there is no associated task, then
* task context (ctx) should be NULL.
*/
struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
int type)
{
struct audit_buffer *ab;
if (audit_initialized != AUDIT_INITIALIZED)
return NULL;
if (unlikely(!audit_filter(type, AUDIT_FILTER_EXCLUDE)))
return NULL;
/* NOTE: don't ever fail/sleep on these two conditions:
* 1. auditd generated record - since we need auditd to drain the
* queue; also, when we are checking for auditd, compare PIDs using
* task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
* using a PID anchored in the caller's namespace
* 2. generator holding the audit_cmd_mutex - we don't want to block
* while holding the mutex, although we do penalize the sender
* later in audit_receive() when it is safe to block
*/
if (!(auditd_test_task(current) || audit_ctl_owner_current())) {
long stime = audit_backlog_wait_time;
while (audit_backlog_limit &&
(skb_queue_len(&audit_queue) > audit_backlog_limit)) {
/* wake kauditd to try and flush the queue */
wake_up_interruptible(&kauditd_wait);
/* sleep if we are allowed and we haven't exhausted our
* backlog wait limit */
if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
long rtime = stime;
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&audit_backlog_wait,
&wait);
set_current_state(TASK_UNINTERRUPTIBLE);
stime = schedule_timeout(rtime);
atomic_add(rtime - stime, &audit_backlog_wait_time_actual);
remove_wait_queue(&audit_backlog_wait, &wait);
} else {
if (audit_rate_check() && printk_ratelimit())
pr_warn("audit_backlog=%d > audit_backlog_limit=%d\n",
skb_queue_len(&audit_queue),
audit_backlog_limit);
audit_log_lost("backlog limit exceeded");
return NULL;
}
}
}
ab = audit_buffer_alloc(ctx, gfp_mask, type);
if (!ab) {
audit_log_lost("out of memory in audit_log_start");
return NULL;
}
audit_get_stamp(ab->ctx, &ab->stamp);
/* cancel dummy context to enable supporting records */
if (ctx)
ctx->dummy = 0;
audit_log_format(ab, "audit(%llu.%03lu:%u): ",
(unsigned long long)ab->stamp.ctime.tv_sec,
ab->stamp.ctime.tv_nsec/1000000,
ab->stamp.serial);
return ab;
}
/**
* audit_expand - expand skb in the audit buffer
* @ab: audit_buffer
* @extra: space to add at tail of the skb
*
* Returns 0 (no space) on failed expansion, or available space if
* successful.
*/
static inline int audit_expand(struct audit_buffer *ab, int extra)
{
struct sk_buff *skb = ab->skb;
int oldtail = skb_tailroom(skb);
int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask);
int newtail = skb_tailroom(skb);
if (ret < 0) {
audit_log_lost("out of memory in audit_expand");
return 0;
}
skb->truesize += newtail - oldtail;
return newtail;
}
/*
* Format an audit message into the audit buffer. If there isn't enough
* room in the audit buffer, more room will be allocated and vsnprint
* will be called a second time. Currently, we assume that a printk
* can't format a message larger than 1024 bytes, so we don't either.
*/
static __printf(2, 0)
void audit_log_vformat(struct audit_buffer *ab, const char *fmt, va_list args)
{
int len, avail;
struct sk_buff *skb;
va_list args2;
if (!ab)
return;
BUG_ON(!ab->skb);
skb = ab->skb;
avail = skb_tailroom(skb);
if (avail == 0) {
avail = audit_expand(ab, AUDIT_BUFSIZ);
if (!avail)
goto out;
}
va_copy(args2, args);
len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args);
if (len >= avail) {
/* The printk buffer is 1024 bytes long, so if we get
* here and AUDIT_BUFSIZ is at least 1024, then we can
* log everything that printk could have logged. */
avail = audit_expand(ab,
max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
if (!avail)
goto out_va_end;
len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
}
if (len > 0)
skb_put(skb, len);
out_va_end:
va_end(args2);
out:
return;
}
/**
* audit_log_format - format a message into the audit buffer.
* @ab: audit_buffer
* @fmt: format string
* @...: optional parameters matching @fmt string
*
* All the work is done in audit_log_vformat.
*/
void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
{
va_list args;
if (!ab)
return;
va_start(args, fmt);
audit_log_vformat(ab, fmt, args);
va_end(args);
}
/**
* audit_log_n_hex - convert a buffer to hex and append it to the audit skb
* @ab: the audit_buffer
* @buf: buffer to convert to hex
* @len: length of @buf to be converted
*
* No return value; failure to expand is silently ignored.
*
* This function will take the passed buf and convert it into a string of
* ASCII hex digits. The new string is placed onto the skb.
*/
void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
size_t len)
{
int i, avail, new_len;
unsigned char *ptr;
struct sk_buff *skb;
if (!ab)
return;
BUG_ON(!ab->skb);
skb = ab->skb;
avail = skb_tailroom(skb);
new_len = len<<1;
if (new_len >= avail) {
/* Round the buffer request up to the next multiple of AUDIT_BUFSIZ */
new_len = AUDIT_BUFSIZ*(((new_len-avail)/AUDIT_BUFSIZ) + 1);
avail = audit_expand(ab, new_len);
if (!avail)
return;
}
ptr = skb_tail_pointer(skb);
for (i = 0; i < len; i++)
ptr = hex_byte_pack_upper(ptr, buf[i]);
*ptr = 0;
skb_put(skb, len << 1); /* new string is twice the old string */
}
/*
* Format a string of no more than slen characters into the audit buffer,
* enclosed in quote marks.
*/
void audit_log_n_string(struct audit_buffer *ab, const char *string,
size_t slen)
{
int avail, new_len;
unsigned char *ptr;
struct sk_buff *skb;
if (!ab)
return;
BUG_ON(!ab->skb);
skb = ab->skb;
avail = skb_tailroom(skb);
new_len = slen + 3; /* enclosing quotes + null terminator */
if (new_len > avail) {
avail = audit_expand(ab, new_len);
if (!avail)
return;
}
ptr = skb_tail_pointer(skb);
*ptr++ = '"';
memcpy(ptr, string, slen);
ptr += slen;
*ptr++ = '"';
*ptr = 0;
skb_put(skb, slen + 2); /* don't include null terminator */
}
/**
* audit_string_contains_control - does a string need to be logged in hex
* @string: string to be checked
* @len: max length of the string to check
*/
bool audit_string_contains_control(const char *string, size_t len)
{
const unsigned char *p;
for (p = string; p < (const unsigned char *)string + len; p++) {
if (*p == '"' || *p < 0x21 || *p > 0x7e)
return true;
}
return false;
}
/**
* audit_log_n_untrustedstring - log a string that may contain random characters
* @ab: audit_buffer
* @string: string to be logged
* @len: length of string (not including trailing null)
*
* This code will escape a string that is passed to it if the string
* contains a control character, unprintable character, double quote mark,
* or a space. Unescaped strings will start and end with a double quote mark.
* Strings that are escaped are printed in hex (2 digits per char).
*
* The caller specifies the number of characters in the string to log, which may
* or may not be the entire string.
*/
void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string,
size_t len)
{
if (audit_string_contains_control(string, len))
audit_log_n_hex(ab, string, len);
else
audit_log_n_string(ab, string, len);
}
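/*
 * Illustrative note (not part of the original file): with the rules above, a
 * clean value such as "bash" is emitted as comm="bash", while a value that
 * contains a space or control character, e.g. "my prog", is hex-encoded with
 * audit_log_n_hex() and emitted without quotes as comm=6D792070726F67.
 */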
/**
* audit_log_untrustedstring - log a string that may contain random characters
* @ab: audit_buffer
* @string: string to be logged
*
* Same as audit_log_n_untrustedstring(), except that strlen is used to
* determine string length.
*/
void audit_log_untrustedstring(struct audit_buffer *ab, const char *string)
{
audit_log_n_untrustedstring(ab, string, strlen(string));
}
/* This is a helper-function to print the escaped d_path */
void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
const struct path *path)
{
char *p, *pathname;
if (prefix)
audit_log_format(ab, "%s", prefix);
/* We will allow 11 spaces for ' (deleted)' to be appended */
pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
if (!pathname) {
audit_log_format(ab, "\"<no_memory>\"");
return;
}
p = d_path(path, pathname, PATH_MAX+11);
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
audit_log_format(ab, "\"<too_long>\"");
} else
audit_log_untrustedstring(ab, p);
kfree(pathname);
}
void audit_log_session_info(struct audit_buffer *ab)
{
unsigned int sessionid = audit_get_sessionid(current);
uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
audit_log_format(ab, "auid=%u ses=%u", auid, sessionid);
}
void audit_log_key(struct audit_buffer *ab, char *key)
{
audit_log_format(ab, " key=");
if (key)
audit_log_untrustedstring(ab, key);
else
audit_log_format(ab, "(null)");
}
/**
* audit_buffer_aux_new - Add an aux record buffer to the skb list
* @ab: audit_buffer
* @type: message type
*
* Aux records are allocated and added to the skb list of
* the "main" record. The ab->skb is reset to point to the
* aux record on its creation. When the aux record is complete,
* ab->skb has to be reset to point to the "main" record.
* This allows the audit_log_ functions to be ignorant of
* which kind of record it is logging to. It also avoids adding
* special data for aux records.
*
* On success ab->skb will point to the new aux record.
* Returns 0 on success, -ENOMEM should allocation fail.
*/
static int audit_buffer_aux_new(struct audit_buffer *ab, int type)
{
WARN_ON(ab->skb != skb_peek(&ab->skb_list));
ab->skb = nlmsg_new(AUDIT_BUFSIZ, ab->gfp_mask);
if (!ab->skb)
goto err;
if (!nlmsg_put(ab->skb, 0, 0, type, 0, 0))
goto err;
skb_queue_tail(&ab->skb_list, ab->skb);
audit_log_format(ab, "audit(%llu.%03lu:%u): ",
(unsigned long long)ab->stamp.ctime.tv_sec,
ab->stamp.ctime.tv_nsec/1000000,
ab->stamp.serial);
return 0;
err:
kfree_skb(ab->skb);
ab->skb = skb_peek(&ab->skb_list);
return -ENOMEM;
}
/**
* audit_buffer_aux_end - Switch back to the "main" record from an aux record
* @ab: audit_buffer
*
* Restores the "main" audit record to ab->skb.
*/
static void audit_buffer_aux_end(struct audit_buffer *ab)
{
ab->skb = skb_peek(&ab->skb_list);
}
/**
* audit_log_subj_ctx - Add LSM subject information
* @ab: audit_buffer
* @prop: LSM subject properties.
*
* Add a subj= field and, if necessary, an AUDIT_MAC_TASK_CONTEXTS record.
*/
int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop)
{
struct lsm_context ctx;
char *space = "";
int error;
int i;
security_current_getlsmprop_subj(prop);
if (!lsmprop_is_set(prop))
return 0;
if (audit_subj_secctx_cnt < 2) {
error = security_lsmprop_to_secctx(prop, &ctx, LSM_ID_UNDEF);
if (error < 0) {
if (error != -EINVAL)
goto error_path;
return 0;
}
audit_log_format(ab, " subj=%s", ctx.context);
security_release_secctx(&ctx);
return 0;
}
/* Multiple LSMs provide contexts. Include an aux record. */
audit_log_format(ab, " subj=?");
error = audit_buffer_aux_new(ab, AUDIT_MAC_TASK_CONTEXTS);
if (error)
goto error_path;
for (i = 0; i < audit_subj_secctx_cnt; i++) {
error = security_lsmprop_to_secctx(prop, &ctx,
audit_subj_lsms[i]->id);
if (error < 0) {
/*
* Don't print anything. An LSM like BPF could
* claim to support contexts, but only do so under
* certain conditions.
*/
if (error == -EOPNOTSUPP)
continue;
if (error != -EINVAL)
audit_panic("error in audit_log_subj_ctx");
} else {
audit_log_format(ab, "%ssubj_%s=%s", space,
audit_subj_lsms[i]->name, ctx.context);
space = " ";
security_release_secctx(&ctx);
}
}
audit_buffer_aux_end(ab);
return 0;
error_path:
audit_panic("error in audit_log_subj_ctx");
return error;
}
EXPORT_SYMBOL(audit_log_subj_ctx);
int audit_log_task_context(struct audit_buffer *ab)
{
struct lsm_prop prop;
security_current_getlsmprop_subj(&prop);
return audit_log_subj_ctx(ab, &prop);
}
EXPORT_SYMBOL(audit_log_task_context);
int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop)
{
int i;
int rc;
int error = 0;
char *space = "";
struct lsm_context ctx;
if (audit_obj_secctx_cnt < 2) {
error = security_lsmprop_to_secctx(prop, &ctx, LSM_ID_UNDEF);
if (error < 0) {
if (error != -EINVAL)
goto error_path;
return error;
}
audit_log_format(ab, " obj=%s", ctx.context);
security_release_secctx(&ctx);
return 0;
}
audit_log_format(ab, " obj=?");
error = audit_buffer_aux_new(ab, AUDIT_MAC_OBJ_CONTEXTS);
if (error)
goto error_path;
for (i = 0; i < audit_obj_secctx_cnt; i++) {
rc = security_lsmprop_to_secctx(prop, &ctx,
audit_obj_lsms[i]->id);
if (rc < 0) {
audit_log_format(ab, "%sobj_%s=?", space,
audit_obj_lsms[i]->name);
if (rc != -EINVAL)
audit_panic("error in audit_log_obj_ctx");
error = rc;
} else {
audit_log_format(ab, "%sobj_%s=%s", space,
audit_obj_lsms[i]->name, ctx.context);
security_release_secctx(&ctx);
}
space = " ";
}
audit_buffer_aux_end(ab);
return error;
error_path:
audit_panic("error in audit_log_obj_ctx");
return error;
}
void audit_log_d_path_exe(struct audit_buffer *ab,
struct mm_struct *mm)
{
struct file *exe_file;
if (!mm)
goto out_null;
exe_file = get_mm_exe_file(mm);
if (!exe_file)
goto out_null;
audit_log_d_path(ab, " exe=", &exe_file->f_path);
fput(exe_file);
return;
out_null:
audit_log_format(ab, " exe=(null)");
}
struct tty_struct *audit_get_tty(void)
{
struct tty_struct *tty = NULL;
unsigned long flags;
	spin_lock_irqsave(&current->sighand->siglock, flags);
if (current->signal)
tty = tty_kref_get(current->signal->tty);
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
return tty;
}
void audit_put_tty(struct tty_struct *tty)
{
tty_kref_put(tty);
}
void audit_log_task_info(struct audit_buffer *ab)
{
const struct cred *cred;
char comm[sizeof(current->comm)];
struct tty_struct *tty;
if (!ab)
return;
cred = current_cred();
tty = audit_get_tty();
audit_log_format(ab,
" ppid=%d pid=%d auid=%u uid=%u gid=%u"
" euid=%u suid=%u fsuid=%u"
" egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
task_ppid_nr(current),
task_tgid_nr(current),
from_kuid(&init_user_ns, audit_get_loginuid(current)),
from_kuid(&init_user_ns, cred->uid),
from_kgid(&init_user_ns, cred->gid),
from_kuid(&init_user_ns, cred->euid),
from_kuid(&init_user_ns, cred->suid),
from_kuid(&init_user_ns, cred->fsuid),
from_kgid(&init_user_ns, cred->egid),
from_kgid(&init_user_ns, cred->sgid),
from_kgid(&init_user_ns, cred->fsgid),
tty ? tty_name(tty) : "(none)",
audit_get_sessionid(current));
audit_put_tty(tty);
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, get_task_comm(comm, current));
audit_log_d_path_exe(ab, current->mm);
audit_log_task_context(ab);
}
EXPORT_SYMBOL(audit_log_task_info);
/**
* audit_log_path_denied - report a path restriction denial
* @type: audit message type (AUDIT_ANOM_LINK, AUDIT_ANOM_CREAT, etc)
* @operation: specific operation name
*/
void audit_log_path_denied(int type, const char *operation)
{
struct audit_buffer *ab;
if (!audit_enabled)
return;
/* Generate log with subject, operation, outcome. */
ab = audit_log_start(audit_context(), GFP_KERNEL, type);
if (!ab)
return;
audit_log_format(ab, "op=%s", operation);
audit_log_task_info(ab);
audit_log_format(ab, " res=0");
audit_log_end(ab);
}
/* global counter which is incremented every time something logs in */
static atomic_t session_id = ATOMIC_INIT(0);
static int audit_set_loginuid_perm(kuid_t loginuid)
{
/* if we are unset, we don't need privs */
if (!audit_loginuid_set(current))
return 0;
/* AUDIT_FEATURE_LOGINUID_IMMUTABLE means never, ever allow a change */
if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE))
return -EPERM;
/* it is set, you need permission */
if (!capable(CAP_AUDIT_CONTROL))
return -EPERM;
/* reject if this is not an unset and we don't allow that */
if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID)
&& uid_valid(loginuid))
return -EPERM;
return 0;
}
static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
unsigned int oldsessionid,
unsigned int sessionid, int rc)
{
struct audit_buffer *ab;
uid_t uid, oldloginuid, loginuid;
struct tty_struct *tty;
if (!audit_enabled)
return;
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_LOGIN);
if (!ab)
return;
uid = from_kuid(&init_user_ns, task_uid(current));
oldloginuid = from_kuid(&init_user_ns, koldloginuid);
loginuid = from_kuid(&init_user_ns, kloginuid);
tty = audit_get_tty();
audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
audit_log_task_context(ab);
audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
oldloginuid, loginuid, tty ? tty_name(tty) : "(none)",
oldsessionid, sessionid, !rc);
audit_put_tty(tty);
audit_log_end(ab);
}
/**
* audit_set_loginuid - set current task's loginuid
* @loginuid: loginuid value
*
* Returns 0 on success, or the error returned by audit_set_loginuid_perm().
*
* Called (set) from fs/proc/base.c::proc_loginuid_write().
*/
int audit_set_loginuid(kuid_t loginuid)
{
unsigned int oldsessionid, sessionid = AUDIT_SID_UNSET;
kuid_t oldloginuid;
int rc;
oldloginuid = audit_get_loginuid(current);
oldsessionid = audit_get_sessionid(current);
rc = audit_set_loginuid_perm(loginuid);
if (rc)
goto out;
/* are we setting or clearing? */
if (uid_valid(loginuid)) {
sessionid = (unsigned int)atomic_inc_return(&session_id);
if (unlikely(sessionid == AUDIT_SID_UNSET))
sessionid = (unsigned int)atomic_inc_return(&session_id);
}
current->sessionid = sessionid;
current->loginuid = loginuid;
out:
audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc);
return rc;
}
/**
* audit_signal_info - record signal info for shutting down audit subsystem
* @sig: signal value
* @t: task being signaled
*
* If the audit subsystem is being terminated, record the task (pid)
* and uid that is doing that.
*/
int audit_signal_info(int sig, struct task_struct *t)
{
kuid_t uid = current_uid(), auid;
if (auditd_test_task(t) &&
(sig == SIGTERM || sig == SIGHUP ||
sig == SIGUSR1 || sig == SIGUSR2)) {
audit_sig_pid = task_tgid_nr(current);
auid = audit_get_loginuid(current);
if (uid_valid(auid))
audit_sig_uid = auid;
else
audit_sig_uid = uid;
security_current_getlsmprop_subj(&audit_sig_lsm);
}
return audit_signal_info_syscall(t);
}
/**
* __audit_log_end - enqueue one audit record
* @skb: the buffer to send
*/
static void __audit_log_end(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
if (audit_rate_check()) {
/* setup the netlink header, see the comments in
* kauditd_send_multicast_skb() for length quirks */
nlh = nlmsg_hdr(skb);
nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
/* queue the netlink packet */
skb_queue_tail(&audit_queue, skb);
} else {
audit_log_lost("rate limit exceeded");
kfree_skb(skb);
}
}
/**
* audit_log_end - end one audit record
* @ab: the audit_buffer
*
* We cannot do a netlink send inside an irq context because it blocks (last
* arg, flags, is not set to MSG_DONTWAIT), so the audit buffer is placed on a
* queue and a kthread is scheduled to remove them from the queue outside the
* irq context. May be called in any context.
*/
void audit_log_end(struct audit_buffer *ab)
{
struct sk_buff *skb;
if (!ab)
return;
while ((skb = skb_dequeue(&ab->skb_list)))
__audit_log_end(skb);
/* poke the kauditd thread */
wake_up_interruptible(&kauditd_wait);
audit_buffer_free(ab);
}
/**
* audit_log - Log an audit record
* @ctx: audit context
* @gfp_mask: type of allocation
* @type: audit message type
* @fmt: format string to use
* @...: variable parameters matching the format string
*
* This is a convenience function that calls audit_log_start,
* audit_log_vformat, and audit_log_end. It may be called
* in any context.
*/
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{
struct audit_buffer *ab;
va_list args;
ab = audit_log_start(ctx, gfp_mask, type);
if (ab) {
va_start(args, fmt);
audit_log_vformat(ab, fmt, args);
va_end(args);
audit_log_end(ab);
}
}
EXPORT_SYMBOL(audit_log_start);
EXPORT_SYMBOL(audit_log_end);
EXPORT_SYMBOL(audit_log_format);
EXPORT_SYMBOL(audit_log);
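/*
 * Illustrative sketch (not part of the original file): the three-step
 * audit_log_start()/audit_log_format()/audit_log_end() pattern that
 * audit_log() wraps. The function name and the "example=%d" field are
 * hypothetical; AUDIT_KERNEL is reused from audit_init() above purely for
 * illustration, and a real caller would pick the type matching its record.
 */
static void __maybe_unused audit_log_usage_sketch(void)
{
	struct audit_buffer *ab;

	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_KERNEL);
	if (!ab)
		return;	/* audit disabled, record filtered, or allocation failed */
	audit_log_format(ab, "example=%d", 1);
	audit_log_end(ab);	/* queue the record and wake up kauditd */
}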
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* RNG: Random Number Generator algorithms under the crypto API
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
#ifndef _CRYPTO_RNG_H
#define _CRYPTO_RNG_H
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
struct crypto_rng;
/**
* struct rng_alg - random number generator definition
*
* @generate: The function defined by this variable obtains a
* random number. The random number generator transform
* must generate the random number out of the context
* provided with this call, plus any additional data
* if provided to the call.
* @seed: Seed or reseed the random number generator. With the
* invocation of this function call, the random number
* generator shall become ready for generation. If the
* random number generator requires a seed for setting
* up a new state, the seed must be provided by the
* consumer while invoking this function. The required
* size of the seed is defined with @seedsize .
* @set_ent: Set entropy that would otherwise be obtained from
* entropy source. Internal use only.
* @seedsize: The seed size required for a random number generator
* initialization, defined with this variable. Some
* random number generators do not require a seed,
* as the seeding is implemented internally without
* any support needed from the consumer. In this case,
* the seed size is set to zero.
* @base: Common crypto API algorithm data structure.
*/
struct rng_alg {
int (*generate)(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen);
int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
unsigned int len);
unsigned int seedsize;
struct crypto_alg base;
};
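/*
 * Illustrative provider sketch (not part of the original header), assuming
 * registration through crypto_register_rng() from <crypto/internal/rng.h>:
 * a toy rng_alg whose @generate callback zero-fills the output and whose
 * @seedsize is 0, i.e. no caller-provided seed is needed. All "example_*"
 * names are hypothetical and the output is obviously not random.
 */
static int example_rng_generate(struct crypto_rng *tfm,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int dlen)
{
	unsigned int i;

	for (i = 0; i < dlen; i++)
		dst[i] = 0;	/* placeholder "randomness" */
	return 0;
}

static int example_rng_seed(struct crypto_rng *tfm,
			    const u8 *seed, unsigned int slen)
{
	return 0;	/* stateless toy: nothing to absorb */
}

static struct rng_alg example_rng_alg __maybe_unused = {
	.generate	= example_rng_generate,
	.seed		= example_rng_seed,
	.seedsize	= 0,
	.base		= {
		.cra_name		= "example_rng",
		.cra_driver_name	= "example_rng-generic",
		.cra_priority		= 100,
	},
};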
struct crypto_rng {
struct crypto_tfm base;
};
extern struct crypto_rng *crypto_default_rng;
int crypto_get_default_rng(void);
void crypto_put_default_rng(void);
/**
* DOC: Random number generator API
*
* The random number generator API is used with the ciphers of type
* CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
*/
/**
* crypto_alloc_rng() -- allocate RNG handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* random number generator
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for a random number generator. The returned struct
* crypto_rng is the cipher handle that is required for any subsequent
* API invocation for that random number generator.
*
* For all random number generators, this call creates a new private copy of
* the random number generator that does not share a state with other
* instances. The only exception is the "krng" random number generator which
* is a kernel crypto API use case for the get_random_bytes() function of the
* /dev/random driver.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
{
return &tfm->base;
}
static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
{
return container_of(alg, struct rng_alg, base);
}
/**
* crypto_rng_alg() - obtain 'struct rng_alg' pointer from RNG handle
* @tfm: RNG handle
*
* Return: Pointer to 'struct rng_alg', derived from @tfm RNG handle
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
return __crypto_rng_alg(crypto_rng_tfm(tfm)->__crt_alg);
}
/**
* crypto_free_rng() - zeroize and free RNG handle
* @tfm: cipher handle to be freed
*
* If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
* @src: Input buffer holding additional data, may be NULL
* @slen: Length of additional data
* @dst: output buffer holding the random numbers
* @dlen: length of the output buffer
*
* This function fills the caller-allocated buffer with random
* numbers using the random number generator referenced by the
* cipher handle.
*
* Return: 0 if the function was successful; < 0 if an error occurred
*/
static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
/**
* crypto_rng_get_bytes() - get random number
* @tfm: cipher handle
* @rdata: output buffer holding the random numbers
* @dlen: length of the output buffer
*
* This function fills the caller-allocated buffer with random numbers using the
* random number generator referenced by the cipher handle.
*
* Return: 0 if the function was successful; < 0 if an error occurred
*/
static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
u8 *rdata, unsigned int dlen)
{
return crypto_rng_generate(tfm, NULL, 0, rdata, dlen);
}
/**
* crypto_rng_reset() - re-initialize the RNG
* @tfm: cipher handle
* @seed: seed input data
* @slen: length of the seed input data
*
* The reset function completely re-initializes the random number generator
* referenced by the cipher handle by clearing the current state. The new state
* is initialized with the caller provided seed or automatically, depending
* on the random number generator type (the ANSI X9.31 RNG requires
* caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
* The seed is provided as a parameter to this function call. The provided seed
* should have the length reported by crypto_rng_seedsize() for the random
* number generator.
*
* Return: 0 if the re-initialization was successful; < 0 if an error occurred
*/
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed,
unsigned int slen);
/**
* crypto_rng_seedsize() - obtain seed size of RNG
* @tfm: cipher handle
*
* The function returns the seed size for the random number generator
* referenced by the cipher handle. This value may be zero if the random
* number generator does not implement or require a reseeding. For example,
* the SP800-90A DRBGs implement an automated reseeding after reaching a
* pre-defined threshold.
*
* Return: seed size for the random number generator
*/
static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
{
return crypto_rng_alg(tfm)->seedsize;
}
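/*
 * Illustrative consumer sketch (not part of the original header), tying the
 * calls above together: allocate an RNG handle, seed it with crypto_rng_reset(),
 * pull random bytes with crypto_rng_get_bytes(), and free the handle. The
 * algorithm name "drbg_nopr_sha256" and the helper name are assumptions (any
 * registered "rng" algorithm would do), and IS_ERR()/PTR_ERR() come from
 * <linux/err.h>.
 */
static inline int example_crypto_rng_consumer(u8 *out, unsigned int outlen,
					      const u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("drbg_nopr_sha256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_reset(rng, seed, slen);	/* seed/reseed the generator */
	if (!ret)
		ret = crypto_rng_get_bytes(rng, out, outlen);

	crypto_free_rng(rng);
	return ret;
}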
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Process number limiting controller for cgroups.
*
* Used to allow a cgroup hierarchy to stop any new processes from fork()ing
* after a certain limit is reached.
*
* Since it is trivial to hit the task limit without hitting any kmemcg limits
* in place, PIDs are a fundamental resource. As such, PID exhaustion must be
* preventable in the scope of a cgroup hierarchy by allowing resource limiting
* of the number of tasks in a cgroup.
*
* In order to use the `pids` controller, set the maximum number of tasks in
* pids.max (this is not available in the root cgroup for obvious reasons). The
* number of processes currently in the cgroup is given by pids.current.
* Organisational operations are not blocked by cgroup policies, so it is
* possible to have pids.current > pids.max. However, it is not possible to
* violate a cgroup policy through fork(). fork() will return -EAGAIN if forking
* would cause a cgroup policy to be violated.
*
* To set a cgroup to have no limit, set pids.max to "max". This is the default
* for all new cgroups (N.B. that PID limits are hierarchical, so the most
* stringent limit in the hierarchy is followed).
*
* pids.current tracks all child cgroup hierarchies, so parent/pids.current is
* a superset of parent/child/pids.current.
*
* Copyright (C) 2015 Aleksa Sarai <cyphar@cyphar.com>
*/
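/*
 * Illustrative usage note (not part of the original file), assuming cgroup v2
 * mounted at /sys/fs/cgroup and a hypothetical cgroup named "websvc":
 *
 *	# mkdir /sys/fs/cgroup/websvc
 *	# echo 100 > /sys/fs/cgroup/websvc/pids.max
 *	# cat /sys/fs/cgroup/websvc/pids.current
 *
 * Once a charge would push pids.current past pids.max anywhere in the
 * hierarchy, fork()/clone() in that cgroup fails with -EAGAIN; writing "max"
 * to pids.max removes the limit again.
 */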
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/sched/task.h>
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"
enum pidcg_event {
/* Fork failed in subtree because this pids_cgroup limit was hit. */
PIDCG_MAX,
/* Fork failed in this pids_cgroup because ancestor limit was hit. */
PIDCG_FORKFAIL,
NR_PIDCG_EVENTS,
};
struct pids_cgroup {
struct cgroup_subsys_state css;
/*
* Use 64-bit types so that we can safely represent "max" as
* %PIDS_MAX = (%PID_MAX_LIMIT + 1).
*/
atomic64_t counter;
atomic64_t limit;
int64_t watermark;
/* Handles for pids.events[.local] */
struct cgroup_file events_file;
struct cgroup_file events_local_file;
atomic64_t events[NR_PIDCG_EVENTS];
atomic64_t events_local[NR_PIDCG_EVENTS];
};
static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
{
return container_of(css, struct pids_cgroup, css);
}
static struct pids_cgroup *parent_pids(struct pids_cgroup *pids)
{
return css_pids(pids->css.parent);
}
static struct cgroup_subsys_state *
pids_css_alloc(struct cgroup_subsys_state *parent)
{
struct pids_cgroup *pids;
pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL);
if (!pids)
return ERR_PTR(-ENOMEM);
atomic64_set(&pids->limit, PIDS_MAX);
return &pids->css;
}
static void pids_css_free(struct cgroup_subsys_state *css)
{
kfree(css_pids(css));
}
static void pids_update_watermark(struct pids_cgroup *p, int64_t nr_pids)
{
/*
* This is racy, but we don't need perfectly accurate tallying of
* the watermark, and this lets us avoid extra atomic overhead.
*/
if (nr_pids > READ_ONCE(p->watermark))
WRITE_ONCE(p->watermark, nr_pids);
}
/**
* pids_cancel - uncharge the local pid count
* @pids: the pid cgroup state
* @num: the number of pids to cancel
*
* This function will WARN if the pid count goes under 0, because such a case is
* a bug in the pids controller proper.
*/
static void pids_cancel(struct pids_cgroup *pids, int num)
{
/*
* A negative count (or overflow for that matter) is invalid,
* and indicates a bug in the `pids` controller proper.
*/
WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter));
}
/**
* pids_uncharge - hierarchically uncharge the pid count
* @pids: the pid cgroup state
* @num: the number of pids to uncharge
*/
static void pids_uncharge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;
for (p = pids; parent_pids(p); p = parent_pids(p))
pids_cancel(p, num);
}
/**
* pids_charge - hierarchically charge the pid count
* @pids: the pid cgroup state
* @num: the number of pids to charge
*
* This function does *not* follow the pid limit set. It cannot fail and the new
* pid count may exceed the limit. This is only used for reverting failed
* attaches, where there is no other way out than violating the limit.
*/
static void pids_charge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;
for (p = pids; parent_pids(p); p = parent_pids(p)) {
int64_t new = atomic64_add_return(num, &p->counter);
pids_update_watermark(p, new);
}
}
/**
* pids_try_charge - hierarchically try to charge the pid count
* @pids: the pid cgroup state
* @num: the number of pids to charge
* @fail: storage of pid cgroup causing the fail
*
* This function follows the set limit. It will fail if the charge would cause
* the new value to exceed the hierarchical limit. Returns 0 if the charge
* succeeded, otherwise -EAGAIN.
*/
static int pids_try_charge(struct pids_cgroup *pids, int num, struct pids_cgroup **fail)
{
struct pids_cgroup *p, *q;
	for (p = pids; parent_pids(p); p = parent_pids(p)) {
		int64_t new = atomic64_add_return(num, &p->counter);
int64_t limit = atomic64_read(&p->limit);
/*
* Since new is capped to the maximum number of pid_t, if
* p->limit is %PIDS_MAX then we know that this test will never
* fail.
*/
if (new > limit) {
*fail = p;
goto revert;
}
/*
* Not technically accurate if we go over limit somewhere up
* the hierarchy, but that's tolerable for the watermark.
*/
pids_update_watermark(p, new);
}
return 0;
revert:
	for (q = pids; q != p; q = parent_pids(q))
		pids_cancel(q, num);
pids_cancel(p, num);
return -EAGAIN;
}
static int pids_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *dst_css;
cgroup_taskset_for_each(task, dst_css, tset) {
struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
/*
* No need to pin @old_css between here and cancel_attach()
* because cgroup core protects it from being freed before
* the migration completes or fails.
*/
old_css = task_css(task, pids_cgrp_id);
old_pids = css_pids(old_css);
pids_charge(pids, 1);
pids_uncharge(old_pids, 1);
}
return 0;
}
static void pids_cancel_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *dst_css;
cgroup_taskset_for_each(task, dst_css, tset) {
struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
old_css = task_css(task, pids_cgrp_id);
old_pids = css_pids(old_css);
pids_charge(old_pids, 1);
pids_uncharge(pids, 1);
}
}
static void pids_event(struct pids_cgroup *pids_forking,
struct pids_cgroup *pids_over_limit)
{
struct pids_cgroup *p = pids_forking;
/* Only log the first time limit is hit. */
if (atomic64_inc_return(&p->events_local[PIDCG_FORKFAIL]) == 1) {
pr_info("cgroup: fork rejected by pids controller in ");
pr_cont_cgroup_path(p->css.cgroup);
pr_cont("\n");
}
if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) ||
cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) {
		cgroup_file_notify(&p->events_local_file);
		return;
}
atomic64_inc(&pids_over_limit->events_local[PIDCG_MAX]);
cgroup_file_notify(&pids_over_limit->events_local_file);
for (p = pids_over_limit; parent_pids(p); p = parent_pids(p)) {
atomic64_inc(&p->events[PIDCG_MAX]);
cgroup_file_notify(&p->events_file);
}
}
/*
* task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
* on cgroup_threadgroup_change_begin() held by copy_process().
*/
static int pids_can_fork(struct task_struct *task, struct css_set *cset)
{
struct pids_cgroup *pids, *pids_over_limit;
int err;
pids = css_pids(cset->subsys[pids_cgrp_id]);
err = pids_try_charge(pids, 1, &pids_over_limit);
if (err)
		pids_event(pids, pids_over_limit);

	return err;
}
static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)
{
struct pids_cgroup *pids;
pids = css_pids(cset->subsys[pids_cgrp_id]);
pids_uncharge(pids, 1);
}
static void pids_release(struct task_struct *task)
{
struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
pids_uncharge(pids, 1);
}
static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cgroup_subsys_state *css = of_css(of);
struct pids_cgroup *pids = css_pids(css);
int64_t limit;
int err;
buf = strstrip(buf);
if (!strcmp(buf, PIDS_MAX_STR)) {
limit = PIDS_MAX;
goto set_limit;
}
err = kstrtoll(buf, 0, &limit);
if (err)
return err;
if (limit < 0 || limit >= PIDS_MAX)
return -EINVAL;
set_limit:
/*
* Limit updates don't need to be mutex'd, since it isn't
* critical that any racing fork()s follow the new limit.
*/
atomic64_set(&pids->limit, limit);
return nbytes;
}
static int pids_max_show(struct seq_file *sf, void *v)
{
struct cgroup_subsys_state *css = seq_css(sf);
struct pids_cgroup *pids = css_pids(css);
int64_t limit = atomic64_read(&pids->limit);
if (limit >= PIDS_MAX)
seq_printf(sf, "%s\n", PIDS_MAX_STR);
else
seq_printf(sf, "%lld\n", limit);
return 0;
}
static s64 pids_current_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct pids_cgroup *pids = css_pids(css);
return atomic64_read(&pids->counter);
}
static s64 pids_peak_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct pids_cgroup *pids = css_pids(css);
return READ_ONCE(pids->watermark);
}
static int __pids_events_show(struct seq_file *sf, bool local)
{
struct pids_cgroup *pids = css_pids(seq_css(sf));
enum pidcg_event pe = PIDCG_MAX;
atomic64_t *events;
if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) ||
cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) {
pe = PIDCG_FORKFAIL;
local = true;
}
events = local ? pids->events_local : pids->events;
seq_printf(sf, "max %lld\n", (s64)atomic64_read(&events[pe]));
return 0;
}
static int pids_events_show(struct seq_file *sf, void *v)
{
__pids_events_show(sf, false);
return 0;
}
static int pids_events_local_show(struct seq_file *sf, void *v)
{
__pids_events_show(sf, true);
return 0;
}
static struct cftype pids_files[] = {
{
.name = "max",
.write = pids_max_write,
.seq_show = pids_max_show,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "current",
.read_s64 = pids_current_read,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "peak",
.flags = CFTYPE_NOT_ON_ROOT,
.read_s64 = pids_peak_read,
},
{
.name = "events",
.seq_show = pids_events_show,
.file_offset = offsetof(struct pids_cgroup, events_file),
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "events.local",
.seq_show = pids_events_local_show,
.file_offset = offsetof(struct pids_cgroup, events_local_file),
.flags = CFTYPE_NOT_ON_ROOT,
},
{ } /* terminate */
};
static struct cftype pids_files_legacy[] = {
{
.name = "max",
.write = pids_max_write,
.seq_show = pids_max_show,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "current",
.read_s64 = pids_current_read,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "peak",
.flags = CFTYPE_NOT_ON_ROOT,
.read_s64 = pids_peak_read,
},
{
.name = "events",
.seq_show = pids_events_show,
.file_offset = offsetof(struct pids_cgroup, events_file),
.flags = CFTYPE_NOT_ON_ROOT,
},
{ } /* terminate */
};
struct cgroup_subsys pids_cgrp_subsys = {
.css_alloc = pids_css_alloc,
.css_free = pids_css_free,
.can_attach = pids_can_attach,
.cancel_attach = pids_cancel_attach,
.can_fork = pids_can_fork,
.cancel_fork = pids_cancel_fork,
.release = pids_release,
.legacy_cftypes = pids_files_legacy,
.dfl_cftypes = pids_files,
.threaded = true,
};
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
* Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
*/
#include <linux/sched.h> /* test_thread_flag(), ... */
#include <linux/sched/task_stack.h> /* task_stack_*(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/memblock.h> /* max_low_pfn */
#include <linux/kfence.h> /* kfence_handle_page_fault */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <linux/context_tracking.h> /* exception_enter(), ... */
#include <linux/uaccess.h> /* faulthandler_disabled() */
#include <linux/efi.h> /* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>
#include <linux/mm.h> /* find_and_lock_vma() */
#include <linux/vmalloc.h>
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/fixmap.h> /* VSYSCALL_ADDR */
#include <asm/vsyscall.h> /* emulate_vsyscall */
#include <asm/vm86.h> /* struct vm86 */
#include <asm/mmu_context.h> /* vma_pkey() */
#include <asm/efi.h> /* efi_crash_gracefully_on_page_fault()*/
#include <asm/desc.h> /* store_idt(), ... */
#include <asm/cpu_entry_area.h> /* exception stack */
#include <asm/pgtable_areas.h> /* VMALLOC_START, ... */
#include <asm/kvm_para.h> /* kvm_handle_async_pf */
#include <asm/vdso.h> /* fixup_vdso_exception() */
#include <asm/irq_stack.h>
#include <asm/fred.h>
#include <asm/sev.h> /* snp_dump_hva_rmpentry() */
#define CREATE_TRACE_POINTS
#include <trace/events/exceptions.h>
/*
* Returns 0 if mmiotrace is disabled, or if the fault is not
* handled by mmiotrace:
*/
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
if (unlikely(is_kmmio_active()))
if (kmmio_handler(regs, addr) == 1)
return -1;
return 0;
}
/*
* Prefetch quirks:
*
* 32-bit mode:
*
* Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
* Check that here and ignore it. This is AMD erratum #91.
*
* 64-bit mode:
*
* Sometimes the CPU reports invalid exceptions on prefetch.
* Check that here and ignore it.
*
* Opcode checker based on code by Richard Brunner.
*/
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
unsigned char opcode, int *prefetch)
{
unsigned char instr_hi = opcode & 0xf0;
unsigned char instr_lo = opcode & 0x0f;
switch (instr_hi) {
case 0x20:
case 0x30:
/*
* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
* In X86_64 long mode, the CPU will signal invalid
* opcode if some of these prefixes are present, so
* X86_64 will never get here anyway.
*/
return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
case 0x40:
/*
* In 64-bit mode 0x40..0x4F are valid REX prefixes
*/
return (!user_mode(regs) || user_64bit_mode(regs));
#endif
case 0x60:
/* 0x64 thru 0x67 are valid prefixes in all modes. */
return (instr_lo & 0xC) == 0x4;
case 0xF0:
/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
return !instr_lo || (instr_lo>>1) == 1;
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
if (get_kernel_nofault(opcode, instr))
return 0;
*prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
return 0;
default:
return 0;
}
}
static bool is_amd_k8_pre_npt(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
return unlikely(IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
c->x86_vendor == X86_VENDOR_AMD &&
c->x86 == 0xf && c->x86_model < 0x40);
}
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
unsigned char *max_instr;
unsigned char *instr;
int prefetch = 0;
/* Erratum #91 affects AMD K8, pre-NPT CPUs */
if (!is_amd_k8_pre_npt())
return 0;
/*
* If it was an exec (instruction fetch) fault on an NX page, then
* do not ignore the fault:
*/
if (error_code & X86_PF_INSTR)
return 0;
instr = (void *)convert_ip_to_linear(current, regs);
max_instr = instr + 15;
/*
* This code has historically always bailed out if IP points to a
* not-present page (e.g. due to a race). No one has ever
* complained about this.
*/
pagefault_disable();
while (instr < max_instr) {
unsigned char opcode;
if (user_mode(regs)) {
if (get_user(opcode, (unsigned char __user *) instr))
break;
} else {
if (get_kernel_nofault(opcode, instr))
break;
}
instr++;
if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
break;
}
pagefault_enable();
return prefetch;
}
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
unsigned index = pgd_index(address);
pgd_t *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd += index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
return NULL;
/*
* set_pgd(pgd, *pgd_k); here would be useless on PAE
* and redundant with the set_pmd() on non-PAE. As would
* set_p4d/set_pud.
*/
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d_k))
return NULL;
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
return NULL;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (pmd_present(*pmd) != pmd_present(*pmd_k))
set_pmd(pmd, *pmd_k);
if (!pmd_present(*pmd_k))
return NULL;
else
BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
return pmd_k;
}
/*
* Handle a fault on the vmalloc or module mapping area
*
* This is needed because there is a race condition between the time
* when the vmalloc mapping code updates the PMD to the point in time
* where it synchronizes this update with the other page-tables in the
* system.
*
* In this race window another thread/CPU can map an area on the same
* PMD, find it already present, and not synchronize it with the
* rest of the system yet. As a result v[mz]alloc might return areas
* which are not mapped in every page-table in the system, causing an
* unhandled page-fault when they are accessed.
*/
static noinline int vmalloc_fault(unsigned long address)
{
unsigned long pgd_paddr;
pmd_t *pmd_k;
pte_t *pte_k;
/* Make sure we are in vmalloc area: */
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "current" here. We might be inside
* an interrupt in the middle of a task switch.
*/
pgd_paddr = read_cr3_pa();
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
if (!pmd_k)
return -1;
if (pmd_leaf(*pmd_k))
return 0;
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
for (addr = start & PMD_MASK;
addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
addr += PMD_SIZE) {
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
spinlock_t *pgt_lock;
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
vmalloc_sync_one(page_address(page), addr);
spin_unlock(pgt_lock);
}
spin_unlock(&pgd_lock);
}
}
static bool low_pfn(unsigned long pfn)
{
return pfn < max_low_pfn;
}
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3_pa());
pgd_t *pgd = &base[pgd_index(address)];
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
#ifdef CONFIG_X86_PAE
pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
p4d = p4d_offset(pgd, address);
pud = pud_offset(p4d, address);
pmd = pmd_offset(pud, address);
pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde
/*
* We must not directly access the pte in the highpte
* case if the page table is located in highmem.
* And let's rather not kmap-atomic the pte, just in case
* it's allocated already:
*/
if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
pr_cont("\n");
}
#else /* CONFIG_X86_64: */
#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif
static int bad_address(void *p)
{
unsigned long dummy;
return get_kernel_nofault(dummy, (unsigned long *)p);
}
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3_pa());
pgd_t *pgd = base + pgd_index(address);
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (bad_address(pgd))
goto bad;
pr_info("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd))
goto out;
p4d = p4d_offset(pgd, address);
if (bad_address(p4d))
goto bad;
pr_cont("P4D %lx ", p4d_val(*p4d));
if (!p4d_present(*p4d) || p4d_leaf(*p4d))
goto out;
pud = pud_offset(p4d, address);
if (bad_address(pud))
goto bad;
pr_cont("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud) || pud_leaf(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (bad_address(pmd))
goto bad;
pr_cont("PMD %lx ", pmd_val(*pmd));
if (!pmd_present(*pmd) || pmd_leaf(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
if (bad_address(pte))
goto bad;
pr_cont("PTE %lx", pte_val(*pte));
out:
pr_cont("\n");
return;
bad:
pr_info("BAD\n");
}
#endif /* CONFIG_X86_64 */
/*
* Workaround for K8 erratum #93 & buggy BIOS.
*
* BIOS SMM functions are required to use a specific workaround
* to avoid corruption of the 64bit RIP register on C stepping K8.
*
* A lot of BIOSes that didn't get tested properly miss this.
*
* The OS sees this as a page fault with the upper 32bits of RIP cleared.
* Try to work around it here.
*
* Note we only handle faults in kernel here.
* Does nothing on 32-bit.
*/
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
|| boot_cpu_data.x86 != 0xf)
return 0;
if (user_mode(regs))
return 0;
if (address != regs->ip)
return 0;
if ((address >> 32) != 0)
return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
}
#endif
return 0;
}
/*
* Work around K8 erratum #100: K8 in compat mode occasionally jumps
* to illegal addresses >4GB.
*
* We catch this in the page fault handler because these addresses
* are not reachable. Just detect this case and return. Any code
* segment in LDT is compatibility mode.
*/
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
return 1;
#endif
return 0;
}
/* Pentium F0 0F C7 C8 bug workaround: */
static int is_f00f_bug(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
if (boot_cpu_has_bug(X86_BUG_F00F) && !(error_code & X86_PF_USER) &&
idt_is_f00f_address(address)) {
handle_invalid_op(regs);
return 1;
}
#endif
return 0;
}
static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
{
u32 offset = (index >> 3) * sizeof(struct desc_struct);
unsigned long addr;
struct ldttss_desc desc;
if (index == 0) {
pr_alert("%s: NULL\n", name);
return;
}
if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
return;
}
if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
sizeof(struct ldttss_desc))) {
pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
name, index);
return;
}
addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
addr |= ((u64)desc.base3 << 32);
#endif
pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
if (!oops_may_print())
return;
if (error_code & X86_PF_INSTR) {
unsigned int level;
bool nx, rw;
pgd_t *pgd;
pte_t *pte;
pgd = __va(read_cr3_pa());
pgd += pgd_index(address);
pte = lookup_address_in_pgd_attr(pgd, address, &level, &nx, &rw);
if (pte && pte_present(*pte) && (!pte_exec(*pte) || nx))
pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
from_kuid(&init_user_ns, current_uid()));
if (pte && pte_present(*pte) && pte_exec(*pte) && !nx &&
(pgd_flags(*pgd) & _PAGE_USER) &&
(__read_cr4() & X86_CR4_SMEP))
pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
from_kuid(&init_user_ns, current_uid()));
}
if (address < PAGE_SIZE && !user_mode(regs))
pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
(void *)address);
else
pr_alert("BUG: unable to handle page fault for address: %px\n",
(void *)address);
pr_alert("#PF: %s %s in %s mode\n",
(error_code & X86_PF_USER) ? "user" : "supervisor",
(error_code & X86_PF_INSTR) ? "instruction fetch" :
(error_code & X86_PF_WRITE) ? "write access" :
"read access",
user_mode(regs) ? "user" : "kernel");
pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
!(error_code & X86_PF_PROT) ? "not-present page" :
(error_code & X86_PF_RSVD) ? "reserved bit violation" :
(error_code & X86_PF_PK) ? "protection keys violation" :
(error_code & X86_PF_RMP) ? "RMP violation" :
"permissions violation");
if (!(error_code & X86_PF_USER) && user_mode(regs)) {
struct desc_ptr idt, gdt;
u16 ldtr, tr;
/*
* This can happen for quite a few reasons. The more obvious
* ones are faults accessing the GDT, or LDT. Perhaps
* surprisingly, if the CPU tries to deliver a benign or
* contributory exception from user code and gets a page fault
* during delivery, the page fault can be delivered as though
* it originated directly from user code. This could happen
* due to wrong permissions on the IDT, GDT, LDT, TSS, or
* kernel or IST stack.
*/
store_idt(&idt);
/* Usable even on Xen PV -- it's just slow. */
native_store_gdt(&gdt);
pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
idt.address, idt.size, gdt.address, gdt.size);
store_ldt(ldtr);
show_ldttss(&gdt, "LDTR", ldtr);
store_tr(tr);
show_ldttss(&gdt, "TR", tr);
}
dump_pagetable(address);
if (error_code & X86_PF_RMP)
snp_dump_hva_rmpentry(address);
}
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
struct task_struct *tsk;
unsigned long flags;
int sig;
flags = oops_begin();
tsk = current;
sig = SIGKILL;
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
tsk->comm, address);
dump_pagetable(address);
if (__die("Bad pagetable", regs, error_code))
sig = 0;
oops_end(flags, regs, sig);
}
static void sanitize_error_code(unsigned long address,
unsigned long *error_code)
{
/*
* To avoid leaking information about the kernel page
* table layout, pretend that user-mode accesses to
* kernel addresses are always protection faults.
*
* NB: This means that failed vsyscalls with vsyscall=none
* will have the PROT bit. This doesn't leak any
* information and does not appear to cause any problems.
*/
	if (address >= TASK_SIZE_MAX)
		*error_code |= X86_PF_PROT;
}
static void set_signal_archinfo(unsigned long address,
unsigned long error_code)
{
struct task_struct *tsk = current;
tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | X86_PF_USER;
tsk->thread.cr2 = address;
}
static noinline void
page_fault_oops(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
#ifdef CONFIG_VMAP_STACK
struct stack_info info;
#endif
unsigned long flags;
int sig;
if (user_mode(regs)) {
/*
* Implicit kernel access from user mode? Skip the stack
* overflow and EFI special cases.
*/
goto oops;
}
#ifdef CONFIG_VMAP_STACK
/*
* Stack overflow? During boot, we can fault near the initial
* stack in the direct map, but that's not an overflow -- check
* that we're in vmalloc space to avoid this.
*/
if (is_vmalloc_addr((void *)address) &&
get_stack_guard_info((void *)address, &info)) {
/*
* We're likely to be running with very little stack space
* left. It's plausible that we'd hit this condition but
* double-fault even before we get this far, in which case
* we're fine: the double-fault handler will deal with it.
*
* We don't want to make it all the way into the oops code
* and then double-fault, though, because we're likely to
* break the console driver and lose most of the stack dump.
*/
call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*),
handle_stack_overflow,
ASM_CALL_ARG3,
, [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));
BUG();
}
#endif
/*
* Buggy firmware could access regions which might page fault. If
* this happens, EFI has a special OOPS path that will try to
* avoid hanging the system.
*/
if (IS_ENABLED(CONFIG_EFI))
efi_crash_gracefully_on_page_fault(address);
/* Only not-present faults should be handled by KFENCE. */
if (!(error_code & X86_PF_PROT) &&
kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
return;
oops:
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice:
*/
flags = oops_begin();
show_fault_oops(regs, error_code, address);
if (task_stack_end_corrupted(current))
printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
sig = SIGKILL;
if (__die("Oops", regs, error_code))
sig = 0;
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_DEFAULT "CR2: %016lx\n", address);
oops_end(flags, regs, sig);
}
static noinline void
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int signal, int si_code,
u32 pkey)
{
WARN_ON_ONCE(user_mode(regs));
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
return;
/*
* AMD erratum #91 manifests as a spurious page fault on a PREFETCH
* instruction.
*/
if (is_prefetch(regs, error_code, address))
return;
page_fault_oops(regs, error_code, address);
}
/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
*/
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct task_struct *tsk)
{
const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
/* This is a racy snapshot, but it's better than nothing. */
int cpu = raw_smp_processor_id();
if (!unhandled_signal(tsk, SIGSEGV))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
loglvl, tsk->comm, task_pid_nr(tsk), address,
(void *)regs->ip, (void *)regs->sp, error_code);
print_vma_addr(KERN_CONT " in ", regs->ip);
/*
* Dump the likely CPU where the fatal segfault happened.
* This can help identify faulty hardware.
*/
printk(KERN_CONT " likely on CPU %d (core %d, socket %d)", cpu,
topology_core_id(cpu), topology_physical_package_id(cpu));
printk(KERN_CONT "\n");
show_opcodes(regs, loglvl);
}
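/*
 * Illustrative output of show_signal_msg() above (values are made up; the
 * trailing VMA information comes from print_vma_addr()):
 *
 *	myprog[1234]: segfault at 10 ip 0000561234567890 sp 00007ffc9abcdef0
 *	error 4 in myprog[561234560000+1000] likely on CPU 2 (core 2, socket 0)
 */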
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, u32 pkey, int si_code)
{
struct task_struct *tsk = current;
if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address,
SIGSEGV, si_code, pkey);
return;
}
if (!(error_code & X86_PF_USER)) {
/* Implicit user access to kernel memory -- just oops */
page_fault_oops(regs, error_code, address);
return;
}
/*
* User mode accesses just cause a SIGSEGV.
* It's possible to have interrupts off here:
*/
local_irq_enable();
/*
* Valid to do another page fault here because this one came
* from user space:
*/
if (is_prefetch(regs, error_code, address))
return;
if (is_errata100(regs, address))
return;
sanitize_error_code(address, &error_code);
if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
return;
if (likely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
set_signal_archinfo(address, error_code);
if (si_code == SEGV_PKUERR)
force_sig_pkuerr((void __user *)address, pkey);
else
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
local_irq_disable();
}
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
}
static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct mm_struct *mm,
struct vm_area_struct *vma, u32 pkey, int si_code)
{
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
if (mm)
mmap_read_unlock(mm);
else
vma_end_read(vma);
__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}
static inline bool bad_area_access_from_pkeys(unsigned long error_code,
struct vm_area_struct *vma)
{
/* This code is always called on the current mm */
bool foreign = false;
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return false;
if (error_code & X86_PF_PK)
return true;
/* this checks permission keys on the VMA: */
if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
(error_code & X86_PF_INSTR), foreign))
return true;
return false;
}
static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct mm_struct *mm,
struct vm_area_struct *vma)
{
/*
* This OSPKE check is not strictly necessary at runtime.
* But, doing it this way allows compiler optimizations
* if pkeys are compiled out.
*/
if (bad_area_access_from_pkeys(error_code, vma)) {
/*
* A protection key fault means that the PKRU value did not allow
* access to some PTE. Userspace can figure out what PKRU was
* from the XSAVE state. This function captures the pkey from
* the vma and passes it to userspace so userspace can discover
* which protection key was set on the PTE.
*
* If we get here, we know that the hardware signaled a X86_PF_PK
* fault and that there was a VMA once we got in the fault
* handler. It does *not* guarantee that the VMA we find here
* was the one that we faulted on.
*
* 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
* 2. T1 : set PKRU to deny access to pkey=4, touches page
* 3. T1 : faults...
* 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
* 5. T1 : enters fault handler, takes mmap_lock, etc...
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
u32 pkey = vma_pkey(vma);
__bad_area(regs, error_code, address, mm, vma, pkey, SEGV_PKUERR);
} else {
__bad_area(regs, error_code, address, mm, vma, 0, SEGV_ACCERR);
}
}
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
vm_fault_t fault)
{
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address,
SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
return;
}
/* User-space => ok to do another page fault: */
if (is_prefetch(regs, error_code, address))
return;
sanitize_error_code(address, &error_code);
if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
return;
set_signal_archinfo(address, error_code);
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
struct task_struct *tsk = current;
unsigned lsb = 0;
pr_err(
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
tsk->comm, tsk->pid, address);
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
if (fault & VM_FAULT_HWPOISON)
lsb = PAGE_SHIFT;
force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
return;
}
#endif
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}
static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
return 0;
if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
return 0;
return 1;
}
/*
* Handle a spurious fault caused by a stale TLB entry.
*
* This allows us to lazily refresh the TLB when increasing the
* permissions of a kernel page (RO -> RW or NX -> X). Doing it
* eagerly is very expensive since that implies doing a full
* cross-processor TLB flush, even if no stale TLB entries exist
* on other processors.
*
* Spurious faults may only occur if the TLB contains an entry with
* fewer permissions than the page table entry. Non-present (P = 0)
* and reserved bit (R = 1) faults are never spurious.
*
* There are no security implications to leaving a stale TLB when
* increasing the permissions on a page.
*
* Returns non-zero if a spurious fault was handled, zero otherwise.
*
* See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
* (Optional Invalidation).
*/
static noinline int
spurious_kernel_fault(unsigned long error_code, unsigned long address)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret;
/*
* Only writes to RO or instruction fetches from NX may cause
* spurious faults.
*
* These could be from user or supervisor accesses but the TLB
* is only lazily flushed after a kernel mapping protection
* change, so user accesses are not expected to cause spurious
* faults.
*/
if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
error_code != (X86_PF_INSTR | X86_PF_PROT))
return 0;
pgd = init_mm.pgd + pgd_index(address);
if (!pgd_present(*pgd))
return 0;
p4d = p4d_offset(pgd, address);
if (!p4d_present(*p4d))
return 0;
if (p4d_leaf(*p4d))
return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
return 0;
if (pud_leaf(*pud))
return spurious_kernel_fault_check(error_code, (pte_t *) pud);
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return 0;
if (pmd_leaf(*pmd))
return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
pte = pte_offset_kernel(pmd, address);
if (!pte_present(*pte))
return 0;
ret = spurious_kernel_fault_check(error_code, pte);
if (!ret)
return 0;
/*
* Make sure we have permissions in PMD.
* If not, then there's a bug in the page tables:
*/
ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
return ret;
}
NOKPROBE_SYMBOL(spurious_kernel_fault);
int show_unhandled_signals = 1;
static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
/* This is only called for the current mm, so: */
bool foreign = false;
/*
* Read or write was blocked by protection keys. This is
* always an unconditional error and can never result in
* a follow-up action to resolve the fault, like a COW.
*/
if (error_code & X86_PF_PK)
return 1;
/*
* SGX hardware blocked the access. This usually happens
* when the enclave memory contents have been destroyed, like
* after a suspend/resume cycle. In any case, the kernel can't
* fix the cause of the fault. Handle the fault as an access
* error even in cases where no actual access violation
* occurred. This allows userspace to rebuild the enclave in
* response to the signal.
*/
if (unlikely(error_code & X86_PF_SGX))
return 1;
/*
* Make sure to check the VMA so that we do not perform
* faults just to hit an X86_PF_PK as soon as we fill in a
* page.
*/
if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
(error_code & X86_PF_INSTR), foreign))
return 1;
/*
* Shadow stack accesses (PF_SHSTK=1) are only permitted to
* shadow stack VMAs. All other accesses result in an error.
*/
if (error_code & X86_PF_SHSTK) {
if (unlikely(!(vma->vm_flags & VM_SHADOW_STACK)))
return 1;
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
return 0;
}
if (error_code & X86_PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(vma->vm_flags & VM_SHADOW_STACK))
return 1;
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
return 0;
}
/* read, present: */
if (unlikely(error_code & X86_PF_PROT))
return 1;
/* read, not present: */
if (unlikely(!vma_is_accessible(vma)))
return 1;
return 0;
}
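/*
 * In short, access_error() above rejects: shadow-stack faults outside
 * writable shadow-stack VMAs, ordinary write faults without VM_WRITE (or on
 * shadow-stack VMAs), protection faults on present pages, and not-present
 * faults on VMAs that are not accessible at all.
 */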
bool fault_in_kernel_space(unsigned long address)
{
/*
* On 64-bit systems, the vsyscall page is at an address above
* TASK_SIZE_MAX, but is not considered part of the kernel
* address space.
*/
if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
return false;
return address >= TASK_SIZE_MAX;
}
/*
* Called for all faults where 'address' is part of the kernel address
* space. Might get called for faults that originate from *code* that
* ran in userspace or the kernel.
*/
static void
do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
unsigned long address)
{
/*
* Protection keys exceptions only happen on user pages. We
* have no user pages in the kernel portion of the address
* space, so do not expect them here.
*/
WARN_ON_ONCE(hw_error_code & X86_PF_PK);
#ifdef CONFIG_X86_32
/*
* We can fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* Before doing this on-demand faulting, ensure that the
* fault is not any of the following:
* 1. A fault on a PTE with a reserved bit set.
* 2. A fault caused by a user-mode access. (Do not demand-
* fault kernel memory due to user-mode accesses).
* 3. A fault caused by a page-level protection violation.
* (A demand fault would be on a non-present page which
* would have X86_PF_PROT==0).
*
* This is only needed to close a race condition on x86-32 in
* the vmalloc mapping/unmapping code. See the comment above
* vmalloc_fault() for details. On x86-64 the race does not
* exist as the vmalloc mappings don't need to be synchronized
* there.
*/
if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
}
#endif
if (is_f00f_bug(regs, hw_error_code, address))
return;
/* Was the fault spurious, caused by lazy TLB invalidation? */
if (spurious_kernel_fault(hw_error_code, address))
return;
/* kprobes don't want to hook the spurious faults: */
if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
* Note, despite being a "bad area", there are quite a few
* acceptable reasons to get here, such as erratum fixups
* and handling kernel code that can fault, like get_user().
*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock:
*/
bad_area_nosemaphore(regs, hw_error_code, address);
}
NOKPROBE_SYMBOL(do_kern_addr_fault);
/*
* Handle faults in the user portion of the address space. Nothing in here
* should check X86_PF_USER without a specific justification: for almost
* all purposes, we should treat a normal kernel access to user memory
* (e.g. get_user(), put_user(), etc.) the same as the WRUSS instruction.
* The one exception is AC flag handling, which is, per the x86
* architecture, special for WRUSS.
*/
static inline
void do_user_addr_fault(struct pt_regs *regs,
unsigned long error_code,
unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
mm = tsk->mm;
if (unlikely((error_code & (X86_PF_USER | X86_PF_INSTR)) == X86_PF_INSTR)) {
/*
* Whoops, this is kernel mode code trying to execute from
* user memory. Unless this is AMD erratum #93, which
* corrupts RIP such that it looks like a user address,
* this is unrecoverable. Don't even try to look up the
* VMA or look for extable entries.
*/
if (is_errata93(regs, address))
return;
page_fault_oops(regs, error_code, address);
return;
}
/* kprobes don't want to hook the spurious faults: */
if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
* Reserved bits are never expected to be set on
* entries in the user portion of the page tables.
*/
if (unlikely(error_code & X86_PF_RSVD))
pgtable_bad(regs, error_code, address);
/*
* If SMAP is on, check for invalid kernel (supervisor) access to user
* pages in the user address space. The odd case here is WRUSS,
* which, according to the preliminary documentation, does not respect
* SMAP and will have the USER bit set so, in all cases, SMAP
* enforcement appears to be consistent with the USER bit.
*/
if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
!(error_code & X86_PF_USER) &&
!(regs->flags & X86_EFLAGS_AC))) {
/*
* No extable entry here. This was a kernel access to an
* invalid pointer. get_kernel_nofault() will not get here.
*/
page_fault_oops(regs, error_code, address);
return;
}
/*
* If we're in an interrupt, have no user context or are running
* in a region with pagefaults disabled then we must not take the fault
*/
if (unlikely(faulthandler_disabled() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
/* Legacy check - remove this after verifying that it doesn't trigger */
if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* Read-only permissions can not be expressed in shadow stack PTEs.
* Treat all shadow stack accesses as WRITE faults. This ensures
* that the MM will prepare everything (e.g., break COW) such that
* maybe_mkwrite() can create a proper shadow stack PTE.
*/
if (error_code & X86_PF_SHSTK)
flags |= FAULT_FLAG_WRITE;
if (error_code & X86_PF_WRITE)
flags |= FAULT_FLAG_WRITE;
if (error_code & X86_PF_INSTR)
flags |= FAULT_FLAG_INSTRUCTION;
/*
* We set FAULT_FLAG_USER based on the register state, not
* based on X86_PF_USER. User space accesses that cause
* system page faults are still user accesses.
*/
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
#ifdef CONFIG_X86_64
/*
* Faults in the vsyscall page might need emulation. The
* vsyscall page is at a high address (>PAGE_OFFSET), but is
* considered to be part of the user address space.
*
* The vsyscall page does not have a "real" VMA, so do this
* emulation before we go searching for VMAs.
*
* PKRU never rejects instruction fetches, so we don't need
* to consider the PF_PK bit.
*/
if (is_vsyscall_vaddr(address)) {
if (emulate_vsyscall(error_code, regs, address))
return;
}
#endif
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
vma = lock_vma_under_rcu(mm, address);
if (!vma)
goto lock_mmap;
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address, NULL, vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
return;
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
if (fault & VM_FAULT_MAJOR)
flags |= FAULT_FLAG_TRIED;
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
kernelmode_fixup_or_oops(regs, error_code, address,
SIGBUS, BUS_ADRERR,
ARCH_DEFAULT_PKEY);
return;
}
lock_mmap:
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address, mm, vma);
return;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
* we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
*
* Note that handle_userfault() may also release and reacquire mmap_lock
* (and not return with VM_FAULT_RETRY), when returning to userland to
* repeat the page fault later with a VM_FAULT_NOPAGE retval
* (potentially after handling any pending signal during the return to
* userland). The return to userland is identified whenever
* FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
/*
* Quick path to respond to signals. The core mm code
* has unlocked the mm for us if we get here.
*/
if (!user_mode(regs))
kernelmode_fixup_or_oops(regs, error_code, address,
SIGBUS, BUS_ADRERR,
ARCH_DEFAULT_PKEY);
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
/*
* If we need to retry the mmap_lock has already been released,
* and if there is a fatal signal pending there is no guarantee
* that we made any progress. Handle this case first.
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
goto retry;
}
mmap_read_unlock(mm);
done:
if (likely(!(fault & VM_FAULT_ERROR)))
return;
if (fatal_signal_pending(current) && !user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address,
0, 0, ARCH_DEFAULT_PKEY);
return;
}
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address,
SIGSEGV, SEGV_MAPERR,
ARCH_DEFAULT_PKEY);
return;
}
/*
* We ran out of memory, call the OOM killer, and return the
* userspace (which will retry the fault, or kill us if we got
* oom-killed):
*/
pagefault_out_of_memory();
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
else if (fault & VM_FAULT_SIGSEGV)
bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}
}
NOKPROBE_SYMBOL(do_user_addr_fault);
static __always_inline void
trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
if (user_mode(regs))
trace_page_fault_user(address, regs, error_code);
else
trace_page_fault_kernel(address, regs, error_code);
}
static __always_inline void
handle_page_fault(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
trace_page_fault_entries(regs, error_code, address);
if (unlikely(kmmio_fault(regs, address)))
return;
/* Was the fault on kernel-controlled part of the address space? */
if (unlikely(fault_in_kernel_space(address))) {
do_kern_addr_fault(regs, error_code, address);
} else {
do_user_addr_fault(regs, error_code, address);
/*
* User address page fault handling might have reenabled
* interrupts. Fixing up all potential exit points of
* do_user_addr_fault() and its leaf functions is just not
* doable w/o creating an unholy mess or turning the code
* upside down.
*/
local_irq_disable();
}
}
DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
{
irqentry_state_t state;
unsigned long address;
address = cpu_feature_enabled(X86_FEATURE_FRED) ? fred_event_data(regs) : read_cr2();
/*
* KVM uses #PF vector to deliver 'page not present' events to guests
* (asynchronous page fault mechanism). The event happens when a
* userspace task is trying to access some valid (from guest's point of
* view) memory which is not currently mapped by the host (e.g. the
* memory is swapped out). Note, the corresponding "page ready" event
* which is injected when the memory becomes available, is delivered via
* an interrupt mechanism and not a #PF exception
* (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
*
* We are relying on the interrupted context being sane (valid RSP,
* relevant locks not held, etc.), which is fine as long as the
* interrupted context had IF=1. We are also relying on the KVM
* async pf type field and CR2 being read consistently instead of
* getting values from real and async page faults mixed up.
*
* Fingers crossed.
*
* The async #PF handling code takes care of idtentry handling
* itself.
*/
if (kvm_handle_async_pf(regs, (u32)address))
return;
/*
* Entry handling for valid #PF from kernel mode is slightly
* different: RCU is already watching and ct_irq_enter() must not
* be invoked because a kernel fault on a user space address might
* sleep.
*
* In case the fault hit an RCU idle region, the conditional entry
* code re-enables RCU to avoid subsequent wreckage, which helps
* debuggability.
*/
state = irqentry_enter(regs);
instrumentation_begin();
handle_page_fault(regs, error_code, address);
instrumentation_end();
irqentry_exit(regs, state);
}
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_counter.h>
#include "core_priv.h"
#include "restrack.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
static struct workqueue_struct *ib_unreg_wq;
/*
* Each of the three rwsem locks (devices, clients, client_data) protects the
* xarray of the same name. Specifically it allows the caller to assert that
* the MARK will/will not be changing under the lock, and for devices and
* clients, that the value in the xarray is still a valid pointer. Change of
* the MARK is linked to the object state, so holding the lock and testing the
* MARK also asserts that the contained object is in a certain state.
*
* This is used to build a two stage register/unregister flow where objects
* can continue to be in the xarray even though they are still in progress to
* register/unregister.
*
* The xarray itself provides additional locking, and restartable iteration,
* which is also relied on.
*
* Locks should not be nested, with the exception of client_data, which is
* allowed to nest under the read side of the other two locks.
*
* The devices_rwsem also protects the device name list, any change or
* assignment of device name must also hold the write side to guarantee unique
* names.
*/
/*
* devices contains devices that have had their names assigned. The
* devices may not be registered. Users that care about the registration
* status need to call ib_device_try_get() on the device to ensure it is
* registered, and keep it registered, for the required duration.
*
*/
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1
static u32 highest_client_id;
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);
static void ib_client_put(struct ib_client *client)
{
if (refcount_dec_and_test(&client->uses))
complete(&client->uses_zero);
}
/*
* If client_data is registered then the corresponding client must also still
* be registered.
*/
#define CLIENT_DATA_REGISTERED XA_MARK_1
unsigned int rdma_dev_net_id;
/*
* A list of net namespaces is maintained in an xarray. This is necessary
* because we can't get the locking right using the existing net ns list. We
* would require an init_net callback after the list is updated.
*/
static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
/*
* rwsem to protect accessing the rdma_nets xarray entries.
*/
static DECLARE_RWSEM(rdma_nets_rwsem);
bool ib_devices_shared_netns = true;
module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
"Share device among net namespaces; default=1 (shared)");
/**
* rdma_dev_access_netns() - Return whether an rdma device can be accessed
* from a specified net namespace or not.
* @dev: Pointer to rdma device which needs to be checked
* @net: Pointer to net namespace for which access is to be checked
*
* When the rdma device is in shared mode, it ignores the net namespace.
* When the rdma device is exclusive to a net namespace, rdma device net
* namespace is checked against the specified one.
*/
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
return (ib_devices_shared_netns ||
net_eq(read_pnet(&dev->coredev.rdma_net), net));
}
EXPORT_SYMBOL(rdma_dev_access_netns);
/**
* rdma_dev_has_raw_cap() - Returns whether a specified rdma device has
* CAP_NET_RAW capability or not.
*
* @dev: Pointer to rdma device whose capability to be checked
*
* Returns true if a rdma device's owning user namespace has CAP_NET_RAW
* capability, otherwise false. When the rdma subsystem is in legacy shared
* network namespace mode, the default net namespace is considered.
*/
bool rdma_dev_has_raw_cap(const struct ib_device *dev)
{
const struct net *net;
/* Network namespace is the resource whose user namespace
* to be considered. When in shared mode, there is no reliable
* network namespace resource, so consider the default net namespace.
*/
if (ib_devices_shared_netns)
net = &init_net;
else
net = read_pnet(&dev->coredev.rdma_net);
return ns_capable(net->user_ns, CAP_NET_RAW);
}
EXPORT_SYMBOL(rdma_dev_has_raw_cap);
/*
* xarray has this behavior where it won't iterate over NULL values stored in
* allocated arrays. So we need our own iterator to see all values stored in
* the array. This does the same thing as xa_for_each except that it also
* returns NULL valued entries if the array is allocating. Simplified to only
* work on simple xarrays.
*/
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
xa_mark_t filter)
{
XA_STATE(xas, xa, *indexp);
void *entry;
rcu_read_lock();
do {
entry = xas_find_marked(&xas, ULONG_MAX, filter);
if (xa_is_zero(entry))
break;
} while (xas_retry(&xas, entry));
rcu_read_unlock();
if (entry) {
*indexp = xas.xa_index;
if (xa_is_zero(entry))
return NULL;
return entry;
}
return XA_ERROR(-ENOENT);
}
#define xan_for_each_marked(xa, index, entry, filter) \
for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
!xa_is_err(entry); \
(index)++, entry = xan_find_marked(xa, &(index), filter))
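/*
 * Usage sketch for the iterator above (this mirrors how it is used further
 * below for client_data; "entry" may legitimately be NULL for allocated
 * slots):
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xan_for_each_marked(&device->client_data, index, entry,
 *			    CLIENT_DATA_REGISTERED) {
 *		... entry is the client's data, possibly NULL ...
 *	}
 */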
/* RCU hash table mapping netdevice pointers to struct ib_port_data */
static DEFINE_SPINLOCK(ndev_hash_lock);
static DECLARE_HASHTABLE(ndev_hash, 5);
static void free_netdevs(struct ib_device *ib_dev);
static void ib_unregister_work(struct work_struct *work);
static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
struct va_format *vaf)
{
if (ibdev && ibdev->dev.parent)
dev_printk_emit(level[1] - '0',
ibdev->dev.parent,
"%s %s %s: %pV",
dev_driver_string(ibdev->dev.parent),
dev_name(ibdev->dev.parent),
dev_name(&ibdev->dev),
vaf);
else if (ibdev)
printk("%s%s: %pV",
level, dev_name(&ibdev->dev), vaf);
else
printk("%s(NULL ib_device): %pV", level, vaf);
}
#define define_ibdev_printk_level(func, level) \
void func(const struct ib_device *ibdev, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
__ibdev_printk(level, ibdev, &vaf); \
\
va_end(args); \
} \
EXPORT_SYMBOL(func);
define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
define_ibdev_printk_level(ibdev_err, KERN_ERR);
define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
define_ibdev_printk_level(ibdev_info, KERN_INFO);
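/*
 * The macro above generates the exported ibdev_*() helpers. A driver
 * typically calls them like this (sketch):
 *
 *	ibdev_err(ibdev, "failed to create CQ: %d\n", ret);
 *
 * and the message is prefixed with the parent device and IB device names
 * when a parent device is present.
 */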
static struct notifier_block ibdev_lsm_nb = {
.notifier_call = ib_security_change,
};
static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
struct net *net);
/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
struct rcu_head rcu_head;
struct ib_port_data pdata[];
};
static void ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
static const struct {
size_t offset;
char *name;
} mandatory_table[] = {
IB_MANDATORY_FUNC(query_device),
IB_MANDATORY_FUNC(query_port),
IB_MANDATORY_FUNC(alloc_pd),
IB_MANDATORY_FUNC(dealloc_pd),
IB_MANDATORY_FUNC(create_qp),
IB_MANDATORY_FUNC(modify_qp),
IB_MANDATORY_FUNC(destroy_qp),
IB_MANDATORY_FUNC(post_send),
IB_MANDATORY_FUNC(post_recv),
IB_MANDATORY_FUNC(create_cq),
IB_MANDATORY_FUNC(destroy_cq),
IB_MANDATORY_FUNC(poll_cq),
IB_MANDATORY_FUNC(req_notify_cq),
IB_MANDATORY_FUNC(get_dma_mr),
IB_MANDATORY_FUNC(reg_user_mr),
IB_MANDATORY_FUNC(dereg_mr),
IB_MANDATORY_FUNC(get_port_immutable)
};
int i;
device->kverbs_provider = true;
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) &device->ops +
mandatory_table[i].offset)) {
device->kverbs_provider = false;
break;
}
}
}
/*
* Caller must perform ib_device_put() to return the device reference count
* when ib_device_get_by_index() returns valid device pointer.
*/
struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
{
struct ib_device *device;
down_read(&devices_rwsem);
device = xa_load(&devices, index);
if (device) {
if (!rdma_dev_access_netns(device, net)) {
device = NULL;
goto out;
}
if (!ib_device_try_get(device))
device = NULL;
}
out:
up_read(&devices_rwsem);
return device;
}
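/*
 * Caller pattern sketch for the get/put pairing described above:
 *
 *	dev = ib_device_get_by_index(net, index);
 *	if (!dev)
 *		return -ENODEV;
 *	... use dev ...
 *	ib_device_put(dev);
 */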
/**
* ib_device_put - Release IB device reference
* @device: device whose reference to be released
*
* ib_device_put() releases reference to the IB device to allow it to be
* unregistered and eventually free.
*/
void ib_device_put(struct ib_device *device)
{
if (refcount_dec_and_test(&device->refcount))
complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
unsigned long index;
xa_for_each (&devices, index, device)
if (!strcmp(name, dev_name(&device->dev)))
return device;
return NULL;
}
/**
* ib_device_get_by_name - Find an IB device by name
* @name: The name to look for
* @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
*
* Find and hold an ib_device by its name. The caller must call
* ib_device_put() on the returned pointer.
*/
struct ib_device *ib_device_get_by_name(const char *name,
enum rdma_driver_id driver_id)
{
struct ib_device *device;
down_read(&devices_rwsem);
device = __ib_device_get_by_name(name);
if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
device->ops.driver_id != driver_id)
device = NULL;
if (device) {
if (!ib_device_try_get(device))
device = NULL;
}
up_read(&devices_rwsem);
return device;
}
EXPORT_SYMBOL(ib_device_get_by_name);
static int rename_compat_devs(struct ib_device *device)
{
struct ib_core_device *cdev;
unsigned long index;
int ret = 0;
mutex_lock(&device->compat_devs_mutex);
xa_for_each (&device->compat_devs, index, cdev) {
ret = device_rename(&cdev->dev, dev_name(&device->dev));
if (ret) {
dev_warn(&cdev->dev,
"Fail to rename compatdev to new name %s\n",
dev_name(&device->dev));
break;
}
}
mutex_unlock(&device->compat_devs_mutex);
return ret;
}
int ib_device_rename(struct ib_device *ibdev, const char *name)
{
unsigned long index;
void *client_data;
int ret;
down_write(&devices_rwsem);
if (!strcmp(name, dev_name(&ibdev->dev))) {
up_write(&devices_rwsem);
return 0;
}
if (__ib_device_get_by_name(name)) {
up_write(&devices_rwsem);
return -EEXIST;
}
ret = device_rename(&ibdev->dev, name);
if (ret) {
up_write(&devices_rwsem);
return ret;
}
strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
ret = rename_compat_devs(ibdev);
downgrade_write(&devices_rwsem);
down_read(&ibdev->client_data_rwsem);
xan_for_each_marked(&ibdev->client_data, index, client_data,
CLIENT_DATA_REGISTERED) {
struct ib_client *client = xa_load(&clients, index);
if (!client || !client->rename)
continue;
client->rename(ibdev, client_data);
}
up_read(&ibdev->client_data_rwsem);
rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT);
up_read(&devices_rwsem);
return 0;
}
int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim)
{
if (use_dim > 1)
return -EINVAL;
ibdev->use_cq_dim = use_dim;
return 0;
}
static int alloc_name(struct ib_device *ibdev, const char *name)
{
struct ib_device *device;
unsigned long index;
struct ida inuse;
int rc;
int i;
lockdep_assert_held_write(&devices_rwsem);
ida_init(&inuse);
xa_for_each (&devices, index, device) {
char buf[IB_DEVICE_NAME_MAX];
if (sscanf(dev_name(&device->dev), name, &i) != 1)
continue;
if (i < 0 || i >= INT_MAX)
continue;
snprintf(buf, sizeof buf, name, i);
if (strcmp(buf, dev_name(&device->dev)) != 0)
continue;
rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
if (rc < 0)
goto out;
}
rc = ida_alloc(&inuse, GFP_KERNEL);
if (rc < 0)
goto out;
rc = dev_set_name(&ibdev->dev, name, rc);
out:
ida_destroy(&inuse);
return rc;
}
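/*
 * Example for alloc_name() above (hypothetical name pattern): registering
 * with "mydev_%d" scans the existing "mydev_<n>" devices, marks the indexes
 * already in use in the local IDA, and then picks the lowest free one, so
 * the first two such devices become mydev_0 and mydev_1.
 */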
static void ib_device_release(struct device *device)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
free_netdevs(dev);
WARN_ON(refcount_read(&dev->refcount));
if (dev->hw_stats_data)
ib_device_release_hw_stats(dev->hw_stats_data);
if (dev->port_data) {
ib_cache_release_one(dev);
ib_security_release_port_pkey_list(dev);
rdma_counter_release(dev);
kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
pdata[0]),
rcu_head);
}
mutex_destroy(&dev->subdev_lock);
mutex_destroy(&dev->unregistration_lock);
mutex_destroy(&dev->compat_devs_mutex);
xa_destroy(&dev->compat_devs);
xa_destroy(&dev->client_data);
kfree_rcu(dev, rcu_head);
}
static int ib_device_uevent(const struct device *device,
struct kobj_uevent_env *env)
{
if (add_uevent_var(env, "NAME=%s", dev_name(device)))
return -ENOMEM;
/*
* It would be nice to pass the node GUID with the event...
*/
return 0;
}
static const void *net_namespace(const struct device *d)
{
const struct ib_core_device *coredev =
container_of(d, struct ib_core_device, dev);
return read_pnet(&coredev->rdma_net);
}
static struct class ib_class = {
.name = "infiniband",
.dev_release = ib_device_release,
.dev_uevent = ib_device_uevent,
.ns_type = &net_ns_type_operations,
.namespace = net_namespace,
};
static void rdma_init_coredev(struct ib_core_device *coredev,
struct ib_device *dev, struct net *net)
{
bool is_full_dev = &dev->coredev == coredev;
/* This BUILD_BUG_ON is intended to catch layout change
* of union of ib_core_device and device.
* dev must be the first element as ib_core and providers
* driver uses it. Adding anything in ib_core_device before
* device will break this assumption.
*/
BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
offsetof(struct ib_device, dev));
coredev->dev.class = &ib_class;
coredev->dev.groups = dev->groups;
/*
* Don't expose hw counters outside of the init namespace.
*/
if (!is_full_dev && dev->hw_stats_attr_index)
coredev->dev.groups[dev->hw_stats_attr_index] = NULL;
device_initialize(&coredev->dev);
coredev->owner = dev;
INIT_LIST_HEAD(&coredev->port_list);
write_pnet(&coredev->rdma_net, net);
}
/**
* _ib_alloc_device - allocate an IB device struct
* @size:size of structure to allocate
* @net: network namespace device should be located in, namespace
* must stay valid until ib_register_device() is completed.
*
* Low-level drivers should use ib_alloc_device() to allocate &struct
* ib_device. @size is the size of the structure to be allocated,
* including any private data used by the low-level driver.
* ib_dealloc_device() must be used to free structures allocated with
* ib_alloc_device().
*/
struct ib_device *_ib_alloc_device(size_t size, struct net *net)
{
struct ib_device *device;
unsigned int i;
if (WARN_ON(size < sizeof(struct ib_device)))
return NULL;
device = kzalloc(size, GFP_KERNEL);
if (!device)
return NULL;
if (rdma_restrack_init(device)) {
kfree(device);
return NULL;
}
/* ib_devices_shared_netns can't change while we have active namespaces
* in the system which means either init_net is passed or the user has
* no idea what they are doing.
*
* To avoid breaking backward compatibility, when in shared mode,
* force to init the device in the init_net.
*/
net = ib_devices_shared_netns ? &init_net : net;
rdma_init_coredev(&device->coredev, device, net);
INIT_LIST_HEAD(&device->event_handler_list);
spin_lock_init(&device->qp_open_list_lock);
init_rwsem(&device->event_handler_rwsem);
mutex_init(&device->unregistration_lock);
/*
* client_data needs XA_FLAGS_ALLOC because we don't want our mark to be
* destroyed if the user stores NULL in the client data.
*/
xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
init_rwsem(&device->client_data_rwsem);
xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
mutex_init(&device->compat_devs_mutex);
init_completion(&device->unreg_completion);
INIT_WORK(&device->unregistration_work, ib_unregister_work);
spin_lock_init(&device->cq_pools_lock);
for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
INIT_LIST_HEAD(&device->cq_pools[i]);
rwlock_init(&device->cache_lock);
device->uverbs_cmd_mask =
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) |
BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) |
BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) |
BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) |
BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) |
BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) |
BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) |
BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ);
mutex_init(&device->subdev_lock);
INIT_LIST_HEAD(&device->subdev_list_head);
INIT_LIST_HEAD(&device->subdev_list);
return device;
}
EXPORT_SYMBOL(_ib_alloc_device);
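/*
 * Drivers normally reach _ib_alloc_device() through the ib_alloc_device()
 * wrapper in <rdma/ib_verbs.h>, embedding struct ib_device in their private
 * device structure. A rough sketch (struct and member names are
 * hypothetical, and the exact wrapper arguments depend on the header
 * version):
 *
 *	struct my_ib_dev {
 *		struct ib_device ibdev;
 *		... driver private state ...
 *	};
 *
 *	dev = ib_alloc_device(my_ib_dev, ibdev);
 *	if (!dev)
 *		return -ENOMEM;
 */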
/**
* ib_dealloc_device - free an IB device struct
* @device:structure to free
*
* Free a structure allocated with ib_alloc_device().
*/
void ib_dealloc_device(struct ib_device *device)
{
if (device->ops.dealloc_driver)
device->ops.dealloc_driver(device);
/*
* ib_unregister_driver() requires all devices to remain in the xarray
* while their ops are callable. The last op we call is dealloc_driver
* above. This is needed to create a fence on op callbacks prior to
* allowing the driver module to unload.
*/
down_write(&devices_rwsem);
if (xa_load(&devices, device->index) == device)
xa_erase(&devices, device->index);
up_write(&devices_rwsem);
/* Expedite releasing netdev references */
free_netdevs(device);
WARN_ON(!xa_empty(&device->compat_devs));
WARN_ON(!xa_empty(&device->client_data));
WARN_ON(refcount_read(&device->refcount));
rdma_restrack_clean(device);
/* Balances with device_initialize */
put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
/*
* add_client_context() and remove_client_context() must be safe against
* parallel calls on the same device - registration/unregistration of both the
* device and client can be occurring in parallel.
*
* The routines need to be a fence, any caller must not return until the add
* or remove is fully completed.
*/
static int add_client_context(struct ib_device *device,
struct ib_client *client)
{
int ret = 0;
if (!device->kverbs_provider && !client->no_kverbs_req)
return 0;
down_write(&device->client_data_rwsem);
/*
* So long as the client is registered hold both the client and device
* unregistration locks.
*/
if (!refcount_inc_not_zero(&client->uses))
goto out_unlock;
refcount_inc(&device->refcount);
/*
* Another caller to add_client_context got here first and has already
* completely initialized context.
*/
if (xa_get_mark(&device->client_data, client->client_id,
CLIENT_DATA_REGISTERED))
goto out;
ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
GFP_KERNEL));
if (ret)
goto out;
downgrade_write(&device->client_data_rwsem);
if (client->add) {
if (client->add(device)) {
/*
* If a client fails to add then the error code is
* ignored, but we won't call any more ops on this
* client.
*/
xa_erase(&device->client_data, client->client_id);
up_read(&device->client_data_rwsem);
ib_device_put(device);
ib_client_put(client);
return 0;
}
}
/* Readers shall not see a client until add has been completed */
xa_set_mark(&device->client_data, client->client_id,
CLIENT_DATA_REGISTERED);
up_read(&device->client_data_rwsem);
return 0;
out:
ib_device_put(device);
ib_client_put(client);
out_unlock:
up_write(&device->client_data_rwsem);
return ret;
}
static void remove_client_context(struct ib_device *device,
unsigned int client_id)
{
struct ib_client *client;
void *client_data;
down_write(&device->client_data_rwsem);
if (!xa_get_mark(&device->client_data, client_id,
CLIENT_DATA_REGISTERED)) {
up_write(&device->client_data_rwsem);
return;
}
client_data = xa_load(&device->client_data, client_id);
xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
client = xa_load(&clients, client_id);
up_write(&device->client_data_rwsem);
/*
* Notice we cannot be holding any exclusive locks when calling the
* remove callback as the remove callback can recurse back into any
* public functions in this module and thus try for any locks those
* functions take.
*
* For this reason clients and drivers should not call the
* unregistration functions while holding any locks.
*/
if (client->remove)
client->remove(device, client_data);
xa_erase(&device->client_data, client_id);
ib_device_put(device);
ib_client_put(client);
}
static int alloc_port_data(struct ib_device *device)
{
struct ib_port_data_rcu *pdata_rcu;
u32 port;
if (device->port_data)
return 0;
/* This can only be called once the physical port range is defined */
if (WARN_ON(!device->phys_port_cnt))
return -EINVAL;
/* Reserve U32_MAX so the logic to go over all the ports is sane */
if (WARN_ON(device->phys_port_cnt == U32_MAX))
return -EINVAL;
/*
* device->port_data is indexed directly by the port number to make
* access to this data as efficient as possible.
*
* Therefore port_data is declared as a 1 based array with potential
* empty slots at the beginning.
*/
pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
size_add(rdma_end_port(device), 1)),
GFP_KERNEL);
if (!pdata_rcu)
return -ENOMEM;
/*
* The rcu_head is put in front of the port data array and the stored
* pointer is adjusted since we never need to see that member until
* kfree_rcu.
*/
device->port_data = pdata_rcu->pdata;
rdma_for_each_port (device, port) {
struct ib_port_data *pdata = &device->port_data[port];
pdata->ib_dev = device;
spin_lock_init(&pdata->pkey_list_lock);
INIT_LIST_HEAD(&pdata->pkey_list);
spin_lock_init(&pdata->netdev_lock);
INIT_HLIST_NODE(&pdata->ndev_hash_link);
}
return 0;
}
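/*
 * Example of the 1-based layout set up by alloc_port_data() above: for a
 * device whose ports are numbered 1..phys_port_cnt (the usual case where
 * port numbering starts at 1), the array has rdma_end_port() + 1 slots and
 * slot 0 is simply never visited by rdma_for_each_port().
 */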
static int verify_immutable(const struct ib_device *dev, u32 port)
{
return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
rdma_max_mad_size(dev, port) != 0);
}
static int setup_port_data(struct ib_device *device)
{
u32 port;
int ret;
ret = alloc_port_data(device);
if (ret)
return ret;
rdma_for_each_port (device, port) {
struct ib_port_data *pdata = &device->port_data[port];
ret = device->ops.get_port_immutable(device, port,
&pdata->immutable);
if (ret)
return ret;
if (verify_immutable(device, port))
return -EINVAL;
}
return 0;
}
/**
* ib_port_immutable_read() - Read rdma port's immutable data
* @dev: IB device
* @port: port number whose immutable data to read. It starts with index 1 and
* valid up to and including rdma_end_port().
*/
const struct ib_port_immutable*
ib_port_immutable_read(struct ib_device *dev, unsigned int port)
{
WARN_ON(!rdma_is_port_valid(dev, port));
return &dev->port_data[port].immutable;
}
EXPORT_SYMBOL(ib_port_immutable_read);
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
if (dev->ops.get_dev_fw_str)
dev->ops.get_dev_fw_str(dev, str);
else
str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);
static void ib_policy_change_task(struct work_struct *work)
{
struct ib_device *dev;
unsigned long index;
down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
unsigned int i;
rdma_for_each_port (dev, i) {
u64 sp;
ib_get_cached_subnet_prefix(dev, i, &sp);
ib_security_cache_change(dev, i, sp);
}
}
up_read(&devices_rwsem);
}
static int ib_security_change(struct notifier_block *nb, unsigned long event,
void *lsm_data)
{
if (event != LSM_POLICY_CHANGE)
return NOTIFY_DONE;
schedule_work(&ib_policy_change_work);
ib_mad_agent_security_change();
return NOTIFY_OK;
}
static void compatdev_release(struct device *dev)
{
struct ib_core_device *cdev =
container_of(dev, struct ib_core_device, dev);
kfree(cdev);
}
static int add_one_compat_dev(struct ib_device *device,
struct rdma_dev_net *rnet)
{
struct ib_core_device *cdev;
int ret;
lockdep_assert_held(&rdma_nets_rwsem);
if (!ib_devices_shared_netns)
return 0;
/*
* Create and add compat device in all namespaces other than where it
* is currently bound to.
*/
if (net_eq(read_pnet(&rnet->net),
read_pnet(&device->coredev.rdma_net)))
return 0;
/*
* The first of init_net() or ib_register_device() to take the
* compat_devs_mutex wins and gets to add the device. Others will wait
* for completion here.
*/
mutex_lock(&device->compat_devs_mutex);
cdev = xa_load(&device->compat_devs, rnet->id);
if (cdev) {
ret = 0;
goto done;
}
ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
if (ret)
goto done;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev) {
ret = -ENOMEM;
goto cdev_err;
}
cdev->dev.parent = device->dev.parent;
rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
cdev->dev.release = compatdev_release;
ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
if (ret)
goto add_err;
ret = device_add(&cdev->dev);
if (ret)
goto add_err;
ret = ib_setup_port_attrs(cdev);
if (ret)
goto port_err;
ret = xa_err(xa_store(&device->compat_devs, rnet->id,
cdev, GFP_KERNEL));
if (ret)
goto insert_err;
mutex_unlock(&device->compat_devs_mutex);
return 0;
insert_err:
ib_free_port_attrs(cdev);
port_err:
device_del(&cdev->dev);
add_err:
put_device(&cdev->dev);
cdev_err:
xa_release(&device->compat_devs, rnet->id);
done:
mutex_unlock(&device->compat_devs_mutex);
return ret;
}
static void remove_one_compat_dev(struct ib_device *device, u32 id)
{
struct ib_core_device *cdev;
mutex_lock(&device->compat_devs_mutex);
cdev = xa_erase(&device->compat_devs, id);
mutex_unlock(&device->compat_devs_mutex);
if (cdev) {
ib_free_port_attrs(cdev);
device_del(&cdev->dev);
put_device(&cdev->dev);
}
}
static void remove_compat_devs(struct ib_device *device)
{
struct ib_core_device *cdev;
unsigned long index;
xa_for_each (&device->compat_devs, index, cdev)
remove_one_compat_dev(device, index);
}
static int add_compat_devs(struct ib_device *device)
{
struct rdma_dev_net *rnet;
unsigned long index;
int ret = 0;
lockdep_assert_held(&devices_rwsem);
down_read(&rdma_nets_rwsem);
xa_for_each (&rdma_nets, index, rnet) {
ret = add_one_compat_dev(device, rnet);
if (ret)
break;
}
up_read(&rdma_nets_rwsem);
return ret;
}
static void remove_all_compat_devs(void)
{
struct ib_compat_device *cdev;
struct ib_device *dev;
unsigned long index;
down_read(&devices_rwsem);
xa_for_each (&devices, index, dev) {
unsigned long c_index = 0;
/* Hold nets_rwsem so that any other thread modifying this
* system param can sync with this thread.
*/
down_read(&rdma_nets_rwsem);
xa_for_each (&dev->compat_devs, c_index, cdev)
remove_one_compat_dev(dev, c_index);
up_read(&rdma_nets_rwsem);
}
up_read(&devices_rwsem);
}
static int add_all_compat_devs(void)
{
struct rdma_dev_net *rnet;
struct ib_device *dev;
unsigned long index;
int ret = 0;
down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
unsigned long net_index = 0;
/* Hold nets_rwsem so that any other thread modifying this
* system param can sync with this thread.
*/
down_read(&rdma_nets_rwsem);
xa_for_each (&rdma_nets, net_index, rnet) {
ret = add_one_compat_dev(dev, rnet);
if (ret)
break;
}
up_read(&rdma_nets_rwsem);
}
up_read(&devices_rwsem);
if (ret)
remove_all_compat_devs();
return ret;
}
int rdma_compatdev_set(u8 enable)
{
struct rdma_dev_net *rnet;
unsigned long index;
int ret = 0;
down_write(&rdma_nets_rwsem);
if (ib_devices_shared_netns == enable) {
up_write(&rdma_nets_rwsem);
return 0;
}
/* enable/disable of compat devices is not supported
* when more than default init_net exists.
*/
xa_for_each (&rdma_nets, index, rnet) {
ret++;
break;
}
if (!ret)
ib_devices_shared_netns = enable;
up_write(&rdma_nets_rwsem);
if (ret)
return -EBUSY;
if (enable)
ret = add_all_compat_devs();
else
remove_all_compat_devs();
return ret;
}
static void rdma_dev_exit_net(struct net *net)
{
struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
struct ib_device *dev;
unsigned long index;
int ret;
down_write(&rdma_nets_rwsem);
/*
* Prevent the ID from being re-used and hide the id from xa_for_each.
*/
ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
WARN_ON(ret);
up_write(&rdma_nets_rwsem);
down_read(&devices_rwsem);
xa_for_each (&devices, index, dev) {
get_device(&dev->dev);
/*
* Release the devices_rwsem so that the potentially blocking
* device_del() doesn't hold the devices_rwsem for too long.
*/
up_read(&devices_rwsem);
remove_one_compat_dev(dev, rnet->id);
/*
* If the real device is in the NS then move it back to init.
*/
rdma_dev_change_netns(dev, net, &init_net);
put_device(&dev->dev);
down_read(&devices_rwsem);
}
up_read(&devices_rwsem);
rdma_nl_net_exit(rnet);
xa_erase(&rdma_nets, rnet->id);
}
static __net_init int rdma_dev_init_net(struct net *net)
{
struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
unsigned long index;
struct ib_device *dev;
int ret;
write_pnet(&rnet->net, net);
ret = rdma_nl_net_init(rnet);
if (ret)
return ret;
/* No need to create any compat devices in default init_net. */
if (net_eq(net, &init_net))
return 0;
ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
if (ret) {
rdma_nl_net_exit(rnet);
return ret;
}
down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
/* Hold nets_rwsem so that netlink command cannot change
* system configuration for device sharing mode.
*/
down_read(&rdma_nets_rwsem);
ret = add_one_compat_dev(dev, rnet);
up_read(&rdma_nets_rwsem);
if (ret)
break;
}
up_read(&devices_rwsem);
if (ret)
rdma_dev_exit_net(net);
return ret;
}
/*
* Assign the unique string device name and the unique device index. This is
* undone by ib_dealloc_device.
*/
static int assign_name(struct ib_device *device, const char *name)
{
static u32 last_id;
int ret;
down_write(&devices_rwsem);
/* Assign a unique name to the device */
if (strchr(name, '%'))
ret = alloc_name(device, name);
else
ret = dev_set_name(&device->dev, name);
if (ret)
goto out;
if (__ib_device_get_by_name(dev_name(&device->dev))) {
ret = -ENFILE;
goto out;
}
strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
&last_id, GFP_KERNEL);
if (ret > 0)
ret = 0;
out:
up_write(&devices_rwsem);
return ret;
}
/*
* setup_device() allocates memory and sets up data that requires calling the
* device ops, this is the only reason these actions are not done during
* ib_alloc_device. It is undone by ib_dealloc_device().
*/
static int setup_device(struct ib_device *device)
{
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
int ret;
ib_device_check_mandatory(device);
ret = setup_port_data(device);
if (ret) {
dev_warn(&device->dev, "Couldn't create per-port data\n");
return ret;
}
memset(&device->attrs, 0, sizeof(device->attrs));
ret = device->ops.query_device(device, &device->attrs, &uhw);
if (ret) {
dev_warn(&device->dev,
"Couldn't query the device attributes\n");
return ret;
}
return 0;
}
static void disable_device(struct ib_device *device)
{
u32 cid;
WARN_ON(!refcount_read(&device->refcount));
down_write(&devices_rwsem);
xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
up_write(&devices_rwsem);
/*
* Remove clients in LIFO order, see assign_client_id. This could be
* more efficient if xarray learns to reverse iterate. Since no new
* clients can be added to this ib_device past this point we only need
* the maximum possible client_id value here.
*/
down_read(&clients_rwsem);
cid = highest_client_id;
up_read(&clients_rwsem);
while (cid) {
cid--;
remove_client_context(device, cid);
}
ib_cq_pool_cleanup(device);
/* Pairs with refcount_set in enable_device */
ib_device_put(device);
wait_for_completion(&device->unreg_completion);
/*
* compat devices must be removed after device refcount drops to zero.
* Otherwise init_net() may add more compatdevs after removing compat
* devices and before device is disabled.
*/
remove_compat_devs(device);
}
/*
* An enabled device is visible to all clients and to all the public facing
* APIs that return a device pointer. This always returns with a new get, even
* if it fails.
*/
static int enable_device_and_get(struct ib_device *device)
{
struct ib_client *client;
unsigned long index;
int ret = 0;
/*
* One ref belongs to the xa and the other belongs to this
* thread. This is needed to guard against parallel unregistration.
*/
refcount_set(&device->refcount, 2);
down_write(&devices_rwsem);
xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
/*
* By using downgrade_write() we ensure that no other thread can clear
* DEVICE_REGISTERED while we are completing the client setup.
*/
downgrade_write(&devices_rwsem);
if (device->ops.enable_driver) {
ret = device->ops.enable_driver(device);
if (ret)
goto out;
}
down_read(&clients_rwsem);
xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
ret = add_client_context(device, client);
if (ret)
break;
}
up_read(&clients_rwsem);
if (!ret)
ret = add_compat_devs(device);
out:
up_read(&devices_rwsem);
return ret;
}
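/*
* Placeholder stored in ops.dealloc_driver during the ib_register_device()
* error flow. It marks that the registering thread, not a racing
* unregistration, is responsible for calling ib_dealloc_device(); see the
* checks against prevent_dealloc_device in __ib_unregister_device().
*/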
static void prevent_dealloc_device(struct ib_device *ib_dev)
{
}
static void ib_device_notify_register(struct ib_device *device)
{
struct net_device *netdev;
u32 port;
int ret;
down_read(&devices_rwsem);
/* Mark for userspace that device is ready */
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
goto out;
rdma_for_each_port(device, port) {
netdev = ib_device_get_netdev(device, port);
if (!netdev)
continue;
ret = rdma_nl_notify_event(device, port,
RDMA_NETDEV_ATTACH_EVENT);
dev_put(netdev);
if (ret)
goto out;
}
out:
up_read(&devices_rwsem);
}
/**
* ib_register_device - Register an IB device with IB core
* @device: Device to register
* @name: unique string device name. This may include a '%' which will
* cause a unique index to be added to the passed device name.
* @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
* device will be used. In this case the caller should fully
* set up the ibdev for DMA. This usually means using dma_virt_ops.
*
* Low-level drivers use ib_register_device() to register their
* devices with the IB core. All registered clients will receive a
* callback for each device that is added. @device must be allocated
* with ib_alloc_device().
*
* If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
* asynchronously then the device pointer may be freed as soon as this
* function returns.
*/
int ib_register_device(struct ib_device *device, const char *name,
struct device *dma_device)
{
int ret;
ret = assign_name(device, name);
if (ret)
return ret;
/*
* If the caller does not provide a DMA capable device then the IB core
* will set up ib_sge and scatterlist structures that stash the kernel
* virtual address into the address field.
*/
WARN_ON(dma_device && !dma_device->dma_parms);
device->dma_device = dma_device;
ret = setup_device(device);
if (ret)
return ret;
ret = ib_cache_setup_one(device);
if (ret) {
dev_warn(&device->dev,
"Couldn't set up InfiniBand P_Key/GID cache\n");
return ret;
}
device->groups[0] = &ib_dev_attr_group;
device->groups[1] = device->ops.device_group;
ret = ib_setup_device_attrs(device);
if (ret)
goto cache_cleanup;
ib_device_register_rdmacg(device);
rdma_counter_init(device);
/*
* Ensure that the ADD uevent is not fired because it
* is too early and the device is not initialized yet.
*/
dev_set_uevent_suppress(&device->dev, true);
ret = device_add(&device->dev);
if (ret)
goto cg_cleanup;
ret = ib_setup_port_attrs(&device->coredev);
if (ret) {
dev_warn(&device->dev,
"Couldn't register device with driver model\n");
goto dev_cleanup;
}
ret = enable_device_and_get(device);
if (ret) {
void (*dealloc_fn)(struct ib_device *);
/*
* If we hit this error flow then we don't want to
* automatically dealloc the device since the caller is
* expected to call ib_dealloc_device() after
* ib_register_device() fails. This is tricky due to the
* possibility for a parallel unregistration along with this
* error flow. Since we have a refcount here we know any
* parallel flow is stopped in disable_device and will see the
* special dealloc_driver pointer, causing the responsibility to
* ib_dealloc_device() to revert back to this thread.
*/
dealloc_fn = device->ops.dealloc_driver;
device->ops.dealloc_driver = prevent_dealloc_device;
ib_device_put(device);
__ib_unregister_device(device);
device->ops.dealloc_driver = dealloc_fn;
dev_set_uevent_suppress(&device->dev, false);
return ret;
}
dev_set_uevent_suppress(&device->dev, false);
ib_device_notify_register(device);
ib_device_put(device);
return 0;
dev_cleanup:
device_del(&device->dev);
cg_cleanup:
dev_set_uevent_suppress(&device->dev, false);
ib_device_unregister_rdmacg(device);
cache_cleanup:
ib_cache_cleanup_one(device);
return ret;
}
EXPORT_SYMBOL(ib_register_device);
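/*
* Example (illustrative sketch, not part of this file; "hypo_*" names are
* hypothetical): a provider typically allocates its device with
* ib_alloc_device(), fills in the ops and then registers it. Error handling
* is abbreviated.
*
* static int hypo_probe(struct pci_dev *pdev)
* {
* struct hypo_dev *hdev;
* int ret;
*
* hdev = ib_alloc_device(hypo_dev, ibdev);
* if (!hdev)
* return -ENOMEM;
*
* ib_set_device_ops(&hdev->ibdev, &hypo_dev_ops);
* ret = ib_register_device(&hdev->ibdev, "hypo%d", &pdev->dev);
* if (ret)
* ib_dealloc_device(&hdev->ibdev);
* return ret;
* }
*/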
/* Callers must hold a get on the device. */
static void __ib_unregister_device(struct ib_device *ib_dev)
{
struct ib_device *sub, *tmp;
mutex_lock(&ib_dev->subdev_lock);
list_for_each_entry_safe_reverse(sub, tmp,
&ib_dev->subdev_list_head,
subdev_list) {
list_del(&sub->subdev_list);
ib_dev->ops.del_sub_dev(sub);
ib_device_put(ib_dev);
}
mutex_unlock(&ib_dev->subdev_lock);
/*
* We have a registration lock so that all the calls to unregister are
* fully fenced, once any unregister returns the device is truly
* unregistered even if multiple callers are unregistering it at the
* same time. This also interacts with the registration flow and
* provides sane semantics if register and unregister are racing.
*/
mutex_lock(&ib_dev->unregistration_lock);
if (!refcount_read(&ib_dev->refcount))
goto out;
disable_device(ib_dev);
rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT);
/* Expedite removing unregistered pointers from the hash table */
free_netdevs(ib_dev);
ib_free_port_attrs(&ib_dev->coredev);
device_del(&ib_dev->dev);
ib_device_unregister_rdmacg(ib_dev);
ib_cache_cleanup_one(ib_dev);
/*
* Drivers using the new flow may not call ib_dealloc_device except
* in error unwind prior to registration success.
*/
if (ib_dev->ops.dealloc_driver &&
ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
ib_dealloc_device(ib_dev);
}
out:
mutex_unlock(&ib_dev->unregistration_lock);
}
/**
* ib_unregister_device - Unregister an IB device
* @ib_dev: The device to unregister
*
* Unregister an IB device. All clients will receive a remove callback.
*
* Callers should call this routine only once, and protect against races with
* registration. Typically it should only be called as part of a remove
* callback in an implementation of driver core's struct device_driver and
* related.
*
* If ops.dealloc_driver is used then ib_dev will be freed upon return from
* this function.
*/
void ib_unregister_device(struct ib_device *ib_dev)
{
get_device(&ib_dev->dev);
__ib_unregister_device(ib_dev);
put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device);
/**
* ib_unregister_device_and_put - Unregister a device while holding a 'get'
* @ib_dev: The device to unregister
*
* This is the same as ib_unregister_device(), except it includes an internal
* ib_device_put() that should match a 'get' obtained by the caller.
*
* It is safe to call this routine concurrently from multiple threads while
* holding the 'get'. When the function returns the device is fully
* unregistered.
*
* Drivers using this flow MUST use the dealloc_driver callback to clean up
* their resources associated with the device and dealloc it.
*/
void ib_unregister_device_and_put(struct ib_device *ib_dev)
{
WARN_ON(!ib_dev->ops.dealloc_driver);
get_device(&ib_dev->dev);
ib_device_put(ib_dev);
__ib_unregister_device(ib_dev);
put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_and_put);
/**
* ib_unregister_driver - Unregister all IB devices for a driver
* @driver_id: The driver to unregister
*
* This implements a fence for device unregistration. It only returns once all
* devices associated with the driver_id have fully completed their
* unregistration and returned from ib_unregister_device*().
*
* If devices are not yet unregistered it goes ahead and starts unregistering
* them.
*
* This does not block creation of new devices with the given driver_id, that
* is the responsibility of the caller.
*/
void ib_unregister_driver(enum rdma_driver_id driver_id)
{
struct ib_device *ib_dev;
unsigned long index;
down_read(&devices_rwsem);
xa_for_each (&devices, index, ib_dev) {
if (ib_dev->ops.driver_id != driver_id)
continue;
get_device(&ib_dev->dev);
up_read(&devices_rwsem);
WARN_ON(!ib_dev->ops.dealloc_driver);
__ib_unregister_device(ib_dev);
put_device(&ib_dev->dev);
down_read(&devices_rwsem);
}
up_read(&devices_rwsem);
}
EXPORT_SYMBOL(ib_unregister_driver);
static void ib_unregister_work(struct work_struct *work)
{
struct ib_device *ib_dev =
container_of(work, struct ib_device, unregistration_work);
__ib_unregister_device(ib_dev);
put_device(&ib_dev->dev);
}
/**
* ib_unregister_device_queued - Unregister a device using a work queue
* @ib_dev: The device to unregister
*
* This schedules an asynchronous unregistration using a WQ for the device. A
* driver should use this to avoid holding locks while doing unregistration,
* such as holding the RTNL lock.
*
* Drivers using this API must use ib_unregister_driver before module unload
* to ensure that all scheduled unregistrations have completed.
*/
void ib_unregister_device_queued(struct ib_device *ib_dev)
{
WARN_ON(!refcount_read(&ib_dev->refcount));
WARN_ON(!ib_dev->ops.dealloc_driver);
get_device(&ib_dev->dev);
if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work))
put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_queued);
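/*
* Example (sketch, hypothetical names): a driver that must not block while
* holding the RTNL lock queues the unregistration and fences all pending
* unregistrations at module unload with ib_unregister_driver():
*
* ib_unregister_device_queued(&hdev->ibdev); // e.g. called under RTNL
* ...
* ib_unregister_driver(RDMA_DRIVER_HYPO); // in module_exit(); use the
* // driver's own rdma_driver_id
*/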
/*
* The caller must pass in a device that has the kref held and the refcount
* released. If the device is in cur_net and still registered then it is moved
* into net.
*/
static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
struct net *net)
{
int ret2 = -EINVAL;
int ret;
mutex_lock(&device->unregistration_lock);
/*
* If the device is not held via ib_device_get() or the unregistration_lock
* is not held, the namespace can be changed or the device can be unregistered.
* Check again under the lock.
*/
if (refcount_read(&device->refcount) == 0 ||
!net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
ret = -ENODEV;
goto out;
}
kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
disable_device(device);
/*
* At this point no one can be using the device, so it is safe to
* change the namespace.
*/
write_pnet(&device->coredev.rdma_net, net);
down_read(&devices_rwsem);
/*
* Currently rdma devices are system-wide unique, so the device name
* is guaranteed to be free in the new namespace. Publish the new namespace
* at the sysfs level.
*/
ret = device_rename(&device->dev, dev_name(&device->dev));
up_read(&devices_rwsem);
if (ret) {
dev_warn(&device->dev,
"%s: Couldn't rename device after namespace change\n",
__func__);
/* Try and put things back and re-enable the device */
write_pnet(&device->coredev.rdma_net, cur_net);
}
ret2 = enable_device_and_get(device);
if (ret2) {
/*
* This shouldn't really happen, but if it does, let the user
* retry at a later point. So don't disable the device.
*/
dev_warn(&device->dev,
"%s: Couldn't re-enable device after namespace change\n",
__func__);
}
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ib_device_put(device);
out:
mutex_unlock(&device->unregistration_lock);
if (ret)
return ret;
return ret2;
}
int ib_device_set_netns_put(struct sk_buff *skb,
struct ib_device *dev, u32 ns_fd)
{
struct net *net;
int ret;
net = get_net_ns_by_fd(ns_fd);
if (IS_ERR(net)) {
ret = PTR_ERR(net);
goto net_err;
}
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
goto ns_err;
}
/*
* All the ib_clients, including uverbs, are reset when the namespace is
* changed and this cannot be blocked waiting for userspace to do
* something, so disassociation is mandatory.
*/
if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) {
ret = -EOPNOTSUPP;
goto ns_err;
}
get_device(&dev->dev);
ib_device_put(dev);
ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
put_device(&dev->dev);
put_net(net);
return ret;
ns_err:
put_net(net);
net_err:
ib_device_put(dev);
return ret;
}
static struct pernet_operations rdma_dev_net_ops = {
.init = rdma_dev_init_net,
.exit = rdma_dev_exit_net,
.id = &rdma_dev_net_id,
.size = sizeof(struct rdma_dev_net),
};
static int assign_client_id(struct ib_client *client)
{
int ret;
lockdep_assert_held(&clients_rwsem);
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
* registration order.
*/
client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
return ret;
highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
return 0;
}
static void remove_client_id(struct ib_client *client)
{
down_write(&clients_rwsem);
xa_erase(&clients, client->client_id);
for (; highest_client_id; highest_client_id--)
if (xa_load(&clients, highest_client_id - 1))
break;
up_write(&clients_rwsem);
}
/**
* ib_register_client - Register an IB client
* @client:Client to register
*
* Upper level users of the IB drivers can use ib_register_client() to
* register callbacks for IB device addition and removal. When an IB
* device is added, each registered client's add method will be called
* (in the order the clients were registered), and when a device is
* removed, each client's remove method will be called (in the reverse
* order that clients were registered). In addition, when
* ib_register_client() is called, the client will receive an add
* callback for all devices already registered.
*/
int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
unsigned long index;
bool need_unreg = false;
int ret;
refcount_set(&client->uses, 1);
init_completion(&client->uses_zero);
/*
* The devices_rwsem is held in write mode to ensure that a racing
* ib_register_device() sees a consistent view of clients and devices.
*/
down_write(&devices_rwsem);
down_write(&clients_rwsem);
ret = assign_client_id(client);
if (ret)
goto out;
need_unreg = true;
xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
ret = add_client_context(device, client);
if (ret)
goto out;
}
ret = 0;
out:
up_write(&clients_rwsem);
up_write(&devices_rwsem);
if (need_unreg && ret)
ib_unregister_client(client);
return ret;
}
EXPORT_SYMBOL(ib_register_client);
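/*
* Example (illustrative sketch; "hypo_*" names are hypothetical and the
* callback signatures follow struct ib_client as used by this file): a
* minimal client that attaches per-device state with ib_set_client_data().
*
* static struct ib_client hypo_client;
*
* static int hypo_add_one(struct ib_device *device)
* {
* struct hypo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
*
* if (!data)
* return -ENOMEM;
* ib_set_client_data(device, &hypo_client, data);
* return 0;
* }
*
* static void hypo_remove_one(struct ib_device *device, void *client_data)
* {
* kfree(client_data);
* }
*
* static struct ib_client hypo_client = {
* .name = "hypo",
* .add = hypo_add_one,
* .remove = hypo_remove_one,
* };
*
* ib_register_client(&hypo_client) is then called from module init and
* ib_unregister_client(&hypo_client) from module exit.
*/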
/**
* ib_unregister_client - Unregister an IB client
* @client:Client to unregister
*
* Upper level users use ib_unregister_client() to remove their client
* registration. When ib_unregister_client() is called, the client
* will receive a remove callback for each IB device still registered.
*
* This is a full fence, once it returns no client callbacks will be called,
* or are running in another thread.
*/
void ib_unregister_client(struct ib_client *client)
{
struct ib_device *device;
unsigned long index;
down_write(&clients_rwsem);
ib_client_put(client);
xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
up_write(&clients_rwsem);
/* We do not want to have locks while calling client->remove() */
rcu_read_lock();
xa_for_each (&devices, index, device) {
if (!ib_device_try_get(device))
continue;
rcu_read_unlock();
remove_client_context(device, client->client_id);
ib_device_put(device);
rcu_read_lock();
}
rcu_read_unlock();
/*
* remove_client_context() is not a fence, it can return even though a
* removal is ongoing. Wait until all removals are completed.
*/
wait_for_completion(&client->uses_zero);
remove_client_id(client);
}
EXPORT_SYMBOL(ib_unregister_client);
static int __ib_get_global_client_nl_info(const char *client_name,
struct ib_client_nl_info *res)
{
struct ib_client *client;
unsigned long index;
int ret = -ENOENT;
down_read(&clients_rwsem);
xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
if (strcmp(client->name, client_name) != 0)
continue;
if (!client->get_global_nl_info) {
ret = -EOPNOTSUPP;
break;
}
ret = client->get_global_nl_info(res);
if (WARN_ON(ret == -ENOENT))
ret = -EINVAL;
if (!ret && res->cdev)
get_device(res->cdev);
break;
}
up_read(&clients_rwsem);
return ret;
}
static int __ib_get_client_nl_info(struct ib_device *ibdev,
const char *client_name,
struct ib_client_nl_info *res)
{
unsigned long index;
void *client_data;
int ret = -ENOENT;
down_read(&ibdev->client_data_rwsem);
xan_for_each_marked (&ibdev->client_data, index, client_data,
CLIENT_DATA_REGISTERED) {
struct ib_client *client = xa_load(&clients, index);
if (!client || strcmp(client->name, client_name) != 0)
continue;
if (!client->get_nl_info) {
ret = -EOPNOTSUPP;
break;
}
ret = client->get_nl_info(ibdev, client_data, res);
if (WARN_ON(ret == -ENOENT))
ret = -EINVAL;
/*
* The cdev is guaranteed valid as long as we are inside the
* client_data_rwsem as remove_one can't be called. Keep it
* valid for the caller.
*/
if (!ret && res->cdev)
get_device(res->cdev);
break;
}
up_read(&ibdev->client_data_rwsem);
return ret;
}
/**
* ib_get_client_nl_info - Fetch the nl_info from a client
* @ibdev: IB device
* @client_name: Name of the client
* @res: Result of the query
*/
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
struct ib_client_nl_info *res)
{
int ret;
if (ibdev)
ret = __ib_get_client_nl_info(ibdev, client_name, res);
else
ret = __ib_get_global_client_nl_info(client_name, res);
#ifdef CONFIG_MODULES
if (ret == -ENOENT) {
request_module("rdma-client-%s", client_name);
if (ibdev)
ret = __ib_get_client_nl_info(ibdev, client_name, res);
else
ret = __ib_get_global_client_nl_info(client_name, res);
}
#endif
if (ret) {
if (ret == -ENOENT)
return -EOPNOTSUPP;
return ret;
}
if (WARN_ON(!res->cdev))
return -EINVAL;
return 0;
}
/**
* ib_set_client_data - Set IB client context
* @device:Device to set context for
* @client:Client to set context for
* @data:Context to set
*
* ib_set_client_data() sets client context data that can be retrieved with
* ib_get_client_data(). This can only be called while the client is
* registered to the device, once the ib_client remove() callback returns this
* cannot be called.
*/
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data)
{
void *rc;
if (WARN_ON(IS_ERR(data)))
data = NULL;
rc = xa_store(&device->client_data, client->client_id, data,
GFP_KERNEL);
WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);
/**
* ib_register_event_handler - Register an IB event handler
* @event_handler:Handler to register
*
* ib_register_event_handler() registers an event handler that will be
* called back when asynchronous IB events occur (as defined in
* chapter 11 of the InfiniBand Architecture Specification). This
* callback occurs in workqueue context.
*/
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
down_write(&event_handler->device->event_handler_rwsem);
list_add_tail(&event_handler->list,
&event_handler->device->event_handler_list);
up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_register_event_handler);
/**
* ib_unregister_event_handler - Unregister an event handler
* @event_handler:Handler to unregister
*
* Unregister an event handler registered with
* ib_register_event_handler().
*/
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
down_write(&event_handler->device->event_handler_rwsem);
list_del(&event_handler->list);
up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_unregister_event_handler);
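/*
* Example (sketch, hypothetical handler): clients usually embed a struct
* ib_event_handler, initialize it with INIT_IB_EVENT_HANDLER() and register
* it once they hold the device:
*
* static void hypo_event(struct ib_event_handler *handler,
* struct ib_event *event)
* {
* if (event->event == IB_EVENT_PORT_ACTIVE)
* pr_info("port %u became active\n", event->element.port_num);
* }
*
* INIT_IB_EVENT_HANDLER(&priv->event_handler, device, hypo_event);
* ib_register_event_handler(&priv->event_handler);
* ...
* ib_unregister_event_handler(&priv->event_handler);
*/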
void ib_dispatch_event_clients(struct ib_event *event)
{
struct ib_event_handler *handler;
down_read(&event->device->event_handler_rwsem);
list_for_each_entry(handler, &event->device->event_handler_list, list)
handler->handler(handler, event);
up_read(&event->device->event_handler_rwsem);
}
static int iw_query_port(struct ib_device *device,
u32 port_num,
struct ib_port_attr *port_attr)
{
struct in_device *inetdev;
struct net_device *netdev;
memset(port_attr, 0, sizeof(*port_attr));
netdev = ib_device_get_netdev(device, port_num);
if (!netdev)
return -ENODEV;
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev)) {
port_attr->state = IB_PORT_DOWN;
port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
} else {
rcu_read_lock();
inetdev = __in_dev_get_rcu(netdev);
if (inetdev && inetdev->ifa_list) {
port_attr->state = IB_PORT_ACTIVE;
port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
port_attr->state = IB_PORT_INIT;
port_attr->phys_state =
IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
}
rcu_read_unlock();
}
dev_put(netdev);
return device->ops.query_port(device, port_num, port_attr);
}
static int __ib_query_port(struct ib_device *device,
u32 port_num,
struct ib_port_attr *port_attr)
{
int err;
memset(port_attr, 0, sizeof(*port_attr));
err = device->ops.query_port(device, port_num, port_attr);
if (err || port_attr->subnet_prefix)
return err;
if (rdma_port_get_link_layer(device, port_num) !=
IB_LINK_LAYER_INFINIBAND)
return 0;
ib_get_cached_subnet_prefix(device, port_num,
&port_attr->subnet_prefix);
return 0;
}
/**
* ib_query_port - Query IB port attributes
* @device:Device to query
* @port_num:Port number to query
* @port_attr:Port attributes
*
* ib_query_port() returns the attributes of a port through the
* @port_attr pointer.
*/
int ib_query_port(struct ib_device *device,
u32 port_num,
struct ib_port_attr *port_attr)
{
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
if (rdma_protocol_iwarp(device, port_num))
return iw_query_port(device, port_num, port_attr);
else
return __ib_query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
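/*
* Example (sketch): check whether port 1 of a held device is usable before
* posting work:
*
* struct ib_port_attr attr;
*
* if (!ib_query_port(device, 1, &attr) && attr.state == IB_PORT_ACTIVE)
* ... // port is active
*/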
static void add_ndev_hash(struct ib_port_data *pdata)
{
unsigned long flags;
might_sleep();
spin_lock_irqsave(&ndev_hash_lock, flags);
if (hash_hashed(&pdata->ndev_hash_link)) {
hash_del_rcu(&pdata->ndev_hash_link);
spin_unlock_irqrestore(&ndev_hash_lock, flags);
/*
* We cannot do hash_add_rcu after a hash_del_rcu until the
* grace period has elapsed.
*/
synchronize_rcu();
spin_lock_irqsave(&ndev_hash_lock, flags);
}
if (pdata->netdev)
hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
(uintptr_t)pdata->netdev);
spin_unlock_irqrestore(&ndev_hash_lock, flags);
}
/**
* ib_device_set_netdev - Associate the ib_dev with an underlying net_device
* @ib_dev: Device to modify
* @ndev: net_device to affiliate, may be NULL
* @port: IB port the net_device is connected to
*
* Drivers should use this to link the ib_device to a netdev so the netdev
* shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
* affiliated with any port.
*
* The caller must ensure that the given ndev is not unregistered or
* unregistering, and that either the ib_device is unregistered or
* ib_device_set_netdev() is called with NULL when the ndev sends a
* NETDEV_UNREGISTER event.
*/
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
u32 port)
{
enum rdma_nl_notify_event_type etype;
struct net_device *old_ndev;
struct ib_port_data *pdata;
unsigned long flags;
int ret;
if (!rdma_is_port_valid(ib_dev, port))
return -EINVAL;
/*
* Drivers may wish to call this before ib_register_device(), so we have to
* set up the port data early.
*/
ret = alloc_port_data(ib_dev);
if (ret)
return ret;
pdata = &ib_dev->port_data[port];
spin_lock_irqsave(&pdata->netdev_lock, flags);
old_ndev = rcu_dereference_protected(
pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
if (old_ndev == ndev) {
spin_unlock_irqrestore(&pdata->netdev_lock, flags);
return 0;
}
rcu_assign_pointer(pdata->netdev, ndev);
netdev_put(old_ndev, &pdata->netdev_tracker);
netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
spin_unlock_irqrestore(&pdata->netdev_lock, flags);
add_ndev_hash(pdata);
/* Make sure that the device is registered before we send events */
if (xa_load(&devices, ib_dev->index) != ib_dev)
return 0;
etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT;
rdma_nl_notify_event(ib_dev, port, etype);
return 0;
}
EXPORT_SYMBOL(ib_device_set_netdev);
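/*
* Example (sketch, hypothetical names): a RoCE provider attaches its netdev
* once it is known and detaches it from its NETDEV_UNREGISTER handling, as
* the rules above require:
*
* ret = ib_device_set_netdev(&hdev->ibdev, hdev->netdev, 1);
* ...
* ib_device_set_netdev(&hdev->ibdev, NULL, 1); // on NETDEV_UNREGISTER
*/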
static void free_netdevs(struct ib_device *ib_dev)
{
unsigned long flags;
u32 port;
if (!ib_dev->port_data)
return;
rdma_for_each_port (ib_dev, port) {
struct ib_port_data *pdata = &ib_dev->port_data[port];
struct net_device *ndev;
spin_lock_irqsave(&pdata->netdev_lock, flags);
ndev = rcu_dereference_protected(
pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
if (ndev) {
spin_lock(&ndev_hash_lock);
hash_del_rcu(&pdata->ndev_hash_link);
spin_unlock(&ndev_hash_lock);
/*
* If this is the last dev_put there is still a
* synchronize_rcu before the netdev is kfreed, so we
* can continue to rely on unlocked pointer
* comparisons after the put
*/
rcu_assign_pointer(pdata->netdev, NULL);
netdev_put(ndev, &pdata->netdev_tracker);
}
spin_unlock_irqrestore(&pdata->netdev_lock, flags);
}
}
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
u32 port)
{
struct ib_port_data *pdata;
struct net_device *res;
if (!rdma_is_port_valid(ib_dev, port))
return NULL;
if (!ib_dev->port_data)
return NULL;
pdata = &ib_dev->port_data[port];
/*
* New drivers should use ib_device_set_netdev() not the legacy
* get_netdev().
*/
if (ib_dev->ops.get_netdev)
res = ib_dev->ops.get_netdev(ib_dev, port);
else {
spin_lock(&pdata->netdev_lock);
res = rcu_dereference_protected(
pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
dev_hold(res);
spin_unlock(&pdata->netdev_lock);
}
return res;
}
EXPORT_SYMBOL(ib_device_get_netdev);
/**
* ib_query_netdev_port - Query the port number of a net_device
* associated with an ibdev
* @ibdev: IB device
* @ndev: Network device
* @port: IB port the net_device is connected to
*/
int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
u32 *port)
{
struct net_device *ib_ndev;
u32 port_num;
rdma_for_each_port(ibdev, port_num) {
ib_ndev = ib_device_get_netdev(ibdev, port_num);
if (ndev == ib_ndev) {
*port = port_num;
dev_put(ib_ndev);
return 0;
}
dev_put(ib_ndev);
}
return -ENOENT;
}
EXPORT_SYMBOL(ib_query_netdev_port);
/**
* ib_device_get_by_netdev - Find an IB device associated with a netdev
* @ndev: netdev to locate
* @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
*
* Find and hold an ib_device that is associated with a netdev via
* ib_device_set_netdev(). The caller must call ib_device_put() on the
* returned pointer.
*/
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
enum rdma_driver_id driver_id)
{
struct ib_device *res = NULL;
struct ib_port_data *cur;
rcu_read_lock();
hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
(uintptr_t)ndev) {
if (rcu_access_pointer(cur->netdev) == ndev &&
(driver_id == RDMA_DRIVER_UNKNOWN ||
cur->ib_dev->ops.driver_id == driver_id) &&
ib_device_try_get(cur->ib_dev)) {
res = cur->ib_dev;
break;
}
}
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL(ib_device_get_by_netdev);
/**
* ib_enum_roce_netdev - enumerate all RoCE ports
* @ib_dev: IB device we want to query
* @filter: Should we call the callback?
* @filter_cookie: Cookie passed to filter
* @cb: Callback to call for each found RoCE port
* @cookie: Cookie passed back to the callback
*
* Enumerates all of the physical RoCE ports of ib_dev which are associated
* with a netdevice and calls the callback on each port for which the filter
* function returns non-zero.
*/
void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_filter filter,
void *filter_cookie,
roce_netdev_callback cb,
void *cookie)
{
u32 port;
rdma_for_each_port (ib_dev, port)
if (rdma_protocol_roce(ib_dev, port)) {
struct net_device *idev =
ib_device_get_netdev(ib_dev, port);
if (filter(ib_dev, port, idev, filter_cookie))
cb(ib_dev, port, idev, cookie);
dev_put(idev);
}
}
/**
* ib_enum_all_roce_netdevs - enumerate all RoCE devices
* @filter: Should we call the callback?
* @filter_cookie: Cookie passed to filter
* @cb: Callback to call for each found RoCE port
* @cookie: Cookie passed back to the callback
*
* Enumerates all RoCE devices' physical ports which are associated with
* netdevices and calls the callback on each port for which the filter
* function returns non-zero.
*/
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
void *filter_cookie,
roce_netdev_callback cb,
void *cookie)
{
struct ib_device *dev;
unsigned long index;
down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
up_read(&devices_rwsem);
}
/*
* ib_enum_all_devs - enumerate all ib_devices
* @cb: Callback to call for each found ib_device
*
* Enumerates all ib_devices and calls callback() on each device.
*/
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
struct netlink_callback *cb)
{
unsigned long index;
struct ib_device *dev;
unsigned int idx = 0;
int ret = 0;
down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
continue;
ret = nldev_cb(dev, skb, cb, idx);
if (ret)
break;
idx++;
}
up_read(&devices_rwsem);
return ret;
}
/**
* ib_query_pkey - Get P_Key table entry
* @device:Device to query
* @port_num:Port number to query
* @index:P_Key table index to query
* @pkey:Returned P_Key
*
* ib_query_pkey() fetches the specified P_Key table entry.
*/
int ib_query_pkey(struct ib_device *device,
u32 port_num, u16 index, u16 *pkey)
{
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
if (!device->ops.query_pkey)
return -EOPNOTSUPP;
return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
/**
* ib_modify_device - Change IB device attributes
* @device:Device to modify
* @device_modify_mask:Mask of attributes to change
* @device_modify:New attribute values
*
* ib_modify_device() changes a device's attributes as specified by
* the @device_modify_mask and @device_modify structure.
*/
int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
{
if (!device->ops.modify_device)
return -EOPNOTSUPP;
return device->ops.modify_device(device, device_modify_mask,
device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
/**
* ib_modify_port - Modifies the attributes for the specified port.
* @device: The device to modify.
* @port_num: The number of the port to modify.
* @port_modify_mask: Mask used to specify which attributes of the port
* to change.
* @port_modify: New attribute values for the port.
*
* ib_modify_port() changes a port's attributes as specified by the
* @port_modify_mask and @port_modify structure.
*/
int ib_modify_port(struct ib_device *device,
u32 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
int rc;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
if (device->ops.modify_port)
rc = device->ops.modify_port(device, port_num,
port_modify_mask,
port_modify);
else if (rdma_protocol_roce(device, port_num) &&
((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
(port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
rc = 0;
else
rc = -EOPNOTSUPP;
return rc;
}
EXPORT_SYMBOL(ib_modify_port);
/**
* ib_find_gid - Returns the port number and GID table index where
* a specified GID value occurs. It searches only ports using the IB link layer.
* @device: The device to query.
* @gid: The GID value to search for.
* @port_num: The port number of the device where the GID value was found.
* @index: The index into the GID table where the GID was found. This
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
u32 *port_num, u16 *index)
{
union ib_gid tmp_gid;
u32 port;
int ret, i;
rdma_for_each_port (device, port) {
if (!rdma_protocol_ib(device, port))
continue;
for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
++i) {
ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
continue;
if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
*port_num = port;
if (index)
*index = i;
return 0;
}
}
}
return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
/**
* ib_find_pkey - Returns the PKey table index where a specified
* PKey value occurs.
* @device: The device to query.
* @port_num: The port number of the device to search for the PKey.
* @pkey: The PKey value to search for.
* @index: The index into the PKey table where the PKey was found.
*/
int ib_find_pkey(struct ib_device *device,
u32 port_num, u16 pkey, u16 *index)
{
int ret, i;
u16 tmp_pkey;
int partial_ix = -1;
for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
++i) {
ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
if (ret)
return ret;
if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
/* If there is a full-member pkey, take it. */
if (tmp_pkey & 0x8000) {
*index = i;
return 0;
}
if (partial_ix < 0)
partial_ix = i;
}
}
/* No full-member pkey found; if a limited-member one exists, take it. */
if (partial_ix >= 0) {
*index = partial_ix;
return 0;
}
return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
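/*
* Example (sketch): look up the table index of the default PKey (0xffff) on
* port 1; the search above prefers a full-member entry over a limited one.
*
* u16 index;
*
* if (!ib_find_pkey(device, 1, 0xffff, &index))
* pr_debug("default pkey at index %u\n", index);
*/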
/**
* ib_get_net_dev_by_params() - Return the appropriate net_dev
* for a received CM request
* @dev: An RDMA device on which the request has been received.
* @port: Port number on the RDMA device.
* @pkey: The Pkey the request came on.
* @gid: A GID that the net_dev uses to communicate.
* @addr: Contains the IP address that the request specified as its
* destination.
*
*/
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
u32 port,
u16 pkey,
const union ib_gid *gid,
const struct sockaddr *addr)
{
struct net_device *net_dev = NULL;
unsigned long index;
void *client_data;
if (!rdma_protocol_ib(dev, port))
return NULL;
/*
* Holding the read side guarantees that the client will not become
* unregistered while we are calling get_net_dev_by_params()
*/
down_read(&dev->client_data_rwsem);
xan_for_each_marked (&dev->client_data, index, client_data,
CLIENT_DATA_REGISTERED) {
struct ib_client *client = xa_load(&clients, index);
if (!client || !client->get_net_dev_by_params)
continue;
net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
addr, client_data);
if (net_dev)
break;
}
up_read(&dev->client_data_rwsem);
return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
struct ib_device_ops *dev_ops = &dev->ops;
#define SET_DEVICE_OP(ptr, name) \
do { \
if (ops->name) \
if (!((ptr)->name)) \
(ptr)->name = ops->name; \
} while (0)
#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
if (ops->driver_id != RDMA_DRIVER_UNKNOWN) {
WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN &&
dev_ops->driver_id != ops->driver_id);
dev_ops->driver_id = ops->driver_id;
}
if (ops->owner) {
WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner);
dev_ops->owner = ops->owner;
}
if (ops->uverbs_abi_ver)
dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver;
dev_ops->uverbs_no_driver_id_binding |=
ops->uverbs_no_driver_id_binding;
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, add_sub_dev);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
SET_DEVICE_OP(dev_ops, alloc_dmah);
SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
SET_DEVICE_OP(dev_ops, alloc_mw);
SET_DEVICE_OP(dev_ops, alloc_pd);
SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
SET_DEVICE_OP(dev_ops, alloc_ucontext);
SET_DEVICE_OP(dev_ops, alloc_xrcd);
SET_DEVICE_OP(dev_ops, attach_mcast);
SET_DEVICE_OP(dev_ops, check_mr_status);
SET_DEVICE_OP(dev_ops, counter_alloc_stats);
SET_DEVICE_OP(dev_ops, counter_bind_qp);
SET_DEVICE_OP(dev_ops, counter_dealloc);
SET_DEVICE_OP(dev_ops, counter_init);
SET_DEVICE_OP(dev_ops, counter_unbind_qp);
SET_DEVICE_OP(dev_ops, counter_update_stats);
SET_DEVICE_OP(dev_ops, create_ah);
SET_DEVICE_OP(dev_ops, create_counters);
SET_DEVICE_OP(dev_ops, create_cq);
SET_DEVICE_OP(dev_ops, create_cq_umem);
SET_DEVICE_OP(dev_ops, create_flow);
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
SET_DEVICE_OP(dev_ops, create_user_ah);
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
SET_DEVICE_OP(dev_ops, dealloc_dmah);
SET_DEVICE_OP(dev_ops, dealloc_driver);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
SET_DEVICE_OP(dev_ops, dealloc_ucontext);
SET_DEVICE_OP(dev_ops, dealloc_xrcd);
SET_DEVICE_OP(dev_ops, del_gid);
SET_DEVICE_OP(dev_ops, del_sub_dev);
SET_DEVICE_OP(dev_ops, dereg_mr);
SET_DEVICE_OP(dev_ops, destroy_ah);
SET_DEVICE_OP(dev_ops, destroy_counters);
SET_DEVICE_OP(dev_ops, destroy_cq);
SET_DEVICE_OP(dev_ops, destroy_flow);
SET_DEVICE_OP(dev_ops, destroy_flow_action);
SET_DEVICE_OP(dev_ops, destroy_qp);
SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
SET_DEVICE_OP(dev_ops, destroy_srq);
SET_DEVICE_OP(dev_ops, destroy_wq);
SET_DEVICE_OP(dev_ops, device_group);
SET_DEVICE_OP(dev_ops, detach_mcast);
SET_DEVICE_OP(dev_ops, disassociate_ucontext);
SET_DEVICE_OP(dev_ops, drain_rq);
SET_DEVICE_OP(dev_ops, drain_sq);
SET_DEVICE_OP(dev_ops, enable_driver);
SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry);
SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw);
SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw);
SET_DEVICE_OP(dev_ops, fill_res_qp_entry);
SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw);
SET_DEVICE_OP(dev_ops, fill_res_srq_entry);
SET_DEVICE_OP(dev_ops, fill_res_srq_entry_raw);
SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
SET_DEVICE_OP(dev_ops, get_link_layer);
SET_DEVICE_OP(dev_ops, get_netdev);
SET_DEVICE_OP(dev_ops, get_numa_node);
SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
SET_DEVICE_OP(dev_ops, get_vf_guid);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, iw_accept);
SET_DEVICE_OP(dev_ops, iw_add_ref);
SET_DEVICE_OP(dev_ops, iw_connect);
SET_DEVICE_OP(dev_ops, iw_create_listen);
SET_DEVICE_OP(dev_ops, iw_destroy_listen);
SET_DEVICE_OP(dev_ops, iw_get_qp);
SET_DEVICE_OP(dev_ops, iw_reject);
SET_DEVICE_OP(dev_ops, iw_rem_ref);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
SET_DEVICE_OP(dev_ops, mmap);
SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
SET_DEVICE_OP(dev_ops, modify_hw_stat);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
SET_DEVICE_OP(dev_ops, modify_srq);
SET_DEVICE_OP(dev_ops, modify_wq);
SET_DEVICE_OP(dev_ops, peek_cq);
SET_DEVICE_OP(dev_ops, pre_destroy_cq);
SET_DEVICE_OP(dev_ops, poll_cq);
SET_DEVICE_OP(dev_ops, port_groups);
SET_DEVICE_OP(dev_ops, post_destroy_cq);
SET_DEVICE_OP(dev_ops, post_recv);
SET_DEVICE_OP(dev_ops, post_send);
SET_DEVICE_OP(dev_ops, post_srq_recv);
SET_DEVICE_OP(dev_ops, process_mad);
SET_DEVICE_OP(dev_ops, query_ah);
SET_DEVICE_OP(dev_ops, query_device);
SET_DEVICE_OP(dev_ops, query_gid);
SET_DEVICE_OP(dev_ops, query_pkey);
SET_DEVICE_OP(dev_ops, query_port);
SET_DEVICE_OP(dev_ops, query_qp);
SET_DEVICE_OP(dev_ops, query_srq);
SET_DEVICE_OP(dev_ops, query_ucontext);
SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
SET_DEVICE_OP(dev_ops, read_counters);
SET_DEVICE_OP(dev_ops, reg_dm_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
SET_DEVICE_OP(dev_ops, req_notify_cq);
SET_DEVICE_OP(dev_ops, rereg_user_mr);
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
SET_DEVICE_OP(dev_ops, report_port_event);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
SET_OBJ_SIZE(dev_ops, ib_cq);
SET_OBJ_SIZE(dev_ops, ib_dmah);
SET_OBJ_SIZE(dev_ops, ib_mw);
SET_OBJ_SIZE(dev_ops, ib_pd);
SET_OBJ_SIZE(dev_ops, ib_qp);
SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
SET_OBJ_SIZE(dev_ops, ib_xrcd);
SET_OBJ_SIZE(dev_ops, rdma_counter);
}
EXPORT_SYMBOL(ib_set_device_ops);
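/*
* Example (sketch, hypothetical driver): providers declare a const ops table
* and hand it to ib_set_device_ops() before registration; only the callbacks
* the driver implements need to be filled in, the rest stay NULL.
*
* static const struct ib_device_ops hypo_dev_ops = {
* .owner = THIS_MODULE,
* .driver_id = RDMA_DRIVER_UNKNOWN, // replace with the driver's own id
* .uverbs_abi_ver = 1,
* .query_device = hypo_query_device,
* .query_port = hypo_query_port,
* };
*
* ib_set_device_ops(&hdev->ibdev, &hypo_dev_ops);
*/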
int ib_add_sub_device(struct ib_device *parent,
enum rdma_nl_dev_type type,
const char *name)
{
struct ib_device *sub;
int ret = 0;
if (!parent->ops.add_sub_dev || !parent->ops.del_sub_dev)
return -EOPNOTSUPP;
if (!ib_device_try_get(parent))
return -EINVAL;
sub = parent->ops.add_sub_dev(parent, type, name);
if (IS_ERR(sub)) {
ib_device_put(parent);
return PTR_ERR(sub);
}
sub->type = type;
sub->parent = parent;
mutex_lock(&parent->subdev_lock);
list_add_tail(&parent->subdev_list_head, &sub->subdev_list);
mutex_unlock(&parent->subdev_lock);
return ret;
}
EXPORT_SYMBOL(ib_add_sub_device);
int ib_del_sub_device_and_put(struct ib_device *sub)
{
struct ib_device *parent = sub->parent;
if (!parent)
return -EOPNOTSUPP;
mutex_lock(&parent->subdev_lock);
list_del(&sub->subdev_list);
mutex_unlock(&parent->subdev_lock);
ib_device_put(sub);
parent->ops.del_sub_dev(sub);
ib_device_put(parent);
return 0;
}
EXPORT_SYMBOL(ib_del_sub_device_and_put);
#ifdef CONFIG_INFINIBAND_VIRT_DMA
int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
sg_dma_address(s) = (uintptr_t)sg_virt(s);
sg_dma_len(s) = s->length;
}
return nents;
}
EXPORT_SYMBOL(ib_dma_virt_map_sg);
#endif /* CONFIG_INFINIBAND_VIRT_DMA */
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
[RDMA_NL_LS_OP_RESOLVE] = {
.doit = ib_nl_handle_resolve_resp,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NL_LS_OP_SET_TIMEOUT] = {
.doit = ib_nl_handle_set_timeout,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NL_LS_OP_IP_RESOLVE] = {
.doit = ib_nl_handle_ip_res_resp,
.flags = RDMA_NL_ADMIN_PERM,
},
};
void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
{
enum ib_port_state curr_state;
struct ib_event ibevent = {};
u32 port;
if (ib_query_netdev_port(ibdev, ndev, &port))
return;
curr_state = ib_get_curr_port_state(ndev);
write_lock_irq(&ibdev->cache_lock);
if (ibdev->port_data[port].cache.last_port_state == curr_state) {
write_unlock_irq(&ibdev->cache_lock);
return;
}
ibdev->port_data[port].cache.last_port_state = curr_state;
write_unlock_irq(&ibdev->cache_lock);
ibevent.event = (curr_state == IB_PORT_DOWN) ?
IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
ibevent.device = ibdev;
ibevent.element.port_num = port;
ib_dispatch_event(&ibevent);
}
EXPORT_SYMBOL(ib_dispatch_port_state_event);
static void handle_port_event(struct net_device *ndev, unsigned long event)
{
struct ib_device *ibdev;
/* Currently, link events in bonding scenarios are still
* reported by drivers that support bonding.
*/
if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
return;
ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
if (!ibdev)
return;
if (ibdev->ops.report_port_event) {
ibdev->ops.report_port_event(ibdev, ndev, event);
goto put_ibdev;
}
ib_dispatch_port_state_event(ibdev, ndev);
put_ibdev:
ib_device_put(ibdev);
}
static int ib_netdevice_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct ib_device *ibdev;
u32 port;
switch (event) {
case NETDEV_CHANGENAME:
ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
if (!ibdev)
return NOTIFY_DONE;
if (ib_query_netdev_port(ibdev, ndev, &port)) {
ib_device_put(ibdev);
break;
}
rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
ib_device_put(ibdev);
break;
case NETDEV_UP:
case NETDEV_CHANGE:
case NETDEV_DOWN:
handle_port_event(ndev, event);
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block nb_netdevice = {
.notifier_call = ib_netdevice_event,
};
static int __init ib_core_init(void)
{
int ret = -ENOMEM;
ib_wq = alloc_workqueue("infiniband", 0, 0);
if (!ib_wq)
return -ENOMEM;
ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
if (!ib_unreg_wq)
goto err;
ib_comp_wq = alloc_workqueue("ib-comp-wq",
WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!ib_comp_wq)
goto err_unbound;
ib_comp_unbound_wq =
alloc_workqueue("ib-comp-unb-wq",
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
if (!ib_comp_unbound_wq)
goto err_comp;
ret = class_register(&ib_class);
if (ret) {
pr_warn("Couldn't create InfiniBand device class\n");
goto err_comp_unbound;
}
rdma_nl_init();
ret = addr_init();
if (ret) {
pr_warn("Couldn't init IB address resolution\n");
goto err_ibnl;
}
ret = ib_mad_init();
if (ret) {
pr_warn("Couldn't init IB MAD\n");
goto err_addr;
}
ret = ib_sa_init();
if (ret) {
pr_warn("Couldn't init SA\n");
goto err_mad;
}
ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
if (ret) {
pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
goto err_sa;
}
ret = register_pernet_device(&rdma_dev_net_ops);
if (ret) {
pr_warn("Couldn't init compat dev. ret %d\n", ret);
goto err_compat;
}
nldev_init();
rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
ret = roce_gid_mgmt_init();
if (ret) {
pr_warn("Couldn't init RoCE GID management\n");
goto err_parent;
}
register_netdevice_notifier(&nb_netdevice);
return 0;
err_parent:
rdma_nl_unregister(RDMA_NL_LS);
nldev_exit();
unregister_pernet_device(&rdma_dev_net_ops);
err_compat:
unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:
ib_sa_cleanup();
err_mad:
ib_mad_cleanup();
err_addr:
addr_cleanup();
err_ibnl:
class_unregister(&ib_class);
err_comp_unbound:
destroy_workqueue(ib_comp_unbound_wq);
err_comp:
destroy_workqueue(ib_comp_wq);
err_unbound:
destroy_workqueue(ib_unreg_wq);
err:
destroy_workqueue(ib_wq);
return ret;
}
static void __exit ib_core_cleanup(void)
{
unregister_netdevice_notifier(&nb_netdevice);
roce_gid_mgmt_cleanup();
rdma_nl_unregister(RDMA_NL_LS);
nldev_exit();
unregister_pernet_device(&rdma_dev_net_ops);
unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
ib_sa_cleanup();
ib_mad_cleanup();
addr_cleanup();
rdma_nl_exit();
class_unregister(&ib_class);
destroy_workqueue(ib_comp_unbound_wq);
destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
destroy_workqueue(ib_unreg_wq);
WARN_ON(!xa_empty(&clients));
WARN_ON(!xa_empty(&devices));
}
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
/* ib core relies on netdev stack to first register net_ns_type_operations
* ns kobject type before ib_core initialization.
*/
fs_initcall(ib_core_init);
module_exit(ib_core_cleanup);
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
* Copyright (C) 2004 IBM Corporation, Rusty Russell.
* Copyright (C) 2009 Red Hat, Inc.
*
* Creation is done via kthreadd, so that we get a clean environment
* even if we're invoked from userspace (think modprobe, hotplug cpu,
* etc.).
*/
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
static LIST_HEAD(kthreads_hotplug);
static DEFINE_MUTEX(kthreads_hotplug_lock);
struct kthread_create_info
{
/* Information passed to kthread() from kthreadd. */
char *full_name;
int (*threadfn)(void *data);
void *data;
int node;
/* Result passed back to kthread_create() from kthreadd. */
struct task_struct *result;
struct completion *done;
struct list_head list;
};
struct kthread {
unsigned long flags;
unsigned int cpu;
unsigned int node;
int started;
int result;
int (*threadfn)(void *);
void *data;
struct completion parked;
struct completion exited;
#ifdef CONFIG_BLK_CGROUP
struct cgroup_subsys_state *blkcg_css;
#endif
/* To store the full name if task comm is truncated. */
char *full_name;
struct task_struct *task;
struct list_head hotplug_node;
struct cpumask *preferred_affinity;
};
enum KTHREAD_BITS {
KTHREAD_IS_PER_CPU = 0,
KTHREAD_SHOULD_STOP,
KTHREAD_SHOULD_PARK,
};
static inline struct kthread *to_kthread(struct task_struct *k)
{
WARN_ON(!(k->flags & PF_KTHREAD));
return k->worker_private;
}
/*
* Variant of to_kthread() that doesn't assume @p is a kthread.
*
* When "(p->flags & PF_KTHREAD)" is set the task is a kthread and will
* always remain a kthread. For kthreads p->worker_private always
* points to a struct kthread. For tasks that are not kthreads
* p->worker_private is used to point to other things.
*
* Return NULL for any task that is not a kthread.
*/
static inline struct kthread *__to_kthread(struct task_struct *p)
{
void *kthread = p->worker_private;
if (kthread && !(p->flags & PF_KTHREAD))
kthread = NULL;
return kthread;
}
void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
struct kthread *kthread = to_kthread(tsk);
if (!kthread || !kthread->full_name) {
strscpy(buf, tsk->comm, buf_size);
return;
}
strscpy_pad(buf, kthread->full_name, buf_size);
}
bool set_kthread_struct(struct task_struct *p)
{
struct kthread *kthread;
if (WARN_ON_ONCE(to_kthread(p)))
return false;
kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
if (!kthread)
return false;
init_completion(&kthread->exited);
init_completion(&kthread->parked);
INIT_LIST_HEAD(&kthread->hotplug_node);
p->vfork_done = &kthread->exited;
kthread->task = p;
kthread->node = tsk_fork_get_node(current);
p->worker_private = kthread;
return true;
}
void free_kthread_struct(struct task_struct *k)
{
struct kthread *kthread;
/*
* Can be NULL if kzalloc() in set_kthread_struct() failed.
*/
kthread = to_kthread(k);
if (!kthread)
return;
#ifdef CONFIG_BLK_CGROUP
WARN_ON_ONCE(kthread->blkcg_css);
#endif
k->worker_private = NULL;
kfree(kthread->full_name);
kfree(kthread);
}
/**
* kthread_should_stop - should this kthread return now?
*
* When someone calls kthread_stop() on your kthread, it will be woken
* and this will return true. You should then return, and your return
* value will be passed through to kthread_stop().
*/
bool kthread_should_stop(void)
{
return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
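/*
* Example (sketch, hypothetical work function): the canonical thread function
* loops until kthread_stop() is called on the task:
*
* static int hypo_thread(void *data)
* {
* while (!kthread_should_stop()) {
* hypo_do_work(data);
* schedule_timeout_interruptible(HZ);
* }
* return 0;
* }
*/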
static bool __kthread_should_park(struct task_struct *k)
{
return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
/**
* kthread_should_park - should this kthread park now?
*
* When someone calls kthread_park() on your kthread, it will be woken
* and this will return true. You should then do the necessary
* cleanup and call kthread_parkme()
*
* Similar to kthread_should_stop(), but this keeps the thread alive
* and in a park position. kthread_unpark() "restarts" the thread and
* calls the thread function again.
*/
bool kthread_should_park(void)
{
return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
bool kthread_should_stop_or_park(void)
{
struct kthread *kthread = __to_kthread(current);
if (!kthread)
return false;
return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
}
/**
* kthread_freezable_should_stop - should this freezable kthread return now?
* @was_frozen: optional out parameter, indicates whether %current was frozen
*
* kthread_should_stop() for freezable kthreads, which will enter
* refrigerator if necessary. This function is safe from kthread_stop() /
* freezer deadlock and freezable kthreads should use this function instead
* of calling try_to_freeze() directly.
*/
bool kthread_freezable_should_stop(bool *was_frozen)
{
bool frozen = false;
might_sleep();
if (unlikely(freezing(current)))
frozen = __refrigerator(true);
if (was_frozen)
*was_frozen = frozen;
return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
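/*
* Example (sketch): a freezable kthread polls this helper instead of calling
* try_to_freeze() directly:
*
* set_freezable();
* while (!kthread_freezable_should_stop(NULL))
* hypo_do_work(data); // hypothetical work function
*/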
/**
* kthread_func - return the function specified on kthread creation
* @task: kthread task in question
*
* Returns NULL if the task is not a kthread.
*/
void *kthread_func(struct task_struct *task)
{
struct kthread *kthread = __to_kthread(task);
if (kthread)
return kthread->threadfn;
return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);
/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
*
* Return the data value specified when kthread @task was created.
* The caller is responsible for ensuring the validity of @task when
* calling this function.
*/
void *kthread_data(struct task_struct *task)
{
return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);
/**
* kthread_probe_data - speculative version of kthread_data()
* @task: possible kthread task in question
*
* @task could be a kthread task. Return the data value specified when it
* was created if accessible. If @task isn't a kthread task or its data is
* inaccessible for any reason, %NULL is returned. This function requires
* that @task itself is safe to dereference.
*/
void *kthread_probe_data(struct task_struct *task)
{
struct kthread *kthread = __to_kthread(task);
void *data = NULL;
if (kthread)
copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
return data;
}
static void __kthread_parkme(struct kthread *self)
{
for (;;) {
/*
* TASK_PARKED is a special state; we must serialize against
* possible pending wakeups to avoid store-store collisions on
* task->state.
*
* Such a collision might possibly result in the task state
* changing from TASK_PARKED and us failing the
* wait_task_inactive() in kthread_park().
*/
set_special_state(TASK_PARKED);
if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
break;
/*
* Thread is going to call schedule(), do not preempt it,
* or the caller of kthread_park() may spend more time in
* wait_task_inactive().
*/
preempt_disable();
complete(&self->parked);
schedule_preempt_disabled();
preempt_enable();
}
__set_current_state(TASK_RUNNING);
}
void kthread_parkme(void)
{
__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
/**
* kthread_exit - Cause the current kthread to return @result to kthread_stop().
* @result: The integer value to return to kthread_stop().
*
* While kthread_exit can be called directly, it exists so that
* functions which do some additional work in non-modular code such as
* module_put_and_kthread_exit can be implemented.
*
* Does not return.
*/
void __noreturn kthread_exit(long result)
{
struct kthread *kthread = to_kthread(current);
kthread->result = result;
if (!list_empty(&kthread->hotplug_node)) {
mutex_lock(&kthreads_hotplug_lock);
list_del(&kthread->hotplug_node);
mutex_unlock(&kthreads_hotplug_lock);
if (kthread->preferred_affinity) {
kfree(kthread->preferred_affinity);
kthread->preferred_affinity = NULL;
}
}
do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);
/**
* kthread_complete_and_exit - Exit the current kthread.
* @comp: Completion to complete
* @code: The integer value to return to kthread_stop().
*
* If present, complete @comp and then return @code to kthread_stop().
*
* A kernel thread whose module may be removed after the completion of
* @comp can use this function to exit safely.
*
* Does not return.
*/
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
if (comp)
complete(comp);
kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);
static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpumask)
{
const struct cpumask *pref;
if (kthread->preferred_affinity) {
pref = kthread->preferred_affinity;
} else {
if (WARN_ON_ONCE(kthread->node == NUMA_NO_NODE))
return;
pref = cpumask_of_node(kthread->node);
}
cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD));
if (cpumask_empty(cpumask))
cpumask_copy(cpumask, housekeeping_cpumask(HK_TYPE_KTHREAD));
}
static void kthread_affine_node(void)
{
struct kthread *kthread = to_kthread(current);
cpumask_var_t affinity;
WARN_ON_ONCE(kthread_is_per_cpu(current));
if (kthread->node == NUMA_NO_NODE) {
housekeeping_affine(current, HK_TYPE_KTHREAD);
} else {
if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
WARN_ON_ONCE(1);
return;
}
mutex_lock(&kthreads_hotplug_lock);
WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
/*
* The node cpumask is racy when read from kthread() but:
* - a racing CPU going down will either fail on the subsequent
* call to set_cpus_allowed_ptr() or be migrated to housekeepers
* afterwards by the scheduler.
* - a racing CPU going up will be handled by kthreads_online_cpu()
*/
kthread_fetch_affinity(kthread, affinity);
set_cpus_allowed_ptr(current, affinity);
mutex_unlock(&kthreads_hotplug_lock);
free_cpumask_var(affinity);
}
}
static int kthread(void *_create)
{
static const struct sched_param param = { .sched_priority = 0 };
/* Copy data: it's on kthread's stack */
struct kthread_create_info *create = _create;
int (*threadfn)(void *data) = create->threadfn;
void *data = create->data;
struct completion *done;
struct kthread *self;
int ret;
self = to_kthread(current);
/* Release the structure when caller killed by a fatal signal. */
done = xchg(&create->done, NULL);
if (!done) {
kfree(create->full_name);
kfree(create);
kthread_exit(-EINTR);
}
self->full_name = create->full_name;
self->threadfn = threadfn;
self->data = data;
/*
* The new thread inherited kthreadd's priority and CPU mask. Reset
* back to default in case they have been changed.
*/
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
/* OK, tell user we're spawned, wait for stop or wakeup */
__set_current_state(TASK_UNINTERRUPTIBLE);
create->result = current;
/*
* Thread is going to call schedule(), do not preempt it,
* or the creator may spend more time in wait_task_inactive().
*/
preempt_disable();
complete(done);
schedule_preempt_disabled();
preempt_enable();
self->started = 1;
if (!(current->flags & PF_NO_SETAFFINITY) && !self->preferred_affinity)
kthread_affine_node();
ret = -EINTR;
if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
cgroup_kthread_ready();
__kthread_parkme(self);
ret = threadfn(data);
}
kthread_exit(ret);
}
/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
return NUMA_NO_NODE;
}
static void create_kthread(struct kthread_create_info *create)
{
int pid;
#ifdef CONFIG_NUMA
current->pref_node_fork = create->node;
#endif
/* We want our own signal handler (we take no signals by default). */
pid = kernel_thread(kthread, create, create->full_name,
CLONE_FS | CLONE_FILES | SIGCHLD);
if (pid < 0) {
/* Release the structure when caller killed by a fatal signal. */
struct completion *done = xchg(&create->done, NULL);
kfree(create->full_name);
if (!done) {
kfree(create);
return;
}
create->result = ERR_PTR(pid);
complete(done);
}
}
static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
void *data, int node,
const char namefmt[],
va_list args)
{
DECLARE_COMPLETION_ONSTACK(done);
struct task_struct *task;
struct kthread_create_info *create = kmalloc(sizeof(*create),
GFP_KERNEL);
if (!create)
return ERR_PTR(-ENOMEM);
create->threadfn = threadfn;
create->data = data;
create->node = node;
create->done = &done;
create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
if (!create->full_name) {
task = ERR_PTR(-ENOMEM);
goto free_create;
}
spin_lock(&kthread_create_lock);
list_add_tail(&create->list, &kthread_create_list);
spin_unlock(&kthread_create_lock);
wake_up_process(kthreadd_task);
/*
* Wait for completion in killable state, for I might be chosen by
* the OOM killer while kthreadd is trying to allocate memory for
* new kernel thread.
*/
if (unlikely(wait_for_completion_killable(&done))) {
/*
* If I was killed by a fatal signal before kthreadd (or new
* kernel thread) calls complete(), leave the cleanup of this
* structure to that thread.
*/
if (xchg(&create->done, NULL))
return ERR_PTR(-EINTR);
/*
* kthreadd (or new kernel thread) will call complete()
* shortly.
*/
wait_for_completion(&done);
}
task = create->result;
free_create:
	kfree(create);
	return task;
}
/**
* kthread_create_on_node - create a kthread.
* @threadfn: the function to run until signal_pending(current).
* @data: data ptr for @threadfn.
* @node: task and thread structures for the thread are allocated on this node
* @namefmt: printf-style name for the thread.
*
* Description: This helper function creates and names a kernel
* thread. The thread will be stopped: use wake_up_process() to start
* it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
* is affine to all CPUs.
*
* If thread is going to be bound on a particular cpu, give its node
* in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
* When woken, the thread will run @threadfn() with @data as its
* argument. @threadfn() can either return directly if it is a
* standalone thread for which no one will call kthread_stop(), or
* return when 'kthread_should_stop()' is true (which means
* kthread_stop() has been called). The return value should be zero
* or a negative error number; it will be passed to kthread_stop().
*
* Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
*/
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data, int node,
const char namefmt[],
...)
{
struct task_struct *task;
va_list args;
va_start(args, namefmt);
task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
va_end(args);
return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
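/*
 * Illustrative usage sketch, not part of the kernel sources: create a
 * kthread in the stopped state, wake it, and stop it again later. The
 * example_* identifiers are hypothetical.
 */
static struct task_struct *example_task;

static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		/* Sleep until woken up, then do one unit of work. */
		schedule();
	}
	return 0;	/* returned to kthread_stop() */
}

static int example_start(void)
{
	example_task = kthread_create_on_node(example_thread_fn, NULL,
					      NUMA_NO_NODE, "example_kthread");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	/* The new thread is stopped; it only runs once it is woken up. */
	wake_up_process(example_task);
	return 0;
}

static void example_stop(void)
{
	/* Returns the value example_thread_fn() returned, here 0. */
	kthread_stop(example_task);
}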
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
unsigned long flags;
if (!wait_task_inactive(p, state)) {
WARN_ON(1);
return;
}
/* It's safe because the task is inactive. */
raw_spin_lock_irqsave(&p->pi_lock, flags);
do_set_cpus_allowed(p, mask);
p->flags |= PF_NO_SETAFFINITY;
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
__kthread_bind_mask(p, cpumask_of(cpu), state);
}
void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
struct kthread *kthread = to_kthread(p);
__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
WARN_ON_ONCE(kthread->started);
}
/**
* kthread_bind - bind a just-created kthread to a cpu.
* @p: thread created by kthread_create().
* @cpu: cpu (might not be online, must be possible) for @k to run on.
*
* Description: This function is equivalent to set_cpus_allowed(),
* except that @cpu doesn't need to be online, and the thread must be
* stopped (i.e., just returned from kthread_create()).
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
struct kthread *kthread = to_kthread(p);
__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
WARN_ON_ONCE(kthread->started);
}
EXPORT_SYMBOL(kthread_bind);
/**
* kthread_create_on_cpu - Create a cpu bound kthread
* @threadfn: the function to run until signal_pending(current).
* @data: data ptr for @threadfn.
* @cpu: The cpu on which the thread should be bound,
* @namefmt: printf-style name for the thread. Format is restricted
* to "name.*%u". Code fills in cpu number.
*
* Description: This helper function creates and names a kernel thread
*/
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
void *data, unsigned int cpu,
const char *namefmt)
{
struct task_struct *p;
p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
cpu);
if (IS_ERR(p))
return p;
kthread_bind(p, cpu);
/* CPU hotplug need to bind once again when unparking the thread. */
to_kthread(p)->cpu = cpu;
return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);
void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
struct kthread *kthread = to_kthread(k);
if (!kthread)
return;
WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
if (cpu < 0) {
clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
return;
}
kthread->cpu = cpu;
set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
bool kthread_is_per_cpu(struct task_struct *p)
{
struct kthread *kthread = __to_kthread(p);
if (!kthread)
		return false;
	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
*
* Sets kthread_should_park() for @k to return false, wakes it, and
* waits for it to return. If the thread is marked percpu then it is
* bound to the cpu again.
*/
void kthread_unpark(struct task_struct *k)
{
struct kthread *kthread = to_kthread(k);
if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
return;
/*
* Newly created kthread was parked when the CPU was offline.
* The binding was lost and we need to set it again.
*/
if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
__kthread_bind(k, kthread->cpu, TASK_PARKED);
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
/*
* __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
*/
wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);
/**
* kthread_park - park a thread created by kthread_create().
* @k: thread created by kthread_create().
*
* Sets kthread_should_park() for @k to return true, wakes it, and
* waits for it to return. This can also be called after kthread_create()
* instead of calling wake_up_process(): the thread will park without
* calling threadfn().
*
* Returns 0 if the thread is parked, -ENOSYS if the thread exited.
* If called by the kthread itself just the park bit is set.
*/
int kthread_park(struct task_struct *k)
{
struct kthread *kthread = to_kthread(k);
if (WARN_ON(k->flags & PF_EXITING))
return -ENOSYS;
if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
return -EBUSY;
set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
if (k != current) {
wake_up_process(k);
/*
* Wait for __kthread_parkme() to complete(), this means we
* _will_ have TASK_PARKED and are about to call schedule().
*/
wait_for_completion(&kthread->parked);
/*
* Now wait for that schedule() to complete and the task to
* get scheduled out.
*/
WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
}
return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
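/*
 * Illustrative sketch, not part of the kernel sources: the typical
 * parking pattern used by per-CPU helper threads. The thread polls
 * kthread_should_park() and parks itself; a controller calls
 * kthread_park()/kthread_unpark() on it, e.g. around CPU hotplug.
 * example_percpu_fn() is hypothetical.
 */
static int example_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Blocks in TASK_PARKED until kthread_unpark(). */
			kthread_parkme();
			continue;
		}
		/* ... do one chunk of per-CPU work ... */
		cond_resched();
	}
	return 0;
}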
/**
* kthread_stop - stop a thread created by kthread_create().
* @k: thread created by kthread_create().
*
* Sets kthread_should_stop() for @k to return true, wakes it, and
* waits for it to exit. This can also be called after kthread_create()
* instead of calling wake_up_process(): the thread will exit without
* calling threadfn().
*
* If threadfn() may call kthread_exit() itself, the caller must ensure
* task_struct can't go away.
*
* Returns the result of threadfn(), or %-EINTR if wake_up_process()
* was never called.
*/
int kthread_stop(struct task_struct *k)
{
struct kthread *kthread;
int ret;
trace_sched_kthread_stop(k);
get_task_struct(k);
kthread = to_kthread(k);
set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
kthread_unpark(k);
set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
wake_up_process(k);
wait_for_completion(&kthread->exited);
ret = kthread->result;
put_task_struct(k);
trace_sched_kthread_stop_ret(ret);
return ret;
}
EXPORT_SYMBOL(kthread_stop);
/**
* kthread_stop_put - stop a thread and put its task struct
* @k: thread created by kthread_create().
*
* Stops a thread created by kthread_create() and puts its task_struct.
* Only use when holding an extra task struct reference obtained by
* calling get_task_struct().
*/
int kthread_stop_put(struct task_struct *k)
{
int ret;
ret = kthread_stop(k);
put_task_struct(k);
return ret;
}
EXPORT_SYMBOL(kthread_stop_put);
int kthreadd(void *unused)
{
static const char comm[TASK_COMM_LEN] = "kthreadd";
struct task_struct *tsk = current;
/* Setup a clean context for our children to inherit. */
set_task_comm(tsk, comm);
ignore_signals(tsk);
set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
cgroup_init_kthreadd();
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&kthread_create_list))
schedule();
__set_current_state(TASK_RUNNING);
spin_lock(&kthread_create_lock);
while (!list_empty(&kthread_create_list)) {
struct kthread_create_info *create;
create = list_entry(kthread_create_list.next,
struct kthread_create_info, list);
list_del_init(&create->list);
spin_unlock(&kthread_create_lock);
create_kthread(create);
spin_lock(&kthread_create_lock);
}
spin_unlock(&kthread_create_lock);
}
return 0;
}
int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
{
struct kthread *kthread = to_kthread(p);
cpumask_var_t affinity;
unsigned long flags;
int ret = 0;
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) {
WARN_ON(1);
return -EINVAL;
}
WARN_ON_ONCE(kthread->preferred_affinity);
if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
return -ENOMEM;
kthread->preferred_affinity = kzalloc(sizeof(struct cpumask), GFP_KERNEL);
if (!kthread->preferred_affinity) {
ret = -ENOMEM;
goto out;
}
mutex_lock(&kthreads_hotplug_lock);
cpumask_copy(kthread->preferred_affinity, mask);
WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
kthread_fetch_affinity(kthread, affinity);
/* It's safe because the task is inactive. */
raw_spin_lock_irqsave(&p->pi_lock, flags);
do_set_cpus_allowed(p, affinity);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
mutex_unlock(&kthreads_hotplug_lock);
out:
free_cpumask_var(affinity);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_affine_preferred);
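/*
 * Illustrative sketch, not part of the kernel sources: give a freshly
 * created, not yet woken kthread a preferred affinity so that hotplug
 * keeps pulling it back onto those CPUs whenever possible. example_fn
 * is a hypothetical thread function supplied by the caller.
 */
static struct task_struct *example_affine_start(int (*example_fn)(void *))
{
	struct task_struct *t;

	t = kthread_create(example_fn, NULL, "example_affine");
	if (IS_ERR(t))
		return t;
	/* Must be done before the thread is woken up for the first time. */
	kthread_affine_preferred(t, cpumask_of_node(0));
	wake_up_process(t);
	return t;
}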
/*
* Re-affine kthreads according to their preferences
* and the newly online CPU. The CPU down part is handled
* by select_fallback_rq() which by default re-affines to
* housekeepers from other nodes in case the preferred
* affinity doesn't apply anymore.
*/
static int kthreads_online_cpu(unsigned int cpu)
{
cpumask_var_t affinity;
struct kthread *k;
int ret;
guard(mutex)(&kthreads_hotplug_lock);
if (list_empty(&kthreads_hotplug))
return 0;
if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
return -ENOMEM;
ret = 0;
list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
kthread_is_per_cpu(k->task))) {
ret = -EINVAL;
continue;
}
kthread_fetch_affinity(k, affinity);
set_cpus_allowed_ptr(k->task, affinity);
}
free_cpumask_var(affinity);
return ret;
}
static int kthreads_init(void)
{
return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
kthreads_online_cpu, NULL);
}
early_initcall(kthreads_init);
void __kthread_init_worker(struct kthread_worker *worker,
const char *name,
struct lock_class_key *key)
{
memset(worker, 0, sizeof(struct kthread_worker));
raw_spin_lock_init(&worker->lock);
lockdep_set_class_and_name(&worker->lock, key, name);
INIT_LIST_HEAD(&worker->work_list);
INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);
/**
* kthread_worker_fn - kthread function to process kthread_worker
* @worker_ptr: pointer to initialized kthread_worker
*
* This function implements the main cycle of kthread worker. It processes
* work_list until it is stopped with kthread_stop(). It sleeps when the queue
* is empty.
*
* Works must not hold any locks or keep preemption or interrupts disabled
* when they finish. A safe point for freezing is provided after one work
* finishes and before a new one is started.
*
* Also the works must not be handled by more than one worker at the same time,
* see also kthread_queue_work().
*/
int kthread_worker_fn(void *worker_ptr)
{
struct kthread_worker *worker = worker_ptr;
struct kthread_work *work;
/*
* FIXME: Update the check and remove the assignment when all kthread
* worker users are created using kthread_create_worker*() functions.
*/
WARN_ON(worker->task && worker->task != current);
worker->task = current;
if (worker->flags & KTW_FREEZABLE)
set_freezable();
repeat:
set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
raw_spin_lock_irq(&worker->lock);
worker->task = NULL;
raw_spin_unlock_irq(&worker->lock);
return 0;
}
work = NULL;
raw_spin_lock_irq(&worker->lock);
if (!list_empty(&worker->work_list)) {
work = list_first_entry(&worker->work_list,
struct kthread_work, node);
list_del_init(&work->node);
}
worker->current_work = work;
raw_spin_unlock_irq(&worker->lock);
if (work) {
kthread_work_func_t func = work->func;
__set_current_state(TASK_RUNNING);
trace_sched_kthread_work_execute_start(work);
work->func(work);
/*
* Avoid dereferencing work after this point. The trace
* event only cares about the address.
*/
trace_sched_kthread_work_execute_end(work, func);
} else if (!freezing(current)) {
schedule();
} else {
/*
* Handle the case where the current remains
* TASK_INTERRUPTIBLE. try_to_freeze() expects
* the current to be TASK_RUNNING.
*/
__set_current_state(TASK_RUNNING);
}
try_to_freeze();
cond_resched();
goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
static __printf(3, 0) struct kthread_worker *
__kthread_create_worker_on_node(unsigned int flags, int node,
const char namefmt[], va_list args)
{
struct kthread_worker *worker;
struct task_struct *task;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (!worker)
return ERR_PTR(-ENOMEM);
kthread_init_worker(worker);
task = __kthread_create_on_node(kthread_worker_fn, worker,
node, namefmt, args);
if (IS_ERR(task))
goto fail_task;
worker->flags = flags;
worker->task = task;
return worker;
fail_task:
kfree(worker);
return ERR_CAST(task);
}
/**
* kthread_create_worker_on_node - create a kthread worker
* @flags: flags modifying the default behavior of the worker
* @node: task structure for the thread is allocated on this node
* @namefmt: printf-style name for the kthread worker (task).
*
* Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
* when the needed structures could not get allocated, and ERR_PTR(-EINTR)
* when the caller was killed by a fatal signal.
*/
struct kthread_worker *
kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], ...)
{
struct kthread_worker *worker;
va_list args;
va_start(args, namefmt);
worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
va_end(args);
return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_node);
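/*
 * Illustrative sketch, not part of the kernel sources: create a worker,
 * queue a work item on it, wait for it, and tear the worker down again.
 * The example_* identifiers are hypothetical.
 */
static void example_work_fn(struct kthread_work *work)
{
	pr_info("example work executed by %s\n", current->comm);
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static int example_worker_demo(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker_on_node(0, NUMA_NO_NODE,
					       "example_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_queue_work(worker, &example_work);
	kthread_flush_work(&example_work);	/* wait for completion */
	kthread_destroy_worker(worker);
	return 0;
}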
/**
* kthread_create_worker_on_cpu - create a kthread worker and bind it
* to a given CPU and the associated NUMA node.
* @cpu: CPU number
* @flags: flags modifying the default behavior of the worker
* @namefmt: printf-style name for the thread. Format is restricted
* to "name.*%u". Code fills in cpu number.
*
* Use a valid CPU number if you want to bind the kthread worker
* to the given CPU and the associated NUMA node.
*
* A good practice is to also include the cpu number in the worker name.
* For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%u").
*
* CPU hotplug:
* The kthread worker API is simple and generic. It just provides a way
* to create, use, and destroy workers.
*
* It is up to the API user how to handle CPU hotplug. They have to decide
* how to handle pending work items, prevent queuing new ones, and
* restore the functionality when the CPU goes off and on. There are a
* few catches:
*
* - CPU affinity gets lost when it is scheduled on an offline CPU.
*
* - The worker might not exist when the CPU was off when the user
* created the workers.
*
* Good practice is to implement two CPU hotplug callbacks and to
* destroy/create the worker when the CPU goes down/up.
*
* Return:
* The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
* when the needed structures could not get allocated, and ERR_PTR(-EINTR)
* when the caller was killed by a fatal signal.
*/
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
const char namefmt[])
{
struct kthread_worker *worker;
worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
if (!IS_ERR(worker))
kthread_bind(worker->task, cpu);
return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);
/*
* Returns true when the work could not be queued at the moment.
* It happens when it is already pending in a worker list
* or when it is being cancelled.
*/
static inline bool queuing_blocked(struct kthread_worker *worker,
struct kthread_work *work)
{
lockdep_assert_held(&worker->lock);
return !list_empty(&work->node) || work->canceling;
}
static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
struct kthread_work *work)
{
lockdep_assert_held(&worker->lock);
WARN_ON_ONCE(!list_empty(&work->node));
/* Do not use a work with >1 worker, see kthread_queue_work() */
WARN_ON_ONCE(work->worker && work->worker != worker);
}
/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
struct kthread_work *work,
struct list_head *pos)
{
kthread_insert_work_sanity_check(worker, work);
trace_sched_kthread_work_queue_work(worker, work);
list_add_tail(&work->node, pos);
work->worker = worker;
if (!worker->current_work && likely(worker->task))
wake_up_process(worker->task);
}
/**
* kthread_queue_work - queue a kthread_work
* @worker: target kthread_worker
* @work: kthread_work to queue
*
* Queue @work to the work processor @worker for async execution. @worker
* must have been created with kthread_create_worker(). Returns %true
* if @work was successfully queued, %false if it was already pending.
*
* Reinitialize the work if it needs to be used by another worker.
* For example, when the worker was stopped and started again.
*/
bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work)
{
bool ret = false;
unsigned long flags;
raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
kthread_insert_work(worker, work, &worker->work_list);
ret = true;
}
raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
/**
* kthread_delayed_work_timer_fn - callback that queues the associated kthread
* delayed work when the timer expires.
* @t: pointer to the expired timer
*
* The format of the function is defined by struct timer_list.
* It should have been called from irqsafe timer with irq already off.
*/
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
struct kthread_delayed_work *dwork = timer_container_of(dwork, t,
timer);
struct kthread_work *work = &dwork->work;
struct kthread_worker *worker = work->worker;
unsigned long flags;
/*
* This might happen when a pending work is reinitialized.
* It means that it is being used in a wrong way.
*/
if (WARN_ON_ONCE(!worker))
return;
raw_spin_lock_irqsave(&worker->lock, flags);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
/* Move the work from worker->delayed_work_list. */
WARN_ON_ONCE(list_empty(&work->node));
list_del_init(&work->node);
if (!work->canceling)
kthread_insert_work(worker, work, &worker->work_list);
raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
static void __kthread_queue_delayed_work(struct kthread_worker *worker,
struct kthread_delayed_work *dwork,
unsigned long delay)
{
struct timer_list *timer = &dwork->timer;
struct kthread_work *work = &dwork->work;
WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
/*
* If @delay is 0, queue @dwork->work immediately. This is for
* both optimization and correctness. The earliest @timer can
* expire is on the closest next tick and delayed_work users depend
* on that there's no such delay when @delay is 0.
*/
if (!delay) {
kthread_insert_work(worker, work, &worker->work_list);
return;
}
/* Be paranoid and try to detect possible races already now. */
kthread_insert_work_sanity_check(worker, work);
list_add(&work->node, &worker->delayed_work_list);
work->worker = worker;
timer->expires = jiffies + delay;
add_timer(timer);
}
/**
* kthread_queue_delayed_work - queue the associated kthread work
* after a delay.
* @worker: target kthread_worker
* @dwork: kthread_delayed_work to queue
* @delay: number of jiffies to wait before queuing
*
* If the work has not been pending it starts a timer that will queue
* the work after the given @delay. If @delay is zero, it queues the
* work immediately.
*
* Return: %false if the @work has already been pending. It means that
* either the timer was running or the work was queued. It returns %true
* otherwise.
*/
bool kthread_queue_delayed_work(struct kthread_worker *worker,
struct kthread_delayed_work *dwork,
unsigned long delay)
{
struct kthread_work *work = &dwork->work;
unsigned long flags;
bool ret = false;
raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
__kthread_queue_delayed_work(worker, dwork, delay);
ret = true;
}
raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
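/*
 * Illustrative sketch, not part of the kernel sources: arm a delayed
 * work item on an existing worker and cancel it synchronously on
 * teardown. example_dwork_fn() and example_dwork are hypothetical, and
 * "worker" is assumed to have been created with
 * kthread_create_worker_on_node().
 */
static void example_dwork_fn(struct kthread_work *work)
{
	pr_info("example delayed work expired\n");
}

static DEFINE_KTHREAD_DELAYED_WORK(example_dwork, example_dwork_fn);

static void example_delayed_demo(struct kthread_worker *worker)
{
	/* Run example_dwork_fn() on the worker after roughly one second. */
	kthread_queue_delayed_work(worker, &example_dwork, HZ);

	/* ... */

	/* Make sure neither the timer nor the callback is still live. */
	kthread_cancel_delayed_work_sync(&example_dwork);
}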
struct kthread_flush_work {
struct kthread_work work;
struct completion done;
};
static void kthread_flush_work_fn(struct kthread_work *work)
{
struct kthread_flush_work *fwork =
container_of(work, struct kthread_flush_work, work);
complete(&fwork->done);
}
/**
* kthread_flush_work - flush a kthread_work
* @work: work to flush
*
* If @work is queued or executing, wait for it to finish execution.
*/
void kthread_flush_work(struct kthread_work *work)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
struct kthread_worker *worker;
bool noop = false;
worker = work->worker;
if (!worker)
return;
raw_spin_lock_irq(&worker->lock);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
if (!list_empty(&work->node))
kthread_insert_work(worker, &fwork.work, work->node.next);
else if (worker->current_work == work)
kthread_insert_work(worker, &fwork.work,
worker->work_list.next);
else
noop = true;
raw_spin_unlock_irq(&worker->lock);
if (!noop)
wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);
/*
* Make sure that the timer is neither set nor running and can no
* longer manipulate the work list_head.
*
* The function is called under worker->lock. The lock is temporarily
* released but the timer can't be set again in the meantime.
*/
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
unsigned long *flags)
{
struct kthread_delayed_work *dwork =
container_of(work, struct kthread_delayed_work, work);
struct kthread_worker *worker = work->worker;
/*
* timer_delete_sync() must be called to make sure that the timer
* callback is not running. The lock must be temporarily released
* to avoid a deadlock with the callback. In the meantime,
* any queuing is blocked by setting the canceling counter.
*/
work->canceling++;
raw_spin_unlock_irqrestore(&worker->lock, *flags);
timer_delete_sync(&dwork->timer);
raw_spin_lock_irqsave(&worker->lock, *flags);
work->canceling--;
}
/*
* This function removes the work from the worker queue.
*
* It is called under worker->lock. The caller must make sure that
* the timer used by delayed work is not running, e.g. by calling
* kthread_cancel_delayed_work_timer().
*
* The work might still be in use when this function finishes. See the
* current_work processed by the worker.
*
* Return: %true if @work was pending and successfully canceled,
* %false if @work was not pending
*/
static bool __kthread_cancel_work(struct kthread_work *work)
{
/*
* Try to remove the work from a worker list. It might either
* be from worker->work_list or from worker->delayed_work_list.
*/
if (!list_empty(&work->node)) {
list_del_init(&work->node);
return true;
}
return false;
}
/**
* kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
* @worker: kthread worker to use
* @dwork: kthread delayed work to queue
* @delay: number of jiffies to wait before queuing
*
* If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
* modify @dwork's timer so that it expires after @delay. If @delay is zero,
* @work is guaranteed to be queued immediately.
*
* Return: %false if @dwork was idle and queued, %true otherwise.
*
* A special case is when the work is being canceled in parallel.
* It might be caused either by the real kthread_cancel_delayed_work_sync()
* or yet another kthread_mod_delayed_work() call. We let the other command
* win and return %true here. The return value can be used for reference
* counting and the number of queued works stays the same. Anyway, the caller
* is supposed to synchronize these operations a reasonable way.
*
* This function is safe to call from any context including IRQ handler.
* See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
* for details.
*/
bool kthread_mod_delayed_work(struct kthread_worker *worker,
struct kthread_delayed_work *dwork,
unsigned long delay)
{
struct kthread_work *work = &dwork->work;
unsigned long flags;
int ret;
raw_spin_lock_irqsave(&worker->lock, flags);
/* Do not bother with canceling when never queued. */
if (!work->worker) {
ret = false;
goto fast_queue;
}
/* Work must not be used with >1 worker, see kthread_queue_work() */
WARN_ON_ONCE(work->worker != worker);
/*
* Temporarily cancel the work but do not fight with another command
* that is canceling the work as well.
*
* It is a bit tricky because of possible races with another
* mod_delayed_work() and cancel_delayed_work() callers.
*
* The timer must be canceled first because worker->lock is released
* when doing so. But the work can be removed from the queue (list)
* only when it can be queued again so that the return value can
* be used for reference counting.
*/
kthread_cancel_delayed_work_timer(work, &flags);
if (work->canceling) {
/* The number of works in the queue does not change. */
ret = true;
goto out;
}
ret = __kthread_cancel_work(work);
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
out:
raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
struct kthread_worker *worker = work->worker;
unsigned long flags;
int ret = false;
if (!worker)
goto out;
raw_spin_lock_irqsave(&worker->lock, flags);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
if (is_dwork)
kthread_cancel_delayed_work_timer(work, &flags);
ret = __kthread_cancel_work(work);
if (worker->current_work != work)
goto out_fast;
/*
* The work is in progress and we need to wait with the lock released.
* In the meantime, block any queuing by setting the canceling counter.
*/
work->canceling++;
raw_spin_unlock_irqrestore(&worker->lock, flags);
kthread_flush_work(work);
raw_spin_lock_irqsave(&worker->lock, flags);
work->canceling--;
out_fast:
raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
return ret;
}
/**
* kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
* @work: the kthread work to cancel
*
* Cancel @work and wait for its execution to finish. This function
* can be used even if the work re-queues itself. On return from this
* function, @work is guaranteed to be not pending or executing on any CPU.
*
* kthread_cancel_work_sync(&delayed_work->work) must not be used for
* delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
*
* The caller must ensure that the worker on which @work was last
* queued can't be destroyed before this function returns.
*
* Return: %true if @work was pending, %false otherwise.
*/
bool kthread_cancel_work_sync(struct kthread_work *work)
{
return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
/**
* kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
* wait for it to finish.
* @dwork: the kthread delayed work to cancel
*
* This is kthread_cancel_work_sync() for delayed works.
*
* Return: %true if @dwork was pending, %false otherwise.
*/
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
/**
* kthread_flush_worker - flush all current works on a kthread_worker
* @worker: worker to flush
*
* Wait until all currently executing or pending works on @worker are
* finished.
*/
void kthread_flush_worker(struct kthread_worker *worker)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
kthread_queue_work(worker, &fwork.work);
wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);
/**
* kthread_destroy_worker - destroy a kthread worker
* @worker: worker to be destroyed
*
* Flush and destroy @worker. The simple flush is enough because the kthread
* worker API is used only in trivial scenarios. There are no multi-step state
* machines needed.
*
* Note that this function is not responsible for handling delayed work, so
* the caller is responsible for queuing or canceling all delayed work items
* before invoking this function.
*/
void kthread_destroy_worker(struct kthread_worker *worker)
{
struct task_struct *task;
task = worker->task;
if (WARN_ON(!task))
return;
kthread_flush_worker(worker);
kthread_stop(task);
WARN_ON(!list_empty(&worker->delayed_work_list));
WARN_ON(!list_empty(&worker->work_list));
kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
/**
* kthread_use_mm - make the calling kthread operate on an address space
* @mm: address space to operate on
*/
void kthread_use_mm(struct mm_struct *mm)
{
struct mm_struct *active_mm;
struct task_struct *tsk = current;
WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
WARN_ON_ONCE(tsk->mm);
/*
* It is possible for mm to be the same as tsk->active_mm, but
* we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
* because these references are not equivalent.
*/
mmgrab(mm);
task_lock(tsk);
/* Hold off tlb flush IPIs while switching mm's */
local_irq_disable();
active_mm = tsk->active_mm;
tsk->active_mm = mm;
tsk->mm = mm;
membarrier_update_current_mm(mm);
switch_mm_irqs_off(active_mm, mm, tsk);
local_irq_enable();
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
#endif
/*
* When a kthread starts operating on an address space, the loop
* in membarrier_{private,global}_expedited() may not observe
* that tsk->mm has been set, and thus not issue an IPI. Membarrier requires a
* memory barrier after storing to tsk->mm, before accessing
* user-space memory. A full memory barrier for membarrier
* {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
* mmdrop_lazy_tlb().
*/
mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);
/**
* kthread_unuse_mm - reverse the effect of kthread_use_mm()
* @mm: address space to operate on
*/
void kthread_unuse_mm(struct mm_struct *mm)
{
struct task_struct *tsk = current;
WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
WARN_ON_ONCE(!tsk->mm);
task_lock(tsk);
/*
* When a kthread stops operating on an address space, the loop
* in membarrier_{private,global}_expedited() may not observe
* tsk->mm any longer, and thus not issue an IPI. Membarrier requires a
* memory barrier after accessing user-space memory, before
* clearing tsk->mm.
*/
smp_mb__after_spinlock();
local_irq_disable();
tsk->mm = NULL;
membarrier_update_current_mm(NULL);
mmgrab_lazy_tlb(mm);
/* active_mm is still 'mm' */
enter_lazy_tlb(mm, tsk);
local_irq_enable();
task_unlock(tsk);
mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
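/*
 * Illustrative sketch, not part of the kernel sources: a kthread
 * temporarily adopting a user address space so it can use
 * copy_from_user() on behalf of that process, similar to what io_uring
 * or vhost workers do. The caller is assumed to hold a reference on
 * @mm (mmget()) and <linux/uaccess.h> is assumed to be available.
 */
static int example_copy_on_behalf(struct mm_struct *mm,
				  void __user *uaddr, void *buf, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);
	if (copy_from_user(buf, uaddr, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);

	return ret;
}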
#ifdef CONFIG_BLK_CGROUP
/**
* kthread_associate_blkcg - associate blkcg to current kthread
* @css: the cgroup info
*
* Current thread must be a kthread. The thread is running jobs on behalf of
* other threads. In some cases, we expect the jobs to attach the cgroup info
* of the original threads instead of that of the current thread. This function
* stores the original thread's cgroup info in the current kthread context for
* later retrieval.
*/
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
struct kthread *kthread;
if (!(current->flags & PF_KTHREAD))
return;
kthread = to_kthread(current);
if (!kthread)
return;
if (kthread->blkcg_css) {
css_put(kthread->blkcg_css);
kthread->blkcg_css = NULL;
}
if (css) {
css_get(css);
kthread->blkcg_css = css;
}
}
EXPORT_SYMBOL(kthread_associate_blkcg);
/**
* kthread_blkcg - get associated blkcg css of current kthread
*
* Current thread must be a kthread.
*/
struct cgroup_subsys_state *kthread_blkcg(void)
{
struct kthread *kthread;
	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* A hash table (hashtab) maintains associations between
* key values and datum values. The type of the key values
* and the type of the datum values is arbitrary. The
* functions for hash computation and key comparison are
* provided by the creator of the table.
*
* Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
*/
#ifndef _SS_HASHTAB_H_
#define _SS_HASHTAB_H_
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#define HASHTAB_MAX_NODES U32_MAX
struct hashtab_key_params {
u32 (*hash)(const void *key); /* hash func */
int (*cmp)(const void *key1, const void *key2); /* comparison func */
};
struct hashtab_node {
void *key;
void *datum;
struct hashtab_node *next;
};
struct hashtab {
struct hashtab_node **htable; /* hash table */
u32 size; /* number of slots in hash table */
u32 nel; /* number of elements in hash table */
};
struct hashtab_info {
u32 slots_used;
u32 max_chain_len;
u64 chain2_len_sum;
};
/*
* Initializes a new hash table with the specified characteristics.
*
* Returns -ENOMEM if insufficient space is available or 0 otherwise.
*/
int hashtab_init(struct hashtab *h, u32 nel_hint);
int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst, void *key,
void *datum);
/*
* Inserts the specified (key, datum) pair into the specified hash table.
*
* Returns -ENOMEM on memory allocation error,
* -EEXIST if there is already an entry with the same key,
* -EINVAL for general errors or
* 0 otherwise.
*/
static inline int hashtab_insert(struct hashtab *h, void *key, void *datum,
struct hashtab_key_params key_params)
{
u32 hvalue;
struct hashtab_node *prev, *cur;
cond_resched();
if (!h->size || h->nel == HASHTAB_MAX_NODES)
return -EINVAL;
hvalue = key_params.hash(key) & (h->size - 1);
prev = NULL;
cur = h->htable[hvalue];
while (cur) {
int cmp = key_params.cmp(key, cur->key);
if (cmp == 0)
return -EEXIST;
if (cmp < 0)
break;
prev = cur;
cur = cur->next;
}
return __hashtab_insert(h, prev ? &prev->next : &h->htable[hvalue], key,
datum);
}
/*
* Searches for the entry with the specified key in the hash table.
*
* Returns NULL if no entry has the specified key or
* the datum of the entry otherwise.
*/
static inline void *hashtab_search(struct hashtab *h, const void *key,
struct hashtab_key_params key_params)
{
u32 hvalue;
struct hashtab_node *cur;
	if (!h->size)
		return NULL;

	hvalue = key_params.hash(key) & (h->size - 1);
cur = h->htable[hvalue];
	while (cur) {
		int cmp = key_params.cmp(key, cur->key);

		if (cmp == 0)
			return cur->datum;
		if (cmp < 0)
			break;
		cur = cur->next;
	}
return NULL;
}
/*
* Destroys the specified hash table.
*/
void hashtab_destroy(struct hashtab *h);
/*
* Applies the specified apply function to (key,datum,args)
* for each entry in the specified hash table.
*
* The order in which the function is applied to the entries
* is dependent upon the internal structure of the hash table.
*
* If apply returns a non-zero status, then hashtab_map will cease
* iterating through the hash table and will propagate the error
* return to its caller.
*/
int hashtab_map(struct hashtab *h, int (*apply)(void *k, void *d, void *args),
void *args);
int hashtab_duplicate(struct hashtab *new, const struct hashtab *orig,
int (*copy)(struct hashtab_node *new,
const struct hashtab_node *orig, void *args),
int (*destroy)(void *k, void *d, void *args), void *args);
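/*
 * Illustrative usage sketch, not part of this header: a table keyed by
 * NUL-terminated strings. example_hash(), example_cmp() and
 * example_use() are hypothetical; <linux/string.h> is assumed to be
 * available for strcmp().
 */
static inline u32 example_hash(const void *key)
{
	const char *s = key;
	u32 h = 0;

	while (*s)
		h = h * 31 + *s++;
	return h;
}

static inline int example_cmp(const void *key1, const void *key2)
{
	return strcmp(key1, key2);
}

static inline int example_use(void)
{
	static const struct hashtab_key_params params = {
		.hash = example_hash,
		.cmp = example_cmp,
	};
	struct hashtab h;
	int rc;

	rc = hashtab_init(&h, 16);
	if (rc)
		return rc;
	rc = hashtab_insert(&h, "alpha", "first datum", params);
	if (!rc)
		pr_info("alpha -> %s\n",
			(char *)hashtab_search(&h, "alpha", params));
	hashtab_destroy(&h);
	return rc;
}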
#ifdef CONFIG_SECURITY_SELINUX_DEBUG
/* Fill info with some hash table statistics */
void hashtab_stat(struct hashtab *h, struct hashtab_info *info);
#else
static inline void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
{
return;
}
#endif
#endif /* _SS_HASHTAB_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* async.c: Asynchronous function calls for boot performance
*
* (C) Copyright 2009 Intel Corporation
* Author: Arjan van de Ven <arjan@linux.intel.com>
*/
/*
Goals and Theory of Operation
The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.
More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)
Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).
The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.
The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.
Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function, before returning
from their init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.
*/
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "workqueue_internal.h"
static async_cookie_t next_cookie = 1;
#define MAX_WORK 32768
#define ASYNC_COOKIE_MAX ULLONG_MAX /* infinity cookie */
static LIST_HEAD(async_global_pending); /* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);
static struct workqueue_struct *async_wq;
struct async_entry {
struct list_head domain_list;
struct list_head global_list;
struct work_struct work;
async_cookie_t cookie;
async_func_t func;
void *data;
struct async_domain *domain;
};
static DECLARE_WAIT_QUEUE_HEAD(async_done);
static atomic_t entry_count;
static long long microseconds_since(ktime_t start)
{
ktime_t now = ktime_get();
return ktime_to_ns(ktime_sub(now, start)) >> 10;
}
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
struct async_entry *first = NULL;
async_cookie_t ret = ASYNC_COOKIE_MAX;
unsigned long flags;
spin_lock_irqsave(&async_lock, flags);
if (domain) {
if (!list_empty(&domain->pending))
first = list_first_entry(&domain->pending,
struct async_entry, domain_list);
} else {
if (!list_empty(&async_global_pending))
first = list_first_entry(&async_global_pending,
struct async_entry, global_list);
}
if (first)
ret = first->cookie;
spin_unlock_irqrestore(&async_lock, flags);
return ret;
}
/*
* pick the first pending entry and run it
*/
static void async_run_entry_fn(struct work_struct *work)
{
struct async_entry *entry =
container_of(work, struct async_entry, work);
unsigned long flags;
ktime_t calltime;
/* 1) run (and print duration) */
pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
entry->func, task_pid_nr(current));
calltime = ktime_get();
entry->func(entry->data, entry->cookie);
pr_debug("initcall %lli_%pS returned after %lld usecs\n",
(long long)entry->cookie, entry->func,
microseconds_since(calltime));
/* 2) remove self from the pending queues */
spin_lock_irqsave(&async_lock, flags);
list_del_init(&entry->domain_list);
list_del_init(&entry->global_list);
/* 3) free the entry */
kfree(entry);
atomic_dec(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
/* 4) wake up any waiters */
wake_up(&async_done);
}
static async_cookie_t __async_schedule_node_domain(async_func_t func,
void *data, int node,
struct async_domain *domain,
struct async_entry *entry)
{
async_cookie_t newcookie;
unsigned long flags;
INIT_LIST_HEAD(&entry->domain_list);
INIT_LIST_HEAD(&entry->global_list);
INIT_WORK(&entry->work, async_run_entry_fn);
entry->func = func;
entry->data = data;
entry->domain = domain;
spin_lock_irqsave(&async_lock, flags);
/* allocate cookie and queue */
newcookie = entry->cookie = next_cookie++;
list_add_tail(&entry->domain_list, &domain->pending);
if (domain->registered)
list_add_tail(&entry->global_list, &async_global_pending);
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
/* schedule for execution */
queue_work_node(node, async_wq, &entry->work);
return newcookie;
}
/**
* async_schedule_node_domain - NUMA specific version of async_schedule_domain
* @func: function to execute asynchronously
* @data: data pointer to pass to the function
* @node: NUMA node that we want to schedule this on or close to
* @domain: the domain
*
* Returns an async_cookie_t that may be used for checkpointing later.
* @domain may be used in the async_synchronize_*_domain() functions to
* wait within a certain synchronization domain rather than globally.
*
* Note: This function may be called from atomic or non-atomic contexts.
*
* The node requested will be honored on a best effort basis. If the node
* has no CPUs associated with it then the work is distributed among all
* available CPUs.
*/
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
int node, struct async_domain *domain)
{
struct async_entry *entry;
unsigned long flags;
async_cookie_t newcookie;
/* allow irq-off callers */
entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
/*
* If we're out of memory or if there's too much work
* pending already, we execute synchronously.
*/
if (!entry || atomic_read(&entry_count) > MAX_WORK) {
kfree(entry);
spin_lock_irqsave(&async_lock, flags);
newcookie = next_cookie++;
spin_unlock_irqrestore(&async_lock, flags);
/* low on memory.. run synchronously */
func(data, newcookie);
return newcookie;
}
return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
/**
* async_schedule_node - NUMA specific version of async_schedule
* @func: function to execute asynchronously
* @data: data pointer to pass to the function
* @node: NUMA node that we want to schedule this on or close to
*
* Returns an async_cookie_t that may be used for checkpointing later.
* Note: This function may be called from atomic or non-atomic contexts.
*
* The node requested will be honored on a best effort basis. If the node
* has no CPUs associated with it then the work is distributed among all
* available CPUs.
*/
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
/**
* async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
* @func: function to execute asynchronously
* @dev: device argument to be passed to function
*
* @dev is used as both the argument for the function and to provide NUMA
* context for where to run the function.
*
* If the asynchronous execution of @func is scheduled successfully, return
* true. Otherwise, do nothing and return false, unlike async_schedule_dev()
* that will run the function synchronously then.
*/
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
struct async_entry *entry;
entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
/* Give up if there is no memory or too much work. */
if (!entry || atomic_read(&entry_count) > MAX_WORK) {
kfree(entry);
return false;
}
__async_schedule_node_domain(func, dev, dev_to_node(dev),
&async_dfl_domain, entry);
return true;
}
/**
* async_synchronize_full - synchronize all asynchronous function calls
*
* This function waits until all asynchronous function calls have been done.
*/
void async_synchronize_full(void)
{
async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
/**
* async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
* @domain: the domain to synchronize
*
* This function waits until all asynchronous function calls for the
* synchronization domain specified by @domain have been done.
*/
void async_synchronize_full_domain(struct async_domain *domain)
{
async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
/**
* async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
* @cookie: async_cookie_t to use as checkpoint
* @domain: the domain to synchronize (%NULL for all registered domains)
*
* This function waits until all asynchronous function calls for the
* synchronization domain specified by @domain submitted prior to @cookie
* have been done.
*/
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
ktime_t starttime;
pr_debug("async_waiting @ %i\n", task_pid_nr(current));
starttime = ktime_get();
wait_event(async_done, lowest_in_progress(domain) >= cookie);
pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
/**
* async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
* @cookie: async_cookie_t to use as checkpoint
*
* This function waits until all asynchronous function calls prior to @cookie
* have been done.
*/
void async_synchronize_cookie(async_cookie_t cookie)
{
async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
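/*
 * Illustrative sketch, not part of the kernel sources, of the pattern
 * described in the theory-of-operation comment at the top of this file:
 * an asynchronously executed routine synchronizes on its own cookie
 * before performing its externally visible step, so that the visible
 * steps happen in submission order. The example_* identifiers are
 * hypothetical.
 */
static void example_probe_async(void *data, async_cookie_t cookie)
{
	/* Slow, independent initialization may run here, out of order. */

	/* Wait for everything scheduled before us to reach this point. */
	async_synchronize_cookie(cookie);

	/* The externally visible step now happens in submission order. */
	pr_info("registering %s\n", (const char *)data);
}

static void example_schedule_probes(void)
{
	async_schedule(example_probe_async, "deviceA");
	async_schedule(example_probe_async, "deviceB");

	/* E.g. before returning from a module init function. */
	async_synchronize_full();
}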
/**
* current_is_async - is %current an async worker task?
*
* Returns %true if %current is an async worker task.
*/
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
void __init async_init(void)
{
/*
* Async can schedule a number of interdependent work items. However,
* unbound workqueues can handle only up to min_active interdependent
* work items. The default min_active of 8 isn't sufficient for async
* and can lead to stalls. Let's use a dedicated workqueue with raised
* min_active.
*/
async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
BUG_ON(!async_wq);
workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H
#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
/*
* Types defining task->signal and task->sighand and APIs using them:
*/
struct sighand_struct {
spinlock_t siglock;
refcount_t count;
wait_queue_head_t signalfd_wqh;
struct k_sigaction action[_NSIG];
};
/*
* Per-process accounting stats:
*/
struct pacct_struct {
int ac_flag;
long ac_exitcode;
unsigned long ac_mem;
u64 ac_utime, ac_stime;
unsigned long ac_minflt, ac_majflt;
};
struct cpu_itimer {
u64 expires;
u64 incr;
};
/*
* This is the atomic variant of task_cputime, which can be used for
* storing and updating task_cputime statistics without locking.
*/
struct task_cputime_atomic {
atomic64_t utime;
atomic64_t stime;
atomic64_t sum_exec_runtime;
};
#define INIT_CPUTIME_ATOMIC \
(struct task_cputime_atomic) { \
.utime = ATOMIC64_INIT(0), \
.stime = ATOMIC64_INIT(0), \
.sum_exec_runtime = ATOMIC64_INIT(0), \
}
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime_atomic: atomic thread group interval timers.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
struct thread_group_cputimer {
struct task_cputime_atomic cputime_atomic;
};
struct multiprocess_signals {
sigset_t signal;
struct hlist_node node;
};
struct core_thread {
struct task_struct *task;
struct core_thread *next;
};
struct core_state {
atomic_t nr_threads;
struct core_thread dumper;
struct completion startup;
};
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of
* the locking of signal_struct.
*/
struct signal_struct {
refcount_t sigcnt;
atomic_t live;
int nr_threads;
int quick_threads;
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
/* current thread group signal load-balancing target: */
struct task_struct *curr_target;
/* shared signal handling: */
struct sigpending shared_pending;
/* For collecting multiprocess signals during fork */
struct hlist_head multiprocess;
/* thread group exit support */
int group_exit_code;
/* notify group_exec_task when notify_count is less than or equal to 0 */
int notify_count;
struct task_struct *group_exec_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
struct core_state *core_state; /* coredumping support */
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
* to this process instead of 'init'. The service manager is
* able to receive SIGCHLD signals and is able to investigate
* the process until it calls wait(). All children of this
* process will inherit a flag if they should look for a
* child_subreaper process at exit.
*/
unsigned int is_child_subreaper:1;
unsigned int has_child_subreaper:1;
#ifdef CONFIG_POSIX_TIMERS
/* POSIX.1b Interval Timers */
unsigned int timer_create_restore_ids:1;
atomic_t next_posix_timer_id;
struct hlist_head posix_timers;
struct hlist_head ignored_posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
ktime_t it_real_incr;
/*
* ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
* CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
* values are defined to 0 and 1 respectively
*/
struct cpu_itimer it[2];
/*
* Thread group totals for process CPU timers.
* See thread_group_cputimer(), et al, for details.
*/
struct thread_group_cputimer cputimer;
#endif
/* Empty if CONFIG_POSIX_TIMERS=n */
struct posix_cputimers posix_cputimers;
/* PID/PID hash table linkage. */
struct pid *pids[PIDTYPE_MAX];
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
#endif
struct pid *tty_old_pgrp;
/* boolean value for session group leader */
int leader;
struct tty_struct *tty; /* NULL if no tty */
#ifdef CONFIG_SCHED_AUTOGROUP
struct autogroup *autogroup;
#endif
/*
* Cumulative resource counters for dead threads in the group,
* and for reaped dead child processes forked by this group.
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
seqlock_t stats_lock;
u64 utime, stime, cutime, cstime;
u64 gtime;
u64 cgtime;
struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
unsigned long maxrss, cmaxrss;
struct task_io_accounting ioac;
/*
* Cumulative ns of scheduled CPU time for dead threads in the
* group, not including a zombie group leader. (This only differs
* from jiffies_to_ns(utime + stime) if sched_clock uses something
* other than jiffies.)
*/
unsigned long long sum_sched_runtime;
/*
* We don't bother to synchronize most readers of this at all,
* because there is no reader checking a limit that actually needs
* to get both rlim_cur and rlim_max atomically, and either one
* alone is a single word that can safely be read normally.
* getrlimit/setrlimit use task_lock(current->group_leader) to
* protect this instead of the siglock, because they really
* have no need to disable irqs.
*/
struct rlimit rlim[RLIM_NLIMITS];
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
unsigned audit_tty;
struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
struct rw_semaphore cgroup_threadgroup_rwsem;
#endif
/*
* Thread is the potential origin of an oom condition; kill first on
* oom
*/
bool oom_flag_origin;
short oom_score_adj; /* OOM kill score adjustment */
short oom_score_adj_min; /* OOM kill score adjustment min value.
* Only settable by CAP_SYS_RESOURCE. */
struct mm_struct *oom_mm; /* recorded mm when the thread group got
* killed by the oom killer */
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably ptrace).
* Deprecated: do not use in new code.
* Use exec_update_lock instead.
*/
struct rw_semaphore exec_update_lock; /* Held while task_struct is
* being updated during exec,
* and may have inconsistent
* permissions.
*/
} __randomize_layout;
/*
* Bits in flags field of signal_struct.
*/
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
/*
* Pending notifications to parent.
*/
#define SIGNAL_CLD_STOPPED 0x00000010
#define SIGNAL_CLD_CONTINUED 0x00000020
#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
SIGNAL_STOP_CONTINUED)
static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
struct task_struct *task = current;
kernel_siginfo_t __info;
enum pid_type __type;
int ret;
spin_lock_irq(&task->sighand->siglock);
ret = dequeue_signal(&task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
}
static inline void kernel_signal_stop(void)
{
spin_lock_irq(&current->sighand->siglock);
if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
current->jobctl |= JOBCTL_STOPPED;
set_special_state(TASK_STOPPED);
}
spin_unlock_irq(&current->sighand->siglock);
schedule();
}
int force_sig_fault_to_task(int sig, int code, void __user *addr,
struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr);
int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);
int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int send_sig_perf(void __user *addr, u32 type, u64 sig_data);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);
extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline void clear_notify_signal(void)
{
clear_thread_flag(TIF_NOTIFY_SIGNAL);
smp_mb__after_atomic();
}
/*
* Returns 'true' if kick_process() is needed to force a transition from
* user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
*/
static inline bool __set_notify_signal(struct task_struct *task)
{
return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
!wake_up_state(task, TASK_INTERRUPTIBLE);
}
/*
* Called to break out of interruptible wait loops, and enter the
* exit_to_user_mode_loop().
*/
static inline void set_notify_signal(struct task_struct *task)
{
if (__set_notify_signal(task))
kick_process(task);
}
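/*
* Illustrative sketch (not part of this header, all names hypothetical except
* the task_work API): queueing task_work with TWA_SIGNAL, which relies on the
* TIF_NOTIFY_SIGNAL wakeup described above so the callback runs promptly on
* the task's way back to user mode.
*
*     static struct callback_head my_twork;
*
*     static void my_twork_fn(struct callback_head *cb)
*     {
*             // runs in the context of the target task on return to user mode
*     }
*
*     static void notify_task(struct task_struct *task)
*     {
*             init_task_work(&my_twork, my_twork_fn);
*             if (task_work_add(task, &my_twork, TWA_SIGNAL))
*                     pr_debug("task is exiting, work not queued\n");
*     }
*/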
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return -ERESTARTNOINTR;
}
static inline int task_sigpending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}
static inline int signal_pending(struct task_struct *p)
{
/*
* TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
* behavior in terms of ensuring that we break out of wait loops
* so that notify signal callbacks can be processed.
*/
if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
return 1;
return task_sigpending(p);
}
static inline int __fatal_signal_pending(struct task_struct *p)
{
return unlikely(sigismember(&p->pending.signal, SIGKILL));
}
static inline int fatal_signal_pending(struct task_struct *p)
{
return task_sigpending(p) && __fatal_signal_pending(p);
}
static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
if (!signal_pending(p))
return 0;
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
/*
* This should only be used in fault handlers to decide whether we
* should stop the current fault routine to handle the signals
* instead, especially with the case where we've got interrupted with
* a VM_FAULT_RETRY.
*/
static inline bool fault_signal_pending(vm_fault_t fault_flags,
struct pt_regs *regs)
{
return unlikely((fault_flags & VM_FAULT_RETRY) &&
(fatal_signal_pending(current) ||
(user_mode(regs) && signal_pending(current))));
}
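/*
* Illustrative sketch (not part of this header): the typical pattern in an
* architecture page fault handler. Everything except fault_signal_pending()
* itself is assumed context; the no_context label is hypothetical.
*
*     fault = handle_mm_fault(vma, address, flags, regs);
*     if (fault_signal_pending(fault, regs)) {
*             if (!user_mode(regs))
*                     goto no_context;   // arch-specific kernel fixup path
*             return;                    // let the pending signal be delivered
*     }
*/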
/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
* This is required every time the blocked sigset_t changes.
* Callers must hold sighand->siglock.
*/
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
unsigned int state = 0;
if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
state = TASK_WAKEKILL | __TASK_TRACED;
}
signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
unsigned int state = 0;
if (resume) {
t->jobctl &= ~JOBCTL_TRACED;
state = __TASK_TRACED;
}
signal_wake_up_state(t, state);
}
void task_join_group_stop(struct task_struct *task);
#ifdef TIF_RESTORE_SIGMASK
/*
* Legacy restore_sigmask accessors. These are inefficient on
* SMP architectures because they require atomic operations.
*/
/**
* set_restore_sigmask() - make sure saved_sigmask processing gets done
*
* This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
* will run before returning to user mode, to process the flag. For
* all callers, TIF_SIGPENDING is already set or it's no harm to set
* it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
* arch code will notice on return to user mode, in case those bits
* are scarce. We set TIF_SIGPENDING here to ensure that the arch
* signal code always gets run when TIF_RESTORE_SIGMASK is set.
*/
static inline void set_restore_sigmask(void)
{
set_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline void clear_restore_sigmask(void)
{
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}
#else /* TIF_RESTORE_SIGMASK */
/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
if (!current->restore_sigmask)
return false;
current->restore_sigmask = false;
return true;
}
#endif
static inline void restore_saved_sigmask(void)
{
if (test_and_clear_restore_sigmask())
__set_current_blocked(&current->saved_sigmask);
}
extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
static inline void restore_saved_sigmask_unless(bool interrupted)
{
if (interrupted)
WARN_ON(!signal_pending(current));
else
restore_saved_sigmask();
}
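/*
* Illustrative sketch (not part of this header): the ppoll()/epoll_pwait()
* style pattern these helpers support. do_the_wait() and the exact error
* value checked are hypothetical placeholders.
*
*     ret = set_user_sigmask(umask, sigsetsize);   // saves ->blocked, installs umask
*     if (ret)
*             return ret;
*
*     ret = do_the_wait();                         // interruptible wait
*
*     restore_saved_sigmask_unless(ret == -EINTR);
*     return ret;
*/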
static inline sigset_t *sigmask_to_save(void)
{
sigset_t *res = &current->blocked;
if (unlikely(test_restore_sigmask()))
res = &current->saved_sigmask;
return res;
}
static inline int kill_cad_pid(int sig, int priv)
{
return kill_pid(cad_pid, sig, priv);
}
/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
return sp >= current->sas_ss_sp &&
sp - current->sas_ss_sp < current->sas_ss_size;
#else
return sp > current->sas_ss_sp &&
sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}
/*
* True if we are on the alternate signal stack.
*/
static inline int on_sig_stack(unsigned long sp)
{
/*
* If the signal stack is SS_AUTODISARM then, by construction, we
* can't be on the signal stack unless user code deliberately set
* SS_AUTODISARM when we were already on it.
*
* This improves reliability: if user state gets corrupted such that
* the stack pointer points very close to the end of the signal stack,
* then this check will enable the signal to be handled anyway.
*/
if (current->sas_ss_flags & SS_AUTODISARM)
return 0;
return __on_sig_stack(sp);
}
static inline int sas_ss_flags(unsigned long sp)
{
if (!current->sas_ss_size)
return SS_DISABLE;
return on_sig_stack(sp) ? SS_ONSTACK : 0;
}
static inline void sas_ss_reset(struct task_struct *p)
{
p->sas_ss_sp = 0;
p->sas_ss_size = 0;
p->sas_ss_flags = SS_DISABLE;
}
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
return current->sas_ss_sp;
#else
return current->sas_ss_sp + current->sas_ss_size;
#endif
return sp;
}
extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);
#define tasklist_empty() \
list_empty(&init_task.tasks)
#define next_task(p) \
list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
extern bool current_is_single_threaded(void);
/*
* Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
* otherwise next_thread(t) will never reach g after list_del_rcu(g).
*/
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
#define for_other_threads(p, t) \
for (t = p; (t = next_thread(t)) != p; )
#define __for_each_thread(signal, t) \
list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
lockdep_is_held(&tasklist_lock))
#define for_each_thread(p, t) \
__for_each_thread((p)->signal, t)
/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
for_each_process(p) for_each_thread(p, t)
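/*
* Illustrative sketch (not part of this header): walking every thread in the
* system under RCU. Because for_each_process_thread() expands to two nested
* loops, a plain 'break' only leaves the inner loop; use a goto (or a flag)
* to stop the whole walk. some_predicate() is hypothetical.
*
*     struct task_struct *p, *t;
*
*     rcu_read_lock();
*     for_each_process_thread(p, t) {
*             if (some_predicate(t))
*                     goto out;          // 'break' would only leave the inner loop
*     }
*     out:
*     rcu_read_unlock();
*/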
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);
static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
struct pid *pid;
if (type == PIDTYPE_PID)
pid = task_pid(task);
else
pid = task->signal->pids[type];
return pid;
}
static inline struct pid *task_tgid(struct task_struct *task)
{
return task->signal->pids[PIDTYPE_TGID];
}
/*
* Without tasklist or RCU lock it is not safe to dereference
* the result of task_pgrp/task_session even if task == current,
* we can race with another thread doing sys_setsid/sys_setpgid.
*/
static inline struct pid *task_pgrp(struct task_struct *task)
{
return task->signal->pids[PIDTYPE_PGID];
}
static inline struct pid *task_session(struct task_struct *task)
{
return task->signal->pids[PIDTYPE_SID];
}
static inline int get_nr_threads(struct task_struct *task)
{
return task->signal->nr_threads;
}
static inline bool thread_group_leader(struct task_struct *p)
{
return p->exit_signal >= 0;
}
static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
return p1->signal == p2->signal;
}
/*
* returns NULL if p is the last thread in the thread group
*/
static inline struct task_struct *__next_thread(struct task_struct *p)
{
return list_next_or_null_rcu(&p->signal->thread_head,
&p->thread_node,
struct task_struct,
thread_node);
}
static inline struct task_struct *next_thread(struct task_struct *p)
{
return __next_thread(p) ?: p->group_leader;
}
static inline int thread_group_empty(struct task_struct *p)
{
return thread_group_leader(p) &&
list_is_last(&p->thread_node, &p->signal->thread_head);
}
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
unsigned long *flags);
static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
unsigned long *flags)
{
struct sighand_struct *ret;
ret = __lock_task_sighand(task, flags);
(void)__cond_lock(&task->sighand->siglock, ret);
return ret;
}
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
{
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif
static inline unsigned long task_rlimit(const struct task_struct *task,
unsigned int limit)
{
return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}
static inline unsigned long task_rlimit_max(const struct task_struct *task,
unsigned int limit)
{
return READ_ONCE(task->signal->rlim[limit].rlim_max);
}
static inline unsigned long rlimit(unsigned int limit)
{
return task_rlimit(current, limit);
}
static inline unsigned long rlimit_max(unsigned int limit)
{
return task_rlimit_max(current, limit);
}
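/*
* Illustrative sketch (not part of this header): a typical bound check using
* the accessors above. RLIMIT_MEMLOCK and the locked_pages variable are just
* example context.
*
*     unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
*
*     if (locked_pages > lock_limit && !capable(CAP_IPC_LOCK))
*             return -ENOMEM;
*/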
#endif /* _LINUX_SCHED_SIGNAL_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* generic net pointers
*/
#ifndef __NET_GENERIC_H__
#define __NET_GENERIC_H__
#include <linux/bug.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
/*
* Generic net pointers are to be used by modules to put some private
* stuff on the struct net without explicit struct net modification
*
* The rules are simple:
* 1. set pernet_operations->id. After register_pernet_device you
* will have the id of your private pointer.
* 2. set pernet_operations->size to have the code allocate and free
* a private structure pointed to from struct net.
* 3. do not change this pointer while the net is alive;
* 4. do not try to have any private reference on the net_generic object.
*
* After accomplishing all of the above, the private pointer can be
* accessed with the net_generic() call.
*/
struct net_generic {
union {
struct {
unsigned int len;
struct rcu_head rcu;
} s;
DECLARE_FLEX_ARRAY(void *, ptr);
};
};
static inline void *net_generic(const struct net *net, unsigned int id)
{
struct net_generic *ng;
void *ptr;
rcu_read_lock();
ng = rcu_dereference(net->gen);
ptr = ng->ptr[id];
rcu_read_unlock();
return ptr;
}
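/*
* Illustrative sketch (not part of this header): the registration pattern the
* rules above describe. All foo_* names are hypothetical.
*
*     struct foo_net {
*             int some_setting;
*     };
*
*     static unsigned int foo_net_id;
*
*     static struct pernet_operations foo_net_ops = {
*             .id   = &foo_net_id,
*             .size = sizeof(struct foo_net),
*     };
*
*     // register_pernet_subsys(&foo_net_ops) fills in foo_net_id and lets the
*     // core allocate/free the per-namespace structure automatically.
*
*     static struct foo_net *foo_pernet(struct net *net)
*     {
*             return net_generic(net, foo_net_id);
*     }
*/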
#endif
/* inflate.c -- zlib decompression
* Copyright (C) 1995-2005 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Based on zlib 1.2.3 but modified for the Linux Kernel by
* Richard Purdie <richard@openedhand.com>
*
* Changes mainly for static instead of dynamic memory allocation
*
*/
#include <linux/zutil.h>
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#include "infutil.h"
/* architecture-specific bits */
#ifdef CONFIG_ZLIB_DFLTCC
# include "../zlib_dfltcc/dfltcc_inflate.h"
#else
#define INFLATE_RESET_HOOK(strm) do {} while (0)
#define INFLATE_TYPEDO_HOOK(strm, flush) do {} while (0)
#define INFLATE_NEED_UPDATEWINDOW(strm) 1
#define INFLATE_NEED_CHECKSUM(strm) 1
#endif
int zlib_inflate_workspacesize(void)
{
return sizeof(struct inflate_workspace);
}
int zlib_inflateReset(z_streamp strm)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
strm->total_in = strm->total_out = state->total = 0;
strm->msg = NULL;
strm->adler = 1; /* to support ill-conceived Java test suite */
state->mode = HEAD;
state->last = 0;
state->havedict = 0;
state->dmax = 32768U;
state->hold = 0;
state->bits = 0;
state->lencode = state->distcode = state->next = state->codes;
/* Initialise Window */
state->wsize = 1U << state->wbits;
state->write = 0;
state->whave = 0;
INFLATE_RESET_HOOK(strm);
return Z_OK;
}
int zlib_inflateInit2(z_streamp strm, int windowBits)
{
struct inflate_state *state;
if (strm == NULL) return Z_STREAM_ERROR;
strm->msg = NULL; /* in case we return an error */
state = &WS(strm)->inflate_state;
strm->state = (struct internal_state *)state;
if (windowBits < 0) {
state->wrap = 0;
windowBits = -windowBits;
}
else {
state->wrap = (windowBits >> 4) + 1;
}
if (windowBits < 8 || windowBits > 15) {
return Z_STREAM_ERROR;
}
state->wbits = (unsigned)windowBits;
#ifdef CONFIG_ZLIB_DFLTCC
/*
* DFLTCC requires the window to be page aligned.
* Thus, we overallocate and take the aligned portion of the buffer.
*/
state->window = PTR_ALIGN(&WS(strm)->working_window[0], PAGE_SIZE);
#else
state->window = &WS(strm)->working_window[0];
#endif
return zlib_inflateReset(strm);
}
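/*
* Illustrative sketch (not part of this file): typical in-kernel use of this
* inflate API with a preallocated workspace. Buffer names and sizes are
* hypothetical; a negative windowBits requests a raw deflate stream without
* the zlib header, as handled above.
*
*     z_stream strm;
*     int ret;
*
*     strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
*     if (!strm.workspace)
*             return -ENOMEM;
*     strm.next_in   = src;
*     strm.avail_in  = src_len;
*     strm.next_out  = dst;
*     strm.avail_out = dst_len;
*     zlib_inflateInit2(&strm, -MAX_WBITS);   // raw deflate data
*     ret = zlib_inflate(&strm, Z_FINISH);    // Z_STREAM_END on success
*     zlib_inflateEnd(&strm);
*     kfree(strm.workspace);
*/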
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. This returns fixed tables from inffixed.h.
*/
static void zlib_fixedtables(struct inflate_state *state)
{
# include "inffixed.h"
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
/*
Update the window with the last wsize (normally 32K) bytes written before
returning. This is only called when a window is already in use, or when
output has been written during this inflate call, but the end of the deflate
stream has not been reached yet. It is also called to window dictionary data
when a dictionary is loaded.
Providing output buffers larger than 32K to inflate() should provide a speed
advantage, since only the last 32K of output is copied to the sliding window
upon return from inflate(), and since all distances after the first 32K of
output will fall in the output data, making match copies simpler and faster.
The advantage may be dependent on the size of the processor's data caches.
*/
static void zlib_updatewindow(z_streamp strm, unsigned out)
{
struct inflate_state *state;
unsigned copy, dist;
state = (struct inflate_state *)strm->state;
/* copy state->wsize or less output bytes into the circular window */
copy = out - strm->avail_out;
if (copy >= state->wsize) {
memcpy(state->window, strm->next_out - state->wsize, state->wsize);
state->write = 0;
state->whave = state->wsize;
}
else {
dist = state->wsize - state->write;
if (dist > copy) dist = copy;
memcpy(state->window + state->write, strm->next_out - copy, dist);
copy -= dist;
if (copy) {
memcpy(state->window, strm->next_out - copy, copy);
state->write = copy;
state->whave = state->wsize;
}
else {
state->write += dist;
if (state->write == state->wsize) state->write = 0;
if (state->whave < state->wsize) state->whave += dist;
}
}
}
/*
* At the end of a Deflate-compressed PPP packet, we expect to have seen
* a `stored' block type value but not the (zero) length bytes.
*/
/*
Returns true if inflate is currently at the end of a block generated by
Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
implementation to provide an additional safety check. PPP uses
Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
block. When decompressing, PPP checks that at the end of input packet,
inflate is waiting for these length bytes.
*/
static int zlib_inflateSyncPacket(z_streamp strm)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->mode == STORED && state->bits == 0) {
state->mode = TYPE;
return Z_OK;
}
return Z_DATA_ERROR;
}
/* Macros for inflate(): */
/* check function to use adler32() for zlib or crc32() for gzip */
#define UPDATE(check, buf, len) zlib_adler32(check, buf, len)
/* Load registers with state in inflate() for speed */
#define LOAD() \
do { \
put = strm->next_out; \
left = strm->avail_out; \
next = strm->next_in; \
have = strm->avail_in; \
hold = state->hold; \
bits = state->bits; \
} while (0)
/* Restore state from registers in inflate() */
#define RESTORE() \
do { \
strm->next_out = put; \
strm->avail_out = left; \
strm->next_in = next; \
strm->avail_in = have; \
state->hold = hold; \
state->bits = bits; \
} while (0)
/* Clear the input bit accumulator */
#define INITBITS() \
do { \
hold = 0; \
bits = 0; \
} while (0)
/* Get a byte of input into the bit accumulator, or return from inflate()
if there is no input available. */
#define PULLBYTE() \
do { \
if (have == 0) goto inf_leave; \
have--; \
hold += (unsigned long)(*next++) << bits; \
bits += 8; \
} while (0)
/* Assure that there are at least n bits in the bit accumulator. If there is
not enough available input to do that, then return from inflate(). */
#define NEEDBITS(n) \
do { \
while (bits < (unsigned)(n)) \
PULLBYTE(); \
} while (0)
/* Return the low n bits of the bit accumulator (n < 16) */
#define BITS(n) \
((unsigned)hold & ((1U << (n)) - 1))
/* Remove n bits from the bit accumulator */
#define DROPBITS(n) \
do { \
hold >>= (n); \
bits -= (unsigned)(n); \
} while (0)
/* Remove zero to seven bits as needed to go to a byte boundary */
#define BYTEBITS() \
do { \
hold >>= bits & 7; \
bits -= bits & 7; \
} while (0)
/*
inflate() uses a state machine to process as much input data and generate as
much output data as possible before returning. The state machine is
structured roughly as follows:
for (;;) switch (state) {
...
case STATEn:
if (not enough input data or output space to make progress)
return;
... make progress ...
state = STATEm;
break;
...
}
so when inflate() is called again, the same case is attempted again, and
if the appropriate resources are provided, the machine proceeds to the
next state. The NEEDBITS() macro is usually the way the state evaluates
whether it can proceed or should return. NEEDBITS() does the return if
the requested bits are not available. The typical use of the BITS macros
is:
NEEDBITS(n);
... do something with BITS(n) ...
DROPBITS(n);
where NEEDBITS(n) either returns from inflate() if there isn't enough
input left to load n bits into the accumulator, or it continues. BITS(n)
gives the low n bits in the accumulator. When done, DROPBITS(n) drops
the low n bits off the accumulator. INITBITS() clears the accumulator
and sets the number of available bits to zero. BYTEBITS() discards just
enough bits to put the accumulator on a byte boundary. After BYTEBITS()
and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
if there is no input available. The decoding of variable length codes uses
PULLBYTE() directly in order to pull just enough bytes to decode the next
code, and no more.
Some states loop until they get enough input, making sure that enough
state information is maintained to continue the loop where it left off
if NEEDBITS() returns in the loop. For example, want, need, and keep
would all have to actually be part of the saved state in case NEEDBITS()
returns:
case STATEw:
while (want < need) {
NEEDBITS(n);
keep[want++] = BITS(n);
DROPBITS(n);
}
state = STATEx;
case STATEx:
As shown above, if the next state is also the next case, then the break
is omitted.
A state may also return if there is not enough output space available to
complete that state. Those states are copying stored data, writing a
literal byte, and copying a matching string.
When returning, a "goto inf_leave" is used to update the total counters,
update the check value, and determine whether any progress has been made
during that inflate() call in order to return the proper return code.
Progress is defined as a change in either strm->avail_in or strm->avail_out.
When there is a window, goto inf_leave will update the window with the last
output written. If a goto inf_leave occurs in the middle of decompression
and there is no window currently, goto inf_leave will create one and copy
output to the window for the next call of inflate().
In this implementation, the flush parameter of inflate() only affects the
return code (per zlib.h). inflate() always writes as much as possible to
strm->next_out, given the space available and the provided input--the effect
documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
the allocation of and copying into a sliding window until necessary, which
provides the effect documented in zlib.h for Z_FINISH when the entire input
stream is available. So the only thing the flush parameter actually does is:
when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
will return Z_BUF_ERROR if it has not reached the end of the stream.
*/
int zlib_inflate(z_streamp strm, int flush)
{
struct inflate_state *state;
const unsigned char *next; /* next input */
unsigned char *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */
unsigned bits; /* bits in bit buffer */
unsigned in, out; /* save starting available input and output */
unsigned copy; /* number of stored or match bytes to copy */
unsigned char *from; /* where to copy match bytes from */
code this; /* current decoding table entry */
code last; /* parent table entry */
unsigned len; /* length to copy for repeats, bits to drop */
int ret; /* return code */
static const unsigned short order[19] = /* permutation of code lengths */
{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
/* Do not check for strm->next_out == NULL here as ppc zImage
inflates to strm->next_out = 0 */
if (strm == NULL || strm->state == NULL ||
(strm->next_in == NULL && strm->avail_in != 0))
return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
LOAD();
in = have;
out = left;
ret = Z_OK;
for (;;)
switch (state->mode) {
case HEAD:
if (state->wrap == 0) {
state->mode = TYPEDO;
break;
}
NEEDBITS(16);
if (((BITS(8) << 8) + (hold >> 8)) % 31) {
strm->msg = (char *)"incorrect header check";
state->mode = BAD;
break;
}
if (BITS(4) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
DROPBITS(4);
len = BITS(4) + 8;
if (len > state->wbits) {
strm->msg = (char *)"invalid window size";
state->mode = BAD;
break;
}
state->dmax = 1U << len;
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
INITBITS();
break;
case DICTID:
NEEDBITS(32);
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
fallthrough;
case DICT:
if (state->havedict == 0) {
RESTORE();
return Z_NEED_DICT;
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
fallthrough;
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
fallthrough;
case TYPEDO:
INFLATE_TYPEDO_HOOK(strm, flush);
if (state->last) {
BYTEBITS();
state->mode = CHECK;
break;
}
NEEDBITS(3);
state->last = BITS(1);
DROPBITS(1);
switch (BITS(2)) {
case 0: /* stored block */
state->mode = STORED;
break;
case 1: /* fixed block */
zlib_fixedtables(state);
state->mode = LEN; /* decode codes */
break;
case 2: /* dynamic block */
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
break;
case STORED:
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
fallthrough;
case COPY:
copy = state->length;
if (copy) {
if (copy > have) copy = have;
if (copy > left) copy = left;
if (copy == 0) goto inf_leave;
memcpy(put, next, copy);
have -= copy;
next += copy;
left -= copy;
put += copy;
state->length -= copy;
break;
}
state->mode = TYPE;
break;
case TABLE:
NEEDBITS(14);
state->nlen = BITS(5) + 257;
DROPBITS(5);
state->ndist = BITS(5) + 1;
DROPBITS(5);
state->ncode = BITS(4) + 4;
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
state->mode = BAD;
break;
}
#endif
state->have = 0;
state->mode = LENLENS;
fallthrough;
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
state->lens[order[state->have++]] = (unsigned short)BITS(3);
DROPBITS(3);
}
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (code const *)(state->next);
state->lenbits = 7;
ret = zlib_inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
state->mode = BAD;
break;
}
state->have = 0;
state->mode = CODELENS;
fallthrough;
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
this = state->lencode[BITS(state->lenbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if (this.val < 16) {
NEEDBITS(this.bits);
DROPBITS(this.bits);
state->lens[state->have++] = this.val;
}
else {
if (this.val == 16) {
NEEDBITS(this.bits + 2);
DROPBITS(this.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
len = state->lens[state->have - 1];
copy = 3 + BITS(2);
DROPBITS(2);
}
else if (this.val == 17) {
NEEDBITS(this.bits + 3);
DROPBITS(this.bits);
len = 0;
copy = 3 + BITS(3);
DROPBITS(3);
}
else {
NEEDBITS(this.bits + 7);
DROPBITS(this.bits);
len = 0;
copy = 11 + BITS(7);
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
while (copy--)
state->lens[state->have++] = (unsigned short)len;
}
}
/* handle error breaks in while */
if (state->mode == BAD) break;
/* build code tables */
state->next = state->codes;
state->lencode = (code const *)(state->next);
state->lenbits = 9;
ret = zlib_inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
state->distcode = (code const *)(state->next);
state->distbits = 6;
ret = zlib_inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
state->mode = BAD;
break;
}
state->mode = LEN;
fallthrough;
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
inflate_fast(strm, out);
LOAD();
break;
}
for (;;) {
this = state->lencode[BITS(state->lenbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if (this.op && (this.op & 0xf0) == 0) {
last = this;
for (;;) {
this = state->lencode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + this.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
}
DROPBITS(this.bits);
state->length = (unsigned)this.val;
if ((int)(this.op) == 0) {
state->mode = LIT;
break;
}
if (this.op & 32) {
state->mode = TYPE;
break;
}
if (this.op & 64) {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
fallthrough;
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->length += BITS(state->extra);
DROPBITS(state->extra);
}
state->mode = DIST;
fallthrough;
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if ((this.op & 0xf0) == 0) {
last = this;
for (;;) {
this = state->distcode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + this.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
}
DROPBITS(this.bits);
if (this.op & 64) {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
fallthrough;
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->offset += BITS(state->extra);
DROPBITS(state->extra);
}
#ifdef INFLATE_STRICT
if (state->offset > state->dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
if (state->offset > state->whave + out - left) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
state->mode = MATCH;
fallthrough;
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
if (state->offset > copy) { /* copy from window */
copy = state->offset - copy;
if (copy > state->write) {
copy -= state->write;
from = state->window + (state->wsize - copy);
}
else
from = state->window + (state->write - copy);
if (copy > state->length) copy = state->length;
}
else { /* copy from output */
from = put - state->offset;
copy = state->length;
}
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = *from++;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
case LIT:
if (left == 0) goto inf_leave;
*put++ = (unsigned char)(state->length);
left--;
state->mode = LEN;
break;
case CHECK:
if (state->wrap) {
NEEDBITS(32);
out -= left;
strm->total_out += out;
state->total += out;
if (INFLATE_NEED_CHECKSUM(strm) && out)
strm->adler = state->check =
UPDATE(state->check, put - out, out);
out = left;
if (REVERSE(hold) != state->check) {
strm->msg = (char *)"incorrect data check";
state->mode = BAD;
break;
}
INITBITS();
}
state->mode = DONE;
fallthrough;
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
case BAD:
ret = Z_DATA_ERROR;
goto inf_leave;
case MEM:
return Z_MEM_ERROR;
case SYNC:
default:
return Z_STREAM_ERROR;
}
/*
Return from inflate(), updating the total counts and the check value.
If there was no progress during the inflate() call, return a buffer
error. Call zlib_updatewindow() to create and/or update the window state.
*/
inf_leave:
RESTORE();
if (INFLATE_NEED_UPDATEWINDOW(strm) &&
(state->wsize || (state->mode < CHECK && out != strm->avail_out)))
zlib_updatewindow(strm, out);
in -= strm->avail_in;
out -= strm->avail_out;
strm->total_in += in;
strm->total_out += out;
state->total += out;
if (INFLATE_NEED_CHECKSUM(strm) && state->wrap && out)
strm->adler = state->check =
UPDATE(state->check, strm->next_out - out, out);
strm->data_type = state->bits + (state->last ? 64 : 0) +
(state->mode == TYPE ? 128 : 0);
if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
strm->avail_out != 0 && strm->avail_in == 0)
return zlib_inflateSyncPacket(strm);
if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
ret = Z_BUF_ERROR;
return ret;
}
int zlib_inflateEnd(z_streamp strm)
{
if (strm == NULL || strm->state == NULL)
return Z_STREAM_ERROR;
return Z_OK;
}
/*
* This subroutine adds the data at next_in/avail_in to the output history
* without performing any output. The output buffer must be "caught up";
* i.e. no pending output, but this should always be the case. The state must
* be waiting on the start of a block (i.e. mode == TYPE or HEAD). On exit,
* the output will also be caught up, and the checksum will have been updated
* if need be.
*/
int zlib_inflateIncomp(z_stream *z)
{
struct inflate_state *state = (struct inflate_state *)z->state;
Byte *saved_no = z->next_out;
uInt saved_ao = z->avail_out;
if (state->mode != TYPE && state->mode != HEAD)
return Z_DATA_ERROR;
/* Setup some variables to allow misuse of updateWindow */
z->avail_out = 0;
z->next_out = (unsigned char*)z->next_in + z->avail_in;
zlib_updatewindow(z, z->avail_in);
/* Restore saved variables */
z->avail_out = saved_ao;
z->next_out = saved_no;
z->adler = state->check =
UPDATE(state->check, z->next_in, z->avail_in);
z->total_out += z->avail_in;
z->total_in += z->avail_in;
z->next_in += z->avail_in;
state->total += z->avail_in;
z->avail_in = 0;
return Z_OK;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Kernel timekeeping code and accessor functions. Based on code from
* timer.c, moved in commit 8524070b7982.
*/
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>
#include <linux/audit.h>
#include <linux/random.h>
#include <vdso/auxclock.h>
#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"
#define TK_CLEAR_NTP (1 << 0)
#define TK_CLOCK_WAS_SET (1 << 1)
#define TK_UPDATE_ALL (TK_CLEAR_NTP | TK_CLOCK_WAS_SET)
enum timekeeping_adv_mode {
/* Update timekeeper when a tick has passed */
TK_ADV_TICK,
/* Update timekeeper on a direct frequency change */
TK_ADV_FREQ
};
/*
* The most important data for readout fits into a single 64 byte
* cache line.
*/
struct tk_data {
seqcount_raw_spinlock_t seq;
struct timekeeper timekeeper;
struct timekeeper shadow_timekeeper;
raw_spinlock_t lock;
} ____cacheline_aligned;
static struct tk_data timekeeper_data[TIMEKEEPERS_MAX];
/* The core timekeeper */
#define tk_core (timekeeper_data[TIMEKEEPER_CORE])
#ifdef CONFIG_POSIX_AUX_CLOCKS
static inline bool tk_get_aux_ts64(unsigned int tkid, struct timespec64 *ts)
{
return ktime_get_aux_ts64(CLOCK_AUX + tkid - TIMEKEEPER_AUX_FIRST, ts);
}
static inline bool tk_is_aux(const struct timekeeper *tk)
{
return tk->id >= TIMEKEEPER_AUX_FIRST && tk->id <= TIMEKEEPER_AUX_LAST;
}
#else
static inline bool tk_get_aux_ts64(unsigned int tkid, struct timespec64 *ts)
{
return false;
}
static inline bool tk_is_aux(const struct timekeeper *tk)
{
return false;
}
#endif
static inline void tk_update_aux_offs(struct timekeeper *tk, ktime_t offs)
{
tk->offs_aux = offs;
tk->monotonic_to_aux = ktime_to_timespec64(offs);
}
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
/**
* struct tk_fast - NMI safe timekeeper
* @seq: Sequence counter for protecting updates. The lowest bit
* is the index for the tk_read_base array
* @base: tk_read_base array. Access is indexed by the lowest bit of
* @seq.
*
* See @update_fast_timekeeper() below.
*/
struct tk_fast {
seqcount_latch_t seq;
struct tk_read_base base[2];
};
/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;
static u64 dummy_clock_read(struct clocksource *cs)
{
if (timekeeping_suspended)
return cycles_at_suspend;
return local_clock();
}
static struct clocksource dummy_clock = {
.read = dummy_clock_read,
};
/*
* Boot time initialization which allows local_clock() to be utilized
* during early boot when clocksources are not available. local_clock()
* returns nanoseconds already so no conversion is required, hence mult=1
* and shift=0. When the first proper clocksource is installed then
* the fast time keepers are updated with the correct values.
*/
#define FAST_TK_INIT \
{ \
.clock = &dummy_clock, \
.mask = CLOCKSOURCE_MASK(64), \
.mult = 1, \
.shift = 0, \
}
static struct tk_fast tk_fast_mono ____cacheline_aligned = {
.seq = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
.base[0] = FAST_TK_INIT,
.base[1] = FAST_TK_INIT,
};
static struct tk_fast tk_fast_raw ____cacheline_aligned = {
.seq = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
.base[0] = FAST_TK_INIT,
.base[1] = FAST_TK_INIT,
};
#ifdef CONFIG_POSIX_AUX_CLOCKS
static __init void tk_aux_setup(void);
static void tk_aux_update_clocksource(void);
static void tk_aux_advance(void);
#else
static inline void tk_aux_setup(void) { }
static inline void tk_aux_update_clocksource(void) { }
static inline void tk_aux_advance(void) { }
#endif
unsigned long timekeeper_lock_irqsave(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&tk_core.lock, flags);
return flags;
}
void timekeeper_unlock_irqrestore(unsigned long flags)
{
raw_spin_unlock_irqrestore(&tk_core.lock, flags);
}
/*
* Multigrain timestamps require tracking the latest fine-grained timestamp
* that has been issued, and never returning a coarse-grained timestamp that is
* earlier than that value.
*
* mg_floor represents the latest fine-grained time that has been handed out as
* a file timestamp on the system. This is tracked as a monotonic ktime_t, and
* converted to a realtime clock value on an as-needed basis.
*
* Maintaining mg_floor ensures the multigrain interfaces never issue a
* timestamp earlier than one that has been previously issued.
*
* The exception to this rule is when there is a backward realtime clock jump. If
* such an event occurs, a timestamp can appear to be earlier than a previous one.
*/
static __cacheline_aligned_in_smp atomic64_t mg_floor;
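/*
* Illustrative sketch (not part of this file): how a coarse multigrain reader
* conceptually applies the floor. Variable names are hypothetical; the idea is
* that the monotonic floor is converted to realtime with the current offs_real
* and wins whenever it is later than the coarse monotonic time.
*
*     ktime_t floor = atomic64_read(&mg_floor);
*
*     if (ktime_after(floor, mono_coarse))
*             real = ktime_add(floor, offs_real);         // clamp to the floor
*     else
*             real = ktime_add(mono_coarse, offs_real);   // normal coarse readout
*/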
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
tk->xtime_sec++;
}
while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
tk->raw_sec++;
}
}
static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
{
struct timespec64 ts;
ts.tv_sec = tk->xtime_sec;
ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
return ts;
}
static inline struct timespec64 tk_xtime_coarse(const struct timekeeper *tk)
{
struct timespec64 ts;
ts.tv_sec = tk->xtime_sec;
ts.tv_nsec = tk->coarse_nsec;
return ts;
}
/*
* Update the nanoseconds part for the coarse time keepers. They can't rely
* on xtime_nsec because xtime_nsec could be adjusted by a small negative
* amount when the multiplication factor of the clock is adjusted, which
* could cause the coarse clocks to go slightly backwards. See
* timekeeping_apply_adjustment(). Thus we keep a separate copy for the coarse
* clockids which is only updated when the clock has been set or we have
* accumulated time.
*/
static inline void tk_update_coarse_nsecs(struct timekeeper *tk)
{
tk->coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
}
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
tk->xtime_sec = ts->tv_sec;
tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
tk_update_coarse_nsecs(tk);
}
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
tk->xtime_sec += ts->tv_sec;
tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
tk_normalize_xtime(tk);
tk_update_coarse_nsecs(tk);
}
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
struct timespec64 tmp;
/*
* Verify consistency of: offset_real = -wall_to_monotonic
* before modifying anything
*/
set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
-tk->wall_to_monotonic.tv_nsec);
WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
tk->wall_to_monotonic = wtm;
set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
/* Paired with READ_ONCE() in ktime_mono_to_any() */
WRITE_ONCE(tk->offs_real, timespec64_to_ktime(tmp));
WRITE_ONCE(tk->offs_tai, ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)));
}
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
/* Paired with READ_ONCE() in ktime_mono_to_any() */
WRITE_ONCE(tk->offs_boot, ktime_add(tk->offs_boot, delta));
/*
* Timespec representation for VDSO update to avoid 64bit division
* on every update.
*/
tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
}
/*
* tk_clock_read - atomic clocksource read() helper
*
* This helper is necessary to use in the read paths because, while the
* seqcount ensures we don't return a bad value while structures are updated,
* it doesn't protect from potential crashes. There is the possibility that
* the tkr's clocksource may change between the read reference, and the
* clock reference passed to the read function. This can cause crashes if
* the wrong clocksource is passed to the wrong read function.
* This isn't necessary to use when holding the tk_core.lock or doing
* a read of the fast-timekeeper tkrs (which is protected by its own locking
* and update logic).
*/
static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
struct clocksource *clock = READ_ONCE(tkr->clock);
return clock->read(clock);
}
/**
* tk_setup_internals - Set up internals to use clocksource clock.
*
* @tk: The target timekeeper to setup.
* @clock: Pointer to clocksource.
*
* Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
* pair and interval request.
*
* Unless you're the timekeeping code, you should not be using this!
*/
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
u64 interval;
u64 tmp, ntpinterval;
struct clocksource *old_clock;
++tk->cs_was_changed_seq;
old_clock = tk->tkr_mono.clock;
tk->tkr_mono.clock = clock;
tk->tkr_mono.mask = clock->mask;
tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk->tkr_raw.clock = clock;
tk->tkr_raw.mask = clock->mask;
tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
/* Do the ns -> cycle conversion first, using original mult */
tmp = NTP_INTERVAL_LENGTH;
tmp <<= clock->shift;
ntpinterval = tmp;
tmp += clock->mult/2;
do_div(tmp, clock->mult);
if (tmp == 0)
tmp = 1;
interval = (u64) tmp;
tk->cycle_interval = interval;
/* Go back from cycles -> shifted ns */
tk->xtime_interval = interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
tk->raw_interval = interval * clock->mult;
/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
int shift_change = clock->shift - old_clock->shift;
if (shift_change < 0) {
tk->tkr_mono.xtime_nsec >>= -shift_change;
tk->tkr_raw.xtime_nsec >>= -shift_change;
} else {
tk->tkr_mono.xtime_nsec <<= shift_change;
tk->tkr_raw.xtime_nsec <<= shift_change;
}
}
tk->tkr_mono.shift = clock->shift;
tk->tkr_raw.shift = clock->shift;
tk->ntp_error = 0;
tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
/*
* The timekeeper keeps its own mult values for the currently
* active clocksource. These value will be adjusted via NTP
* to counteract clock drifting.
*/
tk->tkr_mono.mult = clock->mult;
tk->tkr_raw.mult = clock->mult;
tk->ntp_err_mult = 0;
tk->skip_second_overflow = 0;
}
/* Timekeeper helper functions. */
static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
{
return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
}
static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
{
/* Calculate the delta since the last update_wall_time() */
u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
/*
* This detects both negative motion and the case where the delta
* overflows the multiplication with tkr->mult.
*/
if (unlikely(delta > tkr->clock->max_cycles)) {
/*
* Handle clocksource inconsistency between CPUs to prevent
* time from going backwards by checking for the MSB of the
* mask being set in the delta.
*/
if (delta & ~(mask >> 1)) return tkr->xtime_nsec >> tkr->shift;
return delta_to_ns_safe(tkr, delta);
}
return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
}
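/*
* Worked example (illustrative numbers only): with mult = 4194304 (2^22) and
* shift = 22, one clocksource cycle corresponds to exactly one nanosecond.
* For delta = 1000 cycles and xtime_nsec = 0, the fast path above yields
* (1000 * 4194304 + 0) >> 22 = 1000 ns.
*/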
static __always_inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
{
return timekeeping_cycles_to_ns(tkr, tk_clock_read(tkr));
}
/**
* update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
* @tkr: Timekeeping readout base from which we take the update
* @tkf: Pointer to NMI safe timekeeper
*
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
*
* Employ the latch technique; see @write_seqcount_latch.
*
* So if an NMI hits the update of base[0] then it will use base[1]
* which is still consistent. In the worst case this can result in a
* slightly wrong timestamp (a few nanoseconds). See
* @ktime_get_mono_fast_ns.
*/
static void update_fast_timekeeper(const struct tk_read_base *tkr,
struct tk_fast *tkf)
{
struct tk_read_base *base = tkf->base;
/* Force readers off to base[1] */
write_seqcount_latch_begin(&tkf->seq);
/* Update base[0] */
memcpy(base, tkr, sizeof(*base));
/* Force readers back to base[0] */
write_seqcount_latch(&tkf->seq);
/* Update base[1] */
memcpy(base + 1, base, sizeof(*base));
write_seqcount_latch_end(&tkf->seq);
}
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
struct tk_read_base *tkr;
unsigned int seq;
u64 now;
do {
seq = read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);
now += timekeeping_get_ns(tkr);
} while (read_seqcount_latch_retry(&tkf->seq, seq));
return now;
}
/**
* ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
*
* This timestamp is not guaranteed to be monotonic across an update.
* The timestamp is calculated by:
*
* now = base_mono + clock_delta * slope
*
* So if the update lowers the slope, readers who are forced to the
* not yet updated second array are still using the old steeper slope.
*
* tmono
* ^
* | o n
* | o n
* | u
* | o
* |o
* |12345678---> reader order
*
* o = old slope
* u = update
* n = new slope
*
* So reader 6 will observe time going backwards versus reader 5.
*
* While other CPUs are likely to be able to observe that, the only way
* for a CPU local observation is when an NMI hits in the middle of
* the update. Timestamps taken from that NMI context might be ahead
* of the following timestamps. Callers need to be aware of that and
* deal with it.
*/
u64 notrace ktime_get_mono_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
/**
* ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
*
* Contrary to ktime_get_mono_fast_ns() this is always correct because the
* conversion factor is not affected by NTP/PTP correction.
*/
u64 notrace ktime_get_raw_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
/**
* ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
*
* To keep it NMI safe since we're accessing from tracing, we're not using a
* separate timekeeper with updates to monotonic clock and boot offset
* protected with seqcounts. This has the following minor side effects:
*
* (1) It's possible that a timestamp is taken after the boot offset is updated
* but before the timekeeper is updated. If this happens, the new boot offset
* is added to the old timekeeping making the clock appear to update slightly
* earlier:
* CPU 0 CPU 1
* timekeeping_inject_sleeptime64()
* __timekeeping_inject_sleeptime(tk, delta);
* timestamp();
* timekeeping_update_staged(tkd, TK_CLEAR_NTP...);
*
* (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
*
* The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
* apply as well.
*/
u64 notrace ktime_get_boot_fast_ns(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
/**
* ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
*
* The same limitations as described for ktime_get_boot_fast_ns() apply. The
* mono time and the TAI offset are not read atomically which may yield wrong
* readouts. However, an update of the TAI offset is a rare event, e.g. caused
* by settime or adjtimex with an offset. The user of this function has to deal
* with the possibility of wrong timestamps in post processing.
*/
u64 notrace ktime_get_tai_fast_ns(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
}
EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
/**
* ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
*
* See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
*/
u64 ktime_get_real_fast_ns(void)
{
struct tk_fast *tkf = &tk_fast_mono;
struct tk_read_base *tkr;
u64 baser, delta;
unsigned int seq;
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
baser = ktime_to_ns(tkr->base_real);
delta = timekeeping_get_ns(tkr);
} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
return baser + delta;
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
*
* It generally is unsafe to access the clocksource after timekeeping has been
* suspended, so take a snapshot of the readout base of @tk and use it as the
* fast timekeeper's readout base while suspended. It will return the same
* number of cycles every time until timekeeping is resumed at which time the
* proper readout base for the fast timekeeper will be restored automatically.
*/
static void halt_fast_timekeeper(const struct timekeeper *tk)
{
static struct tk_read_base tkr_dummy;
const struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
cycles_at_suspend = tk_clock_read(tkr);
tkr_dummy.clock = &dummy_clock;
tkr_dummy.base_real = tkr->base + tk->offs_real;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}
/**
* pvclock_gtod_register_notifier - register a pvclock timedata update listener
* @nb: Pointer to the notifier block to register
*/
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
struct timekeeper *tk = &tk_core.timekeeper;
int ret;
guard(raw_spinlock_irqsave)(&tk_core.lock);
ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
update_pvclock_gtod(tk, true);
return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
* pvclock_gtod_unregister_notifier - unregister a pvclock
* timedata update listener
* @nb: Pointer to the notifier block to unregister
*/
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
guard(raw_spinlock_irqsave)(&tk_core.lock);
return raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
/*
* tk_update_leap_state - helper to update the next_leap_ktime
*/
static inline void tk_update_leap_state(struct timekeeper *tk)
{
tk->next_leap_ktime = ntp_get_next_leap(tk->id);
if (tk->next_leap_ktime != KTIME_MAX)
/* Convert to monotonic time */
tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}
/*
* Leap state update for both shadow and the real timekeeper
* Separate to spare a full memcpy() of the timekeeper.
*/
static void tk_update_leap_state_all(struct tk_data *tkd)
{
write_seqcount_begin(&tkd->seq);
tk_update_leap_state(&tkd->shadow_timekeeper);
tkd->timekeeper.next_leap_ktime = tkd->shadow_timekeeper.next_leap_ktime;
write_seqcount_end(&tkd->seq);
}
/*
* Update the ktime_t based scalar nsec members of the timekeeper
*/
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
u64 seconds;
u32 nsec;
/*
* The xtime based monotonic readout is:
* nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
* The ktime based monotonic readout is:
* nsec = base_mono + now();
* ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
*/
seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
nsec = (u32) tk->wall_to_monotonic.tv_nsec;
tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
/*
* The sum of the nanoseconds portions of xtime and
* wall_to_monotonic can be greater than or equal to one second. Take
* this into account before updating tk->ktime_sec.
*/
nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
if (nsec >= NSEC_PER_SEC)
seconds++;
tk->ktime_sec = seconds;
/* Update the monotonic raw base */
tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}
/*
* Restore the shadow timekeeper from the real timekeeper.
*/
static void timekeeping_restore_shadow(struct tk_data *tkd)
{
lockdep_assert_held(&tkd->lock);
memcpy(&tkd->shadow_timekeeper, &tkd->timekeeper, sizeof(tkd->timekeeper));
}
static void timekeeping_update_from_shadow(struct tk_data *tkd, unsigned int action)
{
struct timekeeper *tk = &tkd->shadow_timekeeper;
lockdep_assert_held(&tkd->lock);
/*
* Block out readers before running the updates below because that
* updates VDSO and other time related infrastructure. Not blocking
* the readers might let a reader see time going backwards when
* reading from the VDSO after the VDSO update and then reading in
* the kernel from the timekeeper before that got updated.
*/
write_seqcount_begin(&tkd->seq);
if (action & TK_CLEAR_NTP) {
tk->ntp_error = 0;
ntp_clear(tk->id);
}
tk_update_leap_state(tk);
tk_update_ktime_data(tk);
tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
if (tk->id == TIMEKEEPER_CORE) {
update_vsyscall(tk);
update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
} else if (tk_is_aux(tk)) {
vdso_time_update_aux(tk);
}
if (action & TK_CLOCK_WAS_SET)
tk->clock_was_set_seq++;
/*
* Update the real timekeeper.
*
* We could avoid this memcpy() by switching pointers, but that has
* the downside that the reader side would no longer benefit from
* the cacheline-optimized data layout of the timekeeper and would
* require another indirection.
*/
memcpy(&tkd->timekeeper, tk, sizeof(*tk));
write_seqcount_end(&tkd->seq);
}
/**
* timekeeping_forward_now - update clock to the current time
* @tk: Pointer to the timekeeper to update
*
* Forward the current clock to update its state since the last call to
* update_wall_time(). This is useful before significant clock changes,
* as it avoids having to deal with this time offset explicitly.
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
u64 cycle_now, delta;
cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
tk->tkr_mono.clock->max_raw_delta);
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
while (delta > 0) {
u64 max = tk->tkr_mono.clock->max_cycles;
u64 incr = delta < max ? delta : max;
tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
tk_normalize_xtime(tk);
delta -= incr;
}
tk_update_coarse_nsecs(tk);
}
/**
* ktime_get_real_ts64 - Returns the time of day in a timespec64.
* @ts: pointer to the timespec to be set
*
* Returns the time of day in a timespec64 (WARN if suspended).
*/
void ktime_get_real_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->xtime_sec;
nsecs = timekeeping_get_ns(&tk->tkr_mono);
} while (read_seqcount_retry(&tk_core.seq, seq));
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_real_ts64);
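/*
 * Illustrative sketch (hypothetical caller, not part of this file): reading
 * the wall clock into a timespec64, e.g. from a driver:
 *
 *	struct timespec64 now;
 *
 *	ktime_get_real_ts64(&now);
 *	pr_debug("wall time: %lld.%09ld\n",
 *		 (long long)now.tv_sec, now.tv_nsec);
 */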
ktime_t ktime_get(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base;
u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
base = tk->tkr_mono.base;
nsecs = timekeeping_get_ns(&tk->tkr_mono);
} while (read_seqcount_retry(&tk_core.seq, seq));
return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
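/*
 * Illustrative sketch (assumed usage, not part of this file): CLOCK_MONOTONIC
 * is the usual choice for measuring elapsed time in the kernel:
 *
 *	ktime_t start = ktime_get();
 *	do_something();				// hypothetical workload
 *	s64 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
 */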
u32 ktime_get_resolution_ns(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
u32 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
} while (read_seqcount_retry(&tk_core.seq, seq));
return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
static ktime_t *offsets[TK_OFFS_MAX] = {
[TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
[TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
[TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
};
ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base, *offset = offsets[offs];
u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
base = ktime_add(tk->tkr_mono.base, *offset);
nsecs = timekeeping_get_ns(&tk->tkr_mono);
} while (read_seqcount_retry(&tk_core.seq, seq));
return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);
ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
{
struct timekeeper *tk = &tk_core.timekeeper;
ktime_t base, *offset = offsets[offs];
unsigned int seq;
u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
base = ktime_add(tk->tkr_mono.base, *offset);
nsecs = tk->coarse_nsec;
} while (read_seqcount_retry(&tk_core.seq, seq));
return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
/**
* ktime_mono_to_any() - convert monotonic time to any other time
* @tmono: time to convert.
* @offs: which offset to use
*/
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
ktime_t *offset = offsets[offs];
unsigned int seq;
ktime_t tconv;
if (IS_ENABLED(CONFIG_64BIT)) {
/*
* Paired with WRITE_ONCE()s in tk_set_wall_to_mono() and
* tk_update_sleep_time().
*/
return ktime_add(tmono, READ_ONCE(*offset));
}
do {
seq = read_seqcount_begin(&tk_core.seq);
tconv = ktime_add(tmono, *offset);
} while (read_seqcount_retry(&tk_core.seq, seq));
return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
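/*
 * Illustrative sketch (not part of this file): converting a CLOCK_MONOTONIC
 * timestamp to CLOCK_BOOTTIME and CLOCK_REALTIME via the offsets above:
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t boot = ktime_mono_to_any(mono, TK_OFFS_BOOT);
 *	ktime_t real = ktime_mono_to_any(mono, TK_OFFS_REAL);
 */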
/**
* ktime_get_raw - Returns the raw monotonic time in ktime_t format
*/
ktime_t ktime_get_raw(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
base = tk->tkr_raw.base;
nsecs = timekeeping_get_ns(&tk->tkr_raw);
} while (read_seqcount_retry(&tk_core.seq, seq));
return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);
/**
* ktime_get_ts64 - get the monotonic clock in timespec64 format
* @ts: pointer to timespec variable
*
* The function calculates the monotonic clock from the realtime
* clock and the wall_to_monotonic offset and stores the result
* in normalized timespec64 format in the variable pointed to by @ts.
*/
void ktime_get_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 tomono;
unsigned int seq;
u64 nsec;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->xtime_sec;
nsec = timekeeping_get_ns(&tk->tkr_mono);
tomono = tk->wall_to_monotonic;
} while (read_seqcount_retry(&tk_core.seq, seq));
ts->tv_sec += tomono.tv_sec;
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);
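/*
 * Illustrative sketch (hypothetical caller): the timespec64 variant of
 * ktime_get(), e.g. for interfaces that want split seconds/nanoseconds:
 *
 *	struct timespec64 mono;
 *
 *	ktime_get_ts64(&mono);
 */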
/**
* ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
*
* Returns the seconds portion of CLOCK_MONOTONIC with a single
* non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
* works on both 32 and 64 bit systems. On 32 bit systems the readout
* covers ~136 years of uptime which should be enough to prevent
* premature wrap arounds.
*/
time64_t ktime_get_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
WARN_ON(timekeeping_suspended);
return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);
/**
* ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
*
* Returns the wall clock seconds since 1970.
*
* For 64bit systems the fast access to tk->xtime_sec is preserved. On
* 32bit systems the access must be protected with the sequence
* counter to provide "atomic" access to the 64bit tk->xtime_sec
* value.
*/
time64_t ktime_get_real_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
time64_t seconds;
unsigned int seq;
if (IS_ENABLED(CONFIG_64BIT))
return tk->xtime_sec;
do {
seq = read_seqcount_begin(&tk_core.seq);
seconds = tk->xtime_sec;
} while (read_seqcount_retry(&tk_core.seq, seq));
return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
/**
* __ktime_get_real_seconds - Unprotected access to CLOCK_REALTIME seconds
*
* The same as ktime_get_real_seconds() but without the sequence counter
* protection. This function is used in restricted contexts like the x86 MCE
* handler and in KGDB. On 32-bit it is not protected against concurrent,
* half-completed modifications and must only be used in such critical contexts.
*
* Returns: Racy snapshot of the CLOCK_REALTIME seconds value
*/
noinstr time64_t __ktime_get_real_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
return tk->xtime_sec;
}
/**
* ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
* @systime_snapshot: pointer to struct receiving the system time snapshot
*/
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base_raw;
ktime_t base_real;
ktime_t base_boot;
u64 nsec_raw;
u64 nsec_real;
u64 now;
WARN_ON_ONCE(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
now = tk_clock_read(&tk->tkr_mono);
systime_snapshot->cs_id = tk->tkr_mono.clock->id;
systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
base_real = ktime_add(tk->tkr_mono.base,
tk_core.timekeeper.offs_real);
base_boot = ktime_add(tk->tkr_mono.base,
tk_core.timekeeper.offs_boot);
base_raw = tk->tkr_raw.base;
nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
} while (read_seqcount_retry(&tk_core.seq, seq));
systime_snapshot->cycles = now;
systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
systime_snapshot->boot = ktime_add_ns(base_boot, nsec_real);
systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
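/*
 * Illustrative sketch (not part of this file): capturing a correlated set of
 * clock readings, e.g. as the history reference for a later
 * get_device_system_crosststamp() call:
 *
 *	struct system_time_snapshot snap;
 *
 *	ktime_get_snapshot(&snap);
 *	// snap.real, snap.boot, snap.raw and snap.cycles all refer to the
 *	// same point in time.
 */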
/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
u64 tmp, rem;
tmp = div64_u64_rem(*base, div, &rem);
if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
return -EOVERFLOW;
tmp *= mult;
rem = div64_u64(rem * mult, div);
*base = tmp + rem;
return 0;
}
/**
* adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
* @history: Snapshot representing start of history
* @partial_history_cycles: Cycle offset into history (fractional part)
* @total_history_cycles: Total history length in cycles
* @discontinuity: True indicates clock was set on history period
* @ts: Cross timestamp that should be adjusted using
* partial/total ratio
*
* Helper function used by get_device_system_crosststamp() to correct the
* crosstimestamp corresponding to the start of the current interval to the
* system counter value (timestamp point) provided by the driver. The
* total_history_* quantities are the total history starting at the provided
* reference point and ending at the start of the current interval. The cycle
* count between the driver timestamp point and the start of the current
* interval is partial_history_cycles.
*/
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
u64 partial_history_cycles,
u64 total_history_cycles,
bool discontinuity,
struct system_device_crosststamp *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
u64 corr_raw, corr_real;
bool interp_forward;
int ret;
if (total_history_cycles == 0 || partial_history_cycles == 0)
return 0;
/* Interpolate shortest distance from beginning or end of history */
interp_forward = partial_history_cycles > total_history_cycles / 2;
partial_history_cycles = interp_forward ?
total_history_cycles - partial_history_cycles :
partial_history_cycles;
/*
* Scale the monotonic raw time delta by:
* partial_history_cycles / total_history_cycles
*/
corr_raw = (u64)ktime_to_ns(
ktime_sub(ts->sys_monoraw, history->raw));
ret = scale64_check_overflow(partial_history_cycles,
total_history_cycles, &corr_raw);
if (ret)
return ret;
/*
* If there is a discontinuity in the history, scale monotonic raw
* correction by:
* mult(real)/mult(raw) yielding the realtime correction
* Otherwise, calculate the realtime correction similar to monotonic
* raw calculation
*/
if (discontinuity) {
corr_real = mul_u64_u32_div
(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
} else {
corr_real = (u64)ktime_to_ns(
ktime_sub(ts->sys_realtime, history->real));
ret = scale64_check_overflow(partial_history_cycles,
total_history_cycles, &corr_real);
if (ret)
return ret;
}
/* Fix up the monotonic raw and realtime values */
if (interp_forward) {
ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
ts->sys_realtime = ktime_add_ns(history->real, corr_real);
} else {
ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
}
return 0;
}
/*
* timestamp_in_interval - true if ts is chronologically in [start, end]
*
* True if ts occurs chronologically at or after start, and before or at end.
*/
static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
{
if (ts >= start && ts <= end)
return true;
if (start > end && (ts >= start || ts <= end))
return true;
return false;
}
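/*
 * Note on the second test above: when the counter wraps inside the interval,
 * start is numerically larger than end. An illustrative example with a 16-bit
 * counter: start = 0xfff0, end = 0x0010; a timestamp ts = 0x0005 has wrapped
 * as well and is chronologically inside the interval, so (ts <= end)
 * accepts it.
 */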
static bool convert_clock(u64 *val, u32 numerator, u32 denominator)
{
u64 rem, res;
if (!numerator || !denominator)
return false;
res = div64_u64_rem(*val, denominator, &rem) * numerator;
*val = res + div_u64(rem * numerator, denominator);
return true;
}
static bool convert_base_to_cs(struct system_counterval_t *scv)
{
struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
struct clocksource_base *base;
u32 num, den;
/* The timestamp was taken from the time keeper clock source */
if (cs->id == scv->cs_id)
return true;
/*
* Check whether cs_id matches the base clock. Prevent the compiler from
* re-evaluating @base as the clocksource might change concurrently.
*/
base = READ_ONCE(cs->base);
if (!base || base->id != scv->cs_id)
return false;
num = scv->use_nsecs ? cs->freq_khz : base->numerator;
den = scv->use_nsecs ? USEC_PER_SEC : base->denominator;
if (!convert_clock(&scv->cycles, num, den))
return false;
scv->cycles += base->offset;
return true;
}
static bool convert_cs_to_base(u64 *cycles, enum clocksource_ids base_id)
{
struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
struct clocksource_base *base;
/*
* Check whether base_id matches the base clock. Prevent the compiler from
* re-evaluating @base as the clocksource might change concurrently.
*/
base = READ_ONCE(cs->base);
if (!base || base->id != base_id)
return false;
*cycles -= base->offset;
if (!convert_clock(cycles, base->denominator, base->numerator))
return false;
return true;
}
static bool convert_ns_to_cs(u64 *delta)
{
struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
if (BITS_TO_BYTES(fls64(*delta) + tkr->shift) >= sizeof(*delta))
return false;
*delta = div_u64((*delta << tkr->shift) - tkr->xtime_nsec, tkr->mult);
return true;
}
/**
* ktime_real_to_base_clock() - Convert CLOCK_REALTIME timestamp to a base clock timestamp
* @treal: CLOCK_REALTIME timestamp to convert
* @base_id: base clocksource id
* @cycles: pointer to store the converted base clock timestamp
*
* Converts a supplied, future realtime clock value to the corresponding base clock value.
*
* Return: true if the conversion is successful, false otherwise.
*/
bool ktime_real_to_base_clock(ktime_t treal, enum clocksource_ids base_id, u64 *cycles)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
u64 delta;
do {
seq = read_seqcount_begin(&tk_core.seq);
if ((u64)treal < tk->tkr_mono.base_real)
return false;
delta = (u64)treal - tk->tkr_mono.base_real;
if (!convert_ns_to_cs(&delta))
return false;
*cycles = tk->tkr_mono.cycle_last + delta;
if (!convert_cs_to_base(cycles, base_id))
return false;
} while (read_seqcount_retry(&tk_core.seq, seq));
return true;
}
EXPORT_SYMBOL_GPL(ktime_real_to_base_clock);
/**
* get_device_system_crosststamp - Synchronously capture system/device timestamp
* @get_time_fn: Callback to get simultaneous device time and
* system counter from the device driver
* @ctx: Context passed to get_time_fn()
* @history_begin: Historical reference point used to interpolate system
* time when counter provided by the driver is before the current interval
* @xtstamp: Receives simultaneously captured system and device time
*
* Reads a timestamp from a device and correlates it to system time
*/
int get_device_system_crosststamp(int (*get_time_fn)
(ktime_t *device_time,
struct system_counterval_t *sys_counterval,
void *ctx),
void *ctx,
struct system_time_snapshot *history_begin,
struct system_device_crosststamp *xtstamp)
{
struct system_counterval_t system_counterval = {};
struct timekeeper *tk = &tk_core.timekeeper;
u64 cycles, now, interval_start;
unsigned int clock_was_set_seq = 0;
ktime_t base_real, base_raw;
u64 nsec_real, nsec_raw;
u8 cs_was_changed_seq;
unsigned int seq;
bool do_interp;
int ret;
do {
seq = read_seqcount_begin(&tk_core.seq);
/*
* Try to synchronously capture device time and a system
* counter value calling back into the device driver
*/
ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
if (ret)
return ret;
/*
* Verify that the clocksource ID associated with the captured
* system counter value is the same as for the currently
* installed timekeeper clocksource
*/
if (system_counterval.cs_id == CSID_GENERIC ||
!convert_base_to_cs(&system_counterval))
return -ENODEV;
cycles = system_counterval.cycles;
/*
* Check whether the system counter value provided by the
* device driver is on the current timekeeping interval.
*/
now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
if (!timestamp_in_interval(interval_start, now, cycles)) {
clock_was_set_seq = tk->clock_was_set_seq;
cs_was_changed_seq = tk->cs_was_changed_seq;
cycles = interval_start;
do_interp = true;
} else {
do_interp = false;
}
base_real = ktime_add(tk->tkr_mono.base,
tk_core.timekeeper.offs_real);
base_raw = tk->tkr_raw.base;
nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
} while (read_seqcount_retry(&tk_core.seq, seq));
xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
/*
* Interpolate if necessary, adjusting back from the start of the
* current interval
*/
if (do_interp) {
u64 partial_history_cycles, total_history_cycles;
bool discontinuity;
/*
* Check that the counter value is not before the provided
* history reference and that the history doesn't cross a
* clocksource change
*/
if (!history_begin ||
!timestamp_in_interval(history_begin->cycles,
cycles, system_counterval.cycles) ||
history_begin->cs_was_changed_seq != cs_was_changed_seq)
return -EINVAL;
partial_history_cycles = cycles - system_counterval.cycles;
total_history_cycles = cycles - history_begin->cycles;
discontinuity =
history_begin->clock_was_set_seq != clock_was_set_seq;
ret = adjust_historical_crosststamp(history_begin,
partial_history_cycles,
total_history_cycles,
discontinuity, xtstamp);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
/**
* timekeeping_clocksource_has_base - Check whether the current clocksource
* is based on a given base clock
* @id: base clocksource ID
*
* Note: The return value is a snapshot which can become invalid right
* after the function returns.
*
* Return: true if the timekeeper clocksource has a base clock with @id,
* false otherwise
*/
bool timekeeping_clocksource_has_base(enum clocksource_ids id)
{
/*
* This is a snapshot, so no point in using the sequence
* count. Just prevent the compiler from re-evaluating @base as the
* clocksource might change concurrently.
*/
struct clocksource_base *base = READ_ONCE(tk_core.timekeeper.tkr_mono.clock->base);
return base ? base->id == id : false;
}
EXPORT_SYMBOL_GPL(timekeeping_clocksource_has_base);
/**
* do_settimeofday64 - Sets the time of day.
* @ts: pointer to the timespec64 variable containing the new time
*
* Sets the time of day to the new time, updates NTP and notifies hrtimers
*/
int do_settimeofday64(const struct timespec64 *ts)
{
struct timespec64 ts_delta, xt;
if (!timespec64_valid_settod(ts))
return -EINVAL;
scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
struct timekeeper *tks = &tk_core.shadow_timekeeper;
timekeeping_forward_now(tks);
xt = tk_xtime(tks);
ts_delta = timespec64_sub(*ts, xt);
if (timespec64_compare(&tks->wall_to_monotonic, &ts_delta) > 0) {
timekeeping_restore_shadow(&tk_core);
return -EINVAL;
}
tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, ts_delta));
tk_set_xtime(tks, ts);
timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
}
/* Signal hrtimers about time change */
clock_was_set(CLOCK_SET_WALL);
audit_tk_injoffset(ts_delta);
add_device_randomness(ts, sizeof(*ts));
return 0;
}
EXPORT_SYMBOL(do_settimeofday64);
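/*
 * Illustrative sketch (hypothetical caller, error handling abbreviated):
 * setting the wall clock from kernel code, e.g. from an RTC driver:
 *
 *	struct timespec64 ts = { .tv_sec = 1700000000, .tv_nsec = 0 };
 *
 *	if (do_settimeofday64(&ts))
 *		pr_warn("setting time of day failed\n");
 *
 * do_settimeofday64() returns -EINVAL if @ts is not a valid settable
 * time of day.
 */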
static inline bool timekeeper_is_core_tk(struct timekeeper *tk)
{
return !IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS) || tk->id == TIMEKEEPER_CORE;
}
/**
* __timekeeping_inject_offset - Adds or subtracts from the current time.
* @tkd: Pointer to the timekeeper to modify
* @ts: Pointer to the timespec variable containing the offset
*
* Adds or subtracts an offset value from the current time.
*/
static int __timekeeping_inject_offset(struct tk_data *tkd, const struct timespec64 *ts)
{
struct timekeeper *tks = &tkd->shadow_timekeeper;
struct timespec64 tmp;
if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
timekeeping_forward_now(tks);
if (timekeeper_is_core_tk(tks)) {
/* Make sure the proposed value is valid */
tmp = timespec64_add(tk_xtime(tks), *ts);
if (timespec64_compare(&tks->wall_to_monotonic, ts) > 0 ||
!timespec64_valid_settod(&tmp)) {
timekeeping_restore_shadow(tkd);
return -EINVAL;
}
tk_xtime_add(tks, ts);
tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, *ts));
} else {
struct tk_read_base *tkr_mono = &tks->tkr_mono;
ktime_t now, offs;
/* Get the current time */
now = ktime_add_ns(tkr_mono->base, timekeeping_get_ns(tkr_mono));
/* Add the relative offset change */
offs = ktime_add(tks->offs_aux, timespec64_to_ktime(*ts));
/* Prevent that the resulting time becomes negative */
if (ktime_add(now, offs) < 0) {
timekeeping_restore_shadow(tkd);
return -EINVAL;
}
tk_update_aux_offs(tks, offs);
}
timekeeping_update_from_shadow(tkd, TK_UPDATE_ALL);
return 0;
}
static int timekeeping_inject_offset(const struct timespec64 *ts)
{
int ret;
scoped_guard (raw_spinlock_irqsave, &tk_core.lock)
ret = __timekeeping_inject_offset(&tk_core, ts);
/* Signal hrtimers about time change */
if (!ret)
clock_was_set(CLOCK_SET_WALL);
return ret;
}
/*
* Indicates if there is an offset between the system clock and the hardware
* clock/persistent clock/rtc.
*/
int persistent_clock_is_local;
/*
* Adjust the time obtained from the CMOS to be UTC time instead of
* local time.
*
* This is ugly, but preferable to the alternatives. Otherwise we
* would either need to write a program to do it in /etc/rc (and risk
* confusion if the program gets run more than once; it would also be
* hard to make the program warp the clock precisely n hours) or
* compile in the timezone information into the kernel. Bad, bad....
*
* - TYT, 1992-01-01
*
* The best thing to do is to keep the CMOS clock in universal time (UTC)
* as real UNIX machines always do it. This avoids all headaches about
* daylight saving times and warping kernel clocks.
*/
void timekeeping_warp_clock(void)
{
if (sys_tz.tz_minuteswest != 0) {
struct timespec64 adjust;
persistent_clock_is_local = 1;
adjust.tv_sec = sys_tz.tz_minuteswest * 60;
adjust.tv_nsec = 0;
timekeeping_inject_offset(&adjust);
}
}
/*
* __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
*/
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
tk->tai_offset = tai_offset;
tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
/*
* change_clocksource - Swaps clocksources if a new one is available
*
* Accumulates current time interval and initializes new clocksource
*/
static int change_clocksource(void *data)
{
struct clocksource *new = data, *old = NULL;
/*
* If the clocksource is in a module, get a module reference.
* Succeeds for built-in code (owner == NULL) as well. Abort if the
* reference can't be acquired.
*/
if (!try_module_get(new->owner))
return 0;
/* Abort if the device can't be enabled */
if (new->enable && new->enable(new) != 0) {
module_put(new->owner);
return 0;
}
scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
struct timekeeper *tks = &tk_core.shadow_timekeeper;
timekeeping_forward_now(tks);
old = tks->tkr_mono.clock;
tk_setup_internals(tks, new);
timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
}
tk_aux_update_clocksource();
if (old) {
if (old->disable)
old->disable(old);
module_put(old->owner);
}
return 0;
}
/**
* timekeeping_notify - Install a new clock source
* @clock: pointer to the clock source
*
* This function is called from clocksource.c after a new, better clock
* source has been registered. The caller holds the clocksource_mutex.
*/
int timekeeping_notify(struct clocksource *clock)
{
struct timekeeper *tk = &tk_core.timekeeper;
if (tk->tkr_mono.clock == clock)
return 0;
stop_machine(change_clocksource, clock, NULL);
tick_clock_notify();
return tk->tkr_mono.clock == clock ? 0 : -1;
}
/**
* ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
* @ts: pointer to the timespec64 to be set
*
* Returns the raw monotonic time (completely un-modified by ntp)
*/
void ktime_get_raw_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->raw_sec;
nsecs = timekeeping_get_ns(&tk->tkr_raw);
} while (read_seqcount_retry(&tk_core.seq, seq));
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_raw_ts64);
/**
* ktime_get_clock_ts64 - Returns time of a clock in a timespec
* @id: POSIX clock ID of the clock to read
* @ts: Pointer to the timespec64 to be set
*
* The timestamp is invalidated (@ts->tv_sec is set to -1) if the
* clock @id is not available.
*/
void ktime_get_clock_ts64(clockid_t id, struct timespec64 *ts)
{
/* Invalidate time stamp */
ts->tv_sec = -1;
ts->tv_nsec = 0;
switch (id) {
case CLOCK_REALTIME:
ktime_get_real_ts64(ts);
return;
case CLOCK_MONOTONIC:
ktime_get_ts64(ts);
return;
case CLOCK_MONOTONIC_RAW:
ktime_get_raw_ts64(ts);
return;
case CLOCK_AUX ... CLOCK_AUX_LAST:
if (IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS))
ktime_get_aux_ts64(id, ts);
return;
default:
WARN_ON_ONCE(1);
}
}
EXPORT_SYMBOL_GPL(ktime_get_clock_ts64);
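/*
 * Illustrative sketch (not part of this file): reading a clock selected by a
 * POSIX clock ID and detecting an unavailable clock via the invalidated
 * timestamp:
 *
 *	struct timespec64 ts;
 *
 *	ktime_get_clock_ts64(CLOCK_MONOTONIC_RAW, &ts);
 *	if (ts.tv_sec == -1)
 *		return -ENODEV;		// hypothetical error handling
 */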
/**
* timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
*/
int timekeeping_valid_for_hres(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
int ret;
do {
seq = read_seqcount_begin(&tk_core.seq);
ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
} while (read_seqcount_retry(&tk_core.seq, seq));
return ret;
}
/**
* timekeeping_max_deferment - Returns max time the clocksource can be deferred
*/
u64 timekeeping_max_deferment(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
u64 ret;
do {
seq = read_seqcount_begin(&tk_core.seq);
ret = tk->tkr_mono.clock->max_idle_ns;
} while (read_seqcount_retry(&tk_core.seq, seq));
return ret;
}
/**
* read_persistent_clock64 - Return time from the persistent clock.
* @ts: Pointer to the storage for the readout value
*
* Weak dummy function for arches that do not yet support it.
* Reads the time from the battery backed persistent clock.
* Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
*
* XXX - Do be sure to remove it once all arches implement it.
*/
void __weak read_persistent_clock64(struct timespec64 *ts)
{
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
/**
* read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
* from the boot.
* @wall_time: current time as returned by persistent clock
* @boot_offset: offset that is defined as wall_time - boot_time
*
* Weak dummy function for arches that do not yet support it.
*
* The default function calculates offset based on the current value of
* local_clock(). This way architectures that support sched_clock() but don't
* support dedicated boot time clock will provide the best estimate of the
* boot time.
*/
void __weak __init
read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
struct timespec64 *boot_offset)
{
read_persistent_clock64(wall_time);
*boot_offset = ns_to_timespec64(local_clock());
}
static __init void tkd_basic_setup(struct tk_data *tkd, enum timekeeper_ids tk_id, bool valid)
{
raw_spin_lock_init(&tkd->lock);
seqcount_raw_spinlock_init(&tkd->seq, &tkd->lock);
tkd->timekeeper.id = tkd->shadow_timekeeper.id = tk_id;
tkd->timekeeper.clock_valid = tkd->shadow_timekeeper.clock_valid = valid;
}
/*
* Flag reflecting whether timekeeping_resume() has injected sleeptime.
*
* The flag starts off false and is only set when a suspend reaches
* timekeeping_suspend(). timekeeping_resume() clears it again when the
* timekeeper clocksource did not stop across suspend and has been used
* to update the sleep time. If the timekeeper clocksource has stopped,
* the flag stays true and is used by the RTC resume code to decide
* whether sleep time must be injected; if so, the flag is cleared there.
*
* If a suspend fails before reaching timekeeping_resume() then the flag
* stays false and prevents erroneous sleeptime injection.
*/
static bool suspend_timing_needed;
/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;
/*
* timekeeping_init - Initializes the clocksource and common timekeeping values
*/
void __init timekeeping_init(void)
{
struct timespec64 wall_time, boot_offset, wall_to_mono;
struct timekeeper *tks = &tk_core.shadow_timekeeper;
struct clocksource *clock;
tkd_basic_setup(&tk_core, TIMEKEEPER_CORE, true);
tk_aux_setup();
read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
if (timespec64_valid_settod(&wall_time) &&
timespec64_to_ns(&wall_time) > 0) {
persistent_clock_exists = true;
} else if (timespec64_to_ns(&wall_time) != 0) {
pr_warn("Persistent clock returned invalid value");
wall_time = (struct timespec64){0};
}
if (timespec64_compare(&wall_time, &boot_offset) < 0)
boot_offset = (struct timespec64){0};
/*
* We want to set wall_to_mono, so that the following is true:
* wall time + wall_to_mono = boot time
*/
wall_to_mono = timespec64_sub(boot_offset, wall_time);
guard(raw_spinlock_irqsave)(&tk_core.lock);
ntp_init();
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
tk_setup_internals(tks, clock);
tk_set_xtime(tks, &wall_time);
tks->raw_sec = 0;
tk_set_wall_to_mono(tks, wall_to_mono);
timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
}
/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @tk: Pointer to the timekeeper to be updated
* @delta: Pointer to the delta value in timespec64 format
*
* Takes a timespec offset measuring a suspend interval and properly
* adds the sleep offset to the timekeeping variables.
*/
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
const struct timespec64 *delta)
{
if (!timespec64_valid_strict(delta)) {
printk_deferred(KERN_WARNING
"__timekeeping_inject_sleeptime: Invalid "
"sleep delta value!\n");
return;
}
tk_xtime_add(tk, delta);
tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
tk_debug_account_sleep_time(delta);
}
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/*
* We have three kinds of time sources to use for sleep time
* injection, the preference order is:
* 1) non-stop clocksource
* 2) persistent clock (ie: RTC accessible when irqs are off)
* 3) RTC
*
* 1) and 2) are used by timekeeping, 3) by RTC subsystem.
* If system has neither 1) nor 2), 3) will be used finally.
*
*
* If timekeeping has injected sleeptime via either 1) or 2),
* 3) becomes needless, so in this case we don't need to call
* rtc_resume(), and this is what timekeeping_rtc_skipresume()
* means.
*/
bool timekeeping_rtc_skipresume(void)
{
return !suspend_timing_needed;
}
/*
* 1) can be determined whether to use or not only when doing
* timekeeping_resume() which is invoked after rtc_suspend(),
* so we can't skip rtc_suspend() surely if system has 1).
*
* But if system has 2), 2) will definitely be used, so in this
* case we don't need to call rtc_suspend(), and this is what
* timekeeping_rtc_skipsuspend() means.
*/
bool timekeeping_rtc_skipsuspend(void)
{
return persistent_clock_exists;
}
/**
* timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
* @delta: pointer to a timespec64 delta value
*
* This hook is for architectures that cannot support read_persistent_clock64
* because their RTC/persistent clock is only accessible when irqs are enabled,
* and which also don't have an effective nonstop clocksource.
*
* This function should only be called by rtc_resume(), and allows
* a suspend offset to be injected into the timekeeping values.
*/
void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
{
scoped_guard(raw_spinlock_irqsave, &tk_core.lock) {
struct timekeeper *tks = &tk_core.shadow_timekeeper;
suspend_timing_needed = false;
timekeeping_forward_now(tks);
__timekeeping_inject_sleeptime(tks, delta);
timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
}
/* Signal hrtimers about time change */
clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
}
#endif
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
*/
void timekeeping_resume(void)
{
struct timekeeper *tks = &tk_core.shadow_timekeeper;
struct clocksource *clock = tks->tkr_mono.clock;
struct timespec64 ts_new, ts_delta;
bool inject_sleeptime = false;
u64 cycle_now, nsec;
unsigned long flags;
read_persistent_clock64(&ts_new);
clockevents_resume();
clocksource_resume();
raw_spin_lock_irqsave(&tk_core.lock, flags);
/*
* After system resumes, we need to calculate the suspended time and
* compensate it for the OS time. There are 3 sources that could be
* used: Nonstop clocksource during suspend, persistent clock and rtc
* device.
*
* One specific platform may have 1 or 2 or all of them, and the
* preference will be:
* suspend-nonstop clocksource -> persistent clock -> rtc
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
cycle_now = tk_clock_read(&tks->tkr_mono);
nsec = clocksource_stop_suspend_timing(clock, cycle_now);
if (nsec > 0) {
ts_delta = ns_to_timespec64(nsec);
inject_sleeptime = true;
} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
inject_sleeptime = true;
}
if (inject_sleeptime) {
suspend_timing_needed = false;
__timekeeping_inject_sleeptime(tks, &ts_delta);
}
/* Re-base the last cycle value */
tks->tkr_mono.cycle_last = cycle_now;
tks->tkr_raw.cycle_last = cycle_now;
tks->ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
raw_spin_unlock_irqrestore(&tk_core.lock, flags);
touch_softlockup_watchdog();
/* Resume the clockevent device(s) and hrtimers */
tick_resume();
/* Notify timerfd as resume is equivalent to clock_was_set() */
timerfd_resume();
}
int timekeeping_suspend(void)
{
struct timekeeper *tks = &tk_core.shadow_timekeeper;
struct timespec64 delta, delta_delta;
static struct timespec64 old_delta;
struct clocksource *curr_clock;
unsigned long flags;
u64 cycle_now;
read_persistent_clock64(&timekeeping_suspend_time);
/*
* On some systems the persistent_clock can not be detected at
* timekeeping_init by its return value, so if we see a valid
* value returned, update the persistent_clock_exists flag.
*/
if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
persistent_clock_exists = true;
suspend_timing_needed = true;
raw_spin_lock_irqsave(&tk_core.lock, flags);
timekeeping_forward_now(tks);
timekeeping_suspended = 1;
/*
* Since we've called forward_now, cycle_last stores the value
* just read from the current clocksource. Save this to potentially
* use in suspend timing.
*/
curr_clock = tks->tkr_mono.clock;
cycle_now = tks->tkr_mono.cycle_last;
clocksource_start_suspend_timing(curr_clock, cycle_now);
if (persistent_clock_exists) {
/*
* To avoid drift caused by repeated suspend/resumes,
* which each can add ~1 second drift error,
* try to compensate so the difference in system time
* and persistent_clock time stays close to constant.
*/
delta = timespec64_sub(tk_xtime(tks), timekeeping_suspend_time);
delta_delta = timespec64_sub(delta, old_delta);
if (abs(delta_delta.tv_sec) >= 2) {
/*
* if delta_delta is too large, assume time correction
* has occurred and set old_delta to the current delta.
*/
old_delta = delta;
} else {
/* Otherwise try to adjust old_system to compensate */
timekeeping_suspend_time =
timespec64_add(timekeeping_suspend_time, delta_delta);
}
}
timekeeping_update_from_shadow(&tk_core, 0);
halt_fast_timekeeper(tks);
raw_spin_unlock_irqrestore(&tk_core.lock, flags);
tick_suspend();
clocksource_suspend();
clockevents_suspend();
return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
.resume = timekeeping_resume,
.suspend = timekeeping_suspend,
};
static int __init timekeeping_init_ops(void)
{
register_syscore_ops(&timekeeping_syscore_ops);
return 0;
}
device_initcall(timekeeping_init_ops);
/*
* Apply a multiplier adjustment to the timekeeper
*/
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
s64 offset,
s32 mult_adj)
{
s64 interval = tk->cycle_interval;
if (mult_adj == 0) {
return;
} else if (mult_adj == -1) {
interval = -interval;
offset = -offset;
} else if (mult_adj != 1) {
interval *= mult_adj;
offset *= mult_adj;
}
/*
* So the following can be confusing.
*
* To keep things simple, lets assume mult_adj == 1 for now.
*
* When mult_adj != 1, remember that the interval and offset values
* have been appropriately scaled so the math is the same.
*
* The basic idea here is that we're increasing the multiplier
* by one, this causes the xtime_interval to be incremented by
* one cycle_interval. This is because:
* xtime_interval = cycle_interval * mult
* So if mult is being incremented by one:
* xtime_interval = cycle_interval * (mult + 1)
* It's the same as:
* xtime_interval = (cycle_interval * mult) + cycle_interval
* Which can be shortened to:
* xtime_interval += cycle_interval
*
* So offset stores the non-accumulated cycles. Thus the current
* time (in shifted nanoseconds) is:
* now = (offset * adj) + xtime_nsec
* Now, even though we're adjusting the clock frequency, we have
* to keep time consistent. In other words, we can't jump back
* in time, and we also want to avoid jumping forward in time.
*
* So given the same offset value, we need the time to be the same
* both before and after the freq adjustment.
* now = (offset * adj_1) + xtime_nsec_1
* now = (offset * adj_2) + xtime_nsec_2
* So:
* (offset * adj_1) + xtime_nsec_1 =
* (offset * adj_2) + xtime_nsec_2
* And we know:
* adj_2 = adj_1 + 1
* So:
* (offset * adj_1) + xtime_nsec_1 =
* (offset * (adj_1+1)) + xtime_nsec_2
* (offset * adj_1) + xtime_nsec_1 =
* (offset * adj_1) + offset + xtime_nsec_2
* Canceling the sides:
* xtime_nsec_1 = offset + xtime_nsec_2
* Which gives us:
* xtime_nsec_2 = xtime_nsec_1 - offset
* Which simplifies to:
* xtime_nsec -= offset
*/
if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
/* NTP adjustment caused clocksource mult overflow */
WARN_ON_ONCE(1);
return;
}
tk->tkr_mono.mult += mult_adj;
tk->xtime_interval += interval;
tk->tkr_mono.xtime_nsec -= offset;
}
/*
* Adjust the timekeeper's multiplier to the correct frequency
* and also to reduce the accumulated error value.
*/
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
u64 ntp_tl = ntp_tick_length(tk->id);
u32 mult;
/*
* Determine the multiplier from the current NTP tick length.
* Avoid expensive division when the tick length doesn't change.
*/
if (likely(tk->ntp_tick == ntp_tl)) {
mult = tk->tkr_mono.mult - tk->ntp_err_mult;
} else {
tk->ntp_tick = ntp_tl;
mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
tk->xtime_remainder, tk->cycle_interval);
}
/*
* If the clock is behind the NTP time, increase the multiplier by 1
* to catch up with it. If it's ahead and there was a remainder in the
* tick division, the clock will slow down. Otherwise it will stay
* ahead until the tick length changes to a non-divisible value.
*/
tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
mult += tk->ntp_err_mult;
timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
if (unlikely(tk->tkr_mono.clock->maxadj &&
(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
> tk->tkr_mono.clock->maxadj))) {
printk_once(KERN_WARNING
"Adjusting %s more than 11%% (%ld vs %ld)\n",
tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
}
/*
* It may be possible that when we entered this function, xtime_nsec
* was very small. Further, if we're slightly speeding the clocksource
* in the code above, it's possible the required corrective factor to
* xtime_nsec could cause it to underflow.
*
* Now, since we have already accumulated the second and the NTP
* subsystem has been notified via second_overflow(), we need to skip
* the next update.
*/
if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
tk->tkr_mono.shift;
tk->xtime_sec--;
tk->skip_second_overflow = 1;
}
}
/*
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
* Helper function that accumulates the nsecs greater than a second
* from the xtime_nsec field to the xtime_secs field.
* It also calls into the NTP code to handle leapsecond processing.
*/
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
unsigned int clock_set = 0;
while (tk->tkr_mono.xtime_nsec >= nsecps) {
int leap;
tk->tkr_mono.xtime_nsec -= nsecps;
tk->xtime_sec++;
/*
* Skip NTP update if this second was accumulated before,
* i.e. xtime_nsec underflowed in timekeeping_adjust()
*/
if (unlikely(tk->skip_second_overflow)) {
tk->skip_second_overflow = 0;
continue;
}
/* Figure out if it's a leap second and apply it if needed */
leap = second_overflow(tk->id, tk->xtime_sec);
if (unlikely(leap)) {
struct timespec64 ts;
tk->xtime_sec += leap;
ts.tv_sec = leap;
ts.tv_nsec = 0;
tk_set_wall_to_mono(tk,
timespec64_sub(tk->wall_to_monotonic, ts));
__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
clock_set = TK_CLOCK_WAS_SET;
}
}
return clock_set;
}
/*
* logarithmic_accumulation - shifted accumulation of cycles
*
* This function accumulates a shifted interval of cycles into a
* shifted interval of nanoseconds, allowing for an O(log) accumulation
* loop.
*
* Returns the unconsumed cycles.
*/
static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
u32 shift, unsigned int *clock_set)
{
u64 interval = tk->cycle_interval << shift;
u64 snsec_per_sec;
/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
return offset;
/* Accumulate one shifted interval */
offset -= interval;
tk->tkr_mono.cycle_last += interval;
tk->tkr_raw.cycle_last += interval;
tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
tk->tkr_raw.xtime_nsec -= snsec_per_sec;
tk->raw_sec++;
}
/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;
tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
(tk->ntp_error_shift + shift);
return offset;
}
/*
* timekeeping_advance - Updates the timekeeper to the current time and
* current NTP tick length
*/
static bool __timekeeping_advance(struct tk_data *tkd, enum timekeeping_adv_mode mode)
{
struct timekeeper *tk = &tkd->shadow_timekeeper;
struct timekeeper *real_tk = &tkd->timekeeper;
unsigned int clock_set = 0;
int shift = 0, maxshift;
u64 offset, orig_offset;
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
return false;
offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
tk->tkr_mono.clock->max_raw_delta);
orig_offset = offset;
/* Check if there's really nothing to do */
if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
return false;
/*
* With NO_HZ we may have to accumulate many cycle_intervals
* (think "ticks") worth of time at once. To do this efficiently,
* we calculate the largest doubling multiple of cycle_intervals
* that is smaller than the offset. We then accumulate that
* chunk in one go, and then try to consume the next smaller
* doubled multiple.
*/
shift = ilog2(offset) - ilog2(tk->cycle_interval);
shift = max(0, shift);
/* Bound shift to one less than what overflows tick_length */
maxshift = (64 - (ilog2(ntp_tick_length(tk->id)) + 1)) - 1;
shift = min(shift, maxshift);
while (offset >= tk->cycle_interval) {
offset = logarithmic_accumulation(tk, offset, shift, &clock_set);
if (offset < tk->cycle_interval<<shift)
shift--;
}
/* Adjust the multiplier to correct NTP error */
timekeeping_adjust(tk, offset);
/*
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
*/
clock_set |= accumulate_nsecs_to_secs(tk);
/*
* To avoid inconsistencies caused by adjtimex TK_ADV_FREQ calls
* making small negative adjustments to the base xtime_nsec
* value, only update the coarse clocks if we accumulated time
*/
if (orig_offset != offset)
tk_update_coarse_nsecs(tk);
timekeeping_update_from_shadow(tkd, clock_set);
return !!clock_set;
}
static bool timekeeping_advance(enum timekeeping_adv_mode mode)
{
guard(raw_spinlock_irqsave)(&tk_core.lock);
return __timekeeping_advance(&tk_core, mode);
}
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
* It also updates the enabled auxiliary clock timekeepers
*/
void update_wall_time(void)
{
if (timekeeping_advance(TK_ADV_TICK))
clock_was_set_delayed();
tk_aux_advance();
}
/**
* getboottime64 - Return the real time of system boot.
* @ts: pointer to the timespec64 to be set
*
* Returns the wall-time of boot in a timespec64.
*
* This is based on the wall_to_monotonic offset and the total suspend
* time. Calls to settimeofday will affect the value returned (which
* basically means that however wrong your real time clock is at boot time,
* you get the right time here).
*/
void getboottime64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);
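/*
 * Illustrative sketch (hypothetical usage): computing the wall-clock time at
 * which the system booted, e.g. for reporting in a procfs-like interface:
 *
 *	struct timespec64 boot_epoch;
 *
 *	getboottime64(&boot_epoch);
 */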
void ktime_get_coarse_real_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
do {
seq = read_seqcount_begin(&tk_core.seq);
*ts = tk_xtime_coarse(tk);
} while (read_seqcount_retry(&tk_core.seq, seq));
}
EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
/**
* ktime_get_coarse_real_ts64_mg - return latter of coarse grained time or floor
* @ts: timespec64 to be filled
*
* Fetch the global mg_floor value, convert it to realtime and compare it
* to the current coarse-grained time. Fill @ts with whichever is
* latest. Note that this is a filesystem-specific interface and should be
* avoided outside of that context.
*/
void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
u64 floor = atomic64_read(&mg_floor);
ktime_t f_real, offset, coarse;
unsigned int seq;
do {
seq = read_seqcount_begin(&tk_core.seq);
*ts = tk_xtime_coarse(tk);
offset = tk_core.timekeeper.offs_real;
} while (read_seqcount_retry(&tk_core.seq, seq));
coarse = timespec64_to_ktime(*ts);
f_real = ktime_add(floor, offset);
if (ktime_after(f_real, coarse))
*ts = ktime_to_timespec64(f_real);
}
/**
* ktime_get_real_ts64_mg - attempt to update floor value and return result
* @ts: pointer to the timespec to be set
*
* Get a monotonic fine-grained time value and attempt to swap it into
* mg_floor. If that succeeds then accept the new floor value. If it fails
* then another task raced in during the interim time and updated the
* floor. Since any update to the floor must be later than the previous
* floor, either outcome is acceptable.
*
* Typically this will be called after calling ktime_get_coarse_real_ts64_mg(),
* and determining that the resulting coarse-grained timestamp did not effect
* a change in ctime. Any more recent floor value would effect a change to
* ctime, so there is no need to retry the atomic64_try_cmpxchg() on failure.
*
* @ts will be filled with the latest floor value, regardless of the outcome of
* the cmpxchg. Note that this is a filesystem specific interface and should be
* avoided outside of that context.
*/
void ktime_get_real_ts64_mg(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
ktime_t old = atomic64_read(&mg_floor);
ktime_t offset, mono;
unsigned int seq;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->xtime_sec;
mono = tk->tkr_mono.base;
nsecs = timekeeping_get_ns(&tk->tkr_mono);
offset = tk_core.timekeeper.offs_real;
} while (read_seqcount_retry(&tk_core.seq, seq));
mono = ktime_add_ns(mono, nsecs);
/*
* Attempt to update the floor with the new time value. As any
* update must be later than the existing floor, and would effect
* a change to ctime from the perspective of the current task,
* accept the resulting floor value regardless of the outcome of
* the swap.
*/
if (atomic64_try_cmpxchg(&mg_floor, &old, mono)) {
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsecs);
timekeeping_inc_mg_floor_swaps();
} else {
/*
* Another task changed mg_floor since "old" was fetched.
* "old" has been updated with the latest value of "mg_floor".
* That value is newer than the previous floor value, which
* is enough to effect a change to ctime. Accept it.
*/
*ts = ktime_to_timespec64(ktime_add(old, offset));
}
}
void ktime_get_coarse_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 now, mono;
unsigned int seq;
do {
seq = read_seqcount_begin(&tk_core.seq);
now = tk_xtime_coarse(tk);
mono = tk->wall_to_monotonic;
} while (read_seqcount_retry(&tk_core.seq, seq));
set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
}
EXPORT_SYMBOL(ktime_get_coarse_ts64);
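/*
 * Illustrative sketch (not part of this file): the coarse accessors trade
 * accuracy for speed by returning the value updated at the last tick, without
 * reading the clocksource:
 *
 *	struct timespec64 coarse;
 *
 *	ktime_get_coarse_ts64(&coarse);		// CLOCK_MONOTONIC_COARSE
 */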
/*
* Must hold jiffies_lock
*/
void do_timer(unsigned long ticks)
{
jiffies_64 += ticks;
calc_global_load();
}
/**
* ktime_get_update_offsets_now - hrtimer helper
* @cwsseq: pointer to check and store the clock was set sequence number
* @offs_real: pointer to storage for monotonic -> realtime offset
* @offs_boot: pointer to storage for monotonic -> boottime offset
* @offs_tai: pointer to storage for monotonic -> clock tai offset
*
* Returns current monotonic time and updates the offsets if the
* sequence number in @cwsseq and timekeeper.clock_was_set_seq are
* different.
*
* Called from hrtimer_interrupt() or retrigger_next_event()
*/
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
ktime_t *offs_boot, ktime_t *offs_tai)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base;
u64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
base = tk->tkr_mono.base;
nsecs = timekeeping_get_ns(&tk->tkr_mono);
base = ktime_add_ns(base, nsecs);
if (*cwsseq != tk->clock_was_set_seq) {
*cwsseq = tk->clock_was_set_seq;
*offs_real = tk->offs_real;
*offs_boot = tk->offs_boot;
*offs_tai = tk->offs_tai;
}
/* Handle leapsecond insertion adjustments */
if (unlikely(base >= tk->next_leap_ktime))
*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
} while (read_seqcount_retry(&tk_core.seq, seq));
return base;
}
/*
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
static int timekeeping_validate_timex(const struct __kernel_timex *txc, bool aux_clock)
{
if (txc->modes & ADJ_ADJTIME) {
/* singleshot must not be used with any other mode bits */
if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
return -EINVAL;
if (!(txc->modes & ADJ_OFFSET_READONLY) &&
!capable(CAP_SYS_TIME))
return -EPERM;
} else {
/* In order to modify anything, you gotta be super-user! */
if (txc->modes && !capable(CAP_SYS_TIME))
return -EPERM;
/*
* if the quartz is off by more than 10% then
* something is VERY wrong!
*/
if (txc->modes & ADJ_TICK &&
(txc->tick < 900000/USER_HZ ||
txc->tick > 1100000/USER_HZ))
return -EINVAL;
}
if (txc->modes & ADJ_SETOFFSET) {
/* In order to inject time, you gotta be super-user! */
if (!capable(CAP_SYS_TIME))
return -EPERM;
/*
* Validate if a timespec/timeval used to inject a time
* offset is valid. Offsets can be positive or negative, so
* we don't check tv_sec. The value of the timeval/timespec
* is the sum of its fields, but *NOTE*:
* The field tv_usec/tv_nsec must always be non-negative and
* we can't have more nanoseconds/microseconds than a second.
*/
if (txc->time.tv_usec < 0)
return -EINVAL;
if (txc->modes & ADJ_NANO) {
if (txc->time.tv_usec >= NSEC_PER_SEC)
return -EINVAL;
} else {
if (txc->time.tv_usec >= USEC_PER_SEC)
return -EINVAL;
}
}
/*
* Check for potential multiplication overflows that can
* only happen on 64-bit systems:
*/
if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
if (LLONG_MIN / PPM_SCALE > txc->freq)
return -EINVAL;
if (LLONG_MAX / PPM_SCALE < txc->freq)
return -EINVAL;
}
if (aux_clock) {
/* Auxiliary clocks are similar to TAI and do not have leap seconds */
if (txc->status & (STA_INS | STA_DEL))
return -EINVAL;
/* No TAI offset setting */
if (txc->modes & ADJ_TAI)
return -EINVAL;
/* No PPS support either */
if (txc->status & (STA_PPSFREQ | STA_PPSTIME))
return -EINVAL;
}
return 0;
}
/**
* random_get_entropy_fallback - Returns the raw clock source value,
* used by random.c for platforms with no valid random_get_entropy().
*/
unsigned long random_get_entropy_fallback(void)
{
struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
struct clocksource *clock = READ_ONCE(tkr->clock);
if (unlikely(timekeeping_suspended || !clock))
return 0;
return clock->read(clock);
}
EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
struct adjtimex_result {
struct audit_ntp_data ad;
struct timespec64 delta;
bool clock_set;
};
static int __do_adjtimex(struct tk_data *tkd, struct __kernel_timex *txc,
struct adjtimex_result *result)
{
struct timekeeper *tks = &tkd->shadow_timekeeper;
bool aux_clock = !timekeeper_is_core_tk(tks);
struct timespec64 ts;
s32 orig_tai, tai;
int ret;
/* Validate the data before disabling interrupts */
ret = timekeeping_validate_timex(txc, aux_clock);
if (ret)
return ret;
add_device_randomness(txc, sizeof(*txc));
if (!aux_clock)
ktime_get_real_ts64(&ts);
else
tk_get_aux_ts64(tkd->timekeeper.id, &ts);
add_device_randomness(&ts, sizeof(ts));
guard(raw_spinlock_irqsave)(&tkd->lock);
if (!tks->clock_valid)
return -ENODEV;
if (txc->modes & ADJ_SETOFFSET) {
result->delta.tv_sec = txc->time.tv_sec;
result->delta.tv_nsec = txc->time.tv_usec;
if (!(txc->modes & ADJ_NANO))
result->delta.tv_nsec *= 1000;
ret = __timekeeping_inject_offset(tkd, &result->delta);
if (ret)
return ret;
result->clock_set = true;
}
orig_tai = tai = tks->tai_offset;
ret = ntp_adjtimex(tks->id, txc, &ts, &tai, &result->ad);
if (tai != orig_tai) {
__timekeeping_set_tai_offset(tks, tai);
timekeeping_update_from_shadow(tkd, TK_CLOCK_WAS_SET);
result->clock_set = true;
} else {
tk_update_leap_state_all(&tk_core);
}
/* Update the multiplier immediately if frequency was set directly */
if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
result->clock_set |= __timekeeping_advance(tkd, TK_ADV_FREQ);
return ret;
}
/**
* do_adjtimex() - Accessor function to NTP __do_adjtimex function
* @txc: Pointer to kernel_timex structure containing NTP parameters
*/
int do_adjtimex(struct __kernel_timex *txc)
{
struct adjtimex_result result = { };
int ret;
ret = __do_adjtimex(&tk_core, txc, &result);
if (ret < 0)
return ret;
if (txc->modes & ADJ_SETOFFSET)
audit_tk_injoffset(result.delta);
audit_ntp_log(&result.ad);
if (result.clock_set)
clock_was_set(CLOCK_SET_WALL);
ntp_notify_cmos_timer(result.delta.tv_sec != 0);
return ret;
}
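/*
 * Illustrative userspace sketch (not part of this file): do_adjtimex() is
 * reached from the adjtimex(2)/clock_adjtime(2) syscalls. A minimal caller
 * injecting a +500ms step, assuming it runs with CAP_SYS_TIME, could look
 * like this; the field interpretation matches the ADJ_SETOFFSET checks in
 * timekeeping_validate_timex() above:
 *
 *	#include <sys/timex.h>
 *	#include <stdio.h>
 *
 *	int inject_half_second(void)
 *	{
 *		struct timex txc = { 0 };
 *
 *		// With ADJ_NANO, time.tv_usec carries nanoseconds
 *		txc.modes = ADJ_SETOFFSET | ADJ_NANO;
 *		txc.time.tv_sec = 0;
 *		txc.time.tv_usec = 500000000;
 *
 *		if (adjtimex(&txc) < 0) {
 *			perror("adjtimex");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */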
/*
* Invoked from NTP with the time keeper lock held, so lockless access is
* fine.
*/
long ktime_get_ntp_seconds(unsigned int id)
{
return timekeeper_data[id].timekeeper.xtime_sec;
}
#ifdef CONFIG_NTP_PPS
/**
* hardpps() - Accessor function to NTP __hardpps function
* @phase_ts: Pointer to timespec64 structure representing phase timestamp
* @raw_ts: Pointer to timespec64 structure representing raw timestamp
*/
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
guard(raw_spinlock_irqsave)(&tk_core.lock);
__hardpps(phase_ts, raw_ts);
}
EXPORT_SYMBOL(hardpps);
#endif /* CONFIG_NTP_PPS */
#ifdef CONFIG_POSIX_AUX_CLOCKS
#include "posix-timers.h"
/*
* Bitmap for the activated auxiliary timekeepers to allow lockless quick
* checks in the hot paths without touching extra cache lines. If set, then
* the state of the corresponding timekeeper has to be re-checked under
* timekeeper::lock.
*/
static unsigned long aux_timekeepers;
static inline unsigned int clockid_to_tkid(unsigned int id)
{
return TIMEKEEPER_AUX_FIRST + id - CLOCK_AUX;
}
static inline struct tk_data *aux_get_tk_data(clockid_t id)
{
if (!clockid_aux_valid(id))
return NULL;
return &timekeeper_data[clockid_to_tkid(id)];
}
/* Invoked from timekeeping after a clocksource change */
static void tk_aux_update_clocksource(void)
{
unsigned long active = READ_ONCE(aux_timekeepers);
unsigned int id;
for_each_set_bit(id, &active, BITS_PER_LONG) {
struct tk_data *tkd = &timekeeper_data[id + TIMEKEEPER_AUX_FIRST];
struct timekeeper *tks = &tkd->shadow_timekeeper;
guard(raw_spinlock_irqsave)(&tkd->lock);
if (!tks->clock_valid)
continue;
timekeeping_forward_now(tks);
tk_setup_internals(tks, tk_core.timekeeper.tkr_mono.clock);
timekeeping_update_from_shadow(tkd, TK_UPDATE_ALL);
}
}
static void tk_aux_advance(void)
{
unsigned long active = READ_ONCE(aux_timekeepers);
unsigned int id;
/* Lockless quick check to avoid extra cache lines */
for_each_set_bit(id, &active, BITS_PER_LONG) {
struct tk_data *aux_tkd = &timekeeper_data[id + TIMEKEEPER_AUX_FIRST];
guard(raw_spinlock)(&aux_tkd->lock);
if (aux_tkd->shadow_timekeeper.clock_valid)
__timekeeping_advance(aux_tkd, TK_ADV_TICK);
}
}
/**
* ktime_get_aux - Get time for an AUX clock
* @id: ID of the clock to read (CLOCK_AUX...)
* @kt: Pointer to ktime_t to store the time stamp
*
* Returns: True if the timestamp is valid, false otherwise
*/
bool ktime_get_aux(clockid_t id, ktime_t *kt)
{
struct tk_data *aux_tkd = aux_get_tk_data(id);
struct timekeeper *aux_tk;
unsigned int seq;
ktime_t base;
u64 nsecs;
WARN_ON(timekeeping_suspended);
if (!aux_tkd)
return false;
aux_tk = &aux_tkd->timekeeper;
do {
seq = read_seqcount_begin(&aux_tkd->seq);
if (!aux_tk->clock_valid)
return false;
base = ktime_add(aux_tk->tkr_mono.base, aux_tk->offs_aux);
nsecs = timekeeping_get_ns(&aux_tk->tkr_mono);
} while (read_seqcount_retry(&aux_tkd->seq, seq));
*kt = ktime_add_ns(base, nsecs);
return true;
}
EXPORT_SYMBOL_GPL(ktime_get_aux);
/**
* ktime_get_aux_ts64 - Get time for an AUX clock
* @id: ID of the clock to read (CLOCK_AUX...)
* @ts: Pointer to timespec64 to store the time stamp
*
* Returns: True if the timestamp is valid, false otherwise
*/
bool ktime_get_aux_ts64(clockid_t id, struct timespec64 *ts)
{
ktime_t now;
if (!ktime_get_aux(id, &now))
return false;
*ts = ktime_to_timespec64(now);
return true;
}
EXPORT_SYMBOL_GPL(ktime_get_aux_ts64);
static int aux_get_res(clockid_t id, struct timespec64 *tp)
{
if (!clockid_aux_valid(id))
return -ENODEV;
tp->tv_sec = aux_clock_resolution_ns() / NSEC_PER_SEC;
tp->tv_nsec = aux_clock_resolution_ns() % NSEC_PER_SEC;
return 0;
}
static int aux_get_timespec(clockid_t id, struct timespec64 *tp)
{
return ktime_get_aux_ts64(id, tp) ? 0 : -ENODEV;
}
static int aux_clock_set(const clockid_t id, const struct timespec64 *tnew)
{
struct tk_data *aux_tkd = aux_get_tk_data(id);
struct timekeeper *aux_tks;
ktime_t tnow, nsecs;
if (!timespec64_valid_settod(tnew))
return -EINVAL;
if (!aux_tkd)
return -ENODEV;
aux_tks = &aux_tkd->shadow_timekeeper;
guard(raw_spinlock_irq)(&aux_tkd->lock);
if (!aux_tks->clock_valid)
return -ENODEV;
/* Forward the timekeeper base time */
timekeeping_forward_now(aux_tks);
/*
* Get the updated base time. tkr_mono.base has not been
* updated yet, so do that first. That makes the update
* in timekeeping_update_from_shadow() redundant, but
* that's harmless. After that @tnow can be calculated
* by using tkr_mono::cycle_last, which has been set
* by timekeeping_forward_now().
*/
tk_update_ktime_data(aux_tks);
nsecs = timekeeping_cycles_to_ns(&aux_tks->tkr_mono, aux_tks->tkr_mono.cycle_last);
tnow = ktime_add(aux_tks->tkr_mono.base, nsecs);
/*
* Calculate the new AUX offset as delta to @tnow ("monotonic").
* That avoids all the tk::xtime back and forth conversions as
* xtime ("realtime") is not applicable for auxiliary clocks and
* kept in sync with "monotonic".
*/
tk_update_aux_offs(aux_tks, ktime_sub(timespec64_to_ktime(*tnew), tnow));
timekeeping_update_from_shadow(aux_tkd, TK_UPDATE_ALL);
return 0;
}
static int aux_clock_adj(const clockid_t id, struct __kernel_timex *txc)
{
struct tk_data *aux_tkd = aux_get_tk_data(id);
struct adjtimex_result result = { };
if (!aux_tkd)
return -ENODEV;
/*
* @result is ignored for now as there are neither hrtimers nor an
* RTC related to auxiliary clocks.
*/
return __do_adjtimex(aux_tkd, txc, &result);
}
const struct k_clock clock_aux = {
.clock_getres = aux_get_res,
.clock_get_timespec = aux_get_timespec,
.clock_set = aux_clock_set,
.clock_adj = aux_clock_adj,
};
static void aux_clock_enable(clockid_t id)
{
struct tk_read_base *tkr_raw = &tk_core.timekeeper.tkr_raw;
struct tk_data *aux_tkd = aux_get_tk_data(id);
struct timekeeper *aux_tks = &aux_tkd->shadow_timekeeper;
/* Prevent the core timekeeper from changing. */
guard(raw_spinlock_irq)(&tk_core.lock);
/*
* Setup the auxiliary clock assuming that the raw core timekeeper
* clock frequency conversion is close enough. Userspace has to
* adjust for the deviation via clock_adjtime(2).
*/
guard(raw_spinlock_nested)(&aux_tkd->lock);
/* Remove leftovers of a previous registration */
memset(aux_tks, 0, sizeof(*aux_tks));
/* Restore the timekeeper id */
aux_tks->id = aux_tkd->timekeeper.id;
/* Setup the timekeeper based on the current system clocksource */
tk_setup_internals(aux_tks, tkr_raw->clock);
/* Mark it valid and set it live */
aux_tks->clock_valid = true;
timekeeping_update_from_shadow(aux_tkd, TK_UPDATE_ALL);
}
static void aux_clock_disable(clockid_t id)
{
struct tk_data *aux_tkd = aux_get_tk_data(id);
guard(raw_spinlock_irq)(&aux_tkd->lock);
aux_tkd->shadow_timekeeper.clock_valid = false;
timekeeping_update_from_shadow(aux_tkd, TK_UPDATE_ALL);
}
static DEFINE_MUTEX(aux_clock_mutex);
static ssize_t aux_clock_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
/* Lazy atoi() as name is "0..7" */
int id = kobj->name[0] & 0x7;
bool enable;
if (!capable(CAP_SYS_TIME))
return -EPERM;
if (kstrtobool(buf, &enable) < 0)
return -EINVAL;
guard(mutex)(&aux_clock_mutex);
if (enable == test_bit(id, &aux_timekeepers))
return count;
if (enable) {
aux_clock_enable(CLOCK_AUX + id);
set_bit(id, &aux_timekeepers);
} else {
aux_clock_disable(CLOCK_AUX + id);
clear_bit(id, &aux_timekeepers);
}
return count;
}
static ssize_t aux_clock_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
unsigned long active = READ_ONCE(aux_timekeepers);
/* Lazy atoi() as name is "0..7" */
int id = kobj->name[0] & 0x7;
return sysfs_emit(buf, "%d\n", test_bit(id, &active));
}
static struct kobj_attribute aux_clock_enable_attr = __ATTR_RW(aux_clock_enable);
static struct attribute *aux_clock_enable_attrs[] = {
&aux_clock_enable_attr.attr,
NULL
};
static const struct attribute_group aux_clock_enable_attr_group = {
.attrs = aux_clock_enable_attrs,
};
static int __init tk_aux_sysfs_init(void)
{
struct kobject *auxo, *tko = kobject_create_and_add("time", kernel_kobj);
int ret = -ENOMEM;
if (!tko)
return ret;
auxo = kobject_create_and_add("aux_clocks", tko);
if (!auxo)
goto err_clean;
for (int i = 0; i < MAX_AUX_CLOCKS; i++) {
char id[2] = { [0] = '0' + i, };
struct kobject *clk = kobject_create_and_add(id, auxo);
if (!clk) {
ret = -ENOMEM;
goto err_clean;
}
ret = sysfs_create_group(clk, &aux_clock_enable_attr_group);
if (ret)
goto err_clean;
}
return 0;
err_clean:
kobject_put(auxo);
kobject_put(tko);
return ret;
}
late_initcall(tk_aux_sysfs_init);
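/*
 * Illustrative userspace sketch (not part of this file; the sysfs path and
 * clock ids are taken from the code above): an auxiliary clock first has to
 * be enabled through the knob created by tk_aux_sysfs_init(), e.g.
 *
 *	# echo 1 > /sys/kernel/time/aux_clocks/0/aux_clock_enable
 *
 * after which it can be read and steered like any other posix clock,
 * assuming CLOCK_AUX is available from the kernel's uapi headers:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int read_aux0(void)
 *	{
 *		struct timespec ts;
 *
 *		// aux_get_timespec() returns -ENODEV until the clock
 *		// has been enabled via the sysfs knob above
 *		if (clock_gettime(CLOCK_AUX, &ts) < 0) {
 *			perror("clock_gettime(CLOCK_AUX)");
 *			return -1;
 *		}
 *		printf("aux0: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */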
static __init void tk_aux_setup(void)
{
for (int i = TIMEKEEPER_AUX_FIRST; i <= TIMEKEEPER_AUX_LAST; i++)
tkd_basic_setup(&timekeeper_data[i], i, false);
}
#endif /* CONFIG_POSIX_AUX_CLOCKS */
// SPDX-License-Identifier: GPL-2.0-only
/* Connection state tracking for netfilter. This is separated from,
but required by, the NAT layer; it can also be used by an iptables
extension. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (C) 2005-2012 Patrick McHardy <kaber@trash.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>
#include "nf_internals.h"
__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);
struct conntrack_gc_work {
struct delayed_work dwork;
u32 next_bucket;
u32 avg_timeout;
u32 count;
u32 start_time;
bool exiting;
bool early_drop;
};
static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
/* serialize hash resizes and nf_ct_iterate_cleanup */
static DEFINE_MUTEX(nf_conntrack_mutex);
#define GC_SCAN_INTERVAL_MAX (60ul * HZ)
#define GC_SCAN_INTERVAL_MIN (1ul * HZ)
/* clamp timeouts to this value (TCP unacked) */
#define GC_SCAN_INTERVAL_CLAMP (300ul * HZ)
/* Initial bias pretending we have 100 entries at the upper bound so we don't
* wake up often just because we have three entries with a 1s timeout while still
* allowing non-idle machines to wake up more often when needed.
*/
#define GC_SCAN_INITIAL_COUNT 100
#define GC_SCAN_INTERVAL_INIT GC_SCAN_INTERVAL_MAX
#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
#define GC_SCAN_EXPIRED_MAX (64000u / HZ)
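/*
 * The bias works through the running average kept by gc_worker():
 * next_run += (expires - next_run) / ++count. A rough worked example:
 * starting from the initial state (avg 60s, count 100), scanning three
 * entries that expire in ~1s only drags the average down to about 58s,
 * so a near-idle machine keeps sleeping close to GC_SCAN_INTERVAL_MAX,
 * while a table dominated by short-lived entries pulls it down quickly.
 */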
#define MIN_CHAINLEN 50u
#define MAX_CHAINLEN (80u - MIN_CHAINLEN)
static struct conntrack_gc_work conntrack_gc_work;
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
/* 1) Acquire the lock */
spin_lock(lock);
/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
* It pairs with the smp_store_release() in nf_conntrack_all_unlock()
*/
if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
return;
/* fast path failed, unlock */
spin_unlock(lock);
/* Slow path 1) get global lock */
spin_lock(&nf_conntrack_locks_all_lock);
/* Slow path 2) get the lock we want */
spin_lock(lock);
/* Slow path 3) release the global lock */
spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
h1 %= CONNTRACK_LOCKS;
h2 %= CONNTRACK_LOCKS;
spin_unlock(&nf_conntrack_locks[h1]);
if (h1 != h2)
spin_unlock(&nf_conntrack_locks[h2]);
}
/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(unsigned int h1, unsigned int h2,
unsigned int sequence)
{
h1 %= CONNTRACK_LOCKS;
h2 %= CONNTRACK_LOCKS;
if (h1 <= h2) {
nf_conntrack_lock(&nf_conntrack_locks[h1]);
if (h1 != h2)
spin_lock_nested(&nf_conntrack_locks[h2],
SINGLE_DEPTH_NESTING);
} else {
nf_conntrack_lock(&nf_conntrack_locks[h2]);
spin_lock_nested(&nf_conntrack_locks[h1],
SINGLE_DEPTH_NESTING);
}
if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
nf_conntrack_double_unlock(h1, h2);
return true;
}
return false;
}
static void nf_conntrack_all_lock(void)
__acquires(&nf_conntrack_locks_all_lock)
{
int i;
spin_lock(&nf_conntrack_locks_all_lock);
/* For nf_conntrack_locks_all, only the latest time when another
* CPU will see an update is controlled by the "release" of the
* spin_lock below.
* The earliest time is not controlled, and thus KCSAN could detect
* a race when nf_conntrack_lock() reads the variable.
* WRITE_ONCE() is used to ensure the compiler will not
* optimize the write.
*/
WRITE_ONCE(nf_conntrack_locks_all, true);
for (i = 0; i < CONNTRACK_LOCKS; i++) {
spin_lock(&nf_conntrack_locks[i]);
/* This spin_unlock provides the "release" to ensure that
* nf_conntrack_locks_all==true is visible to everyone that
* acquired spin_lock(&nf_conntrack_locks[]).
*/
spin_unlock(&nf_conntrack_locks[i]);
}
}
static void nf_conntrack_all_unlock(void)
__releases(&nf_conntrack_locks_all_lock)
{
/* All prior stores must be complete before we clear
* 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
* might observe the false value but not the entire
* critical section.
* It pairs with the smp_load_acquire() in nf_conntrack_lock()
*/
smp_store_release(&nf_conntrack_locks_all, false);
spin_unlock(&nf_conntrack_locks_all_lock);
}
unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_spinlock_t nf_conntrack_generation __read_mostly;
static siphash_aligned_key_t nf_conntrack_hash_rnd;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
unsigned int zoneid,
const struct net *net)
{
siphash_key_t key;
get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
key = nf_conntrack_hash_rnd;
key.key[0] ^= zoneid;
key.key[1] ^= net_hash_mix(net);
return siphash((void *)tuple,
offsetofend(struct nf_conntrack_tuple, dst.__nfct_hash_offsetend),
&key);
}
static u32 scale_hash(u32 hash)
{
return reciprocal_scale(hash, nf_conntrack_htable_size);
}
static u32 __hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple,
unsigned int zoneid,
unsigned int size)
{
return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);
}
static u32 hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple,
unsigned int zoneid)
{
return scale_hash(hash_conntrack_raw(tuple, zoneid, net));
}
static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
struct {
__be16 sport;
__be16 dport;
} _inet_hdr, *inet_hdr;
/* Actually only need first 4 bytes to get ports. */
inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
if (!inet_hdr)
return false;
tuple->src.u.udp.port = inet_hdr->sport;
tuple->dst.u.udp.port = inet_hdr->dport;
return true;
}
static bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
unsigned int dataoff,
u_int16_t l3num,
u_int8_t protonum,
struct net *net,
struct nf_conntrack_tuple *tuple)
{
unsigned int size;
const __be32 *ap;
__be32 _addrs[8];
memset(tuple, 0, sizeof(*tuple));
tuple->src.l3num = l3num;
switch (l3num) {
case NFPROTO_IPV4:
nhoff += offsetof(struct iphdr, saddr);
size = 2 * sizeof(__be32);
break;
case NFPROTO_IPV6:
nhoff += offsetof(struct ipv6hdr, saddr);
size = sizeof(_addrs);
break;
default:
return true;
}
ap = skb_header_pointer(skb, nhoff, size, _addrs);
if (!ap)
return false;
switch (l3num) {
case NFPROTO_IPV4:
tuple->src.u3.ip = ap[0];
tuple->dst.u3.ip = ap[1];
break;
case NFPROTO_IPV6:
memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
break;
}
tuple->dst.protonum = protonum;
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
switch (protonum) {
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6:
return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
case IPPROTO_ICMP:
return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE:
return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
case IPPROTO_TCP:
case IPPROTO_UDP:
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
case IPPROTO_UDPLITE:
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP:
#endif
/* these all carry the port numbers in the first 4 bytes of the L4 header */
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
default:
break;
}
return true;
}
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
u_int8_t *protonum)
{
int dataoff = -1;
const struct iphdr *iph;
struct iphdr _iph;
iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
if (!iph)
return -1;
/* Conntrack defragments packets, we might still see fragments
* inside ICMP packets though.
*/
if (iph->frag_off & htons(IP_OFFSET))
return -1;
dataoff = nhoff + (iph->ihl << 2);
*protonum = iph->protocol;
/* Check bogus IP headers */
if (dataoff > skb->len) {
pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
nhoff, iph->ihl << 2, skb->len);
return -1;
}
return dataoff;
}
#if IS_ENABLED(CONFIG_IPV6)
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
u8 *protonum)
{
int protoff = -1;
unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
__be16 frag_off;
u8 nexthdr;
if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
&nexthdr, sizeof(nexthdr)) != 0) {
pr_debug("can't get nexthdr\n");
return -1;
}
protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
/*
* (protoff == skb->len) means the packet has no data, just
* IPv6 and possibly extension headers, but it is tracked anyway
*/
if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
pr_debug("can't find proto in pkt\n");
return -1;
}
*protonum = nexthdr;
return protoff;
}
#endif
static int get_l4proto(const struct sk_buff *skb,
unsigned int nhoff, u8 pf, u8 *l4num)
{
switch (pf) {
case NFPROTO_IPV4:
return ipv4_get_l4proto(skb, nhoff, l4num);
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
return ipv6_get_l4proto(skb, nhoff, l4num);
#endif
default:
*l4num = 0;
break;
}
return -1;
}
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num,
struct net *net, struct nf_conntrack_tuple *tuple)
{
u8 protonum;
int protoff;
protoff = get_l4proto(skb, nhoff, l3num, &protonum);
if (protoff <= 0)
return false;
return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig)
{
memset(inverse, 0, sizeof(*inverse));
inverse->src.l3num = orig->src.l3num;
switch (orig->src.l3num) {
case NFPROTO_IPV4:
inverse->src.u3.ip = orig->dst.u3.ip;
inverse->dst.u3.ip = orig->src.u3.ip;
break;
case NFPROTO_IPV6:
inverse->src.u3.in6 = orig->dst.u3.in6;
inverse->dst.u3.in6 = orig->src.u3.in6;
break;
default:
break;
}
inverse->dst.dir = !orig->dst.dir;
inverse->dst.protonum = orig->dst.protonum;
switch (orig->dst.protonum) {
case IPPROTO_ICMP:
return nf_conntrack_invert_icmp_tuple(inverse, orig);
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6:
return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
#endif
}
inverse->src.u.all = orig->dst.u.all;
inverse->dst.u.all = orig->src.u.all;
return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
/* Generate an almost-unique pseudo-id for a given conntrack.
*
* Intentionally doesn't re-use any of the seeds used for hash
* table location; we assume the id gets exposed to userspace.
*
* Following nf_conn items do not change throughout lifetime
* of the nf_conn:
*
* 1. nf_conn address
* 2. nf_conn->master address (normally NULL)
* 3. the associated net namespace
* 4. the original direction tuple
*/
u32 nf_ct_get_id(const struct nf_conn *ct)
{
static siphash_aligned_key_t ct_id_seed;
unsigned long a, b, c, d;
net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
a = (unsigned long)ct;
b = (unsigned long)ct->master;
c = (unsigned long)nf_ct_net(ct);
d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
&ct_id_seed);
#ifdef CONFIG_64BIT
return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
#else
return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_get_id);
static u32 nf_conntrack_get_id(const struct nf_conntrack *nfct)
{
return nf_ct_get_id(nf_ct_to_nf_conn(nfct));
}
static void
clean_from_lists(struct nf_conn *ct)
{
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
/* Destroy all pending expectations */
nf_ct_remove_expectations(ct);
}
#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
/* Released via nf_ct_destroy() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags)
{
struct nf_conn *tmpl, *p;
if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
if (!tmpl)
return NULL;
p = tmpl;
tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
if (tmpl != p)
tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
} else {
tmpl = kzalloc(sizeof(*tmpl), flags);
if (!tmpl)
return NULL;
}
tmpl->status = IPS_TEMPLATE;
write_pnet(&tmpl->ct_net, net);
nf_ct_zone_add(tmpl, zone);
refcount_set(&tmpl->ct_general.use, 1);
return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
kfree(tmpl->ext);
if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
kfree((char *)tmpl - tmpl->proto.tmpl_padto);
else
kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
static void destroy_gre_conntrack(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CT_PROTO_GRE
struct nf_conn *master = ct->master;
if (master)
nf_ct_gre_keymap_destroy(master);
#endif
}
void nf_ct_destroy(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
WARN_ON(refcount_read(&nfct->use) != 0);
if (unlikely(nf_ct_is_template(ct))) {
nf_ct_tmpl_free(ct);
return;
}
if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
destroy_gre_conntrack(ct);
/* Expectations will have been removed in clean_from_lists,
* except TFTP can create an expectation on the first packet,
* before the connection is in the list, so we need to clean here,
* too.
*/
nf_ct_remove_expectations(ct);
if (ct->master)
nf_ct_put(ct->master);
nf_conntrack_free(ct);
}
EXPORT_SYMBOL(nf_ct_destroy);
static void __nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
unsigned int sequence;
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(hash, reply_hash, sequence));
clean_from_lists(ct);
nf_conntrack_double_unlock(hash, reply_hash);
}
static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
nf_ct_helper_destroy(ct);
local_bh_disable();
__nf_ct_delete_from_lists(ct);
local_bh_enable();
}
static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_net *cnet = nf_ct_pernet(nf_ct_net(ct));
spin_lock(&cnet->ecache.dying_lock);
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&cnet->ecache.dying_list);
spin_unlock(&cnet->ecache.dying_lock);
#endif
}
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
struct nf_conn_tstamp *tstamp;
struct net *net;
if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
return false;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
tstamp->stop = ktime_get_real_ns();
if (timeout < 0)
tstamp->stop -= jiffies_to_nsecs(-timeout);
}
if (nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0) {
/* destroy event was not delivered. nf_ct_put will
* be done by event cache worker on redelivery.
*/
nf_ct_helper_destroy(ct);
local_bh_disable();
__nf_ct_delete_from_lists(ct);
nf_ct_add_to_ecache_list(ct);
local_bh_enable();
nf_conntrack_ecache_work(nf_ct_net(ct), NFCT_ECACHE_DESTROY_FAIL);
return false;
}
net = nf_ct_net(ct);
if (nf_conntrack_ecache_dwork_pending(net))
nf_conntrack_ecache_work(net, NFCT_ECACHE_DESTROY_SENT);
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone,
const struct net *net)
{
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
/* A conntrack can be recreated with an equal tuple,
* so we need to check that the conntrack is confirmed
*/
return nf_ct_tuple_equal(tuple, &h->tuple) &&
nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
nf_ct_is_confirmed(ct) &&
net_eq(net, nf_ct_net(ct));
}
static inline bool
nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
{
return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
&ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
}
/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
if (!refcount_inc_not_zero(&ct->ct_general.use))
return;
/* load ->status after refcount increase */
smp_acquire__after_ctrl_dep();
if (nf_ct_should_gc(ct))
nf_ct_kill(ct);
nf_ct_put(ct);
}
/*
* Warning :
* - Caller must take a reference on returned object
* and recheck nf_ct_tuple_equal(tuple, &h->tuple)
*/
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n;
unsigned int bucket, hsize;
begin:
nf_conntrack_get_ht(&ct_hash, &hsize);
bucket = reciprocal_scale(hash, hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
struct nf_conn *ct;
ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(ct)) {
nf_ct_gc_expired(ct);
continue;
}
if (nf_ct_key_equal(h, tuple, zone, net))
return h;
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(n) != bucket) {
NF_CT_STAT_INC_ATOMIC(net, search_restart);
goto begin;
}
return NULL;
}
/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
h = ____nf_conntrack_find(net, zone, tuple, hash);
if (h) {
/* We have a candidate that matches the tuple we're interested
* in, try to obtain a reference and re-check tuple
*/
ct = nf_ct_tuplehash_to_ctrack(h);
if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
/* re-check key after refcount */
smp_acquire__after_ctrl_dep();
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
return h;
/* TYPESAFE_BY_RCU recycled the candidate */
nf_ct_put(ct);
}
h = NULL;
}
return h;
}
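/*
 * The lookup above follows the generic SLAB_TYPESAFE_BY_RCU idiom; a
 * minimal sketch of that idiom (illustration only, hash_lookup(),
 * keys_match() and put_ref() are hypothetical helpers):
 *
 *	rcu_read_lock();
 *	obj = hash_lookup(key);
 *	if (obj) {
 *		if (!refcount_inc_not_zero(&obj->ref)) {
 *			obj = NULL;			// being freed, skip it
 *		} else {
 *			// re-check the key only after taking the reference
 *			smp_acquire__after_ctrl_dep();
 *			if (!keys_match(obj, key)) {	// freed and recycled
 *				put_ref(obj);
 *				obj = NULL;
 *			}
 *		}
 *	}
 *	rcu_read_unlock();
 */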
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
struct nf_conntrack_tuple_hash *thash;
rcu_read_lock();
thash = __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, zone_id, net));
if (thash)
goto out_unlock;
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
if (rid != zone_id)
thash = __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, rid, net));
out_unlock:
rcu_read_unlock();
return thash;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
unsigned int hash,
unsigned int reply_hash)
{
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&nf_conntrack_hash[hash]);
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&nf_conntrack_hash[reply_hash]);
}
static bool nf_ct_ext_valid_pre(const struct nf_ct_ext *ext)
{
/* if ext->gen_id is not equal to nf_conntrack_ext_genid, some extensions
* may contain stale pointers to e.g. a helper that has been removed.
*
* The helper can't clear this because the nf_conn object isn't in
* any hash and synchronize_rcu() isn't enough because associated skb
* might sit in a queue.
*/
return !ext || ext->gen_id == atomic_read(&nf_conntrack_ext_genid);
}
static bool nf_ct_ext_valid_post(struct nf_ct_ext *ext)
{
if (!ext)
return true;
if (ext->gen_id != atomic_read(&nf_conntrack_ext_genid))
return false;
/* inserted into conntrack table, nf_ct_iterate_cleanup()
* will find it. Disable nf_ct_ext_find() id check.
*/
WRITE_ONCE(ext->gen_id, 0);
return true;
}
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
const struct nf_conntrack_zone *zone;
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int max_chainlen;
unsigned int chainlen = 0;
unsigned int sequence;
int err = -EEXIST;
zone = nf_ct_zone(ct);
if (!nf_ct_ext_valid_pre(ct->ext))
return -EAGAIN;
local_bh_disable();
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(hash, reply_hash, sequence));
max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen)
goto chaintoolong;
}
chainlen = 0;
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen)
goto chaintoolong;
}
/* If genid has changed, we can't insert anymore because ct
* extensions could have stale pointers and nf_ct_iterate_destroy
* might have completed its table scan already.
*
* Increment of the ext genid right after this check is fine:
* nf_ct_iterate_destroy blocks until locks are released.
*/
if (!nf_ct_ext_valid_post(ct->ext)) {
err = -EAGAIN;
goto out;
}
smp_wmb();
/* The caller holds a reference to this object */
refcount_set(&ct->ct_general.use, 2);
__nf_conntrack_hash_insert(ct, hash, reply_hash);
nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert);
local_bh_enable();
return 0;
chaintoolong:
NF_CT_STAT_INC(net, chaintoolong);
err = -ENOSPC;
out:
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
return err;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
unsigned int bytes)
{
struct nf_conn_acct *acct;
acct = nf_conn_acct_find(ct);
if (acct) {
struct nf_conn_counter *counter = acct->counter;
atomic64_add(packets, &counter[dir].packets);
atomic64_add(bytes, &counter[dir].bytes);
}
}
EXPORT_SYMBOL_GPL(nf_ct_acct_add);
static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
const struct nf_conn *loser_ct)
{
struct nf_conn_acct *acct;
acct = nf_conn_acct_find(loser_ct);
if (acct) {
struct nf_conn_counter *counter = acct->counter;
unsigned int bytes;
/* u32 should be fine since we must have seen one packet. */
bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
}
}
static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
{
struct nf_conn_tstamp *tstamp;
refcount_inc(&ct->ct_general.use);
/* set conntrack timestamp, if enabled. */
tstamp = nf_conn_tstamp_find(ct);
if (tstamp)
tstamp->start = ktime_get_real_ns();
}
/**
* nf_ct_match_reverse - check if ct1 and ct2 refer to identical flow
* @ct1: conntrack in hash table to check against
* @ct2: merge candidate
*
* returns true if ct1 and ct2 happen to refer to the same flow, but
* in opposing directions, i.e.
* ct1: a:b -> c:d
* ct2: c:d -> a:b
* for both directions. If so, @ct2 should not have been created
* as the skb should have been picked up as an ESTABLISHED flow.
* But ct1 was not yet committed to the hash table before the skb that
* created ct2 had arrived.
*
* Note we don't compare netns because ct entries in different net
* namespaces cannot clash to begin with.
*
* @return: true if ct1 and ct2 are identical when swapping origin/reply.
*/
static bool
nf_ct_match_reverse(const struct nf_conn *ct1, const struct nf_conn *ct2)
{
u16 id1, id2;
if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&ct2->tuplehash[IP_CT_DIR_REPLY].tuple))
return false;
if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
&ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
return false;
id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_ORIGINAL);
id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_REPLY);
if (id1 != id2)
return false;
id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_REPLY);
id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL);
return id1 == id2;
}
static int nf_ct_can_merge(const struct nf_conn *ct,
const struct nf_conn *loser_ct)
{
return nf_ct_match(ct, loser_ct) ||
nf_ct_match_reverse(ct, loser_ct);
}
/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
struct nf_conntrack_tuple_hash *h)
{
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
enum ip_conntrack_info ctinfo;
struct nf_conn *loser_ct;
loser_ct = nf_ct_get(skb, &ctinfo);
if (nf_ct_can_merge(ct, loser_ct)) {
struct net *net = nf_ct_net(ct);
nf_conntrack_get(&ct->ct_general);
nf_ct_acct_merge(ct, ctinfo, loser_ct);
nf_ct_put(loser_ct);
nf_ct_set(skb, ct, ctinfo);
NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
}
return NF_DROP;
}
/**
* nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
*
* @skb: skb that causes the collision
* @repl_idx: hash slot for reply direction
*
* Called when origin or reply direction had a clash.
* The skb can be handled without packet drop provided the reply direction
* is unique or the existing entry has the identical tuple in both
* directions.
*
* Caller must hold conntrack table locks to prevent concurrent updates.
*
* Returns NF_DROP if the clash could not be handled.
*/
static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
{
struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
struct net *net;
zone = nf_ct_zone(loser_ct);
net = nf_ct_net(loser_ct);
/* Reply direction must never result in a clash, unless both origin
* and reply tuples are identical.
*/
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
if (nf_ct_key_equal(h,
&loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
return __nf_ct_resolve_clash(skb, h);
}
/* We want the clashing entry to go away real soon: 1 second timeout. */
WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
/* IPS_NAT_CLASH removes the entry automatically on the first
* reply. Also prevents UDP tracker from moving the entry to
* ASSURED state, i.e. the entry can always be evicted under
* pressure.
*/
loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
__nf_conntrack_insert_prepare(loser_ct);
/* fake add for ORIGINAL dir: we want lookups to only find the entry
* already in the table. This also hides the clashing entry from
* ctnetlink iteration, i.e. conntrack -L won't show them.
*/
hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&nf_conntrack_hash[repl_idx]);
/* confirmed bit must be set after hlist add, not before:
* loser_ct can still be visible to other cpu due to
* SLAB_TYPESAFE_BY_RCU.
*/
smp_mb__before_atomic();
set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
}
/**
* nf_ct_resolve_clash - attempt to handle clash without packet drop
*
* @skb: skb that causes the clash
* @h: tuplehash of the clashing entry already in table
* @reply_hash: hash slot for reply direction
*
* A conntrack entry can be inserted to the connection tracking table
* if there is no existing entry with an identical tuple.
*
* If there is one, @skb (and the associated, unconfirmed conntrack) has
* to be dropped. In case @skb is retransmitted, next conntrack lookup
* will find the already-existing entry.
*
* The major problem with such packet drop is the extra delay added by
* the packet loss -- it will take some time for a retransmit to occur
* (or the sender to time out when waiting for a reply).
*
* This function attempts to handle the situation without packet drop.
*
* If @skb has no NAT transformation or if the colliding entries are
* exactly the same, only the to-be-confirmed conntrack entry is discarded
* and @skb is associated with the conntrack entry already in the table.
*
* Failing that, the new, unconfirmed conntrack is still added to the table
* provided that the collision only occurs in the ORIGINAL direction.
* The new entry will be added only in the non-clashing REPLY direction,
* so packets in the ORIGINAL direction will continue to match the existing
* entry. The new entry will also have a fixed timeout so it expires --
* due to the collision, it will only see reply traffic.
*
* Returns NF_DROP if the clash could not be resolved.
*/
static __cold noinline int
nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
u32 reply_hash)
{
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
const struct nf_conntrack_l4proto *l4proto;
enum ip_conntrack_info ctinfo;
struct nf_conn *loser_ct;
struct net *net;
int ret;
loser_ct = nf_ct_get(skb, &ctinfo);
net = nf_ct_net(loser_ct);
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (!l4proto->allow_clash)
goto drop;
ret = __nf_ct_resolve_clash(skb, h);
if (ret == NF_ACCEPT)
return ret;
ret = nf_ct_resolve_clash_harder(skb, reply_hash);
if (ret == NF_ACCEPT)
return ret;
drop:
NF_CT_STAT_INC(net, drop);
NF_CT_STAT_INC(net, insert_failed);
return NF_DROP;
}
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
unsigned int chainlen = 0, sequence, max_chainlen;
const struct nf_conntrack_zone *zone;
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
int ret = NF_DROP;
ct = nf_ct_get(skb, &ctinfo);
net = nf_ct_net(ct);
/* ipt_REJECT uses nf_conntrack_attach to attach related
ICMP/TCP RST packets in the other direction. The actual packet
which created the connection will be IP_CT_NEW or, for an
expected connection, IP_CT_RELATED. */
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
zone = nf_ct_zone(ct);
local_bh_disable();
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
/* reuse the hash saved before */
hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
hash = scale_hash(hash);
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(hash, reply_hash, sequence));
/* We're not in hash table, and we refuse to set up related
* connections for unconfirmed conns. But packet copies and
* REJECT will give spurious warnings here.
*/
/* Another skb with the same unconfirmed conntrack may
* win the race. This may happen for bridge (br_flood)
* or broadcast/multicast packets that were skb_clone()d with
* an unconfirmed conntrack.
*/
if (unlikely(nf_ct_is_confirmed(ct))) {
WARN_ON_ONCE(1);
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
return NF_DROP;
}
if (!nf_ct_ext_valid_pre(ct->ext)) {
NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
/* We have to check the DYING flag after unlink to prevent
* a race against nf_ct_get_next_corpse() possibly called from
* user context, else we insert an already 'dead' hash, blocking
* further use of that particular connection -JM.
*/
if (unlikely(nf_ct_is_dying(ct))) {
NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen)
goto chaintoolong;
}
chainlen = 0;
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
if (chainlen++ > max_chainlen) {
chaintoolong:
NF_CT_STAT_INC(net, chaintoolong);
NF_CT_STAT_INC(net, insert_failed);
ret = NF_DROP;
goto dying;
}
}
/* Timeout is relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
ct->timeout += nfct_time_stamp;
__nf_conntrack_insert_prepare(ct);
/* Since the lookup is lockless, hash insertion must be done after
* setting ct->timeout. The RCU barriers guarantee that no other CPU
* can find the conntrack before the above stores are visible.
*/
__nf_conntrack_hash_insert(ct, hash, reply_hash);
/* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
* skip entries that lack this bit. This happens when a CPU is looking
* at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
* or when another CPU encounters this entry right after the insertion
* but before the set-confirm-bit below. This bit must not be set until
* after __nf_conntrack_hash_insert().
*/
smp_mb__before_atomic();
set_bit(IPS_CONFIRMED_BIT, &ct->status);
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
/* ext area is still valid (rcu read lock is held,
* but will go out of scope soon); if the extension genid
* changed, we need to remove this conntrack again.
*/
if (!nf_ct_ext_valid_post(ct->ext)) {
nf_ct_kill(ct);
NF_CT_STAT_INC_ATOMIC(net, drop);
return NF_DROP;
}
help = nfct_help(ct);
if (help && help->helper)
nf_conntrack_event_cache(IPCT_HELPER, ct);
nf_conntrack_event_cache(master_ct(ct) ?
IPCT_RELATED : IPCT_NEW, ct);
return NF_ACCEPT;
out:
ret = nf_ct_resolve_clash(skb, h, reply_hash);
dying:
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
/* Returns true if a connection corresponds to the tuple (required
for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
struct net *net = nf_ct_net(ignored_conntrack);
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
unsigned int hash, hsize;
struct hlist_nulls_node *n;
struct nf_conn *ct;
zone = nf_ct_zone(ignored_conntrack);
rcu_read_lock();
begin:
nf_conntrack_get_ht(&ct_hash, &hsize);
hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct == ignored_conntrack)
continue;
if (nf_ct_is_expired(ct)) {
nf_ct_gc_expired(ct);
continue;
}
if (nf_ct_key_equal(h, tuple, zone, net)) {
/* Tuple is taken already, so caller will need to find
* a new source port to use.
*
* Only exception:
* If the *original tuples* are identical, then both
* conntracks refer to the same flow.
* This is a rare situation, it can occur e.g. when
* more than one UDP packet is sent from same socket
* in different threads.
*
* Let nf_ct_resolve_clash() deal with this later.
*/
if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
continue;
NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock();
return 1;
}
}
if (get_nulls_value(n) != hash) {
NF_CT_STAT_INC_ATOMIC(net, search_restart);
goto begin;
}
rcu_read_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
#define NF_CT_EVICTION_RANGE 8
/* There's a small race here where we may free a just-assured
connection. Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
struct hlist_nulls_head *head)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int drops = 0;
struct nf_conn *tmp;
hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
continue;
}
if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
!net_eq(nf_ct_net(tmp), net) ||
nf_ct_is_dying(tmp))
continue;
if (!refcount_inc_not_zero(&tmp->ct_general.use))
continue;
/* load ->ct_net and ->status after refcount increase */
smp_acquire__after_ctrl_dep();
/* kill only if still in same netns -- might have moved due to
* SLAB_TYPESAFE_BY_RCU rules.
*
* We steal the timer reference. If that fails timer has
* already fired or someone else deleted it. Just drop ref
* and move to next entry.
*/
if (net_eq(nf_ct_net(tmp), net) &&
nf_ct_is_confirmed(tmp) &&
nf_ct_delete(tmp, 0, 0))
drops++;
nf_ct_put(tmp);
}
return drops;
}
static noinline int early_drop(struct net *net, unsigned int hash)
{
unsigned int i, bucket;
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
unsigned int hsize, drops;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hsize);
if (!i)
bucket = reciprocal_scale(hash, hsize);
else
bucket = (bucket + 1) % hsize;
drops = early_drop_list(net, &ct_hash[bucket]);
rcu_read_unlock();
if (drops) {
NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
return true;
}
}
return false;
}
static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}
static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
u8 protonum = nf_ct_protonum(ct);
if (!test_bit(IPS_ASSURED_BIT, &ct->status))
return true;
l4proto = nf_ct_l4proto_find(protonum);
if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
return true;
return false;
}
static void gc_worker(struct work_struct *work)
{
unsigned int i, hashsz, nf_conntrack_max95 = 0;
u32 end_time, start_time = nfct_time_stamp;
struct conntrack_gc_work *gc_work;
unsigned int expired_count = 0;
unsigned long next_run;
s32 delta_time;
long count;
gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
i = gc_work->next_bucket;
if (gc_work->early_drop)
nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
if (i == 0) {
gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;
gc_work->count = GC_SCAN_INITIAL_COUNT;
gc_work->start_time = start_time;
}
next_run = gc_work->avg_timeout;
count = gc_work->count;
end_time = start_time + GC_SCAN_MAX_DURATION;
do {
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n;
struct nf_conn *tmp;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hashsz);
if (i >= hashsz) {
rcu_read_unlock();
break;
}
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
struct nf_conntrack_net *cnet;
struct net *net;
long expires;
tmp = nf_ct_tuplehash_to_ctrack(h);
if (expired_count > GC_SCAN_EXPIRED_MAX) {
rcu_read_unlock();
gc_work->next_bucket = i;
gc_work->avg_timeout = next_run;
gc_work->count = count;
delta_time = nfct_time_stamp - gc_work->start_time;
/* re-sched immediately if total cycle time is exceeded */
next_run = delta_time < (s32)GC_SCAN_INTERVAL_MAX;
goto early_exit;
}
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
expired_count++;
continue;
}
expires = clamp(nf_ct_expires(tmp), GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_CLAMP);
expires = (expires - (long)next_run) / ++count;
next_run += expires;
if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
continue;
net = nf_ct_net(tmp);
cnet = nf_ct_pernet(net);
if (atomic_read(&cnet->count) < nf_conntrack_max95)
continue;
/* need to take reference to avoid possible races */
if (!refcount_inc_not_zero(&tmp->ct_general.use))
continue;
/* load ->status after refcount increase */
smp_acquire__after_ctrl_dep();
if (gc_worker_skip_ct(tmp)) {
nf_ct_put(tmp);
continue;
}
if (gc_worker_can_early_drop(tmp)) {
nf_ct_kill(tmp);
expired_count++;
}
nf_ct_put(tmp);
}
/* could check get_nulls_value() here and restart if ct
* was moved to another chain. But given gc is best-effort
* we will just continue with the next hash slot.
*/
rcu_read_unlock();
cond_resched();
i++;
delta_time = nfct_time_stamp - end_time;
if (delta_time > 0 && i < hashsz) {
gc_work->avg_timeout = next_run;
gc_work->count = count;
gc_work->next_bucket = i;
next_run = 0;
goto early_exit;
}
} while (i < hashsz);
gc_work->next_bucket = 0;
next_run = clamp(next_run, GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_MAX);
delta_time = max_t(s32, nfct_time_stamp - gc_work->start_time, 1);
if (next_run > (unsigned long)delta_time)
next_run -= delta_time;
else
next_run = 1;
early_exit:
if (gc_work->exiting)
return;
if (next_run)
gc_work->early_drop = false;
queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
gc_work->exiting = false;
}
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp, u32 hash)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
unsigned int ct_count;
struct nf_conn *ct;
/* We don't want any race condition at early drop stage */
ct_count = atomic_inc_return(&cnet->count);
if (nf_conntrack_max && unlikely(ct_count > nf_conntrack_max)) {
if (!early_drop(net, hash)) {
if (!conntrack_gc_work.early_drop)
conntrack_gc_work.early_drop = true;
atomic_dec(&cnet->count);
if (net == &init_net)
net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
else
net_warn_ratelimited("nf_conntrack: table full in netns %u, dropping packet\n",
net->ns.inum);
return ERR_PTR(-ENOMEM);
}
}
/*
* Do not use kmem_cache_zalloc(), as this cache uses
* SLAB_TYPESAFE_BY_RCU.
*/
ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
if (ct == NULL)
goto out;
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
WRITE_ONCE(ct->timeout, 0);
write_pnet(&ct->ct_net, net);
memset_after(ct, 0, __nfct_init_offset);
nf_ct_zone_add(ct, zone);
/* Because we use RCU lookups, we set ct_general.use to zero before
* this is inserted in any list.
*/
refcount_set(&ct->ct_general.use, 0);
return ct;
out:
atomic_dec(&cnet->count);
return ERR_PTR(-ENOMEM);
}
struct nf_conn *nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
{
return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
void nf_conntrack_free(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_net *cnet;
/* A freed object has refcnt == 0, that's
* the golden rule for SLAB_TYPESAFE_BY_RCU
*/
WARN_ON(refcount_read(&ct->ct_general.use) != 0);
if (ct->status & IPS_SRC_NAT_DONE) {
const struct nf_nat_hook *nat_hook;
rcu_read_lock();
nat_hook = rcu_dereference(nf_nat_hook);
if (nat_hook)
nat_hook->remove_nat_bysrc(ct);
rcu_read_unlock();
}
kfree(ct->ext);
kmem_cache_free(nf_conntrack_cachep, ct);
cnet = nf_ct_pernet(net);
smp_mb__before_atomic();
atomic_dec(&cnet->count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
/* Allocate a new conntrack: we return -ENOMEM if classification
failed due to stress. Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
struct sk_buff *skb,
unsigned int dataoff, u32 hash)
{
struct nf_conn *ct;
struct nf_conn_help *help;
struct nf_conntrack_tuple repl_tuple;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_ecache *ecache;
#endif
struct nf_conntrack_expect *exp = NULL;
const struct nf_conntrack_zone *zone;
struct nf_conn_timeout *timeout_ext;
struct nf_conntrack_zone tmp;
struct nf_conntrack_net *cnet;
if (!nf_ct_invert_tuple(&repl_tuple, tuple))
return NULL;
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
hash);
if (IS_ERR(ct))
return ERR_CAST(ct);
if (!nf_ct_add_synproxy(ct, tmpl)) {
nf_conntrack_free(ct);
return ERR_PTR(-ENOMEM);
}
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
if (timeout_ext)
nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
GFP_ATOMIC);
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
if ((ecache || net->ct.sysctl_events) &&
!nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
ecache ? ecache->expmask : 0,
GFP_ATOMIC)) {
nf_conntrack_free(ct);
return ERR_PTR(-ENOMEM);
}
#endif
cnet = nf_ct_pernet(net);
if (cnet->expect_count) {
spin_lock_bh(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple, !tmpl || nf_ct_is_confirmed(tmpl));
if (exp) {
/* Welcome, Mr. Bond. We've been expecting you... */
__set_bit(IPS_EXPECTED_BIT, &ct->status);
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
ct->master = exp->master;
if (exp->helper) {
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help)
rcu_assign_pointer(help->helper, exp->helper);
}
#ifdef CONFIG_NF_CONNTRACK_MARK
ct->mark = READ_ONCE(exp->master->mark);
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
ct->secmark = exp->master->secmark;
#endif
NF_CT_STAT_INC(net, expect_new);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
if (!exp && tmpl)
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
/* Another CPU might have obtained a pointer to this object before it was
* released. Because refcount is 0, refcount_inc_not_zero() will fail.
*
* After refcount_set(1) it will succeed; ensure that zeroing of
* ct->status and the correct ct->net pointer are visible; else other
* core might observe CONFIRMED bit which means the entry is valid and
* in the hash table, but it's not (anymore).
*/
smp_wmb();
/* Now it is going to be associated with an sk_buff, set refcount to 1. */
refcount_set(&ct->ct_general.use, 1);
if (exp) {
if (exp->expectfn)
exp->expectfn(ct, exp);
nf_ct_expect_put(exp);
}
return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u_int8_t protonum,
const struct nf_hook_state *state)
{
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
u32 hash, zone_id, rid;
struct nf_conn *ct;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
&tuple))
return 0;
/* look for tuple match */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
hash = hash_conntrack_raw(&tuple, zone_id, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
if (!h) {
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
if (zone_id != rid) {
u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
}
}
if (!h) {
h = init_conntrack(state->net, tmpl, &tuple,
skb, dataoff, hash);
if (!h)
return 0;
if (IS_ERR(h))
return PTR_ERR(h);
}
ct = nf_ct_tuplehash_to_ctrack(h);
/* It exists; we have (non-exclusive) reference. */
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
ctinfo = IP_CT_ESTABLISHED_REPLY;
} else {
unsigned long status = READ_ONCE(ct->status);
/* Once we've had two way comms, always ESTABLISHED. */
if (likely(status & IPS_SEEN_REPLY))
ctinfo = IP_CT_ESTABLISHED;
else if (status & IPS_EXPECTED)
ctinfo = IP_CT_RELATED;
else
ctinfo = IP_CT_NEW;
}
nf_ct_set(skb, ct, ctinfo);
return 0;
}
/*
* icmp packets need special treatment to handle error messages that are
* related to a connection.
*
* Callers need to check if skb has a conntrack assigned when this
* helper returns; in such case skb belongs to an already known connection.
*/
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u8 protonum,
const struct nf_hook_state *state)
{
int ret;
if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
#if IS_ENABLED(CONFIG_IPV6)
else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
#endif
else
return NF_ACCEPT;
if (ret <= 0)
NF_CT_STAT_INC_ATOMIC(state->net, error);
return ret;
}
static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
enum ip_conntrack_info ctinfo)
{
const unsigned int *timeout = nf_ct_timeout_lookup(ct);
if (!timeout)
timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Returns verdict for packet, or -1 for invalid. */
static int nf_conntrack_handle_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
switch (nf_ct_protonum(ct)) {
case IPPROTO_TCP:
return nf_conntrack_tcp_packet(ct, skb, dataoff,
ctinfo, state);
case IPPROTO_UDP:
return nf_conntrack_udp_packet(ct, skb, dataoff,
ctinfo, state);
case IPPROTO_ICMP:
return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ICMPV6:
return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
case IPPROTO_UDPLITE:
return nf_conntrack_udplite_packet(ct, skb, dataoff,
ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP:
return nf_conntrack_sctp_packet(ct, skb, dataoff,
ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE:
return nf_conntrack_gre_packet(ct, skb, dataoff,
ctinfo, state);
#endif
}
return generic_packet(ct, skb, ctinfo);
}
unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct, *tmpl;
u_int8_t protonum;
int dataoff, ret;
tmpl = nf_ct_get(skb, &ctinfo);
if (tmpl || ctinfo == IP_CT_UNTRACKED) {
/* Previously seen (loopback or untracked)? Ignore. */
if ((tmpl && !nf_ct_is_template(tmpl)) ||
ctinfo == IP_CT_UNTRACKED)
return NF_ACCEPT;
skb->_nfct = 0;
}
/* rcu_read_lock()ed by nf_hook_thresh */
dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
if (dataoff <= 0) {
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
protonum, state);
if (ret <= 0) {
ret = -ret;
goto out;
}
/* ICMP[v6] protocol trackers may assign one conntrack. */
if (skb->_nfct)
goto out;
}
repeat:
ret = resolve_normal_ct(tmpl, skb, dataoff,
protonum, state);
if (ret < 0) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = NF_DROP;
goto out;
}
ct = nf_ct_get(skb, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
nf_ct_put(ct);
skb->_nfct = 0;
/* Special case: TCP tracker reports an attempt to reopen a
* closed/aborted connection. We have to go back and create a
* fresh conntrack.
*/
if (ret == -NF_REPEAT)
goto repeat;
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
if (ret == NF_DROP)
NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = -ret;
goto out;
}
if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
!test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
if (tmpl)
nf_ct_put(tmpl);
return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
u32 extra_jiffies,
unsigned int bytes)
{
/* Only update if this is not a fixed timeout */
if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
goto acct;
/* If not in hash table, timer will not be active yet */
if (nf_ct_is_confirmed(ct))
extra_jiffies += nfct_time_stamp;
if (READ_ONCE(ct->timeout) != extra_jiffies)
WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
if (bytes)
nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
bool nf_ct_kill_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb)
{
nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
return nf_ct_delete(ct, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>
/* Generic function for tcp/udp/sctp/dccp and alike. */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
[CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
[CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *t,
u_int32_t flags)
{
if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
if (!tb[CTA_PROTO_SRC_PORT])
return -EINVAL;
t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
}
if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
if (!tb[CTA_PROTO_DST_PORT])
return -EINVAL;
t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
unsigned int nf_ct_port_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
/* This ICMP is in reverse direction to the packet which caused it */
ct = nf_ct_get(skb, &ctinfo);
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
ctinfo = IP_CT_RELATED_REPLY;
else
ctinfo = IP_CT_RELATED;
/* Attach to new skbuff, and increment count */
nf_ct_set(nskb, ct, ctinfo);
nf_conntrack_get(skb_nfct(nskb));
}
/* This packet is coming from userspace via nf_queue, complete the packet
* processing after the helper invocation in nf_confirm().
*/
static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
const struct nf_conntrack_helper *helper;
const struct nf_conn_help *help;
int protoff;
help = nfct_help(ct);
if (!help)
return NF_ACCEPT;
helper = rcu_dereference(help->helper);
if (!helper)
return NF_ACCEPT;
if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
return NF_ACCEPT;
switch (nf_ct_l3num(ct)) {
case NFPROTO_IPV4:
protoff = skb_network_offset(skb) + ip_hdrlen(skb);
break;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6: {
__be16 frag_off;
u8 pnum;
pnum = ipv6_hdr(skb)->nexthdr;
protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
&frag_off);
if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
return NF_ACCEPT;
break;
}
#endif
default:
return NF_ACCEPT;
}
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
!nf_is_loopback_packet(skb)) {
if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return NF_DROP;
}
}
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
return NF_ACCEPT;
return nf_confirm_cthelper(skb, ct, ctinfo);
}
static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
{
const struct nf_conntrack_tuple *src_tuple;
const struct nf_conntrack_tuple_hash *hash;
struct nf_conntrack_tuple srctuple;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
return true;
}
if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
NFPROTO_IPV4, dev_net(skb->dev),
&srctuple))
return false;
hash = nf_conntrack_find_get(dev_net(skb->dev),
&nf_ct_zone_dflt,
&srctuple);
if (!hash)
return false;
ct = nf_ct_tuplehash_to_ctrack(hash);
src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
nf_ct_put(ct);
return true;
}
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
const struct nf_ct_iter_data *iter_data, unsigned int *bucket)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct hlist_nulls_node *n;
spinlock_t *lockp;
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
if (hlist_nulls_empty(hslot))
continue;
lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
local_bh_disable();
nf_conntrack_lock(lockp);
hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
continue;
/* All nf_conn objects are added to hash table twice, once
* for original direction tuple, once for the reply tuple.
*
* Exception: In the IPS_NAT_CLASH case, only the reply
* tuple is added (the original tuple already existed for
* a different object).
*
* We only need to call the iterator once for each
* conntrack, so we just use the 'reply' direction
* tuple while iterating.
*/
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter_data->net &&
!net_eq(iter_data->net, nf_ct_net(ct)))
continue;
if (iter(ct, iter_data->data))
goto found;
}
spin_unlock(lockp);
local_bh_enable();
cond_resched();
}
return NULL;
found:
refcount_inc(&ct->ct_general.use);
spin_unlock(lockp);
local_bh_enable();
return ct;
}
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
const struct nf_ct_iter_data *iter_data)
{
unsigned int bucket = 0;
struct nf_conn *ct;
might_sleep();
mutex_lock(&nf_conntrack_mutex);
while ((ct = get_next_corpse(iter, iter_data, &bucket)) != NULL) {
/* Time to push up daisies... */
nf_ct_delete(ct, iter_data->portid, iter_data->report);
nf_ct_put(ct);
cond_resched();
}
mutex_unlock(&nf_conntrack_mutex);
}
void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
const struct nf_ct_iter_data *iter_data)
{
struct net *net = iter_data->net;
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
might_sleep();
if (atomic_read(&cnet->count) == 0)
return;
nf_ct_iterate_cleanup(iter, iter_data);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
/**
* nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
* @iter: callback to invoke for each conntrack
* @data: data to pass to @iter
*
* Like nf_ct_iterate_cleanup, but first marks conntracks on the
* unconfirmed list as dying (so they will not be inserted into
* main table).
*
* Can only be called in module exit path.
*/
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
struct nf_ct_iter_data iter_data = {};
struct net *net;
down_read(&net_rwsem);
for_each_net(net) {
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
if (atomic_read(&cnet->count) == 0)
continue;
nf_queue_nf_hook_drop(net);
}
up_read(&net_rwsem);
/* Need to wait for netns cleanup worker to finish, if it's
* running -- it might have deleted a net namespace from
* the global list, so hook drop above might not have
* affected all namespaces.
*/
net_ns_barrier();
/* a skb w. unconfirmed conntrack could have been reinjected just
* before we called nf_queue_nf_hook_drop().
*
* This makes sure it's inserted into the conntrack table.
*/
synchronize_net();
nf_ct_ext_bump_genid();
iter_data.data = data;
nf_ct_iterate_cleanup(iter, &iter_data);
/* Another cpu might be in a rcu read section with
* rcu protected pointer cleared in iter callback
* or hidden via nf_ct_ext_bump_genid() above.
*
* Wait until those are done.
*/
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
static int kill_all(struct nf_conn *i, void *data)
{
return 1;
}
void nf_conntrack_cleanup_start(void)
{
cleanup_nf_conntrack_bpf();
conntrack_gc_work.exiting = true;
}
void nf_conntrack_cleanup_end(void)
{
RCU_INIT_POINTER(nf_ct_hook, NULL);
cancel_delayed_work_sync(&conntrack_gc_work.dwork);
kvfree(nf_conntrack_hash);
nf_conntrack_proto_fini();
nf_conntrack_helper_fini();
nf_conntrack_expect_fini();
kmem_cache_destroy(nf_conntrack_cachep);
}
/*
* Mishearing the voices in his head, our hero wonders how he's
* supposed to kill the mall.
*/
void nf_conntrack_cleanup_net(struct net *net)
{
LIST_HEAD(single);
list_add(&net->exit_list, &single);
nf_conntrack_cleanup_net_list(&single);
}
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
struct nf_ct_iter_data iter_data = {};
struct net *net;
int busy;
/*
* This makes sure all current packets have passed through
* netfilter framework. Roll on, two-stage module
* delete...
*/
synchronize_rcu_expedited();
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
iter_data.net = net;
nf_ct_iterate_cleanup_net(kill_all, &iter_data);
if (atomic_read(&cnet->count) != 0)
busy = 1;
}
if (busy) {
schedule();
goto i_see_dead_people;
}
list_for_each_entry(net, net_exit_list, exit_list) {
nf_conntrack_ecache_pernet_fini(net);
nf_conntrack_expect_pernet_fini(net);
free_percpu(net->ct.stat);
}
}
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
if (*sizep > (INT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
if (nr_slots > (INT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;
hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
INIT_HLIST_NULLS_HEAD(&hash[i], i);
return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
int nf_conntrack_hash_resize(unsigned int hashsize)
{
int i, bucket;
unsigned int old_size;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
if (!hashsize)
return -EINVAL;
hash = nf_ct_alloc_hashtable(&hashsize, 1);
if (!hash)
return -ENOMEM;
mutex_lock(&nf_conntrack_mutex);
old_size = nf_conntrack_htable_size;
if (old_size == hashsize) {
mutex_unlock(&nf_conntrack_mutex);
kvfree(hash);
return 0;
}
local_bh_disable();
nf_conntrack_all_lock();
write_seqcount_begin(&nf_conntrack_generation);
/* Lookups in the old hash might happen in parallel, which means we
* might get false negatives during connection lookup. New connections
* created because of a false negative won't make it into the hash
* though since that required taking the locks.
*/
for (i = 0; i < nf_conntrack_htable_size; i++) {
while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
unsigned int zone_id;
h = hlist_nulls_entry(nf_conntrack_hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));
bucket = __hash_conntrack(nf_ct_net(ct),
&h->tuple, zone_id, hashsize);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
old_hash = nf_conntrack_hash;
nf_conntrack_hash = hash;
nf_conntrack_htable_size = hashsize;
write_seqcount_end(&nf_conntrack_generation);
nf_conntrack_all_unlock();
local_bh_enable();
mutex_unlock(&nf_conntrack_mutex);
synchronize_net();
kvfree(old_hash);
return 0;
}
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
{
unsigned int hashsize;
int rc;
if (current->nsproxy->net_ns != &init_net)
return -EOPNOTSUPP;
/* On boot, we can set this without any fancy locking. */
if (!nf_conntrack_hash)
return param_set_uint(val, kp);
rc = kstrtouint(val, 0, &hashsize);
if (rc)
return rc;
return nf_conntrack_hash_resize(hashsize);
}
int nf_conntrack_init_start(void)
{
unsigned long nr_pages = totalram_pages();
int max_factor = 8;
int ret = -ENOMEM;
int i;
seqcount_spinlock_init(&nf_conntrack_generation,
&nf_conntrack_locks_all_lock);
for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_lock_init(&nf_conntrack_locks[i]);
if (!nf_conntrack_htable_size) {
nf_conntrack_htable_size
= (((nr_pages << PAGE_SHIFT) / 16384)
/ sizeof(struct hlist_head));
if (BITS_PER_LONG >= 64 &&
nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
nf_conntrack_htable_size = 262144;
else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
nf_conntrack_htable_size = 65536;
if (nf_conntrack_htable_size < 1024)
nf_conntrack_htable_size = 1024;
/* Use a max. factor of one by default to keep the average
* hash chain length at 2 entries. Each entry has to be added
* twice (once for original direction, once for reply).
* When a table size is given we use the old value of 8 to
* avoid implicit reduction of the max entries setting.
*/
max_factor = 1;
}
nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
if (!nf_conntrack_hash)
return -ENOMEM;
nf_conntrack_max = max_factor * nf_conntrack_htable_size;
nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
sizeof(struct nf_conn),
NFCT_INFOMASK + 1,
SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
if (!nf_conntrack_cachep)
goto err_cachep;
ret = nf_conntrack_expect_init();
if (ret < 0)
goto err_expect;
ret = nf_conntrack_helper_init();
if (ret < 0)
goto err_helper;
ret = nf_conntrack_proto_init();
if (ret < 0)
goto err_proto;
conntrack_gc_work_init(&conntrack_gc_work);
queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
ret = register_nf_conntrack_bpf();
if (ret < 0)
goto err_kfunc;
return 0;
err_kfunc:
cancel_delayed_work_sync(&conntrack_gc_work.dwork);
nf_conntrack_proto_fini();
err_proto:
nf_conntrack_helper_fini();
err_helper:
nf_conntrack_expect_fini();
err_expect:
kmem_cache_destroy(nf_conntrack_cachep);
err_cachep:
kvfree(nf_conntrack_hash);
return ret;
}
static void nf_conntrack_set_closing(struct nf_conntrack *nfct)
{
struct nf_conn *ct = nf_ct_to_nf_conn(nfct);
switch (nf_ct_protonum(ct)) {
case IPPROTO_TCP:
nf_conntrack_tcp_set_closing(ct);
break;
}
}
static const struct nf_ct_hook nf_conntrack_hook = {
.update = nf_conntrack_update,
.destroy = nf_ct_destroy,
.get_tuple_skb = nf_conntrack_get_tuple_skb,
.attach = nf_conntrack_attach,
.set_closing = nf_conntrack_set_closing,
.confirm = __nf_conntrack_confirm,
.get_id = nf_conntrack_get_id,
};
void nf_conntrack_init_end(void)
{
RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
}
/*
* We need to use special "null" values, not used in hash table
*/
#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
int nf_conntrack_init_net(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
int ret = -ENOMEM;
BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
atomic_set(&cnet->count, 0);
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
if (!net->ct.stat)
return ret;
ret = nf_conntrack_expect_pernet_init(net);
if (ret < 0)
goto err_expect;
nf_conntrack_acct_pernet_init(net);
nf_conntrack_tstamp_pernet_init(net);
nf_conntrack_ecache_pernet_init(net);
nf_conntrack_proto_pernet_init(net);
return 0;
err_expect:
free_percpu(net->ct.stat);
return ret;
}
/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */
int __nf_ct_change_timeout(struct nf_conn *ct, u64 timeout)
{
if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
return -EPERM;
__nf_ct_set_timeout(ct, timeout);
if (test_bit(IPS_DYING_BIT, &ct->status))
return -ETIME;
return 0;
}
EXPORT_SYMBOL_GPL(__nf_ct_change_timeout);
void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off)
{
unsigned int bit;
/* Ignore these unchangeable bits */
on &= ~IPS_UNCHANGEABLE_MASK;
off &= ~IPS_UNCHANGEABLE_MASK;
for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
if (on & (1 << bit))
set_bit(bit, &ct->status);
else if (off & (1 << bit))
clear_bit(bit, &ct->status);
}
}
EXPORT_SYMBOL_GPL(__nf_ct_change_status);
int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status)
{
unsigned long d;
d = ct->status ^ status;
if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
/* unchangeable */
return -EBUSY;
if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
/* SEEN_REPLY bit can only be set */
return -EBUSY;
if (d & IPS_ASSURED && !(status & IPS_ASSURED))
/* ASSURED bit can only be set */
return -EBUSY;
__nf_ct_change_status(ct, status, 0);
return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_change_status_common);
/*
* DRBG: Deterministic Random Bits Generator
* Based on NIST Recommended DRBG from NIST SP800-90A with the following
* properties:
* * CTR DRBG with DF with AES-128, AES-192, AES-256 cores
* * Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
* * HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
* * with and without prediction resistance
*
* Copyright Stephan Mueller <smueller@chronox.de>, 2014
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* DRBG Usage
* ==========
* The SP 800-90A DRBG allows the user to specify a personalization string
* for initialization as well as an additional information string for each
* random number request. The following code fragments show how a caller
* uses the kernel crypto API to use the full functionality of the DRBG.
*
* Usage without any additional data
* ---------------------------------
* struct crypto_rng *drng;
* int err;
* char data[DATALEN];
*
* drng = crypto_alloc_rng(drng_name, 0, 0);
* err = crypto_rng_get_bytes(drng, &data, DATALEN);
* crypto_free_rng(drng);
*
*
* Usage with personalization string during initialization
* -------------------------------------------------------
* struct crypto_rng *drng;
* int err;
* char data[DATALEN];
* struct drbg_string pers;
* char personalization[11] = "some-string";
*
* drbg_string_fill(&pers, personalization, strlen(personalization));
* drng = crypto_alloc_rng(drng_name, 0, 0);
* // The reset completely re-initializes the DRBG with the provided
* // personalization string
* err = crypto_rng_reset(drng, &personalization, strlen(personalization));
* err = crypto_rng_get_bytes(drng, &data, DATALEN);
* crypto_free_rng(drng);
*
*
* Usage with additional information string during random number request
* ---------------------------------------------------------------------
* struct crypto_rng *drng;
* int err;
* char data[DATALEN];
* char addtl_string[11] = "some-string";
* struct drbg_string addtl;
*
* drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
* drng = crypto_alloc_rng(drng_name, 0, 0);
* // The following call is a wrapper to crypto_rng_get_bytes() and returns
* // the same error codes.
* err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl);
* crypto_free_rng(drng);
*
*
* Usage with personalization and additional information strings
* -------------------------------------------------------------
* Just mix both scenarios above.
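*
* A minimal combined sketch (reusing the illustrative drng_name and DATALEN
* from the fragments above; error handling omitted for brevity):
*
* struct crypto_rng *drng;
* struct drbg_string pers, addtl;
* char personalization[11] = "some-string";
* char addtl_string[11] = "some-string";
* char data[DATALEN];
* int err;
*
* drbg_string_fill(&pers, personalization, strlen(personalization));
* drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
* drng = crypto_alloc_rng(drng_name, 0, 0);
* err = crypto_rng_reset(drng, &personalization, strlen(personalization));
* err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl);
* crypto_free_rng(drng);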
*/
#include <crypto/drbg.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string_choices.h>
/***************************************************************
* Backend cipher definitions available to DRBG
***************************************************************/
/*
* The order of the DRBG definitions here matters: every DRBG is registered
* as stdrng. Each DRBG receives a higher cra_priority value the later it
* is defined in this array (see drbg_fill_array).
*
* HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and the
* HMAC-SHA512 / SHA256 / AES 256 over other ciphers. Thus, the
* favored DRBGs are the last entries in this array.
*/
static const struct drbg_core drbg_cores[] = {
#ifdef CONFIG_CRYPTO_DRBG_CTR
{
.flags = DRBG_CTR | DRBG_STRENGTH128,
.statelen = 32, /* 256 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes128",
.backend_cra_name = "aes",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH192,
.statelen = 40, /* 320 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes192",
.backend_cra_name = "aes",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH256,
.statelen = 48, /* 384 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes256",
.backend_cra_name = "aes",
},
#endif /* CONFIG_CRYPTO_DRBG_CTR */
#ifdef CONFIG_CRYPTO_DRBG_HASH
{
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 111, /* 888 bits */
.blocklen_bytes = 48,
.cra_name = "sha384",
.backend_cra_name = "sha384",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 111, /* 888 bits */
.blocklen_bytes = 64,
.cra_name = "sha512",
.backend_cra_name = "sha512",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 55, /* 440 bits */
.blocklen_bytes = 32,
.cra_name = "sha256",
.backend_cra_name = "sha256",
},
#endif /* CONFIG_CRYPTO_DRBG_HASH */
#ifdef CONFIG_CRYPTO_DRBG_HMAC
{
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 48, /* block length of cipher */
.blocklen_bytes = 48,
.cra_name = "hmac_sha384",
.backend_cra_name = "hmac(sha384)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 32, /* block length of cipher */
.blocklen_bytes = 32,
.cra_name = "hmac_sha256",
.backend_cra_name = "hmac(sha256)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 64, /* block length of cipher */
.blocklen_bytes = 64,
.cra_name = "hmac_sha512",
.backend_cra_name = "hmac(sha512)",
},
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
};
static int drbg_uninstantiate(struct drbg_state *drbg);
/******************************************************************
* Generic helper functions
******************************************************************/
/*
* Return strength of DRBG according to SP800-90A section 8.4
*
* @flags DRBG flags reference
*
* Return: normalized strength in *bytes*, or 32 as a default to counter
* programming errors
*/
static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
{
switch (flags & DRBG_STRENGTH_MASK) {
case DRBG_STRENGTH128:
return 16;
case DRBG_STRENGTH192:
return 24;
case DRBG_STRENGTH256:
return 32;
default:
return 32;
}
}
/*
* FIPS 140-2 continuous self test for the noise source
* The test is performed on the noise source input data. Thus, the function
* implicitly knows the size of the buffer to be equal to the security
* strength.
*
* Note, this function disregards the nonce trailing the entropy data during
* initial seeding.
*
* drbg->drbg_mutex must have been taken.
*
* @drbg DRBG handle
* @entropy buffer of seed data to be checked
*
* return:
* 0 on success
* -EAGAIN when the CTRNG is not yet primed
* < 0 on error
*/
static int drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
{
unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
int ret = 0;
if (!IS_ENABLED(CONFIG_CRYPTO_FIPS))
return 0;
/* skip test if we test the overall system */
if (list_empty(&drbg->test_data.list))
return 0;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;
if (!drbg->fips_primed) {
/* Priming of FIPS test */
memcpy(drbg->prev, entropy, entropylen);
drbg->fips_primed = true;
/* priming: another round is needed */
return -EAGAIN;
}
ret = memcmp(drbg->prev, entropy, entropylen);
if (!ret)
panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, entropy, entropylen);
/* the test shall pass when the two values are not equal */
return 0;
}
/*
* Convert an integer into a byte representation of this integer.
* The byte representation is big-endian
*
* @val value to be converted
* @buf buffer holding the converted integer -- caller must ensure that
* buffer size is at least 32 bits
*/
#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
{
struct s {
__be32 conv;
};
struct s *conversion = (struct s *) buf;
conversion->conv = cpu_to_be32(val);
}
#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
/******************************************************************
* CTR DRBG callback functions
******************************************************************/
#ifdef CONFIG_CRYPTO_DRBG_CTR
#define CRYPTO_DRBG_CTR_STRING "CTR "
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes256");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes256");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
const unsigned char *key);
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
#define DRBG_OUTSCRATCHLEN 256
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
unsigned char *out, const unsigned char *key,
struct list_head *in)
{
int ret = 0;
struct drbg_string *curr = NULL;
struct drbg_string data;
short cnt = 0;
drbg_string_fill(&data, out, drbg_blocklen(drbg));
/* 10.4.3 step 2 / 4 */
drbg_kcapi_symsetkey(drbg, key);
list_for_each_entry(curr, in, list) {
const unsigned char *pos = curr->buf;
size_t len = curr->len;
/* 10.4.3 step 4.1 */
while (len) {
/* 10.4.3 step 4.2 */
if (drbg_blocklen(drbg) == cnt) {
cnt = 0;
ret = drbg_kcapi_sym(drbg, out, &data);
if (ret)
return ret;
}
out[cnt] ^= *pos;
pos++;
cnt++;
len--;
}
}
/* 10.4.3 step 4.2 for last block */
if (cnt)
ret = drbg_kcapi_sym(drbg, out, &data);
return ret;
}
/*
* scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
* (and drbg_ctr_bcc, but this function does not need any temporary buffers),
* the scratchpad is used as follows:
* drbg_ctr_update:
* temp
* start: drbg->scratchpad
* length: drbg_statelen(drbg) + drbg_blocklen(drbg)
* note: the cipher writing into this variable works
* blocklen-wise. Now, when the statelen is not a multiple
* of blocklen, the generation loop below "spills over"
* by at most blocklen. Thus, we need to give sufficient
* memory.
* df_data
* start: drbg->scratchpad +
* drbg_statelen(drbg) + drbg_blocklen(drbg)
* length: drbg_statelen(drbg)
*
* drbg_ctr_df:
* pad
* start: df_data + drbg_statelen(drbg)
* length: drbg_blocklen(drbg)
* iv
* start: pad + drbg_blocklen(drbg)
* length: drbg_blocklen(drbg)
* temp
* start: iv + drbg_blocklen(drbg)
* length: drbg_statelen(drbg) + drbg_blocklen(drbg)
* note: temp is the buffer that the BCC function operates
* on. BCC operates blockwise. drbg_statelen(drbg)
* is sufficient when the DRBG state length is a multiple
* of the block size. For AES192 (and maybe other ciphers)
* this is not correct and the length for temp is
* insufficient (yes, that also means for such ciphers,
* the final output of all BCC rounds is truncated).
* Therefore, add drbg_blocklen(drbg) to cover all
* possibilities.
*/
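/*
* A rough sketch of the resulting scratchpad layout for the CTR DRBG
* (offsets relative to drbg->scratchpad, derived from the description
* above; actual sizes depend on the selected backend cipher):
*
* [0] temp: drbg_statelen(drbg) + drbg_blocklen(drbg)
* [statelen + blocklen] df_data: drbg_statelen(drbg)
* [2 * statelen + blocklen] pad: drbg_blocklen(drbg)
* [2 * statelen + 2 * blocklen] iv: drbg_blocklen(drbg)
* [2 * statelen + 3 * blocklen] temp (BCC): drbg_statelen(drbg) + drbg_blocklen(drbg)
*/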
/* Derivation Function for CTR DRBG as defined in 10.4.2 */
static int drbg_ctr_df(struct drbg_state *drbg,
unsigned char *df_data, size_t bytes_to_return,
struct list_head *seedlist)
{
int ret = -EFAULT;
unsigned char L_N[8];
/* S3 is input */
struct drbg_string S1, S2, S4, cipherin;
LIST_HEAD(bcc_list);
unsigned char *pad = df_data + drbg_statelen(drbg);
unsigned char *iv = pad + drbg_blocklen(drbg);
unsigned char *temp = iv + drbg_blocklen(drbg);
size_t padlen = 0;
unsigned int templen = 0;
/* 10.4.2 step 7 */
unsigned int i = 0;
/* 10.4.2 step 8 */
const unsigned char *K = (unsigned char *)
"\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
unsigned char *X;
size_t generated_len = 0;
size_t inputlen = 0;
struct drbg_string *seed = NULL;
memset(pad, 0, drbg_blocklen(drbg));
memset(iv, 0, drbg_blocklen(drbg));
/* 10.4.2 step 1 is implicit as we work byte-wise */
/* 10.4.2 step 2 */
if ((512/8) < bytes_to_return)
return -EINVAL;
/* 10.4.2 step 2 -- calculate the entire length of all input data */
list_for_each_entry(seed, seedlist, list)
inputlen += seed->len;
drbg_cpu_to_be32(inputlen, &L_N[0]);
/* 10.4.2 step 3 */
drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
/* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
/* wrap the padlen appropriately */
if (padlen)
padlen = drbg_blocklen(drbg) - padlen;
/*
* pad / padlen contains the 0x80 byte and the following zero bytes.
* As the calculated padlen value only covers the number of zero
* bytes, this value has to be incremented by one for the 0x80 byte.
*/
padlen++;
pad[0] = 0x80;
/* 10.4.2 step 4 -- first fill the linked list and then order it */
drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
list_add_tail(&S1.list, &bcc_list);
drbg_string_fill(&S2, L_N, sizeof(L_N));
list_add_tail(&S2.list, &bcc_list);
list_splice_tail(seedlist, &bcc_list);
drbg_string_fill(&S4, pad, padlen);
list_add_tail(&S4.list, &bcc_list);
/* 10.4.2 step 9 */
while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
/*
* 10.4.2 step 9.1 - the padding is implicit as the buffer
* holds zeros after allocation -- even the increment of i
* is irrelevant as the increment remains within length of i
*/
drbg_cpu_to_be32(i, iv);
/* 10.4.2 step 9.2 -- BCC and concatenation with temp */
ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
if (ret)
goto out;
/* 10.4.2 step 9.3 */
i++;
templen += drbg_blocklen(drbg);
}
/* 10.4.2 step 11 */
X = temp + (drbg_keylen(drbg));
drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
/* 10.4.2 step 12: overwriting of outval is implemented in next step */
/* 10.4.2 step 13 */
drbg_kcapi_symsetkey(drbg, temp);
while (generated_len < bytes_to_return) {
short blocklen = 0;
/*
* 10.4.2 step 13.1: the truncation of the key length is
* implicit as the key is only drbg_blocklen in size based on
* the implementation of the cipher function callback
*/
ret = drbg_kcapi_sym(drbg, X, &cipherin);
if (ret)
goto out;
blocklen = (drbg_blocklen(drbg) <
(bytes_to_return - generated_len)) ?
drbg_blocklen(drbg) :
(bytes_to_return - generated_len);
/* 10.4.2 step 13.2 and 14 */
memcpy(df_data + generated_len, X, blocklen);
generated_len += blocklen;
}
ret = 0;
out:
memset(iv, 0, drbg_blocklen(drbg));
memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
memset(pad, 0, drbg_blocklen(drbg));
return ret;
}
/*
* update function of CTR DRBG as defined in 10.2.1.2
*
* The reseed variable has an enhanced meaning compared to the update
* functions of the other DRBGs as follows:
* 0 => initial seed from initialization
* 1 => reseed via drbg_seed
* 2 => first invocation from drbg_ctr_update when addtl is present. In
* this case, the df_data scratchpad is not deleted so that it is
* available for subsequent calls to prevent calling the DF function
* again.
* 3 => second invocation from drbg_ctr_update. When the update function
* was called with addtl, the df_data memory already contains the
* DFed addtl information and we do not need to call DF again.
*/
static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
int reseed)
{
int ret = -EFAULT;
/* 10.2.1.2 step 1 */
unsigned char *temp = drbg->scratchpad;
unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
drbg_blocklen(drbg);
if (3 > reseed)
memset(df_data, 0, drbg_statelen(drbg));
if (!reseed) {
/*
* The DRBG uses the CTR mode of the underlying AES cipher. The
* CTR mode increments the counter value after the AES operation
* but SP800-90A requires that the counter is incremented before
* the AES operation. Hence, we increment it at the time we set
* it by one.
*/
crypto_inc(drbg->V, drbg_blocklen(drbg));
ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
drbg_keylen(drbg));
if (ret)
goto out;
}
/* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
if (seed) {
ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
if (ret)
goto out;
}
ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg),
temp, drbg_statelen(drbg));
if (ret)
return ret;
/* 10.2.1.2 step 5 */
ret = crypto_skcipher_setkey(drbg->ctr_handle, temp,
drbg_keylen(drbg));
if (ret)
goto out;
/* 10.2.1.2 step 6 */
memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
/* See above: increment counter by one to compensate timing of CTR op */
crypto_inc(drbg->V, drbg_blocklen(drbg));
ret = 0;
out:
memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
if (2 != reseed)
memset(df_data, 0, drbg_statelen(drbg));
return ret;
}
/*
* scratchpad use: drbg_ctr_update is called independently from
* drbg_ctr_extract_bytes. Therefore, the scratchpad is reused
*/
/* Generate function of CTR DRBG as defined in 10.2.1.5.2 */
static int drbg_ctr_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct list_head *addtl)
{
int ret;
int len = min_t(int, buflen, INT_MAX);
/* 10.2.1.5.2 step 2 */
if (addtl && !list_empty(addtl)) {
ret = drbg_ctr_update(drbg, addtl, 2);
if (ret)
return 0;
}
/* 10.2.1.5.2 step 4.1 */
ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len);
if (ret)
return ret;
/* 10.2.1.5.2 step 6 */
ret = drbg_ctr_update(drbg, NULL, 3);
if (ret)
len = ret;
return len;
}
static const struct drbg_state_ops drbg_ctr_ops = {
.update = drbg_ctr_update,
.generate = drbg_ctr_generate,
.crypto_init = drbg_init_sym_kernel,
.crypto_fini = drbg_fini_sym_kernel,
};
#endif /* CONFIG_CRYPTO_DRBG_CTR */
/******************************************************************
* HMAC DRBG callback functions
******************************************************************/
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
const struct list_head *in);
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
const unsigned char *key);
static int drbg_init_hash_kernel(struct drbg_state *drbg);
static int drbg_fini_hash_kernel(struct drbg_state *drbg);
#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
#ifdef CONFIG_CRYPTO_DRBG_HMAC
#define CRYPTO_DRBG_HMAC_STRING "HMAC "
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha512");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha512");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
/* update function of HMAC DRBG as defined in 10.1.2.2 */
static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
int reseed)
{
int ret = -EFAULT;
int i = 0;
struct drbg_string seed1, seed2, vdata;
LIST_HEAD(seedlist);
LIST_HEAD(vdatalist);
if (!reseed) {
/* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */
memset(drbg->V, 1, drbg_statelen(drbg));
drbg_kcapi_hmacsetkey(drbg, drbg->C);
}
drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
list_add_tail(&seed1.list, &seedlist);
/* buffer of seed2 will be filled in for loop below with one byte */
drbg_string_fill(&seed2, NULL, 1);
list_add_tail(&seed2.list, &seedlist);
/* input data of seed is allowed to be NULL at this point */
if (seed)
list_splice_tail(seed, &seedlist);
drbg_string_fill(&vdata, drbg->V, drbg_statelen(drbg));
list_add_tail(&vdata.list, &vdatalist);
for (i = 2; 0 < i; i--) {
/* first round uses 0x0, second 0x1 */
unsigned char prefix = DRBG_PREFIX0;
if (1 == i)
prefix = DRBG_PREFIX1;
/* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
seed2.buf = &prefix;
ret = drbg_kcapi_hash(drbg, drbg->C, &seedlist);
if (ret)
return ret;
drbg_kcapi_hmacsetkey(drbg, drbg->C);
/* 10.1.2.2 step 2 and 5 -- HMAC for V */
ret = drbg_kcapi_hash(drbg, drbg->V, &vdatalist);
if (ret)
return ret;
/* 10.1.2.2 step 3 */
if (!seed)
return ret;
}
return 0;
}
/* generate function of HMAC DRBG as defined in 10.1.2.5 */
static int drbg_hmac_generate(struct drbg_state *drbg,
unsigned char *buf,
unsigned int buflen,
struct list_head *addtl)
{
int len = 0;
int ret = 0;
struct drbg_string data;
LIST_HEAD(datalist);
/* 10.1.2.5 step 2 */
if (addtl && !list_empty(addtl)) {
ret = drbg_hmac_update(drbg, addtl, 1);
if (ret)
return ret;
}
drbg_string_fill(&data, drbg->V, drbg_statelen(drbg));
list_add_tail(&data.list, &datalist);
while (len < buflen) {
unsigned int outlen = 0;
/* 10.1.2.5 step 4.1 */
ret = drbg_kcapi_hash(drbg, drbg->V, &datalist);
if (ret)
return ret;
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
/* 10.1.2.5 step 4.2 */
memcpy(buf + len, drbg->V, outlen);
len += outlen;
}
/* 10.1.2.5 step 6 */
if (addtl && !list_empty(addtl))
ret = drbg_hmac_update(drbg, addtl, 1);
else
ret = drbg_hmac_update(drbg, NULL, 1);
if (ret)
return ret;
return len;
}
static const struct drbg_state_ops drbg_hmac_ops = {
.update = drbg_hmac_update,
.generate = drbg_hmac_generate,
.crypto_init = drbg_init_hash_kernel,
.crypto_fini = drbg_fini_hash_kernel,
};
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
/******************************************************************
* Hash DRBG callback functions
******************************************************************/
#ifdef CONFIG_CRYPTO_DRBG_HASH
#define CRYPTO_DRBG_HASH_STRING "HASH "
MODULE_ALIAS_CRYPTO("drbg_pr_sha512");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha512");
MODULE_ALIAS_CRYPTO("drbg_pr_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha256");
/*
* Add one buffer to another (byte-wise big-endian addition with carry)
*
* @dst buffer to add to (modified in place)
* @dstlen length of @dst -- must be >= @addlen
* @add value to add
* @addlen length of @add
*/
static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
const unsigned char *add, size_t addlen)
{
/* implied: dstlen > addlen */
unsigned char *dstptr;
const unsigned char *addptr;
unsigned int remainder = 0;
size_t len = addlen;
dstptr = dst + (dstlen-1);
addptr = add + (addlen-1);
while (len) {
remainder += *dstptr + *addptr;
*dstptr = remainder & 0xff;
remainder >>= 8;
len--; dstptr--; addptr--;
}
len = dstlen - addlen;
while (len && remainder > 0) {
remainder = *dstptr + 1;
*dstptr = remainder & 0xff;
remainder >>= 8;
len--; dstptr--;
}
}
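/*
* Illustrative example of the addition above (values chosen for this
* comment only): with dst = { 0x00, 0xff, 0xff } (dstlen = 3) and
* add = { 0x01 } (addlen = 1), the first loop computes 0xff + 0x01,
* stores 0x00 and carries one; the second loop then propagates the
* carry through the remaining bytes, leaving dst = { 0x01, 0x00, 0x00 }.
*/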
/*
* scratchpad usage: as drbg_hash_update and drbg_hash_df are used
* interlinked, the scratchpad is used as follows:
* drbg_hash_update
* start: drbg->scratchpad
* length: drbg_statelen(drbg)
* drbg_hash_df:
* start: drbg->scratchpad + drbg_statelen(drbg)
* length: drbg_blocklen(drbg)
*
* drbg_hash_process_addtl uses the scratchpad, but fully completes
* before either of the functions mentioned before are invoked. Therefore,
* drbg_hash_process_addtl does not need to be specifically considered.
*/
/* Derivation Function for Hash DRBG as defined in 10.4.1 */
static int drbg_hash_df(struct drbg_state *drbg,
unsigned char *outval, size_t outlen,
struct list_head *entropylist)
{
int ret = 0;
size_t len = 0;
unsigned char input[5];
unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
struct drbg_string data;
/* 10.4.1 step 3 */
input[0] = 1;
drbg_cpu_to_be32((outlen * 8), &input[1]);
/* 10.4.1 step 4.1 -- concatenation of data for input into hash */
drbg_string_fill(&data, input, 5);
list_add(&data.list, entropylist);
/* 10.4.1 step 4 */
while (len < outlen) {
short blocklen = 0;
/* 10.4.1 step 4.1 */
ret = drbg_kcapi_hash(drbg, tmp, entropylist);
if (ret)
goto out;
/* 10.4.1 step 4.2 */
input[0]++;
blocklen = (drbg_blocklen(drbg) < (outlen - len)) ?
drbg_blocklen(drbg) : (outlen - len);
memcpy(outval + len, tmp, blocklen);
len += blocklen;
}
out:
memset(tmp, 0, drbg_blocklen(drbg));
return ret;
}
/* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */
static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
int reseed)
{
int ret = 0;
struct drbg_string data1, data2;
LIST_HEAD(datalist);
LIST_HEAD(datalist2);
unsigned char *V = drbg->scratchpad;
unsigned char prefix = DRBG_PREFIX1;
if (!seed)
return -EINVAL;
if (reseed) {
/* 10.1.1.3 step 1 */
memcpy(V, drbg->V, drbg_statelen(drbg));
drbg_string_fill(&data1, &prefix, 1);
list_add_tail(&data1.list, &datalist);
drbg_string_fill(&data2, V, drbg_statelen(drbg));
list_add_tail(&data2.list, &datalist);
}
list_splice_tail(seed, &datalist);
/* 10.1.1.2 / 10.1.1.3 step 2 and 3 */
ret = drbg_hash_df(drbg, drbg->V, drbg_statelen(drbg), &datalist);
if (ret)
goto out;
/* 10.1.1.2 / 10.1.1.3 step 4 */
prefix = DRBG_PREFIX0;
drbg_string_fill(&data1, &prefix, 1);
list_add_tail(&data1.list, &datalist2);
drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
list_add_tail(&data2.list, &datalist2);
/* 10.1.1.2 / 10.1.1.3 step 4 */
ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
out:
memset(drbg->scratchpad, 0, drbg_statelen(drbg));
return ret;
}
/* processing of additional information string for Hash DRBG */
static int drbg_hash_process_addtl(struct drbg_state *drbg,
struct list_head *addtl)
{
int ret = 0;
struct drbg_string data1, data2;
LIST_HEAD(datalist);
unsigned char prefix = DRBG_PREFIX2;
/* 10.1.1.4 step 2 */
if (!addtl || list_empty(addtl))
return 0;
/* 10.1.1.4 step 2a */
drbg_string_fill(&data1, &prefix, 1);
drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
list_add_tail(&data1.list, &datalist);
list_add_tail(&data2.list, &datalist);
list_splice_tail(addtl, &datalist);
ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
if (ret)
goto out;
/* 10.1.1.4 step 2b */
drbg_add_buf(drbg->V, drbg_statelen(drbg),
drbg->scratchpad, drbg_blocklen(drbg));
out:
memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return ret;
}
/* Hashgen defined in 10.1.1.4 */
static int drbg_hash_hashgen(struct drbg_state *drbg,
unsigned char *buf,
unsigned int buflen)
{
int len = 0;
int ret = 0;
unsigned char *src = drbg->scratchpad;
unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
struct drbg_string data;
LIST_HEAD(datalist);
/* 10.1.1.4 step hashgen 2 */
memcpy(src, drbg->V, drbg_statelen(drbg));
drbg_string_fill(&data, src, drbg_statelen(drbg));
list_add_tail(&data.list, &datalist);
while (len < buflen) {
unsigned int outlen = 0;
/* 10.1.1.4 step hashgen 4.1 */
ret = drbg_kcapi_hash(drbg, dst, &datalist);
if (ret) {
len = ret;
goto out;
}
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
/* 10.1.1.4 step hashgen 4.2 */
memcpy(buf + len, dst, outlen);
len += outlen;
/* 10.1.1.4 hashgen step 4.3 */
if (len < buflen)
crypto_inc(src, drbg_statelen(drbg));
}
out:
memset(drbg->scratchpad, 0,
(drbg_statelen(drbg) + drbg_blocklen(drbg)));
return len;
}
/* generate function for Hash DRBG as defined in 10.1.1.4 */
static int drbg_hash_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct list_head *addtl)
{
int len = 0;
int ret = 0;
union {
unsigned char req[8];
__be64 req_int;
} u;
unsigned char prefix = DRBG_PREFIX3;
struct drbg_string data1, data2;
LIST_HEAD(datalist);
/* 10.1.1.4 step 2 */
ret = drbg_hash_process_addtl(drbg, addtl);
if (ret)
return ret;
/* 10.1.1.4 step 3 */
len = drbg_hash_hashgen(drbg, buf, buflen);
/* this is the value H as documented in 10.1.1.4 */
/* 10.1.1.4 step 4 */
drbg_string_fill(&data1, &prefix, 1);
list_add_tail(&data1.list, &datalist);
drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
list_add_tail(&data2.list, &datalist);
ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
if (ret) {
len = ret;
goto out;
}
/* 10.1.1.4 step 5 */
drbg_add_buf(drbg->V, drbg_statelen(drbg),
drbg->scratchpad, drbg_blocklen(drbg));
drbg_add_buf(drbg->V, drbg_statelen(drbg),
drbg->C, drbg_statelen(drbg));
u.req_int = cpu_to_be64(drbg->reseed_ctr);
drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
out:
memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return len;
}
/*
* scratchpad usage: as update and generate are used in isolation, both
* can use the scratchpad
*/
static const struct drbg_state_ops drbg_hash_ops = {
.update = drbg_hash_update,
.generate = drbg_hash_generate,
.crypto_init = drbg_init_hash_kernel,
.crypto_fini = drbg_fini_hash_kernel,
};
#endif /* CONFIG_CRYPTO_DRBG_HASH */
/******************************************************************
* Functions common for DRBG implementations
******************************************************************/
static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
int reseed, enum drbg_seed_state new_seed_state)
{
int ret = drbg->d_ops->update(drbg, seed, reseed);
if (ret)
return ret;
drbg->seeded = new_seed_state;
drbg->last_seed_time = jiffies;
/* 10.1.1.2 / 10.1.1.3 step 5 */
drbg->reseed_ctr = 1;
switch (drbg->seeded) {
case DRBG_SEED_STATE_UNSEEDED:
/* Impossible, but handle it to silence compiler warnings. */
fallthrough;
case DRBG_SEED_STATE_PARTIAL:
/*
* Require frequent reseeds until the seed source is
* fully initialized.
*/
drbg->reseed_threshold = 50;
break;
case DRBG_SEED_STATE_FULL:
/*
* Seed source has become fully initialized, frequent
* reseeds no longer required.
*/
drbg->reseed_threshold = drbg_max_requests(drbg);
break;
}
return ret;
}
static inline int drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
{
int ret;
do {
get_random_bytes(entropy, entropylen);
ret = drbg_fips_continuous_test(drbg, entropy);
if (ret && ret != -EAGAIN)
return ret;
} while (ret);
return 0;
}
static int drbg_seed_from_random(struct drbg_state *drbg)
{
struct drbg_string data;
LIST_HEAD(seedlist);
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
unsigned char entropy[32];
int ret;
BUG_ON(!entropylen);
BUG_ON(entropylen > sizeof(entropy));
drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
out:
memzero_explicit(entropy, entropylen);
return ret;
}
static bool drbg_nopr_reseed_interval_elapsed(struct drbg_state *drbg)
{
unsigned long next_reseed;
/* Don't ever reseed from get_random_bytes() in test mode. */
if (list_empty(&drbg->test_data.list))
return false;
/*
* Obtain fresh entropy for the nopr DRBGs after 300s have
* elapsed in order to still achieve sort of partial
* prediction resistance over the time domain at least. Note
* that the period of 300s has been chosen to match the
* CRNG_RESEED_INTERVAL of the get_random_bytes()' chacha
* rngs.
*/
next_reseed = drbg->last_seed_time + 300 * HZ;
return time_after(jiffies, next_reseed);
}
/*
* Seeding or reseeding of the DRBG
*
* @drbg: DRBG state struct
* @pers: personalization / additional information buffer
* @reseed: 0 for initial seed process, 1 for reseeding
*
* return:
* 0 on success
* error value otherwise
*/
static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
bool reseed)
{
int ret;
unsigned char entropy[((32 + 16) * 2)];
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
/* 9.1 / 9.2 / 9.3.1 step 3 */
if (pers && pers->len > (drbg_max_addtl(drbg))) {
pr_devel("DRBG: personalization string too long %zu\n",
pers->len);
return -EINVAL;
}
if (list_empty(&drbg->test_data.list)) {
drbg_string_fill(&data1, drbg->test_data.buf,
drbg->test_data.len);
pr_devel("DRBG: using test entropy\n");
} else {
/*
* Gather entropy equal to the security strength of the DRBG.
* With a derivation function, a nonce is required in addition
* to the entropy. A nonce must be at least 1/2 of the security
* strength of the DRBG in size. Thus, entropy + nonce is 3/2
* of the strength. The consideration of a nonce is only
* applicable during initial seeding.
*/
BUG_ON(!entropylen);
if (!reseed)
entropylen = ((entropylen + 1) / 2) * 3;
BUG_ON((entropylen * 2) > sizeof(entropy));
/* Get seed from in-kernel /dev/urandom */
if (!rng_is_initialized())
new_seed_state = DRBG_SEED_STATE_PARTIAL;
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
if (!drbg->jent) {
drbg_string_fill(&data1, entropy, entropylen);
pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
entropylen);
} else {
/*
* Get seed from Jitter RNG, failures are
* fatal only in FIPS mode.
*/
ret = crypto_rng_get_bytes(drbg->jent,
entropy + entropylen,
entropylen);
if (fips_enabled && ret) {
pr_devel("DRBG: jent failed with %d\n", ret);
/*
* Do not treat the transient failure of the
* Jitter RNG as an error that needs to be
* reported. The combined number of the
* maximum reseed threshold times the maximum
* number of Jitter RNG transient errors is
* less than the reseed threshold required by
* SP800-90A allowing us to treat the
* transient errors as such.
*
* However, we mandate that at least the first
* seeding operation must succeed with the
* Jitter RNG.
*/
if (!reseed || ret != -EAGAIN)
goto out;
}
drbg_string_fill(&data1, entropy, entropylen * 2);
pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
entropylen * 2);
}
}
list_add_tail(&data1.list, &seedlist);
/*
* Concatenation of entropy with personalization string / additional input:
* the variable pers is directly handed in by the caller, so check
* whether its contents are appropriate.
*/
if (pers && pers->buf && 0 < pers->len) {
list_add_tail(&pers->list, &seedlist);
pr_devel("DRBG: using personalization string\n");
}
if (!reseed) {
memset(drbg->V, 0, drbg_statelen(drbg));
memset(drbg->C, 0, drbg_statelen(drbg));
}
ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
out:
memzero_explicit(entropy, entropylen * 2);
return ret;
}
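/*
 * Worked example (illustrative only, assuming a 256-bit strength DRBG such
 * as HMAC-SHA512 or CTR-AES256): drbg_sec_strength() yields entropylen = 32.
 * For the initial seeding, entropylen is scaled to ((32 + 1) / 2) * 3 = 48
 * bytes to cover the entropy plus the 1/2-strength nonce. When the Jitter
 * RNG is available, another 48 bytes are appended, so data1 references 96
 * bytes -- exactly the size of the local entropy[] buffer, ((32 + 16) * 2).
 */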
/* Free all substructures in a DRBG state without the DRBG state structure */
static inline void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
kfree_sensitive(drbg->Vbuf);
drbg->Vbuf = NULL;
drbg->V = NULL;
kfree_sensitive(drbg->Cbuf);
drbg->Cbuf = NULL;
drbg->C = NULL;
kfree_sensitive(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
kfree_sensitive(drbg->prev);
drbg->prev = NULL;
drbg->fips_primed = false;
}
}
/*
* Allocate all sub-structures for a DRBG state.
* The DRBG state structure must already be allocated.
*/
static inline int drbg_alloc_state(struct drbg_state *drbg)
{
int ret = -ENOMEM;
unsigned int sb_size = 0;
switch (drbg->core->flags & DRBG_TYPE_MASK) {
#ifdef CONFIG_CRYPTO_DRBG_HMAC
case DRBG_HMAC:
drbg->d_ops = &drbg_hmac_ops;
break;
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
#ifdef CONFIG_CRYPTO_DRBG_HASH
case DRBG_HASH:
drbg->d_ops = &drbg_hash_ops;
break;
#endif /* CONFIG_CRYPTO_DRBG_HASH */
#ifdef CONFIG_CRYPTO_DRBG_CTR
case DRBG_CTR:
drbg->d_ops = &drbg_ctr_ops;
break;
#endif /* CONFIG_CRYPTO_DRBG_CTR */
default:
ret = -EOPNOTSUPP;
goto err;
}
ret = drbg->d_ops->crypto_init(drbg);
if (ret < 0)
goto err;
drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
if (!drbg->Vbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
if (!drbg->Cbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
/* scratchpad is only generated for CTR and Hash */
if (drbg->core->flags & DRBG_HMAC)
sb_size = 0;
else if (drbg->core->flags & DRBG_CTR)
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
drbg_statelen(drbg) + /* df_data */
drbg_blocklen(drbg) + /* pad */
drbg_blocklen(drbg) + /* iv */
drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
else
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
if (0 < sb_size) {
drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
if (!drbg->scratchpadbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
if (!drbg->prev) {
ret = -ENOMEM;
goto fini;
}
drbg->fips_primed = false;
}
return 0;
fini:
drbg->d_ops->crypto_fini(drbg);
err:
drbg_dealloc_state(drbg);
return ret;
}
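/*
 * Note on the alignment handling above (informational): the d_ops->crypto_init()
 * callback returns the backend's required alignment mask on success (0 for the
 * hash/HMAC backends, the skcipher alignmask for CTR, see drbg_init_sym_kernel()
 * below). Each buffer is therefore over-allocated by 'ret' bytes and
 * PTR_ALIGN(buf, ret + 1) rounds the pointer up to the next multiple of
 * (mask + 1). A rough equivalent, for illustration only:
 *
 *	aligned = (void *)(((uintptr_t)buf + ret) & ~(uintptr_t)ret);
 */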
/*************************************************************************
* DRBG interface functions
*************************************************************************/
/*
* DRBG generate function as required by SP800-90A - this function
* generates random numbers
*
* @drbg DRBG state handle
* @buf Buffer where to store the random numbers -- the buffer must already
* be pre-allocated by caller
* @buflen Length of output buffer - this value defines the number of random
* bytes pulled from DRBG
* @addtl Additional input that is mixed into state, may be NULL -- note
* the entropy is pulled by the DRBG internally unconditionally
* as defined in SP800-90A. The additional input is mixed into
* the state in addition to the pulled entropy.
*
* return: 0 when all bytes are generated; < 0 in case of an error
*/
static int drbg_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct drbg_string *addtl)
{
int len = 0;
LIST_HEAD(addtllist);
if (!drbg->core) {
pr_devel("DRBG: not yet seeded\n");
return -EINVAL;
}
if (0 == buflen || !buf) {
pr_devel("DRBG: no output buffer provided\n");
return -EINVAL;
}
if (addtl && NULL == addtl->buf && 0 < addtl->len) {
pr_devel("DRBG: wrong format of additional information\n");
return -EINVAL;
}
/* 9.3.1 step 2 */
len = -EINVAL;
if (buflen > (drbg_max_request_bytes(drbg))) {
pr_devel("DRBG: requested random numbers too large %u\n",
buflen);
goto err;
}
/* 9.3.1 step 3 is implicit with the chosen DRBG */
/* 9.3.1 step 4 */
if (addtl && addtl->len > (drbg_max_addtl(drbg))) {
pr_devel("DRBG: additional information string too long %zu\n",
addtl->len);
goto err;
}
/* 9.3.1 step 5 is implicit with the chosen DRBG */
/*
* 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented
* here. The spec is a bit convoluted here, we make it simpler.
*/
if (drbg->reseed_threshold < drbg->reseed_ctr)
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
str_true_false(drbg->pr),
(drbg->seeded == DRBG_SEED_STATE_FULL ?
"seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
len = drbg_seed(drbg, addtl, true);
if (len)
goto err;
/* 9.3.1 step 7.4 */
addtl = NULL;
} else if (rng_is_initialized() &&
(drbg->seeded == DRBG_SEED_STATE_PARTIAL ||
drbg_nopr_reseed_interval_elapsed(drbg))) {
len = drbg_seed_from_random(drbg);
if (len)
goto err;
}
if (addtl && 0 < addtl->len)
list_add_tail(&addtl->list, &addtllist);
/* 9.3.1 step 8 and 10 */
len = drbg->d_ops->generate(drbg, buf, buflen, &addtllist);
/* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */
drbg->reseed_ctr++;
if (0 >= len)
goto err;
/*
* Section 11.3.3 requires to re-perform self tests after some
* generated random numbers. The chosen value after which self
* test is performed is arbitrary, but it should be reasonable.
* However, we do not perform these self tests, for the following
* reason: it is mathematically impossible that the initial self tests
* succeed while later ones fail. If the initial tests passed and a
* later one did not, kernel integrity would have been violated.
* In this case, the entire kernel operation is questionable and it
* is unlikely that the integrity violation only affects the
* correct operation of the DRBG.
*
* Albeit the following code is commented out, it is provided in
* case somebody has a need to implement the test of 11.3.3.
*/
#if 0
if (drbg->reseed_ctr && !(drbg->reseed_ctr % 4096)) {
int err = 0;
pr_devel("DRBG: start to perform self test\n");
if (drbg->core->flags & DRBG_HMAC)
err = alg_test("drbg_pr_hmac_sha512",
"drbg_pr_hmac_sha512", 0, 0);
else if (drbg->core->flags & DRBG_CTR)
err = alg_test("drbg_pr_ctr_aes256",
"drbg_pr_ctr_aes256", 0, 0);
else
err = alg_test("drbg_pr_sha256",
"drbg_pr_sha256", 0, 0);
if (err) {
pr_err("DRBG: periodical self test failed\n");
/*
* uninstantiate implies that from now on, only errors
* are returned when reusing this DRBG cipher handle
*/
drbg_uninstantiate(drbg);
return 0;
} else {
pr_devel("DRBG: self test successful\n");
}
}
#endif
/*
* All operations were successful, return 0 as mandated by
* the kernel crypto API interface.
*/
len = 0;
err:
return len;
}
/*
* Wrapper around drbg_generate which can pull arbitrary long strings
* from the DRBG without hitting the maximum request limitation.
*
* Parameters: see drbg_generate
* Return codes: see drbg_generate -- if one drbg_generate request fails,
* the entire drbg_generate_long request fails
*/
static int drbg_generate_long(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct drbg_string *addtl)
{
unsigned int len = 0;
unsigned int slice = 0;
do {
int err = 0;
unsigned int chunk = 0;
slice = ((buflen - len) / drbg_max_request_bytes(drbg));
chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len);
mutex_lock(&drbg->drbg_mutex);
err = drbg_generate(drbg, buf + len, chunk, addtl);
mutex_unlock(&drbg->drbg_mutex);
if (0 > err)
return err;
len += chunk;
} while (slice > 0 && (len < buflen));
return 0;
}
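/*
 * Example of the slicing above (illustrative): with a request of
 * buflen = 2.5 * drbg_max_request_bytes(drbg), the loop issues three
 * drbg_generate() calls -- two full-sized chunks (while slice > 0)
 * followed by one half-sized chunk (slice == 0), after which len == buflen
 * and the loop terminates.
 */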
static int drbg_prepare_hrng(struct drbg_state *drbg)
{
/* We do not need an HRNG in test mode. */
if (list_empty(&drbg->test_data.list))
return 0;
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
if (IS_ERR(drbg->jent)) {
const int err = PTR_ERR(drbg->jent);
drbg->jent = NULL;
if (fips_enabled)
return err;
pr_info("DRBG: Continuing without Jitter RNG\n");
}
return 0;
}
/*
* DRBG instantiation function as required by SP800-90A - this function
* sets up the DRBG handle, performs the initial seeding and all sanity
* checks required by SP800-90A
*
* @drbg memory of state -- if NULL, new memory is allocated
* @pers Personalization string that is mixed into state, may be NULL -- note
* the entropy is pulled by the DRBG internally unconditionally
* as defined in SP800-90A. The additional input is mixed into
* the state in addition to the pulled entropy.
* @coreref reference to core
* @pr prediction resistance enabled
*
* return
* 0 on success
* error value otherwise
*/
static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
int coreref, bool pr)
{
int ret;
bool reseed = true;
pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
"%s\n", coreref, str_enabled_disabled(pr));
mutex_lock(&drbg->drbg_mutex);
/* 9.1 step 1 is implicit with the selected DRBG type */
/*
* 9.1 step 2 is implicit as caller can select prediction resistance
* and the flag is copied into drbg->flags --
* all DRBG types support prediction resistance
*/
/* 9.1 step 4 is implicit in drbg_sec_strength */
if (!drbg->core) {
drbg->core = &drbg_cores[coreref];
drbg->pr = pr;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
drbg->last_seed_time = 0;
drbg->reseed_threshold = drbg_max_requests(drbg);
ret = drbg_alloc_state(drbg);
if (ret)
goto unlock;
ret = drbg_prepare_hrng(drbg);
if (ret)
goto free_everything;
reseed = false;
}
ret = drbg_seed(drbg, pers, reseed);
if (ret && !reseed)
goto free_everything;
mutex_unlock(&drbg->drbg_mutex);
return ret;
unlock:
mutex_unlock(&drbg->drbg_mutex);
return ret;
free_everything:
mutex_unlock(&drbg->drbg_mutex);
drbg_uninstantiate(drbg);
return ret;
}
/*
* DRBG uninstantiate function as required by SP800-90A - this function
* frees all buffers and the DRBG handle
*
* @drbg DRBG state handle
*
* return
* 0 on success
*/
static int drbg_uninstantiate(struct drbg_state *drbg)
{
if (!IS_ERR_OR_NULL(drbg->jent))
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
if (drbg->d_ops)
drbg->d_ops->crypto_fini(drbg);
drbg_dealloc_state(drbg);
/* no scrubbing of test_data -- this shall survive an uninstantiate */
return 0;
}
/*
* Helper function for setting the test data in the DRBG
*
* @drbg DRBG state handle
* @data test data
* @len test data length
*/
static void drbg_kcapi_set_entropy(struct crypto_rng *tfm,
const u8 *data, unsigned int len)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
mutex_lock(&drbg->drbg_mutex);
drbg_string_fill(&drbg->test_data, data, len);
mutex_unlock(&drbg->drbg_mutex);
}
/***************************************************************
* Kernel crypto API cipher invocations requested by DRBG
***************************************************************/
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
struct sdesc {
struct shash_desc shash;
char ctx[];
};
static int drbg_init_hash_kernel(struct drbg_state *drbg)
{
struct sdesc *sdesc;
struct crypto_shash *tfm;
tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
pr_info("DRBG: could not allocate digest TFM handle: %s\n",
drbg->core->backend_cra_name);
return PTR_ERR(tfm);
}
BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm));
sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
GFP_KERNEL);
if (!sdesc) {
crypto_free_shash(tfm);
return -ENOMEM;
}
sdesc->shash.tfm = tfm;
drbg->priv_data = sdesc;
return 0;
}
static int drbg_fini_hash_kernel(struct drbg_state *drbg)
{
struct sdesc *sdesc = drbg->priv_data;
if (sdesc) {
crypto_free_shash(sdesc->shash.tfm);
kfree_sensitive(sdesc);
}
drbg->priv_data = NULL;
return 0;
}
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
struct sdesc *sdesc = drbg->priv_data;
crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
}
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
const struct list_head *in)
{
struct sdesc *sdesc = drbg->priv_data;
struct drbg_string *input = NULL;
crypto_shash_init(&sdesc->shash);
list_for_each_entry(input, in, list)
crypto_shash_update(&sdesc->shash, input->buf, input->len);
return crypto_shash_final(&sdesc->shash, outval);
}
#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
#ifdef CONFIG_CRYPTO_DRBG_CTR
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm =
(struct crypto_cipher *)drbg->priv_data;
if (tfm)
crypto_free_cipher(tfm);
drbg->priv_data = NULL;
if (drbg->ctr_handle)
crypto_free_skcipher(drbg->ctr_handle);
drbg->ctr_handle = NULL;
if (drbg->ctr_req)
skcipher_request_free(drbg->ctr_req);
drbg->ctr_req = NULL;
kfree(drbg->outscratchpadbuf);
drbg->outscratchpadbuf = NULL;
return 0;
}
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm;
struct crypto_skcipher *sk_tfm;
struct skcipher_request *req;
unsigned int alignmask;
char ctr_name[CRYPTO_MAX_ALG_NAME];
tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
pr_info("DRBG: could not allocate cipher TFM handle: %s\n",
drbg->core->backend_cra_name);
return PTR_ERR(tfm);
}
BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
drbg->priv_data = tfm;
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
drbg_fini_sym_kernel(drbg);
return -EINVAL;
}
sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0);
if (IS_ERR(sk_tfm)) {
pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n",
ctr_name);
drbg_fini_sym_kernel(drbg);
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
crypto_init_wait(&drbg->ctr_wait);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
pr_info("DRBG: could not allocate request queue\n");
drbg_fini_sym_kernel(drbg);
return -ENOMEM;
}
drbg->ctr_req = req;
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
GFP_KERNEL);
if (!drbg->outscratchpadbuf) {
drbg_fini_sym_kernel(drbg);
return -ENOMEM;
}
drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
alignmask + 1);
sg_init_table(&drbg->sg_in, 1);
sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
return alignmask;
}
static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
struct crypto_cipher *tfm = drbg->priv_data;
crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
}
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
const struct drbg_string *in)
{
struct crypto_cipher *tfm = drbg->priv_data;
/* there is only one component in *in */
BUG_ON(in->len < drbg_blocklen(drbg));
crypto_cipher_encrypt_one(tfm, outval, in->buf);
return 0;
}
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
{
struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
int ret;
if (inbuf) {
/* Use caller-provided input buffer */
sg_set_buf(sg_in, inbuf, inlen);
} else {
/* Use scratchpad for in-place operation */
inlen = scratchpad_use;
memset(drbg->outscratchpad, 0, scratchpad_use);
sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
}
while (outlen) {
u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
/* Output buffer may not be valid for SGL, use scratchpad */
skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
cryptlen, drbg->V);
ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
&drbg->ctr_wait);
if (ret)
goto out;
crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
memzero_explicit(drbg->outscratchpad, cryptlen);
outlen -= cryptlen;
outbuf += cryptlen;
}
ret = 0;
out:
return ret;
}
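/*
 * Note on the helper above (informational): when no input buffer is supplied,
 * the zero-filled output scratchpad is encrypted in place, so the routine
 * effectively emits the raw CTR keystream for the currently programmed key,
 * DRBG_OUTSCRATCHLEN bytes per iteration, with drbg->V acting as the
 * counter/IV that the skcipher advances across calls.
 */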
#endif /* CONFIG_CRYPTO_DRBG_CTR */
/***************************************************************
* Kernel crypto API interface to register DRBG
***************************************************************/
/*
* Look up the DRBG flags by given kernel crypto API cra_name
* The code uses the drbg_cores definition to do this
*
* @cra_name kernel crypto API cra_name
* @coreref reference to integer which is filled with the pointer to
* the applicable core
* @pr reference for setting prediction resistance
*
* return: flags
*/
static inline void drbg_convert_tfm_core(const char *cra_driver_name,
int *coreref, bool *pr)
{
int i = 0;
size_t start = 0;
int len = 0;
*pr = true;
/* disassemble the names */
if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) {
start = 10;
*pr = false;
} else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) {
start = 8;
} else {
return;
}
/* remove the first part */
len = strlen(cra_driver_name) - start;
for (i = 0; ARRAY_SIZE(drbg_cores) > i; i++) {
if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name,
len)) {
*coreref = i;
return;
}
}
}
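/*
 * Example (illustrative): for cra_driver_name "drbg_nopr_hmac_sha512",
 * the "drbg_nopr_" prefix sets *pr = false and start = 10; the remaining
 * "hmac_sha512" is then matched against drbg_cores[].cra_name and the
 * matching index is returned via *coreref.
 */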
static int drbg_kcapi_init(struct crypto_tfm *tfm)
{
struct drbg_state *drbg = crypto_tfm_ctx(tfm);
mutex_init(&drbg->drbg_mutex);
return 0;
}
static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
{
drbg_uninstantiate(crypto_tfm_ctx(tfm));
}
/*
* Generate random numbers invoked by the kernel crypto API:
* The API of the kernel crypto API is extended as follows:
*
* src is additional input supplied to the RNG.
* slen is the length of src.
* dst is the output buffer where random data is to be stored.
* dlen is the length of dst.
*/
static int drbg_kcapi_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
struct drbg_string *addtl = NULL;
struct drbg_string string;
if (slen) {
/* linked list variable is now local to allow modification */
drbg_string_fill(&string, src, slen);
addtl = &string;
}
return drbg_generate_long(drbg, dst, dlen, addtl);
}
/*
* Seed the DRBG invoked by the kernel crypto API
*/
static int drbg_kcapi_seed(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm);
bool pr = false;
struct drbg_string string;
struct drbg_string *seed_string = NULL;
int coreref = 0;
drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref,
&pr);
if (0 < slen) {
drbg_string_fill(&string, seed, slen);
seed_string = &string;
}
return drbg_instantiate(drbg, seed_string, coreref, pr);
}
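/*
 * Typical consumer-side usage of the registered algorithms (a minimal
 * sketch, not part of this file; error handling omitted):
 *
 *	struct crypto_rng *rng = crypto_alloc_rng("drbg_nopr_hmac_sha512", 0, 0);
 *	u8 out[16];
 *
 *	crypto_rng_reset(rng, NULL, 0);			// -> drbg_kcapi_seed()
 *	crypto_rng_get_bytes(rng, out, sizeof(out));	// -> drbg_kcapi_random()
 *	crypto_free_rng(rng);
 */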
/***************************************************************
* Kernel module: code to load the module
***************************************************************/
/*
* Tests as defined in 11.3.2 in addition to the cipher tests: testing
* of the error handling.
*
* Note: testing of failing seed source as defined in 11.3.2 is not applicable
* as seed source of get_random_bytes does not fail.
*
* Note 2: There is no sensible way of testing the reseed counter
* enforcement, so skip it.
*/
static inline int __init drbg_healthcheck_sanity(void)
{
int len = 0;
#define OUTBUFLEN 16
unsigned char buf[OUTBUFLEN];
struct drbg_state *drbg = NULL;
int ret;
int rc = -EFAULT;
bool pr = false;
int coreref = 0;
struct drbg_string addtl;
size_t max_addtllen, max_request_bytes;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;
#ifdef CONFIG_CRYPTO_DRBG_CTR
drbg_convert_tfm_core("drbg_nopr_ctr_aes256", &coreref, &pr);
#endif
#ifdef CONFIG_CRYPTO_DRBG_HASH
drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
#endif
#ifdef CONFIG_CRYPTO_DRBG_HMAC
drbg_convert_tfm_core("drbg_nopr_hmac_sha512", &coreref, &pr);
#endif
drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
if (!drbg)
return -ENOMEM;
mutex_init(&drbg->drbg_mutex);
drbg->core = &drbg_cores[coreref];
drbg->reseed_threshold = drbg_max_requests(drbg);
/*
* if the following tests fail, it is likely that there is a buffer
* overflow as buf is much smaller than the requested or provided
* string lengths -- in case the error handling does not succeed
* we may get an OOPS. And we want to get an OOPS as this is a
* grave bug.
*/
max_addtllen = drbg_max_addtl(drbg);
max_request_bytes = drbg_max_request_bytes(drbg);
drbg_string_fill(&addtl, buf, max_addtllen + 1);
/* overflow addtllen with additional info string */
len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
BUG_ON(0 < len);
/* overflow max_bits */
len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
BUG_ON(0 < len);
/* overflow max addtllen with personalization string */
ret = drbg_seed(drbg, &addtl, false);
BUG_ON(0 == ret);
/* all tests passed */
rc = 0;
pr_devel("DRBG: Sanity tests for failure code paths successfully "
"completed\n");
kfree(drbg);
return rc;
}
static struct rng_alg drbg_algs[22];
/*
* Fill the array drbg_algs used to register the different DRBGs
* with the kernel crypto API. To fill the array, the information
* from drbg_cores[] is used.
*/
static inline void __init drbg_fill_array(struct rng_alg *alg,
const struct drbg_core *core, int pr)
{
int pos = 0;
static int priority = 200;
memcpy(alg->base.cra_name, "stdrng", 6);
if (pr) {
memcpy(alg->base.cra_driver_name, "drbg_pr_", 8);
pos = 8;
} else {
memcpy(alg->base.cra_driver_name, "drbg_nopr_", 10);
pos = 10;
}
memcpy(alg->base.cra_driver_name + pos, core->cra_name,
strlen(core->cra_name));
alg->base.cra_priority = priority;
priority++;
/*
* If FIPS mode enabled, the selected DRBG shall have the
* highest cra_priority over other stdrng instances to ensure
* it is selected.
*/
if (fips_enabled)
alg->base.cra_priority += 200;
alg->base.cra_ctxsize = sizeof(struct drbg_state);
alg->base.cra_module = THIS_MODULE;
alg->base.cra_init = drbg_kcapi_init;
alg->base.cra_exit = drbg_kcapi_cleanup;
alg->generate = drbg_kcapi_random;
alg->seed = drbg_kcapi_seed;
alg->set_ent = drbg_kcapi_set_entropy;
alg->seedsize = 0;
}
static int __init drbg_init(void)
{
unsigned int i = 0; /* pointer to drbg_algs */
unsigned int j = 0; /* pointer to drbg_cores */
int ret;
ret = drbg_healthcheck_sanity();
if (ret)
return ret;
if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) {
pr_info("DRBG: Cannot register all DRBG types "
"(slots needed: %zu, slots available: %zu)\n",
ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
return -EFAULT;
}
/*
* each DRBG definition can be used with PR and without PR, thus
* we instantiate each DRBG in drbg_cores[] twice.
*
* As the order of placing them into the drbg_algs array matters
* (the later DRBGs receive a higher cra_priority) we register the
* prediction resistance DRBGs first as they should not be too
* interesting.
*/
for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
static void __exit drbg_exit(void)
{
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
module_init(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
#endif
#ifndef CRYPTO_DRBG_HMAC_STRING
#define CRYPTO_DRBG_HMAC_STRING ""
#endif
#ifndef CRYPTO_DRBG_CTR_STRING
#define CRYPTO_DRBG_CTR_STRING ""
#endif
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
"using following cores: "
CRYPTO_DRBG_HASH_STRING
CRYPTO_DRBG_HMAC_STRING
CRYPTO_DRBG_CTR_STRING);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_FILE_REF_H
#define _LINUX_FILE_REF_H
#include <linux/atomic.h>
#include <linux/preempt.h>
#include <linux/types.h>
/*
* file_ref is a reference count implementation specifically for use by
* files. It takes inspiration from rcuref but differs in key aspects
* such as support for SLAB_TYPESAFE_BY_RCU type caches.
*
* FILE_REF_ONEREF FILE_REF_MAXREF
* 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL
* <-------------------valid ------------------->
*
* FILE_REF_SATURATED
* 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL
* <-----------------------saturation zone---------------------->
*
* FILE_REF_RELEASED FILE_REF_DEAD
* 0xC000000000000000UL 0xE000000000000000UL
* <-------------------dead zone------------------->
*
* FILE_REF_NOREF
* 0xFFFFFFFFFFFFFFFFUL
*/
#ifdef CONFIG_64BIT
#define FILE_REF_ONEREF 0x0000000000000000UL
#define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL
#define FILE_REF_SATURATED 0xA000000000000000UL
#define FILE_REF_RELEASED 0xC000000000000000UL
#define FILE_REF_DEAD 0xE000000000000000UL
#define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL
#else
#define FILE_REF_ONEREF 0x00000000U
#define FILE_REF_MAXREF 0x7FFFFFFFU
#define FILE_REF_SATURATED 0xA0000000U
#define FILE_REF_RELEASED 0xC0000000U
#define FILE_REF_DEAD 0xE0000000U
#define FILE_REF_NOREF 0xFFFFFFFFU
#endif
typedef struct {
#ifdef CONFIG_64BIT
atomic64_t refcnt;
#else
atomic_t refcnt;
#endif
} file_ref_t;
/**
* file_ref_init - Initialize a file reference count
* @ref: Pointer to the reference count
* @cnt: The initial reference count typically '1'
*/
static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
{
atomic_long_set(&ref->refcnt, cnt - 1);
}
bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
/**
* file_ref_get - Acquire one reference on a file
* @ref: Pointer to the reference count
*
* Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF.
*
* Provides full memory ordering.
*
* Return: False if the attempt to acquire a reference failed. This happens
* when the last reference has been put already. True if a reference
* was successfully acquired
*/
static __always_inline __must_check bool file_ref_get(file_ref_t *ref)
{
/*
* Unconditionally increase the reference count with full
* ordering. The saturation and dead zones provide enough
* tolerance for this.
*
* If this indicates a negative value, the file in question may
* already have been freed and immediately reused due to
* SLAB_TYPESAFE_BY_RCU. Hence, unconditionally altering the file
* reference count, e.g. to reset it back to the middle of the
* dead zone, risks marking someone else's file as dead behind
* their back.
*
* It would be possible to do a careful:
*
* cnt = atomic_long_inc_return();
* if (likely(cnt >= 0))
* return true;
*
* and then something like:
*
* if (cnt >= FILE_REF_RELEASED)
* atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD),
*
* to set the value back to the middle of the deadzone. But it's
* practically impossible to go from FILE_REF_DEAD to
* FILE_REF_ONEREF. It would need 2305843009213693952 (2^61)
* file_ref_get()s to resurrect such a dead file.
*/
return !atomic_long_add_negative(1, &ref->refcnt);
}
/**
* file_ref_inc - Acquire one reference on a file
* @ref: Pointer to the reference count
*
* Acquire an additional reference on a file. Warns if the caller didn't
* already hold a reference.
*/
static __always_inline void file_ref_inc(file_ref_t *ref)
{
long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt);
WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference");
}
/**
* file_ref_put -- Release a file reference
* @ref: Pointer to the reference count
*
* Provides release memory ordering, such that prior loads and stores
* are done before, and provides an acquire ordering on success such
* that free() must come after.
*
* Return: True if this was the last reference with no future references
* possible. This signals the caller that it can safely release
* the object which is protected by the reference counter.
* False if there are still active references or the put() raced
* with a concurrent get()/put() pair. Caller is not allowed to
* release the protected object.
*/
static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
{
long cnt;
/*
* While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put()
* calls don't risk UAFs when a file is recycled, it is still
* vulnerable to UAFs caused by freeing the whole slab page once
* it becomes unused. Preventing file_ref_put() from being
* preempted protects against this.
*/
guard(preempt)();
/*
* Unconditionally decrease the reference count. The saturation
* and dead zones provide enough tolerance for this. If this
* fails then we need to handle the last reference drop and
* cases inside the saturation and dead zones.
*/
cnt = atomic_long_dec_return(&ref->refcnt);
if (cnt >= 0)
return false;
return __file_ref_put(ref, cnt);
}
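/*
 * Typical pairing (a minimal sketch; 'struct foo' and free_foo() are
 * hypothetical and only illustrate the calling convention):
 *
 *	file_ref_init(&foo->ref, 1);
 *	...
 *	if (file_ref_get(&foo->ref))
 *		use(foo);
 *	...
 *	if (file_ref_put(&foo->ref))
 *		free_foo(foo);	// last reference was dropped
 */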
/**
* file_ref_put_close - drop a reference expecting it would transition to FILE_REF_NOREF
* @ref: Pointer to the reference count
*
* Semantically it is equivalent to calling file_ref_put(), but it trades lower
* performance in face of other CPUs also modifying the refcount for higher
* performance when this happens to be the last reference.
*
* For the last reference file_ref_put() issues 2 atomics. One to drop the
* reference and another to transition it to FILE_REF_DEAD. This routine does
* the work in one step, but in order to do it has to pre-read the variable which
* decreases scalability.
*
* Use with close() et al, stick to file_ref_put() by default.
*/
static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
{
long old;
old = atomic_long_read(&ref->refcnt);
if (likely(old == FILE_REF_ONEREF)) {
if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
return true;
}
return file_ref_put(ref);
}
/**
* file_ref_read - Read the number of file references
* @ref: Pointer to the reference count
*
* Return: The number of held references (0 ... N)
*/
static inline unsigned long file_ref_read(file_ref_t *ref)
{
unsigned long c = atomic_long_read(&ref->refcnt);
/* Return 0 if within the DEAD zone. */
return c >= FILE_REF_RELEASED ? 0 : c + 1;
}
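/*
 * Example of the off-by-one encoding (illustrative): file_ref_init(ref, 1)
 * stores 0 (FILE_REF_ONEREF) in the counter, so file_ref_read() returns
 * c + 1 = 1 held reference; once the counter enters the dead zone the
 * function reports 0.
 */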
/*
* __file_ref_read_raw - Return the value stored in ref->refcnt
* @ref: Pointer to the reference count
*
* Return: The raw value found in the counter
*
* A hack for file_needs_f_pos_lock(), you probably want to use
* file_ref_read() instead.
*/
static inline unsigned long __file_ref_read_raw(file_ref_t *ref)
{
return atomic_long_read(&ref->refcnt);
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
*
* (C) SGI 2006, Christoph Lameter
* Cleaned up and restructured to ease the addition of alternative
* implementations of SLAB allocators.
* (C) Linux Foundation 2008-2013
* Unified interface for all slab allocators
*/
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>
enum _slab_flag_bits {
_SLAB_CONSISTENCY_CHECKS,
_SLAB_RED_ZONE,
_SLAB_POISON,
_SLAB_KMALLOC,
_SLAB_HWCACHE_ALIGN,
_SLAB_CACHE_DMA,
_SLAB_CACHE_DMA32,
_SLAB_STORE_USER,
_SLAB_PANIC,
_SLAB_TYPESAFE_BY_RCU,
_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
_SLAB_DEBUG_OBJECTS,
#endif
_SLAB_NOLEAKTRACE,
_SLAB_NO_MERGE,
#ifdef CONFIG_FAILSLAB
_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG
_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
_SLAB_KASAN,
#endif
_SLAB_NO_USER_FLAGS,
#ifdef CONFIG_KFENCE
_SLAB_SKIP_KFENCE,
#endif
#ifndef CONFIG_SLUB_TINY
_SLAB_RECLAIM_ACCOUNT,
#endif
_SLAB_OBJECT_POISON,
_SLAB_CMPXCHG_DOUBLE,
#ifdef CONFIG_SLAB_OBJ_EXT
_SLAB_NO_OBJ_EXT,
#endif
_SLAB_FLAGS_LAST_BIT
};
#define __SLAB_FLAG_BIT(nr) ((slab_flags_t __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED ((slab_flags_t __force)(0U))
/*
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS __SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE __SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
/**
* define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
*
* Sufficiently large objects are aligned on cache line boundary. For object
* size smaller than a half of cache line size, the alignment is on the half of
* cache line size. In general, if object size is smaller than 1/2^n of cache
* line size, the alignment is adjusted to 1/2^n.
*
* If explicit alignment is also requested by the respective
* &struct kmem_cache_args field, the greater of the two alignments is applied.
*/
#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32 __SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
/**
* define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
* that memory location is free to be reused at any time. Thus it may
* be possible to see another object there in the same RCU grace period.
*
* This feature only ensures the memory location backing the object
* stays valid, the trick to using this is relying on an independent
* object validation pass. Something like:
*
* ::
*
* begin:
* rcu_read_lock();
* obj = lockless_lookup(key);
* if (obj) {
* if (!try_get_ref(obj)) // might fail for free objects
* rcu_read_unlock();
* goto begin;
*
* if (obj->key != key) { // not the object we expected
* put_ref(obj);
* rcu_read_unlock();
* goto begin;
* }
* }
* rcu_read_unlock();
*
* This is useful if we need to approach a kernel structure obliquely,
* from its address obtained without the usual locking. We can lock
* the structure to stabilize it and check it's still at the given address,
* only if we can be sure that the memory has not been meanwhile reused
* for some other kind of object (which our subsystem's lock might corrupt).
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*
* Note that object identity check has to be done *after* acquiring a
* reference, therefore user has to ensure proper ordering for loads.
* Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
* the newly allocated object has to be fully initialized *before* its
* refcount gets initialized and proper ordering for stores is required.
* refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
* designed with the proper fences required for reference counting objects
* allocated with SLAB_TYPESAFE_BY_RCU.
*
* Note that it is not possible to acquire a lock within a structure
* allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
* as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
* are not zeroed before being given to the slab, which means that any
* locks must be initialized after each and every kmem_cache_alloc().
* Alternatively, make the ctor passed to kmem_cache_create() initialize
* the locks at page-allocation time, as is done in __i915_request_ctor(),
* sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
* to safely acquire those ctor-initialized locks under rcu_read_lock()
* protection.
*
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_UNUSED
#endif
/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE __SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
/*
* Prevent merging with compatible kmem caches. This flag should be used
* cautiously. Valid use cases:
*
* - caches created for self-tests (e.g. kunit)
* - general caches created and used by a subsystem, only when a
* (subsystem-specific) debug option is enabled
* - performance critical caches, should be very rare and consulted with slab
* maintainers, and not used together with CONFIG_SLUB_TINY
*/
#define SLAB_NO_MERGE __SLAB_FLAG_BIT(_SLAB_NO_MERGE)
/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB __SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
/**
* define SLAB_ACCOUNT - Account allocations to memcg.
*
* All object allocations from this cache will be memcg accounted, regardless of
* __GFP_ACCOUNT being or not being passed to individual allocations.
*/
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
#endif
#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN __SLAB_FLAG_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN __SLAB_FLAG_UNUSED
#endif
/*
* Ignore user specified debugging flags.
* Intended for caches created for self-tests so they have only flags
* specified in the code and other flags are ignored.
*/
#define SLAB_NO_USER_FLAGS __SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE __SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE __SLAB_FLAG_UNUSED
#endif
/* The following flags affect the page allocator grouping pages by mobility */
/**
* define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
*
* Use this flag for caches that have an associated shrinker. As a result, slab
* pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
* mobility, and are accounted in SReclaimable counter in /proc/meminfo
*/
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/* Slab created using create_boot_cache */
#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
#else
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
#endif
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
*
* ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
* Both make kfree a no-op.
*/
#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
#include <linux/kasan.h>
struct list_lru;
struct mem_cgroup;
/*
* struct kmem_cache related prototypes
*/
bool slab_is_available(void);
/**
* struct kmem_cache_args - Less common arguments for kmem_cache_create()
*
* Any uninitialized fields of the structure are interpreted as unused. The
* exception is @freeptr_offset where %0 is a valid value, so
* @use_freeptr_offset must be also set to %true in order to interpret the field
* as used. For @useroffset %0 is also valid, but only with non-%0
* @usersize.
*
* When %NULL args is passed to kmem_cache_create(), it is equivalent to all
* fields unused.
*/
struct kmem_cache_args {
/**
* @align: The required alignment for the objects.
*
* %0 means no specific alignment is requested.
*/
unsigned int align;
/**
* @useroffset: Usercopy region offset.
*
* %0 is a valid offset, when @usersize is non-%0
*/
unsigned int useroffset;
/**
* @usersize: Usercopy region size.
*
* %0 means no usercopy region is specified.
*/
unsigned int usersize;
/**
* @freeptr_offset: Custom offset for the free pointer
* in &SLAB_TYPESAFE_BY_RCU caches
*
* By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
* outside of the object. This might cause the object to grow in size.
* Cache creators that have a reason to avoid this can specify a custom
* free pointer offset in their struct where the free pointer will be
* placed.
*
* Note that placing the free pointer inside the object requires the
* caller to ensure that no fields are invalidated that are required to
* guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
* details).
*
* Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
* is specified, %use_freeptr_offset must be set %true.
*
* Note that @ctor currently isn't supported with custom free pointers
* as a @ctor requires an external free pointer.
*/
unsigned int freeptr_offset;
/**
* @use_freeptr_offset: Whether a @freeptr_offset is used.
*/
bool use_freeptr_offset;
/**
* @ctor: A constructor for the objects.
*
* The constructor is invoked for each object in a newly allocated slab
* page. It is the cache user's responsibility to free object in the
* same state as after calling the constructor, or deal appropriately
* with any differences between a freshly constructed and a reallocated
* object.
*
* %NULL means no constructor.
*/
void (*ctor)(void *);
/**
* @sheaf_capacity: Enable sheaves of given capacity for the cache.
*
* With a non-zero value, allocations from the cache go through caching
* arrays called sheaves. Each cpu has a main sheaf that's always
* present, and a spare sheaf that may be not present. When both become
* empty, there's an attempt to replace an empty sheaf with a full sheaf
* from the per-node barn.
*
* When no full sheaf is available, and gfp flags allow blocking, a
* sheaf is allocated and filled from slab(s) using bulk allocation.
* Otherwise the allocation falls back to the normal operation
* allocating a single object from a slab.
*
* Analogously, when freeing and both percpu sheaves are full, the barn
* may replace one of them with an empty sheaf, unless it is over capacity. In
* that case a sheaf is bulk freed to slab pages.
*
* The sheaves do not enforce NUMA placement of objects, so allocations
* via kmem_cache_alloc_node() with a node specified other than
* NUMA_NO_NODE will bypass them.
*
* Bulk allocation and free operations also try to use the cpu sheaves
* and barn, but fallback to using slab pages directly.
*
* When slub_debug is enabled for the cache, the sheaf_capacity argument
* is ignored.
*
* %0 means no sheaves will be created.
*/
unsigned int sheaf_capacity;
};
struct kmem_cache *__kmem_cache_create_args(const char *name,
unsigned int object_size,
struct kmem_cache_args *args,
slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache_args kmem_args = {
.align = align,
.ctor = ctor,
};
return __kmem_cache_create_args(name, size, &kmem_args, flags);
}
/**
* kmem_cache_create_usercopy - Create a kmem cache with a region suitable
* for copying to userspace.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @useroffset: Usercopy region offset
* @usersize: Usercopy region size
* @ctor: A constructor for the objects, or %NULL.
*
* This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
* if whitelisting a single field is sufficient, or kmem_cache_create() with
* the necessary parameters passed via the args parameter (see
* &struct kmem_cache_args)
*
* Return: a pointer to the cache on success, NULL on failure.
*/
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *))
{
struct kmem_cache_args kmem_args = {
.align = align,
.ctor = ctor,
.useroffset = useroffset,
.usersize = usersize,
};
return __kmem_cache_create_args(name, size, &kmem_args, flags);
}
/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
struct kmem_cache_args *args,
slab_flags_t flags)
{
struct kmem_cache_args kmem_default_args = {};
/* Make sure we don't get passed garbage. */
if (WARN_ON_ONCE(args))
return ERR_PTR(-EINVAL);
return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}
/**
* kmem_cache_create - Create a kmem cache.
* @__name: A string which is used in /proc/slabinfo to identify this cache.
* @__object_size: The size of objects to be created in this cache.
* @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
* means defaults will be used for all the arguments.
*
* This is currently implemented as a macro using ``_Generic()`` to call
* either the new variant of the function, or a legacy one.
*
* The new variant has 4 parameters:
* ``kmem_cache_create(name, object_size, args, flags)``
*
* See __kmem_cache_create_args() which implements this.
*
* The legacy variant has 5 parameters:
* ``kmem_cache_create(name, object_size, align, flags, ctor)``
*
* The align and ctor parameters map to the respective fields of
* &struct kmem_cache_args
*
* Context: Cannot be called within an interrupt, but can be interrupted.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
#define kmem_cache_create(__name, __object_size, __args, ...) \
_Generic((__args), \
struct kmem_cache_args *: __kmem_cache_create_args, \
void *: __kmem_cache_default_args, \
default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
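/*
 * Example of the two call styles (a minimal sketch; 'struct foo' and its
 * cache pointer are hypothetical):
 *
 *	struct kmem_cache *s;
 *
 *	// new style, passing a &struct kmem_cache_args
 *	struct kmem_cache_args args = { .align = 16 };
 *	s = kmem_cache_create("foo", sizeof(struct foo), &args, SLAB_ACCOUNT);
 *
 *	// legacy style, align and ctor as separate parameters
 *	s = kmem_cache_create("foo", sizeof(struct foo), 16, SLAB_ACCOUNT, NULL);
 */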
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);
/*
* Please use this macro to create slab caches. Simply specify the
* name of the structure and maybe some flags that are listed above.
*
* The alignment of the struct determines object alignment. If you
* e.g. add ____cacheline_aligned_in_smp to the struct declaration
* then the objects will be properly aligned in SMP configurations.
*/
#define KMEM_CACHE(__struct, __flags) \
__kmem_cache_create_args(#__struct, sizeof(struct __struct), \
&(struct kmem_cache_args) { \
.align = __alignof__(struct __struct), \
}, (__flags))
/*
* To whitelist a single field for copying to/from usercopy, use this
* macro instead for KMEM_CACHE() above.
*/
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
__kmem_cache_create_args(#__struct, sizeof(struct __struct), \
&(struct kmem_cache_args) { \
.align = __alignof__(struct __struct), \
.useroffset = offsetof(struct __struct, __field), \
.usersize = sizeof_field(struct __struct, __field), \
}, (__flags))
/*
* Common kmalloc functions provided by all allocators
*/
void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
unsigned long align,
gfp_t flags, int nid) __realloc_size(2);
#define krealloc_noprof(_o, _s, _f) krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
#define krealloc_node_align(...) alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
#define krealloc_node(_o, _s, _f, _n) krealloc_node_align(_o, _s, 1, _f, _n)
#define krealloc(...) krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
void kfree(const void *objp);
void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
/**
* ksize - Report actual allocation size of associated object
*
* @objp: Pointer returned from a prior kmalloc()-family allocation.
*
* This should not be used for writing beyond the originally requested
* allocation size. Either use krealloc() or round up the allocation size
* with kmalloc_size_roundup() prior to allocation. If this is used to
* access beyond the originally requested allocation size, UBSAN_BOUNDS
* and/or FORTIFY_SOURCE may trip, since they only know about the
* originally allocated size via the __alloc_size attribute.
*/
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
* Setting ARCH_DMA_MINALIGN in arch headers allows that.
*/
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
* aligned buffers.
*/
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
* Arches can define this function if they want to decide the minimum slab
* alignment at runtime. The value returned by the function must be a power
* of two and >= ARCH_SLAB_MINALIGN.
*/
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
return ARCH_SLAB_MINALIGN;
}
#endif
/*
* kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
* kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
* and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
*/
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
/*
* Kmalloc array related definitions
*/
/*
* SLUB directly allocates requests fitting in to an order-1 page
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
/*
* Kmalloc subsystem.
*/
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif
/*
* This restriction comes from byte sized index implementation.
* Page size is normally 2^12 bytes and, in this case, if we want to use
* byte sized index which can represent 2^8 entries, the size of the object
* should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
* If minimum size of kmalloc is less than 16, we use it as minimum object
* size and give up to use byte sized index.
*/
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
(KMALLOC_MIN_SIZE) : 16)
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR 0
#endif
/*
* Whenever changing this, take care of that kmalloc_type() and
* create_kmalloc_caches() still work as intended.
*
* KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
* is for accounted but unreclaimable and non-dma objects. All the other
* kmem caches can have both accounted and unaccounted objects.
*/
enum kmalloc_cache_type {
KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG
KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
KMALLOC_RANDOM_START = KMALLOC_NORMAL,
KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG
KMALLOC_CGROUP,
#endif
NR_KMALLOC_TYPES
};
typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];
extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];
/*
* Define gfp bits that should not be set for KMALLOC_NORMAL.
*/
#define KMALLOC_NOT_NORMAL_BITS \
(__GFP_RECLAIMABLE | \
(IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))
extern unsigned long random_kmalloc_seed;
static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
/*
* The most common case is KMALLOC_NORMAL, so test for it
* with a single branch for all the relevant flags.
*/
if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
return KMALLOC_NORMAL;
#endif
/*
* At least one of the flags has to be set. Their priorities in
* decreasing order are:
* 1) __GFP_DMA
* 2) __GFP_RECLAIMABLE
* 3) __GFP_ACCOUNT
*/
if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
return KMALLOC_DMA;
if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
return KMALLOC_RECLAIM;
else
return KMALLOC_CGROUP;
}
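/*
 * Examples (illustrative): GFP_KERNEL ends up in KMALLOC_NORMAL (or one of
 * its randomized copies with CONFIG_RANDOM_KMALLOC_CACHES); GFP_KERNEL |
 * __GFP_ACCOUNT selects KMALLOC_CGROUP on CONFIG_MEMCG kernels; GFP_KERNEL |
 * __GFP_RECLAIMABLE selects KMALLOC_RECLAIM; and __GFP_DMA wins over both
 * when CONFIG_ZONE_DMA is enabled.
 */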
/*
* Figure out which kmalloc slab an allocation of a certain size
* belongs to.
* 0 = zero alloc
* 1 = 65 .. 96 bytes
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
*
* Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
* typical usage is via kmalloc_index() and therefore evaluated at compile-time.
* Callers where !size_is_constant should only be test modules, where runtime
* overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
*/
static __always_inline unsigned int __kmalloc_index(size_t size,
bool size_is_constant)
{
if (!size)
return 0;
if (size <= KMALLOC_MIN_SIZE)
return KMALLOC_SHIFT_LOW;
if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
return 1;
if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
return 2;
if (size <= 8) return 3;
if (size <= 16) return 4;
if (size <= 32) return 5;
if (size <= 64) return 6;
if (size <= 128) return 7;
if (size <= 256) return 8;
if (size <= 512) return 9;
if (size <= 1024) return 10;
if (size <= 2 * 1024) return 11;
if (size <= 4 * 1024) return 12;
if (size <= 8 * 1024) return 13;
if (size <= 16 * 1024) return 14;
if (size <= 32 * 1024) return 15;
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
else
BUG();
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
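/*
 * Example (illustrative): __kmalloc_index(100, true) falls through to the
 * "size <= 128" case and returns 7, i.e. a 100-byte kmalloc() is served
 * from the 128-byte kmalloc cache.
 */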
#include <linux/alloc_tag.h>
/**
* kmem_cache_alloc - Allocate an object
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
*
* Allocate an object from this cache.
* See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
*
* Return: pointer to the new object or %NULL in case of error
*/
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
/**
* kmem_cache_charge - memcg charge an already allocated slab memory
* @objp: address of the slab object to memcg charge
* @gfpflags: describe the allocation context
*
* kmem_cache_charge allows charging a slab object to the current memcg,
* primarily in cases where charging at allocation time might not be possible
* because the target memcg is not known (i.e. softirq context)
*
* The objp should be pointer returned by the slab allocator functions like
* kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
* behavior can be controlled through gfpflags parameter, which affects how the
* necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
* that overcharging is requested instead of failure, but is not applied for the
* internal metadata allocation.
*
* There are several cases where it will return true even if the charging was
* not done. More specifically:
*
* 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
* 2. Already charged slab objects.
* 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
* without __GFP_ACCOUNT
* 4. Allocating internal metadata has failed
*
* Return: true if charge was successful otherwise false.
*/
bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
/*
* Bulk allocation and freeing operations. These are accelerated in an
* allocator specific way to avoid taking locks repeatedly or building
* metadata structures unnecessarily.
*
* Note that interrupts must be enabled when calling these functions.
*/
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(...) alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
static __always_inline void kfree_bulk(size_t size, void **p)
{
kmem_cache_free_bulk(NULL, size, p);
}
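/*
 * Usage sketch (editor's illustration, hypothetical cache): bulk allocation
 * fills an array of object pointers in one call and returns the number of
 * objects actually allocated (0 on failure); the matching bulk free releases
 * them all at once:
 *
 *	void *objs[16];
 *	int nr = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *				       ARRAY_SIZE(objs), objs);
 *	if (!nr)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cachep, nr, objs);
 */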
void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
struct slab_sheaf *
kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf **sheafp, unsigned int size);
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf *sheaf);
void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_from_sheaf(...) \
alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
/*
* These macros allow declaring a kmem_buckets * parameter alongside size, which
* can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
* sites don't have to pass NULL.
*/
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b)
#define PASS_BUCKET_PARAM(_b) (_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b) (_size)
#define PASS_BUCKET_PARAM(_b) NULL
#endif
/*
* The following functions are not to be used directly and are intended only
* for internal use from kmalloc() and kmalloc_node()
* with the exception of kunit tests
*/
void *__kmalloc_noprof(size_t size, gfp_t flags)
__assume_kmalloc_alignment __alloc_size(1);
void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
__assume_kmalloc_alignment __alloc_size(1);
void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
__assume_kmalloc_alignment __alloc_size(3);
void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size)
__assume_kmalloc_alignment __alloc_size(4);
void *__kmalloc_large_noprof(size_t size, gfp_t flags)
__assume_page_alignment __alloc_size(1);
void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
__assume_page_alignment __alloc_size(1);
/**
* kmalloc - allocate kernel memory
* @size: how many bytes of memory are required.
* @flags: describe the allocation context
*
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
*
* The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
* bytes. For @size of power of two bytes, the alignment is also guaranteed
* to be at least the size. For other sizes, the alignment is guaranteed to
* be at least the largest power-of-two divisor of @size.
*
* The @flags argument may be one of the GFP flags defined at
* include/linux/gfp_types.h and described at
* :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
*
* The recommended usage of the @flags is described at
* :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
*
* Below is a brief outline of the most useful GFP flags
*
* %GFP_KERNEL
* Allocate normal kernel RAM. May sleep.
*
* %GFP_NOWAIT
* Allocation will not sleep.
*
* %GFP_ATOMIC
* Allocation will not sleep. May use emergency pools.
*
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
* %__GFP_ZERO
* Zero the allocated memory before returning. Also see kzalloc().
*
* %__GFP_HIGH
* This allocation has high priority and may use emergency pools.
*
* %__GFP_NOFAIL
* Indicate that this allocation is in no way allowed to fail
* (think twice before using).
*
* %__GFP_NORETRY
* If memory is not immediately available,
* then give up at once.
*
* %__GFP_NOWARN
* If allocation fails, don't issue any warnings.
*
* %__GFP_RETRY_MAYFAIL
* Try really hard to satisfy the allocation, but fail
* eventually.
*/
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && size) {
unsigned int index;
if (size > KMALLOC_MAX_CACHE_SIZE)
return __kmalloc_large_noprof(size, flags);
index = kmalloc_index(size);
return __kmalloc_cache_noprof(kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
			      flags, size);
}
return __kmalloc_noprof(size, flags);
}
#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
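/*
 * Usage sketch (editor's illustration; the struct name is hypothetical):
 * the common allocate/check/free pattern for a small object:
 *
 *	struct foo_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 *	...
 *	kfree(ctx);
 */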
void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
#define kmem_buckets_alloc_track_caller(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))
static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && size) {
unsigned int index;
if (size > KMALLOC_MAX_CACHE_SIZE)
return __kmalloc_large_node_noprof(size, flags, node);
index = kmalloc_index(size);
return __kmalloc_cache_node_noprof(
kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, node, size);
}
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
/**
* krealloc_array - reallocate memory for an array.
* @p: pointer to the memory chunk to reallocate
* @new_n: new number of elements to alloc
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*
* If __GFP_ZERO logic is requested, callers must ensure that, starting with the
* initial memory allocation, every subsequent call to this API for the same
* memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
* __GFP_ZERO is not fully honored by this API.
*
* See krealloc_noprof() for further details.
*
* In any case, the contents of the object pointed to are preserved up to the
* lesser of the new and old sizes.
*/
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
size_t new_n,
size_t new_size,
gfp_t flags)
{
size_t bytes;
if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
return NULL;
return krealloc_noprof(p, bytes, flags);
}
#define krealloc_array(...) alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
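/*
 * Usage sketch (editor's illustration): growing an array with
 * krealloc_array():
 *
 *	struct item *tmp;
 *
 *	tmp = krealloc_array(items, new_count, sizeof(*items), GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	items = tmp;
 *
 * On failure the original items buffer is left intact, which is why the
 * return value must not be assigned straight back to items.
 */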
/**
* kcalloc - allocate memory for an array. The memory is set to zero.
* @n: number of elements.
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO)
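/*
 * Usage sketch (editor's illustration): array allocations should go through
 * the overflow-checked helpers rather than open-coded n * size arithmetic:
 *
 *	struct item *items = kcalloc(nr_items, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;
 *	...
 *	kfree(items);
 *
 * kmalloc_array() is the non-zeroing variant of the same pattern.
 */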
void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...) \
alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
* of just the calling function (confusing, eh?).
* It's useful when the call to kmalloc comes from a widely-used standard
* allocator where we care about the real place the memory allocation
* request comes from.
*/
#define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
#define kmalloc_track_caller_noprof(...) \
kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
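/*
 * Usage sketch (editor's illustration, modelled on kstrdup()): a generic
 * helper that allocates on behalf of its caller uses the _track_caller
 * variant so slab debugging attributes the memory to the real caller:
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *p = kmalloc_track_caller(len, gfp);
 *
 *		if (p)
 *			memcpy(p, s, len);
 *		return p;
 *	}
 */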
static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
int node)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
if (__builtin_constant_p(n) && __builtin_constant_p(size))
return kmalloc_node_noprof(bytes, flags, node);
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
#define kcalloc_node(_n, _size, _flags, _node) \
kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
/*
* Shortcuts
*/
#define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
/**
* kzalloc - allocate memory. The memory is set to zero.
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
return kmalloc_noprof(size, flags | __GFP_ZERO);
}
#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_align_noprof(_size, _align, _flags, _node) \
__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
#define kvmalloc_node_align(...) \
alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
#define kvmalloc_node(_s, _f, _n) kvmalloc_node_align(_s, 1, _f, _n)
#define kvmalloc(...) kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
#define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO)
#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
#define kmem_buckets_valloc(_b, _size, _flags) \
alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
return kvmalloc_node_align_noprof(bytes, 1, flags, node);
}
#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvcalloc_node_noprof(_n,_s,_f,_node) kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
#define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
gfp_t flags, int nid) __realloc_size(2);
#define kvrealloc_node_align(...) \
alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
#define kvrealloc_node(_p, _s, _f, _n) kvrealloc_node_align(_p, _s, 1, _f, _n)
#define kvrealloc(...) kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)
extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
extern void kvfree_sensitive(const void *addr, size_t len);
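/*
 * Usage sketch (editor's illustration): kvmalloc() tries kmalloc() first and
 * falls back to vmalloc() for larger requests, so kvfree() must be used to
 * release the result. With the DEFINE_FREE(kvfree, ...) declaration above,
 * the scope-based cleanup form can also be used:
 *
 *	struct entry *tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 *
 *	struct blob *b __free(kvfree) = kvzalloc(sizeof(*b), GFP_KERNEL);
 *	if (!b)
 *		return -ENOMEM;
 *
 * In the second form, b is kvfree()d automatically on every return path
 * once it goes out of scope.
 */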
unsigned int kmem_cache_size(struct kmem_cache *s);
#ifndef CONFIG_KVFREE_RCU_BATCHED
static inline void kvfree_rcu_barrier(void)
{
rcu_barrier();
}
static inline void kfree_rcu_scheduler_running(void) { }
#else
void kvfree_rcu_barrier(void);
void kfree_rcu_scheduler_running(void);
#endif
/**
* kmalloc_size_roundup - Report allocation bucket size for the given size
*
* @size: Number of bytes to round up from.
*
* This returns the number of bytes that would be available in a kmalloc()
* allocation of @size bytes. For example, a 126 byte request would be
* rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
* for the general-purpose kmalloc()-based allocations, and is not for the
* pre-sized kmem_cache_alloc()-based allocations.)
*
* Use this to kmalloc() the full bucket size ahead of time instead of using
* ksize() to query the size after an allocation.
*/
size_t kmalloc_size_roundup(size_t size);
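/*
 * Usage sketch (editor's illustration): callers that want to use the whole
 * bucket (e.g. growable buffers) round the request up front instead of
 * querying ksize() afterwards:
 *
 *	size_t want = 1000;
 *	size_t alloc = kmalloc_size_roundup(want);
 *	char *buf = kmalloc(alloc, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * With typical power-of-two kmalloc caches, alloc above would be 1024 for
 * the 1000-byte request.
 */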
void __init kmem_cache_init_late(void);
void __init kvfree_rcu_init(void);
#endif /* _LINUX_SLAB_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
#include <linux/gfp_types.h>
#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
#include <linux/cleanup.h>
#include <linux/sched.h>
struct vm_area_struct;
struct mempolicy;
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
static inline int gfp_migratetype(const gfp_t gfp_flags)
{
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
{
/*
* !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
* !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
* All GFP_* flags including GFP_NOWAIT use one or both flags.
* alloc_pages_nolock() is the only API that doesn't specify either flag.
*
* This is stronger than GFP_NOWAIT or GFP_ATOMIC because
* those are guaranteed to never block on a sleeping lock.
* Here we are enforcing that the allocation doesn't ever spin
* on any locks (i.e. only trylocks). There is no high level
* GFP_$FOO flag for this use in alloc_pages_nolock() as the
* regular page allocator doesn't fully support this
* allocation mode.
*/
return !!(gfp_flags & __GFP_RECLAIM);
}
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif
#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif
#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif
/*
* GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
* zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
* bits long and there are 16 of them to cover all possible combinations of
* __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
*
* The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
* But GFP_MOVABLE is not only a zone specifier but also an allocation
* policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
* Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
*
* bit result
* =================
* 0x0 => NORMAL
* 0x1 => DMA or NORMAL
* 0x2 => HIGHMEM or NORMAL
* 0x3 => BAD (DMA+HIGHMEM)
* 0x4 => DMA32 or NORMAL
* 0x5 => BAD (DMA+DMA32)
* 0x6 => BAD (HIGHMEM+DMA32)
* 0x7 => BAD (HIGHMEM+DMA32+DMA)
* 0x8 => NORMAL (MOVABLE+0)
* 0x9 => DMA or NORMAL (MOVABLE+DMA)
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
* 0xb => BAD (MOVABLE+HIGHMEM+DMA)
* 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
* 0xd => BAD (MOVABLE+DMA32+DMA)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*
* GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
*/
#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif
#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif
#define GFP_ZONE_TABLE ( \
(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)
/*
* GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
* __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
* entry starting with bit 0. Bit is set if the combination is not
* allowed.
*/
#define GFP_ZONE_BAD ( \
1 << (___GFP_DMA | ___GFP_HIGHMEM) \
| 1 << (___GFP_DMA | ___GFP_DMA32) \
| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)
static inline enum zone_type gfp_zone(gfp_t flags)
{
enum zone_type z;
int bit = (__force int) (flags & GFP_ZONEMASK);
z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
((1 << GFP_ZONES_SHIFT) - 1);
VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
return z;
}
/*
* There is only one page-allocator function, and two main namespaces to
* it. The alloc_page*() variants return 'struct page *' and as such
* can allocate highmem pages, the *get*page*() variants return
* virtual kernel addresses to the allocated page(s).
*/
static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
if (unlikely(flags & __GFP_THISNODE))
return ZONELIST_NOFALLBACK;
#endif
return ZONELIST_FALLBACK;
}
/*
* gfp flag masking for nested internal allocations.
*
* For code that needs to do allocations inside the public allocation API (e.g.
* memory allocation tracking code) the allocations need to obey the caller's
* allocation context constraints to prevent allocation context mismatches (e.g.
* GFP_KERNEL allocations in GFP_NOFS contexts) from potential deadlock
* situations.
*
* It is also assumed that these nested allocations are for internal kernel
* object storage purposes only and are not going to be used for DMA, etc. Hence
* we strip out all the zone information and leave just the context information
* intact.
*
* Further, internal allocations must fail before the higher level allocation
* can fail, so we must make them fail faster and fail silently. We also don't
* want them to deplete emergency reserves. Hence callers making these nested
* allocations must be prepared for them to fail.
*/
static inline gfp_t gfp_nested_mask(gfp_t flags)
{
return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
}
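/*
 * Usage sketch (editor's illustration, hypothetical tracking code): an
 * internal allocation made while servicing a caller's allocation inherits
 * the caller's context bits but is made to fail fast and silently:
 *
 *	static struct trace_rec *trace_alloc_rec(gfp_t caller_gfp)
 *	{
 *		return kmalloc(sizeof(struct trace_rec),
 *			       gfp_nested_mask(caller_gfp));
 *	}
 */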
/*
* We get the zone list from the current node and the gfp_mask.
* This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
* For the case of non-NUMA systems the NODE_DATA() gets optimized to
* &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))
struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))
unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
struct page **page_array);
#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
unsigned long nr_pages,
struct page **page_array);
#define alloc_pages_bulk_mempolicy(...) \
alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
/* Bulk allocate order-0 pages */
#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \
__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
static inline unsigned long
alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
struct page **page_array)
{
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}
#define alloc_pages_bulk_node(...) \
alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
return;
if (node_online(this_node))
return;
pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
dump_stack();
}
/*
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For a more general interface, see alloc_pages_node().
*/
static inline struct page *
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
warn_if_node_offline(nid, gfp_mask);
return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}
#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))
static inline
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
warn_if_node_offline(nid, gfp);
return __folio_alloc_noprof(gfp, order, nid, NULL);
}
#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))
/*
* Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
* prefer the current CPU's closest node. Otherwise node must be valid and
* online.
*/
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
unsigned int order)
{
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
return __alloc_pages_node_noprof(nid, gfp_mask, order);
}
#define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *mpol, pgoff_t ilx, int nid)
{
return folio_alloc_noprof(gfp, order);
}
#define vma_alloc_folio_noprof(gfp, order, vma, addr) \
folio_alloc_noprof(gfp, order)
#endif
#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
struct vm_area_struct *vma, unsigned long addr)
{
struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);
return &folio->page;
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))
extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))
void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))
void free_pages_exact(void *virt, size_t size);
__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define alloc_pages_exact_nid(...) \
alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
#define __get_dma_pages(gfp_mask, order) \
__get_free_pages((gfp_mask) | GFP_DMA, (order))
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
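/*
 * Usage sketch (editor's illustration): the two namespaces described
 * earlier, struct page based and virtual-address based, are released with
 * the matching free routine:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */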
void page_alloc_init_cpuhp(void);
int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
void page_alloc_init_late(void);
void setup_pcp_cacheinfo(unsigned int cpu);
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
* GFP flags are used before interrupts are enabled. Once interrupts are
* enabled, it is set to __GFP_BITS_MASK while the system is running. During
* hibernation, it is used by PM to avoid I/O during memory allocation while
* devices are suspended.
*/
extern gfp_t gfp_allowed_mask;
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
static inline bool gfp_has_io_fs(gfp_t gfp)
{
return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}
/*
* Check if the gfp flags allow compaction - GFP_NOIO is a really
* tricky context because the migration might require IO.
*/
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
typedef unsigned int __bitwise acr_flags_t;
#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
#ifdef CONFIG_CONTIG_ALLOC
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
int nid, nodemask_t *node)
{
struct page *page;
if (WARN_ON(!order || !(gfp & __GFP_COMP)))
return NULL;
page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
return page ? page_folio(page) : NULL;
}
#else
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
int nid, nodemask_t *node)
{
return NULL;
}
#endif
/* This should be paired with folio_put() rather than free_contig_range(). */
#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
#endif /* __LINUX_GFP_H */
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/filesystems.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* table of configured filesystems
*/
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fs_parser.h>
/*
* Handling of filesystem drivers list.
* Rules:
* Inclusion to/removals from/scanning of list are protected by spinlock.
* During unload, the module must call unregister_filesystem().
* We can access the fields of list element if:
* 1) spinlock is held or
* 2) we hold the reference to the module.
* The latter can be guaranteed by a call to try_module_get(); if it
* returns 0 we must skip the element, otherwise we have obtained the reference.
* Once the reference is obtained we can drop the spinlock.
*/
static struct file_system_type *file_systems;
static DEFINE_RWLOCK(file_systems_lock);
/* WARNING: This can be used only if we _already_ own a reference */
struct file_system_type *get_filesystem(struct file_system_type *fs)
{
__module_get(fs->owner);
return fs;
}
void put_filesystem(struct file_system_type *fs)
{
module_put(fs->owner);
}
static struct file_system_type **find_filesystem(const char *name, unsigned len)
{
struct file_system_type **p;
for (p = &file_systems; *p; p = &(*p)->next)
if (strncmp((*p)->name, name, len) == 0 &&
!(*p)->name[len])
break;
return p;
}
/**
* register_filesystem - register a new filesystem
* @fs: the file system structure
*
* Adds the file system passed to the list of file systems the kernel
* is aware of for mount and other syscalls. Returns 0 on success,
* or a negative errno code on an error.
*
* The &struct file_system_type that is passed is linked into the kernel
* structures and must not be freed until the file system has been
* unregistered.
*/
int register_filesystem(struct file_system_type * fs)
{
int res = 0;
struct file_system_type ** p;
if (fs->parameters &&
!fs_validate_description(fs->name, fs->parameters))
return -EINVAL;
BUG_ON(strchr(fs->name, '.'));
if (fs->next)
return -EBUSY;
write_lock(&file_systems_lock);
p = find_filesystem(fs->name, strlen(fs->name));
if (*p)
res = -EBUSY;
else
*p = fs;
write_unlock(&file_systems_lock);
return res;
}
EXPORT_SYMBOL(register_filesystem);
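/*
 * Usage sketch (editor's illustration; "examplefs" and its context init
 * callback are hypothetical): a filesystem module registers its
 * file_system_type on load and unregisters it on unload:
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.owner			= THIS_MODULE,
 *		.name			= "examplefs",
 *		.init_fs_context	= examplefs_init_fs_context,
 *		.kill_sb		= kill_litter_super,
 *	};
 *
 *	static int __init examplefs_init(void)
 *	{
 *		return register_filesystem(&examplefs_fs_type);
 *	}
 *
 *	static void __exit examplefs_exit(void)
 *	{
 *		unregister_filesystem(&examplefs_fs_type);
 *	}
 */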
/**
* unregister_filesystem - unregister a file system
* @fs: filesystem to unregister
*
* Remove a file system that was previously successfully registered
* with the kernel. An error is returned if the file system is not found.
* Zero is returned on a success.
*
* Once this function has returned the &struct file_system_type structure
* may be freed or reused.
*/
int unregister_filesystem(struct file_system_type * fs)
{
struct file_system_type ** tmp;
write_lock(&file_systems_lock);
tmp = &file_systems;
while (*tmp) {
if (fs == *tmp) {
*tmp = fs->next;
fs->next = NULL;
write_unlock(&file_systems_lock);
synchronize_rcu();
return 0;
}
tmp = &(*tmp)->next;
}
write_unlock(&file_systems_lock);
return -EINVAL;
}
EXPORT_SYMBOL(unregister_filesystem);
#ifdef CONFIG_SYSFS_SYSCALL
static int fs_index(const char __user * __name)
{
struct file_system_type * tmp;
struct filename *name;
int err, index;
name = getname(__name);
err = PTR_ERR(name);
if (IS_ERR(name))
return err;
err = -EINVAL;
read_lock(&file_systems_lock);
for (tmp=file_systems, index=0 ; tmp ; tmp=tmp->next, index++) {
if (strcmp(tmp->name, name->name) == 0) {
err = index;
break;
}
}
read_unlock(&file_systems_lock);
putname(name);
return err;
}
static int fs_name(unsigned int index, char __user * buf)
{
struct file_system_type * tmp;
int len, res = -EINVAL;
read_lock(&file_systems_lock);
for (tmp = file_systems; tmp; tmp = tmp->next, index--) {
if (index == 0) {
if (try_module_get(tmp->owner))
res = 0;
break;
}
}
read_unlock(&file_systems_lock);
if (res)
return res;
/* OK, we got the reference, so we can safely block */
len = strlen(tmp->name) + 1;
res = copy_to_user(buf, tmp->name, len) ? -EFAULT : 0;
put_filesystem(tmp);
return res;
}
static int fs_maxindex(void)
{
struct file_system_type * tmp;
int index;
read_lock(&file_systems_lock);
for (tmp = file_systems, index = 0 ; tmp ; tmp = tmp->next, index++)
;
read_unlock(&file_systems_lock);
return index;
}
/*
* Whee.. Weird sysv syscall.
*/
SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2)
{
int retval = -EINVAL;
switch (option) {
case 1:
retval = fs_index((const char __user *) arg1);
break;
case 2:
retval = fs_name(arg1, (char __user *) arg2);
break;
case 3:
retval = fs_maxindex();
break;
}
return retval;
}
#endif
int __init list_bdev_fs_names(char *buf, size_t size)
{
struct file_system_type *p;
size_t len;
int count = 0;
read_lock(&file_systems_lock);
for (p = file_systems; p; p = p->next) {
if (!(p->fs_flags & FS_REQUIRES_DEV))
continue;
len = strlen(p->name) + 1;
if (len > size) {
pr_warn("%s: truncating file system list\n", __func__);
break;
}
memcpy(buf, p->name, len);
buf += len;
size -= len;
count++;
}
read_unlock(&file_systems_lock);
return count;
}
#ifdef CONFIG_PROC_FS
static int filesystems_proc_show(struct seq_file *m, void *v)
{
struct file_system_type * tmp;
read_lock(&file_systems_lock);
tmp = file_systems;
while (tmp) {
seq_printf(m, "%s\t%s\n",
(tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev",
tmp->name);
tmp = tmp->next;
}
read_unlock(&file_systems_lock);
return 0;
}
static int __init proc_filesystems_init(void)
{
proc_create_single("filesystems", 0, NULL, filesystems_proc_show);
return 0;
}
module_init(proc_filesystems_init);
#endif
static struct file_system_type *__get_fs_type(const char *name, int len)
{
struct file_system_type *fs;
read_lock(&file_systems_lock);
fs = *(find_filesystem(name, len));
if (fs && !try_module_get(fs->owner))
fs = NULL;
read_unlock(&file_systems_lock);
return fs;
}
struct file_system_type *get_fs_type(const char *name)
{
struct file_system_type *fs;
const char *dot = strchr(name, '.');
int len = dot ? dot - name : strlen(name);
fs = __get_fs_type(name, len);
if (!fs && (request_module("fs-%.*s", len, name) == 0)) {
fs = __get_fs_type(name, len);
if (!fs)
pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n",
len, name);
}
if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
put_filesystem(fs);
fs = NULL;
}
return fs;
}
EXPORT_SYMBOL(get_fs_type);
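/*
 * Usage sketch (editor's illustration): mount-path code looks up a
 * filesystem type by name; get_fs_type() may load the "fs-<name>" module on
 * demand and takes a module reference that the caller must drop:
 *
 *	struct file_system_type *type = get_fs_type("ext4");
 *	if (!type)
 *		return -ENODEV;
 *	...
 *	put_filesystem(type);
 */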
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
/*
* Copyright 1995 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
struct folio_batch;
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
static inline void invalidate_remote_inode(struct inode *inode)
{
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode))
invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
int filemap_invalidate_pages(struct address_space *mapping,
loff_t pos, loff_t end, bool nowait);
int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
loff_t start, loff_t end);
static inline int filemap_fdatawait(struct address_space *mapping)
{
return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}
bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);
static inline int filemap_write_and_wait(struct address_space *mapping)
{
return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}
/**
* filemap_set_wb_err - set a writeback error on an address_space
* @mapping: mapping in which to set writeback error
* @err: error to be set in mapping
*
* When writeback fails in some way, we must record that error so that
* userspace can be informed when fsync and the like are called. We endeavor
* to report errors on any file that was open at the time of the error. Some
* internal callers also need to know when writeback errors have occurred.
*
* When a writeback error occurs, most filesystems will want to call
* filemap_set_wb_err to record the error in the mapping so that it will be
* automatically reported whenever fsync is called on the file.
*/
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
/* Fastpath for common case of no error */
if (unlikely(err))
__filemap_set_wb_err(mapping, err);
}
/**
* filemap_check_wb_err - has an error occurred since the mark was sampled?
* @mapping: mapping to check for writeback errors
* @since: previously-sampled errseq_t
*
* Grab the errseq_t value from the mapping, and see if it has changed "since"
* the given value was sampled.
*
* If it has then report the latest error set, otherwise return 0.
*/
static inline int filemap_check_wb_err(struct address_space *mapping,
errseq_t since)
{
return errseq_check(&mapping->wb_err, since);
}
/**
* filemap_sample_wb_err - sample the current errseq_t to test for later errors
* @mapping: mapping to be sampled
*
* Writeback errors are always reported relative to a particular sample point
* in the past. This function provides those sample points.
*/
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
return errseq_sample(&mapping->wb_err);
}
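/*
 * Usage sketch (editor's illustration): the sample/check pair brackets an
 * operation so that only errors raised since the sample point are reported:
 *
 *	errseq_t since = filemap_sample_wb_err(mapping);
 *
 *	... issue and wait for writeback ...
 *
 *	err = filemap_check_wb_err(mapping, since);
 */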
/**
* file_sample_sb_err - sample the current errseq_t to test for later errors
* @file: file pointer to be sampled
*
* Grab the most current superblock-level errseq_t value for the given
* struct file.
*/
static inline errseq_t file_sample_sb_err(struct file *file)
{
return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}
/*
* Flush file data before changing attributes. Caller must hold any locks
* required to prevent further writes to this file until we're done setting
* flags.
*/
static inline int inode_drain_writes(struct inode *inode)
{
inode_dio_wait(inode);
return filemap_write_and_wait(inode->i_mapping);
}
static inline bool mapping_empty(const struct address_space *mapping)
{
return xa_empty(&mapping->i_pages);
}
/*
* mapping_shrinkable - test if page cache state allows inode reclaim
* @mapping: the page cache mapping
*
* This checks the mapping's cache state for the purpose of inode
* reclaim and LRU management.
*
* The caller is expected to hold the i_lock, but is not required to
* hold the i_pages lock, which usually protects cache state. That's
* because the i_lock and the list_lru lock that protect the inode and
* its LRU state don't nest inside the irq-safe i_pages lock.
*
* Cache deletions are performed under the i_lock, which ensures that
* when an inode goes empty, it will reliably get queued on the LRU.
*
* Cache additions do not acquire the i_lock and may race with this
* check, in which case we'll report the inode as shrinkable when it
* has cache pages. This is okay: the shrinker also checks the
* refcount and the referenced bit, which will be elevated or set in
* the process of adding new cache pages to an inode.
*/
static inline bool mapping_shrinkable(const struct address_space *mapping)
{
void *head;
/*
* On highmem systems, there could be lowmem pressure from the
* inodes before there is highmem pressure from the page
* cache. Make inodes shrinkable regardless of cache state.
*/
if (IS_ENABLED(CONFIG_HIGHMEM))
return true;
/* Cache completely empty? Shrink away. */
head = rcu_access_pointer(mapping->i_pages.xa_head);
if (!head)
return true;
/*
* The xarray stores single offset-0 entries directly in the
* head pointer, which allows non-resident page cache entries
* to escape the shadow shrinker's list of xarray nodes. The
* inode shrinker needs to pick them up under memory pressure.
*/
if (!xa_is_node(head) && xa_is_value(head))
return true;
return false;
}
/*
* Bits in mapping->flags.
*/
enum mapping_flags {
AS_EIO = 0, /* IO error on async write */
AS_ENOSPC = 1, /* ENOSPC on async write */
AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */
AS_STABLE_WRITES = 7, /* must wait for writeback before modifying
folio contents */
AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */
AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
account usage to user cgroups */
/* Bits 16-25 are used for FOLIO_ORDER */
AS_FOLIO_ORDER_BITS = 5,
AS_FOLIO_ORDER_MIN = 16,
AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};
#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
/**
* mapping_set_error - record a writeback error in the address_space
* @mapping: the mapping in which an error should be set
* @error: the error to set in the mapping
*
* When writeback fails in some way, we must record that error so that
* userspace can be informed when fsync and the like are called. We endeavor
* to report errors on any file that was open at the time of the error. Some
* internal callers also need to know when writeback errors have occurred.
*
* When a writeback error occurs, most filesystems will want to call
* mapping_set_error to record the error in the mapping so that it can be
* reported when the application calls fsync(2).
*/
static inline void mapping_set_error(struct address_space *mapping, int error)
{
if (likely(!error))
return;
/* Record in wb_err for checkers using errseq_t based tracking */
__filemap_set_wb_err(mapping, error);
/* Record it in superblock */
if (mapping->host)
errseq_set(&mapping->host->i_sb->s_wb_err, error);
/* Record it in flags for now, for legacy callers */
if (error == -ENOSPC)
set_bit(AS_ENOSPC, &mapping->flags);
else
set_bit(AS_EIO, &mapping->flags);
}
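/*
 * Usage sketch (editor's illustration, hypothetical completion handler): a
 * filesystem records a failed writeback before ending writeback on the
 * folio, so that a later fsync() can report the error:
 *
 *	if (err)
 *		mapping_set_error(folio->mapping, err);
 *	folio_end_writeback(folio);
 */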
static inline void mapping_set_unevictable(struct address_space *mapping)
{
set_bit(AS_UNEVICTABLE, &mapping->flags);
}
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
static inline bool mapping_unevictable(const struct address_space *mapping)
{
return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
static inline void mapping_set_exiting(struct address_space *mapping)
{
set_bit(AS_EXITING, &mapping->flags);
}
static inline int mapping_exiting(const struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
static inline bool mapping_release_always(const struct address_space *mapping)
{
return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}
static inline void mapping_set_release_always(struct address_space *mapping)
{
set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}
static inline void mapping_clear_release_always(struct address_space *mapping)
{
clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}
static inline bool mapping_stable_writes(const struct address_space *mapping)
{
return test_bit(AS_STABLE_WRITES, &mapping->flags);
}
static inline void mapping_set_stable_writes(struct address_space *mapping)
{
set_bit(AS_STABLE_WRITES, &mapping->flags);
}
static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
clear_bit(AS_STABLE_WRITES, &mapping->flags);
}
static inline void mapping_set_inaccessible(struct address_space *mapping)
{
/*
* It's expected inaccessible mappings are also unevictable. Compaction
* migrate scanner (isolate_migratepages_block()) relies on this to
* reduce page locking.
*/
set_bit(AS_UNEVICTABLE, &mapping->flags);
set_bit(AS_INACCESSIBLE, &mapping->flags);
}
static inline bool mapping_inaccessible(const struct address_space *mapping)
{
return test_bit(AS_INACCESSIBLE, &mapping->flags);
}
static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
{
set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
{
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
}
/*
* This is non-atomic. Only to be used before the mapping is activated.
* Probably needs a barrier...
*/
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
m->gfp_mask = mask;
}
/*
* There are some parts of the kernel which assume that PMD entries
* are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
* limit the maximum allocation order to PMD size. I'm not aware of any
* assumptions about maximum order if THP are disabled, but 8 seems like
* a good order (that's 1MB if you're using 4kB pages)
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
#else
#define PREFERRED_MAX_PAGECACHE_ORDER 8
#endif
/*
* xas_split_alloc() does not support arbitrary orders. This implies no
* 512MB THP on ARM64 with 64KB base page size.
*/
#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
/*
* mapping_max_folio_size_supported() - Check the max folio size supported
*
* The filesystem should call this function at mount time if there is a
* requirement on the folio mapping size in the page cache.
*/
static inline size_t mapping_max_folio_size_supported(void)
{
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
return PAGE_SIZE;
}
/*
* mapping_set_folio_order_range() - Set the orders supported by a file.
* @mapping: The address space of the file.
* @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
* @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
*
* The filesystem should call this function in its inode constructor to
* indicate which base size (min) and maximum size (max) of folio the VFS
* can use to cache the contents of the file. This should only be used
* if the filesystem needs special handling of folio sizes (ie there is
* something the core cannot know).
* Do not tune it based on, eg, i_size.
*
* Context: This should not be called while the inode is active as it
* is non-atomic.
*/
static inline void mapping_set_folio_order_range(struct address_space *mapping,
unsigned int min,
unsigned int max)
{
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return;
if (min > MAX_PAGECACHE_ORDER)
min = MAX_PAGECACHE_ORDER;
if (max > MAX_PAGECACHE_ORDER)
max = MAX_PAGECACHE_ORDER;
if (max < min)
max = min;
mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
(min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
}
static inline void mapping_set_folio_min_order(struct address_space *mapping,
unsigned int min)
{
mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
}
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
* @mapping: The address space of the file.
*
* The filesystem should call this function in its inode constructor to
* indicate that the VFS can use large folios to cache the contents of
* the file.
*
* Context: This should not be called while the inode is active as it
* is non-atomic.
*/
static inline void mapping_set_large_folios(struct address_space *mapping)
{
mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
}
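/*
 * Usage sketch (editor's illustration, hypothetical filesystem): opting a
 * freshly set up inode's page cache into large folios from the inode
 * setup path:
 *
 *	static void examplefs_set_inode_ops(struct inode *inode)
 *	{
 *		...
 *		mapping_set_large_folios(inode->i_mapping);
 *	}
 */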
static inline unsigned int
mapping_max_folio_order(const struct address_space *mapping)
{
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return 0;
return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
}
static inline unsigned int
mapping_min_folio_order(const struct address_space *mapping)
{
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return 0;
return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
}
static inline unsigned long
mapping_min_folio_nrpages(const struct address_space *mapping)
{
return 1UL << mapping_min_folio_order(mapping);
}
static inline unsigned long
mapping_min_folio_nrbytes(const struct address_space *mapping)
{
return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
}
/**
* mapping_align_index() - Align index for this mapping.
* @mapping: The address_space.
* @index: The page index.
*
* The index of a folio must be naturally aligned. If you are adding a
* new folio to the page cache and need to know what index to give it,
* call this function.
*/
static inline pgoff_t mapping_align_index(const struct address_space *mapping,
pgoff_t index)
{
return round_down(index, mapping_min_folio_nrpages(mapping));
}
/*
* Large folio support currently depends on THP. These dependencies are
* being worked on but are not yet fixed.
*/
static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
"Anonymous mapping always supports large folio");
return mapping_max_folio_order(mapping) > 0;
}
/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
return PAGE_SIZE << mapping_max_folio_order(mapping);
}
static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
return atomic_read(&mapping->nr_thps);
#else
return 0;
#endif
}
static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
if (!mapping_large_folio_support(mapping))
atomic_inc(&mapping->nr_thps);
#else
WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}
static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
if (!mapping_large_folio_support(mapping))
atomic_dec(&mapping->nr_thps);
#else
WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}
struct address_space *folio_mapping(const struct folio *folio);
/**
* folio_flush_mapping - Find the file mapping this folio belongs to.
* @folio: The folio.
*
* For folios which are in the page cache, return the mapping that this
* folio belongs to. Anonymous folios return NULL, even if they're in
* the swap cache. Other kinds of folio also return NULL.
*
* This is ONLY used by architecture cache flushing code. If you aren't
* writing cache flushing code, you want either folio_mapping() or
* folio_file_mapping().
*/
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
if (unlikely(folio_test_swapcache(folio)))
return NULL;
return folio_mapping(folio);
}
/**
* folio_inode - Get the host inode for this folio.
* @folio: The folio.
*
* For folios which are in the page cache, return the inode that this folio
* belongs to.
*
* Do not call this for folios which aren't in the page cache.
*/
static inline struct inode *folio_inode(struct folio *folio)
{
return folio->mapping->host;
}
/**
* folio_attach_private - Attach private data to a folio.
* @folio: Folio to attach data to.
* @data: Data to attach to folio.
*
* Attaching private data to a folio increments the page's reference count.
* The data must be detached before the folio will be freed.
*/
static inline void folio_attach_private(struct folio *folio, void *data)
{
folio_get(folio);
folio->private = data;
folio_set_private(folio);
}
/**
* folio_change_private - Change private data on a folio.
* @folio: Folio to change the data on.
* @data: Data to set on the folio.
*
* Change the private data attached to a folio and return the old
* data. The page must previously have had data attached and the data
* must be detached before the folio will be freed.
*
* Return: Data that was previously attached to the folio.
*/
static inline void *folio_change_private(struct folio *folio, void *data)
{
void *old = folio_get_private(folio);
folio->private = data;
return old;
}
/**
* folio_detach_private - Detach private data from a folio.
* @folio: Folio to detach data from.
*
* Removes the data that was previously attached to the folio and decrements
* the refcount on the page.
*
* Return: Data that was attached to the folio.
*/
static inline void *folio_detach_private(struct folio *folio)
{
void *data = folio_get_private(folio);
if (!folio_test_private(folio))
return NULL;
folio_clear_private(folio);
folio->private = NULL;
folio_put(folio);
return data;
}
static inline void attach_page_private(struct page *page, void *data)
{
folio_attach_private(page_folio(page), data);
}
static inline void *detach_page_private(struct page *page)
{
return folio_detach_private(page_folio(page));
}
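/*
 * Usage sketch (editor's illustration, hypothetical per-folio state):
 * private data holds a folio reference while attached, so detaching and
 * freeing must be paired with the attach:
 *
 *	struct foo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	folio_attach_private(folio, st);
 *	...
 *	st = folio_detach_private(folio);
 *	kfree(st);
 */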
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
return folio_alloc_noprof(gfp, order);
}
#endif
#define filemap_alloc_folio(...) \
alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
return &filemap_alloc_folio(gfp, 0)->page;
}
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
typedef int filler_t(struct file *, struct folio *);
pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
/**
* typedef fgf_t - Flags for getting folios from the page cache.
*
* Most users of the page cache will not need to use these flags;
* there are convenience functions such as filemap_get_folio() and
* filemap_lock_folio(). For users which need more control over exactly
* what is done with the folios, these flags to __filemap_get_folio()
* are available.
*
* * %FGP_ACCESSED - The folio will be marked accessed.
* * %FGP_LOCK - The folio is returned locked.
* * %FGP_CREAT - If no folio is present then a new folio is allocated,
* added to the page cache and the VM's LRU list. The folio is
* returned locked.
* * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
* folio is already in cache. If the folio was allocated, unlock it
* before returning so the caller can do the same dance.
* * %FGP_WRITE - The folio will be written to by the caller.
* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
* * %FGP_NOWAIT - Don't block on the folio lock.
* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
* * %FGP_DONTCACHE - Uncached buffered IO
* * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
* implementation.
*/
typedef unsigned int __bitwise fgf_t;
#define FGP_ACCESSED ((__force fgf_t)0x00000001)
#define FGP_LOCK ((__force fgf_t)0x00000002)
#define FGP_CREAT ((__force fgf_t)0x00000004)
#define FGP_WRITE ((__force fgf_t)0x00000008)
#define FGP_NOFS ((__force fgf_t)0x00000010)
#define FGP_NOWAIT ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
#define FGP_STABLE ((__force fgf_t)0x00000080)
#define FGP_DONTCACHE ((__force fgf_t)0x00000100)
#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
static inline unsigned int filemap_get_order(size_t size)
{
unsigned int shift = ilog2(size);
if (shift <= PAGE_SHIFT)
return 0;
return shift - PAGE_SHIFT;
}
/**
* fgf_set_order - Encode a length in the fgf_t flags.
* @size: The suggested size of the folio to create.
*
* The caller of __filemap_get_folio() can use this to suggest a preferred
* size for the folio that is created. If there is already a folio at
* the index, it will be returned, no matter what its size. If a folio
* is freshly created, it may be of a different size than requested
* due to alignment constraints, memory pressure, or the presence of
* other folios at nearby indices.
*/
static inline fgf_t fgf_set_order(size_t size)
{
unsigned int order = filemap_get_order(size);
if (!order)
return 0;
return (__force fgf_t)(order << 26);
}
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
fgf_t fgp_flags, gfp_t gfp);
/**
* write_begin_get_folio - Get folio for write_begin with flags.
* @iocb: The kiocb passed from write_begin (may be NULL).
* @mapping: The address space to search.
* @index: The page cache index.
* @len: Length of data being written.
*
 * This is a helper for filesystem write_begin() implementations.
 * It wraps __filemap_get_folio() with the FGP_WRITEBEGIN flags, adds a
 * folio order hint derived from @len, and requests uncached behaviour
 * when the kiocb carries IOCB_DONTCACHE.
*
* Return: A folio or an ERR_PTR.
*/
static inline struct folio *write_begin_get_folio(const struct kiocb *iocb,
struct address_space *mapping, pgoff_t index, size_t len)
{
fgf_t fgp_flags = FGP_WRITEBEGIN;
fgp_flags |= fgf_set_order(len);
if (iocb && iocb->ki_flags & IOCB_DONTCACHE)
fgp_flags |= FGP_DONTCACHE;
return __filemap_get_folio(mapping, index, fgp_flags,
mapping_gfp_mask(mapping));
}
/**
* filemap_get_folio - Find and get a folio.
* @mapping: The address_space to search.
* @index: The page index.
*
* Looks up the page cache entry at @mapping & @index. If a folio is
* present, it is returned with an increased refcount.
*
* Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
* this index. Will not return a shadow, swap or DAX entry.
*/
static inline struct folio *filemap_get_folio(struct address_space *mapping,
pgoff_t index)
{
return __filemap_get_folio(mapping, index, 0, 0);
}
/**
* filemap_lock_folio - Find and lock a folio.
* @mapping: The address_space to search.
* @index: The page index.
*
* Looks up the page cache entry at @mapping & @index. If a folio is
* present, it is returned locked with an increased refcount.
*
* Context: May sleep.
* Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
* this index. Will not return a shadow, swap or DAX entry.
*/
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
pgoff_t index)
{
return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}
/**
* filemap_grab_folio - grab a folio from the page cache
* @mapping: The address space to search
* @index: The page index
*
* Looks up the page cache entry at @mapping & @index. If no folio is found,
* a new folio is created. The folio is locked, marked as accessed, and
* returned.
*
 * Return: The found or created folio, or ERR_PTR(-ENOMEM) if no folio was
 * found and a new one could not be created.
*/
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
pgoff_t index)
{
return __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
mapping_gfp_mask(mapping));
}
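/*
 * Hypothetical usage sketch: looking up (or creating) a folio with
 * filemap_grab_folio() and handling the ERR_PTR convention used by these
 * helpers. myfs_touch_index() is an illustrative name only.
 */
#if 0
static int myfs_touch_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_grab_folio(mapping, index);

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* The folio is returned locked, marked accessed and referenced. */
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
#endif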
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
* @offset: the page index
*
* Looks up the page cache slot at @mapping & @offset. If there is a
* page cache page, it is returned with an increased refcount.
*
* Otherwise, %NULL is returned.
*/
static inline struct page *find_get_page(struct address_space *mapping,
pgoff_t offset)
{
return pagecache_get_page(mapping, offset, 0, 0);
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, fgf_t fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
* @index: the page index
*
* Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
* Context: May sleep.
* Return: A struct page or %NULL if there is no page in the cache for this
* index.
*/
static inline struct page *find_lock_page(struct address_space *mapping,
pgoff_t index)
{
return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}
/**
* find_or_create_page - locate or add a pagecache page
* @mapping: the page's address_space
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
*
 * Looks up the page cache slot at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
* If the page is not present, a new page is allocated using @gfp_mask
* and added to the page cache and the VM's LRU list. The page is
* returned locked and with an increased refcount.
*
* On memory exhaustion, %NULL is returned.
*
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
*/
static inline struct page *find_or_create_page(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask)
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
gfp_mask);
}
/**
* grab_cache_page_nowait - returns locked page at given index in given cache
* @mapping: target address_space
* @index: the page index
*
 * Returns the locked page at the given index in the given cache, creating it
 * if needed, but does not wait if the page is already locked or in order to
 * reclaim memory.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
*
* Clear __GFP_FS when allocating the page to avoid recursion into the fs
* and deadlock against the caller's locked page.
*/
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
pgoff_t index)
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
mapping_gfp_mask(mapping));
}
/**
* folio_next_index - Get the index of the next folio.
* @folio: The current folio.
*
* Return: The index of the folio which follows this folio in the file.
*/
static inline pgoff_t folio_next_index(const struct folio *folio)
{
return folio->index + folio_nr_pages(folio);
}
/**
* folio_file_page - The page for a particular index.
* @folio: The folio which contains this index.
* @index: The index we want to look up.
*
* Sometimes after looking up a folio in the page cache, we need to
* obtain the specific page for an index (eg a page fault).
*
* Return: The page containing the file data for this index.
*/
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
/**
* folio_contains - Does this folio contain this index?
* @folio: The folio.
* @index: The page index within the file.
*
* Context: The caller should have the folio locked and ensure
* e.g., shmem did not move this folio to the swap cache.
* Return: true or false.
*/
static inline bool folio_contains(const struct folio *folio, pgoff_t index)
{
VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
return index - folio->index < folio_nr_pages(folio);
}
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
struct folio *read_cache_folio(struct address_space *, pgoff_t index,
filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, struct file *file)
{
return read_cache_page(mapping, index, NULL, file);
}
static inline struct folio *read_mapping_folio(struct address_space *mapping,
pgoff_t index, struct file *file)
{
return read_cache_folio(mapping, index, NULL, file);
}
/**
* page_pgoff - Calculate the logical page offset of this page.
* @folio: The folio containing this page.
* @page: The page which we need the offset of.
*
* For file pages, this is the offset from the beginning of the file
* in units of PAGE_SIZE. For anonymous pages, this is the offset from
* the beginning of the anon_vma in units of PAGE_SIZE. This will
* return nonsense for KSM pages.
*
* Context: Caller must have a reference on the folio or otherwise
* prevent it from being split or freed.
*
* Return: The offset in units of PAGE_SIZE.
*/
static inline pgoff_t page_pgoff(const struct folio *folio,
const struct page *page)
{
return folio->index + folio_page_idx(folio, page);
}
/**
* folio_pos - Returns the byte position of this folio in its file.
* @folio: The folio.
*/
static inline loff_t folio_pos(const struct folio *folio)
{
return ((loff_t)folio->index) * PAGE_SIZE;
}
/*
* Return byte-offset into filesystem object for page.
*/
static inline loff_t page_offset(struct page *page)
{
struct folio *folio = page_folio(page);
return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
}
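/*
 * Hypothetical sketch: deriving the half-open byte range [start, end) covered
 * by a folio from folio_pos() and folio_size(); illustrative only.
 */
#if 0
static void myfs_folio_byte_range(const struct folio *folio,
				  loff_t *start, loff_t *end)
{
	*start = folio_pos(folio);
	*end = folio_pos(folio) + folio_size(folio);
}
#endif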
/*
 * Get the offset in units of PAGE_SIZE (even for hugetlb folios).
*/
static inline pgoff_t folio_pgoff(const struct folio *folio)
{
return folio->index;
}
static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
const unsigned long address)
{
pgoff_t pgoff;
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
return pgoff;
}
struct wait_page_key {
struct folio *folio;
int bit_nr;
int page_match;
};
struct wait_page_queue {
struct folio *folio;
int bit_nr;
wait_queue_entry_t wait;
};
static inline bool wake_page_match(struct wait_page_queue *wait_page,
struct wait_page_key *key)
{
if (wait_page->folio != key->folio)
return false;
key->page_match = 1;
if (wait_page->bit_nr != key->bit_nr)
return false;
return true;
}
void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);
/**
* folio_trylock() - Attempt to lock a folio.
* @folio: The folio to attempt to lock.
*
* Sometimes it is undesirable to wait for a folio to be unlocked (eg
* when the locks are being taken in the wrong order, or if making
* progress through a batch of folios is more important than processing
* them in order). Usually folio_lock() is the correct function to call.
*
* Context: Any context.
* Return: Whether the lock was successfully acquired.
*/
static inline bool folio_trylock(struct folio *folio)
{
return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}
/*
* Return true if the page was successfully locked
*/
static inline bool trylock_page(struct page *page)
{
return folio_trylock(page_folio(page));
}
/**
* folio_lock() - Lock this folio.
* @folio: The folio to lock.
*
* The folio lock protects against many things, probably more than it
* should. It is primarily held while a folio is being brought uptodate,
* either from its backing file or from swap. It is also held while a
* folio is being truncated from its address_space, so holding the lock
* is sufficient to keep folio->mapping stable.
*
* The folio lock is also held while write() is modifying the page to
* provide POSIX atomicity guarantees (as long as the write does not
* cross a page boundary). Other modifications to the data in the folio
* do not hold the folio lock and can race with writes, eg DMA and stores
* to mapped pages.
*
* Context: May sleep. If you need to acquire the locks of two or
* more folios, they must be in order of ascending index, if they are
* in the same address_space. If they are in different address_spaces,
* acquire the lock of the folio which belongs to the address_space which
* has the lowest address in memory first.
*/
static inline void folio_lock(struct folio *folio)
{
might_sleep();
if (!folio_trylock(folio)) __folio_lock(folio);
}
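/*
 * Hypothetical sketch of the lock-ordering rule described above: when two
 * folios from the same address_space must both be locked, take the one with
 * the lower index first. myfs_lock_two() is an illustrative name only.
 */
#if 0
static void myfs_lock_two(struct folio *a, struct folio *b)
{
	if (a->index > b->index) {
		struct folio *tmp = a;

		a = b;
		b = tmp;
	}
	folio_lock(a);	/* lower index first */
	folio_lock(b);
}
#endif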
/**
* lock_page() - Lock the folio containing this page.
* @page: The page to lock.
*
* See folio_lock() for a description of what the lock protects.
* This is a legacy function and new code should probably use folio_lock()
* instead.
*
* Context: May sleep. Pages in the same folio share a lock, so do not
* attempt to lock two pages which share a folio.
*/
static inline void lock_page(struct page *page)
{
struct folio *folio;
might_sleep();
folio = page_folio(page);
if (!folio_trylock(folio))
__folio_lock(folio);
}
/**
* folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
* @folio: The folio to lock.
*
* Attempts to lock the folio, like folio_lock(), except that the sleep
* to acquire the lock is interruptible by a fatal signal.
*
* Context: May sleep; see folio_lock().
* Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
*/
static inline int folio_lock_killable(struct folio *folio)
{
might_sleep();
if (!folio_trylock(folio))
return __folio_lock_killable(folio);
return 0;
}
/*
* folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_lock implications depend on flags; see
* __folio_lock_or_retry().
*/
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
struct vm_fault *vmf)
{
might_sleep();
if (!folio_trylock(folio))
return __folio_lock_or_retry(folio, vmf);
return 0;
}
/*
* This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
* and should not be used directly.
*/
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);
/*
* Wait for a folio to be unlocked.
*
* This must be called with the caller "holding" the folio,
* ie with increased folio reference count so that the folio won't
* go away during the wait.
*/
static inline void folio_wait_locked(struct folio *folio)
{
if (folio_test_locked(folio))
folio_wait_bit(folio, PG_locked);
}
static inline int folio_wait_locked_killable(struct folio *folio)
{
if (!folio_test_locked(folio))
return 0;
return folio_wait_bit_killable(folio, PG_locked);
}
void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void folio_end_writeback_no_dropbehind(struct folio *folio);
void folio_end_dropbehind(struct folio *folio);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
/* Avoid atomic ops, locking, etc. when not actually needed. */
if (folio_test_dirty(folio))
__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);
/*
* Fault in userspace address range.
*/
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
int whence);
/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp, void **shadowp);
bool filemap_range_has_writeback(struct address_space *mapping,
loff_t start_byte, loff_t end_byte);
/**
* filemap_range_needs_writeback - check if range potentially needs writeback
* @mapping: address space within which to check
* @start_byte: offset in bytes where the range starts
* @end_byte: offset in bytes where the range ends (inclusive)
*
 * Check whether the supplied range contains at least one page that may need
 * writeback, which usually indicates that a direct write to this range would
 * trigger writeback. Used by O_DIRECT read/write with IOCB_NOWAIT to decide
 * whether the caller needs to do filemap_write_and_wait_range() before
 * proceeding.
*
* Return: %true if the caller should do filemap_write_and_wait_range() before
* doing O_DIRECT to a page in this range, %false otherwise.
*/
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
loff_t start_byte,
loff_t end_byte)
{
if (!mapping->nrpages)
return false;
if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
return false;
return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
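/*
 * Hypothetical sketch of the IOCB_NOWAIT usage described above: return
 * -EAGAIN instead of blocking when the range may still need writeback.
 * myfs_dio_nowait_check() is an illustrative name only.
 */
#if 0
static int myfs_dio_nowait_check(struct kiocb *iocb, loff_t pos, size_t len)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;

	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    filemap_range_needs_writeback(mapping, pos, pos + len - 1))
		return -EAGAIN;
	return 0;
}
#endif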
/**
* struct readahead_control - Describes a readahead request.
*
* A readahead request is for consecutive pages. Filesystems which
* implement the ->readahead method should call readahead_folio() or
* __readahead_batch() in a loop and attempt to start reads into each
* folio in the request.
*
* Most of the fields in this struct are private and should be accessed
* by the functions below.
*
* @file: The file, used primarily by network filesystems for authentication.
* May be NULL if invoked internally by the filesystem.
* @mapping: Readahead this filesystem object.
* @ra: File readahead state. May be NULL.
*/
struct readahead_control {
struct file *file;
struct address_space *mapping;
struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
bool dropbehind;
bool _workingset;
unsigned long _pflags;
};
#define DEFINE_READAHEAD(ractl, f, r, m, i) \
struct readahead_control ractl = { \
.file = f, \
.mapping = m, \
.ra = r, \
._index = i, \
}
#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
void page_cache_ra_unbounded(struct readahead_control *,
unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
loff_t new_start, size_t new_len);
/**
* page_cache_sync_readahead - generic file readahead
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @file: Used by the filesystem for authentication.
* @index: Index of first page to be read.
* @req_count: Total number of pages being read by the caller.
*
* page_cache_sync_readahead() should be called when a cache miss happened:
* it will submit the read. The readahead logic may decide to piggyback more
* pages onto the read request if access patterns suggest it will improve
* performance.
*/
static inline
void page_cache_sync_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file, pgoff_t index,
unsigned long req_count)
{
DEFINE_READAHEAD(ractl, file, ra, mapping, index);
page_cache_sync_ra(&ractl, req_count);
}
/**
* page_cache_async_readahead - file readahead for marked pages
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @file: Used by the filesystem for authentication.
* @folio: The folio which triggered the readahead call.
* @req_count: Total number of pages being read by the caller.
*
* page_cache_async_readahead() should be called when a page is used which
* is marked as PageReadahead; this is a marker to suggest that the application
* has used up enough of the readahead window that we should start pulling in
* more pages.
*/
static inline
void page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
struct folio *folio, unsigned long req_count)
{
DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
page_cache_async_ra(&ractl, folio, req_count);
}
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
struct folio *folio;
BUG_ON(ractl->_batch_count > ractl->_nr_pages);
ractl->_nr_pages -= ractl->_batch_count;
ractl->_index += ractl->_batch_count;
if (!ractl->_nr_pages) {
ractl->_batch_count = 0;
return NULL;
}
folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
ractl->_batch_count = folio_nr_pages(folio);
return folio;
}
/**
* readahead_folio - Get the next folio to read.
* @ractl: The current readahead request.
*
* Context: The folio is locked. The caller should unlock the folio once
* all I/O to that folio has completed.
* Return: A pointer to the next folio, or %NULL if we are done.
*/
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
struct folio *folio = __readahead_folio(ractl);
if (folio)
folio_put(folio);
return folio;
}
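/*
 * Hypothetical sketch of the loop described for struct readahead_control:
 * an ->readahead() implementation pulling locked folios with readahead_folio()
 * and starting I/O on each. myfs_readahead() and myfs_submit_read() are
 * illustrative names only.
 */
#if 0
static void myfs_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL) {
		/*
		 * The folio is locked and its reference has already been
		 * dropped; unlock it once its I/O completes, e.g. via
		 * folio_end_read().
		 */
		myfs_submit_read(folio);	/* hypothetical I/O submission */
	}
}
#endif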
static inline unsigned int __readahead_batch(struct readahead_control *rac,
struct page **array, unsigned int array_sz)
{
unsigned int i = 0;
XA_STATE(xas, &rac->mapping->i_pages, 0);
struct folio *folio;
BUG_ON(rac->_batch_count > rac->_nr_pages);
rac->_nr_pages -= rac->_batch_count;
rac->_index += rac->_batch_count;
rac->_batch_count = 0;
xas_set(&xas, rac->_index);
rcu_read_lock();
xas_for_each(&xas, folio, rac->_index + rac->_nr_pages - 1) {
if (xas_retry(&xas, folio))
continue;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
array[i++] = folio_page(folio, 0);
rac->_batch_count += folio_nr_pages(folio);
if (i == array_sz)
break;
}
rcu_read_unlock();
return i;
}
/**
* readahead_pos - The byte offset into the file of this readahead request.
* @rac: The readahead request.
*/
static inline loff_t readahead_pos(const struct readahead_control *rac)
{
return (loff_t)rac->_index * PAGE_SIZE;
}
/**
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
static inline size_t readahead_length(const struct readahead_control *rac)
{
return rac->_nr_pages * PAGE_SIZE;
}
/**
* readahead_index - The index of the first page in this readahead request.
* @rac: The readahead request.
*/
static inline pgoff_t readahead_index(const struct readahead_control *rac)
{
return rac->_index;
}
/**
* readahead_count - The number of pages in this readahead request.
* @rac: The readahead request.
*/
static inline unsigned int readahead_count(const struct readahead_control *rac)
{
return rac->_nr_pages;
}
/**
* readahead_batch_length - The number of bytes in the current batch.
* @rac: The readahead request.
*/
static inline size_t readahead_batch_length(const struct readahead_control *rac)
{
return rac->_batch_count * PAGE_SIZE;
}
static inline unsigned long dir_pages(const struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
}
/**
* folio_mkwrite_check_truncate - check if folio was truncated
* @folio: the folio to check
* @inode: the inode to check the folio against
*
* Return: the number of bytes in the folio up to EOF,
* or -EFAULT if the folio was truncated.
*/
static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
const struct inode *inode)
{
loff_t size = i_size_read(inode);
pgoff_t index = size >> PAGE_SHIFT;
size_t offset = offset_in_folio(folio, size);
if (!folio->mapping)
return -EFAULT;
/* folio is wholly inside EOF */
if (folio_next_index(folio) - 1 < index)
return folio_size(folio);
/* folio is wholly past EOF */
if (folio->index > index || !offset)
return -EFAULT;
/* folio is partially inside EOF */
return offset;
}
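/*
 * Hypothetical sketch: how a ->page_mkwrite() style handler might use
 * folio_mkwrite_check_truncate() on an already-locked folio. The simplified
 * signature and the myfs_* name are illustrative only.
 */
#if 0
static vm_fault_t myfs_mkwrite_locked(struct folio *folio, struct inode *inode)
{
	ssize_t len = folio_mkwrite_check_truncate(folio, inode);

	if (len < 0) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;	/* the folio was truncated */
	}
	/* ... prepare the first len bytes of the folio for writing ... */
	return VM_FAULT_LOCKED;
}
#endif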
/**
* i_blocks_per_folio - How many blocks fit in this folio.
* @inode: The inode which contains the blocks.
* @folio: The folio.
*
* If the block size is larger than the size of this folio, return zero.
*
* Context: The caller should hold a refcount on the folio to prevent it
* from being split.
* Return: The number of filesystem blocks covered by this folio.
*/
static inline
unsigned int i_blocks_per_folio(const struct inode *inode,
const struct folio *folio)
{
return folio_size(folio) >> inode->i_blkbits;
}
#endif /* _LINUX_PAGEMAP_H */
// SPDX-License-Identifier: GPL-2.0
/*
* device.h - generic, centralized driver model
*
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
* See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _DEVICE_H_
#define _DEVICE_H_
#include <linux/dev_printk.h>
#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/devres.h>
#include <linux/device/driver.h>
#include <linux/cleanup.h>
#include <asm/device.h>
struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct module;
struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
struct msi_device_data;
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
* @subsys: subsystem of the devices to attach to
* @node: the list of functions registered at the subsystem
* @add_dev: device hookup to device function handler
* @remove_dev: device hookup to device function handler
*
* Simple interfaces attached to a subsystem. Multiple interfaces can
* attach to a subsystem and its devices. Unlike drivers, they do not
* exclusively claim or control devices. Interfaces usually represent
* a specific functionality of a subsystem/class of devices.
*/
struct subsys_interface {
const char *name;
const struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};
int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);
int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups);
int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups);
/*
* The type of device, "struct device" is embedded in. A class
* or bus can contain devices of different types
* like "partitions" and "disks", "mouse" and "event".
* This identifies the device type and carries type-specific
* information, equivalent to the kobj_type of a kobject.
* If "name" is specified, the uevent will contain it in
* the DEVTYPE variable.
*/
struct device_type {
const char *name;
const struct attribute_group **groups;
int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
char *(*devnode)(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid);
void (*release)(struct device *dev);
const struct dev_pm_ops *pm;
};
/**
* struct device_attribute - Interface for exporting device attributes.
* @attr: sysfs attribute definition.
* @show: Show handler.
* @store: Store handler.
*/
struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t (*store)(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
};
/**
* struct dev_ext_attribute - Exported device attribute with extra context.
* @attr: Exported device attribute.
* @var: Pointer to context.
*/
struct dev_ext_attribute {
struct device_attribute attr;
void *var;
};
ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
char *buf);
/**
* DEVICE_ATTR - Define a device attribute.
* @_name: Attribute name.
* @_mode: File mode.
* @_show: Show handler. Optional, but mandatory if attribute is readable.
* @_store: Store handler. Optional, but mandatory if attribute is writable.
*
* Convenience macro for defining a struct device_attribute.
*
* For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
*
* .. code-block:: c
*
* struct device_attribute dev_attr_foo = {
* .attr = { .name = "foo", .mode = 0644 },
* .show = foo_show,
* .store = foo_store,
* };
*/
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
/**
* DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
* @_name: Attribute name.
* @_mode: File mode.
* @_show: Show handler. Optional, but mandatory if attribute is readable.
* @_store: Store handler. Optional, but mandatory if attribute is writable.
*
* Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
*/
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_PREALLOC(_name, _mode, _show, _store)
/**
* DEVICE_ATTR_RW - Define a read-write device attribute.
* @_name: Attribute name.
*
* Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
* and @_store is <_name>_store.
*/
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
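/*
 * Hypothetical sketch: DEVICE_ATTR_RW(foo) expects handlers named foo_show()
 * and foo_store(). The attribute name "foo" and its backing variable are
 * illustrative only.
 */
#if 0
static unsigned long foo_value;

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%lu\n", foo_value);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int ret = kstrtoul(buf, 0, &foo_value);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(foo);
#endif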
/**
* DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
* @_name: Attribute name.
*
* Like DEVICE_ATTR_RW(), but @_mode is 0600.
*/
#define DEVICE_ATTR_ADMIN_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
/**
* DEVICE_ATTR_RO - Define a readable device attribute.
* @_name: Attribute name.
*
* Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
*/
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
/**
* DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
* @_name: Attribute name.
*
* Like DEVICE_ATTR_RO(), but @_mode is 0400.
*/
#define DEVICE_ATTR_ADMIN_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
/**
* DEVICE_ATTR_WO - Define an admin-only writable device attribute.
* @_name: Attribute name.
*
* Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
*/
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
/**
* DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
* @_name: Attribute name.
* @_mode: File mode.
* @_var: Identifier of unsigned long.
*
* Like DEVICE_ATTR(), but @_show and @_store are automatically provided
* such that reads and writes to the attribute from userspace affect @_var.
*/
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
/**
* DEVICE_INT_ATTR - Define a device attribute backed by an int.
* @_name: Attribute name.
* @_mode: File mode.
* @_var: Identifier of int.
*
* Like DEVICE_ULONG_ATTR(), but @_var is an int.
*/
#define DEVICE_INT_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
/**
* DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
* @_name: Attribute name.
* @_mode: File mode.
* @_var: Identifier of bool.
*
* Like DEVICE_ULONG_ATTR(), but @_var is a bool.
*/
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
/**
* DEVICE_STRING_ATTR_RO - Define a device attribute backed by a r/o string.
* @_name: Attribute name.
* @_mode: File mode.
* @_var: Identifier of string.
*
* Like DEVICE_ULONG_ATTR(), but @_var is a string. Because the length of the
* string allocation is unknown, the attribute must be read-only.
*/
#define DEVICE_STRING_ATTR_RO(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, (_mode) & ~0222, device_show_string, NULL), (_var) }
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
int device_create_file(struct device *device,
const struct device_attribute *entry);
void device_remove_file(struct device *dev,
const struct device_attribute *attr);
bool device_remove_file_self(struct device *dev,
const struct device_attribute *attr);
int __must_check device_create_bin_file(struct device *dev,
const struct bin_attribute *attr);
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
* @type: Type to allocate per-cpu memory for
*
* Managed alloc_percpu. Per-cpu memory allocated with this function is
* automatically freed on driver detach.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
#define devm_alloc_percpu(dev, type) \
((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
__alignof__(type)))
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
size_t align);
void devm_free_percpu(struct device *dev, void __percpu *pdata);
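/*
 * Hypothetical sketch of the documented devm_alloc_percpu() usage: per-cpu
 * memory allocated against a device is freed automatically on driver detach.
 * struct mydrv_stats and mydrv_alloc_stats() are illustrative names only.
 */
#if 0
struct mydrv_stats {
	u64 packets;
};

static int mydrv_alloc_stats(struct device *dev,
			     struct mydrv_stats __percpu **stats)
{
	*stats = devm_alloc_percpu(dev, struct mydrv_stats);
	if (!*stats)
		return -ENOMEM;
	return 0;
}
#endif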
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
* sg limitations.
*/
unsigned int max_segment_size;
unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
* @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
* @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
* @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
* @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
*/
enum device_link_state {
DL_STATE_NONE = -1,
DL_STATE_DORMANT = 0,
DL_STATE_AVAILABLE,
DL_STATE_CONSUMER_PROBE,
DL_STATE_ACTIVE,
DL_STATE_SUPPLIER_UNBIND,
};
/*
* Device link flags.
*
* STATELESS: The core will not remove this link automatically.
* AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
* AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
* SYNC_STATE_ONLY: Link only affects sync_state() behavior.
* INFERRED: Inferred from data (eg: firmware) and not from driver actions.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
#define DL_FLAG_PM_RUNTIME BIT(2)
#define DL_FLAG_RPM_ACTIVE BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
#define DL_FLAG_INFERRED BIT(8)
#define DL_FLAG_CYCLE BIT(9)
/**
* enum dl_dev_state - Device driver presence tracking information.
* @DL_DEV_NO_DRIVER: There is no driver attached to the device.
* @DL_DEV_PROBING: A driver is probing.
* @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
* @DL_DEV_UNBINDING: The driver is unbinding from the device.
*/
enum dl_dev_state {
DL_DEV_NO_DRIVER = 0,
DL_DEV_PROBING,
DL_DEV_DRIVER_BOUND,
DL_DEV_UNBINDING,
};
/**
 * enum device_removable - Whether the device is removable. The criteria for a
 * device to be classified as removable are determined by its subsystem or bus.
 * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
 * device (default).
 * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
* @DEVICE_FIXED: Device is not removable by the user.
* @DEVICE_REMOVABLE: Device is removable by the user.
*/
enum device_removable {
DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
DEVICE_REMOVABLE_UNKNOWN,
DEVICE_FIXED,
DEVICE_REMOVABLE,
};
/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
* @defer_sync: Hook to global list of devices that have deferred sync_state.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
struct list_head defer_sync;
enum dl_dev_state status;
};
/**
* struct dev_msi_info - Device data related to MSI
* @domain: The MSI interrupt domain associated to the device
* @data: Pointer to MSI device data
*/
struct dev_msi_info {
#ifdef CONFIG_GENERIC_MSI_IRQ
struct irq_domain *domain;
struct msi_device_data *data;
#endif
};
/**
* enum device_physical_location_panel - Describes which panel surface of the
* system's housing the device connection point resides on.
* @DEVICE_PANEL_TOP: Device connection point is on the top panel.
* @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
* @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
* @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
* @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
* @DEVICE_PANEL_BACK: Device connection point is on the back panel.
* @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
*/
enum device_physical_location_panel {
DEVICE_PANEL_TOP,
DEVICE_PANEL_BOTTOM,
DEVICE_PANEL_LEFT,
DEVICE_PANEL_RIGHT,
DEVICE_PANEL_FRONT,
DEVICE_PANEL_BACK,
DEVICE_PANEL_UNKNOWN,
};
/**
* enum device_physical_location_vertical_position - Describes vertical
* position of the device connection point on the panel surface.
* @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
* @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
* @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
*/
enum device_physical_location_vertical_position {
DEVICE_VERT_POS_UPPER,
DEVICE_VERT_POS_CENTER,
DEVICE_VERT_POS_LOWER,
};
/**
* enum device_physical_location_horizontal_position - Describes horizontal
* position of the device connection point on the panel surface.
* @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
* @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
* @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
*/
enum device_physical_location_horizontal_position {
DEVICE_HORI_POS_LEFT,
DEVICE_HORI_POS_CENTER,
DEVICE_HORI_POS_RIGHT,
};
/**
* struct device_physical_location - Device data related to physical location
* of the device connection point.
* @panel: Panel surface of the system's housing that the device connection
* point resides on.
* @vertical_position: Vertical position of the device connection point within
* the panel.
* @horizontal_position: Horizontal position of the device connection point
* within the panel.
* @dock: Set if the device connection point resides in a docking station or
* port replicator.
* @lid: Set if this device connection point resides on the lid of laptop
* system.
*/
struct device_physical_location {
enum device_physical_location_panel panel;
enum device_physical_location_vertical_position vertical_position;
enum device_physical_location_horizontal_position horizontal_position;
bool dock;
bool lid;
};
/**
* struct device - The basic device structure
 * @parent: The device's "parent" device, the device to which it is attached.
 * In most cases, a parent device is some sort of bus or host
 * controller. If parent is NULL, the device is a top-level device,
 * which is usually not what you want.
* @p: Holds the private data of the driver core portions of the device.
* See the comment of the struct device_private for detail.
* @kobj: A top-level, abstract class from which other classes are derived.
* @init_name: Initial name of the device.
* @type: The type of device.
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
* Example: For devices on custom boards, as typical of embedded
* and SOC based hardware, Linux often uses platform_data to point
* to board-specific structures describing devices and how they
* are wired. That can include what ports are available, chip
* variants, which GPIO pins act in what additional roles, and so
* on. This shrinks the "Board Support Packages" (BSPs) and
* minimizes board-specific #ifdefs in drivers.
* @driver_data: Private pointer for driver specific info.
* @links: Links to suppliers and consumers of this device.
* @power: For device power management.
* See Documentation/driver-api/pm/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
* @em_pd: device's energy model performance domain
* @pins: For device pin management.
* See Documentation/driver-api/pin-control.rst for details.
* @msi: MSI related data
* @numa_node: NUMA node this device is close to.
* @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not
 * all hardware supports 64-bit addresses for consistent allocations
 * such as descriptors.
* @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
* DMA limit than the device itself supports.
* @dma_range_map: map for DMA memory ranges relative to that of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
* @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
* @dma_io_tlb_pools: List of transient swiotlb memory pools.
* @dma_io_tlb_lock: Protects changes to the list of active pools.
* @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
* @devt: For creating the sysfs "dev".
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
* @devres_head: The resources list of the device.
* @class: The class of the device.
* @groups: Optional attribute groups.
* @release: Callback to free the device after all references have
* gone away. This should be set by the allocator of the
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
* @iommu: Per device generic IOMMU runtime data
* @physical_location: Describes physical location of the device connection
* point in the system housing.
* @removable: Whether the device can be removed from the system. This
* should be set by the subsystem / bus driver that discovered
* the device.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
* @state_synced: The hardware state of this device has been synced to match
* the software state of this device by calling the driver/bus
* sync_state() callback.
* @can_match: The device has matched with a driver at least once or it is in
* a bus (like AMBA) which can't check for matching drivers until
* other devices probe successfully.
* @dma_coherent: this particular device is dma coherent, even if the
* architecture supports non-coherent devices.
* @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
* streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
 * and optionally (if the coherent mask is large enough) also
* for dma allocations. This flag is managed by the dma ops
* instance from ->dma_supported.
* @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
* @dma_iommu: Device is using default IOMMU implementation for DMA and
* doesn't rely on dma_ops structure.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
* that the device model core needs to model the system. Most subsystems,
* however, track additional information about the devices they host. As a
* result, it is rare for devices to be represented by bare device structures;
* instead, that structure, like kobject structures, is usually embedded within
* a higher-level representation of the device.
*/
struct device {
struct kobject kobj;
struct device *parent;
struct device_private *p;
const char *init_name; /* initial name of the device */
const struct device_type *type;
const struct bus_type *bus; /* type of bus device is on */
struct device_driver *driver; /* which driver has allocated this
device */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
#ifdef CONFIG_ENERGY_MODEL
struct em_perf_domain *em_pd;
#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
struct dev_msi_info msi;
#ifdef CONFIG_ARCH_HAS_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
not all hardware supports
64 bit addresses for consistent
 allocations such as descriptors. */
u64 bus_dma_limit; /* upstream dma constraint */
const struct bus_dma_region *dma_range_map;
struct device_dma_parameters *dma_parms;
struct list_head dma_pools; /* dma pools (if dma'ble) */
#ifdef CONFIG_DMA_DECLARE_COHERENT
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
#endif
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
#ifdef CONFIG_SWIOTLB
struct io_tlb_mem *dma_io_tlb_mem;
#endif
#ifdef CONFIG_SWIOTLB_DYNAMIC
struct list_head dma_io_tlb_pools;
spinlock_t dma_io_tlb_lock;
bool dma_uses_io_tlb;
#endif
/* arch specific additions */
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
struct fwnode_handle *fwnode; /* firmware device node */
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
#endif
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
spinlock_t devres_lock;
struct list_head devres_head;
const struct class *class;
const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
struct dev_iommu *iommu;
struct device_physical_location *physical_location;
enum device_removable removable;
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
bool state_synced:1;
bool can_match:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_coherent:1;
#endif
#ifdef CONFIG_DMA_OPS_BYPASS
bool dma_ops_bypass : 1;
#endif
#ifdef CONFIG_DMA_NEED_SYNC
bool dma_skip_sync:1;
#endif
#ifdef CONFIG_IOMMU_DMA
bool dma_iommu:1;
#endif
};
/**
* struct device_link - Device link representation.
* @supplier: The device on the supplier end of the link.
* @s_node: Hook to the supplier device's list of links to consumers.
* @consumer: The device on the consumer end of the link.
* @c_node: Hook to the consumer device's list of links to suppliers.
* @link_dev: device used to expose link details in sysfs
* @status: The state of the link (with respect to the presence of drivers).
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @kref: Count repeated addition of the same link.
* @rm_work: Work structure used for removing the link.
* @supplier_preactivated: Supplier has been made active before consumer probe.
*/
struct device_link {
struct device *supplier;
struct list_head s_node;
struct device *consumer;
struct list_head c_node;
struct device link_dev;
enum device_link_state status;
u32 flags;
refcount_t rpm_active;
struct kref kref;
struct work_struct rm_work;
bool supplier_preactivated; /* Owned by consumer probe. */
};
#define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj)
/**
* device_iommu_mapped - Returns true when the device DMA is translated
* by an IOMMU
* @dev: Device to perform the check on
*/
static inline bool device_iommu_mapped(struct device *dev)
{
return (dev->iommu_group != NULL);
}
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
/**
* dev_name - Return a device's name.
* @dev: Device with name to get.
* Return: The kobject name of the device, or its initial name if unavailable.
*/
static inline const char *dev_name(const struct device *dev)
{
/* Use the init name until the kobject becomes available */
if (dev->init_name)
return dev->init_name;
return kobject_name(&dev->kobj);
}
/**
* dev_bus_name - Return a device's bus/class name, if at all possible
* @dev: struct device to get the bus/class name of
*
* Will return the name of the bus/class the device is attached to. If it is
* not attached to a bus/class, an empty string will be returned.
*/
static inline const char *dev_bus_name(const struct device *dev)
{
return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
}
__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
{
return dev->numa_node;
}
static inline void set_dev_node(struct device *dev, int node)
{
dev->numa_node = node;
}
#else
static inline int dev_to_node(struct device *dev)
{
return NUMA_NO_NODE;
}
static inline void set_dev_node(struct device *dev, int node)
{
}
#endif
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
return dev->msi.domain;
#else
return NULL;
#endif
}
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
dev->msi.domain = d;
#endif
}
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
}
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
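/*
 * Hypothetical sketch of the drvdata helpers: stash driver state when binding
 * to a device and retrieve it again later. The mydrv_* names are illustrative
 * only.
 */
#if 0
struct mydrv_data {
	int irq;
};

static int mydrv_bind(struct device *dev)
{
	struct mydrv_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	dev_set_drvdata(dev, data);
	return 0;
}

static void mydrv_unbind(struct device *dev)
{
	struct mydrv_data *data = dev_get_drvdata(dev);

	kfree(data);
}
#endif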
static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
{
return dev ? dev->power.subsys_data : NULL;
}
static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
return dev->kobj.uevent_suppress;
}
static inline void dev_set_uevent_suppress(struct device *dev, int val)
{
dev->kobj.uevent_suppress = val;
}
static inline int device_is_registered(struct device *dev)
{
return dev->kobj.state_in_sysfs;
}
static inline void device_enable_async_suspend(struct device *dev)
{
if (!dev->power.is_prepared)
dev->power.async_suspend = true;
}
static inline void device_disable_async_suspend(struct device *dev)
{
if (!dev->power.is_prepared)
dev->power.async_suspend = false;
}
static inline bool device_async_suspend_enabled(struct device *dev)
{
return !!dev->power.async_suspend;
}
static inline bool device_pm_not_required(struct device *dev)
{
return dev->power.no_pm;
}
static inline void device_set_pm_not_required(struct device *dev)
{
dev->power.no_pm = true;
#ifdef CONFIG_PM
dev->power.no_callbacks = true;
#endif
}
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
dev->power.syscore = val;
#endif
}
static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
{
dev->power.driver_flags = flags;
}
static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
{
return !!(dev->power.driver_flags & flags);
}
static inline bool dev_pm_smart_suspend(struct device *dev)
{
#ifdef CONFIG_PM_SLEEP
return dev->power.smart_suspend;
#else
return false;
#endif
}
/*
* dev_pm_set_strict_midlayer - Update the device's power.strict_midlayer flag
* @dev: Target device.
* @val: New flag value.
*
* When set, power.strict_midlayer means that the middle layer power management
* code (typically, a bus type or a PM domain) does not expect its runtime PM
* suspend callback to be invoked at all during system-wide PM transitions and
* it does not expect its runtime PM resume callback to be invoked at any point
* when runtime PM is disabled for the device during system-wide PM transitions.
*/
static inline void dev_pm_set_strict_midlayer(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
dev->power.strict_midlayer = val;
#endif
}
static inline bool dev_pm_strict_midlayer_is_set(struct device *dev)
{
#ifdef CONFIG_PM_SLEEP
return dev->power.strict_midlayer;
#else
return false;
#endif
}
static inline void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
}
static inline int device_lock_interruptible(struct device *dev)
{
return mutex_lock_interruptible(&dev->mutex);
}
static inline int device_trylock(struct device *dev)
{
return mutex_trylock(&dev->mutex);
}
static inline void device_unlock(struct device *dev)
{
mutex_unlock(&dev->mutex);
}
DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
static inline void device_lock_assert(struct device *dev)
{
lockdep_assert_held(&dev->mutex);
}
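/*
 * A minimal usage sketch (the helper name is hypothetical): serializing
 * against driver bind/unbind with the device lock defined above.
 */
static inline void example_with_device_locked(struct device *dev)
{
        device_lock(dev);
        /* The driver cannot be bound or unbound while the lock is held. */
        device_lock_assert(dev);
        device_unlock(dev);
}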
static inline bool dev_has_sync_state(struct device *dev)
{
if (!dev)
return false;
if (dev->driver && dev->driver->sync_state)
return true;
if (dev->bus && dev->bus->sync_state)
return true;
return false;
}
static inline int dev_set_drv_sync_state(struct device *dev,
void (*fn)(struct device *dev))
{
if (!dev || !dev->driver)
return 0;
if (dev->driver->sync_state && dev->driver->sync_state != fn)
return -EBUSY;
if (!dev->driver->sync_state)
dev->driver->sync_state = fn;
return 0;
}
static inline void dev_set_removable(struct device *dev,
enum device_removable removable)
{
dev->removable = removable;
}
static inline bool dev_is_removable(struct device *dev)
{
return dev->removable == DEVICE_REMOVABLE;
}
static inline bool dev_removable_is_valid(struct device *dev)
{
return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
}
/*
* High level routines for use by the bus drivers
*/
int __must_check device_register(struct device *dev);
void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __must_check device_add(struct device *dev);
void device_del(struct device *dev);
DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
int device_for_each_child(struct device *parent, void *data,
device_iter_t fn);
int device_for_each_child_reverse(struct device *parent, void *data,
device_iter_t fn);
int device_for_each_child_reverse_from(struct device *parent,
struct device *from, void *data,
device_iter_t fn);
struct device *device_find_child(struct device *parent, const void *data,
device_match_t match);
/**
* device_find_child_by_name - device iterator for locating a child device.
* @parent: parent struct device
* @name: name of the child device
*
* This is similar to the device_find_child() function above, but it
* returns a reference to a device that has the name @name.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
static inline struct device *device_find_child_by_name(struct device *parent,
const char *name)
{
return device_find_child(parent, name, device_match_name);
}
/**
* device_find_any_child - device iterator for locating a child device, if any.
* @parent: parent struct device
*
* This is similar to the device_find_child() function above, but it
* returns a reference to a child device, if any.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
static inline struct device *device_find_any_child(struct device *parent)
{
return device_find_child(parent, NULL, device_match_any);
}
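/*
 * A minimal usage sketch (names are hypothetical): looking up a child by
 * name and dropping the reference afterwards, as the NOTE above requires.
 *
 *	struct device *child = device_find_child_by_name(parent, "port0");
 *
 *	if (child) {
 *		examine_child(child);
 *		put_device(child);
 *	}
 */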
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
static inline bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}
#define __device_lock_set_class(dev, name, key) \
do { \
struct device *__d2 __maybe_unused = dev; \
lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \
} while (0)
/**
* device_lock_set_class - Specify a temporary lock class while a device
* is attached to a driver
* @dev: device to modify
* @key: lock class key data
*
* This must be called with the device_lock() already held, for example
* from driver ->probe(). Take care to only override the default
* lockdep_no_validate class.
*/
#ifdef CONFIG_LOCKDEP
#define device_lock_set_class(dev, key) \
do { \
struct device *__d = dev; \
dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \
&__lockdep_no_validate__), \
"overriding existing custom lock class\n"); \
__device_lock_set_class(__d, #key, key); \
} while (0)
#else
#define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key)
#endif
/**
* device_lock_reset_class - Return a device to the default lockdep novalidate state
* @dev: device to modify
*
* This must be called with the device_lock() already held, for example
* from driver ->remove().
*/
#define device_lock_reset_class(dev) \
do { \
struct device *__d __maybe_unused = dev; \
lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \
_THIS_IP_); \
} while (0)
void lock_device_hotplug(void);
void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
int device_offline(struct device *dev);
int device_online(struct device *dev);
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
int device_add_of_node(struct device *dev, struct device_node *of_node);
void device_remove_of_node(struct device *dev);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode);
static inline struct device_node *dev_of_node(struct device *dev)
{
if (!IS_ENABLED(CONFIG_OF) || !dev)
return NULL;
return dev->of_node;
}
static inline int dev_num_vf(struct device *dev)
{
if (dev->bus && dev->bus->num_vf)
return dev->bus->num_vf(dev);
return 0;
}
/*
* Root device objects for grouping under /sys/devices
*/
struct device *__root_device_register(const char *name, struct module *owner);
/* This is a macro to avoid include problems with THIS_MODULE */
#define root_device_register(name) \
__root_device_register(name, THIS_MODULE)
void root_device_unregister(struct device *root);
static inline void *dev_get_platdata(const struct device *dev)
{
return dev->platform_data;
}
/*
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
int __must_check device_driver_attach(const struct device_driver *drv,
struct device *dev);
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __must_check device_attach(struct device *dev);
int __must_check driver_attach(const struct device_driver *drv);
void device_initial_probe(struct device *dev);
int __must_check device_reprobe(struct device *dev);
bool device_is_bound(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
__printf(5, 6) struct device *
device_create(const struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const char *fmt, ...);
__printf(6, 7) struct device *
device_create_with_groups(const struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const struct attribute_group **groups,
const char *fmt, ...);
void device_destroy(const struct class *cls, dev_t devt);
int __must_check device_add_groups(struct device *dev,
const struct attribute_group **groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups);
static inline int __must_check device_add_group(struct device *dev,
const struct attribute_group *grp)
{
const struct attribute_group *groups[] = { grp, NULL };
return device_add_groups(dev, groups);
}
static inline void device_remove_group(struct device *dev,
const struct attribute_group *grp)
{
const struct attribute_group *groups[] = { grp, NULL };
device_remove_groups(dev, groups);
}
int __must_check devm_device_add_group(struct device *dev,
const struct attribute_group *grp);
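/*
 * A minimal usage sketch for a driver probe() path, assuming a hypothetical
 * attribute group "example_group"; the devm_ variant removes the group
 * automatically when the driver unbinds.
 *
 *	ret = devm_device_add_group(dev, &example_group);
 *	if (ret)
 *		return ret;
 */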
/*
* get_device - atomically increment the reference count for the device.
*/
struct device *get_device(struct device *dev);
void put_device(struct device *dev);
DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
int devtmpfs_mount(void);
#else
static inline int devtmpfs_mount(void) { return 0; }
#endif
/* drivers/base/power/shutdown.c */
void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
const char *dev_driver_string(const struct device *dev);
/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
void device_link_wait_removal(void);
static inline bool device_link_test(const struct device_link *link, u32 flags)
{
return !!(link->flags & flags);
}
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
MODULE_ALIAS("char-major-" __stringify(major) "-*")
#endif /* _DEVICE_H_ */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
* Copyright (C) 2005 SGI, Christoph Lameter
* Copyright (C) 2006 Nick Piggin
* Copyright (C) 2012 Konstantin Khlebnikov
* Copyright (C) 2016 Intel, Matthew Wilcox
* Copyright (C) 2016 Intel, Ross Zwisler
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>
#include "radix-tree.h"
/*
* Radix tree node cache.
*/
struct kmem_cache *radix_tree_node_cachep;
/*
* The radix tree is variable-height, so an insert operation not only has
* to build the branch to its corresponding item, it also has to build the
* branch to existing items if the size has to be increased (by
* radix_tree_extend).
*
* The worst case is a zero height tree with just a single item at index 0,
* and then inserting an item at index ULONG_MAX. This requires 2 new branches
* of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
* Hence:
*/
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
/*
* The IDR does not have to be as high as the radix tree since it uses
* signed integers, not unsigned longs.
*/
#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
* Per-cpu pool of preloaded nodes
*/
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
.lock = INIT_LOCAL_LOCK(lock),
};
EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}
static inline void *node_to_entry(void *ptr)
{
return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}
#define RADIX_TREE_RETRY XA_RETRY_ENTRY
static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
return parent ? slot - parent->slots : 0;
}
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
struct radix_tree_node **nodep, unsigned long index)
{
unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
*nodep = (void *)entry;
return offset;
}
static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
int offset)
{
__set_bit(offset, node->tags[tag]);
}
static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
int offset)
{
__clear_bit(offset, node->tags[tag]);
}
static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
int offset)
{
return test_bit(offset, node->tags[tag]);
}
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}
static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}
static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}
static inline bool is_idr(const struct radix_tree_root *root)
{
return !!(root->xa_flags & ROOT_IS_IDR);
}
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
*/
static inline int any_tag_set(const struct radix_tree_node *node,
unsigned int tag)
{
unsigned idx;
for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
if (node->tags[tag][idx])
return 1;
}
return 0;
}
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
/**
* radix_tree_find_next_bit - find the next set bit in a memory region
*
* @node: where to begin the search
* @tag: the tag index
* @offset: the bitnumber to start searching at
*
* Unrollable variant of find_next_bit() for constant size arrays.
* Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
* Returns next bit offset, or size if nothing found.
*/
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
unsigned long offset)
{
const unsigned long *addr = node->tags[tag];
if (offset < RADIX_TREE_MAP_SIZE) {
unsigned long tmp;
addr += offset / BITS_PER_LONG;
tmp = *addr >> (offset % BITS_PER_LONG);
if (tmp)
return __ffs(tmp) + offset;
offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
while (offset < RADIX_TREE_MAP_SIZE) {
tmp = *++addr;
if (tmp)
return __ffs(tmp) + offset;
offset += BITS_PER_LONG;
}
}
return RADIX_TREE_MAP_SIZE;
}
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
return iter->index & RADIX_TREE_MAP_MASK;
}
/*
* The maximum index which can be stored in a radix tree
*/
static inline unsigned long shift_maxindex(unsigned int shift)
{
return (RADIX_TREE_MAP_SIZE << shift) - 1;
}
static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
return shift_maxindex(node->shift);
}
static unsigned long next_index(unsigned long index,
const struct radix_tree_node *node,
unsigned long offset)
{
return (index & ~node_maxindex(node)) + (offset << node->shift);
}
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
*/
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
struct radix_tree_root *root,
unsigned int shift, unsigned int offset,
unsigned int count, unsigned int nr_values)
{
struct radix_tree_node *ret = NULL;
/*
* Preload code isn't irq safe and it doesn't make sense to use
* preloading during an interrupt anyway as all the allocations have
* to be atomic. So just do normal allocation when in interrupt.
*/
if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
struct radix_tree_preload *rtp;
/*
* Even if the caller has preloaded, try to allocate from the
* cache first for the new node to get accounted to the memory
* cgroup.
*/
ret = kmem_cache_alloc(radix_tree_node_cachep,
gfp_mask | __GFP_NOWARN);
if (ret)
goto out;
/*
* Provided the caller has preloaded here, we will always
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes;
rtp->nodes = ret->parent;
rtp->nr--;
}
/*
* Update the allocation stack trace as this is more useful
* for debugging.
*/
kmemleak_update_trace(ret);
goto out;
}
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
BUG_ON(radix_tree_is_internal_node(ret));
if (ret) {
ret->shift = shift;
ret->offset = offset;
ret->count = count;
ret->nr_values = nr_values;
ret->parent = parent;
ret->array = root;
}
return ret;
}
void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
/*
* Must only free zeroed nodes into the slab. We can be left with
* non-NULL entries by radix_tree_free_nodes, so clear the entries
* and tags here.
*/
memset(node->slots, 0, sizeof(node->slots));
memset(node->tags, 0, sizeof(node->tags));
INIT_LIST_HEAD(&node->private_list);
kmem_cache_free(radix_tree_node_cachep, node);
}
static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*
* To make use of this facility, the radix tree must be initialised without
* __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
*/
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
int ret = -ENOMEM;
/*
* Nodes preloaded by one cgroup can be used by another cgroup, so
* they should never be accounted to any particular memory cgroup.
*/
gfp_mask &= ~__GFP_ACCOUNT;
local_lock(&radix_tree_preloads.lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
local_unlock(&radix_tree_preloads.lock);
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
local_lock(&radix_tree_preloads.lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
rtp->nodes = node;
rtp->nr++;
} else {
kmem_cache_free(radix_tree_node_cachep, node);
}
}
ret = 0;
out:
return ret;
}
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*
* To make use of this facility, the radix tree must be initialised without
* __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
*/
int radix_tree_preload(gfp_t gfp_mask)
{
/* Warn on non-sensical use... */
WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
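/*
 * A minimal sketch of the insertion pattern described above, assuming the
 * caller owns the tree, a spinlock protecting it, and the item to store;
 * the names are hypothetical.
 */
static int __maybe_unused example_radix_insert(struct radix_tree_root *tree,
                                               spinlock_t *lock,
                                               unsigned long index, void *item)
{
        int error = radix_tree_preload(GFP_KERNEL);

        if (error)
                return error;
        spin_lock(lock);
        error = radix_tree_insert(tree, index, item);
        spin_unlock(lock);
        radix_tree_preload_end();       /* drops the preload lock, re-enables preemption */
        return error;
}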
/*
* The same as above function, except we don't guarantee preloading happens.
* We do it, if we decide it helps. On success, return zero with preemption
* disabled. On error, return -ENOMEM with preemption not disabled.
*/
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
local_lock(&radix_tree_preloads.lock);
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
*nodep = node;
if (likely(radix_tree_is_internal_node(node))) {
node = entry_to_node(node);
*maxindex = node_maxindex(node);
return node->shift + RADIX_TREE_MAP_SHIFT;
}
*maxindex = 0;
return 0;
}
/*
* Extend a radix tree so it can store key @index.
*/
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
unsigned long index, unsigned int shift)
{
void *entry;
unsigned int maxshift;
int tag;
/* Figure out what the shift should be. */
maxshift = shift;
while (index > shift_maxindex(maxshift))
maxshift += RADIX_TREE_MAP_SHIFT;
entry = rcu_dereference_raw(root->xa_head);
if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
goto out;
do {
struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
root, shift, 0, 1, 0);
if (!node)
return -ENOMEM;
if (is_idr(root)) {
all_tag_set(node, IDR_FREE);
if (!root_tag_get(root, IDR_FREE)) {
tag_clear(node, IDR_FREE, 0);
root_tag_set(root, IDR_FREE);
}
} else {
/* Propagate the aggregated tag info to the new child */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
}
BUG_ON(shift > BITS_PER_LONG);
if (radix_tree_is_internal_node(entry)) {
entry_to_node(entry)->parent = node;
} else if (xa_is_value(entry)) {
/* Moving a value entry root->xa_head to a node */
node->nr_values = 1;
}
/*
* entry was already in the radix tree, so we do not need
* rcu_assign_pointer here
*/
node->slots[0] = (void __rcu *)entry;
entry = node_to_entry(node);
rcu_assign_pointer(root->xa_head, entry);
shift += RADIX_TREE_MAP_SHIFT;
} while (shift <= maxshift);
out:
return maxshift + RADIX_TREE_MAP_SHIFT;
}
/**
* radix_tree_shrink - shrink radix tree to minimum height
* @root: radix tree root
*/
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
bool shrunk = false;
for (;;) {
struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
struct radix_tree_node *child;
if (!radix_tree_is_internal_node(node))
break;
node = entry_to_node(node);
/*
* If the candidate node has more than one child, or its child
* is not at the leftmost slot, we cannot shrink.
*/
if (node->count != 1)
break;
child = rcu_dereference_raw(node->slots[0]);
if (!child)
break;
/*
* For an IDR, we must not shrink entry 0 into the root in
* case somebody calls idr_replace() with a pointer that
* appears to be an internal entry
*/
if (!node->shift && is_idr(root))
break;
if (radix_tree_is_internal_node(child))
entry_to_node(child)->parent = NULL;
/*
* We don't need rcu_assign_pointer(), since we are simply
* moving the node from one part of the tree to another: if it
* was safe to dereference the old pointer to it
* (node->slots[0]), it will be safe to dereference the new
* one (root->xa_head) as far as dependent read barriers go.
*/
root->xa_head = (void __rcu *)child;
if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
root_tag_clear(root, IDR_FREE);
/*
* We have a dilemma here. The node's slot[0] must not be
* NULLed in case there are concurrent lookups expecting to
* find the item. However if this was a bottom-level node,
* then it may be subject to the slot pointer being visible
* to callers dereferencing it. If item corresponding to
* slot[0] is subsequently deleted, these callers would expect
* their slot to become empty sooner or later.
*
* For example, lockless pagecache will look up a slot, deref
* the page pointer, and if the page has 0 refcount it means it
* was concurrently deleted from pagecache so try the deref
* again. Fortunately there is already a requirement for logic
* to retry the entire slot lookup -- the indirect pointer
* problem (replacing direct root node with an indirect pointer
* also results in a stale slot). So tag the slot as indirect
* to force callers to retry.
*/
node->count = 0;
if (!radix_tree_is_internal_node(child)) {
node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
}
WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node);
shrunk = true;
}
return shrunk;
}
static bool delete_node(struct radix_tree_root *root,
struct radix_tree_node *node)
{
bool deleted = false;
do {
struct radix_tree_node *parent;
if (node->count) {
if (node_to_entry(node) ==
rcu_dereference_raw(root->xa_head))
deleted |= radix_tree_shrink(root);
return deleted;
}
parent = node->parent;
if (parent) {
parent->slots[node->offset] = NULL;
parent->count--;
} else {
/*
* Shouldn't the tags already have all been cleared
* by the caller?
*/
if (!is_idr(root))
root_tag_clear_all(root);
root->xa_head = NULL;
}
WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node);
deleted = true;
node = parent;
} while (node);
return deleted;
}
/**
* __radix_tree_create - create a slot in a radix tree
* @root: radix tree root
* @index: index key
* @nodep: returns node
* @slotp: returns slot
*
* Create, if necessary, and return the node and slot for an item
* at position @index in the radix tree @root.
*
* Until there is more than one item in the tree, no nodes are
* allocated and @root->xa_head is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL.
*
* Returns -ENOMEM, or 0 for success.
*/
static int __radix_tree_create(struct radix_tree_root *root,
unsigned long index, struct radix_tree_node **nodep,
void __rcu ***slotp)
{
struct radix_tree_node *node = NULL, *child;
void __rcu **slot = (void __rcu **)&root->xa_head;
unsigned long maxindex;
unsigned int shift, offset = 0;
unsigned long max = index;
gfp_t gfp = root_gfp_mask(root);
shift = radix_tree_load_root(root, &child, &maxindex);
/* Make sure the tree is high enough. */
if (max > maxindex) {
int error = radix_tree_extend(root, gfp, max, shift);
if (error < 0)
return error;
shift = error;
child = rcu_dereference_raw(root->xa_head);
}
while (shift > 0) {
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
child = radix_tree_node_alloc(gfp, node, root, shift,
offset, 0, 0);
if (!child)
return -ENOMEM;
rcu_assign_pointer(*slot, node_to_entry(child));
if (node)
node->count++;
} else if (!radix_tree_is_internal_node(child))
break;
/* Go a level down */
node = entry_to_node(child);
offset = radix_tree_descend(node, &child, index);
slot = &node->slots[offset];
}
if (nodep)
*nodep = node;
if (slotp)
*slotp = slot;
return 0;
}
/*
* Free any nodes below this node. The tree is presumed to not need
* shrinking, and any user data in the tree is presumed to not need a
* destructor called on it. If we need to add a destructor, we can
* add that functionality later. Note that we may not clear tags or
* slots from the tree as an RCU walker may still have a pointer into
* this subtree. We could replace the entries with RADIX_TREE_RETRY,
* but we'll still have to clear those in rcu_free.
*/
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
unsigned offset = 0;
struct radix_tree_node *child = entry_to_node(node);
for (;;) {
void *entry = rcu_dereference_raw(child->slots[offset]);
if (xa_is_node(entry) && child->shift) {
child = entry_to_node(entry);
offset = 0;
continue;
}
offset++;
while (offset == RADIX_TREE_MAP_SIZE) {
struct radix_tree_node *old = child;
offset = child->offset + 1;
child = child->parent;
WARN_ON_ONCE(!list_empty(&old->private_list));
radix_tree_node_free(old);
if (old == entry_to_node(node))
return;
}
}
}
static inline int insert_entries(struct radix_tree_node *node,
void __rcu **slot, void *item)
{
if (*slot)
return -EEXIST;
rcu_assign_pointer(*slot, item);
if (node) {
node->count++;
if (xa_is_value(item))
node->nr_values++;
}
return 1;
}
/**
* radix_tree_insert - insert into a radix tree
* @root: radix tree root
* @index: index key
* @item: item to insert
*
* Insert an item into the radix tree at position @index.
*/
int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
void *item)
{
struct radix_tree_node *node;
void __rcu **slot;
int error;
BUG_ON(radix_tree_is_internal_node(item));
error = __radix_tree_create(root, index, &node, &slot);
if (error)
return error;
error = insert_entries(node, slot, item);
if (error < 0)
return error;
if (node) {
unsigned offset = get_slot_offset(node, slot);
BUG_ON(tag_get(node, 0, offset));
BUG_ON(tag_get(node, 1, offset));
BUG_ON(tag_get(node, 2, offset));
} else {
BUG_ON(root_tags_get(root));
}
return 0;
}
EXPORT_SYMBOL(radix_tree_insert);
/**
* __radix_tree_lookup - lookup an item in a radix tree
* @root: radix tree root
* @index: index key
* @nodep: returns node
* @slotp: returns slot
*
* Lookup and return the item at position @index in the radix
* tree @root.
*
* Until there is more than one item in the tree, no nodes are
* allocated and @root->xa_head is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL.
*/
void *__radix_tree_lookup(const struct radix_tree_root *root,
unsigned long index, struct radix_tree_node **nodep,
void __rcu ***slotp)
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
void __rcu **slot;
restart:
parent = NULL;
slot = (void __rcu **)&root->xa_head;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return NULL;
while (radix_tree_is_internal_node(node)) {
unsigned offset;
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
slot = parent->slots + offset;
if (node == RADIX_TREE_RETRY)
goto restart;
if (parent->shift == 0)
break;
}
if (nodep)
*nodep = parent;
if (slotp)
*slotp = slot;
return node;
}
/**
* radix_tree_lookup_slot - lookup a slot in a radix tree
* @root: radix tree root
* @index: index key
*
* Returns: the slot corresponding to the position @index in the
* radix tree @root. This is useful for update-if-exists operations.
*
* This function can be called under rcu_read_lock iff the slot is not
* modified by radix_tree_replace_slot, otherwise it must be called
* exclusive from other writers. Any dereference of the slot must be done
* using radix_tree_deref_slot.
*/
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
unsigned long index)
{
void __rcu **slot;
if (!__radix_tree_lookup(root, index, NULL, &slot))
return NULL;
return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
/**
* radix_tree_lookup - perform lookup operation on a radix tree
* @root: radix tree root
* @index: index key
*
* Lookup the item at the position @index in the radix tree @root.
*
* This function can be called under rcu_read_lock, however the caller
* must manage lifetimes of leaf nodes (eg. RCU may also be used to free
* them safely). No RCU barriers are required to access or modify the
* returned item, however.
*/
void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
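/*
 * A minimal sketch of a lockless lookup under RCU, as permitted above; only
 * presence is tested here, so no reference to the entry escapes the
 * read-side critical section.
 */
static bool __maybe_unused example_entry_present(const struct radix_tree_root *tree,
                                                 unsigned long index)
{
        bool present;

        rcu_read_lock();
        present = radix_tree_lookup(tree, index) != NULL;
        rcu_read_unlock();
        return present;
}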
static void replace_slot(void __rcu **slot, void *item,
struct radix_tree_node *node, int count, int values)
{
if (node && (count || values)) {
node->count += count;
node->nr_values += values;
}
rcu_assign_pointer(*slot, item);
}
static bool node_tag_get(const struct radix_tree_root *root,
const struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
{
if (node)
return tag_get(node, tag, offset);
return root_tag_get(root, tag);
}
/*
* IDR users want to be able to store NULL in the tree, so if the slot isn't
* free, don't adjust the count, even if it's transitioning between NULL and
* non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
* have empty bits, but it only stores NULL in slots when they're being
* deleted.
*/
static int calculate_count(struct radix_tree_root *root,
struct radix_tree_node *node, void __rcu **slot,
void *item, void *old)
{
if (is_idr(root)) {
unsigned offset = get_slot_offset(node, slot);
bool free = node_tag_get(root, node, IDR_FREE, offset);
if (!free)
return 0;
if (!old)
return 1;
}
return !!item - !!old;
}
/**
* __radix_tree_replace - replace item in a slot
* @root: radix tree root
* @node: pointer to tree node
* @slot: pointer to slot in @node
* @item: new item to store in the slot.
*
* For use with __radix_tree_lookup(). Caller must hold tree write locked
* across slot lookup and replacement.
*/
void __radix_tree_replace(struct radix_tree_root *root,
struct radix_tree_node *node,
void __rcu **slot, void *item)
{
void *old = rcu_dereference_raw(*slot);
int values = !!xa_is_value(item) - !!xa_is_value(old);
int count = calculate_count(root, node, slot, item, old);
/*
* This function supports replacing value entries and
* deleting entries, but that needs accounting against the
* node unless the slot is root->xa_head.
*/
WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
(count || values));
replace_slot(slot, item, node, count, values);
if (!node)
return;
delete_node(root, node);
}
/**
* radix_tree_replace_slot - replace item in a slot
* @root: radix tree root
* @slot: pointer to slot
* @item: new item to store in the slot.
*
* For use with radix_tree_lookup_slot() and
* radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
* across slot lookup and replacement.
*
* NOTE: This cannot be used to switch between non-entries (empty slots),
* regular entries, and value entries, as that requires accounting
* inside the radix tree node. When switching from one type of entry or
* deleting, use __radix_tree_lookup() and __radix_tree_replace() or
* radix_tree_iter_replace().
*/
void radix_tree_replace_slot(struct radix_tree_root *root,
void __rcu **slot, void *item)
{
__radix_tree_replace(root, NULL, slot, item);
}
EXPORT_SYMBOL(radix_tree_replace_slot);
/**
* radix_tree_iter_replace - replace item in a slot
* @root: radix tree root
* @iter: iterator state
* @slot: pointer to slot
* @item: new item to store in the slot.
*
* For use with radix_tree_for_each_slot().
* Caller must hold tree write locked.
*/
void radix_tree_iter_replace(struct radix_tree_root *root,
const struct radix_tree_iter *iter,
void __rcu **slot, void *item)
{
__radix_tree_replace(root, iter->node, slot, item);
}
static void node_tag_set(struct radix_tree_root *root,
struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
{
while (node) {
if (tag_get(node, tag, offset))
return;
tag_set(node, tag, offset);
offset = node->offset;
node = node->parent;
}
if (!root_tag_get(root, tag))
root_tag_set(root, tag);
}
/**
* radix_tree_tag_set - set a tag on a radix tree node
* @root: radix tree root
* @index: index key
* @tag: tag index
*
* Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
* corresponding to @index in the radix tree, from
* the root all the way down to the leaf node.
*
* Returns the address of the tagged item. Setting a tag on a not-present
* item is a bug.
*/
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
radix_tree_load_root(root, &node, &maxindex);
BUG_ON(index > maxindex);
while (radix_tree_is_internal_node(node)) {
unsigned offset;
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
BUG_ON(!node);
if (!tag_get(parent, tag, offset))
tag_set(parent, tag, offset);
}
/* set the root's tag bit */
if (!root_tag_get(root, tag))
root_tag_set(root, tag);
return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);
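/*
 * A minimal sketch of setting a tag on a present entry, assuming the caller
 * provides the lock that excludes other tree modifiers; tag 0 is simply the
 * first tag, and its meaning is up to the user of the tree.
 */
static void __maybe_unused example_mark_entry(struct radix_tree_root *tree,
                                              spinlock_t *lock,
                                              unsigned long index)
{
        spin_lock(lock);
        radix_tree_tag_set(tree, index, 0);
        spin_unlock(lock);
}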
static void node_tag_clear(struct radix_tree_root *root,
struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
{
while (node) {
if (!tag_get(node, tag, offset))
return;
tag_clear(node, tag, offset);
if (any_tag_set(node, tag))
return;
offset = node->offset;
node = node->parent;
}
/* clear the root's tag bit */
if (root_tag_get(root, tag))
root_tag_clear(root, tag);
}
/**
* radix_tree_tag_clear - clear a tag on a radix tree node
* @root: radix tree root
* @index: index key
* @tag: tag index
*
* Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
* corresponding to @index in the radix tree. If this causes
* the leaf node to have no tags set then clear the tag in the
* next-to-leaf node, etc.
*
* Returns the address of the tagged item on success, else NULL. ie:
* has the same return value and semantics as radix_tree_lookup().
*/
void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
int offset = 0;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return NULL;
parent = NULL;
while (radix_tree_is_internal_node(node)) {
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
}
if (node)
node_tag_clear(root, parent, tag, offset);
return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
/**
* radix_tree_iter_tag_clear - clear a tag on the current iterator entry
* @root: radix tree root
* @iter: iterator state
* @tag: tag to clear
*/
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
const struct radix_tree_iter *iter, unsigned int tag)
{
node_tag_clear(root, iter->node, tag, iter_offset(iter));
}
/**
* radix_tree_tag_get - get a tag on a radix tree node
* @root: radix tree root
* @index: index key
* @tag: tag index (< RADIX_TREE_MAX_TAGS)
*
* Return values:
*
* 0: tag not present or not set
* 1: tag set
*
* Note that the return value of this function may not be relied on, even if
* the RCU lock is held, unless tag modification and node deletion are excluded
* from concurrency.
*/
int radix_tree_tag_get(const struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
if (!root_tag_get(root, tag))
return 0;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return 0;
while (radix_tree_is_internal_node(node)) {
unsigned offset;
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
if (!tag_get(parent, tag, offset))
return 0;
if (node == RADIX_TREE_RETRY)
break;
}
return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);
/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
struct radix_tree_node *node, unsigned offset,
unsigned tag)
{
unsigned tag_long = offset / BITS_PER_LONG;
unsigned tag_bit = offset % BITS_PER_LONG;
if (!node) {
iter->tags = 1;
return;
}
iter->tags = node->tags[tag][tag_long] >> tag_bit;
/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
/* Pick tags from next element */
if (tag_bit)
iter->tags |= node->tags[tag][tag_long + 1] <<
(BITS_PER_LONG - tag_bit);
/* Clip chunk size, here only BITS_PER_LONG tags */
iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
}
}
void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{
iter->index = __radix_tree_iter_add(iter, 1);
iter->next_index = iter->index;
iter->tags = 0;
return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);
/**
* radix_tree_next_chunk - find next chunk of slots for iteration
*
* @root: radix tree root
* @iter: iterator state
* @flags: RADIX_TREE_ITER_* flags and tag index
* Returns: pointer to chunk first slot, or NULL if iteration is over
*/
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags)
{
unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
struct radix_tree_node *node, *child;
unsigned long index, offset, maxindex;
if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
return NULL;
/*
* Catch next_index overflow after ~0UL. iter->index never overflows
* during iterating; it can be zero only at the beginning.
* And we cannot overflow iter->next_index in a single step,
* because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
*
* This condition also used by radix_tree_next_slot() to stop
* contiguous iterating, and forbid switching to the next chunk.
*/
index = iter->next_index;
if (!index && iter->index)
return NULL;
restart:
radix_tree_load_root(root, &child, &maxindex);
if (index > maxindex)
return NULL;
if (!child)
return NULL;
if (!radix_tree_is_internal_node(child)) {
/* Single-slot tree */
iter->index = index;
iter->next_index = maxindex + 1;
iter->tags = 1;
iter->node = NULL;
return (void __rcu **)&root->xa_head;
}
do {
node = entry_to_node(child);
offset = radix_tree_descend(node, &child, index);
if ((flags & RADIX_TREE_ITER_TAGGED) ?
!tag_get(node, tag, offset) : !child) {
/* Hole detected */
if (flags & RADIX_TREE_ITER_CONTIG)
return NULL;
if (flags & RADIX_TREE_ITER_TAGGED)
offset = radix_tree_find_next_bit(node, tag,
offset + 1);
else
while (++offset < RADIX_TREE_MAP_SIZE) {
void *slot = rcu_dereference_raw(
node->slots[offset]);
if (slot)
break;
}
index &= ~node_maxindex(node);
index += offset << node->shift;
/* Overflow after ~0UL */
if (!index)
return NULL;
if (offset == RADIX_TREE_MAP_SIZE)
goto restart;
child = rcu_dereference_raw(node->slots[offset]);
}
if (!child)
goto restart;
if (child == RADIX_TREE_RETRY)
break;
} while (node->shift && radix_tree_is_internal_node(child));
/* Update the iterator state */
iter->index = (index &~ node_maxindex(node)) | offset;
iter->next_index = (index | node_maxindex(node)) + 1;
iter->node = node;
if (flags & RADIX_TREE_ITER_TAGGED)
set_iter_tags(iter, node, offset, tag);
return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
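/*
 * A minimal sketch of chunked iteration via the radix_tree_for_each_slot()
 * helper built on radix_tree_next_chunk(); counting entries under RCU is a
 * hypothetical use.
 */
static unsigned long __maybe_unused example_count_entries(const struct radix_tree_root *tree)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        unsigned long count = 0;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, tree, &iter, 0)
                count++;
        rcu_read_unlock();
        return count;
}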
/**
* radix_tree_gang_lookup - perform multiple lookup on a radix tree
* @root: radix tree root
* @results: where the results of the lookup are placed
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
*
* Performs an index-ascending scan of the tree for present items. Places
* them at *@results and returns the number of items which were placed at
* *@results.
*
* The implementation is naive.
*
* Like radix_tree_lookup, radix_tree_gang_lookup may be called under
* rcu_read_lock. In this case, rather than the returned results being
* an atomic snapshot of the tree at a single point in time, the
* semantics of an RCU protected gang lookup are as though multiple
* radix_tree_lookups have been issued in individual locks, and results
* stored in 'results'.
*/
unsigned int
radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items)
{
struct radix_tree_iter iter;
void __rcu **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_slot(slot, root, &iter, first_index) {
results[ret] = rcu_dereference_raw(*slot);
if (!results[ret])
continue;
if (radix_tree_is_internal_node(results[ret])) {
slot = radix_tree_iter_retry(&iter);
continue;
}
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
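/*
 * A minimal sketch of a batched lookup under RCU; the results array and
 * batch size are whatever the (hypothetical) caller provides.
 */
static unsigned int __maybe_unused example_batch_lookup(const struct radix_tree_root *tree,
                                                        void **results,
                                                        unsigned long start,
                                                        unsigned int batch)
{
        unsigned int nr;

        rcu_read_lock();
        nr = radix_tree_gang_lookup(tree, results, start, batch);
        rcu_read_unlock();
        return nr;
}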
/**
* radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
* based on a tag
* @root: radix tree root
* @results: where the results of the lookup are placed
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
* @tag: the tag index (< RADIX_TREE_MAX_TAGS)
*
* Performs an index-ascending scan of the tree for present items which
* have the tag indexed by @tag set. Places the items at *@results and
* returns the number of items which were placed at *@results.
*/
unsigned int
radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items,
unsigned int tag)
{
struct radix_tree_iter iter;
void __rcu **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
results[ret] = rcu_dereference_raw(*slot);
if (!results[ret])
continue;
if (radix_tree_is_internal_node(results[ret])) {
slot = radix_tree_iter_retry(&iter);
continue;
}
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
* radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
* radix tree based on a tag
* @root: radix tree root
* @results: where the results of the lookup are placed
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
* @tag: the tag index (< RADIX_TREE_MAX_TAGS)
*
* Performs an index-ascending scan of the tree for present items which
* have the tag indexed by @tag set. Places the slots at *@results and
* returns the number of slots which were placed at *@results.
*/
unsigned int
radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
void __rcu ***results, unsigned long first_index,
unsigned int max_items, unsigned int tag)
{
struct radix_tree_iter iter;
void __rcu **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
results[ret] = slot;
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
static bool __radix_tree_delete(struct radix_tree_root *root,
struct radix_tree_node *node, void __rcu **slot)
{
void *old = rcu_dereference_raw(*slot);
int values = xa_is_value(old) ? -1 : 0;
unsigned offset = get_slot_offset(node, slot);
int tag;
if (is_idr(root))
node_tag_set(root, node, IDR_FREE, offset);
else
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
replace_slot(slot, NULL, node, -1, values);
return node && delete_node(root, node);
}
/**
* radix_tree_iter_delete - delete the entry at this iterator position
* @root: radix tree root
* @iter: iterator state
* @slot: pointer to slot
*
* Delete the entry at the position currently pointed to by the iterator.
* This may result in the current node being freed; if it is, the iterator
* is advanced so that it will not reference the freed memory. This
* function may be called without any locking if there are no other threads
* which can access this tree.
*/
void radix_tree_iter_delete(struct radix_tree_root *root,
struct radix_tree_iter *iter, void __rcu **slot)
{
if (__radix_tree_delete(root, iter->node, slot))
iter->index = iter->next_index;
}
EXPORT_SYMBOL(radix_tree_iter_delete);
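/*
 * A minimal sketch of deleting every entry while iterating, relying on
 * radix_tree_iter_delete() to keep the iterator valid when nodes are freed;
 * assumes the (hypothetical) caller excludes all other users of the tree.
 */
static void __maybe_unused example_clear_tree(struct radix_tree_root *tree)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, tree, &iter, 0)
                radix_tree_iter_delete(tree, &iter, slot);
}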
/**
* radix_tree_delete_item - delete an item from a radix tree
* @root: radix tree root
* @index: index key
* @item: expected item
*
* Remove @item at @index from the radix tree rooted at @root.
*
* Return: the deleted entry, or %NULL if it was not present
* or the entry at the given @index was not @item.
*/
void *radix_tree_delete_item(struct radix_tree_root *root,
unsigned long index, void *item)
{
struct radix_tree_node *node = NULL;
void __rcu **slot = NULL;
void *entry;
entry = __radix_tree_lookup(root, index, &node, &slot);
if (!slot)
return NULL;
if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
get_slot_offset(node, slot))))
return NULL;
if (item && entry != item)
return NULL;
__radix_tree_delete(root, node, slot);
return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
/**
* radix_tree_delete - delete an entry from a radix tree
* @root: radix tree root
* @index: index key
*
* Remove the entry at @index from the radix tree rooted at @root.
*
* Return: The deleted entry, or %NULL if it was not present.
*/
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
/**
* radix_tree_tagged - test whether any items in the tree are tagged
* @root: radix tree root
* @tag: tag to test
*/
int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
/**
* idr_preload - preload for idr_alloc()
* @gfp_mask: allocation mask to use for preloading
*
* Preallocate memory to use for the next call to idr_alloc(). This function
* returns with preemption disabled. It will be enabled by idr_preload_end().
*/
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
local_lock(&radix_tree_preloads.lock);
}
EXPORT_SYMBOL(idr_preload);
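/*
 * A minimal sketch of the idr_preload()/idr_alloc() pairing, assuming a
 * hypothetical idr and a spinlock that serializes allocations; GFP_NOWAIT is
 * used under the lock because the preload already provided the memory.
 */
static int __maybe_unused example_idr_alloc(struct idr *idr, spinlock_t *lock,
                                            void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock(lock);
        id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
        spin_unlock(lock);
        idr_preload_end();
        return id;
}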
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max)
{
struct radix_tree_node *node = NULL, *child;
void __rcu **slot = (void __rcu **)&root->xa_head;
unsigned long maxindex, start = iter->next_index;
unsigned int shift, offset = 0;
grow:
shift = radix_tree_load_root(root, &child, &maxindex);
if (!radix_tree_tagged(root, IDR_FREE))
start = max(start, maxindex + 1);
if (start > max)
return ERR_PTR(-ENOSPC);
if (start > maxindex) {
int error = radix_tree_extend(root, gfp, start, shift);
if (error < 0)
return ERR_PTR(error);
shift = error;
child = rcu_dereference_raw(root->xa_head);
}
if (start == 0 && shift == 0)
shift = RADIX_TREE_MAP_SHIFT;
while (shift) {
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
child = radix_tree_node_alloc(gfp, node, root, shift,
offset, 0, 0);
if (!child)
return ERR_PTR(-ENOMEM);
all_tag_set(child, IDR_FREE);
rcu_assign_pointer(*slot, node_to_entry(child));
if (node)
node->count++;
} else if (!radix_tree_is_internal_node(child))
break;
node = entry_to_node(child);
offset = radix_tree_descend(node, &child, start);
if (!tag_get(node, IDR_FREE, offset)) {
offset = radix_tree_find_next_bit(node, IDR_FREE, offset + 1);
start = next_index(start, node, offset);
if (start > max || start == 0)
return ERR_PTR(-ENOSPC);
while (offset == RADIX_TREE_MAP_SIZE) {
offset = node->offset + 1;
node = node->parent;
if (!node)
goto grow;
shift = node->shift;
}
child = rcu_dereference_raw(node->slots[offset]);
}
slot = &node->slots[offset];
}
iter->index = start;
if (node)
iter->next_index = 1 + min(max, (start | node_maxindex(node)));
else
iter->next_index = 1;
iter->node = node;
set_iter_tags(iter, node, offset, IDR_FREE);
return slot;
}
/**
* idr_destroy - release all internal memory from an IDR
* @idr: idr handle
*
* After this function is called, the IDR is empty, and may be reused or
* the data structure containing it may be freed.
*
* A typical clean-up sequence for objects stored in an idr tree will use
* idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free the memory used to keep track of those objects.
*/
void idr_destroy(struct idr *idr)
{
struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
if (radix_tree_is_internal_node(node))
radix_tree_free_nodes(node);
idr->idr_rt.xa_head = NULL;
root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
static void
radix_tree_node_ctor(void *arg)
{
struct radix_tree_node *node = arg;
memset(node, 0, sizeof(*node));
INIT_LIST_HEAD(&node->private_list);
}
static int radix_tree_cpu_dead(unsigned int cpu)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
/* Free per-cpu pool of preloaded nodes */
rtp = &per_cpu(radix_tree_preloads, cpu);
while (rtp->nr) {
node = rtp->nodes;
rtp->nodes = node->parent;
kmem_cache_free(radix_tree_node_cachep, node);
rtp->nr--;
}
return 0;
}
void __init radix_tree_init(void)
{
int ret;
BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
radix_tree_node_ctor);
ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
NULL, radix_tree_cpu_dead);
WARN_ON(ret < 0);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Helpers for formatting and printing strings
*
* Copyright 31 August 2008 James Bottomley
* Copyright (C) 2013, Intel Corporation
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
/**
* string_get_size - get the size in the specified units
* @size: The size to be converted in blocks
* @blk_size: Size of the block (use 1 for size in bytes)
* @units: Units to use (powers of 1000 or 1024), whether to include space separator
* @buf: buffer to format to
* @len: length of buffer
*
* This function returns a string formatted to 3 significant figures
* giving the size in the required units. @buf should have room for
* at least 9 bytes and will always be zero terminated.
*
* Return value: number of characters of output that would have been written
* (which may be greater than len, if output was truncated).
*/
int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len)
{
enum string_size_units units_base = units & STRING_UNITS_MASK;
static const char *const units_10[] = {
"", "k", "M", "G", "T", "P", "E", "Z", "Y",
};
static const char *const units_2[] = {
"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi",
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
static const unsigned int divisor[] = {
[STRING_UNITS_10] = 1000,
[STRING_UNITS_2] = 1024,
};
static const unsigned int rounding[] = { 500, 50, 5 };
int i = 0, j;
u32 remainder = 0, sf_cap;
char tmp[12];
const char *unit;
tmp[0] = '\0';
if (blk_size == 0)
size = 0;
if (size == 0)
goto out;
/* This is Napier's algorithm. Reduce the original block size to
*
* coefficient * divisor[units_base]^i
*
* we do the reduction so both coefficients are just under 32 bits so
* that multiplying them together won't overflow 64 bits and we keep
* as much precision as possible in the numbers.
*
* Note: it's safe to throw away the remainders here because all the
* precision is in the coefficients.
*/
while (blk_size >> 32) {
do_div(blk_size, divisor[units_base]);
i++;
}
while (size >> 32) {
do_div(size, divisor[units_base]);
i++;
}
/* now perform the actual multiplication keeping i as the sum of the
* two logarithms */
size *= blk_size;
/* and logarithmically reduce it until it's just under the divisor */
while (size >= divisor[units_base]) {
remainder = do_div(size, divisor[units_base]);
i++;
}
/* work out in j how many digits of precision we need from the
* remainder */
sf_cap = size;
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
if (units_base == STRING_UNITS_2) {
/* express the remainder as a decimal. It's currently the
* numerator of a fraction whose denominator is
* divisor[units_base], which is 1 << 10 for STRING_UNITS_2 */
remainder *= 1000;
remainder >>= 10;
}
/* add a 5 to the digit below what will be printed to ensure
* an arithmetical round up and carry it through to size */
remainder += rounding[j];
if (remainder >= 1000) {
remainder -= 1000;
size += 1;
}
if (j) {
snprintf(tmp, sizeof(tmp), ".%03u", remainder);
tmp[j+1] = '\0';
}
out:
if (i >= ARRAY_SIZE(units_2))
unit = "UNK";
else
unit = units_str[units_base][i];
return snprintf(buf, len, "%u%s%s%s%s", (u32)size, tmp,
(units & STRING_UNITS_NO_SPACE) ? "" : " ",
unit,
(units & STRING_UNITS_NO_BYTES) ? "" : "B");
}
EXPORT_SYMBOL(string_get_size);
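/*
 * A minimal sketch of formatting a capacity, assuming 512-byte sectors: for
 * 2048 sectors (1048576 bytes) this prints "1.00 MiB" with STRING_UNITS_2
 * and would print "1.05 MB" with STRING_UNITS_10.
 */
static void __maybe_unused example_print_capacity(u64 sectors)
{
        char buf[16];

        string_get_size(sectors, 512, STRING_UNITS_2, buf, sizeof(buf));
        pr_info("capacity: %s\n", buf);
}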
int parse_int_array(const char *buf, size_t count, int **array)
{
int *ints, nints;
get_options(buf, 0, &nints);
if (!nints)
return -ENOENT;
ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
if (!ints)
return -ENOMEM;
get_options(buf, nints + 1, ints);
*array = ints;
return 0;
}
EXPORT_SYMBOL(parse_int_array);
/**
* parse_int_array_user - Split string into a sequence of integers
* @from: The user space buffer to read from
* @count: The maximum number of bytes to read
* @array: Returned pointer to sequence of integers
*
* On success @array is allocated and initialized with a sequence of
* integers extracted from @from, preceded by an additional element that
* specifies the number of integers in the sequence.
*
* Caller takes responsibility for freeing @array when it is no longer
* needed.
*/
int parse_int_array_user(const char __user *from, size_t count, int **array)
{
char *buf;
int ret;
buf = memdup_user_nul(from, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
ret = parse_int_array(buf, count, array);
kfree(buf);
return ret;
}
EXPORT_SYMBOL(parse_int_array_user);
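/*
 * A minimal sketch of a write handler parsing integers from userspace;
 * ints[0] carries the element count as described above, and the caller
 * frees the array. The handler itself is hypothetical.
 */
static ssize_t __maybe_unused example_write(struct file *file,
                                            const char __user *ubuf,
                                            size_t count, loff_t *ppos)
{
        int *ints, i, ret;

        ret = parse_int_array_user(ubuf, count, &ints);
        if (ret < 0)
                return ret;
        for (i = 1; i <= ints[0]; i++)
                pr_info("value %d: %d\n", i - 1, ints[i]);
        kfree(ints);
        return count;
}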
static bool unescape_space(char **src, char **dst)
{
char *p = *dst, *q = *src;
switch (*q) {
case 'n':
*p = '\n';
break;
case 'r':
*p = '\r';
break;
case 't':
*p = '\t';
break;
case 'v':
*p = '\v';
break;
case 'f':
*p = '\f';
break;
default:
return false;
}
*dst += 1;
*src += 1;
return true;
}
static bool unescape_octal(char **src, char **dst)
{
char *p = *dst, *q = *src;
u8 num;
if (isodigit(*q) == 0)
return false;
num = (*q++) & 7;
while (num < 32 && isodigit(*q) && (q - *src < 3)) {
num <<= 3;
num += (*q++) & 7;
}
*p = num;
*dst += 1;
*src = q;
return true;
}
static bool unescape_hex(char **src, char **dst)
{
char *p = *dst, *q = *src;
int digit;
u8 num;
if (*q++ != 'x')
return false;
num = digit = hex_to_bin(*q++);
if (digit < 0)
return false;
digit = hex_to_bin(*q);
if (digit >= 0) {
q++;
num = (num << 4) | digit;
}
*p = num;
*dst += 1;
*src = q;
return true;
}
static bool unescape_special(char **src, char **dst)
{
char *p = *dst, *q = *src;
switch (*q) {
case '\"':
*p = '\"';
break;
case '\\':
*p = '\\';
break;
case 'a':
*p = '\a';
break;
case 'e':
*p = '\e';
break;
default:
return false;
}
*dst += 1;
*src += 1;
return true;
}
/**
* string_unescape - unquote characters in the given string
* @src: source buffer (escaped)
* @dst: destination buffer (unescaped)
* @size: size of the destination buffer (0 to unlimit)
* @flags: combination of the flags.
*
* Description:
* The function unquotes characters in the given string.
*
* Because the size of the output will be the same as or less than the size of
* the input, the transformation may be performed in place.
*
* Caller must provide valid source and destination pointers. Be aware that
* destination buffer will always be NULL-terminated. Source string must be
* NULL-terminated as well. The supported flags are::
*
* UNESCAPE_SPACE:
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
* '\t' - horizontal tab
* '\v' - vertical tab
* UNESCAPE_OCTAL:
* '\NNN' - byte with octal value NNN (1 to 3 digits)
* UNESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
* UNESCAPE_SPECIAL:
* '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
* UNESCAPE_ANY:
* all previous together
*
* Return:
* The number of characters written to the destination buffer, excluding the
* trailing '\0', is returned.
*/
int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
{
char *out = dst;
if (!size)
size = SIZE_MAX;
while (*src && --size) {
if (src[0] == '\\' && src[1] != '\0' && size > 1) {
src++;
size--;
if (flags & UNESCAPE_SPACE &&
unescape_space(&src, &out))
continue;
if (flags & UNESCAPE_OCTAL &&
unescape_octal(&src, &out))
continue;
if (flags & UNESCAPE_HEX &&
unescape_hex(&src, &out))
continue;
if (flags & UNESCAPE_SPECIAL &&
unescape_special(&src, &out))
continue;
*out++ = '\\';
}
*out++ = *src++;
}
*out = '\0';
return out - dst;
}
EXPORT_SYMBOL(string_unescape);
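/*
 * Illustrative sketch (not part of the original file): string_unescape() may
 * be run in place because the output is never longer than the input. The
 * buffer contents and flag combination are examples only.
 */
static void string_unescape_example(void)
{
	char buf[] = "line1\\nline2\\t\\x21";

	/* Unescape \n, \t and \xHH in place; buf becomes "line1\nline2\t!". */
	string_unescape(buf, buf, sizeof(buf), UNESCAPE_SPACE | UNESCAPE_HEX);
}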
static bool escape_passthrough(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (out < end)
*out = c;
*dst = out + 1;
return true;
}
static bool escape_space(unsigned char c, char **dst, char *end)
{
char *out = *dst;
unsigned char to;
switch (c) {
case '\n':
to = 'n';
break;
case '\r':
to = 'r';
break;
case '\t':
to = 't';
break;
case '\v':
to = 'v';
break;
case '\f':
to = 'f';
break;
default:
return false;
}
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = to;
++out;
*dst = out;
return true;
}
static bool escape_special(unsigned char c, char **dst, char *end)
{
char *out = *dst;
unsigned char to;
switch (c) {
case '\\':
to = '\\';
break;
case '\a':
to = 'a';
break;
case '\e':
to = 'e';
break;
case '"':
to = '"';
break;
default:
return false;
}
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = to;
++out;
*dst = out;
return true;
}
static bool escape_null(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (c)
return false;
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = '0';
++out;
*dst = out;
return true;
}
static bool escape_octal(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = ((c >> 6) & 0x07) + '0';
++out;
if (out < end)
*out = ((c >> 3) & 0x07) + '0';
++out;
if (out < end)
*out = ((c >> 0) & 0x07) + '0';
++out;
*dst = out;
return true;
}
static bool escape_hex(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = 'x';
++out;
if (out < end)
*out = hex_asc_hi(c);
++out;
if (out < end)
*out = hex_asc_lo(c);
++out;
*dst = out;
return true;
}
/**
* string_escape_mem - quote characters in the given memory buffer
* @src: source buffer (unescaped)
* @isz: source buffer size
* @dst: destination buffer (escaped)
* @osz: destination buffer size
* @flags: combination of the flags
* @only: NULL-terminated string containing characters used to limit
* the selected escape class. If characters are included in @only
* that would not normally be escaped by the classes selected
* in @flags, they will be copied to @dst unescaped.
*
* Description:
* The process of escaping byte buffer includes several parts. They are applied
* in the following sequence.
*
* 1. The character is not matched to the one from @only string and thus
* must go as-is to the output.
* 2. The character is matched to both the printable and ASCII classes
* (%ESCAPE_NAP), if asked, and in case of match it passes through to the
* output.
* 3. The character is matched to the printable (%ESCAPE_NP) or ASCII
* (%ESCAPE_NA) class alone, if asked, and in case of match it passes
* through to the output.
* 4. The character is checked if it falls into the class given by @flags.
* %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
* character. Note that they actually can't go together, otherwise
* %ESCAPE_HEX will be ignored.
*
* Caller must provide valid source and destination pointers. Be aware that
* destination buffer will not be NULL-terminated, thus the caller has to
* append it if needed. The supported flags are::
*
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
* '\t' - horizontal tab
* '\v' - vertical tab
* %ESCAPE_SPECIAL:
* '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
* %ESCAPE_NULL:
* '\0' - null
* %ESCAPE_OCTAL:
* '\NNN' - byte with octal value NNN (3 digits)
* %ESCAPE_ANY:
* all previous together
* %ESCAPE_NP:
* escape only non-printable characters, checked by isprint()
* %ESCAPE_ANY_NP:
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
* %ESCAPE_NA:
* escape only non-ascii characters, checked by isascii()
* %ESCAPE_NAP:
* escape only non-printable or non-ascii characters
* %ESCAPE_APPEND:
* append characters from @only to be escaped by the given classes
*
* %ESCAPE_APPEND helps to pass additional characters to be escaped when
* one of %ESCAPE_NP, %ESCAPE_NA, or %ESCAPE_NAP is provided.
*
* One notable caveat: %ESCAPE_NAP, %ESCAPE_NP and %ESCAPE_NA have
* higher priority than the rest of the flags (%ESCAPE_NAP is the highest).
* It doesn't make much sense to use either of them without %ESCAPE_OCTAL
* or %ESCAPE_HEX, because they cover most of the other character classes.
* %ESCAPE_NAP can utilize %ESCAPE_SPACE or %ESCAPE_SPECIAL in addition to
* the above.
*
* Return:
* The total size of the escaped output that would be generated for
* the given input and flags. To check whether the output was
* truncated, compare the return value to osz. There is room left in
* dst for a '\0' terminator if and only if ret < osz.
*/
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *only)
{
char *p = dst;
char *end = p + osz;
bool is_dict = only && *only;
bool is_append = flags & ESCAPE_APPEND;
while (isz--) {
unsigned char c = *src++;
bool in_dict = is_dict && strchr(only, c);
/*
* Apply rules in the following sequence:
* - the @only string is supplied and does not contain a
* character under question
* - the character is printable and ASCII, when @flags has
* %ESCAPE_NAP bit set
* - the character is printable, when @flags has
* %ESCAPE_NP bit set
* - the character is ASCII, when @flags has
* %ESCAPE_NA bit set
* - the character doesn't fall into a class of symbols
* defined by given @flags
* In these cases we just pass through a character to the
* output buffer.
*
* When %ESCAPE_APPEND is passed, the characters from @only
* have been excluded from the %ESCAPE_NAP, %ESCAPE_NP, and
* %ESCAPE_NA cases.
*/
if (!(is_append || in_dict) && is_dict &&
escape_passthrough(c, &p, end))
continue;
if (!(is_append && in_dict) && isascii(c) && isprint(c) &&
flags & ESCAPE_NAP && escape_passthrough(c, &p, end))
continue;
if (!(is_append && in_dict) && isprint(c) &&
flags & ESCAPE_NP && escape_passthrough(c, &p, end))
continue;
if (!(is_append && in_dict) && isascii(c) &&
flags & ESCAPE_NA && escape_passthrough(c, &p, end))
continue;
if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
continue;
if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
continue;
if (flags & ESCAPE_NULL && escape_null(c, &p, end))
continue;
/* ESCAPE_OCTAL and ESCAPE_HEX always go last */
if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
continue;
if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
continue;
escape_passthrough(c, &p, end);
}
return p - dst;
}
EXPORT_SYMBOL(string_escape_mem);
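/*
 * Illustrative sketch (not part of the original file): detecting truncation by
 * comparing the return value of string_escape_mem() with the destination
 * size, as the Return section above describes. Buffer contents are examples.
 */
static void string_escape_mem_example(void)
{
	const char src[] = "a\tb\0c";
	char dst[8];
	int ret;

	ret = string_escape_mem(src, sizeof(src) - 1, dst, sizeof(dst),
				ESCAPE_ANY_NP, NULL);
	if (ret >= (int)sizeof(dst))
		pr_warn("escaped output truncated (%d bytes needed)\n", ret);
}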
/*
* Return an allocated string that has been escaped of special characters
* and double quotes, making it safe to log in quotes.
*/
char *kstrdup_quotable(const char *src, gfp_t gfp)
{
size_t slen, dlen;
char *dst;
const int flags = ESCAPE_HEX;
const char esc[] = "\f\n\r\t\v\a\e\\\"";
if (!src)
return NULL;
slen = strlen(src);
dlen = string_escape_mem(src, slen, NULL, 0, flags, esc);
dst = kmalloc(dlen + 1, gfp);
if (!dst)
return NULL;
WARN_ON(string_escape_mem(src, slen, dst, dlen, flags, esc) != dlen);
dst[dlen] = '\0';
return dst;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable);
/*
* Returns allocated NULL-terminated string containing process
* command line, with inter-argument NULLs replaced with spaces,
* and other special characters escaped.
*/
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp)
{
char *buffer, *quoted;
int i, res;
buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return NULL;
res = get_cmdline(task, buffer, PAGE_SIZE - 1);
buffer[res] = '\0';
/* Collapse trailing NULLs, leave res pointing to last non-NULL. */
while (--res >= 0 && buffer[res] == '\0')
;
/* Replace inter-argument NULLs. */
for (i = 0; i <= res; i++)
if (buffer[i] == '\0')
buffer[i] = ' ';
/* Make sure result is printable. */
quoted = kstrdup_quotable(buffer, gfp);
kfree(buffer);
return quoted;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable_cmdline);
/*
* Returns allocated NULL-terminated string containing pathname,
* with special characters escaped, able to be safely logged. If
* there is an error, the leading character will be "<".
*/
char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
{
char *temp, *pathname;
if (!file)
return kstrdup("<unknown>", gfp);
/* Leave 11 extra bytes so ' (deleted)' can be appended */
temp = kmalloc(PATH_MAX + 11, GFP_KERNEL);
if (!temp)
return kstrdup("<no_memory>", gfp);
pathname = file_path(file, temp, PATH_MAX + 11);
if (IS_ERR(pathname))
pathname = kstrdup("<too_long>", gfp);
else
pathname = kstrdup_quotable(pathname, gfp);
kfree(temp);
return pathname;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
/*
* Returns duplicate string in which the @old characters are replaced by @new.
*/
char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp)
{
char *dst;
dst = kstrdup(src, gfp);
if (!dst)
return NULL;
return strreplace(dst, old, new);
}
EXPORT_SYMBOL_GPL(kstrdup_and_replace);
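/*
 * Illustrative sketch (not part of the original file): a common use of
 * kstrdup_and_replace() is sanitising a name that may contain '/' before it
 * is used as a sysfs or debugfs entry name. The helper name and input are
 * hypothetical.
 */
static char *example_sanitise_name(const char *raw)
{
	/* e.g. "cpu/0" becomes "cpu!0"; returns NULL on allocation failure */
	return kstrdup_and_replace(raw, '/', '!', GFP_KERNEL);
}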
/**
* kasprintf_strarray - allocate and fill array of sequential strings
* @gfp: flags for the slab allocator
* @prefix: prefix to be used
* @n: amount of lines to be allocated and filled
*
* Allocates and fills @n strings using pattern "%s-%zu", where prefix
* is provided by caller. The caller is responsible to free them with
* kfree_strarray() after use.
*
* Returns array of strings or NULL when memory can't be allocated.
*/
char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n)
{
char **names;
size_t i;
names = kcalloc(n + 1, sizeof(char *), gfp);
if (!names)
return NULL;
for (i = 0; i < n; i++) {
names[i] = kasprintf(gfp, "%s-%zu", prefix, i);
if (!names[i]) {
kfree_strarray(names, i);
return NULL;
}
}
return names;
}
EXPORT_SYMBOL_GPL(kasprintf_strarray);
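/*
 * Illustrative sketch (not part of the original file): allocating "pin-0" ..
 * "pin-3" style labels and releasing them with kfree_strarray(). The prefix
 * and count are arbitrary examples.
 */
static void kasprintf_strarray_example(void)
{
	char **labels;

	labels = kasprintf_strarray(GFP_KERNEL, "pin", 4);
	if (!labels)
		return;

	/* labels[0] == "pin-0" ... labels[3] == "pin-3", labels[4] == NULL */
	kfree_strarray(labels, 4);
}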
/**
* kfree_strarray - free a number of dynamically allocated strings contained
* in an array and the array itself
*
* @array: Dynamically allocated array of strings to free.
* @n: Number of strings (starting from the beginning of the array) to free.
*
* Passing a non-NULL @array and @n == 0 as well as NULL @array are valid
* use-cases. If @array is NULL, the function does nothing.
*/
void kfree_strarray(char **array, size_t n)
{
unsigned int i;
if (!array)
return;
for (i = 0; i < n; i++)
kfree(array[i]);
kfree(array);
}
EXPORT_SYMBOL_GPL(kfree_strarray);
struct strarray {
char **array;
size_t n;
};
static void devm_kfree_strarray(struct device *dev, void *res)
{
struct strarray *array = res;
kfree_strarray(array->array, array->n);
}
char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
{
struct strarray *ptr;
ptr = devres_alloc(devm_kfree_strarray, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
ptr->array = kasprintf_strarray(GFP_KERNEL, prefix, n);
if (!ptr->array) {
devres_free(ptr);
return ERR_PTR(-ENOMEM);
}
ptr->n = n;
devres_add(dev, ptr);
return ptr->array;
}
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
/**
* skip_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
*
* Returns a pointer to the first non-whitespace character in @str.
*/
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
EXPORT_SYMBOL(skip_spaces);
/**
* strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
char *strim(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
return skip_spaces(s);
}
EXPORT_SYMBOL(strim);
/**
* sysfs_streq - return true if strings are equal, modulo trailing newline
* @s1: one string
* @s2: another string
*
* This routine returns true iff two strings are equal, treating both
* NUL and newline-then-NUL as equivalent string terminations. It's
* geared for use with sysfs input strings, which generally terminate
* with newlines but are compared against values without newlines.
*/
bool sysfs_streq(const char *s1, const char *s2)
{
while (*s1 && *s1 == *s2) {
s1++;
s2++;
}
if (*s1 == *s2)
return true;
if (!*s1 && *s2 == '\n' && !s2[1])
return true;
if (*s1 == '\n' && !s1[1] && !*s2)
return true;
return false;
}
EXPORT_SYMBOL(sysfs_streq);
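/*
 * Illustrative sketch (not part of the original file): because sysfs_streq()
 * tolerates a trailing newline, a typical sysfs store handler can compare the
 * raw user input directly. The mode names and helper are hypothetical.
 */
static int example_parse_mode(const char *buf)
{
	if (sysfs_streq(buf, "auto"))
		return 0;
	if (sysfs_streq(buf, "manual"))
		return 1;
	return -EINVAL;
}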
/**
* match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @string: string to match with
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
* Historically the value of -1 for @n was used to search in arrays that
* are NULL terminated. However, the function does not make a distinction
* when finishing the search: either @n elements have been compared OR
* the first NULL element was found.
*
* Return:
* index of a @string in the @array if matches, or %-EINVAL otherwise.
*/
int match_string(const char * const *array, size_t n, const char *string)
{
int index;
const char *item;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (!strcmp(item, string))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(match_string);
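/*
 * Illustrative sketch (not part of the original file): looking up a mode name
 * in a NULL-terminated table with match_string(). The table contents are
 * examples only.
 */
static int example_lookup_mode(const char *name)
{
	static const char * const modes[] = { "off", "on", "auto", NULL };

	/* Returns 0, 1 or 2 on a match, -EINVAL otherwise. */
	return match_string(modes, -1, name);
}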
/**
* __sysfs_match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @str: string to match with
*
* Returns index of @str in the @array or -EINVAL, just like match_string().
* Uses sysfs_streq instead of strcmp for matching.
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
* Historically the value of -1 for @n was used to search in arrays that
* are NULL terminated. However, the function does not make a distinction
* when finishing the search: either @n elements have been compared OR
* the first NULL element was found.
*/
int __sysfs_match_string(const char * const *array, size_t n, const char *str)
{
const char *item;
int index;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (sysfs_streq(item, str))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(__sysfs_match_string);
/**
* strreplace - Replace all occurrences of character in string.
* @str: The string to operate on.
* @old: The character being replaced.
* @new: The character @old is replaced with.
*
* Replaces each @old character with a @new one in the given string @str.
*
* Return: pointer to the string @str itself.
*/
char *strreplace(char *str, char old, char new)
{
char *s = str;
for (; *s; ++s)
if (*s == old)
*s = new;
return str;
}
EXPORT_SYMBOL(strreplace);
/**
* memcpy_and_pad - Copy one buffer to another with padding
* @dest: Where to copy to
* @dest_len: The destination buffer size
* @src: Where to copy from
* @count: The number of bytes to copy
* @pad: Character to use for padding if space is left in destination.
*/
void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
int pad)
{
if (dest_len > count) {
memcpy(dest, src, count);
memset(dest + count, pad, dest_len - count);
} else {
memcpy(dest, src, dest_len);
}
}
EXPORT_SYMBOL(memcpy_and_pad);
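/*
 * Illustrative sketch (not part of the original file): padding a fixed-width,
 * unterminated identification field with spaces, a common use of
 * memcpy_and_pad() in storage drivers. The field and model string are
 * hypothetical.
 */
static void example_fill_model(char *model_field /* 16 bytes, unterminated */)
{
	const char *model = "EXAMPLE-DISK";

	/* Copy the name and fill the remaining bytes with ' '. */
	memcpy_and_pad(model_field, 16, model, strlen(model), ' ');
}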
#ifdef CONFIG_FORTIFY_SOURCE
/* These are placeholders for fortify compile-time warnings. */
void __read_overflow2_field(size_t avail, size_t wanted) { }
EXPORT_SYMBOL(__read_overflow2_field);
void __write_overflow_field(size_t avail, size_t wanted) { }
EXPORT_SYMBOL(__write_overflow_field);
static const char * const fortify_func_name[] = {
#define MAKE_FORTIFY_FUNC_NAME(func) [MAKE_FORTIFY_FUNC(func)] = #func
EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC_NAME)
#undef MAKE_FORTIFY_FUNC_NAME
};
void __fortify_report(const u8 reason, const size_t avail, const size_t size)
{
const u8 func = FORTIFY_REASON_FUNC(reason);
const bool write = FORTIFY_REASON_DIR(reason);
const char *name;
name = fortify_func_name[umin(func, FORTIFY_FUNC_UNKNOWN)];
WARN(1, "%s: detected buffer overflow: %zu byte %s of buffer size %zu\n",
name, size, str_read_write(!write), avail);
}
EXPORT_SYMBOL(__fortify_report);
void __fortify_panic(const u8 reason, const size_t avail, const size_t size)
{
__fortify_report(reason, avail, size);
BUG();
}
EXPORT_SYMBOL(__fortify_panic);
#endif /* CONFIG_FORTIFY_SOURCE */
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/proc/net.c
*
* Copyright (C) 2007
*
* Author: Eric Biederman <ebiederm@xmission.com>
*
* proc net directory handling functions
*/
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/mount.h>
#include <linux/nsproxy.h>
#include <linux/uidgid.h>
#include <net/net_namespace.h>
#include <linux/seq_file.h>
#include "internal.h"
static inline struct net *PDE_NET(struct proc_dir_entry *pde)
{
return pde->parent->data;
}
static struct net *get_proc_net(const struct inode *inode)
{
return maybe_get_net(PDE_NET(PDE(inode)));
}
static int seq_open_net(struct inode *inode, struct file *file)
{
unsigned int state_size = PDE(inode)->state_size;
struct seq_net_private *p;
struct net *net;
WARN_ON_ONCE(state_size < sizeof(*p));
if (file->f_mode & FMODE_WRITE && !PDE(inode)->write)
return -EACCES;
net = get_proc_net(inode);
if (!net)
return -ENXIO;
p = __seq_open_private(file, PDE(inode)->seq_ops, state_size);
if (!p) {
put_net(net);
return -ENOMEM;
}
#ifdef CONFIG_NET_NS
p->net = net;
netns_tracker_alloc(net, &p->ns_tracker, GFP_KERNEL);
#endif
return 0;
}
static void seq_file_net_put_net(struct seq_file *seq)
{
#ifdef CONFIG_NET_NS
struct seq_net_private *priv = seq->private;
put_net_track(priv->net, &priv->ns_tracker);
#else
put_net(&init_net);
#endif
}
static int seq_release_net(struct inode *ino, struct file *f)
{
struct seq_file *seq = f->private_data;
seq_file_net_put_net(seq);
seq_release_private(ino, f);
return 0;
}
static const struct proc_ops proc_net_seq_ops = {
.proc_open = seq_open_net,
.proc_read = seq_read,
.proc_write = proc_simple_write,
.proc_lseek = seq_lseek,
.proc_release = seq_release_net,
};
int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux)
{
#ifdef CONFIG_NET_NS
struct seq_net_private *p = priv_data;
p->net = get_net_track(current->nsproxy->net_ns, &p->ns_tracker,
GFP_KERNEL);
#endif
return 0;
}
void bpf_iter_fini_seq_net(void *priv_data)
{
#ifdef CONFIG_NET_NS
struct seq_net_private *p = priv_data;
put_net_track(p->net, &p->ns_tracker);
#endif
}
struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
struct proc_dir_entry *parent, const struct seq_operations *ops,
unsigned int state_size, void *data)
{
struct proc_dir_entry *p;
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
pde_force_lookup(p);
p->proc_ops = &proc_net_seq_ops;
p->seq_ops = ops;
p->state_size = state_size;
return proc_register(parent, p);
}
EXPORT_SYMBOL_GPL(proc_create_net_data);
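/*
 * Illustrative sketch (not part of the original file): a pernet init hook that
 * publishes a seq_file-backed entry under /proc/net/. The entry name and
 * seq_operations table are hypothetical; a real user would fill in the
 * start/next/stop/show callbacks.
 */
static const struct seq_operations example_seq_ops;

static int __net_init example_net_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create_net_data("example", 0444, net->proc_net,
				   &example_seq_ops,
				   sizeof(struct seq_net_private), NULL);
	return pde ? 0 : -ENOMEM;
}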
/**
* proc_create_net_data_write - Create a writable net_ns-specific proc file
* @name: The name of the file.
* @mode: The file's access mode.
* @parent: The parent directory in which to create.
* @ops: The seq_file ops with which to read the file.
* @write: The write method with which to 'modify' the file.
* @state_size: The size of the per-file private state to allocate.
* @data: Data for retrieval by pde_data().
*
* Create a network namespaced proc file in the @parent directory with the
* specified @name and @mode that allows reading of a file that displays a
* series of elements and also provides for the file accepting writes that have
* some arbitrary effect.
*
* The functions in the @ops table are used to iterate over items to be
* presented and extract the readable content using the seq_file interface.
*
* The @write function is called with the data copied into a kernel space
* scratch buffer and has a NUL appended for convenience. The buffer may be
* modified by the @write function. @write should return 0 on success.
*
* The @data value is accessible from the @show and @write functions by calling
* pde_data() on the file inode. The network namespace must be accessed by
* calling seq_file_net() on the seq_file struct.
*/
struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode,
struct proc_dir_entry *parent,
const struct seq_operations *ops,
proc_write_t write,
unsigned int state_size, void *data)
{
struct proc_dir_entry *p;
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
pde_force_lookup(p);
p->proc_ops = &proc_net_seq_ops;
p->seq_ops = ops;
p->state_size = state_size;
p->write = write;
return proc_register(parent, p);
}
EXPORT_SYMBOL_GPL(proc_create_net_data_write);
static int single_open_net(struct inode *inode, struct file *file)
{
struct proc_dir_entry *de = PDE(inode);
struct net *net;
int err;
net = get_proc_net(inode);
if (!net)
return -ENXIO;
err = single_open(file, de->single_show, net);
if (err)
put_net(net);
return err;
}
static int single_release_net(struct inode *ino, struct file *f)
{
struct seq_file *seq = f->private_data;
put_net(seq->private);
return single_release(ino, f);
}
static const struct proc_ops proc_net_single_ops = {
.proc_open = single_open_net,
.proc_read = seq_read,
.proc_write = proc_simple_write,
.proc_lseek = seq_lseek,
.proc_release = single_release_net,
};
struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
struct proc_dir_entry *parent,
int (*show)(struct seq_file *, void *), void *data)
{
struct proc_dir_entry *p;
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
pde_force_lookup(p);
p->proc_ops = &proc_net_single_ops;
p->single_show = show;
return proc_register(parent, p);
}
EXPORT_SYMBOL_GPL(proc_create_net_single);
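/*
 * Illustrative sketch (not part of the original file): a single-value
 * /proc/net file. The show callback, entry name and output are hypothetical;
 * the network namespace is recovered with seq_file_single_net(), as the
 * kerneldoc below describes for the writable variant.
 */
static int example_single_show(struct seq_file *m, void *v)
{
	struct net *net = seq_file_single_net(m);

	seq_printf(m, "net: %p\n", net);
	return 0;
}

static int __net_init example_single_init(struct net *net)
{
	if (!proc_create_net_single("example_single", 0444, net->proc_net,
				    example_single_show, NULL))
		return -ENOMEM;
	return 0;
}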
/**
* proc_create_net_single_write - Create a writable net_ns-specific proc file
* @name: The name of the file.
* @mode: The file's access mode.
* @parent: The parent directory in which to create.
* @show: The seqfile show method with which to read the file.
* @write: The write method with which to 'modify' the file.
* @data: Data for retrieval by pde_data().
*
* Create a network-namespaced proc file in the @parent directory with the
* specified @name and @mode that allows reading of a file that displays a
* single element rather than a series and also provides for the file accepting
* writes that have some arbitrary effect.
*
* The @show function is called to extract the readable content via the
* seq_file interface.
*
* The @write function is called with the data copied into a kernel space
* scratch buffer and has a NUL appended for convenience. The buffer may be
* modified by the @write function. @write should return 0 on success.
*
* The @data value is accessible from the @show and @write functions by calling
* pde_data() on the file inode. The network namespace must be accessed by
* calling seq_file_single_net() on the seq_file struct.
*/
struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode,
struct proc_dir_entry *parent,
int (*show)(struct seq_file *, void *),
proc_write_t write,
void *data)
{
struct proc_dir_entry *p;
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
pde_force_lookup(p);
p->proc_ops = &proc_net_single_ops;
p->single_show = show;
p->write = write;
return proc_register(parent, p);
}
EXPORT_SYMBOL_GPL(proc_create_net_single_write);
static struct net *get_proc_task_net(struct inode *dir)
{
struct task_struct *task;
struct nsproxy *ns;
struct net *net = NULL;
rcu_read_lock();
task = pid_task(proc_pid(dir), PIDTYPE_PID);
if (task != NULL) {
task_lock(task);
ns = task->nsproxy;
if (ns != NULL)
net = get_net(ns->net_ns);
task_unlock(task);
}
rcu_read_unlock();
return net;
}
static struct dentry *proc_tgid_net_lookup(struct inode *dir,
struct dentry *dentry, unsigned int flags)
{
struct dentry *de;
struct net *net;
de = ERR_PTR(-ENOENT);
net = get_proc_task_net(dir);
if (net != NULL) {
de = proc_lookup_de(dir, dentry, net->proc_net);
put_net(net);
}
return de;
}
static int proc_tgid_net_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct net *net;
net = get_proc_task_net(inode);
generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (net != NULL) {
stat->nlink = net->proc_net->nlink;
put_net(net);
}
return 0;
}
const struct inode_operations proc_net_inode_operations = {
.lookup = proc_tgid_net_lookup,
.getattr = proc_tgid_net_getattr,
.setattr = proc_setattr,
};
static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx)
{
int ret;
struct net *net;
ret = -EINVAL;
net = get_proc_task_net(file_inode(file));
if (net != NULL) {
ret = proc_readdir_de(file, ctx, net->proc_net);
put_net(net);
}
return ret;
}
const struct file_operations proc_net_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = proc_tgid_net_readdir,
};
static __net_init int proc_net_ns_init(struct net *net)
{
struct proc_dir_entry *netd, *net_statd;
kuid_t uid;
kgid_t gid;
int err;
/*
* This PDE acts only as an anchor for /proc/${pid}/net hierarchy.
* Corresponding inode (PDE(inode) == net->proc_net) is never
* instantiated therefore blanket zeroing is fine.
* net->proc_net_stat inode is instantiated normally.
*/
err = -ENOMEM;
netd = kmem_cache_zalloc(proc_dir_entry_cache, GFP_KERNEL);
if (!netd)
goto out;
netd->subdir = RB_ROOT;
netd->data = net;
netd->nlink = 2;
netd->namelen = 3;
netd->parent = &proc_root;
netd->name = netd->inline_name;
memcpy(netd->name, "net", 4);
uid = make_kuid(net->user_ns, 0);
if (!uid_valid(uid))
uid = netd->uid;
gid = make_kgid(net->user_ns, 0);
if (!gid_valid(gid))
gid = netd->gid;
proc_set_user(netd, uid, gid);
/* Seed dentry revalidation for /proc/${pid}/net */
pde_force_lookup(netd);
err = -EEXIST;
net_statd = proc_net_mkdir(net, "stat", netd);
if (!net_statd)
goto free_net;
net->proc_net = netd;
net->proc_net_stat = net_statd;
return 0;
free_net:
pde_free(netd);
out:
return err;
}
static __net_exit void proc_net_ns_exit(struct net *net)
{
remove_proc_entry("stat", net->proc_net);
pde_free(net->proc_net);
}
static struct pernet_operations __net_initdata proc_net_ns_ops = {
.init = proc_net_ns_init,
.exit = proc_net_ns_exit,
};
int __init proc_net_init(void)
{
proc_symlink("net", NULL, "self/net");
return register_pernet_subsys(&proc_net_ns_ops);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file provides wrappers with sanitizer instrumentation for bit
* locking operations.
*
* To use this functionality, an arch's bitops.h file needs to define each of
* the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
* arch___set_bit(), etc.).
*/
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
kcsan_release();
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
/**
* __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* This is a non-atomic operation but implies a release barrier before the
* memory operation. It can be used for an unlock if no other CPUs can
* concurrently modify other bits in the word.
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
kcsan_release();
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics if
* the returned value is 0.
* It can be used to implement bit locks.
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
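/*
 * Illustrative sketch (not part of the original header): the classic bit-lock
 * pattern these helpers support. The lock word, bit number and the use of
 * cpu_relax() in the including context are assumptions for the example.
 */
static inline void example_bit_lock(unsigned long *word)
{
	/* Spin until we observe the bit clear and manage to set it. */
	while (test_and_set_bit_lock(0, word))
		cpu_relax();

	/* ... critical section ... */

	clear_bit_unlock(0, word);	/* release semantics */
}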
/**
* xor_unlock_is_negative_byte - XOR a single byte in memory and test if
* it is negative, for unlock.
* @mask: Change the bits which are set in this mask.
* @addr: The address of the word containing the byte to change.
*
* Changes some of bits 0-6 in the word pointed to by @addr.
* This operation is atomic and provides release barrier semantics.
* Used to optimise some folio operations which are commonly paired
* with an unlock or end of writeback. Bit 7 is used as PG_waiters to
* indicate whether anybody is waiting for the unlock.
*
* Return: Whether the top bit of the byte is set.
*/
static inline bool xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
kcsan_release();
instrument_atomic_write(addr, sizeof(long));
return arch_xor_unlock_is_negative_byte(mask, addr);
}
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/super.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* super.c contains code to handle: - mount structures
* - super-block tables
* - filesystem drivers list
* - mount system call
* - umount system call
* - ustat system call
*
* GK 2/5/95 - Changed to support mounting the root fs via NFS
*
* Added kerneld support: Jacques Gelinas and Bjorn Ekwall
* Added change_root: Werner Almesberger & Hans Lermen, Feb '96
* Added options to /proc/mounts:
* Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
* Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
* Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h> /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
const void *freeze_owner);
static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);
static char *sb_writers_name[SB_FREEZE_LEVELS] = {
"sb_writers",
"sb_pagefaults",
"sb_internal",
};
static inline void __super_lock(struct super_block *sb, bool excl)
{
if (excl)
down_write(&sb->s_umount);
else
down_read(&sb->s_umount);
}
static inline void super_unlock(struct super_block *sb, bool excl)
{
if (excl)
up_write(&sb->s_umount);
else
up_read(&sb->s_umount);
}
static inline void __super_lock_excl(struct super_block *sb)
{
__super_lock(sb, true);
}
static inline void super_unlock_excl(struct super_block *sb)
{
super_unlock(sb, true);
}
static inline void super_unlock_shared(struct super_block *sb)
{
super_unlock(sb, false);
}
static bool super_flags(const struct super_block *sb, unsigned int flags)
{
/*
* Pairs with smp_store_release() in super_wake() and ensures
* that we see @flags after we're woken.
*/
return smp_load_acquire(&sb->s_flags) & flags;
}
/**
* super_lock - wait for superblock to become ready and lock it
* @sb: superblock to wait for
* @excl: whether exclusive access is required
*
* If the superblock has passed through neither vfs_get_tree() nor
* generic_shutdown_super() yet, wait for that to happen. Either superblock
* creation will succeed and SB_BORN is set by vfs_get_tree(), or we're
* woken and we'll see SB_DYING.
*
* The caller must have acquired a temporary reference on @sb->s_count.
*
* Return: The function returns true if SB_BORN was set, with
* s_umount held. It returns false if SB_DYING was set, without
* s_umount held.
*/
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
lockdep_assert_not_held(&sb->s_umount);
/* wait until the superblock is ready or dying */
wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));
/* Don't pointlessly acquire s_umount. */
if (super_flags(sb, SB_DYING))
return false;
__super_lock(sb, excl);
/*
* Has gone through generic_shutdown_super() in the meantime.
* @sb->s_root is NULL and @sb->s_active is 0. No one needs to
* grab a reference to this. Tell them so.
*/
if (sb->s_flags & SB_DYING) {
super_unlock(sb, excl);
return false;
}
WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
return true;
}
/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
return super_lock(sb, false);
}
/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
return super_lock(sb, true);
}
/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
/*
* Pairs with smp_load_acquire() in super_lock() to make sure
* all initializations in the superblock are seen by the user
* seeing SB_BORN set.
*/
smp_store_release(&sb->s_flags, sb->s_flags | flag);
/*
* Pairs with the barrier in prepare_to_wait_event() to make sure
* ___wait_var_event() either sees SB_BORN set or
* waitqueue_active() check in wake_up_var() sees the waiter.
*/
smp_mb();
wake_up_var(&sb->s_flags);
}
/*
* One thing we have to be careful of with a per-sb shrinker is that we don't
* drop the last active reference to the superblock from within the shrinker.
* If that happens we could trigger unregistering the shrinker from within the
* shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
* take a passive reference to the superblock to avoid this from occurring.
*/
static unsigned long super_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct super_block *sb;
long fs_objects = 0;
long total_objects;
long freed = 0;
long dentries;
long inodes;
sb = shrink->private_data;
/*
* Deadlock avoidance. We may hold various FS locks, and we don't want
* to recurse into the FS that called us in clear_inode() and friends..
*/
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
if (!super_trylock_shared(sb))
return SHRINK_STOP;
if (sb->s_op->nr_cached_objects)
fs_objects = sb->s_op->nr_cached_objects(sb, sc);
inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
total_objects = dentries + inodes + fs_objects;
if (!total_objects)
total_objects = 1;
/* proportion the scan between the caches */
dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
/*
* prune the dcache first as the icache is pinned by it, then
* prune the icache, followed by the filesystem specific caches
*
* Ensure that we always scan at least one object - memcg kmem
* accounting uses this to fully empty the caches.
*/
sc->nr_to_scan = dentries + 1;
freed = prune_dcache_sb(sb, sc);
sc->nr_to_scan = inodes + 1;
freed += prune_icache_sb(sb, sc);
if (fs_objects) {
sc->nr_to_scan = fs_objects + 1;
freed += sb->s_op->free_cached_objects(sb, sc);
}
super_unlock_shared(sb);
return freed;
}
static unsigned long super_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct super_block *sb;
long total_objects = 0;
sb = shrink->private_data;
/*
* We don't call super_trylock_shared() here as it is a scalability
* bottleneck, so we're exposed to partial setup state. The shrinker
* rwsem does not protect filesystem operations backing
* list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
* change between super_cache_count and super_cache_scan, so we really
* don't need locks here.
*
* However, if we are currently mounting the superblock, the underlying
* filesystem might be in a state of partial construction and hence it
* is dangerous to access it. super_trylock_shared() uses a SB_BORN check
* to avoid this situation, so do the same here. The memory barrier is
* matched with the one in mount_fs() as we don't hold locks here.
*/
if (!(sb->s_flags & SB_BORN))
return 0;
smp_rmb();
if (sb->s_op && sb->s_op->nr_cached_objects)
total_objects = sb->s_op->nr_cached_objects(sb, sc);
total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
if (!total_objects)
return SHRINK_EMPTY;
total_objects = vfs_pressure_ratio(total_objects);
return total_objects;
}
static void destroy_super_work(struct work_struct *work)
{
struct super_block *s = container_of(work, struct super_block,
destroy_work);
fsnotify_sb_free(s);
security_sb_free(s);
put_user_ns(s->s_user_ns);
kfree(s->s_subtype);
for (int i = 0; i < SB_FREEZE_LEVELS; i++)
percpu_free_rwsem(&s->s_writers.rw_sem[i]);
kfree(s);
}
static void destroy_super_rcu(struct rcu_head *head)
{
struct super_block *s = container_of(head, struct super_block, rcu);
INIT_WORK(&s->destroy_work, destroy_super_work);
schedule_work(&s->destroy_work);
}
/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
if (!s)
return;
super_unlock_excl(s);
list_lru_destroy(&s->s_dentry_lru);
list_lru_destroy(&s->s_inode_lru);
shrinker_free(s->s_shrink);
/* no delays needed */
destroy_super_work(&s->destroy_work);
}
/**
* alloc_super - create new superblock
* @type: filesystem type superblock should belong to
* @flags: the mount flags
* @user_ns: User namespace for the super_block
*
* Allocates and initializes a new &struct super_block. alloc_super()
* returns a pointer to the new superblock or %NULL if allocation failed.
*/
static struct super_block *alloc_super(struct file_system_type *type, int flags,
struct user_namespace *user_ns)
{
struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
static const struct super_operations default_op;
int i;
if (!s)
return NULL;
s->s_user_ns = get_user_ns(user_ns);
init_rwsem(&s->s_umount);
lockdep_set_class(&s->s_umount, &type->s_umount_key);
/*
* sget() can have s_umount recursion.
*
* When it cannot find a suitable sb, it allocates a new
* one (this one), and tries again to find a suitable old
* one.
*
* In case that succeeds, it will acquire the s_umount
* lock of the old one. Since these are clearly distinct
* locks, and this object isn't exposed yet, there's no
* risk of deadlocks.
*
* Annotate this by putting this lock in a different
* subclass.
*/
down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
if (security_sb_alloc(s))
goto fail;
for (i = 0; i < SB_FREEZE_LEVELS; i++) {
if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
sb_writers_name[i],
&type->s_writers_key[i]))
goto fail;
}
s->s_bdi = &noop_backing_dev_info;
s->s_flags = flags;
if (s->s_user_ns != &init_user_ns)
s->s_iflags |= SB_I_NODEV;
INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_roots);
mutex_init(&s->s_sync_lock);
INIT_LIST_HEAD(&s->s_inodes);
spin_lock_init(&s->s_inode_list_lock);
INIT_LIST_HEAD(&s->s_inodes_wb);
spin_lock_init(&s->s_inode_wblist_lock);
s->s_count = 1;
atomic_set(&s->s_active, 1);
mutex_init(&s->s_vfs_rename_mutex);
lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
init_rwsem(&s->s_dquot.dqio_sem);
s->s_maxbytes = MAX_NON_LFS;
s->s_op = &default_op;
s->s_time_gran = 1000000000;
s->s_time_min = TIME64_MIN;
s->s_time_max = TIME64_MAX;
s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
"sb-%s", type->name);
if (!s->s_shrink)
goto fail;
s->s_shrink->scan_objects = super_cache_scan;
s->s_shrink->count_objects = super_cache_count;
s->s_shrink->batch = 1024;
s->s_shrink->private_data = s;
if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
goto fail;
if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
goto fail;
return s;
fail:
destroy_unused_super(s);
return NULL;
}
/* Superblock refcounting */
/*
* Drop a superblock's refcount. The caller must hold sb_lock.
*/
static void __put_super(struct super_block *s)
{
if (!--s->s_count) {
list_del_init(&s->s_list);
WARN_ON(s->s_dentry_lru.node);
WARN_ON(s->s_inode_lru.node);
WARN_ON(s->s_mounts);
call_rcu(&s->rcu, destroy_super_rcu);
}
}
/**
* put_super - drop a temporary reference to superblock
* @sb: superblock in question
*
* Drops a temporary reference, frees superblock if there's no
* references left.
*/
void put_super(struct super_block *sb)
{
spin_lock(&sb_lock);
__put_super(sb);
spin_unlock(&sb_lock);
}
static void kill_super_notify(struct super_block *sb)
{
lockdep_assert_not_held(&sb->s_umount);
/* already notified earlier */
if (sb->s_flags & SB_DEAD)
return;
/*
* Remove it from @fs_supers so it isn't found by new
* sget{_fc}() walkers anymore. Any concurrent mounter still
* managing to grab a temporary reference is guaranteed to
* already see SB_DYING and will wait until we notify them about
* SB_DEAD.
*/
spin_lock(&sb_lock);
hlist_del_init(&sb->s_instances);
spin_unlock(&sb_lock);
/*
* Let concurrent mounts know that this thing is really dead.
* We don't need @sb->s_umount here as every concurrent caller
* will see SB_DYING and either discard the superblock or wait
* for SB_DEAD.
*/
super_wake(sb, SB_DEAD);
}
/**
* deactivate_locked_super - drop an active reference to superblock
* @s: superblock to deactivate
*
* Drops an active reference to superblock, converting it into a temporary
* one if there is no other active references left. In that case we
* tell fs driver to shut it down and drop the temporary reference we
* had just acquired.
*
* Caller holds exclusive lock on superblock; that lock is released.
*/
void deactivate_locked_super(struct super_block *s)
{
struct file_system_type *fs = s->s_type;
if (atomic_dec_and_test(&s->s_active)) {
shrinker_free(s->s_shrink);
fs->kill_sb(s);
kill_super_notify(s);
/*
* Since list_lru_destroy() may sleep, we cannot call it from
* put_super(), where we hold the sb_lock. Therefore we destroy
* the lru lists right now.
*/
list_lru_destroy(&s->s_dentry_lru);
list_lru_destroy(&s->s_inode_lru);
put_filesystem(fs);
put_super(s);
} else {
super_unlock_excl(s);
}
}
EXPORT_SYMBOL(deactivate_locked_super);
/**
* deactivate_super - drop an active reference to superblock
* @s: superblock to deactivate
*
* Variant of deactivate_locked_super(), except that superblock is *not*
* locked by caller. If we are going to drop the final active reference,
* lock will be acquired prior to that.
*/
void deactivate_super(struct super_block *s)
{
if (!atomic_add_unless(&s->s_active, -1, 1)) {
__super_lock_excl(s);
deactivate_locked_super(s);
}
}
EXPORT_SYMBOL(deactivate_super);
/**
* grab_super - acquire an active reference to a superblock
* @sb: superblock to acquire
*
* Acquire a temporary reference on a superblock and try to trade it for
* an active reference. This is used in sget{_fc}() to wait for a
* superblock to either become SB_BORN or for it to pass through
* sb->kill() and be marked as SB_DEAD.
*
* Return: This returns true if an active reference could be acquired,
* false if not.
*/
static bool grab_super(struct super_block *sb)
{
bool locked;
sb->s_count++;
spin_unlock(&sb_lock);
locked = super_lock_excl(sb);
if (locked) {
if (atomic_inc_not_zero(&sb->s_active)) {
put_super(sb);
return true;
}
super_unlock_excl(sb);
}
wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
put_super(sb);
return false;
}
/*
* super_trylock_shared - try to grab ->s_umount shared
* @sb: reference we are trying to grab
*
* Try to prevent fs shutdown. This is used in places where we
* cannot take an active reference but we need to ensure that the
* filesystem is not shut down while we are working on it. It returns
* false if we cannot acquire s_umount or if we lose the race and
* filesystem already got into shutdown, and returns true with the s_umount
* lock held in read mode in case of success. On successful return,
* the caller must drop the s_umount lock when done.
*
* Note that unlike get_super() et al. this one does *not* bump ->s_count.
* The reason why it's safe is that we are OK with doing trylock instead
* of down_read(). There's a couple of places that are OK with that, but
* it's very much not a general-purpose interface.
*/
bool super_trylock_shared(struct super_block *sb)
{
if (down_read_trylock(&sb->s_umount)) {
if (!(sb->s_flags & SB_DYING) && sb->s_root &&
(sb->s_flags & SB_BORN))
return true;
super_unlock_shared(sb);
}
return false;
}
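/*
 * Illustrative sketch (not part of the original file): the typical
 * super_trylock_shared() pattern used by scanners that must not block on
 * s_umount or pin the superblock. The function name and the work done under
 * the lock are hypothetical.
 */
static void example_scan_sb(struct super_block *sb)
{
	if (!super_trylock_shared(sb))
		return;	/* fs is shutting down or lock is contended; skip it */

	/* ... inspect superblock state under the shared lock ... */

	super_unlock_shared(sb);
}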
/**
* retire_super - prevents superblock from being reused
* @sb: superblock to retire
*
* The function marks superblock to be ignored in superblock test, which
* prevents it from being reused for any new mounts. If the superblock has
* a private bdi, it also unregisters it, but doesn't reduce the refcount
* of the superblock to prevent potential races. The refcount is reduced
* by generic_shutdown_super(). The function cannot be called
* concurrently with generic_shutdown_super(). It is safe to call the
* function multiple times, subsequent calls have no effect.
*
* The marker will affect the re-use only for block-device-based
* superblocks. Other superblocks will still get marked if this function
* is used, but that will not affect their reusability.
*/
void retire_super(struct super_block *sb)
{
WARN_ON(!sb->s_bdev);
__super_lock_excl(sb);
if (sb->s_iflags & SB_I_PERSB_BDI) {
bdi_unregister(sb->s_bdi);
sb->s_iflags &= ~SB_I_PERSB_BDI;
}
sb->s_iflags |= SB_I_RETIRED;
super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);
/**
* generic_shutdown_super - common helper for ->kill_sb()
* @sb: superblock to kill
*
* generic_shutdown_super() does all fs-independent work on superblock
* shutdown. Typical ->kill_sb() should pick all fs-specific objects
* that need destruction out of superblock, call generic_shutdown_super()
* and release aforementioned objects. Note: dentries and inodes _are_
* taken care of and do not need specific handling.
*
* Upon calling this function, the filesystem may no longer alter or
* rearrange the set of dentries belonging to this super_block, nor may it
* change the attachments of dentries to inodes.
*/
void generic_shutdown_super(struct super_block *sb)
{
const struct super_operations *sop = sb->s_op;
if (sb->s_root) {
shrink_dcache_for_umount(sb);
sync_filesystem(sb);
sb->s_flags &= ~SB_ACTIVE;
cgroup_writeback_umount(sb);
/* Evict all inodes with zero refcount. */
evict_inodes(sb);
/*
* Clean up and evict any inodes that still have references due
* to fsnotify or the security policy.
*/
fsnotify_sb_delete(sb);
security_sb_delete(sb);
if (sb->s_dio_done_wq) {
destroy_workqueue(sb->s_dio_done_wq);
sb->s_dio_done_wq = NULL;
}
if (sop->put_super)
sop->put_super(sb);
/*
* Now that all potentially-encrypted inodes have been evicted,
* the fscrypt keyring can be destroyed.
*/
fscrypt_destroy_keyring(sb);
if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
"VFS: Busy inodes after unmount of %s (%s)",
sb->s_id, sb->s_type->name)) {
/*
* Adding a proper bailout path here would be hard, but
* we can at least make it more likely that a later
* iput_final() or such crashes cleanly.
*/
struct inode *inode;
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
inode->i_op = VFS_PTR_POISON;
inode->i_sb = VFS_PTR_POISON;
inode->i_mapping = VFS_PTR_POISON;
}
spin_unlock(&sb->s_inode_list_lock);
}
}
/*
* Broadcast to everyone that grabbed a temporary reference to this
* superblock before we removed it from @fs_supers that the superblock
* is dying. Every walker of @fs_supers outside of sget{_fc}() will now
* discard this superblock and treat it as dead.
*
* We leave the superblock on @fs_supers so it can be found by
* sget{_fc}() until we passed sb->kill_sb().
*/
super_wake(sb, SB_DYING);
super_unlock_excl(sb);
if (sb->s_bdi != &noop_backing_dev_info) {
if (sb->s_iflags & SB_I_PERSB_BDI)
bdi_unregister(sb->s_bdi);
bdi_put(sb->s_bdi);
sb->s_bdi = &noop_backing_dev_info;
}
}
EXPORT_SYMBOL(generic_shutdown_super);
bool mount_capable(struct fs_context *fc)
{
if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
return capable(CAP_SYS_ADMIN);
else
return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}
/**
* sget_fc - Find or create a superblock
* @fc: Filesystem context.
* @test: Comparison callback
* @set: Setup callback
*
* Create a new superblock or find an existing one.
*
* The @test callback is used to find a matching existing superblock.
* Whether or not the requested parameters in @fc are taken into account
* is specific to the @test callback that is used. They may even be
* completely ignored.
*
* If an extant superblock is matched, it will be returned unless:
*
* (1) the namespace the filesystem context @fc and the extant
* superblock's namespace differ
*
* (2) the filesystem context @fc has requested that reusing an extant
* superblock is not allowed
*
* In both cases EBUSY will be returned.
*
* If no match is made, a new superblock will be allocated and basic
* initialisation will be performed (s_type, s_fs_info and s_id will be
* set and the @set callback will be invoked), the superblock will be
* published and it will be returned in a partially constructed state
* with SB_BORN and SB_ACTIVE as yet unset.
*
* Return: On success, an extant or newly created superblock is
* returned. On failure an error pointer is returned.
*/
struct super_block *sget_fc(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
int (*set)(struct super_block *, struct fs_context *))
{
struct super_block *s = NULL;
struct super_block *old;
struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
int err;
/*
* Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
* not set, as the filesystem is likely unprepared to handle it.
* This can happen when fsconfig() is called from init_user_ns with
* an fs_fd opened in another user namespace.
*/
if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
return ERR_PTR(-EPERM);
}
retry:
spin_lock(&sb_lock);
if (test) {
hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
if (test(old, fc))
goto share_extant_sb;
}
}
if (!s) {
spin_unlock(&sb_lock);
s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
if (!s)
return ERR_PTR(-ENOMEM);
goto retry;
}
s->s_fs_info = fc->s_fs_info;
err = set(s, fc);
if (err) {
s->s_fs_info = NULL;
spin_unlock(&sb_lock);
destroy_unused_super(s);
return ERR_PTR(err);
}
fc->s_fs_info = NULL;
s->s_type = fc->fs_type;
s->s_iflags |= fc->s_iflags;
strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
/*
* Make the superblock visible on @super_blocks and @fs_supers.
* It's in a nascent state and users should wait on SB_BORN or
* SB_DYING to be set.
*/
list_add_tail(&s->s_list, &super_blocks);
hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(s->s_type);
shrinker_register(s->s_shrink);
return s;
share_extant_sb:
if (user_ns != old->s_user_ns || fc->exclusive) {
spin_unlock(&sb_lock);
destroy_unused_super(s);
if (fc->exclusive)
warnfc(fc, "reusing existing filesystem not allowed");
else
warnfc(fc, "reusing existing filesystem in another namespace not allowed"); return ERR_PTR(-EBUSY);
}
if (!grab_super(old))
goto retry;
destroy_unused_super(s);
return old;
}
EXPORT_SYMBOL(sget_fc);
/**
* sget - find or create a superblock
* @type: filesystem type superblock should belong to
* @test: comparison callback
* @set: setup callback
* @flags: mount flags
* @data: argument to each of them
*/
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags,
void *data)
{
struct user_namespace *user_ns = current_user_ns();
struct super_block *s = NULL;
struct super_block *old;
int err;
retry:
spin_lock(&sb_lock);
if (test) {
hlist_for_each_entry(old, &type->fs_supers, s_instances) {
if (!test(old, data))
continue;
if (user_ns != old->s_user_ns) {
spin_unlock(&sb_lock);
destroy_unused_super(s);
return ERR_PTR(-EBUSY);
}
if (!grab_super(old))
goto retry;
destroy_unused_super(s);
return old;
}
}
if (!s) {
spin_unlock(&sb_lock);
s = alloc_super(type, flags, user_ns);
if (!s)
return ERR_PTR(-ENOMEM);
goto retry;
}
err = set(s, data);
if (err) {
spin_unlock(&sb_lock);
destroy_unused_super(s);
return ERR_PTR(err);
}
s->s_type = type;
strscpy(s->s_id, type->name, sizeof(s->s_id));
list_add_tail(&s->s_list, &super_blocks);
hlist_add_head(&s->s_instances, &type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(type);
shrinker_register(s->s_shrink);
return s;
}
EXPORT_SYMBOL(sget);
void drop_super(struct super_block *sb)
{
super_unlock_shared(sb);
put_super(sb);
}
EXPORT_SYMBOL(drop_super);
void drop_super_exclusive(struct super_block *sb)
{
super_unlock_excl(sb);
put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);
enum super_iter_flags_t {
SUPER_ITER_EXCL = (1U << 0),
SUPER_ITER_UNLOCKED = (1U << 1),
SUPER_ITER_REVERSE = (1U << 2),
};
static inline struct super_block *first_super(enum super_iter_flags_t flags)
{
if (flags & SUPER_ITER_REVERSE)
return list_last_entry(&super_blocks, struct super_block, s_list);
return list_first_entry(&super_blocks, struct super_block, s_list);
}
static inline struct super_block *next_super(struct super_block *sb,
enum super_iter_flags_t flags)
{
if (flags & SUPER_ITER_REVERSE)
return list_prev_entry(sb, s_list);
return list_next_entry(sb, s_list);
}
static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg,
enum super_iter_flags_t flags)
{
struct super_block *sb, *p = NULL;
bool excl = flags & SUPER_ITER_EXCL;
guard(spinlock)(&sb_lock);
for (sb = first_super(flags);
!list_entry_is_head(sb, &super_blocks, s_list);
sb = next_super(sb, flags)) {
if (super_flags(sb, SB_DYING))
continue;
sb->s_count++;
spin_unlock(&sb_lock);
if (flags & SUPER_ITER_UNLOCKED) {
f(sb, arg);
} else if (super_lock(sb, excl)) {
f(sb, arg);
super_unlock(sb, excl);
}
spin_lock(&sb_lock);
if (p)
__put_super(p);
p = sb;
}
if (p)
__put_super(p);
}
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
__iterate_supers(f, arg, 0);
}
/**
* iterate_supers_type - call function for superblocks of given type
* @type: fs type
* @f: function to call
* @arg: argument to pass to it
*
* Scans the superblock list and calls given function, passing it
* locked superblock and given argument.
*/
void iterate_supers_type(struct file_system_type *type,
void (*f)(struct super_block *, void *), void *arg)
{
struct super_block *sb, *p = NULL;
spin_lock(&sb_lock);
hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
bool locked;
if (super_flags(sb, SB_DYING))
continue;
sb->s_count++;
spin_unlock(&sb_lock);
locked = super_lock_shared(sb);
if (locked) {
f(sb, arg);
super_unlock_shared(sb);
}
spin_lock(&sb_lock);
if (p)
__put_super(p);
p = sb;
}
if (p)
__put_super(p);
spin_unlock(&sb_lock);
}
EXPORT_SYMBOL(iterate_supers_type);
struct super_block *user_get_super(dev_t dev, bool excl)
{
struct super_block *sb;
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
bool locked;
if (sb->s_dev != dev)
continue;
sb->s_count++;
spin_unlock(&sb_lock);
locked = super_lock(sb, excl);
if (locked)
return sb;
spin_lock(&sb_lock);
__put_super(sb);
break;
}
spin_unlock(&sb_lock);
return NULL;
}
/**
* reconfigure_super - asks filesystem to change superblock parameters
* @fc: The superblock and configuration
*
* Alters the configuration parameters of a live superblock.
*/
int reconfigure_super(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
int retval;
bool remount_ro = false;
bool remount_rw = false;
bool force = fc->sb_flags & SB_FORCE;
if (fc->sb_flags_mask & ~MS_RMT_MASK)
return -EINVAL;
if (sb->s_writers.frozen != SB_UNFROZEN)
return -EBUSY;
retval = security_sb_remount(sb, fc->security);
if (retval)
return retval;
if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
bdev_read_only(sb->s_bdev))
return -EACCES;
#endif
remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
}
if (remount_ro) {
if (!hlist_empty(&sb->s_pins)) {
super_unlock_excl(sb);
group_pin_kill(&sb->s_pins);
__super_lock_excl(sb);
if (!sb->s_root)
return 0;
if (sb->s_writers.frozen != SB_UNFROZEN)
return -EBUSY;
remount_ro = !sb_rdonly(sb);
}
}
shrink_dcache_sb(sb);
/* If we are reconfiguring to RDONLY and current sb is read/write,
* make sure there are no files open for writing.
*/
if (remount_ro) {
if (force) {
sb_start_ro_state_change(sb);
} else {
retval = sb_prepare_remount_readonly(sb);
if (retval)
return retval;
}
} else if (remount_rw) {
/*
* Protect filesystem's reconfigure code from writes from
* userspace until reconfigure finishes.
*/
sb_start_ro_state_change(sb);
}
if (fc->ops->reconfigure) {
retval = fc->ops->reconfigure(fc);
if (retval) {
if (!force)
goto cancel_readonly;
/* If forced remount, go ahead despite any errors */
WARN(1, "forced remount of a %s fs returned %i\n",
sb->s_type->name, retval);
}
}
WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
(fc->sb_flags & fc->sb_flags_mask)));
sb_end_ro_state_change(sb);
/*
* Some filesystems modify their metadata via some other path than the
* bdev buffer cache (eg. use a private mapping, or directories in
* pagecache, etc). Also file data modifications go via their own
* mappings. So if we try to mount read-only and then copy the filesystem
* from bdev, we could get stale data, so invalidate it to give a best
* effort at coherency.
*/
if (remount_ro && sb->s_bdev)
invalidate_bdev(sb->s_bdev);
return 0;
cancel_readonly:
sb_end_ro_state_change(sb);
return retval;
}
static void do_emergency_remount_callback(struct super_block *sb, void *unused)
{
if (sb->s_bdev && !sb_rdonly(sb)) {
struct fs_context *fc;
fc = fs_context_for_reconfigure(sb->s_root,
SB_RDONLY | SB_FORCE, SB_RDONLY);
if (!IS_ERR(fc)) {
if (parse_monolithic_mount_data(fc, NULL) == 0)
(void)reconfigure_super(fc);
put_fs_context(fc);
}
}
}
static void do_emergency_remount(struct work_struct *work)
{
__iterate_supers(do_emergency_remount_callback, NULL,
SUPER_ITER_EXCL | SUPER_ITER_REVERSE);
kfree(work);
printk("Emergency Remount complete\n");
}
void emergency_remount(void)
{
struct work_struct *work;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work) {
INIT_WORK(work, do_emergency_remount);
schedule_work(work);
}
}
static void do_thaw_all_callback(struct super_block *sb, void *unused)
{
if (IS_ENABLED(CONFIG_BLOCK))
while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL);
return;
}
static void do_thaw_all(struct work_struct *work)
{
__iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL);
kfree(work);
printk(KERN_WARNING "Emergency Thaw complete\n");
}
/**
* emergency_thaw_all -- forcibly thaw every frozen filesystem
*
* Used for emergency unfreeze of all filesystems via SysRq
*/
void emergency_thaw_all(void)
{
struct work_struct *work;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work) {
INIT_WORK(work, do_thaw_all);
schedule_work(work);
}
}
static inline bool get_active_super(struct super_block *sb)
{
bool active = false;
if (super_lock_excl(sb)) {
active = atomic_inc_not_zero(&sb->s_active);
super_unlock_excl(sb);
}
return active;
}
static const char *filesystems_freeze_ptr = "filesystems_freeze";
static void filesystems_freeze_callback(struct super_block *sb, void *freeze_all_ptr)
{
if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
return;
if (freeze_all_ptr && !(sb->s_type->fs_flags & FS_POWER_FREEZE))
return;
if (!get_active_super(sb))
return;
if (sb->s_op->freeze_super)
sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
filesystems_freeze_ptr);
else
freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
filesystems_freeze_ptr);
deactivate_super(sb);
}
void filesystems_freeze(bool freeze_all)
{
void *freeze_all_ptr = NULL;
if (freeze_all)
freeze_all_ptr = &freeze_all;
__iterate_supers(filesystems_freeze_callback, freeze_all_ptr,
SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}
static void filesystems_thaw_callback(struct super_block *sb, void *unused)
{
if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
return;
if (!get_active_super(sb))
return;
if (sb->s_op->thaw_super)
sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
filesystems_freeze_ptr);
else
thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
filesystems_freeze_ptr);
deactivate_super(sb);
}
void filesystems_thaw(void)
{
__iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED);
}
static DEFINE_IDA(unnamed_dev_ida);
/**
* get_anon_bdev - Allocate a block device for filesystems which don't have one.
* @p: Pointer to a dev_t.
*
* Filesystems which don't use real block devices can call this function
* to allocate a virtual block device.
*
* Context: Any context. Frequently called while holding sb_lock.
* Return: 0 on success, -EMFILE if there are no anonymous bdevs left
* or -ENOMEM if memory allocation failed.
*/
int get_anon_bdev(dev_t *p)
{
int dev;
/*
* Many userspace utilities consider an FSID of 0 invalid.
* Always return at least 1 from get_anon_bdev.
*/
dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
GFP_ATOMIC);
if (dev == -ENOSPC)
dev = -EMFILE;
if (dev < 0)
return dev;
*p = MKDEV(0, dev);
return 0;
}
EXPORT_SYMBOL(get_anon_bdev);
void free_anon_bdev(dev_t dev)
{
ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);
int set_anon_super(struct super_block *s, void *data)
{
return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);
void kill_anon_super(struct super_block *sb)
{
dev_t dev = sb->s_dev;
generic_shutdown_super(sb);
kill_super_notify(sb);
free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);
void kill_litter_super(struct super_block *sb)
{
if (sb->s_root)
d_genocide(sb->s_root);
kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);
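/*
 * Illustrative sketch (not part of the original source): a simple
 * in-memory filesystem typically uses kill_litter_super() as its
 * ->kill_sb so the extra dentry references it took are dropped before
 * the superblock is shut down via kill_anon_super(). The "examplefs"
 * identifiers are hypothetical; init_fs_context would be defined
 * elsewhere in such a module.
 */
static int examplefs_init_fs_context(struct fs_context *fc);

static struct file_system_type examplefs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "examplefs",
	.init_fs_context	= examplefs_init_fs_context,
	.kill_sb		= kill_litter_super,
};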
int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);
static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
return sb->s_fs_info == fc->s_fs_info;
}
static int test_single_super(struct super_block *s, struct fs_context *fc)
{
return 1;
}
static int vfs_get_super(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
{
struct super_block *sb;
int err;
sb = sget_fc(fc, test, set_anon_super_fc);
if (IS_ERR(sb))
return PTR_ERR(sb);
if (!sb->s_root) {
err = fill_super(sb, fc);
if (err)
goto error;
sb->s_flags |= SB_ACTIVE;
}
fc->root = dget(sb->s_root);
return 0;
error:
deactivate_locked_super(sb);
return err;
}
int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
{
return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);
int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
{
return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);
int get_tree_keyed(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc),
void *key)
{
fc->s_fs_info = key;
return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
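/*
 * Illustrative sketch (not part of the original source): a virtual
 * filesystem's ->get_tree() usually just chains to one of the helpers
 * above: get_tree_nodev() for a fresh superblock per mount,
 * get_tree_single() to share one superblock, or get_tree_keyed() to
 * share per key. The "examplefs" identifiers are hypothetical.
 */
static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc);

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, examplefs_fill_super);
}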
static int set_bdev_super(struct super_block *s, void *data)
{
s->s_dev = *(dev_t *)data;
return 0;
}
static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
return set_bdev_super(s, fc->sget_key);
}
static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
return !(s->s_iflags & SB_I_RETIRED) &&
s->s_dev == *(dev_t *)fc->sget_key;
}
/**
* sget_dev - Find or create a superblock by device number
* @fc: Filesystem context.
* @dev: device number
*
* Find or create a superblock using the provided device number that
* will be stored in fc->sget_key.
*
* If an extant superblock is matched, then that will be returned with
* an elevated reference count that the caller must transfer or discard.
*
* If no match is made, a new superblock will be allocated and basic
* initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
* be set). The superblock will be published and it will be returned in
* a partially constructed state with SB_BORN and SB_ACTIVE as yet
* unset.
*
* Return: an existing or newly created superblock on success, an error
* pointer on failure.
*/
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
fc->sget_key = &dev;
return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);
#ifdef CONFIG_BLOCK
/*
* Lock the superblock that is holder of the bdev. Returns the superblock
* pointer if we successfully locked the superblock and it is alive. Otherwise
* we return NULL and just unlock bdev->bd_holder_lock.
*
* The function must be called with bdev->bd_holder_lock held and releases it.
*/
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
__releases(&bdev->bd_holder_lock)
{
struct super_block *sb = bdev->bd_holder;
bool locked;
lockdep_assert_held(&bdev->bd_holder_lock);
lockdep_assert_not_held(&sb->s_umount);
lockdep_assert_not_held(&bdev->bd_disk->open_mutex);
/* Make sure sb doesn't go away from under us */
spin_lock(&sb_lock);
sb->s_count++;
spin_unlock(&sb_lock);
mutex_unlock(&bdev->bd_holder_lock);
locked = super_lock(sb, excl);
/*
* If the superblock wasn't already SB_DYING then we hold
* s_umount and can safely drop our temporary reference.
*/
put_super(sb);
if (!locked)
return NULL;
if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
super_unlock(sb, excl);
return NULL;
}
return sb;
}
static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
struct super_block *sb;
sb = bdev_super_lock(bdev, false);
if (!sb)
return;
if (sb->s_op->remove_bdev) {
int ret;
ret = sb->s_op->remove_bdev(sb, bdev);
if (!ret) {
super_unlock_shared(sb);
return;
}
/* Fallback to shutdown. */
}
if (!surprise)
sync_filesystem(sb);
shrink_dcache_sb(sb);
evict_inodes(sb);
if (sb->s_op->shutdown)
sb->s_op->shutdown(sb);
super_unlock_shared(sb);
}
static void fs_bdev_sync(struct block_device *bdev)
{
struct super_block *sb;
sb = bdev_super_lock(bdev, false);
if (!sb)
return;
sync_filesystem(sb);
super_unlock_shared(sb);
}
static struct super_block *get_bdev_super(struct block_device *bdev)
{
bool active = false;
struct super_block *sb;
sb = bdev_super_lock(bdev, true);
if (sb) {
active = atomic_inc_not_zero(&sb->s_active);
super_unlock_excl(sb);
}
if (!active)
return NULL;
return sb;
}
/**
* fs_bdev_freeze - freeze owning filesystem of block device
* @bdev: block device
*
* Freeze the filesystem that owns this block device if it is still
* active.
*
* A filesystem that owns multiple block devices may be frozen from each
* block device and won't be unfrozen until all block devices are
* unfrozen. Each block device can only freeze the filesystem once as we
* nest freezes for block devices in the block layer.
*
* Return: If the freeze was successful zero is returned. If the freeze
* failed a negative error code is returned.
*/
static int fs_bdev_freeze(struct block_device *bdev)
{
struct super_block *sb;
int error = 0;
lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
sb = get_bdev_super(bdev);
if (!sb)
return -EINVAL;
if (sb->s_op->freeze_super)
error = sb->s_op->freeze_super(sb,
FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
else
error = freeze_super(sb,
FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
if (!error)
error = sync_blockdev(bdev);
deactivate_super(sb);
return error;
}
/**
* fs_bdev_thaw - thaw owning filesystem of block device
* @bdev: block device
*
* Thaw the filesystem that owns this block device.
*
* A filesystem that owns multiple block devices may be frozen from each
* block device and won't be unfrozen until all block devices are
* unfrozen. Each block device can only freeze the filesystem once as we
* nest freezes for block devices in the block layer.
*
* Return: If the thaw was successful zero is returned. If the thaw
* failed a negative error code is returned. If this function
* returns zero it doesn't mean that the filesystem is unfrozen
* as it may have been frozen multiple times (kernel may hold a
* freeze or might be frozen from other block devices).
*/
static int fs_bdev_thaw(struct block_device *bdev)
{
struct super_block *sb;
int error;
lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
/*
* The block device may have been frozen before it was claimed by a
* filesystem. Concurrently another process might try to mount that
* frozen block device and has temporarily claimed the block device for
* that purpose causing a concurrent fs_bdev_thaw() to end up here. The
* mounter is already about to abort mounting because they still saw an
* elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
* NULL in that case.
*/
sb = get_bdev_super(bdev);
if (!sb)
return -EINVAL;
if (sb->s_op->thaw_super)
error = sb->s_op->thaw_super(sb,
FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
else
error = thaw_super(sb,
FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
deactivate_super(sb);
return error;
}
const struct blk_holder_ops fs_holder_ops = {
.mark_dead = fs_bdev_mark_dead,
.sync = fs_bdev_sync,
.freeze = fs_bdev_freeze,
.thaw = fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
int setup_bdev_super(struct super_block *sb, int sb_flags,
struct fs_context *fc)
{
blk_mode_t mode = sb_open_mode(sb_flags);
struct file *bdev_file;
struct block_device *bdev;
bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
if (IS_ERR(bdev_file)) {
if (fc)
errorf(fc, "%s: Can't open blockdev", fc->source);
return PTR_ERR(bdev_file);
}
bdev = file_bdev(bdev_file);
/*
* This really should be in blkdev_get_by_dev, but right now can't due
* to legacy issues that require us to allow opening a block device node
* writable from userspace even for a read-only block device.
*/
if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
bdev_fput(bdev_file);
return -EACCES;
}
/*
* It is enough to check bdev was not frozen before we set
* s_bdev as freezing will wait until SB_BORN is set.
*/
if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
if (fc)
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
bdev_fput(bdev_file);
return -EBUSY;
}
spin_lock(&sb_lock);
sb->s_bdev_file = bdev_file;
sb->s_bdev = bdev;
sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
if (bdev_stable_writes(bdev))
sb->s_iflags |= SB_I_STABLE_WRITES;
spin_unlock(&sb_lock);
snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
sb->s_id);
sb_set_blocksize(sb, block_size(bdev));
return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);
/**
* get_tree_bdev_flags - Get a superblock based on a single block device
* @fc: The filesystem context holding the parameters
* @fill_super: Helper to initialise a new superblock
* @flags: GET_TREE_BDEV_* flags
*/
int get_tree_bdev_flags(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc), unsigned int flags)
{
struct super_block *s;
int error = 0;
dev_t dev;
if (!fc->source)
return invalf(fc, "No source specified");
error = lookup_bdev(fc->source, &dev);
if (error) {
if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
errorf(fc, "%s: Can't lookup blockdev", fc->source);
return error;
}
fc->sb_flags |= SB_NOSEC;
s = sget_dev(fc, dev);
if (IS_ERR(s))
return PTR_ERR(s);
if (s->s_root) {
/* Don't summarily change the RO/RW state. */
if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
deactivate_locked_super(s);
return -EBUSY;
}
} else {
error = setup_bdev_super(s, fc->sb_flags, fc);
if (!error)
error = fill_super(s, fc);
if (error) {
deactivate_locked_super(s);
return error;
}
s->s_flags |= SB_ACTIVE;
}
BUG_ON(fc->root);
fc->root = dget(s->s_root);
return 0;
}
EXPORT_SYMBOL_GPL(get_tree_bdev_flags);
/**
* get_tree_bdev - Get a superblock based on a single block device
* @fc: The filesystem context holding the parameters
* @fill_super: Helper to initialise a new superblock
*/
int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *,
struct fs_context *))
{
return get_tree_bdev_flags(fc, fill_super, 0);
}
EXPORT_SYMBOL(get_tree_bdev);
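/*
 * Illustrative sketch (not part of the original source): a block-device
 * backed filesystem implements ->get_tree() by handing its fill_super
 * helper to get_tree_bdev(), which looks up fc->source, attaches the
 * block device and calls the helper for a newly created superblock.
 * The "exampleblkfs" identifiers are hypothetical.
 */
static int exampleblkfs_fill_super(struct super_block *sb, struct fs_context *fc);

static int exampleblkfs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, exampleblkfs_fill_super);
}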
void kill_block_super(struct super_block *sb)
{
struct block_device *bdev = sb->s_bdev;
generic_shutdown_super(sb);
if (bdev) {
sync_blockdev(bdev);
bdev_fput(sb->s_bdev_file);
}
}
EXPORT_SYMBOL(kill_block_super);
#endif
/**
* vfs_get_tree - Get the mountable root
* @fc: The superblock configuration context.
*
* The filesystem is invoked to get or create a superblock which can then later
* be used for mounting. The filesystem places a pointer to the root to be
* used for mounting in @fc->root.
*/
int vfs_get_tree(struct fs_context *fc)
{
struct super_block *sb;
int error;
if (fc->root)
return -EBUSY;
/* Get the mountable root in fc->root, with a ref on the root and a ref
* on the superblock.
*/
error = fc->ops->get_tree(fc);
if (error < 0)
return error;
if (!fc->root) {
pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
fc->fs_type->name, error);
/* We don't know what the locking state of the superblock is -
* if there is a superblock.
*/
BUG();
}
sb = fc->root->d_sb;
WARN_ON(!sb->s_bdi);
/*
* super_wake() contains a memory barrier which also takes care of
* ordering for super_cache_count(). We place it before setting
* SB_BORN as the data dependency between the two functions is
* the superblock structure contents that we just set up, not
* the SB_BORN flag.
*/
super_wake(sb, SB_BORN);
error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
if (unlikely(error)) {
fc_drop_locked(fc);
return error;
}
/*
* filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
* but s_maxbytes was an unsigned long long for many releases. Throw
* this warning for a little while to try and catch filesystems that
* violate this rule.
*/
WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
return 0;
}
EXPORT_SYMBOL(vfs_get_tree);
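/*
 * Illustrative sketch (not part of the original source): a kernel-internal
 * mount builds an fs_context and ends up in vfs_get_tree(), here via
 * fc_mount() which wraps vfs_get_tree() and vfs_create_mount(). The
 * "mount_examplefs" name is hypothetical.
 */
static struct vfsmount *mount_examplefs(struct file_system_type *type)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(type, 0);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}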
/*
* Setup private BDI for given superblock. It gets automatically cleaned up
* in generic_shutdown_super().
*/
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
struct backing_dev_info *bdi;
int err;
va_list args;
bdi = bdi_alloc(NUMA_NO_NODE);
if (!bdi)
return -ENOMEM;
va_start(args, fmt);
err = bdi_register_va(bdi, fmt, args);
va_end(args);
if (err) {
bdi_put(bdi);
return err;
}
WARN_ON(sb->s_bdi != &noop_backing_dev_info);
sb->s_bdi = bdi;
sb->s_iflags |= SB_I_PERSB_BDI;
return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);
/*
* Setup private BDI for given superblock. It gets automatically cleaned up
* in generic_shutdown_super().
*/
int super_setup_bdi(struct super_block *sb)
{
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);
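/*
 * Illustrative sketch (not part of the original source): a fill_super
 * routine for a filesystem without a real backing device registers a
 * private BDI early, before the root inode is instantiated, so writeback
 * has somewhere to account dirty pages. The "examplefs" identifiers are
 * hypothetical.
 */
static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	int err;

	err = super_setup_bdi(sb);
	if (err)
		return err;

	/* ... set sb->s_op, create the root inode, set sb->s_root ... */
	return 0;
}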
/**
* sb_wait_write - wait until all writers to given file system finish
* @sb: the super for which we wait
* @level: type of writers we wait for (normal vs page fault)
*
* This function waits until there are no writers of given type to given file
* system.
*/
static void sb_wait_write(struct super_block *sb, int level)
{
percpu_down_write(sb->s_writers.rw_sem + level-1);
}
/*
* We are going to return to userspace and forget about these locks, the
* ownership goes to the caller of thaw_super() which does unlock().
*/
static void lockdep_sb_freeze_release(struct super_block *sb)
{
int level;
for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_);
}
/*
* Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
*/
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
int level;
for (level = 0; level < SB_FREEZE_LEVELS; ++level)
percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}
static void sb_freeze_unlock(struct super_block *sb, int level)
{
for (level--; level >= 0; level--)
percpu_up_write(sb->s_writers.rw_sem + level);
}
static int wait_for_partially_frozen(struct super_block *sb)
{
int ret = 0;
do {
unsigned short old = sb->s_writers.frozen;
up_write(&sb->s_umount);
ret = wait_var_event_killable(&sb->s_writers.frozen,
sb->s_writers.frozen != old);
down_write(&sb->s_umount);
} while (ret == 0 &&
sb->s_writers.frozen != SB_UNFROZEN &&
sb->s_writers.frozen != SB_FREEZE_COMPLETE);
return ret;
}
#define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL)
static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
{
WARN_ON_ONCE((who & ~FREEZE_FLAGS));
WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
if (who & FREEZE_HOLDER_KERNEL)
++sb->s_writers.freeze_kcount;
if (who & FREEZE_HOLDER_USERSPACE)
++sb->s_writers.freeze_ucount;
return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}
static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
{
WARN_ON_ONCE((who & ~FREEZE_FLAGS));
WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
--sb->s_writers.freeze_kcount;
if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
--sb->s_writers.freeze_ucount;
return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}
static inline bool may_freeze(struct super_block *sb, enum freeze_holder who,
const void *freeze_owner)
{
lockdep_assert_held(&sb->s_umount);
WARN_ON_ONCE((who & ~FREEZE_FLAGS));
WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
if (who & FREEZE_EXCL) {
if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
return false;
if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
return false;
if (WARN_ON_ONCE(!freeze_owner))
return false;
/* This freeze already has a specific owner. */
if (sb->s_writers.freeze_owner)
return false;
/*
* This is already frozen multiple times so we're just
* going to take a reference count and mark the freeze as
* being owned by the caller.
*/
if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount)
sb->s_writers.freeze_owner = freeze_owner;
return true;
}
if (who & FREEZE_HOLDER_KERNEL)
return (who & FREEZE_MAY_NEST) ||
sb->s_writers.freeze_kcount == 0;
if (who & FREEZE_HOLDER_USERSPACE)
return (who & FREEZE_MAY_NEST) ||
sb->s_writers.freeze_ucount == 0;
return false;
}
static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who,
const void *freeze_owner)
{
lockdep_assert_held(&sb->s_umount);
WARN_ON_ONCE((who & ~FREEZE_FLAGS));
WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
if (who & FREEZE_EXCL) {
if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
return false;
if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
return false;
if (WARN_ON_ONCE(!freeze_owner))
return false;
if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0))
return false;
/* This isn't exclusively frozen. */
if (!sb->s_writers.freeze_owner)
return false;
/* This isn't exclusively frozen by us. */
if (sb->s_writers.freeze_owner != freeze_owner)
return false;
/*
* This is still frozen multiple times so we're just
* going to drop our reference count and undo our
* exclusive freeze.
*/
if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1)
sb->s_writers.freeze_owner = NULL;
return true;
}
if (who & FREEZE_HOLDER_KERNEL) {
/*
* Someone's trying to steal the reference belonging to
* @sb->s_writers.freeze_owner.
*/
if (sb->s_writers.freeze_kcount == 1 &&
sb->s_writers.freeze_owner)
return false;
return sb->s_writers.freeze_kcount > 0;
}
if (who & FREEZE_HOLDER_USERSPACE)
return sb->s_writers.freeze_ucount > 0;
return false;
}
/**
* freeze_super - lock the filesystem and force it into a consistent state
* @sb: the super to lock
* @who: context that wants to freeze
* @freeze_owner: owner of the freeze
*
* Syncs the super to make sure the filesystem is consistent and calls the fs's
* freeze_fs. Subsequent calls to this without first thawing the fs may return
* -EBUSY.
*
* @who should be:
* * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
* * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
* * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
*
* The @who argument distinguishes between the kernel and userspace trying to
* freeze the filesystem. Although there cannot be multiple kernel freezes or
* multiple userspace freezes in effect at any given time, the kernel and
* userspace can both hold a filesystem frozen. The filesystem remains frozen
* until there are no kernel or userspace freezes in effect.
*
* A filesystem may hold multiple devices and thus a filesystem may be
* frozen through the block layer via multiple block devices. In this
* case the request is marked as being allowed to nest by passing
* FREEZE_MAY_NEST. The filesystem remains frozen until all block
* devices are unfrozen. If multiple freezes are attempted without
* FREEZE_MAY_NEST -EBUSY will be returned.
*
* During this function, sb->s_writers.frozen goes through these values:
*
* SB_UNFROZEN: File system is normal, all writes progress as usual.
*
* SB_FREEZE_WRITE: The file system is in the process of being frozen. New
* writes should be blocked, though page faults are still allowed. We wait for
* all writes to complete and then proceed to the next stage.
*
* SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
* but internal fs threads can still modify the filesystem (although they
* should not dirty new pages or inodes), writeback can run etc. After waiting
* for all running page faults we sync the filesystem which will clean all
* dirty pages and inodes (no new dirty pages or inodes can be created when
* sync is running).
*
* SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
* modification are blocked (e.g. XFS preallocation truncation on inode
* reclaim). This is usually implemented by blocking new transactions for
* filesystems that have them and need this additional guard. After all
* internal writers are finished we call ->freeze_fs() to finish filesystem
* freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
* mostly auxiliary for filesystems to verify they do not modify frozen fs.
*
* sb->s_writers.frozen is protected by sb->s_umount.
*
* Return: If the freeze was successful zero is returned. If the freeze
* failed a negative error code is returned.
*/
int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner)
{
int ret;
if (!super_lock_excl(sb)) {
WARN_ON_ONCE("Dying superblock while freezing!");
return -EINVAL;
}
atomic_inc(&sb->s_active);
retry:
if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
if (may_freeze(sb, who, freeze_owner))
ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
else
ret = -EBUSY;
/* All freezers share a single active reference. */
deactivate_locked_super(sb);
return ret;
}
if (sb->s_writers.frozen != SB_UNFROZEN) {
ret = wait_for_partially_frozen(sb);
if (ret) {
deactivate_locked_super(sb);
return ret;
}
goto retry;
}
if (sb_rdonly(sb)) {
/* Nothing to do really... */
WARN_ON_ONCE(freeze_inc(sb, who) > 1);
sb->s_writers.freeze_owner = freeze_owner;
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
wake_up_var(&sb->s_writers.frozen);
super_unlock_excl(sb);
return 0;
}
sb->s_writers.frozen = SB_FREEZE_WRITE;
/* Release s_umount to preserve sb_start_write -> s_umount ordering */
super_unlock_excl(sb);
sb_wait_write(sb, SB_FREEZE_WRITE);
__super_lock_excl(sb);
/* Now we go and block page faults... */
sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
/* All writers are done so after syncing there won't be dirty data */
ret = sync_filesystem(sb);
if (ret) {
sb->s_writers.frozen = SB_UNFROZEN;
sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
wake_up_var(&sb->s_writers.frozen);
deactivate_locked_super(sb);
return ret;
}
/* Now wait for internal filesystem counter */
sb->s_writers.frozen = SB_FREEZE_FS;
sb_wait_write(sb, SB_FREEZE_FS);
if (sb->s_op->freeze_fs) {
ret = sb->s_op->freeze_fs(sb);
if (ret) {
printk(KERN_ERR
"VFS:Filesystem freeze failed\n");
sb->s_writers.frozen = SB_UNFROZEN;
sb_freeze_unlock(sb, SB_FREEZE_FS);
wake_up_var(&sb->s_writers.frozen);
deactivate_locked_super(sb);
return ret;
}
}
/*
* For debugging purposes so that fs can warn if it sees write activity
* when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
*/
WARN_ON_ONCE(freeze_inc(sb, who) > 1);
sb->s_writers.freeze_owner = freeze_owner;
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
wake_up_var(&sb->s_writers.frozen);
lockdep_sb_freeze_release(sb);
super_unlock_excl(sb);
return 0;
}
EXPORT_SYMBOL(freeze_super);
/*
* Undoes the effect of a freeze_super() call. If the filesystem is
* frozen both by userspace and the kernel, a thaw call from either source
* removes that state without releasing the other state or unlocking the
* filesystem.
*/
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
const void *freeze_owner)
{
int error = -EINVAL;
if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
goto out_unlock;
if (!may_unfreeze(sb, who, freeze_owner))
goto out_unlock;
/*
* All freezers share a single active reference.
* So just unlock in case there are any left.
*/
if (freeze_dec(sb, who))
goto out_unlock;
if (sb_rdonly(sb)) {
sb->s_writers.frozen = SB_UNFROZEN;
sb->s_writers.freeze_owner = NULL;
wake_up_var(&sb->s_writers.frozen);
goto out_deactivate;
}
lockdep_sb_freeze_acquire(sb);
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
if (error) {
pr_err("VFS: Filesystem thaw failed\n");
freeze_inc(sb, who);
lockdep_sb_freeze_release(sb);
goto out_unlock;
}
}
sb->s_writers.frozen = SB_UNFROZEN;
sb->s_writers.freeze_owner = NULL;
wake_up_var(&sb->s_writers.frozen);
sb_freeze_unlock(sb, SB_FREEZE_FS);
out_deactivate:
deactivate_locked_super(sb);
return 0;
out_unlock:
super_unlock_excl(sb);
return error;
}
/**
* thaw_super -- unlock filesystem
* @sb: the super to thaw
* @who: context that wants to freeze
* @freeze_owner: owner of the freeze
*
* Unlocks the filesystem and marks it writeable again after freeze_super()
* if there are no remaining freezes on the filesystem.
*
* @who should be:
* * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
* * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
* * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
*
* A filesystem may hold multiple devices and thus a filesystem may
* have been frozen through the block layer via multiple block devices.
* The filesystem remains frozen until all block devices are unfrozen.
*/
int thaw_super(struct super_block *sb, enum freeze_holder who,
const void *freeze_owner)
{
if (!super_lock_excl(sb)) {
WARN_ON_ONCE("Dying superblock while thawing!");
return -EINVAL;
}
return thaw_super_locked(sb, who, freeze_owner);
}
EXPORT_SYMBOL(thaw_super);
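/*
 * Illustrative sketch (not part of the original source): a kernel
 * subsystem that needs a quiesced filesystem brackets its work with a
 * kernel-holder freeze/thaw pair. The caller is assumed to hold its own
 * active reference on @sb; "do_quiesced_work" is hypothetical.
 */
static int with_frozen_sb(struct super_block *sb,
			  int (*do_quiesced_work)(struct super_block *))
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
	if (err)
		return err;

	err = do_quiesced_work(sb);

	/* drops only the freeze taken above; other holders may keep it frozen */
	thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
	return err;
}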
/*
* Create workqueue for deferred direct IO completions. We allocate the
* workqueue when it's first needed. This avoids creating workqueue for
* filesystems that don't need it and also allows us to create the workqueue
* late enough so that we can include s_id in the name of the workqueue.
*/
int sb_init_dio_done_wq(struct super_block *sb)
{
struct workqueue_struct *old;
struct workqueue_struct *wq = alloc_workqueue("dio/%s",
WQ_MEM_RECLAIM | WQ_PERCPU,
0,
sb->s_id);
if (!wq)
return -ENOMEM;
old = NULL;
/*
* This has to be atomic as more DIOs can race to create the workqueue
*/
if (!try_cmpxchg(&sb->s_dio_done_wq, &old, wq)) {
/* Someone created workqueue before us? Free ours... */
destroy_workqueue(wq);
}
return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
#include <asm/barrier.h>
/*
* Circular doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
* sometimes we already know the next/prev entries and we can
* generate better code by using them directly rather than
* using the generic single-entry routines.
*/
/**
* LIST_HEAD_INIT - initialize a &struct list_head's links to point to itself
* @name: name of the list_head
*/
#define LIST_HEAD_INIT(name) { &(name), &(name) }
/**
* LIST_HEAD - definition of a &struct list_head with initialization values
* @name: name of the list_head
*/
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
/**
* INIT_LIST_HEAD - Initialize a list_head structure
* @list: list_head structure to be initialized.
*
* Initializes the list_head to point to itself. If it is a list header,
* the result is an empty list.
*/
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
WRITE_ONCE(list->prev, list);
}
#ifdef CONFIG_LIST_HARDENED
#ifdef CONFIG_DEBUG_LIST
# define __list_valid_slowpath
#else
# define __list_valid_slowpath __cold __preserve_most
#endif
/*
* Performs the full set of list corruption checks before __list_add().
* On list corruption reports a warning, and returns false.
*/
bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
struct list_head *prev,
struct list_head *next);
/*
* Performs list corruption checks before __list_add(). Returns false if a
* corruption is detected, true otherwise.
*
* With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
* inline to catch non-faulting corruptions, and only if a corruption is
* detected calls the reporting function __list_add_valid_or_report().
*/
static __always_inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
bool ret = true;
if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
/*
* With the hardening version, elide checking if next and prev
* are NULL, since the immediate dereference of them below would
* result in a fault if NULL.
*
* With the reduced set of checks, we can afford to inline the
* checks, which also gives the compiler a chance to elide some
* of them completely if they can be proven at compile-time. If
* one of the pre-conditions does not hold, the slow-path will
* show a report which pre-condition failed.
*/
if (likely(next->prev == prev && prev->next == next && new != prev && new != next))
return true;
ret = false;
}
ret &= __list_add_valid_or_report(new, prev, next);
return ret;
}
/*
* Performs the full set of list corruption checks before __list_del_entry().
* On list corruption reports a warning, and returns false.
*/
bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
/*
* Performs list corruption checks before __list_del_entry(). Returns false if a
* corruption is detected, true otherwise.
*
* With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
* inline to catch non-faulting corruptions, and only if a corruption is
* detected calls the reporting function __list_del_entry_valid_or_report().
*/
static __always_inline bool __list_del_entry_valid(struct list_head *entry)
{
bool ret = true;
if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
struct list_head *prev = entry->prev;
struct list_head *next = entry->next;
/*
* With the hardening version, elide checking if next and prev
* are NULL, LIST_POISON1 or LIST_POISON2, since the immediate
* dereference of them below would result in a fault.
*/
if (likely(prev->next == entry && next->prev == entry))
return true;
ret = false;
}
ret &= __list_del_entry_valid_or_report(entry);
return ret;
}
#else
static inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
return true;
}
static inline bool __list_del_entry_valid(struct list_head *entry)
{
return true;
}
#endif
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
if (!__list_add_valid(new, prev, next))
return;
next->prev = new;
new->next = next;
new->prev = prev;
WRITE_ONCE(prev->next, new);
}
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
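/*
 * Illustrative sketch (not part of the original list.h text): a producer
 * enqueues items by embedding a list_head in each item and using
 * list_add_tail() for FIFO order (list_add() would give LIFO order).
 * The "workitem"/"todo_list" identifiers are hypothetical.
 */
struct workitem {
	int payload;
	struct list_head link;	/* links the item into todo_list */
};

static LIST_HEAD(todo_list);

static void workitem_enqueue(struct workitem *item, int payload)
{
	item->payload = payload;
	list_add_tail(&item->link, &todo_list);
}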
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
WRITE_ONCE(prev->next, next);
}
/*
* Delete a list entry and clear the 'prev' pointer.
*
* This is a special-purpose list clearing method used in the networking code
* for lists allocated as per-cpu, where we don't want to incur the extra
* WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this
* needs to check the node 'prev' pointer instead of calling list_empty().
*/
static inline void __list_del_clearprev(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = NULL;
}
static inline void __list_del_entry(struct list_head *entry)
{
if (!__list_del_entry_valid(entry))
return;
__list_del(entry->prev, entry->next);
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
static inline void list_del(struct list_head *entry)
{
__list_del_entry(entry);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
/**
* list_replace - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* If @old was empty, it will be overwritten.
*/
static inline void list_replace(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->next->prev = new;
new->prev = old->prev;
new->prev->next = new;
}
/**
* list_replace_init - replace old entry by new one and initialize the old one
* @old : the element to be replaced
* @new : the new element to insert
*
* If @old was empty, it will be overwritten.
*/
static inline void list_replace_init(struct list_head *old,
struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}
/**
* list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position
* @entry1: the location to place entry2
* @entry2: the location to place entry1
*/
static inline void list_swap(struct list_head *entry1,
struct list_head *entry2)
{
struct list_head *pos = entry2->prev;
list_del(entry2);
list_replace(entry1, entry2);
if (pos == entry1)
pos = entry2;
list_add(entry1, pos);
}
/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
static inline void list_del_init(struct list_head *entry)
{
__list_del_entry(entry);
INIT_LIST_HEAD(entry);
}
/**
* list_move - delete from one list and add as another's head
* @list: the entry to move
* @head: the head that will precede our entry
*/
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del_entry(list);
list_add(list, head);
}
/**
* list_move_tail - delete from one list and add as another's tail
* @list: the entry to move
* @head: the head that will follow our entry
*/
static inline void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del_entry(list);
list_add_tail(list, head);
}
/**
* list_bulk_move_tail - move a subsection of a list to its tail
* @head: the head that will follow our entry
* @first: first entry to move
* @last: last entry to move, can be the same as first
*
* Move all entries between @first and including @last before @head.
* All three entries must belong to the same linked list.
*/
static inline void list_bulk_move_tail(struct list_head *head,
struct list_head *first,
struct list_head *last)
{
first->prev->next = last->next;
last->next->prev = first->prev;
head->prev->next = first;
first->prev = head->prev;
last->next = head;
head->prev = last;
}
/**
* list_is_first -- tests whether @list is the first entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_first(const struct list_head *list, const struct list_head *head)
{
return list->prev == head;
}
/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}
/**
* list_is_head - tests whether @list is the list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_head(const struct list_head *list, const struct list_head *head)
{
return list == head;
}
/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
static inline int list_empty(const struct list_head *head)
{
return READ_ONCE(head->next) == head;
}
/**
* list_del_init_careful - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*
* This is the same as list_del_init(), except designed to be used
* together with list_empty_careful() in a way to guarantee ordering
* of other memory operations.
*
* Any memory operations done before a list_del_init_careful() are
* guaranteed to be visible after a list_empty_careful() test.
*/
static inline void list_del_init_careful(struct list_head *entry)
{
__list_del_entry(entry);
WRITE_ONCE(entry->prev, entry);
smp_store_release(&entry->next, entry);
}
/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
* Description:
* tests whether a list is empty _and_ checks that no other CPU might be
* in the process of modifying either member (next or prev)
*
* NOTE: using list_empty_careful() without synchronization
* can only be safe if the only activity that can happen
* to the list entry is list_del_init(). Eg. it cannot be used
* if another CPU could re-list_add() it.
*/
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = smp_load_acquire(&head->next);
return list_is_head(next, head) && (next == READ_ONCE(head->prev));
}
/**
* list_rotate_left - rotate the list to the left
* @head: the head of the list
*/
static inline void list_rotate_left(struct list_head *head)
{
struct list_head *first;
if (!list_empty(head)) {
first = head->next;
list_move_tail(first, head);
}
}
/**
* list_rotate_to_front() - Rotate list to specific item.
* @list: The desired new front of the list.
* @head: The head of the list.
*
* Rotates list so that @list becomes the new front of the list.
*/
static inline void list_rotate_to_front(struct list_head *list,
struct list_head *head)
{
/*
* Deletes the list head from the list denoted by @head and
* places it as the tail of @list, this effectively rotates the
* list so that @list is at the front.
*/
list_move_tail(head, list);
}
/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
static inline int list_is_singular(const struct list_head *head)
{
return !list_empty(head) && (head->next == head->prev);
}
static inline void __list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
struct list_head *new_first = entry->next;
list->next = head->next;
list->next->prev = list;
list->prev = entry;
entry->next = list;
head->next = new_first;
new_first->prev = head;
}
/**
* list_cut_position - cut a list into two
* @list: a new list to add all removed entries
* @head: a list with entries
* @entry: an entry within head, could be the head itself
* and if so we won't cut the list
*
* This helper moves the initial part of @head, up to and
* including @entry, from @head to @list. You should
* pass on @entry an element you know is on @head. @list
* should be an empty list or a list you do not care about
* losing its data.
*
*/
static inline void list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
if (list_empty(head))
return;
if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
/**
* list_cut_before - cut a list into two, before given entry
* @list: a new list to add all removed entries
* @head: a list with entries
* @entry: an entry within head, could be the head itself
*
* This helper moves the initial part of @head, up to but
* excluding @entry, from @head to @list. You should pass
* in @entry an element you know is on @head. @list should
* be an empty list or a list you do not care about losing
* its data.
* If @entry == @head, all entries on @head are moved to
* @list.
*/
static inline void list_cut_before(struct list_head *list,
struct list_head *head,
struct list_head *entry)
{
if (head->next == entry) {
INIT_LIST_HEAD(list);
return;
}
list->next = head->next;
list->next->prev = list;
list->prev = entry->prev;
list->prev->next = list;
head->next = entry;
entry->prev = head;
}
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
first->prev = prev;
prev->next = first;
last->next = next;
next->prev = last;
}
/**
* list_splice - join two lists, this is designed for stacks
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice(const struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head, head->next);
}
/**
* list_splice_tail - join two lists, each list being a queue
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice_tail(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head->prev, head);
}
/**
* list_splice_init - join two lists and reinitialise the emptied list.
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* The list at @list is reinitialised
*/
static inline void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head, head->next);
INIT_LIST_HEAD(list);
}
}
/**
* list_splice_tail_init - join two lists and reinitialise the emptied list
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* Each of the lists is a queue.
* The list at @list is reinitialised
*/
static inline void list_splice_tail_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head->prev, head);
INIT_LIST_HEAD(list);
}
}
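/*
 * Illustrative sketch (not part of the original list.h text): a consumer
 * drains a shared list into a private one with list_splice_tail_init(),
 * leaving the shared list empty and immediately reusable (the splice is
 * typically done under the producer's lock, which can then be dropped).
 * The "pending"/"local" names are hypothetical.
 */
static inline void drain_pending(struct list_head *pending)
{
	LIST_HEAD(local);

	list_splice_tail_init(pending, &local);

	/* ... process the entries now on @local without the producer's lock ... */
}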
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
/**
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
/**
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*/
#define list_first_entry_or_null(ptr, type, member) ({ \
struct list_head *head__ = (ptr); \
struct list_head *pos__ = READ_ONCE(head__->next); \
pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
})
/**
* list_last_entry_or_null - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*/
#define list_last_entry_or_null(ptr, type, member) ({ \
struct list_head *head__ = (ptr); \
struct list_head *pos__ = READ_ONCE(head__->prev); \
pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
})
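/*
 * Illustrative sketch (not part of the original list.h text): popping the
 * front of a queue with list_first_entry_or_null() avoids a separate
 * list_empty() check. The "msg"/"msg_dequeue" identifiers are hypothetical.
 */
struct msg {
	int id;
	struct list_head node;
};

static inline struct msg *msg_dequeue(struct list_head *queue)
{
	struct msg *m = list_first_entry_or_null(queue, struct msg, node);

	if (m)
		list_del_init(&m->node);	/* detach and reinitialize the node */
	return m;
}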
/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
*/
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
* list_next_entry_circular - get the next element in list
* @pos: the type * to cursor.
* @head: the list head to take the element from.
* @member: the name of the list_head within the struct.
*
* Wraparound if pos is the last element (return the first element).
* Note, that list is expected to be not empty.
*/
#define list_next_entry_circular(pos, head, member) \
(list_is_last(&(pos)->member, head) ? \
list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member))
/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
*/
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
* list_prev_entry_circular - get the prev element in list
* @pos: the type * to cursor.
* @head: the list head to take the element from.
* @member: the name of the list_head within the struct.
*
* Wraparound if pos is the first element (return the last element).
* Note, that list is expected to be not empty.
*/
#define list_prev_entry_circular(pos, head, member) \
(list_is_first(&(pos)->member, head) ? \
list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member))
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_continue - continue iteration over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* Continue to iterate over a list, continuing after the current position.
*/
#define list_for_each_continue(pos, head) \
for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
* @pos: the &struct list_head to use as a loop cursor.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; \
!list_is_head(pos, (head)); \
pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
* @pos: the &struct list_head to use as a loop cursor.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
!list_is_head(pos, (head)); \
pos = n, n = pos->prev)
/**
* list_count_nodes - count nodes in the list
* @head: the head for your list.
*/
static inline size_t list_count_nodes(struct list_head *head)
{
struct list_head *pos;
size_t count = 0;
list_for_each(pos, head)
count++;
return count;
}
/**
* list_entry_is_head - test if the entry points to the head of the list
* @pos: the type * to cursor
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_entry_is_head(pos, head, member) \
list_is_head(&pos->member, (head))
/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
!list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
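/*
 * Illustrative sketch (not part of the original list.h text): typed
 * iteration over the containing structures of a list. The "device_entry"
 * identifiers are hypothetical.
 */
struct device_entry {
	int id;
	struct list_head list;
};

static inline struct device_entry *find_device(struct list_head *devices, int id)
{
	struct device_entry *dev;

	list_for_each_entry(dev, devices, list) {
		if (dev->id == id)
			return dev;
	}
	return NULL;
}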
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member); \
!list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
* @pos: the type * to use as a start point
* @head: the head of the list
* @member: the name of the list_head within the struct.
*
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
*/
#define list_prepare_entry(pos, head, member) \
((pos) ? : list_entry(head, typeof(*pos), member))
/**
* list_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
* list_for_each_entry_continue_reverse - iterate backwards from the given point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Start to iterate over list of given type backwards, continuing after
* the current position.
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_prev_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
* list_for_each_entry_from - iterate over list of given type from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
for (; !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
* list_for_each_entry_from_reverse - iterate backwards over list of given type
* from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate backwards over list of given type, continuing from current position.
*/
#define list_for_each_entry_from_reverse(pos, head, member) \
for (; !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
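/*
 * Illustrative sketch: freeing entries while walking the list requires the
 * _safe variant so the cursor is never dereferenced after list_del().
 * "struct item" and "item_list" are hypothetical.
 *
 *	struct item *it, *tmp;
 *
 *	list_for_each_entry_safe(it, tmp, &item_list, node) {
 *		list_del(&it->node);
 *		kfree(it);
 *	}
 */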
/**
* list_for_each_entry_safe_continue - continue list iteration safe against removal
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing after current point,
* safe against removal of list entry.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_from - iterate over list from current point safe against removal
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate over list of given type from current point, safe against
* removal of list entry.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate backwards over list of given type, safe against removal
* of list entry.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_prev_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_prev_entry(n, member))
/**
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
* @pos: the loop cursor used in the list_for_each_entry_safe loop
* @n: temporary storage used in list_for_each_entry_safe
* @member: the name of the list_head within the struct.
*
* list_safe_reset_next is not safe to use in general if the list may be
* modified concurrently (eg. the lock is dropped in the loop body). An
* exception to this is if the cursor element (pos) is pinned in the list,
* and list_safe_reset_next is called after re-taking the lock and before
* completing the current iteration of the loop body.
*/
#define list_safe_reset_next(pos, n, member) \
n = list_next_entry(pos, member)
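/*
 * Illustrative sketch of the pinned-cursor case described above: the lock is
 * dropped in the loop body while the cursor stays on the list, so the
 * temporary @n must be recomputed after re-taking the lock. "item_lock",
 * needs_slow_work() and do_slow_work() are hypothetical.
 *
 *	list_for_each_entry_safe(it, tmp, &item_list, node) {
 *		if (needs_slow_work(it)) {
 *			spin_unlock(&item_lock);
 *			do_slow_work(it);	// "it" stays on the list
 *			spin_lock(&item_lock);
 *			list_safe_reset_next(it, tmp, node);
 *		}
 *	}
 */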
/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
* too wasteful.
* You lose the ability to access the tail in O(1).
*/
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = NULL;
h->pprev = NULL;
}
/**
* hlist_unhashed - Has node been removed from list and reinitialized?
* @h: Node to be checked
*
* Note that not all removal functions will leave a node in unhashed
* state. For example, hlist_nulls_del_init_rcu() does leave the
* node in unhashed state, but hlist_nulls_del() does not.
*/
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
/**
* hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
* @h: Node to be checked
*
* This variant of hlist_unhashed() must be used in lockless contexts
* to avoid potential load-tearing. The READ_ONCE() is paired with the
* various WRITE_ONCE() in hlist helpers that are defined below.
*/
static inline int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !READ_ONCE(h->pprev);
}
/**
* hlist_empty - Is the specified hlist_head structure an empty hlist?
* @h: Structure to check.
*/
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
}
static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
WRITE_ONCE(*pprev, next);
if (next)
WRITE_ONCE(next->pprev, pprev);
}
/**
* hlist_del - Delete the specified hlist_node from its list
* @n: Node to delete.
*
* Note that this function leaves the node in hashed state. Use
* hlist_del_init() or similar instead to unhash @n.
*/
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = LIST_POISON1;
n->pprev = LIST_POISON2;
}
/**
* hlist_del_init - Delete the specified hlist_node from its list and initialize
* @n: Node to delete.
*
* Note that this function leaves the node in unhashed state.
*/
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
/**
* hlist_add_head - add a new entry at the beginning of the hlist
* @n: new entry to be added
* @h: hlist head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
WRITE_ONCE(n->next, first);
if (first)
WRITE_ONCE(first->pprev, &n->next);
WRITE_ONCE(h->first, n);
WRITE_ONCE(n->pprev, &h->first);
}
/**
* hlist_add_before - add a new entry before the one specified
* @n: new entry to be added
* @next: hlist node to add it before, which must be non-NULL
*/
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
WRITE_ONCE(n->pprev, next->pprev);
WRITE_ONCE(n->next, next);
WRITE_ONCE(next->pprev, &n->next);
WRITE_ONCE(*(n->pprev), n);
}
/**
* hlist_add_behind - add a new entry after the one specified
* @n: new entry to be added
* @prev: hlist node to add it after, which must be non-NULL
*/
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
WRITE_ONCE(n->next, prev->next);
WRITE_ONCE(prev->next, n);
WRITE_ONCE(n->pprev, &prev->next);
if (n->next)
WRITE_ONCE(n->next->pprev, &n->next);
}
/**
* hlist_add_fake - create a fake hlist consisting of a single headless node
* @n: Node to make a fake list out of
*
* This makes @n appear to be its own predecessor on a headless hlist.
* The point of this is to allow things like hlist_del() to work correctly
* in cases where there is no list.
*/
static inline void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}
/**
* hlist_fake - Is this node a fake hlist?
* @h: Node to check for being a self-referential fake hlist.
*/
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
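/*
 * Illustrative sketch: a "fake" node can later be handed to hlist_del()
 * even though it was never added to a real list, which simplifies teardown
 * paths that do not want to special-case the unlisted node.
 *
 *	struct hlist_node n;
 *
 *	INIT_HLIST_NODE(&n);
 *	hlist_add_fake(&n);
 *	// hlist_fake(&n) is true and hlist_del(&n) is now well defined
 */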
/**
* hlist_is_singular_node - is node the only element of the specified hlist?
* @n: Node to check for singularity.
* @h: Header for potentially singular list.
*
* Check whether the node is the only node of the head without
* accessing head, thus avoiding unnecessary cache misses.
*/
static inline bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
{
return !n->next && n->pprev == &h->first;
}
/**
* hlist_move_list - Move an hlist
* @old: hlist_head for old list.
* @new: hlist_head for new list.
*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
static inline void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = NULL;
}
/**
* hlist_splice_init() - move all entries from one list to another
* @from: hlist_head from which entries will be moved
* @last: last entry on the @from list
* @to: hlist_head to which entries will be moved
*
* @to can be empty, @from must contain at least @last.
*/
static inline void hlist_splice_init(struct hlist_head *from,
struct hlist_node *last,
struct hlist_head *to)
{
if (to->first)
to->first->pprev = &last->next;
last->next = to->first;
to->first = from->first;
from->first->pprev = &to->first;
from->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos ; pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
#define hlist_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? hlist_entry(____ptr, type, member) : NULL; \
})
/**
* hlist_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry(pos, head, member) \
for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
pos; \
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
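/*
 * Illustrative sketch: a minimal hash-table lookup over hlist_head buckets,
 * assuming a hypothetical "struct obj" that embeds an hlist_node named
 * "hnode", plus hypothetical "table", "TABLE_BITS" and "key" (hash_min() is
 * from <linux/hash.h>).
 *
 *	struct obj *obj;
 *
 *	hlist_for_each_entry(obj, &table[hash_min(key, TABLE_BITS)], hnode)
 *		if (obj->key == key)
 *			return obj;
 *	return NULL;
 */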
/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point
* @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue(pos, member) \
for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
pos; \
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
/**
* hlist_for_each_entry_from - iterate over a hlist continuing from current point
* @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(pos, member) \
for (; pos; \
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: a &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe(pos, n, head, member) \
for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*pos), member))
/**
* hlist_count_nodes - count nodes in the hlist
* @head: the head for your hlist.
*/
static inline size_t hlist_count_nodes(struct hlist_head *head)
{
struct hlist_node *pos;
size_t count = 0;
hlist_for_each(pos, head)
count++;
return count;
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_NULLS_H
#define _LINUX_LIST_NULLS_H
#include <linux/poison.h>
#include <linux/const.h>
/*
* Special version of lists, where end of list is not a NULL pointer,
* but a 'nulls' marker, which can have many different values.
* (up to 2^31 different values guaranteed on all platforms)
*
* In the standard hlist, termination of a list is the NULL pointer.
* In this special 'nulls' variant, we use the fact that objects stored in
* a list are aligned on a word (4 or 8 bytes alignment).
* We therefore use the least significant bit of 'ptr':
* Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1)
* Set to 0 : This is a pointer to some object (ptr)
*/
struct hlist_nulls_head {
struct hlist_nulls_node *first;
};
struct hlist_nulls_node {
struct hlist_nulls_node *next, **pprev;
};
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
#define HLIST_NULLS_HEAD_INIT(nulls) {.first = (struct hlist_nulls_node *)NULLS_MARKER(nulls)}
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_nulls_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
!is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
})
/**
* is_a_nulls - Test if a ptr is a nulls marker
* @ptr: ptr to be tested
*
*/
static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr & 1);
}
/**
* get_nulls_value - Get the 'nulls' value of the end of chain
* @ptr: end of chain
*
* Should be called only if is_a_nulls(ptr);
*/
static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr) >> 1;
}
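/*
 * Illustrative sketch: the 'nulls' value chosen at initialization is encoded
 * in the end-of-chain marker and can be recovered by readers.
 *
 *	struct hlist_nulls_head head;
 *
 *	INIT_HLIST_NULLS_HEAD(&head, 7);
 *	// head.first == (struct hlist_nulls_node *)((7UL << 1) | 1)
 *	// is_a_nulls(head.first) is true, get_nulls_value(head.first) == 7
 */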
/**
* hlist_nulls_unhashed - Has node been removed and reinitialized?
* @h: Node to be checked
*
* Note that not all removal functions will leave a node in unhashed state.
* For example, hlist_del_init_rcu() leaves the node in unhashed state,
* but hlist_nulls_del() does not.
*/
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
/**
* hlist_nulls_unhashed_lockless - Has node been removed and reinitialized?
* @h: Node to be checked
*
* Note that not all removal functions will leave a node in unhashed state.
* For example, hlist_del_init_rcu() leaves the node in unhashed state,
* but hlist_nulls_del() does not. Unlike hlist_nulls_unhashed(), this
* function may be used locklessly.
*/
static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
{
return !READ_ONCE(h->pprev);
}
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(READ_ONCE(h->first));
}
static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *first = h->first;
n->next = first;
WRITE_ONCE(n->pprev, &h->first);
h->first = n;
if (!is_a_nulls(first))
WRITE_ONCE(first->pprev, &n->next);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
{
struct hlist_nulls_node *next = n->next;
struct hlist_nulls_node **pprev = n->pprev;
WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
* hlist_nulls_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
*/
#define hlist_nulls_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*
*/
#define hlist_nulls_for_each_entry_from(tpos, pos, member) \
for (; (!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
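/*
 * Illustrative sketch of the lockless-lookup pattern that 'nulls' lists make
 * possible: if a concurrent rehash moved the object to another chain, the
 * end-of-chain nulls value will not match the slot the walk started from and
 * the lookup is simply restarted. "table", "slot", "key" and "struct obj" are
 * hypothetical; RCU users would pair this with the _rcu accessors.
 *
 *	struct obj *obj;
 *	struct hlist_nulls_node *pos;
 * begin:
 *	hlist_nulls_for_each_entry(obj, pos, &table[slot], hnode)
 *		if (obj->key == key)
 *			return obj;
 *	if (get_nulls_value(pos) != slot)
 *		goto begin;
 *	return NULL;
 */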
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/core.c - core driver model code (device registration, etc)
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2006 Novell, Inc.
*/
#include <linux/acpi.h>
#include <linux/blkdev.h>
#include <linux/cleanup.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
#include "base.h"
#include "physical_location.h"
#include "power/power.h"
/* Device links support. */
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
static struct workqueue_struct *device_link_wq;
/**
* __fwnode_link_add - Create a link between two fwnode_handles.
* @con: Consumer end of the link.
* @sup: Supplier end of the link.
* @flags: Link flags.
*
* Create a fwnode link between fwnode handles @con and @sup. The fwnode link
* represents the detail that the firmware lists @sup fwnode as supplying a
* resource to @con.
*
* The driver core will use the fwnode link to create a device link between the
* two device objects corresponding to @con and @sup when they are created. The
* driver core will automatically delete the fwnode link between @con and @sup
* after doing that.
*
* Attempts to create duplicate links between the same pair of fwnode handles
* are ignored and there is no reference counting.
*/
static int __fwnode_link_add(struct fwnode_handle *con,
struct fwnode_handle *sup, u8 flags)
{
struct fwnode_link *link;
list_for_each_entry(link, &sup->consumers, s_hook)
if (link->consumer == con) {
link->flags |= flags;
return 0;
}
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
link->supplier = sup;
INIT_LIST_HEAD(&link->s_hook);
link->consumer = con;
INIT_LIST_HEAD(&link->c_hook);
link->flags = flags;
list_add(&link->s_hook, &sup->consumers);
list_add(&link->c_hook, &con->suppliers);
pr_debug("%pfwf Linked as a fwnode consumer to %pfwf\n",
con, sup);
return 0;
}
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
u8 flags)
{
guard(mutex)(&fwnode_link_lock);
return __fwnode_link_add(con, sup, flags);
}
/**
* __fwnode_link_del - Delete a link between two fwnode_handles.
* @link: the fwnode_link to be deleted
*
* The fwnode_link_lock needs to be held when this function is called.
*/
static void __fwnode_link_del(struct fwnode_link *link)
{
pr_debug("%pfwf Dropping the fwnode link to %pfwf\n",
link->consumer, link->supplier);
list_del(&link->s_hook);
list_del(&link->c_hook);
kfree(link);
}
/**
* __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
* @link: the fwnode_link to be marked
*
* The fwnode_link_lock needs to be held when this function is called.
*/
static void __fwnode_link_cycle(struct fwnode_link *link)
{
pr_debug("%pfwf: cycle: depends on %pfwf\n",
link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE;
}
/**
* fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
* @fwnode: fwnode whose supplier links need to be deleted
*
* Deletes all supplier links connecting directly to @fwnode.
*/
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
guard(mutex)(&fwnode_link_lock);
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
__fwnode_link_del(link);
}
/**
* fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
* @fwnode: fwnode whose consumer links need to be deleted
*
* Deletes all consumer links connecting directly to @fwnode.
*/
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
guard(mutex)(&fwnode_link_lock);
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
__fwnode_link_del(link);
}
/**
* fwnode_links_purge - Delete all links connected to a fwnode_handle.
* @fwnode: fwnode whose links needs to be deleted
*
* Deletes all links connecting directly to a fwnode.
*/
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
fwnode_links_purge_suppliers(fwnode);
fwnode_links_purge_consumers(fwnode);
}
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
/* Don't purge consumer links of an added child */
if (fwnode->dev)
return;
fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
fwnode_links_purge_consumers(fwnode);
fwnode_for_each_available_child_node(fwnode, child)
fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
/**
* __fwnode_links_move_consumers - Move consumers from @from to @to fwnode_handle
* @from: move consumers away from this fwnode
* @to: move consumers to this fwnode
*
* Move all consumer links from @from fwnode to @to fwnode.
*/
static void __fwnode_links_move_consumers(struct fwnode_handle *from,
struct fwnode_handle *to)
{
struct fwnode_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
__fwnode_link_add(link->consumer, to, link->flags);
__fwnode_link_del(link);
}
}
/**
* __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
* @fwnode: fwnode from which to pick up dangling consumers
* @new_sup: fwnode of new supplier
*
* If the @fwnode has a corresponding struct device and the device supports
* probing (that is, added to a bus), then we want to let fw_devlink create
* MANAGED device links to this device, so leave @fwnode and its descendants'
* fwnode links alone.
*
* Otherwise, move its consumers to the new supplier @new_sup.
*/
static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
struct fwnode_handle *new_sup)
{
struct fwnode_handle *child;
if (fwnode->dev && fwnode->dev->bus)
return;
fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
__fwnode_links_move_consumers(fwnode, new_sup);
fwnode_for_each_available_child_node(fwnode, child)
__fw_devlink_pickup_dangling_consumers(child, new_sup);
}
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
static inline void device_links_write_lock(void)
{
mutex_lock(&device_links_lock);
}
static inline void device_links_write_unlock(void)
{
mutex_unlock(&device_links_lock);
}
int device_links_read_lock(void) __acquires(&device_links_srcu)
{
return srcu_read_lock(&device_links_srcu);
}
void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
srcu_read_unlock(&device_links_srcu, idx);
}
int device_links_read_lock_held(void)
{
return srcu_read_lock_held(&device_links_srcu);
}
static void device_link_synchronize_removal(void)
{
synchronize_srcu(&device_links_srcu);
}
static void device_link_remove_from_lists(struct device_link *link)
{
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
}
static bool device_is_ancestor(struct device *dev, struct device *target)
{
while (target->parent) {
target = target->parent;
if (dev == target)
return true;
}
return false;
}
#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
DL_FLAG_CYCLE | \
DL_FLAG_MANAGED)
bool device_link_flag_is_sync_state_only(u32 flags)
{
return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
* @target: Device to check against.
*
* Check if @target depends on @dev or any device dependent on it (its child or
* its consumer etc). Return 1 if that is the case or 0 otherwise.
*/
static int device_is_dependent(struct device *dev, void *target)
{
struct device_link *link;
int ret;
/*
* The "ancestors" check is needed to catch the case when the target
* device has not been completely initialized yet and it is still
* missing from the list of children of its parent device.
*/
if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
if (ret)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (device_link_flag_is_sync_state_only(link->flags))
continue;
if (link->consumer == target)
return 1;
ret = device_is_dependent(link->consumer, target);
if (ret)
break;
}
return ret;
}
static void device_link_init_status(struct device_link *link,
struct device *consumer,
struct device *supplier)
{
switch (supplier->links.status) {
case DL_DEV_PROBING:
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
* A consumer driver can create a link to a supplier
* that has not completed its probing yet as long as it
* knows that the supplier is already functional (for
* example, it has just acquired some resources from the
* supplier).
*/
link->status = DL_STATE_CONSUMER_PROBE;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
break;
case DL_DEV_DRIVER_BOUND:
switch (consumer->links.status) {
case DL_DEV_PROBING:
link->status = DL_STATE_CONSUMER_PROBE;
break;
case DL_DEV_DRIVER_BOUND:
link->status = DL_STATE_ACTIVE;
break;
default:
link->status = DL_STATE_AVAILABLE;
break;
}
break;
case DL_DEV_UNBINDING:
link->status = DL_STATE_SUPPLIER_UNBIND;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
}
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
struct device_link *link;
/*
* Devices that have not been registered yet will be put to the ends
* of the lists during the registration, so skip them here.
*/
if (device_is_registered(dev))
devices_kset_move_last(dev);
if (device_pm_initialized(dev))
device_pm_move_last(dev);
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (device_link_flag_is_sync_state_only(link->flags))
continue;
device_reorder_to_tail(link->consumer, NULL);
}
return 0;
}
/**
* device_pm_move_to_tail - Move set of devices to the end of device lists
* @dev: Device to move
*
* This is a device_reorder_to_tail() wrapper taking the requisite locks.
*
* It moves the @dev along with all of its children and all of its consumers
* to the ends of the device_kset and dpm_list, recursively.
*/
void device_pm_move_to_tail(struct device *dev)
{
int idx;
idx = device_links_read_lock();
device_pm_lock();
device_reorder_to_tail(dev, NULL);
device_pm_unlock();
device_links_read_unlock(idx);
}
#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *output;
switch (to_devlink(dev)->status) {
case DL_STATE_NONE:
output = "not tracked";
break;
case DL_STATE_DORMANT:
output = "dormant";
break;
case DL_STATE_AVAILABLE:
output = "available";
break;
case DL_STATE_CONSUMER_PROBE:
output = "consumer probing";
break;
case DL_STATE_ACTIVE:
output = "active";
break;
case DL_STATE_SUPPLIER_UNBIND:
output = "supplier unbinding";
break;
default:
output = "unknown";
break;
}
return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);
static ssize_t auto_remove_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_link *link = to_devlink(dev);
const char *output;
if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
output = "supplier unbind";
else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
output = "consumer unbind";
else
output = "never";
return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);
static ssize_t runtime_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_link *link = to_devlink(dev);
return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);
static ssize_t sync_state_only_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_link *link = to_devlink(dev);
return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);
static struct attribute *devlink_attrs[] = {
&dev_attr_status.attr,
&dev_attr_auto_remove_on.attr,
&dev_attr_runtime_pm.attr,
&dev_attr_sync_state_only.attr,
NULL,
};
ATTRIBUTE_GROUPS(devlink);
static void device_link_release_fn(struct work_struct *work)
{
struct device_link *link = container_of(work, struct device_link, rm_work);
/* Ensure that all references to the link object have been dropped. */
device_link_synchronize_removal();
pm_runtime_release_supplier(link);
/*
* If supplier_preactivated is set, the link has been dropped between
* the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
* in __driver_probe_device(). In that case, drop the supplier's
* PM-runtime usage counter to remove the reference taken by
* pm_runtime_get_suppliers().
*/
if (link->supplier_preactivated)
pm_runtime_put_noidle(link->supplier);
pm_request_idle(link->supplier);
put_device(link->consumer);
put_device(link->supplier);
kfree(link);
}
static void devlink_dev_release(struct device *dev)
{
struct device_link *link = to_devlink(dev);
INIT_WORK(&link->rm_work, device_link_release_fn);
/*
* It may take a while to complete this work because of the SRCU
* synchronization in device_link_release_fn() and if the consumer or
* supplier devices get deleted when it runs, so put it into the
* dedicated workqueue.
*/
queue_work(device_link_wq, &link->rm_work);
}
/**
* device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
*/
void device_link_wait_removal(void)
{
/*
* devlink removal jobs are queued in the dedicated work queue.
* To be sure that all removal jobs are terminated, ensure that any
* scheduled work has run to completion.
*/
flush_workqueue(device_link_wq);
}
EXPORT_SYMBOL_GPL(device_link_wait_removal);
static const struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
.dev_release = devlink_dev_release,
};
static int devlink_add_symlinks(struct device *dev)
{
char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
int ret;
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
struct device *con = link->consumer;
ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
if (ret)
goto out;
ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
if (ret)
goto err_con;
buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
if (!buf_con) {
ret = -ENOMEM;
goto err_con_dev;
}
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con);
if (ret)
goto err_con_dev;
buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
if (!buf_sup) {
ret = -ENOMEM;
goto err_sup_dev;
}
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup);
if (ret)
goto err_sup_dev;
goto out;
err_sup_dev:
sysfs_remove_link(&sup->kobj, buf_con);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
return ret;
}
static void devlink_remove_symlinks(struct device *dev)
{
char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
struct device *con = link->consumer;
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
if (device_is_registered(con)) {
buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
if (!buf_sup)
goto out;
sysfs_remove_link(&con->kobj, buf_sup);
}
buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
if (!buf_con)
goto out;
sysfs_remove_link(&sup->kobj, buf_con);
return;
out:
WARN(1, "Unable to properly free device link symlinks!\n");
}
static struct class_interface devlink_class_intf = {
.class = &devlink_class,
.add_dev = devlink_add_symlinks,
.remove_dev = devlink_remove_symlinks,
};
static int __init devlink_class_init(void)
{
int ret;
ret = class_register(&devlink_class);
if (ret)
return ret;
ret = class_interface_register(&devlink_class_intf);
if (ret)
class_unregister(&devlink_class);
return ret;
}
postcore_initcall(devlink_class_init);
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
DL_FLAG_SYNC_STATE_ONLY | \
DL_FLAG_INFERRED | \
DL_FLAG_CYCLE)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
/**
* device_link_add - Create a link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
* Return: On success, a device_link struct will be returned.
* On error or invalid flag settings, NULL will be returned.
*
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
* DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
* be forced into the active meta state and reference-counted upon the creation
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
* If DL_FLAG_STATELESS is set in @flags, the caller of this function is
* expected to release the link returned by it directly with the help of either
* device_link_del() or device_link_remove().
*
* If that flag is not set, however, the caller of this function is handing the
* management of the link over to the driver core entirely and its return value
* can only be used to check whether or not the link is present. In that case,
* the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
* flags can be used to indicate to the driver core when the link can be safely
* deleted. Namely, setting one of them in @flags indicates to the driver core
* that the link is not going to be used (by the given caller of this function)
* after unbinding the consumer or supplier driver, respectively, from its
* device, so the link can be deleted at that point. If none of them is set,
* the link will be maintained until one of the devices pointed to by it (either
* the consumer or the supplier) is unregistered.
*
* Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
* DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
* managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
* be used to request the driver core to automatically probe for a consumer
* driver after successfully binding a driver to the supplier device.
*
* The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
* DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
* the same time is invalid and will cause NULL to be returned upfront.
* However, if a device link between the given @consumer and @supplier pair
* exists already when this function is called for them, the existing link will
* be returned regardless of its current type and status (the link's flags may
* be modified then). The caller of this function is then expected to treat
* the link as though it has just been created, so (in particular) if
* DL_FLAG_STATELESS was passed in @flags, the link needs to be released
* explicitly when not needed any more (as stated above).
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
*
* The supplier device is required to be registered when this function is called
* and NULL will be returned if that is not the case. The consumer device need
* not be registered, however.
*/
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags)
{
struct device_link *link;
if (!consumer || !supplier || consumer == supplier ||
flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
return NULL;
if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
if (pm_runtime_get_sync(supplier) < 0) {
pm_runtime_put_noidle(supplier);
return NULL;
}
}
if (!(flags & DL_FLAG_STATELESS))
flags |= DL_FLAG_MANAGED;
if (flags & DL_FLAG_SYNC_STATE_ONLY &&
!device_link_flag_is_sync_state_only(flags))
return NULL;
device_links_write_lock();
device_pm_lock();
/*
* If the supplier has not been fully registered yet or there is a
* reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
* the supplier already in the graph, return NULL. If the link is a
* SYNC_STATE_ONLY link, we don't check for reverse dependencies
* because it only affects sync_state() callbacks.
*/
if (!device_pm_initialized(supplier)
|| (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
device_is_dependent(consumer, supplier))) {
link = NULL;
goto out;
}
/*
* SYNC_STATE_ONLY links are useless once a consumer device has probed.
* So, only create one if the consumer hasn't probed yet.
*/
if (flags & DL_FLAG_SYNC_STATE_ONLY &&
consumer->links.status != DL_DEV_NO_DRIVER &&
consumer->links.status != DL_DEV_PROBING) {
link = NULL;
goto out;
}
/*
* DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
* longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
* together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer != consumer)
continue;
if (device_link_test(link, DL_FLAG_INFERRED) &&
!(flags & DL_FLAG_INFERRED))
link->flags &= ~DL_FLAG_INFERRED;
if (flags & DL_FLAG_PM_RUNTIME) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
link->flags |= DL_FLAG_PM_RUNTIME;
}
if (flags & DL_FLAG_RPM_ACTIVE)
refcount_inc(&link->rpm_active);
}
if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
!device_link_test(link, DL_FLAG_STATELESS)) {
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
link->flags |= DL_FLAG_STATELESS;
goto out;
}
}
/*
* If the lifetime of the link following from the new flags is
* longer than indicated by the flags of the existing link,
* update the existing link to stay around longer.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
}
} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
if (!device_link_test(link, DL_FLAG_MANAGED)) {
kref_get(&link->kref);
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
goto reorder;
}
goto out;
}
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
goto out;
refcount_set(&link->rpm_active, 1);
get_device(supplier);
link->supplier = supplier;
INIT_LIST_HEAD(&link->s_node);
get_device(consumer);
link->consumer = consumer;
INIT_LIST_HEAD(&link->c_node);
link->flags = flags;
kref_init(&link->kref);
link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
dev_set_name(&link->link_dev, "%s:%s--%s:%s",
dev_bus_name(supplier), dev_name(supplier),
dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(&link->link_dev);
link = NULL;
goto out;
}
if (flags & DL_FLAG_PM_RUNTIME) {
if (flags & DL_FLAG_RPM_ACTIVE)
refcount_inc(&link->rpm_active);
pm_runtime_new_link(consumer);
}
/* Determine the initial link state. */
if (flags & DL_FLAG_STATELESS)
link->status = DL_STATE_NONE;
else
device_link_init_status(link, consumer, supplier);
/*
* Some callers expect the link creation during consumer driver probe to
* resume the supplier even without DL_FLAG_RPM_ACTIVE.
*/
if (link->status == DL_STATE_CONSUMER_PROBE &&
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
if (flags & DL_FLAG_SYNC_STATE_ONLY) {
dev_dbg(consumer,
"Linked as a sync state only consumer to %s\n",
dev_name(supplier));
goto out;
}
reorder:
/*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
*
* It is necessary to hold dpm_list locked throughout all that or else
* we may end up suspending with a wrong ordering of it.
*/
device_reorder_to_tail(consumer, NULL);
dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
out:
device_pm_unlock();
device_links_write_unlock();
if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
pm_runtime_put(supplier);
return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
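/*
 * Illustrative sketch (hypothetical consumer driver): a managed link created
 * during probe makes the driver core unbind the consumer before its supplier
 * and, with DL_FLAG_PM_RUNTIME, lets runtime PM resume the supplier on the
 * consumer's behalf. "supplier_dev" is hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device_link *dl;
 *
 *		dl = device_link_add(&pdev->dev, supplier_dev,
 *				     DL_FLAG_AUTOREMOVE_CONSUMER |
 *				     DL_FLAG_PM_RUNTIME);
 *		if (!dl)
 *			return -EPROBE_DEFER;
 *		return 0;
 *	}
 */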
static void __device_link_del(struct kref *kref)
{
struct device_link *link = container_of(kref, struct device_link, kref);
dev_dbg(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
pm_runtime_drop_link(link);
device_link_remove_from_lists(link);
device_unregister(&link->link_dev);
}
static void device_link_put_kref(struct device_link *link)
{
if (device_link_test(link, DL_FLAG_STATELESS))
kref_put(&link->kref, __device_link_del);
else if (!device_is_registered(link->consumer))
__device_link_del(&link->kref);
else
WARN(1, "Unable to drop a managed device link reference\n");
}
/**
* device_link_del - Delete a stateless link between two devices.
* @link: Device link to delete.
*
* The caller must ensure proper synchronization of this function with runtime
* PM. If the link was added multiple times, it needs to be deleted as often.
* Care is required for hotplugged devices: Their links are purged on removal
* and calling device_link_del() is then no longer allowed.
*/
void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_link_put_kref(link);
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
/**
* device_link_remove - Delete a stateless link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
*
* The caller must ensure proper synchronization of this function with runtime
* PM.
*/
void device_link_remove(void *consumer, struct device *supplier)
{
struct device_link *link;
if (WARN_ON(consumer == supplier))
return;
device_links_write_lock();
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer == consumer) {
device_link_put_kref(link);
break;
}
}
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (link->status != DL_STATE_CONSUMER_PROBE)
continue;
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
}
static bool dev_is_best_effort(struct device *dev)
{
return (fw_devlink_best_effort && dev->can_match) ||
(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
}
static struct fwnode_handle *fwnode_links_check_suppliers(
struct fwnode_handle *fwnode)
{
struct fwnode_link *link;
if (!fwnode || fw_devlink_is_permissive())
return NULL;
list_for_each_entry(link, &fwnode->suppliers, c_hook)
if (!(link->flags &
(FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE)))
return link->supplier;
return NULL;
}
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
*
* Check links from this device to any suppliers. Walk the list of the device's
* links to suppliers and see if all of them are available. If not, simply
* return -EPROBE_DEFER.
*
* We need to guarantee that the supplier will not go away after the check has
* been positive here. It only can go away in __device_release_driver() and
* that function checks the device's links to consumers. This means we need to
* mark the link as "consumer probe in progress" to make the supplier removal
* wait for us to complete (or bad things may happen).
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
int ret = 0, fwnode_ret = 0;
struct fwnode_handle *sup_fw;
/*
* Device waiting for supplier to become available is not allowed to
* probe.
*/
scoped_guard(mutex, &fwnode_link_lock) {
sup_fw = fwnode_links_check_suppliers(dev->fwnode);
if (sup_fw) {
if (dev_is_best_effort(dev))
fwnode_ret = -EAGAIN;
else
return dev_err_probe(dev, -EPROBE_DEFER,
"wait for supplier %pfwf\n", sup_fw);
}
}
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE &&
!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
if (dev_is_best_effort(dev) &&
device_link_test(link, DL_FLAG_INFERRED) &&
!link->supplier->can_match) {
ret = -EAGAIN;
continue;
}
device_links_missing_supplier(dev);
ret = dev_err_probe(dev, -EPROBE_DEFER,
"supplier %s not ready\n", dev_name(link->supplier));
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
}
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
return ret ? ret : fwnode_ret;
}
/**
* __device_links_queue_sync_state - Queue a device for sync_state() callback
* @dev: Device to call sync_state() on
* @list: List head to queue the @dev on
*
* Queues a device for a sync_state() callback when the device links write lock
* isn't held. This allows the sync_state() execution flow to use device links
* APIs. The caller must ensure this function is called with
* device_links_write_lock() held.
*
* This function does a get_device() to make sure the device is not freed while
* on this list.
*
* So the caller must also ensure that device_links_flush_sync_list() is called
* as soon as the caller releases device_links_write_lock(). This is necessary
* to make sure the sync_state() is called in a timely fashion and the
* put_device() is called on this device.
*/
static void __device_links_queue_sync_state(struct device *dev,
struct list_head *list)
{
struct device_link *link;
if (!dev_has_sync_state(dev))
return;
if (dev->state_synced)
return;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_ACTIVE)
return;
}
/*
* Set the flag here to avoid adding the same device to a list more
* than once. This can happen if new consumers get added to the device
* and probed before the list is flushed.
*/
dev->state_synced = true;
if (WARN_ON(!list_empty(&dev->links.defer_sync)))
return;
get_device(dev);
list_add_tail(&dev->links.defer_sync, list);
}
/**
* device_links_flush_sync_list - Call sync_state() on a list of devices
* @list: List of devices to call sync_state() on
* @dont_lock_dev: Device for which lock is already held by the caller
*
* Calls sync_state() on all the devices that have been queued for it. This
* function is used in conjunction with __device_links_queue_sync_state(). The
* @dont_lock_dev parameter is useful when this function is called from a
* context where a device lock is already held.
*/
static void device_links_flush_sync_list(struct list_head *list,
struct device *dont_lock_dev)
{
struct device *dev, *tmp;
list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
list_del_init(&dev->links.defer_sync);
if (dev != dont_lock_dev)
device_lock(dev);
dev_sync_state(dev);
if (dev != dont_lock_dev)
device_unlock(dev);
put_device(dev);
}
}
void device_links_supplier_sync_state_pause(void)
{
device_links_write_lock();
defer_sync_state_count++;
device_links_write_unlock();
}
void device_links_supplier_sync_state_resume(void)
{
struct device *dev, *tmp;
LIST_HEAD(sync_list);
device_links_write_lock();
if (!defer_sync_state_count) {
WARN(true, "Unmatched sync_state pause/resume!");
goto out;
}
defer_sync_state_count--;
if (defer_sync_state_count)
goto out;
list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
/*
* Delete from deferred_sync list before queuing it to
* sync_list because defer_sync is used for both lists.
*/
list_del_init(&dev->links.defer_sync);
__device_links_queue_sync_state(dev, &sync_list);
}
out:
device_links_write_unlock();
device_links_flush_sync_list(&sync_list, NULL);
}
static int sync_state_resume_initcall(void)
{
device_links_supplier_sync_state_resume();
return 0;
}
late_initcall(sync_state_resume_initcall);
static void __device_links_supplier_defer_sync(struct device *sup)
{
if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
static void device_link_drop_managed(struct device_link *link)
{
link->flags &= ~DL_FLAG_MANAGED;
WRITE_ONCE(link->status, DL_STATE_NONE);
kref_put(&link->kref, __device_link_del);
}
static ssize_t waiting_for_supplier_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
bool val;
device_lock(dev);
scoped_guard(mutex, &fwnode_link_lock)
val = !!fwnode_links_check_suppliers(dev->fwnode);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);
/**
* device_links_force_bind - Prepares device to be force bound
* @dev: Consumer device.
*
* device_bind_driver() force binds a device to a driver without calling any
* driver probe functions. So the consumer really isn't going to wait for any
* supplier before it's bound to the driver. We still want the device link
* states to be sensible when this happens.
*
* In preparation for device_bind_driver(), this function goes through each
* supplier device links and checks if the supplier is bound. If it is, then
* the device link status is set to CONSUMER_PROBE. Otherwise, the device link
* is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_force_bind(struct device *dev)
{
struct device_link *link, *ln;
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE) {
device_link_drop_managed(link);
continue;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
}
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
}
/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
* The probe has been successful, so update links from this device to any
* consumers by changing their status to "available".
*
* Also change the status of @dev's links to suppliers to "active".
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_driver_bound(struct device *dev)
{
struct device_link *link, *ln;
LIST_HEAD(sync_list);
/*
* If a device binds successfully, it's expected to have created all
* the device links it needs to or make new device links as it needs
* them. So, fw_devlink no longer needs to create device links to any
* of the device's suppliers.
*
* Also, if a child firmware node of this bound device is not added as a
* device by now, assume it is never going to be added. Make this bound
* device the fallback supplier to the dangling consumers of the child
* firmware node because this bound device is probably implementing the
* child firmware node functionality and we don't want the dangling
* consumers to defer probe indefinitely waiting for a device for the
* child firmware node.
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
fwnode_links_purge_suppliers(dev->fwnode);
guard(mutex)(&fwnode_link_lock);
fwnode_for_each_available_child_node(dev->fwnode, child)
__fw_devlink_pickup_dangling_consumers(child,
dev->fwnode);
__fw_devlink_link_to_consumers(dev);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
* Links created during consumer probe may be in the "consumer
* probe" state to start with if the supplier is still probing
* when they are created and they may become "active" if the
* consumer probe returns first. Skip them here.
*/
if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
continue;
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
driver_deferred_probe_add(link->consumer);
}
if (defer_sync_state_count)
__device_links_supplier_defer_sync(dev);
else
__device_links_queue_sync_state(dev, &sync_list);
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
struct device *supplier;
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
supplier = link->supplier;
if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
/*
* When DL_FLAG_SYNC_STATE_ONLY is set, it means no
* other DL_MANAGED_LINK_FLAGS have been set. So, it's
* safe to drop the managed link completely.
*/
device_link_drop_managed(link);
} else if (dev_is_best_effort(dev) &&
device_link_test(link, DL_FLAG_INFERRED) &&
link->status != DL_STATE_CONSUMER_PROBE &&
!link->supplier->can_match) {
/*
* When dev_is_best_effort() is true, we ignore device
* links to suppliers that don't have a driver. If the
* consumer device still managed to probe, there's no
* point in maintaining a device link in a weird state
* (consumer probed before supplier). So delete it.
*/
device_link_drop_managed(link);
} else {
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
}
/*
* This needs to be done even for the deleted
* DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
* device link that was preventing the supplier from getting a
* sync_state() call.
*/
if (defer_sync_state_count)
__device_links_supplier_defer_sync(supplier);
else
__device_links_queue_sync_state(supplier, &sync_list);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
device_links_write_unlock();
device_links_flush_sync_list(&sync_list, dev);
}
/**
* __device_links_no_driver - Update links of a device without a driver.
* @dev: Device without a driver.
*
* Delete all non-persistent links from this device to any suppliers.
*
* Persistent links stay around, but their status is changed to "available",
* unless they already are in the "supplier unbind in progress" state in which
* case they need not be updated.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
static void __device_links_no_driver(struct device *dev)
{
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
device_link_drop_managed(link);
continue;
}
if (link->status != DL_STATE_CONSUMER_PROBE &&
link->status != DL_STATE_ACTIVE)
continue;
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
dev->links.status = DL_DEV_NO_DRIVER;
}
/**
* device_links_no_driver - Update links after failing driver probe.
* @dev: Device whose driver has just failed to probe.
*
* Clean up leftover links to consumers for @dev and invoke
* %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_no_driver(struct device *dev)
{
struct device_link *link;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
* The probe has failed, so if the status of the link is
* "consumer probe" or "active", it must have been added by
* a probing consumer while this device was still probing.
* Change its state to "dormant", as it represents a valid
* relationship, but it is not functionally meaningful.
*/
if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_driver_cleanup - Update links after driver removal.
* @dev: Device whose driver has just gone away.
*
* Update links to consumers for @dev by changing their status to "dormant" and
* invoke %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_driver_cleanup(struct device *dev)
{
struct device_link *link, *ln;
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
/*
* autoremove the links between this @dev and its consumer
* devices that are not active, i.e. where the link state
* has moved to DL_STATE_SUPPLIER_UNBIND.
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
device_link_drop_managed(link);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
list_del_init(&dev->links.defer_sync);
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_busy - Check if there are any busy links to consumers.
* @dev: Device to check.
*
* Check each consumer of the device and return 'true' if its link's status
* is one of "consumer probe" or "active" (meaning that the given consumer is
* probing right now or its driver is present). Otherwise, change the link
* state to "supplier unbind" to prevent the consumer from being probed
* successfully going forward.
*
* Return 'false' if there are no probing or active consumers.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
bool device_links_busy(struct device *dev)
{
struct device_link *link;
bool ret = false;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE) {
ret = true;
break;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
}
dev->links.status = DL_DEV_UNBINDING;
device_links_write_unlock();
return ret;
}
/**
* device_links_unbind_consumers - Force unbind consumers of the given device.
* @dev: Device to unbind the consumers of.
*
* Walk the list of links to consumers for @dev and if any of them is in the
* "consumer probe" state, wait for all device probes in progress to complete
* and start over.
*
* If that's not the case, change the status of the link to "supplier unbind"
* and check if the link was in the "active" state. If so, force the consumer
* driver to unbind and start over (the consumer will not re-probe as we have
* changed the state of the link already).
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_unbind_consumers(struct device *dev)
{
struct device_link *link;
start:
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
if (!device_link_test(link, DL_FLAG_MANAGED) ||
device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
continue;
status = link->status;
if (status == DL_STATE_CONSUMER_PROBE) {
device_links_write_unlock();
wait_for_device_probe();
goto start;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
if (status == DL_STATE_ACTIVE) {
struct device *consumer = link->consumer;
get_device(consumer);
device_links_write_unlock();
device_release_driver_internal(consumer, NULL,
consumer->parent);
put_device(consumer);
goto start;
}
}
device_links_write_unlock();
}
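/*
 * Illustrative sketch (not part of this file): a driver-core teardown path
 * such as device_release_driver_internal() typically pairs the two helpers
 * above roughly like this, dropping the device lock while consumers are
 * force-unbound and re-checking the driver afterwards:
 *
 *	while (device_links_busy(dev)) {
 *		device_unlock(dev);
 *		device_links_unbind_consumers(dev);
 *		device_lock(dev);
 *		if (!dev->driver)
 *			return;
 *	}
 */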
/**
* device_links_purge - Delete existing links to other devices.
* @dev: Target device.
*/
static void device_links_purge(struct device *dev)
{
struct device_link *link, *ln;
if (dev->class == &devlink_class)
return;
/*
* Delete all of the remaining links from this device to any other
* devices (either consumers or suppliers).
*/
device_links_write_lock();
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
WARN_ON(link->status == DL_STATE_ACTIVE);
__device_link_del(&link->kref);
}
list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
WARN_ON(link->status != DL_STATE_DORMANT &&
link->status != DL_STATE_NONE);
__device_link_del(&link->kref);
}
device_links_write_unlock();
}
#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
DL_FLAG_PM_RUNTIME)
static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
static int __init fw_devlink_setup(char *arg)
{
if (!arg)
return -EINVAL;
if (strcmp(arg, "off") == 0) {
fw_devlink_flags = 0;
} else if (strcmp(arg, "permissive") == 0) {
fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
} else if (strcmp(arg, "on") == 0) {
fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
} else if (strcmp(arg, "rpm") == 0) {
fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
}
return 0;
}
early_param("fw_devlink", fw_devlink_setup);
static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
return kstrtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
#define FW_DEVLINK_SYNC_STATE_STRICT 0
#define FW_DEVLINK_SYNC_STATE_TIMEOUT 1
#ifndef CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT
static int fw_devlink_sync_state;
#else
static int fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
#endif
static int __init fw_devlink_sync_state_setup(char *arg)
{
if (!arg)
return -EINVAL;
if (strcmp(arg, "strict") == 0) {
fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_STRICT;
return 0;
} else if (strcmp(arg, "timeout") == 0) {
fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
return 0;
}
return -EINVAL;
}
early_param("fw_devlink.sync_state", fw_devlink_sync_state_setup);
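/*
 * Example kernel command lines for the three knobs above (illustrative only):
 *
 *	fw_devlink=off | permissive | on | rpm
 *	fw_devlink.strict=1
 *	fw_devlink.sync_state=strict | timeout
 *
 * e.g. "fw_devlink=permissive" keeps only SYNC_STATE_ONLY links, and
 * "fw_devlink.sync_state=timeout" forces sync_state() calls once probing is
 * considered done rather than waiting indefinitely for consumers.
 */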
static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
{
if (fwlink_flags & FWLINK_FLAG_CYCLE)
return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
return fw_devlink_flags;
}
static bool fw_devlink_is_permissive(void)
{
return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}
bool fw_devlink_is_strict(void)
{
return fw_devlink_strict && !fw_devlink_is_permissive();
}
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
return;
fwnode_call_int_op(fwnode, add_links);
fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
{
struct fwnode_handle *child = NULL;
fw_devlink_parse_fwnode(fwnode);
while ((child = fwnode_get_next_available_child_node(fwnode, child)))
fw_devlink_parse_fwtree(child);
}
static void fw_devlink_relax_link(struct device_link *link)
{
if (!device_link_test(link, DL_FLAG_INFERRED))
return;
if (device_link_flag_is_sync_state_only(link->flags))
return;
pm_runtime_drop_link(link);
link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
dev_dbg(link->consumer, "Relaxing link with %s\n",
dev_name(link->supplier));
}
static int fw_devlink_no_driver(struct device *dev, void *data)
{
struct device_link *link = to_devlink(dev);
if (!link->supplier->can_match)
fw_devlink_relax_link(link);
return 0;
}
void fw_devlink_drivers_done(void)
{
fw_devlink_drv_reg_done = true;
device_links_write_lock();
class_for_each_device(&devlink_class, NULL, NULL,
fw_devlink_no_driver);
device_links_write_unlock();
}
static int fw_devlink_dev_sync_state(struct device *dev, void *data)
{
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
if (!device_link_test(link, DL_FLAG_MANAGED) ||
link->status == DL_STATE_ACTIVE || sup->state_synced ||
!dev_has_sync_state(sup))
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
if (!list_empty(&sup->links.defer_sync))
return 0;
dev_warn(sup, "Timed out. Forcing sync_state()\n");
sup->state_synced = true;
get_device(sup);
list_add_tail(&sup->links.defer_sync, data);
return 0;
}
void fw_devlink_probing_done(void)
{
LIST_HEAD(sync_list);
device_links_write_lock();
class_for_each_device(&devlink_class, NULL, &sync_list,
fw_devlink_dev_sync_state);
device_links_write_unlock();
device_links_flush_sync_list(&sync_list, NULL);
}
/**
* wait_for_init_devices_probe - Try to probe any device needed for init
*
* Some devices might need to be probed and bound successfully before the kernel
* boot sequence can finish and move on to init/userspace. For example, a
* network interface might need to be bound to be able to mount an NFS rootfs.
*
* With fw_devlink=on by default, some of these devices might be blocked from
* probing because they are waiting on an optional supplier that doesn't have a
* driver. While fw_devlink will eventually identify such devices and unblock
* the probing automatically, it might be too late by the time it unblocks the
* probing of devices. For example, IPv4 autoconfig might time out before
* fw_devlink unblocks probing of the network interface.
*
* This function is available to temporarily try and probe all devices that have
* a driver even if some of their suppliers haven't been added or don't have
* drivers.
*
* The drivers can then decide which of the suppliers are optional vs mandatory
* and probe the device if possible. By the time this function returns, all such
* "best effort" probes are guaranteed to be completed. If a device successfully
* probes in this mode, we delete all fw_devlink discovered dependencies of that
* device where the supplier hasn't yet probed successfully because they have to
* be optional dependencies.
*
* Any devices that didn't successfully probe go back to being treated as if
* this function was never called.
*
* This also means that some devices that aren't needed for init and could have
* waited for their optional supplier to probe (when the supplier's module is
* loaded later on) would end up probing prematurely with limited functionality.
* So call this function only when boot would fail without it.
*/
void __init wait_for_init_devices_probe(void)
{
if (!fw_devlink_flags || fw_devlink_is_permissive())
return;
/*
* Wait for all ongoing probes to finish so that the "best effort" is
* only applied to devices that can't probe otherwise.
*/
wait_for_device_probe();
pr_info("Trying to probe devices needed for running init ...\n");
fw_devlink_best_effort = true;
driver_deferred_probe_trigger();
/*
* Wait for all "best effort" probes to finish before going back to
* normal enforcement.
*/
wait_for_device_probe();
fw_devlink_best_effort = false;
}
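/*
 * Illustrative sketch: an __init caller that cannot finish booting without a
 * particular class of devices (the IPv4 autoconfig case mentioned above)
 * might retry like this before giving up; try_bring_up_network() is a
 * hypothetical helper, not a real function:
 *
 *	if (!try_bring_up_network()) {
 *		wait_for_init_devices_probe();
 *		if (!try_bring_up_network())
 *			pr_err("No usable network device found\n");
 *	}
 */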
static void fw_devlink_unblock_consumers(struct device *dev)
{
struct device_link *link;
if (!fw_devlink_flags || fw_devlink_is_permissive())
return;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node)
fw_devlink_relax_link(link);
device_links_write_unlock();
}
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
struct device *dev;
bool ret;
if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
return false;
dev = get_dev_from_fwnode(fwnode);
ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
put_device(dev);
return ret;
}
static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
fwnode_for_each_parent_node(fwnode, parent) {
if (fwnode_init_without_drv(parent)) {
fwnode_handle_put(parent);
return true;
}
}
return false;
}
/**
* fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
* @ancestor: Firmware node that is tested for being an ancestor
* @child: Firmware node that is tested for being the child
*
* A node is considered an ancestor of itself too.
*
* Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
*/
static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor,
const struct fwnode_handle *child)
{
struct fwnode_handle *parent;
if (IS_ERR_OR_NULL(ancestor))
return false;
if (child == ancestor)
return true;
fwnode_for_each_parent_node(child, parent) {
if (parent == ancestor) {
fwnode_handle_put(parent);
return true;
}
}
return false;
}
/**
* fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
* @fwnode: firmware node
*
* Given a firmware node (@fwnode), this function finds its closest ancestor
* firmware node that has a corresponding struct device and returns that struct
* device.
*
* The caller is responsible for calling put_device() on the returned device
* pointer.
*
* Return: a pointer to the device of the @fwnode's closest ancestor.
*/
static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
struct device *dev;
fwnode_for_each_parent_node(fwnode, parent) {
dev = get_dev_from_fwnode(parent);
if (dev) {
fwnode_handle_put(parent);
return dev;
}
}
return NULL;
}
/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
* @con_handle: Potential consumer device fwnode.
* @sup_handle: Potential supplier's fwnode.
*
* Needs to be called with fwnode_link_lock and the device link lock held.
*
* Check if @sup_handle or any of its ancestors or suppliers directly or
* indirectly depend on @con_handle. This function can detect multiple cycles
* between @sup_handle and @con_handle. When such dependency cycles are found,
* convert all device links
* created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark
* all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are
* converted into a device link in the future, they are created as
* SYNC_STATE_ONLY device links. This is the equivalent of doing
* fw_devlink=permissive just between the devices in the cycle. We need to do
* this because, at this point, fw_devlink can't tell which of these
* dependencies is not a real dependency.
*
* Return true if one or more cycles were found. Otherwise, return false.
*/
static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
struct fwnode_handle *sup_handle)
{
struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL;
struct fwnode_link *link;
struct device_link *dev_link;
bool ret = false;
if (!sup_handle)
return false;
/*
* We aren't trying to find all cycles. Just a cycle between con and
* sup_handle.
*/
if (sup_handle->flags & FWNODE_FLAG_VISITED)
return false;
sup_handle->flags |= FWNODE_FLAG_VISITED;
/* Termination condition. */
if (sup_handle == con_handle) {
pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
sup_dev = get_dev_from_fwnode(sup_handle);
con_dev = get_dev_from_fwnode(con_handle);
/*
* If sup_dev is bound to a driver and @con hasn't started binding to a
* driver, sup_dev can't be a consumer of @con. So, no need to check
* further.
*/
if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) {
ret = false;
goto out;
}
list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
if (link->flags & FWLINK_FLAG_IGNORE)
continue;
if (__fw_devlink_relax_cycles(con_handle, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
}
}
/*
* Give priority to device parent over fwnode parent to account for any
* quirks in how fwnodes are converted to devices.
*/
if (sup_dev)
par_dev = get_device(sup_dev->parent);
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) {
pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
par_dev->fwnode);
ret = true;
}
if (!sup_dev)
goto out;
list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
/*
* Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
* such due to a cycle.
*/
if (device_link_flag_is_sync_state_only(dev_link->flags) &&
!device_link_test(dev_link, DL_FLAG_CYCLE))
continue;
if (__fw_devlink_relax_cycles(con_handle,
dev_link->supplier->fwnode)) {
pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
dev_link->supplier->fwnode);
fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE;
ret = true;
}
}
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
put_device(con_dev);
put_device(par_dev);
return ret;
}
/**
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
* @con: consumer device for the device link
* @sup_handle: fwnode handle of supplier
* @link: fwnode link that's being converted to a device link
*
* This function will try to create a device link between the consumer device
* @con and the supplier device represented by @sup_handle.
*
* The supplier has to be provided as a fwnode because incorrect cycles in
* fwnode links can sometimes cause the supplier device to never be created.
* This function detects such cases and returns an error if it cannot create a
* device link from the consumer to a missing supplier.
*
* Returns,
* 0 on successfully creating a device link
* -EINVAL if the device link cannot be created as expected
* -EAGAIN if the device link cannot be created right now, but it may be
* possible to do that in the future
*/
static int fw_devlink_create_devlink(struct device *con,
struct fwnode_handle *sup_handle,
struct fwnode_link *link)
{
struct device *sup_dev;
int ret = 0;
u32 flags;
if (link->flags & FWLINK_FLAG_IGNORE)
return 0;
/*
* In some cases, a device P might also be a supplier to its child node
* C. However, this would defer the probe of C until the probe of P
* completes successfully. This is perfectly fine in the device driver
* model. device_add() doesn't guarantee probe completion of the device
* by the time it returns.
*
* However, there are a few drivers that assume C will finish probing
* as soon as it's added and before P finishes probing. So, we provide
* a flag to let fw_devlink know not to delay the probe of C until the
* probe of P completes successfully.
*
* When such a flag is set, we can't create device links where P is the
* supplier of C as that would delay the probe of C.
*/
if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
fwnode_is_ancestor_of(sup_handle, con->fwnode))
return -EINVAL;
/*
* Don't try to optimize by not calling the cycle detection logic under
* certain conditions. There's always some corner case that won't get
* detected.
*/
device_links_write_lock();
if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) {
__fwnode_link_cycle(link);
pr_debug("----- cycle: end -----\n");
pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n",
link->consumer, sup_handle);
}
device_links_write_unlock();
if (con->fwnode == link->consumer)
flags = fw_devlink_get_flags(link->flags);
else
flags = FW_DEVLINK_FLAGS_PERMISSIVE;
if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
sup_dev = fwnode_get_next_parent_dev(sup_handle);
else
sup_dev = get_dev_from_fwnode(sup_handle);
if (sup_dev) {
/*
* If it's one of those drivers that don't actually bind to
* their device using driver core, then don't wait on this
* supplier device indefinitely.
*/
if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
dev_dbg(con,
"Not linking %pfwf - dev might never probe\n",
sup_handle);
ret = -EINVAL;
goto out;
}
if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n",
flags, dev_name(sup_dev), link->consumer);
ret = -EINVAL;
}
goto out;
}
/*
* Supplier or supplier's ancestor already initialized without a struct
* device or being probed by a driver.
*/
if (fwnode_init_without_drv(sup_handle) ||
fwnode_ancestor_init_without_drv(sup_handle)) {
dev_dbg(con, "Not linking %pfwf - might never become dev\n",
sup_handle);
return -EINVAL;
}
ret = -EAGAIN;
out:
put_device(sup_dev);
return ret;
}
/**
* __fw_devlink_link_to_consumers - Create device links to consumers of a device
* @dev: Device that needs to be linked to its consumers
*
* This function looks at all the consumer fwnodes of @dev and creates device
* links between the consumer device and @dev (supplier).
*
* If the consumer device has not been added yet, then this function creates a
* SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
* of the consumer fwnode. This is necessary to make sure @dev doesn't get a
* sync_state() callback before the real consumer device gets to be added and
* then probed.
*
* Once device links are created from the real consumer to @dev (supplier), the
* fwnode links are deleted.
*/
static void __fw_devlink_link_to_consumers(struct device *dev)
{
struct fwnode_handle *fwnode = dev->fwnode;
struct fwnode_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
struct device *con_dev;
bool own_link = true;
int ret;
con_dev = get_dev_from_fwnode(link->consumer);
/*
* If consumer device is not available yet, make a "proxy"
* SYNC_STATE_ONLY link from the consumer's parent device to
* the supplier device. This is necessary to make sure the
* supplier doesn't get a sync_state() callback before the real
* consumer can create a device link to the supplier.
*
* This proxy link step is needed to handle the case where the
* consumer's parent device is added before the supplier.
*/
if (!con_dev) {
con_dev = fwnode_get_next_parent_dev(link->consumer);
/*
* However, if the consumer's parent device is also the
* parent of the supplier, don't create a
* consumer-supplier link from the parent to its child
* device. Such a dependency is impossible.
*/
if (con_dev &&
fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
put_device(con_dev);
con_dev = NULL;
} else {
own_link = false;
}
}
if (!con_dev)
continue;
ret = fw_devlink_create_devlink(con_dev, fwnode, link);
put_device(con_dev);
if (!own_link || ret == -EAGAIN)
continue;
__fwnode_link_del(link);
}
}
/**
* __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
* @dev: The consumer device that needs to be linked to its suppliers
* @fwnode: Root of the fwnode tree that is used to create device links
*
* This function looks at all the supplier fwnodes of fwnode tree rooted at
* @fwnode and creates device links between @dev (consumer) and all the
* supplier devices of the entire fwnode tree at @fwnode.
*
* The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
* and the real suppliers of @dev. Once these device links are created, the
* fwnode links are deleted.
*
* In addition, it also looks at all the suppliers of the entire fwnode tree
* because some of the child devices of @dev that have not been added yet
* (because @dev hasn't probed) might already have their suppliers added to
* driver core. So, this function creates SYNC_STATE_ONLY device links between
* @dev (consumer) and these suppliers to make sure they don't execute their
* sync_state() callbacks before these child devices have a chance to create
* their device links. The fwnode links that correspond to the child devices
* aren't deleted because they are needed later to create the device links
* between the real consumer and supplier devices.
*/
static void __fw_devlink_link_to_suppliers(struct device *dev,
struct fwnode_handle *fwnode)
{
bool own_link = (dev->fwnode == fwnode);
struct fwnode_link *link, *tmp;
struct fwnode_handle *child = NULL;
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
int ret;
struct fwnode_handle *sup = link->supplier;
ret = fw_devlink_create_devlink(dev, sup, link);
if (!own_link || ret == -EAGAIN)
continue;
__fwnode_link_del(link);
}
/*
* Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
* all the descendants. This proxy link step is needed to handle the
* case where the supplier is added before the consumer's parent device
* (@dev).
*/
while ((child = fwnode_get_next_available_child_node(fwnode, child)))
__fw_devlink_link_to_suppliers(dev, child);
}
static void fw_devlink_link_device(struct device *dev)
{
struct fwnode_handle *fwnode = dev->fwnode;
if (!fw_devlink_flags)
return;
fw_devlink_parse_fwtree(fwnode);
guard(mutex)(&fwnode_link_lock);
__fw_devlink_link_to_consumers(dev);
__fw_devlink_link_to_suppliers(dev, fwnode);
}
/* Device links support end. */
static struct kobject *dev_kobj;
/* /sys/dev/char */
static struct kobject *sysfs_dev_char_kobj;
/* /sys/dev/block */
static struct kobject *sysfs_dev_block_kobj;
static DEFINE_MUTEX(device_hotplug_lock);
void lock_device_hotplug(void)
{
mutex_lock(&device_hotplug_lock);
}
void unlock_device_hotplug(void)
{
mutex_unlock(&device_hotplug_lock);
}
int lock_device_hotplug_sysfs(void)
{
if (mutex_trylock(&device_hotplug_lock))
return 0;
/* Avoid busy looping (5 ms of sleep should do). */
msleep(5);
return restart_syscall();
}
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
return 1;
}
#endif
static void device_platform_notify(struct device *dev)
{
acpi_device_notify(dev);
software_node_notify(dev);
}
static void device_platform_notify_remove(struct device *dev)
{
software_node_notify_remove(dev);
acpi_device_notify_remove(dev);
}
/**
* dev_driver_string - Return a device's driver name, if at all possible
* @dev: struct device to get the name of
*
* Will return the device's driver's name if it is bound to a device. If
* the device is not bound to a driver, it will return the name of the bus
* it is attached to. If it is not attached to a bus either, an empty
* string will be returned.
*/
const char *dev_driver_string(const struct device *dev)
{
struct device_driver *drv;
/* dev->driver can change to NULL underneath us because of unbinding,
* so be careful about accessing it. dev->bus and dev->class should
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
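/*
 * Illustrative usage sketch: dev_driver_string() is what makes a log line show
 * the bound driver (or the bus) name, e.g. in a hand-rolled message:
 *
 *	pr_info("%s %s: link is up\n", dev_driver_string(dev), dev_name(dev));
 *
 * which is essentially the prefix the dev_printk() helpers prepend for you.
 */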
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct device_attribute *dev_attr = to_dev_attr(attr);
struct device *dev = kobj_to_dev(kobj);
ssize_t ret = -EIO;
if (dev_attr->show)
ret = dev_attr->show(dev, dev_attr, buf);
if (ret >= (ssize_t)PAGE_SIZE) {
printk("dev_attr_show: %pS returned bad count\n",
dev_attr->show);
}
return ret;
}
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct device_attribute *dev_attr = to_dev_attr(attr);
struct device *dev = kobj_to_dev(kobj);
ssize_t ret = -EIO;
if (dev_attr->store)
ret = dev_attr->store(dev, dev_attr, buf, count);
return ret;
}
static const struct sysfs_ops dev_sysfs_ops = {
.show = dev_attr_show,
.store = dev_attr_store,
};
#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
ssize_t device_store_ulong(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
int ret;
unsigned long new;
ret = kstrtoul(buf, 0, &new);
if (ret)
return ret;
*(unsigned long *)(ea->var) = new;
/* Always return full write size even if we didn't consume all */
return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);
ssize_t device_show_ulong(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
ssize_t device_store_int(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
int ret;
long new;
ret = kstrtol(buf, 0, &new);
if (ret)
return ret;
if (new > INT_MAX || new < INT_MIN)
return -EINVAL;
*(int *)(ea->var) = new;
/* Always return full write size even if we didn't consume all */
return size;
}
EXPORT_SYMBOL_GPL(device_store_int);
ssize_t device_show_int(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
if (kstrtobool(buf, ea->var) < 0)
return -EINVAL;
return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
ssize_t device_show_string(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%s\n", (char *)ea->var);
}
EXPORT_SYMBOL_GPL(device_show_string);
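/*
 * Illustrative sketch: the helpers above back the DEVICE_ULONG_ATTR(),
 * DEVICE_INT_ATTR(), DEVICE_BOOL_ATTR() and DEVICE_STRING_ATTR_RO() macros,
 * which wrap a struct dev_ext_attribute around an existing variable. The
 * variable and attribute names below are hypothetical:
 *
 *	static unsigned long example_timeout_ms = 100;
 *	static DEVICE_ULONG_ATTR(timeout_ms, 0644, example_timeout_ms);
 *
 *	// in some probe/setup path:
 *	err = device_create_file(dev, &dev_attr_timeout_ms.attr);
 *
 * Reads then show the variable in hex (see device_show_ulong() above) and
 * writes update it via kstrtoul().
 */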
/**
* device_release - free device structure.
* @kobj: device's kobject.
*
* This is called once the reference count for the object
* reaches 0. We forward the call to the device's release
* method, which should handle actually freeing the structure.
*/
static void device_release(struct kobject *kobj)
{
struct device *dev = kobj_to_dev(kobj);
struct device_private *p = dev->p;
/*
* Some platform devices are driven without a driver attached, and
* managed resources may have been acquired. Make sure all resources
* are released.
*
* Drivers can still add resources to the device after it has been
* deleted but is still alive, so release devres here to avoid a
* possible memory leak.
*/
devres_release_all(dev);
kfree(dev->dma_range_map);
if (dev->release)
dev->release(dev);
else if (dev->type && dev->type->release)
dev->type->release(dev);
else if (dev->class && dev->class->dev_release)
dev->class->dev_release(dev);
else
WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
dev_name(dev));
kfree(p);
}
static const void *device_namespace(const struct kobject *kobj)
{
const struct device *dev = kobj_to_dev(kobj);
const void *ns = NULL;
if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
}
static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
const struct device *dev = kobj_to_dev(kobj);
if (dev->class && dev->class->get_ownership)
dev->class->get_ownership(dev, uid, gid);
}
static const struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
.namespace = device_namespace,
.get_ownership = device_get_ownership,
};
static int dev_uevent_filter(const struct kobject *kobj)
{
const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &device_ktype) {
const struct device *dev = kobj_to_dev(kobj);
if (dev->bus)
return 1;
if (dev->class)
return 1;
}
return 0;
}
static const char *dev_uevent_name(const struct kobject *kobj)
{
const struct device *dev = kobj_to_dev(kobj);
if (dev->bus)
return dev->bus->name;
if (dev->class)
return dev->class->name;
return NULL;
}
/*
* Try filling "DRIVER=<name>" uevent variable for a device. Because this
* function may race with binding and unbinding the device from a driver,
* we need to be careful. Binding is generally safe, at worst we miss the
* fact that the device is already bound to a driver (but the driver
* information that is delivered through uevents is best-effort, it may
* become obsolete as soon as it is generated anyways). Unbinding is more
* risky as the driver pointer is transitioning to NULL, so READ_ONCE() should
* be used to make sure we are dealing with the same pointer, and to
* ensure that the driver structure is not going to disappear from under us
* we take the bus' drivers klist lock. The assumption is that only a
* registered driver can be bound to a device, and that bus code will take
* the same lock to unregister a driver.
*/
static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
if (sp) {
scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
struct device_driver *drv = READ_ONCE(dev->driver);
if (drv)
add_uevent_var(env, "DRIVER=%s", drv->name);
}
subsys_put(sp);
}
}
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
int retval = 0;
/* add device node properties if present */
if (MAJOR(dev->devt)) {
const char *tmp;
const char *name;
umode_t mode = 0;
kuid_t uid = GLOBAL_ROOT_UID;
kgid_t gid = GLOBAL_ROOT_GID;
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
if (name) {
add_uevent_var(env, "DEVNAME=%s", name);
if (mode)
add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
if (!uid_eq(uid, GLOBAL_ROOT_UID))
add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
if (!gid_eq(gid, GLOBAL_ROOT_GID))
add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
kfree(tmp);
}
}
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
/* Add "DRIVER=%s" variable if the device is bound to a driver */
dev_driver_uevent(dev, env);
/* Add common DT information about the device */
of_device_uevent(dev, env);
/* have the bus specific function add its stuff */
if (dev->bus && dev->bus->uevent) {
retval = dev->bus->uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: bus uevent() returned %d\n",
dev_name(dev), __func__, retval);
}
/* have the class specific function add its stuff */
if (dev->class && dev->class->dev_uevent) {
retval = dev->class->dev_uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: class uevent() "
"returned %d\n", dev_name(dev),
__func__, retval);
}
/* have the device type specific function add its stuff */
if (dev->type && dev->type->uevent) {
retval = dev->type->uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: dev_type uevent() "
"returned %d\n", dev_name(dev),
__func__, retval);
}
return retval;
}
static const struct kset_uevent_ops device_uevent_ops = {
.filter = dev_uevent_filter,
.name = dev_uevent_name,
.uevent = dev_uevent,
};
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct kobject *top_kobj;
struct kset *kset;
struct kobj_uevent_env *env = NULL;
int i;
int len = 0;
int retval;
/* search the kset the device belongs to */
top_kobj = &dev->kobj;
while (!top_kobj->kset && top_kobj->parent)
top_kobj = top_kobj->parent;
if (!top_kobj->kset)
goto out;
kset = top_kobj->kset;
if (!kset->uevent_ops || !kset->uevent_ops->uevent)
goto out;
/* respect filter */
if (kset->uevent_ops && kset->uevent_ops->filter)
if (!kset->uevent_ops->filter(&dev->kobj))
goto out;
env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
if (retval)
goto out;
/* copy keys to file */
for (i = 0; i < env->envp_idx; i++)
len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
kfree(env);
return len;
}
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
rc = kobject_synth_uevent(&dev->kobj, buf, count);
if (rc) {
dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
return rc;
}
return count;
}
static DEVICE_ATTR_RW(uevent);
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
bool val;
device_lock(dev);
val = !dev->offline;
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
bool val;
int ret;
ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
ret = val ? device_online(dev) : device_offline(dev);
unlock_device_hotplug();
return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const char *loc;
switch (dev->removable) {
case DEVICE_REMOVABLE:
loc = "removable";
break;
case DEVICE_FIXED:
loc = "fixed";
break;
default:
loc = "unknown";
}
return sysfs_emit(buf, "%s\n", loc);
}
static DEVICE_ATTR_RO(removable);
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups)
{
sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
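/*
 * Illustrative sketch of a caller-defined attribute group (all names are
 * hypothetical); ATTRIBUTE_GROUPS() generates example_groups from the
 * example_attrs array:
 *
 *	static ssize_t example_status_show(struct device *dev,
 *					   struct device_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "ok\n");
 *	}
 *	static DEVICE_ATTR_RO(example_status);
 *
 *	static struct attribute *example_attrs[] = {
 *		&dev_attr_example_status.attr,
 *		NULL
 *	};
 *	ATTRIBUTE_GROUPS(example);
 *
 *	err = device_add_groups(dev, example_groups);
 *	...
 *	device_remove_groups(dev, example_groups);
 */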
union device_attr_group_devres {
const struct attribute_group *group;
const struct attribute_group **groups;
};
static void devm_attr_group_remove(struct device *dev, void *res)
{
union device_attr_group_devres *devres = res;
const struct attribute_group *group = devres->group;
dev_dbg(dev, "%s: removing group %p\n", __func__, group);
sysfs_remove_group(&dev->kobj, group);
}
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
* @grp: The attribute group to create
*
* This function creates a group for the first time. It will explicitly
* warn and error if any of the attribute files being created already exist.
*
* Returns 0 on success or error code on failure.
*/
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
union device_attr_group_devres *devres;
int error;
devres = devres_alloc(devm_attr_group_remove,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
error = sysfs_create_group(&dev->kobj, grp);
if (error) {
devres_free(devres);
return error;
}
devres->group = grp;
devres_add(dev, devres);
return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
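/*
 * Illustrative sketch: in a driver's probe() the devres-managed variant keeps
 * the error and remove paths simple because the group is torn down
 * automatically when the device is unbound (the group name is hypothetical):
 *
 *	ret = devm_device_add_group(dev, &example_group);
 *	if (ret)
 *		return ret;
 *	// no matching remove call needed; devm_attr_group_remove() runs on unbind
 */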
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
const struct device_type *type = dev->type;
int error;
if (class) {
error = device_add_groups(dev, class->dev_groups);
if (error)
return error;
}
if (type) {
error = device_add_groups(dev, type->groups);
if (error)
goto err_remove_class_groups;
}
error = device_add_groups(dev, dev->groups);
if (error)
goto err_remove_type_groups;
if (device_supports_offline(dev) && !dev->offline_disabled) {
error = device_create_file(dev, &dev_attr_online);
if (error)
goto err_remove_dev_groups;
}
if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
error = device_create_file(dev, &dev_attr_waiting_for_supplier);
if (error)
goto err_remove_dev_online;
}
if (dev_removable_is_valid(dev)) {
error = device_create_file(dev, &dev_attr_removable);
if (error)
goto err_remove_dev_waiting_for_supplier;
}
if (dev_add_physical_location(dev)) {
error = device_add_group(dev,
&dev_attr_physical_location_group);
if (error)
goto err_remove_dev_removable;
}
return 0;
err_remove_dev_removable:
device_remove_file(dev, &dev_attr_removable);
err_remove_dev_waiting_for_supplier:
device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
err_remove_class_groups:
if (class)
device_remove_groups(dev, class->dev_groups);
return error;
}
static void device_remove_attrs(struct device *dev)
{
const struct class *class = dev->class;
const struct device_type *type = dev->type;
if (dev->physical_location) {
device_remove_group(dev, &dev_attr_physical_location_group);
kfree(dev->physical_location);
}
device_remove_file(dev, &dev_attr_removable);
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
if (type)
device_remove_groups(dev, type->groups);
if (class)
device_remove_groups(dev, class->dev_groups);
}
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
/* /sys/devices/ */
struct kset *devices_kset;
/**
* devices_kset_move_before - Move device in the devices_kset's list.
* @deva: Device to move.
* @devb: Device @deva should come before.
*/
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
if (!devices_kset)
return;
pr_debug("devices_kset: Moving %s before %s\n",
dev_name(deva), dev_name(devb));
spin_lock(&devices_kset->list_lock);
list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
spin_unlock(&devices_kset->list_lock);
}
/**
* devices_kset_move_after - Move device in the devices_kset's list.
* @deva: Device to move
* @devb: Device @deva should come after.
*/
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
if (!devices_kset)
return;
pr_debug("devices_kset: Moving %s after %s\n",
dev_name(deva), dev_name(devb));
spin_lock(&devices_kset->list_lock);
list_move(&deva->kobj.entry, &devb->kobj.entry);
spin_unlock(&devices_kset->list_lock);
}
/**
* devices_kset_move_last - move the device to the end of devices_kset's list.
* @dev: device to move
*/
void devices_kset_move_last(struct device *dev)
{
if (!devices_kset)
return;
pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
spin_lock(&devices_kset->list_lock);
list_move_tail(&dev->kobj.entry, &devices_kset->list);
spin_unlock(&devices_kset->list_lock);
}
/**
* device_create_file - create sysfs attribute file for device.
* @dev: device.
* @attr: device attribute descriptor.
*/
int device_create_file(struct device *dev,
const struct device_attribute *attr)
{
int error = 0;
if (dev) {
WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
"Attribute %s: write permission without 'store'\n",
attr->attr.name);
WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
"Attribute %s: read permission without 'show'\n",
attr->attr.name);
error = sysfs_create_file(&dev->kobj, &attr->attr);
}
return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
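/*
 * Illustrative sketch (hypothetical attribute): for a single attribute the
 * usual pairing is device_create_file()/device_remove_file() (see below)
 * around the lifetime of whatever backs it:
 *
 *	static ssize_t example_mode_show(struct device *dev,
 *					 struct device_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "auto\n");
 *	}
 *	static DEVICE_ATTR_RO(example_mode);
 *
 *	err = device_create_file(dev, &dev_attr_example_mode);
 *	...
 *	device_remove_file(dev, &dev_attr_example_mode);
 *
 * For attributes that must exist before the KOBJ_ADD uevent is sent, prefer
 * default groups (dev->groups or class/type groups) over calling this after
 * device_add().
 */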
/**
* device_remove_file - remove sysfs attribute file.
* @dev: device.
* @attr: device attribute descriptor.
*/
void device_remove_file(struct device *dev,
const struct device_attribute *attr)
{
if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);
/**
* device_remove_file_self - remove sysfs attribute file from its own method.
* @dev: device.
* @attr: device attribute descriptor.
*
* See kernfs_remove_self() for details.
*/
bool device_remove_file_self(struct device *dev,
const struct device_attribute *attr)
{
if (dev)
return sysfs_remove_file_self(&dev->kobj, &attr->attr);
else
return false;
}
EXPORT_SYMBOL_GPL(device_remove_file_self);
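/*
 * Illustrative sketch: a store() method that tears down its own device (e.g.
 * a "remove"-style attribute) uses the self-removal variant so that deleting
 * the attribute does not deadlock on itself; the teardown helper name is
 * hypothetical:
 *
 *	static ssize_t remove_store(struct device *dev,
 *				    struct device_attribute *attr,
 *				    const char *buf, size_t count)
 *	{
 *		if (device_remove_file_self(dev, attr))
 *			example_teardown_device(dev);
 *		return count;
 *	}
 */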
/**
* device_create_bin_file - create sysfs binary attribute file for device.
* @dev: device.
* @attr: device binary attribute descriptor.
*/
int device_create_bin_file(struct device *dev,
const struct bin_attribute *attr)
{
int error = -EINVAL;
if (dev)
error = sysfs_create_bin_file(&dev->kobj, attr);
return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);
/**
* device_remove_bin_file - remove sysfs binary attribute file
* @dev: device.
* @attr: device binary attribute descriptor.
*/
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr)
{
if (dev)
sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);
static void klist_children_get(struct klist_node *n)
{
struct device_private *p = to_device_private_parent(n);
struct device *dev = p->device;
get_device(dev);
}
static void klist_children_put(struct klist_node *n)
{
struct device_private *p = to_device_private_parent(n);
struct device *dev = p->device;
put_device(dev);
}
/**
* device_initialize - init device structure.
* @dev: device.
*
* This prepares the device for use by other layers by initializing
* its fields.
* It is the first half of device_register(), if called by
* that function, though it can also be called separately, so one
* may use @dev's fields. In particular, get_device()/put_device()
* may be used for reference counting of @dev after calling this
* function.
*
* All fields in @dev must be initialized by the caller to 0, except
* for those explicitly set to some other value. The simplest
* approach is to use kzalloc() to allocate the structure containing
* @dev.
*
* NOTE: Use put_device() to give up your reference instead of freeing
* @dev directly once you have called this function.
*/
void device_initialize(struct device *dev)
{
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, NUMA_NO_NODE);
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
dev->dma_coherent = dma_default_coherent;
#endif
swiotlb_dev_init(dev);
}
EXPORT_SYMBOL_GPL(device_initialize);
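/*
 * Illustrative sketch of the two-step pattern described above (struct foo,
 * foo_release() and the name format are hypothetical). Note that every error
 * after device_initialize() is unwound with put_device(), never kfree():
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	if (!foo)
 *		return -ENOMEM;
 *	device_initialize(&foo->dev);
 *	foo->dev.release = foo_release;
 *	err = dev_set_name(&foo->dev, "foo%d", id);
 *	if (!err)
 *		err = device_add(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);	// frees foo via foo_release()
 *		return err;
 *	}
 */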
struct kobject *virtual_device_parent(void)
{
static struct kobject *virtual_dir = NULL;
if (!virtual_dir)
virtual_dir = kobject_create_and_add("virtual",
&devices_kset->kobj);
return virtual_dir;
}
struct class_dir {
struct kobject kobj;
const struct class *class;
};
#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
static void class_dir_release(struct kobject *kobj)
{
struct class_dir *dir = to_class_dir(kobj);
kfree(dir);
}
static const
struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *kobj)
{
const struct class_dir *dir = to_class_dir(kobj);
return dir->class->ns_type;
}
static const struct kobj_type class_dir_ktype = {
.release = class_dir_release,
.sysfs_ops = &kobj_sysfs_ops,
.child_ns_type = class_dir_child_ns_type
};
static struct kobject *class_dir_create_and_add(struct subsys_private *sp,
struct kobject *parent_kobj)
{
struct class_dir *dir;
int retval;
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
return ERR_PTR(-ENOMEM);
dir->class = sp->class;
kobject_init(&dir->kobj, &class_dir_ktype);
dir->kobj.kset = &sp->glue_dirs;
retval = kobject_add(&dir->kobj, parent_kobj, "%s", sp->class->name);
if (retval < 0) {
kobject_put(&dir->kobj);
return ERR_PTR(retval);
}
return &dir->kobj;
}
static DEFINE_MUTEX(gdp_mutex);
static struct kobject *get_device_parent(struct device *dev,
struct device *parent)
{
struct subsys_private *sp = class_to_subsys(dev->class);
struct kobject *kobj = NULL;
if (sp) {
struct kobject *parent_kobj;
struct kobject *k;
/*
* If we have no parent, we live in "virtual".
* Class-devices with a non class-device as parent, live
* in a "glue" directory to prevent namespace collisions.
*/
if (parent == NULL)
parent_kobj = virtual_device_parent();
else if (parent->class && !dev->class->ns_type) {
subsys_put(sp);
return &parent->kobj;
} else {
parent_kobj = &parent->kobj;
}
mutex_lock(&gdp_mutex);
/* find our class-directory at the parent and reference it */
spin_lock(&sp->glue_dirs.list_lock);
list_for_each_entry(k, &sp->glue_dirs.list, entry)
if (k->parent == parent_kobj) {
kobj = kobject_get(k);
break;
}
spin_unlock(&sp->glue_dirs.list_lock);
if (kobj) {
mutex_unlock(&gdp_mutex);
subsys_put(sp);
return kobj;
}
/* or create a new class-directory at the parent device */
k = class_dir_create_and_add(sp, parent_kobj);
/* do not emit an uevent for this simple "glue" directory */
mutex_unlock(&gdp_mutex);
subsys_put(sp);
return k;
}
/* subsystems can specify a default root directory for their devices */
if (!parent && dev->bus) {
struct device *dev_root = bus_get_dev_root(dev->bus);
if (dev_root) {
kobj = &dev_root->kobj;
put_device(dev_root);
return kobj;
}
}
if (parent)
return &parent->kobj;
return NULL;
}
static inline bool live_in_glue_dir(struct kobject *kobj,
struct device *dev)
{
struct subsys_private *sp;
bool retval;
if (!kobj || !dev->class)
return false;
sp = class_to_subsys(dev->class);
if (!sp)
return false;
if (kobj->kset == &sp->glue_dirs)
retval = true;
else
retval = false;
subsys_put(sp);
return retval;
}
static inline struct kobject *get_glue_dir(struct device *dev)
{
return dev->kobj.parent;
}
/**
* kobject_has_children - Returns whether a kobject has children.
* @kobj: the object to test
*
* This will return whether a kobject has other kobjects as children.
*
* It does NOT account for the presence of attribute files, only sub
* directories. It also assumes there is no concurrent addition or
* removal of such children, and thus relies on external locking.
*/
static inline bool kobject_has_children(struct kobject *kobj)
{
WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
return kobj->sd && kobj->sd->dir.subdirs;
}
/*
* Make sure cleaning up the glue dir is the last step: the kobject's
* .release handler needs to run while the global lock is held.
*/
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
unsigned int ref;
/* see if we live in a "glue" directory */
if (!live_in_glue_dir(glue_dir, dev))
return;
mutex_lock(&gdp_mutex);
/**
* There is a race condition between removing glue directory
* and adding a new device under the glue directory.
*
* CPU1: CPU2:
*
* device_add()
* get_device_parent()
* class_dir_create_and_add()
* kobject_add_internal()
* create_dir() // create glue_dir
*
* device_add()
* get_device_parent()
* kobject_get() // get glue_dir
*
* device_del()
* cleanup_glue_dir()
* kobject_del(glue_dir)
*
* kobject_add()
* kobject_add_internal()
* create_dir() // in glue_dir
* sysfs_create_dir_ns()
* kernfs_create_dir_ns(sd)
*
* sysfs_remove_dir() // glue_dir->sd=NULL
* sysfs_put() // free glue_dir->sd
*
* // sd is freed
* kernfs_new_node(sd)
* kernfs_get(glue_dir)
* kernfs_add_one()
* kernfs_put()
*
* Before CPU1 removes the last child device under the glue dir, if CPU2
* adds a new device under the glue dir, the glue_dir kobject reference
* count will be increased to 2 in kobject_get(k), and CPU2 will have
* called kernfs_create_dir_ns(). Meanwhile, CPU1 calls sysfs_remove_dir()
* and sysfs_put(), which results in glue_dir->sd being freed.
*
* CPU2 will then see a stale "empty" but still potentially used
* glue dir around in kernfs_new_node().
*
* To avoid this, we should also make sure that the kernfs_node for
* glue_dir is released on CPU1 only when the refcount for the glue_dir
* kobj is 1.
*/
ref = kref_read(&glue_dir->kref);
if (!kobject_has_children(glue_dir) && !--ref)
kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
}
static int device_add_class_symlinks(struct device *dev)
{
struct device_node *of_node = dev_of_node(dev);
struct subsys_private *sp;
int error;
if (of_node) {
error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
if (error)
dev_warn(dev, "Error %d creating of_node link\n", error);
/* An error here doesn't warrant bringing down the device */
}
sp = class_to_subsys(dev->class);
if (!sp)
return 0;
error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
if (error)
goto out_devnode;
if (dev->parent && device_is_not_partition(dev)) {
error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
"device");
if (error)
goto out_subsys;
}
/* link in the class directory pointing to the device */
error = sysfs_create_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
if (error)
goto out_device;
goto exit;
out_device:
sysfs_remove_link(&dev->kobj, "device");
out_subsys:
sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
sysfs_remove_link(&dev->kobj, "of_node");
exit:
subsys_put(sp);
return error;
}
static void device_remove_class_symlinks(struct device *dev)
{
struct subsys_private *sp = class_to_subsys(dev->class);
if (dev_of_node(dev))
sysfs_remove_link(&dev->kobj, "of_node");
if (!sp)
return;
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(&dev->kobj, "subsystem");
sysfs_delete_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
subsys_put(sp);
}
/**
* dev_set_name - set a device name
* @dev: device
* @fmt: format string for the device's name
*/
int dev_set_name(struct device *dev, const char *fmt, ...)
{
va_list vargs;
int err;
va_start(vargs, fmt);
err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
va_end(vargs);
return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);
/* select a /sys/dev/ directory for the device */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
if (is_blockdev(dev))
return sysfs_dev_block_kobj;
else
return sysfs_dev_char_kobj;
}
static int device_create_sys_dev_entry(struct device *dev)
{
struct kobject *kobj = device_to_dev_kobj(dev);
int error = 0;
char devt_str[15];
if (kobj) {
format_dev_t(devt_str, dev->devt);
error = sysfs_create_link(kobj, &dev->kobj, devt_str);
}
return error;
}
static void device_remove_sys_dev_entry(struct device *dev)
{
struct kobject *kobj = device_to_dev_kobj(dev);
char devt_str[15];
if (kobj) {
format_dev_t(devt_str, dev->devt);
sysfs_remove_link(kobj, devt_str);
}
}
static int device_private_init(struct device *dev)
{
dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
if (!dev->p)
return -ENOMEM;
dev->p->device = dev;
klist_init(&dev->p->klist_children, klist_children_get,
klist_children_put);
INIT_LIST_HEAD(&dev->p->deferred_probe);
return 0;
}
/**
* device_add - add device to device hierarchy.
* @dev: device.
*
* This is part 2 of device_register(), though may be called
* separately _iff_ device_initialize() has been called separately.
*
* This adds @dev to the kobject hierarchy via kobject_add(), adds it
* to the global and sibling lists for the device, then
* adds it to the other relevant subsystems of the driver model.
*
* Do not call this routine or device_register() more than once for
* any device structure. The driver model core is not designed to work
* with devices that get unregistered and then spring back to life.
* (Among other things, it's very hard to guarantee that all references
* to the previous incarnation of @dev have been dropped.) Allocate
* and register a fresh new struct device instead.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up your
* reference instead.
*
* Rule of thumb is: if device_add() succeeds, you should call
* device_del() when you want to get rid of it. If device_add() has
* *not* succeeded, use *only* put_device() to drop the reference
* count.
*/
int device_add(struct device *dev)
{
struct subsys_private *sp;
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
int error = -EINVAL;
struct kobject *glue_dir = NULL;
dev = get_device(dev);
if (!dev)
goto done;
if (!dev->p) {
error = device_private_init(dev);
if (error)
goto done;
}
/*
* for statically allocated devices, which should all be converted
* some day, we need to initialize the name. We prevent reading back
* the name, and force the use of dev_name()
*/
if (dev->init_name) {
error = dev_set_name(dev, "%s", dev->init_name);
dev->init_name = NULL;
}
if (dev_name(dev))
error = 0;
/* subsystems can specify simple device enumeration */
else if (dev->bus && dev->bus->dev_name)
error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
else
error = -EINVAL;
if (error)
goto name_error;
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
parent = get_device(dev->parent);
kobj = get_device_parent(dev, parent);
if (IS_ERR(kobj)) {
error = PTR_ERR(kobj);
goto parent_error;
}
if (kobj)
dev->kobj.parent = kobj;
/* use parent numa_node */
if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
set_dev_node(dev, dev_to_node(parent));
/* first, register with generic layer. */
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
if (error) {
glue_dir = kobj;
goto Error;
}
/* notify platform of device entry */
device_platform_notify(dev);
error = device_create_file(dev, &dev_attr_uevent);
if (error)
goto attrError;
error = device_add_class_symlinks(dev);
if (error)
goto SymlinkError;
error = device_add_attrs(dev);
if (error)
goto AttrsError;
error = bus_add_device(dev);
if (error)
goto BusError;
error = dpm_sysfs_add(dev);
if (error)
goto DPMError;
device_pm_add(dev);
if (MAJOR(dev->devt)) {
error = device_create_file(dev, &dev_attr_dev);
if (error)
goto DevAttrError;
error = device_create_sys_dev_entry(dev);
if (error)
goto SysEntryError;
devtmpfs_create_node(dev);
}
/* Notify clients of device addition. This call must come
* after dpm_sysfs_add() and before kobject_uevent().
*/
bus_notify(dev, BUS_NOTIFY_ADD_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_ADD);
/*
* Check if any of the other devices (consumers) have been waiting for
* this device (supplier) to be added so that they can create a device
* link to it.
*
* This needs to happen after device_pm_add() because device_link_add()
* requires the supplier be registered before it's called.
*
* But this also needs to happen before bus_probe_device() to make sure
* waiting consumers can link to it before the driver is bound to the
* device and the driver sync_state callback is called for this device.
*/
if (dev->fwnode && !dev->fwnode->dev) {
dev->fwnode->dev = dev;
fw_devlink_link_device(dev);
}
bus_probe_device(dev);
/*
* If all driver registration is done and a newly added device doesn't
* match with any driver, don't block its consumers from probing in
* case the consumer device is able to operate without this supplier.
*/
if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
fw_devlink_unblock_consumers(dev);
if (parent)
klist_add_tail(&dev->p->knode_parent,
&parent->p->klist_children);
sp = class_to_subsys(dev->class);
if (sp) {
mutex_lock(&sp->mutex);
/* tie the class to the device */
klist_add_tail(&dev->p->knode_class, &sp->klist_devices);
/* notify any interfaces that the device is here */
list_for_each_entry(class_intf, &sp->interfaces, node)
if (class_intf->add_dev)
class_intf->add_dev(dev);
mutex_unlock(&sp->mutex);
subsys_put(sp);
}
done:
put_device(dev);
return error;
SysEntryError:
if (MAJOR(dev->devt))
device_remove_file(dev, &dev_attr_dev);
DevAttrError:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
device_set_driver(dev, NULL);
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
AttrsError:
device_remove_class_symlinks(dev);
SymlinkError:
device_remove_file(dev, &dev_attr_uevent);
attrError:
device_platform_notify_remove(dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
Error:
cleanup_glue_dir(dev, glue_dir);
parent_error:
put_device(parent);
name_error:
kfree(dev->p);
dev->p = NULL;
goto done;
}
EXPORT_SYMBOL_GPL(device_add);
/**
* device_register - register a device with the system.
* @dev: pointer to the device structure
*
* This happens in two clean steps - initialize the device
* and add it to the system. The two steps can be called
* separately, but this is the easiest and most common.
* I.e. you should only call the two helpers separately if you
* have a clearly defined need to use and refcount the device
* before it is added to the hierarchy.
*
* For more information, see the kerneldoc for device_initialize()
* and device_add().
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up the
* reference initialized in this function instead.
*/
int device_register(struct device *dev)
{
device_initialize(dev);
return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
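/*
 * Illustrative sketch (not part of the original file, assumes the usual
 * <linux/device.h> and <linux/slab.h> setup): a caller that embeds a
 * struct device, registers it, and honours the NOTE above by dropping the
 * reference with put_device() instead of freeing directly when
 * device_register() fails. All "foo_example" names are hypothetical.
 */
struct foo_example {
	struct device dev;
};

static void foo_example_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_example, dev));
}

static int foo_example_create(struct device *parent)
{
	struct foo_example *foo;
	int err;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->dev.parent = parent;
	foo->dev.release = foo_example_release;
	foo->dev.init_name = "foo_example0";

	err = device_register(&foo->dev);
	if (err) {
		/* never kfree(foo) here; drop the reference instead */
		put_device(&foo->dev);
		return err;
	}

	return 0;
}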
/**
* get_device - increment reference count for device.
* @dev: device.
*
* This simply forwards the call to kobject_get(), though
* we do take care to provide for the case that we get a NULL
* pointer passed in.
*/
struct device *get_device(struct device *dev)
{
return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);
/**
* put_device - decrement reference count.
* @dev: device in question.
*/
void put_device(struct device *dev)
{
/* might_sleep(); */
if (dev)
kobject_put(&dev->kobj);
}
EXPORT_SYMBOL_GPL(put_device);
bool kill_device(struct device *dev)
{
/*
* Require the device lock and set the "dead" flag to guarantee that
* the update behavior is consistent with the other bitfields near
* it and that we cannot have an asynchronous probe routine trying
* to run while we are tearing out the bus/class/sysfs from
* underneath the device.
*/
device_lock_assert(dev);
if (dev->p->dead)
return false;
dev->p->dead = true;
return true;
}
EXPORT_SYMBOL_GPL(kill_device);
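/*
 * Illustrative sketch (not part of the original file): kill_device()
 * requires the device lock, so a hypothetical hot-removal path would wrap
 * it like this. The function name is made up for the example.
 */
static void example_mark_device_dead(struct device *dev)
{
	bool first;

	device_lock(dev);
	first = kill_device(dev);
	device_unlock(dev);

	if (first)
		dev_dbg(dev, "marked dead; no further probing will happen\n");
}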
/**
* device_del - delete device from system.
* @dev: device.
*
* This is the first part of the device unregistration
* sequence. This removes the device from the lists we control
* from here, has it removed from the other driver model
* subsystems it was added to in device_add(), and removes it
* from the kobject hierarchy.
*
* NOTE: this should be called manually _iff_ device_add() was
* also called manually.
*/
void device_del(struct device *dev)
{
struct subsys_private *sp;
struct device *parent = dev->parent;
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
unsigned int noio_flag;
device_lock(dev);
kill_device(dev);
device_unlock(dev);
if (dev->fwnode && dev->fwnode->dev == dev)
dev->fwnode->dev = NULL;
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
noio_flag = memalloc_noio_save();
bus_notify(dev, BUS_NOTIFY_DEL_DEVICE);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
if (MAJOR(dev->devt)) {
devtmpfs_delete_node(dev);
device_remove_sys_dev_entry(dev);
device_remove_file(dev, &dev_attr_dev);
}
sp = class_to_subsys(dev->class);
if (sp) {
device_remove_class_symlinks(dev);
mutex_lock(&sp->mutex);
/* notify any interfaces that the device is now gone */
list_for_each_entry(class_intf, &sp->interfaces, node)
if (class_intf->remove_dev)
class_intf->remove_dev(dev);
/* remove the device from the class list */
klist_del(&dev->p->knode_class);
mutex_unlock(&sp->mutex);
subsys_put(sp);
}
device_remove_file(dev, &dev_attr_uevent);
device_remove_attrs(dev);
bus_remove_device(dev);
device_pm_remove(dev);
driver_deferred_probe_del(dev);
device_platform_notify_remove(dev);
device_links_purge(dev);
/*
* If a device does not have a driver attached, we need to clean
* up any managed resources. We do this in device_release(), but
* it's never called (and we leak the device) if a managed
* resource holds a reference to the device. So release all
* managed resources here, like we do in driver_detach(). We
* still need to do so again in device_release() in case someone
* adds a new resource after this point, though.
*/
devres_release_all(dev);
bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
cleanup_glue_dir(dev, glue_dir);
memalloc_noio_restore(noio_flag);
put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
/**
* device_unregister - unregister device from system.
* @dev: device going away.
*
* We do this in two parts, like we do device_register(). First,
* we remove it from all the subsystems with device_del(), then
* we decrement the reference count via put_device(). If that
* is the final reference count, the device will be cleaned up
* via device_release() above. Otherwise, the structure will
* stick around until the final reference to the device is dropped.
*/
void device_unregister(struct device *dev)
{
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
device_del(dev);
put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
static struct device *prev_device(struct klist_iter *i)
{
struct klist_node *n = klist_prev(i);
struct device *dev = NULL;
struct device_private *p;
if (n) {
p = to_device_private_parent(n);
dev = p->device;
}
return dev;
}
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
struct device *dev = NULL;
struct device_private *p;
if (n) {
p = to_device_private_parent(n);
dev = p->device;
}
return dev;
}
/**
* device_get_devnode - path of device node file
* @dev: device
* @mode: returned file access mode
* @uid: returned file owner
* @gid: returned file group
* @tmp: possibly allocated string
*
* Return the relative path of a possible device node.
* Non-default names may need to allocate memory to compose
* a name. This memory is returned in tmp and needs to be
* freed by the caller.
*/
const char *device_get_devnode(const struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid,
const char **tmp)
{
char *s;
*tmp = NULL;
/* the device type may provide a specific name */
if (dev->type && dev->type->devnode)
*tmp = dev->type->devnode(dev, mode, uid, gid);
if (*tmp)
return *tmp;
/* the class may provide a specific name */
if (dev->class && dev->class->devnode)
*tmp = dev->class->devnode(dev, mode);
if (*tmp)
return *tmp;
/* return name without allocation, tmp == NULL */
if (strchr(dev_name(dev), '!') == NULL)
return dev_name(dev);
/* replace '!' in the name with '/' */
s = kstrdup_and_replace(dev_name(dev), '!', '/', GFP_KERNEL);
if (!s)
return NULL;
return *tmp = s;
}
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
while (!error && (child = next_device(&i)))
error = fn(child, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child);
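/*
 * Illustrative sketch (not part of the original file): a device_iter_t
 * callback that counts children via device_for_each_child(). Returning
 * non-zero from the callback would stop the iteration early.
 */
static int example_count_one_child(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static unsigned int example_count_children(struct device *parent)
{
	unsigned int count = 0;

	device_for_each_child(parent, &count, example_count_one_child);
	return count;
}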
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
while ((child = prev_device(&i)) && !error)
error = fn(child, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
* device_for_each_child_reverse_from - device child iterator in reversed order.
* @parent: parent struct device.
* @from: optional starting point in child list
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @parent's child devices, starting at @from, and call @fn
* for each, passing it @data. This helper is identical to
* device_for_each_child_reverse() when @from is NULL.
*
* @fn is checked each iteration. If it returns anything other than 0,
* iteration stops and that value is returned to the caller of
* device_for_each_child_reverse_from().
*/
int device_for_each_child_reverse_from(struct device *parent,
struct device *from, void *data,
device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
if (!parent || !parent->p)
return 0;
klist_iter_init_node(&parent->p->klist_children, &i,
(from ? &from->p->knode_parent : NULL));
while ((child = prev_device(&i)) && !error)
error = fn(child, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
* @data: Data to pass to match function
* @match: Callback function to check device
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
* determined by the @match callback.
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero and a reference to the
* current device can be obtained, this function will return to the caller
* and not iterate over any more devices.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
struct device *device_find_child(struct device *parent, const void *data,
device_match_t match)
{
struct klist_iter i;
struct device *child;
if (!parent || !parent->p)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
while ((child = next_device(&i))) {
if (match(child, data)) {
get_device(child);
break;
}
}
klist_iter_exit(&i);
return child;
}
EXPORT_SYMBOL_GPL(device_find_child);
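/*
 * Illustrative sketch (not part of the original file): looking up a child
 * by name using device_match_name() (defined further down in this file and
 * declared in <linux/device.h>). The reference taken by device_find_child()
 * on a match must be dropped with put_device().
 */
static bool example_child_exists(struct device *parent, const char *name)
{
	struct device *child;

	child = device_find_child(parent, name, device_match_name);
	if (!child)
		return false;

	put_device(child);
	return true;
}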
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
if (!devices_kset)
return -ENOMEM;
dev_kobj = kobject_create_and_add("dev", NULL);
if (!dev_kobj)
goto dev_kobj_err;
sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
if (!sysfs_dev_block_kobj)
goto block_kobj_err;
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
if (!device_link_wq)
goto wq_err;
return 0;
wq_err:
kobject_put(sysfs_dev_char_kobj);
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:
kobject_put(dev_kobj);
dev_kobj_err:
kset_unregister(devices_kset);
return -ENOMEM;
}
static int device_check_offline(struct device *dev, void *not_used)
{
int ret;
ret = device_for_each_child(dev, NULL, device_check_offline);
if (ret)
return ret;
return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
}
/**
* device_offline - Prepare the device for hot-removal.
* @dev: Device to be put offline.
*
* Execute the device bus type's .offline() callback, if present, to prepare
* the device for a subsequent hot-removal. If that succeeds, the device must
* not be used until either it is removed or its bus type's .online() callback
* is executed.
*
* Call under device_hotplug_lock.
*/
int device_offline(struct device *dev)
{
int ret;
if (dev->offline_disabled)
return -EPERM;
ret = device_for_each_child(dev, NULL, device_check_offline);
if (ret)
return ret;
device_lock(dev);
if (device_supports_offline(dev)) {
if (dev->offline) {
ret = 1;
} else {
ret = dev->bus->offline(dev);
if (!ret) {
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
dev->offline = true;
}
}
}
device_unlock(dev);
return ret;
}
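/*
 * Illustrative sketch (not part of the original file): device_offline()
 * must be called under device_hotplug_lock, e.g. via lock_device_hotplug().
 * A return value of 1 only means the device was already offline, so the
 * wrapper below treats it as success.
 */
static int example_offline_for_removal(struct device *dev)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(dev);
	unlock_device_hotplug();

	return ret < 0 ? ret : 0;
}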
/**
* device_online - Put the device back online after successful device_offline().
* @dev: Device to be put back online.
*
* If device_offline() has been successfully executed for @dev, but the device
* has not been removed subsequently, execute its bus type's .online() callback
* to indicate that the device can be used again.
*
* Call under device_hotplug_lock.
*/
int device_online(struct device *dev)
{
int ret = 0;
device_lock(dev);
if (device_supports_offline(dev)) {
if (dev->offline) {
ret = dev->bus->online(dev);
if (!ret) {
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
dev->offline = false;
}
} else {
ret = 1;
}
}
device_unlock(dev);
return ret;
}
struct root_device {
struct device dev;
struct module *owner;
};
static inline struct root_device *to_root_device(struct device *d)
{
return container_of(d, struct root_device, dev);
}
static void root_device_release(struct device *dev)
{
kfree(to_root_device(dev));
}
/**
* __root_device_register - allocate and register a root device
* @name: root device name
* @owner: owner module of the root device, usually THIS_MODULE
*
* This function allocates a root device and registers it
* using device_register(). In order to free the returned
* device, use root_device_unregister().
*
* Root devices are dummy devices which allow other devices
* to be grouped under /sys/devices. Use this function to
* allocate a root device and then use it as the parent of
* any device which should appear under /sys/devices/{name}
*
* The /sys/devices/{name} directory will also contain a
* 'module' symlink which points to the @owner directory
* in sysfs.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
*
* Note: You probably want to use root_device_register().
*/
struct device *__root_device_register(const char *name, struct module *owner)
{
struct root_device *root;
int err = -ENOMEM;
root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
if (!root)
return ERR_PTR(err);
err = dev_set_name(&root->dev, "%s", name);
if (err) {
kfree(root);
return ERR_PTR(err);
}
root->dev.release = root_device_release;
err = device_register(&root->dev);
if (err) {
put_device(&root->dev);
return ERR_PTR(err);
}
#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
if (owner) {
struct module_kobject *mk = &owner->mkobj;
err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
if (err) {
device_unregister(&root->dev);
return ERR_PTR(err);
}
root->owner = owner;
}
#endif
return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
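/*
 * Illustrative sketch (not part of the original file): a subsystem without
 * a natural parent can group its devices under /sys/devices/<name> by
 * registering a root device with the root_device_register() wrapper and
 * tearing it down with root_device_unregister(). The "example_subsys" name
 * is hypothetical.
 */
static struct device *example_root;

static int example_subsys_init(void)
{
	example_root = root_device_register("example_subsys");
	return PTR_ERR_OR_ZERO(example_root);
}

static void example_subsys_exit(void)
{
	root_device_unregister(example_root);
}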
/**
* root_device_unregister - unregister and free a root device
* @dev: device going away
*
* This function unregisters and cleans up a device that was created by
* root_device_register().
*/
void root_device_unregister(struct device *dev)
{
struct root_device *root = to_root_device(dev);
if (root->owner)
sysfs_remove_link(&root->dev.kobj, "module");
device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);
static void device_create_release(struct device *dev)
{
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
kfree(dev);
}
static __printf(6, 0) struct device *
device_create_groups_vargs(const struct class *class, struct device *parent,
dev_t devt, void *drvdata,
const struct attribute_group **groups,
const char *fmt, va_list args)
{
struct device *dev = NULL;
int retval = -ENODEV;
if (IS_ERR_OR_NULL(class))
goto error;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
retval = -ENOMEM;
goto error;
}
device_initialize(dev);
dev->devt = devt;
dev->class = class;
dev->parent = parent;
dev->groups = groups;
dev->release = device_create_release;
dev_set_drvdata(dev, drvdata);
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
if (retval)
goto error;
retval = device_add(dev);
if (retval)
goto error;
return dev;
error:
put_device(dev);
return ERR_PTR(retval);
}
/**
* device_create - creates a device and registers it with sysfs
* @class: pointer to the struct class that this device should be registered to
* @parent: pointer to the parent struct device of this new device, if any
* @devt: the dev_t for the char device to be added
* @drvdata: the data to be added to the device for callbacks
* @fmt: string for the device's name
*
* This function can be used by char device classes. A struct device
* will be created in sysfs, registered to the specified class.
*
* A "dev" file will be created, showing the dev_t for the device, if
* the dev_t is not 0,0.
* If a pointer to a parent struct device is passed in, the newly created
* struct device will be a child of that device in sysfs.
* The pointer to the struct device will be returned from the call.
* Any further sysfs files that might be required can be created using this
* pointer.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
*/
struct device *device_create(const struct class *class, struct device *parent,
dev_t devt, void *drvdata, const char *fmt, ...)
{
va_list vargs;
struct device *dev;
va_start(vargs, fmt);
dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
fmt, vargs);
va_end(vargs);
return dev;
}
EXPORT_SYMBOL_GPL(device_create);
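/*
 * Illustrative sketch (not part of the original file): a char-device class
 * creating and destroying the node for minor 0. "example_class" and
 * "example_major" are hypothetical and assumed to be set up elsewhere.
 */
static struct device *example_create_node(const struct class *example_class,
					  unsigned int example_major,
					  void *drvdata)
{
	return device_create(example_class, NULL, MKDEV(example_major, 0),
			     drvdata, "example%d", 0);
}

static void example_destroy_node(const struct class *example_class,
				 unsigned int example_major)
{
	device_destroy(example_class, MKDEV(example_major, 0));
}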
/**
* device_create_with_groups - creates a device and registers it with sysfs
* @class: pointer to the struct class that this device should be registered to
* @parent: pointer to the parent struct device of this new device, if any
* @devt: the dev_t for the char device to be added
* @drvdata: the data to be added to the device for callbacks
* @groups: NULL-terminated list of attribute groups to be created
* @fmt: string for the device's name
*
* This function can be used by char device classes. A struct device
* will be created in sysfs, registered to the specified class.
* Additional attributes specified in the groups parameter will also
* be created automatically.
*
* A "dev" file will be created, showing the dev_t for the device, if
* the dev_t is not 0,0.
* If a pointer to a parent struct device is passed in, the newly created
* struct device will be a child of that device in sysfs.
* The pointer to the struct device will be returned from the call.
* Any further sysfs files that might be required can be created using this
* pointer.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
*/
struct device *device_create_with_groups(const struct class *class,
struct device *parent, dev_t devt,
void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...)
{
va_list vargs;
struct device *dev;
va_start(vargs, fmt);
dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
fmt, vargs);
va_end(vargs);
return dev;
}
EXPORT_SYMBOL_GPL(device_create_with_groups);
/**
* device_destroy - removes a device that was created with device_create()
* @class: pointer to the struct class that this device was registered with
* @devt: the dev_t of the device that was previously registered
*
* This call unregisters and cleans up a device that was created with a
* call to device_create().
*/
void device_destroy(const struct class *class, dev_t devt)
{
struct device *dev;
dev = class_find_device_by_devt(class, devt);
if (dev) {
put_device(dev);
device_unregister(dev);
}
}
EXPORT_SYMBOL_GPL(device_destroy);
/**
* device_rename - renames a device
* @dev: the pointer to the struct device to be renamed
* @new_name: the new name of the device
*
* It is the responsibility of the caller to provide mutual
* exclusion between two different calls of device_rename
* on the same device to ensure that new_name is valid and
* won't conflict with other devices.
*
* Note: given that some subsystems (networking and infiniband) use this
* function, with no immediate plans for this to change, we cannot assume or
* require that this function not be called at all.
*
* However, if you're writing new code, do not call this function. The following
* text from Kay Sievers offers some insight:
*
* Renaming devices is racy at many levels, symlinks and other stuff are not
* replaced atomically, and you get a "move" uevent, but it's not easy to
* connect the event to the old and new device. Device nodes are not renamed at
* all, there isn't even support for that in the kernel now.
*
* In the meantime, during renaming, your target name might be taken by another
* driver, creating conflicts. Or the old name is taken directly after you
* renamed it -- then you get events for the same DEVPATH, before you even see
* the "move" event. It's just a mess, and nothing new should ever rely on
* kernel device renaming. Besides that, it's not even implemented now for
* other things than (driver-core wise very simple) network devices.
*
* Make up a "real" name in the driver before you register anything, or add
* some other attributes for userspace to find the device, or use udev to add
* symlinks -- but never rename kernel devices later, it's a complete mess. We
* don't even want to get into that and try to implement the missing pieces in
* the core. We really have other pieces to fix in the driver core mess. :)
*/
int device_rename(struct device *dev, const char *new_name)
{
struct subsys_private *sp = NULL;
struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
bool is_link_renamed = false;
dev = get_device(dev);
if (!dev)
return -EINVAL;
dev_dbg(dev, "renaming to %s\n", new_name);
old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!old_device_name) {
error = -ENOMEM;
goto out;
}
if (dev->class) {
sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
goto out;
}
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj));
if (error)
goto out;
is_link_renamed = true;
}
error = kobject_rename(kobj, new_name);
out:
if (error && is_link_renamed)
sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
old_device_name, kobject_namespace(kobj));
subsys_put(sp);
put_device(dev);
kfree(old_device_name);
return error;
}
EXPORT_SYMBOL_GPL(device_rename);
static int device_move_class_links(struct device *dev,
struct device *old_parent,
struct device *new_parent)
{
int error = 0;
if (old_parent)
sysfs_remove_link(&dev->kobj, "device");
if (new_parent)
error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
"device");
return error;
}
/**
* device_move - moves a device to a new parent
* @dev: the pointer to the struct device to be moved
* @new_parent: the new parent of the device (can be NULL)
* @dpm_order: how to reorder the dpm_list
*/
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order)
{
int error;
struct device *old_parent;
struct kobject *new_parent_kobj;
dev = get_device(dev);
if (!dev)
return -EINVAL;
device_pm_lock();
new_parent = get_device(new_parent);
new_parent_kobj = get_device_parent(dev, new_parent);
if (IS_ERR(new_parent_kobj)) {
error = PTR_ERR(new_parent_kobj);
put_device(new_parent);
goto out;
}
pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
__func__, new_parent ? dev_name(new_parent) : "<NULL>");
error = kobject_move(&dev->kobj, new_parent_kobj);
if (error) {
cleanup_glue_dir(dev, new_parent_kobj);
put_device(new_parent);
goto out;
}
old_parent = dev->parent;
dev->parent = new_parent;
if (old_parent)
klist_remove(&dev->p->knode_parent);
if (new_parent) {
klist_add_tail(&dev->p->knode_parent,
&new_parent->p->klist_children);
set_dev_node(dev, dev_to_node(new_parent));
}
if (dev->class) {
error = device_move_class_links(dev, old_parent, new_parent);
if (error) {
/* We ignore errors on cleanup since we're hosed anyway... */
device_move_class_links(dev, new_parent, old_parent);
if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
if (new_parent)
klist_remove(&dev->p->knode_parent);
dev->parent = old_parent;
if (old_parent) {
klist_add_tail(&dev->p->knode_parent,
&old_parent->p->klist_children);
set_dev_node(dev, dev_to_node(old_parent));
}
}
cleanup_glue_dir(dev, new_parent_kobj);
put_device(new_parent);
goto out;
}
}
switch (dpm_order) {
case DPM_ORDER_NONE:
break;
case DPM_ORDER_DEV_AFTER_PARENT:
device_pm_move_after(dev, new_parent);
devices_kset_move_after(dev, new_parent);
break;
case DPM_ORDER_PARENT_BEFORE_DEV:
device_pm_move_before(new_parent, dev);
devices_kset_move_before(new_parent, dev);
break;
case DPM_ORDER_DEV_LAST:
device_pm_move_last(dev);
devices_kset_move_last(dev);
break;
}
put_device(old_parent);
out:
device_pm_unlock();
put_device(dev);
return error;
}
EXPORT_SYMBOL_GPL(device_move);
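/*
 * Illustrative sketch (not part of the original file): reparenting a device
 * while keeping the PM ordering consistent by placing it after its new
 * parent in the dpm_list.
 */
static int example_reparent(struct device *dev, struct device *new_parent)
{
	return device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
}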
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
{
struct kobject *kobj = &dev->kobj;
const struct class *class = dev->class;
const struct device_type *type = dev->type;
int error;
if (class) {
/*
* Change the device groups of the device class for @dev to
* @kuid/@kgid.
*/
error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
kgid);
if (error)
return error;
}
if (type) {
/*
* Change the device groups of the device type for @dev to
* @kuid/@kgid.
*/
error = sysfs_groups_change_owner(kobj, type->groups, kuid,
kgid);
if (error)
return error;
}
/* Change the device groups of @dev to @kuid/@kgid. */
error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
if (error)
return error;
if (device_supports_offline(dev) && !dev->offline_disabled) {
/* Change online device attributes of @dev to @kuid/@kgid. */
error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
kuid, kgid);
if (error)
return error;
}
return 0;
}
/**
* device_change_owner - change the owner of an existing device.
* @dev: device.
* @kuid: new owner's kuid
* @kgid: new owner's kgid
*
* This changes the owner of @dev and its corresponding sysfs entries to
* @kuid/@kgid. This function closely mirrors how @dev was added via driver
* core.
*
* Returns 0 on success or error code on failure.
*/
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
int error;
struct kobject *kobj = &dev->kobj;
struct subsys_private *sp;
dev = get_device(dev);
if (!dev)
return -EINVAL;
/*
* Change the kobject and the default attributes and groups of the
* ktype associated with it to @kuid/@kgid.
*/
error = sysfs_change_owner(kobj, kuid, kgid);
if (error)
goto out;
/*
* Change the uevent file for @dev to the new owner. The uevent file
* was created in a separate step when @dev got added and we mirror
* that step here.
*/
error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
kgid);
if (error)
goto out;
/*
* Change the device groups, the device groups associated with the
* device class, and the groups associated with the device type of @dev
* to @kuid/@kgid.
*/
error = device_attrs_change_owner(dev, kuid, kgid);
if (error)
goto out;
error = dpm_sysfs_change_owner(dev, kuid, kgid);
if (error)
goto out;
/*
* Change the owner of the symlink located in the class directory of
* the device class associated with @dev which points to the actual
* directory entry for @dev to @kuid/@kgid. This ensures that the
* symlink shows the same permissions as its target.
*/
sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
goto out;
}
error = sysfs_link_change_owner(&sp->subsys.kobj, &dev->kobj, dev_name(dev), kuid, kgid);
subsys_put(sp);
out:
put_device(dev);
return error;
}
EXPORT_SYMBOL_GPL(device_change_owner);
/**
* device_shutdown - call ->shutdown() on each device to shutdown.
*/
void device_shutdown(void)
{
struct device *dev, *parent;
wait_for_device_probe();
device_block_probing();
cpufreq_suspend();
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
* Beware that device unplug events may also start pulling
* devices offline, even as the system is shutting down.
*/
while (!list_empty(&devices_kset->list)) {
dev = list_entry(devices_kset->list.prev, struct device,
kobj.entry);
/*
* hold reference count of device's parent to
* prevent it from being freed because parent's
* lock is to be held
*/
parent = get_device(dev->parent);
get_device(dev);
/*
* Make sure the device is off the kset list, in the
* event that dev->*->shutdown() doesn't remove it.
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
/* hold lock to avoid race with probe/release */
if (parent)
device_lock(parent);
device_lock(dev);
/* Don't allow any more runtime suspends */
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
if (dev->class && dev->class->shutdown_pre) {
if (initcall_debug)
dev_info(dev, "shutdown_pre\n");
dev->class->shutdown_pre(dev);
}
if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
} else if (dev->driver && dev->driver->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
device_unlock(dev);
if (parent)
device_unlock(parent);
put_device(dev);
put_device(parent);
spin_lock(&devices_kset->list_lock);
}
spin_unlock(&devices_kset->list_lock);
}
/*
* Device logging functions
*/
#ifdef CONFIG_PRINTK
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
const char *subsys;
memset(dev_info, 0, sizeof(*dev_info));
if (dev->class)
subsys = dev->class->name;
else if (dev->bus)
subsys = dev->bus->name;
else
return;
strscpy(dev_info->subsystem, subsys);
/*
* Add device identifier DEVICE=:
* b12:8 block dev_t
* c127:3 char dev_t
* n8 netdev ifindex
* +sound:card0 subsystem:devname
*/
if (MAJOR(dev->devt)) {
char c;
if (strcmp(subsys, "block") == 0)
c = 'b';
else
c = 'c';
snprintf(dev_info->device, sizeof(dev_info->device),
"%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
} else if (strcmp(subsys, "net") == 0) {
struct net_device *net = to_net_dev(dev);
snprintf(dev_info->device, sizeof(dev_info->device),
"n%u", net->ifindex);
} else {
snprintf(dev_info->device, sizeof(dev_info->device),
"+%s:%s", subsys, dev_name(dev));
}
}
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
{
struct dev_printk_info dev_info;
set_dev_info(dev, &dev_info);
return vprintk_emit(0, level, &dev_info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = dev_vprintk_emit(level, dev, fmt, args);
va_end(args);
return r;
}
EXPORT_SYMBOL(dev_printk_emit);
static void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{
if (dev)
dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
dev_driver_string(dev), dev_name(dev), vaf);
else
printk("%s(NULL device *): %pV", level, vaf);
}
void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
__dev_printk(level, dev, &vaf);
va_end(args);
}
EXPORT_SYMBOL(_dev_printk);
#define define_dev_printk_level(func, kern_level) \
void func(const struct device *dev, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
__dev_printk(kern_level, dev, &vaf); \
\
va_end(args); \
} \
EXPORT_SYMBOL(func);
define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
#endif
static void __dev_probe_failed(const struct device *dev, int err, bool fatal,
const char *fmt, va_list vargsp)
{
struct va_format vaf;
va_list vargs;
/*
* On x86_64 and possibly on other architectures, va_list is actually a
* size-1 array containing a structure. As a result, function parameter
* vargsp decays from T[1] to T*, and &vargsp has type T** rather than
* T(*)[1], which is expected by its assignment to vaf.va below.
*
* One standard way to solve this mess is by creating a copy in a local
* variable of type va_list and then using a pointer to that local copy
* instead, which is the approach employed here.
*/
va_copy(vargs, vargsp);
vaf.fmt = fmt;
vaf.va = &vargs;
switch (err) {
case -EPROBE_DEFER:
device_set_deferred_probe_reason(dev, &vaf);
dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
break;
case -ENOMEM:
/* Don't print anything on -ENOMEM, there's already enough output */
break;
default:
/* Log fatal final failures as errors, otherwise produce warnings */
if (fatal)
dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
else
dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
break;
}
va_end(vargs);
}
/**
* dev_err_probe - probe error check and log helper
* @dev: the pointer to the struct device
* @err: error value to test
* @fmt: printf-style format string
* @...: arguments as specified in the format string
*
* This helper implements the common pattern in probe functions for error
* checking: print a debug or error message depending on whether the error
* value is -EPROBE_DEFER, and propagate the error upwards.
* In the case of -EPROBE_DEFER it also sets the defer probe reason, which can
* be checked later by reading the devices_deferred debugfs attribute.
* It replaces the following code sequence::
*
* if (err != -EPROBE_DEFER)
* dev_err(dev, ...);
* else
* dev_dbg(dev, ...);
* return err;
*
* with::
*
* return dev_err_probe(dev, err, ...);
*
* Using this helper in your probe function is totally fine even if @err
* is known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_err() is the standardized format
* of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
* instead of "-35"), and having the error code returned allows more
* compact error paths.
*
* Returns @err.
*/
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
va_list vargs;
va_start(vargs, fmt);
/* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */
__dev_probe_failed(dev, err, true, fmt, vargs);
va_end(vargs);
return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
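/*
 * Illustrative sketch (not part of the original file, assumes
 * <linux/platform_device.h> and <linux/clk.h>): a probe() error path using
 * dev_err_probe(). A deferred clock provider is logged at debug level and
 * recorded for devices_deferred; any other error is logged as an error, and
 * in both cases the error code is propagated unchanged.
 */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}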
/**
* dev_warn_probe - probe error check and log helper
* @dev: the pointer to the struct device
* @err: error value to test
* @fmt: printf-style format string
* @...: arguments as specified in the format string
*
* This helper implements the common pattern in probe functions for error
* checking: print a debug or warning message depending on whether the error
* value is -EPROBE_DEFER, and propagate the error upwards.
* In the case of -EPROBE_DEFER it also sets the defer probe reason, which can
* be checked later by reading the devices_deferred debugfs attribute.
* It replaces the following code sequence::
*
* if (err != -EPROBE_DEFER)
* dev_warn(dev, ...);
* else
* dev_dbg(dev, ...);
* return err;
*
* with::
*
* return dev_warn_probe(dev, err, ...);
*
* Using this helper in your probe function is totally fine even if @err
* is known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_warn() is the standardized format
* of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
* instead of "-35"), and having the error code returned allows more
* compact error paths.
*
* Returns @err.
*/
int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...)
{
va_list vargs;
va_start(vargs, fmt);
/* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */
__dev_probe_failed(dev, err, false, fmt, vargs);
va_end(vargs);
return err;
}
EXPORT_SYMBOL_GPL(dev_warn_probe);
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
return fwnode && !IS_ERR(fwnode->secondary);
}
/**
* set_primary_fwnode - Change the primary firmware node of a given device.
* @dev: Device to handle.
* @fwnode: New primary firmware node of the device.
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
*
* Valid fwnode cases are:
* - primary --> secondary --> -ENODEV
* - primary --> NULL
* - secondary --> -ENODEV
* - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
struct device *parent = dev->parent;
struct fwnode_handle *fn = dev->fwnode;
if (fwnode) {
if (fwnode_is_primary(fn))
fn = fn->secondary;
if (fn) {
WARN_ON(fwnode->secondary);
fwnode->secondary = fn;
}
dev->fwnode = fwnode;
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
/* Skip nullifying fn->secondary if the primary is shared */
if (parent && fn == parent->fwnode)
return;
/* Set fn->secondary = NULL, so fn remains the primary fwnode */
fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
/**
* set_secondary_fwnode - Change the secondary firmware node of a given device.
* @dev: Device to handle.
* @fwnode: New secondary firmware node of the device.
*
* If a primary firmware node of the device is present, set its secondary
* pointer to @fwnode. Otherwise, set the device's firmware node pointer to
* @fwnode.
*/
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
if (fwnode)
fwnode->secondary = ERR_PTR(-ENODEV);
if (fwnode_is_primary(dev->fwnode))
dev->fwnode->secondary = fwnode;
else
dev->fwnode = fwnode;
}
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
* device_remove_of_node - Remove an of_node from a device
* @dev: device whose device tree node is being removed
*/
void device_remove_of_node(struct device *dev)
{
dev = get_device(dev);
if (!dev)
return;
if (!dev->of_node)
goto end;
if (dev->fwnode == of_fwnode_handle(dev->of_node))
dev->fwnode = NULL;
of_node_put(dev->of_node);
dev->of_node = NULL;
end:
put_device(dev);
}
EXPORT_SYMBOL_GPL(device_remove_of_node);
/**
* device_add_of_node - Add an of_node to an existing device
* @dev: device whose device tree node is being added
* @of_node: of_node to add
*
* Return: 0 on success or error code on failure.
*/
int device_add_of_node(struct device *dev, struct device_node *of_node)
{
int ret;
if (!of_node)
return -EINVAL;
dev = get_device(dev);
if (!dev)
return -EINVAL;
if (dev->of_node) {
dev_err(dev, "Cannot replace node %pOF with %pOF\n",
dev->of_node, of_node);
ret = -EBUSY;
goto end;
}
dev->of_node = of_node_get(of_node);
if (!dev->fwnode)
dev->fwnode = of_fwnode_handle(of_node);
ret = 0;
end:
put_device(dev);
return ret;
}
EXPORT_SYMBOL_GPL(device_add_of_node);
/**
* device_set_of_node_from_dev - reuse device-tree node of another device
* @dev: device whose device-tree node is being set
* @dev2: device whose device-tree node is being reused
*
* Takes another reference to the new device-tree node after first dropping
* any reference held to the old node.
*/
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
of_node_put(dev->of_node);
dev->of_node = of_node_get(dev2->of_node);
dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
dev->fwnode = fwnode;
dev->of_node = to_of_node(fwnode);
}
EXPORT_SYMBOL_GPL(device_set_node);
/**
* get_dev_from_fwnode - Obtain a reference count of the struct device the
* struct fwnode_handle is associated with.
* @fwnode: The pointer to the struct fwnode_handle to obtain the struct device
* reference count of.
*
* This function takes a reference on the struct device that the device pointer
* embedded in the struct fwnode_handle points to.
*
* Note that the struct device pointer embedded in struct fwnode_handle does
* *not* itself hold a reference on the struct device.
*
* Hence, it is a use-after-free (and thus a bug) to call this function if the
* caller cannot guarantee that the last reference to the corresponding struct
* device is not dropped concurrently.
*
* This is possible since struct fwnode_handle has its own reference count and
* hence can outlive the struct device it is associated with.
*/
struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
{
return get_device((fwnode)->dev);
}
EXPORT_SYMBOL_GPL(get_dev_from_fwnode);
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
int device_match_type(struct device *dev, const void *type)
{
return dev->type == type;
}
EXPORT_SYMBOL_GPL(device_match_type);
int device_match_of_node(struct device *dev, const void *np)
{
return np && dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
return fwnode && dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
int device_match_devt(struct device *dev, const void *pdevt)
{
return dev->devt == *(dev_t *)pdevt;
}
EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
return adev && ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
return handle && ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
int device_match_any(struct device *dev, const void *unused)
{
return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
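/*
 * Illustrative sketch (not part of the original file): the device_match_*()
 * helpers above plug directly into iterators such as device_find_child();
 * here a child is looked up by its dev_t.
 */
static struct device *example_find_child_by_devt(struct device *parent,
						 dev_t devt)
{
	return device_find_child(parent, &devt, device_match_devt);
}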
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
#include <asm/page_64_types.h>
#ifndef __ASSEMBLER__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <linux/kmsan-checks.h>
/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
extern unsigned long direct_map_physmem_end;
static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
unsigned long y = x - __START_KERNEL_map;
/* use the carry flag to determine if x was < __START_KERNEL_map */
x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
return x;
}
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x) __phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif
#define __phys_reloc_hide(x) (x)
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
KCFI_REFERENCE(clear_page_orig);
KCFI_REFERENCE(clear_page_rep);
KCFI_REFERENCE(clear_page_erms);
static inline void clear_page(void *page)
{
/*
* Clean up KMSAN metadata for the page being cleared. The assembly call
* below clobbers @page, so we perform unpoisoning before it.
*/
kmsan_unpoison_memory(page, PAGE_SIZE);
alternative_call_2(clear_page_orig,
clear_page_rep, X86_FEATURE_REP_GOOD,
clear_page_erms, X86_FEATURE_ERMS,
"=D" (page),
"D" (page),
"cc", "memory", "rax", "rcx");
}
void copy_page(void *to, void *from);
KCFI_REFERENCE(copy_page);
/*
* User space process size. This is the first address outside the user range.
* There are a few constraints that determine this:
*
* On Intel CPUs, if a SYSCALL instruction is at the highest canonical
* address, then that syscall will enter the kernel with a
* non-canonical return address, and SYSRET will explode dangerously.
* We avoid this particular problem by preventing anything
* from being mapped at the maximum canonical address.
*
* On AMD CPUs in the Ryzen family, there's a nasty bug in which the
* CPUs malfunction if they execute code from the highest canonical page.
* They'll speculate right off the end of the canonical space, and
* bad things happen. This is worked around in the same way as the
* Intel problem.
*
* With page table isolation enabled, we map the LDT in ... [stay tuned]
*/
static __always_inline unsigned long task_size_max(void)
{
unsigned long ret;
alternative_io("movq %[small],%0","movq %[large],%0",
X86_FEATURE_LA57,
"=r" (ret),
[small] "i" ((1ul << 47)-PAGE_SIZE),
[large] "i" ((1ul << 56)-PAGE_SIZE));
return ret;
}
#endif /* !__ASSEMBLER__ */
#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif
#endif /* _ASM_X86_PAGE_64_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>
DECLARE_PER_CPU(u64, tlbstate_untag_mask);
void __flush_tlb_all(void);
#define TLB_FLUSH_ALL -1UL
#define TLB_GENERATION_INVALID 0
void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);
/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
cr4_update_irqsoff(mask, 0);
}
/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
cr4_update_irqsoff(0, mask);
}
/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
unsigned long flags;
local_irq_save(flags);
cr4_set_bits_irqsoff(mask);
local_irq_restore(flags);
}
/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
unsigned long flags;
local_irq_save(flags);
cr4_clear_bits_irqsoff(mask);
local_irq_restore(flags);
}
#ifndef MODULE
/*
* 6 because 6 should be plenty and struct tlb_state will fit in two cache
* lines.
*/
#define TLB_NR_DYN_ASIDS 6
struct tlb_context {
u64 ctx_id;
u64 tlb_gen;
};
struct tlb_state {
/*
* cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
* are on. This means that it may not match current->active_mm,
* which will contain the previous user mm when we're in lazy TLB
* mode even if we've already switched back to swapper_pg_dir.
*
* During switch_mm_irqs_off(), loaded_mm will be set to
* LOADED_MM_SWITCHING during the brief interrupts-off window
* when CR3 and loaded_mm would otherwise be inconsistent. This
* is for nmi_uaccess_okay()'s benefit.
*/
struct mm_struct *loaded_mm;
#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)
/* Last user mm for optimizing IBPB */
union {
struct mm_struct *last_user_mm;
unsigned long last_user_mm_spec;
};
u16 loaded_mm_asid;
u16 next_asid;
/*
* If set we changed the page tables in such a way that we
* needed an invalidation of all contexts (aka. PCIDs / ASIDs).
* This tells us to go invalidate all the non-loaded ctxs[]
* on the next context switch.
*
* The current ctx was kept up-to-date as it ran and does not
* need to be invalidated.
*/
bool invalidate_other;
#ifdef CONFIG_ADDRESS_MASKING
/*
* Active LAM mode.
*
* X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT or 0 if LAM
* disabled.
*/
u8 lam;
#endif
/*
* Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
* the corresponding user PCID needs a flush next time we
* switch to it; see SWITCH_TO_USER_CR3.
*/
unsigned short user_pcid_flush_mask;
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
* disabling interrupts when modifying either one.
*/
unsigned long cr4;
/*
* This is a list of all contexts that might exist in the TLB.
* There is one per ASID that we use, and the ASID (what the
* CPU calls PCID) is the index into ctxts.
*
* For each context, ctx_id indicates which mm the TLB's user
* entries came from. As an invariant, the TLB will never
* contain entries that are out-of-date as when that mm reached
* the tlb_gen in the list.
*
* To be clear, this means that it's legal for the TLB code to
* flush the TLB without updating tlb_gen. This can happen
* (for now, at least) due to paravirt remote flushes.
*
* NB: context 0 is a bit special, since it's also used by
* various bits of init code. This is fine -- code that
* isn't aware of PCID will end up harmlessly flushing
* context 0.
*/
struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
struct tlb_state_shared {
/*
* We can be in one of several states:
*
* - Actively using an mm. Our CPU's bit will be set in
* mm_cpumask(loaded_mm) and is_lazy == false;
*
* - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
* will not be set in mm_cpumask(&init_mm) and is_lazy == false.
*
* - Lazily using a real mm. loaded_mm != &init_mm, our bit
* is set in mm_cpumask(loaded_mm), but is_lazy == true.
* We're heuristically guessing that the CR3 load we
* skipped more than makes up for the overhead added by
* lazy mode.
*/
bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
/* How many pages can be invalidated with one INVLPGB. */
extern u16 invlpgb_count_max;
extern void initialize_tlbstate_and_flush(void);
/*
* TLB flushing:
*
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*/
struct flush_tlb_info {
/*
* We support several kinds of flushes.
*
* - Fully flush a single mm. .mm will be set, .end will be
* TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
* which the IPI sender is trying to catch us up.
*
* - Partially flush a single mm. .mm will be set, .start and
* .end will indicate the range, and .new_tlb_gen will be set
* such that the changes between generation .new_tlb_gen-1 and
* .new_tlb_gen are entirely contained in the indicated range.
*
* - Fully flush all mms whose tlb_gens have been updated. .mm
* will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
* will be zero.
*/
struct mm_struct *mm;
unsigned long start;
unsigned long end;
u64 new_tlb_gen;
unsigned int initiating_cpu;
u8 stride_shift;
u8 freed_tables;
u8 trim_cpumask;
};
void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
static inline bool is_dyn_asid(u16 asid)
{
return asid < TLB_NR_DYN_ASIDS;
}
static inline bool is_global_asid(u16 asid)
{
return !is_dyn_asid(asid);
}
#ifdef CONFIG_BROADCAST_TLB_FLUSH
static inline u16 mm_global_asid(struct mm_struct *mm)
{
u16 asid;
if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
return 0;
asid = smp_load_acquire(&mm->context.global_asid);
/* mm->context.global_asid is either 0, or a global ASID */
VM_WARN_ON_ONCE(asid && is_dyn_asid(asid));
return asid;
}
static inline void mm_init_global_asid(struct mm_struct *mm)
{
if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
mm->context.global_asid = 0;
mm->context.asid_transition = false;
}
}
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
{
/*
* Notably flush_tlb_mm_range() -> broadcast_tlb_flush() ->
* finish_asid_transition() needs to observe asid_transition = true
* once it observes global_asid.
*/
mm->context.asid_transition = true;
smp_store_release(&mm->context.global_asid, asid);
}
static inline void mm_clear_asid_transition(struct mm_struct *mm)
{
WRITE_ONCE(mm->context.asid_transition, false);
}
static inline bool mm_in_asid_transition(struct mm_struct *mm)
{
if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
return false;
return mm && READ_ONCE(mm->context.asid_transition);
}
#else
static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
static inline void mm_init_global_asid(struct mm_struct *mm) { }
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
static inline void mm_clear_asid_transition(struct mm_struct *mm) { }
static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
#endif /* CONFIG_BROADCAST_TLB_FLUSH */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
#define flush_tlb_mm(mm) \
flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
#define flush_tlb_range(vma, start, end) \
flush_tlb_mm_range((vma)->vm_mm, start, end, \
((vma)->vm_flags & VM_HUGETLB) \
? huge_page_shift(hstate_vma(vma)) \
: PAGE_SHIFT, true)
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int stride_shift,
bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
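/*
 * Illustrative sketch (not part of the original file): flushing a range of
 * user mappings with flush_tlb_mm_range() at page granularity, analogous to
 * the flush_tlb_page() helper above but covering more than one page. No page
 * tables were freed, hence freed_tables == false.
 */
static inline void example_flush_user_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	flush_tlb_mm_range(mm, start, end, PAGE_SHIFT, false);
}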
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
bool should_defer = false;
/* If remote CPUs need to be flushed then defer batch the flush */
if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
should_defer = true;
put_cpu();
return should_defer;
}
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
/*
* Bump the generation count. This also serves as a full barrier
* that synchronizes with switch_mm(): callers are required to order
* their read of mm_cpumask after their writes to the paging
* structures.
*/
return atomic64_inc_return(&mm->context.tlb_gen);
}
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm, unsigned long start, unsigned long end)
{
inc_mm_tlb_gen(mm);
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
batch->unmapped_pages = true;
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
bool ignore_access)
{
/*
* Flags that require a flush when cleared but not when they are set.
* Only include flags that would not trigger spurious page-faults.
* Non-present entries are not cached. Hardware would set the
* dirty/access bit if needed without a fault.
*/
const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
_PAGE_ACCESSED;
const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
_PAGE_SOFTW3 | _PAGE_SOFTW4 |
_PAGE_SAVED_DIRTY;
const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
_PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
_PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
_PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
unsigned long diff = oldflags ^ newflags;
BUILD_BUG_ON(flush_on_clear & software_flags);
BUILD_BUG_ON(flush_on_clear & flush_on_change);
BUILD_BUG_ON(flush_on_change & software_flags);
/* Ignore software flags */
diff &= ~software_flags;
if (ignore_access)
diff &= ~_PAGE_ACCESSED;
/*
* Were any of the 'flush_on_clear' flags cleared going from
* 'oldflags' to 'newflags'?
*/
if (diff & oldflags & flush_on_clear)
return true;
/* Flush on modified flags. */
if (diff & flush_on_change)
return true;
/* Ensure there are no flags that were left behind */
if (IS_ENABLED(CONFIG_DEBUG_VM) &&
(diff & ~(flush_on_clear | software_flags | flush_on_change))) {
VM_WARN_ON_ONCE(1);
return true;
}
return false;
}
/*
* pte_needs_flush() checks whether permissions were demoted and require a
* flush. It should only be used for userspace PTEs.
*/
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
/* !PRESENT -> * ; no need for flush */
if (!(pte_flags(oldpte) & _PAGE_PRESENT))
return false;
/* PFN changed ; needs flush */
if (pte_pfn(oldpte) != pte_pfn(newpte))
return true;
/*
* check PTE flags; ignore access-bit; see comment in
* ptep_clear_flush_young().
*/
return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
true);
}
#define pte_needs_flush pte_needs_flush
/*
* huge_pmd_needs_flush() checks whether permissions were demoted and require a
* flush. It should only be used for userspace huge PMDs.
*/
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
/* !PRESENT -> * ; no need for flush */
if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
return false;
/* PFN changed ; needs flush */
if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
return true;
/*
* check PMD flags; do not ignore access-bit; see
* pmdp_clear_flush_young().
*/
return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush
#ifdef CONFIG_ADDRESS_MASKING
static inline u64 tlbstate_lam_cr3_mask(void)
{
u64 lam = this_cpu_read(cpu_tlbstate.lam);
return lam << X86_CR3_LAM_U57_BIT;
}
static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
{
this_cpu_write(cpu_tlbstate.lam, lam >> X86_CR3_LAM_U57_BIT);
this_cpu_write(tlbstate_untag_mask, untag_mask);
}
#else
static inline u64 tlbstate_lam_cr3_mask(void)
{
return 0;
}
static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
{
}
#endif
#endif /* !MODULE */
static inline void __native_tlb_flush_global(unsigned long cr4)
{
native_write_cr4(cr4 ^ X86_CR4_PGE);
native_write_cr4(cr4);
}
#endif /* _ASM_X86_TLBFLUSH_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Queued spinlock
*
* A 'generic' spinlock implementation that is based on MCS locks. For an
* architecture that's looking for a 'generic' spinlock, please first consider
* ticket-lock.h and only come looking here when you've considered all the
* constraints below and can show your hardware does actually perform better
* with qspinlock.
*
* qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
* weaker than RCtso if you're Power), where regular code only expects atomic_t
* to be RCpc.
*
* qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
* of atomic operations to behave well together, please audit them carefully to
* ensure they all have forward progress. Many atomic operations may default to
* cmpxchg() loops which will not have good forward progress properties on
* LL/SC architectures.
*
* One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
* do. Carefully read the patches that introduced
* queued_fetch_set_pending_acquire().
*
* qspinlock also relies heavily on mixed size atomic operations; specifically
* it requires architectures to have xchg16, something which many LL/SC
* architectures need to implement as a 32bit and+or in order to satisfy the
* forward progress guarantees mentioned above.
*
* Further reading on mixed size atomics that might be relevant:
*
* http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
*
* (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
*
* Authors: Waiman Long <waiman.long@hpe.com>
*/
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>
#ifndef queued_spin_is_locked
/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
* Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
* isn't immediately observable.
*/
return atomic_read(&lock->val);
}
#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
* @lock: queued spinlock structure
* Return: 1 if it is unlocked, 0 otherwise
*
* N.B. Whenever there are tasks waiting for the lock, it is considered
* locked wrt the lockref code, to prevent the lockref code from stealing
* the lock and changing things underneath it. This also allows some
* optimizations to be applied without conflict with lockref.
*/
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
return !lock.val.counter;
}
/**
* queued_spin_is_contended - check if the lock is contended
* @lock : Pointer to queued spinlock structure
* Return: 1 if lock contended, 0 otherwise
*/
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
/**
* queued_spin_trylock - try to acquire the queued spinlock
* @lock : Pointer to queued spinlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
int val = atomic_read(&lock->val);
if (unlikely(val))
return 0;
return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#ifndef queued_spin_lock
/**
* queued_spin_lock - acquire a queued spinlock
* @lock: Pointer to queued spinlock structure
*/
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
int val = 0;
if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
return;
queued_spin_lock_slowpath(lock, val);
}
#endif
#ifndef queued_spin_unlock
/**
* queued_spin_unlock - release a queued spinlock
* @lock : Pointer to queued spinlock structure
*/
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
/*
* unlock() needs release semantics:
*/
smp_store_release(&lock->locked, 0);
}
#endif
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
return false;
}
#endif
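/*
 * Hedged usage sketch (hypothetical caller; real users reach these helpers
 * through the arch_spin_*() remapping below): the lock fast path is a single
 * try_cmpxchg from 0 to _Q_LOCKED_VAL, anything else takes the slowpath with
 * the observed value, and unlock is a plain store-release of the locked byte.
 */
static __always_inline void example_qspinlock_roundtrip(struct qspinlock *lock)
{
	queued_spin_lock(lock);		/* cmpxchg fast path or MCS slowpath */
	/* ... critical section ... */
	queued_spin_unlock(lock);	/* release the locked byte */
}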
#ifndef __no_arch_spinlock_redefine
/*
* Remapping spinlock architecture specific functions to the corresponding
* queued spinlock functions.
*/
#define arch_spin_is_locked(l) queued_spin_is_locked(l)
#define arch_spin_is_contended(l) queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
#define arch_spin_lock(l) queued_spin_lock(l)
#define arch_spin_trylock(l) queued_spin_trylock(l)
#define arch_spin_unlock(l) queued_spin_unlock(l)
#endif
#endif /* __ASM_GENERIC_QSPINLOCK_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
#include <linux/math.h>
#include <vdso/time64.h>
/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
/*
* The microsecond/nanosecond delay multipliers are used to convert a
* constant microsecond/nanosecond value into a value which the
* architecture-specific implementation can transform into loops.
*/
#define UDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC))
#define NDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, NSEC_PER_SEC))
/*
* The maximum constant udelay/ndelay value picked out of thin air to prevent
* too long constant udelays/ndelays.
*/
#define DELAY_CONST_MAX 20000
/**
* udelay - Inserting a delay based on microseconds with busy waiting
* @usec: requested delay in microseconds
*
* When delaying in an atomic context ndelay(), udelay() and mdelay() are the
* only valid variants of delaying/sleeping to go with.
*
* When inserting delays in non-atomic context which are shorter than the time
* required to queue e.g. an hrtimer and then enter the scheduler, it is also
* valuable to use udelay(). But it is not simple to specify a generic
* threshold for this which fits all systems. An approximation is a threshold
* for all delays up to 10 microseconds.
*
* For delays larger than the architecture specific %MAX_UDELAY_MS value,
* please make sure mdelay() is used. Otherwise there is a risk of overflow.
*
* Please note that ndelay(), udelay() and mdelay() may return early for several
* reasons (https://lists.openwall.net/linux-kernel/2011/01/09/56):
*
* #. computed loops_per_jiffy too low (due to the time taken to execute the
* timer interrupt.)
* #. cache behaviour affecting the time it takes to execute the loop function.
* #. CPU clock rate changes.
*/
static __always_inline void udelay(unsigned long usec)
{
if (__builtin_constant_p(usec)) {
if (usec >= DELAY_CONST_MAX)
__bad_udelay();
else
__const_udelay(usec * UDELAY_CONST_MULT);
} else {
__udelay(usec);
}
}
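/*
 * Hedged driver-side sketch (example_reset_pulse and the register layout are
 * assumptions, not part of this header): a compile-time constant below
 * DELAY_CONST_MAX goes through __const_udelay(), a runtime value falls back
 * to __udelay(), and a constant of DELAY_CONST_MAX or more fails the build
 * via __bad_udelay().
 */
static inline void example_reset_pulse(void __iomem *reg, unsigned long settle_us)
{
	writel(1, reg);
	udelay(10);		/* constant: __const_udelay(10 * UDELAY_CONST_MULT) */
	writel(0, reg);
	udelay(settle_us);	/* runtime value: __udelay(settle_us) */
}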
/**
* ndelay - Inserting a delay based on nanoseconds with busy waiting
* @nsec: requested delay in nanoseconds
*
* See udelay() for basic information about ndelay() and its variants.
*/
static __always_inline void ndelay(unsigned long nsec)
{
if (__builtin_constant_p(nsec)) {
if (nsec >= DELAY_CONST_MAX)
__bad_ndelay();
else
__const_udelay(nsec * NDELAY_CONST_MULT);
} else {
__ndelay(nsec);
}
}
#define ndelay(x) ndelay(x)
#endif /* __ASM_GENERIC_DELAY_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_CONTEXT_H
#define __X86_KERNEL_FPU_CONTEXT_H
#include <asm/fpu/xstate.h>
#include <asm/trace/fpu.h>
/* Functions related to FPU context tracking */
/*
* The in-register FPU state for an FPU context on a CPU is assumed to be
* valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
* matches the FPU.
*
* If the FPU register state is valid, the kernel can skip restoring the
* FPU state from memory.
*
* Any code that clobbers the FPU registers or updates the in-memory
* FPU state for a task MUST let the rest of the kernel know that the
* FPU registers are no longer valid for this task.
*
* Invalidate a resource you control: CPU if using the CPU for something else
* (with preemption disabled), FPU for the current task, or a task that
* is prevented from running by the current task.
*/
static inline void __cpu_invalidate_fpregs_state(void)
{
__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
fpu->last_cpu = -1;
}
static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
static inline void fpregs_deactivate(struct fpu *fpu)
{
__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
trace_x86_fpu_regs_deactivated(fpu);
}
static inline void fpregs_activate(struct fpu *fpu)
{
__this_cpu_write(fpu_fpregs_owner_ctx, fpu);
trace_x86_fpu_regs_activated(fpu);
}
/* Internal helper for switch_fpu_return() and signal frame setup */
static inline void fpregs_restore_userregs(void)
{
struct fpu *fpu = x86_task_fpu(current);
int cpu = smp_processor_id();
if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER)))
return;
if (!fpregs_state_valid(fpu, cpu)) {
/*
* This restores _all_ xstate which has not been
* established yet.
*
* If PKRU is enabled, then the PKRU value is already
* correct because it was either set in switch_to() or in
* flush_thread(). So it is excluded because it might be
* not up to date in current->thread.fpu->xsave state.
*
* XFD state is handled in restore_fpregs_from_fpstate().
*/
restore_fpregs_from_fpstate(fpu->fpstate, XFEATURE_MASK_FPSTATE);
fpregs_activate(fpu);
fpu->last_cpu = cpu;
}
clear_thread_flag(TIF_NEED_FPU_LOAD);
}
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/open.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/audit.h>
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/dnotify.h>
#include <linux/compat.h>
#include <linux/mnt_idmapping.h>
#include <linux/filelock.h>
#include "internal.h"
int do_truncate(struct mnt_idmap *idmap, struct dentry *dentry,
loff_t length, unsigned int time_attrs, struct file *filp)
{
int ret;
struct iattr newattrs;
/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
if (length < 0)
return -EINVAL;
newattrs.ia_size = length;
newattrs.ia_valid = ATTR_SIZE | time_attrs;
if (filp) {
newattrs.ia_file = filp;
newattrs.ia_valid |= ATTR_FILE;
}
/* Remove suid, sgid, and file capabilities on truncate too */
ret = dentry_needs_remove_privs(idmap, dentry);
if (ret < 0)
return ret;
if (ret)
newattrs.ia_valid |= ret | ATTR_FORCE;
ret = inode_lock_killable(dentry->d_inode);
if (ret)
return ret;
/* Note any delegations or leases have already been broken: */
ret = notify_change(idmap, dentry, &newattrs, NULL);
inode_unlock(dentry->d_inode);
return ret;
}
int vfs_truncate(const struct path *path, loff_t length)
{
struct mnt_idmap *idmap;
struct inode *inode;
int error;
inode = path->dentry->d_inode;
/* For directories it's -EISDIR, for other non-regular files it's -EINVAL */
if (S_ISDIR(inode->i_mode))
return -EISDIR;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
idmap = mnt_idmap(path->mnt);
error = inode_permission(idmap, inode, MAY_WRITE);
if (error)
return error;
error = fsnotify_truncate_perm(path, length);
if (error)
return error;
error = mnt_want_write(path->mnt);
if (error)
return error;
error = -EPERM;
if (IS_APPEND(inode))
goto mnt_drop_write_and_out;
error = get_write_access(inode);
if (error)
goto mnt_drop_write_and_out;
/*
* Make sure that there are no leases. get_write_access() protects
* against the truncate racing with a lease-granting setlease().
*/
error = break_lease(inode, O_WRONLY);
if (error)
goto put_write_and_out;
error = security_path_truncate(path);
if (!error)
error = do_truncate(idmap, path->dentry, length, 0, NULL);
put_write_and_out:
put_write_access(inode);
mnt_drop_write_and_out:
mnt_drop_write(path->mnt);
return error;
}
EXPORT_SYMBOL_GPL(vfs_truncate);
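/*
 * Hedged illustration (hypothetical in-kernel caller, not part of open.c):
 * truncating a file by pathname by resolving it with kern_path() and handing
 * the result to vfs_truncate(), which performs the permission, fsnotify,
 * write-access and lease handling seen above.
 */
static int example_truncate_by_name(const char *name, loff_t length)
{
	struct path path;
	int error;

	error = kern_path(name, LOOKUP_FOLLOW, &path);
	if (error)
		return error;
	error = vfs_truncate(&path, length);
	path_put(&path);
	return error;
}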
int do_sys_truncate(const char __user *pathname, loff_t length)
{
unsigned int lookup_flags = LOOKUP_FOLLOW;
struct path path;
int error;
if (length < 0) /* sorry, but loff_t says... */
return -EINVAL;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (!error) {
error = vfs_truncate(&path, length);
path_put(&path);
}
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
{
return do_sys_truncate(path, length);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length)
{
return do_sys_truncate(path, length);
}
#endif
int do_ftruncate(struct file *file, loff_t length, int small)
{
struct inode *inode;
struct dentry *dentry;
int error;
/* explicitly opened as large or we are on 64-bit box */
if (file->f_flags & O_LARGEFILE)
small = 0;
dentry = file->f_path.dentry;
inode = dentry->d_inode;
if (!S_ISREG(inode->i_mode) || !(file->f_mode & FMODE_WRITE))
return -EINVAL;
/* Cannot ftruncate over 2^31 bytes without large file support */
if (small && length > MAX_NON_LFS)
return -EINVAL;
/* Check IS_APPEND on real upper inode */
if (IS_APPEND(file_inode(file)))
return -EPERM;
error = security_file_truncate(file);
if (error)
return error;
error = fsnotify_truncate_perm(&file->f_path, length);
if (error)
return error;
sb_start_write(inode->i_sb);
error = do_truncate(file_mnt_idmap(file), dentry, length,
ATTR_MTIME | ATTR_CTIME, file);
sb_end_write(inode->i_sb);
return error;
}
int do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
if (length < 0)
return -EINVAL;
CLASS(fd, f)(fd);
if (fd_empty(f))
return -EBADF;
return do_ftruncate(fd_file(f), length, small);
}
SYSCALL_DEFINE2(ftruncate, unsigned int, fd, off_t, length)
{
return do_sys_ftruncate(fd, length, 1);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_off_t, length)
{
return do_sys_ftruncate(fd, length, 1);
}
#endif
/* LFS versions of truncate are only needed on 32 bit machines */
#if BITS_PER_LONG == 32
SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length)
{
return do_sys_truncate(path, length);
}
SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
{
return do_sys_ftruncate(fd, length, 0);
}
#endif /* BITS_PER_LONG == 32 */
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_TRUNCATE64)
COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, pathname,
compat_arg_u64_dual(length))
{
return ksys_truncate(pathname, compat_arg_u64_glue(length));
}
#endif
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FTRUNCATE64)
COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd,
compat_arg_u64_dual(length))
{
return ksys_ftruncate(fd, compat_arg_u64_glue(length));
}
#endif
int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
int ret;
loff_t sum;
if (offset < 0 || len <= 0)
return -EINVAL;
if (mode & ~(FALLOC_FL_MODE_MASK | FALLOC_FL_KEEP_SIZE))
return -EOPNOTSUPP;
/*
* Modes are exclusive, even if that is not obvious from the encoding
* as bit masks and the mix with the flag in the same namespace.
*
* To make things even more complicated, FALLOC_FL_ALLOCATE_RANGE is
* encoded as no bit set.
*/
switch (mode & FALLOC_FL_MODE_MASK) {
case FALLOC_FL_ALLOCATE_RANGE:
case FALLOC_FL_UNSHARE_RANGE:
case FALLOC_FL_ZERO_RANGE:
break;
case FALLOC_FL_PUNCH_HOLE:
if (!(mode & FALLOC_FL_KEEP_SIZE))
return -EOPNOTSUPP;
break;
case FALLOC_FL_COLLAPSE_RANGE:
case FALLOC_FL_INSERT_RANGE:
case FALLOC_FL_WRITE_ZEROES:
if (mode & FALLOC_FL_KEEP_SIZE)
return -EOPNOTSUPP;
break;
default:
return -EOPNOTSUPP;
}
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
/*
* On append-only files only space preallocation is supported.
*/
if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode))
return -EPERM;
if (IS_IMMUTABLE(inode))
return -EPERM;
/*
* We cannot allow any fallocate operation on an active swapfile
*/
if (IS_SWAPFILE(inode))
return -ETXTBSY;
/*
* Revalidate the write permissions, in case security policy has
* changed since the files were opened.
*/
ret = security_file_permission(file, MAY_WRITE);
if (ret)
return ret;
ret = fsnotify_file_area_perm(file, MAY_WRITE, &offset, len);
if (ret)
return ret;
if (S_ISFIFO(inode->i_mode))
return -ESPIPE;
if (S_ISDIR(inode->i_mode))
return -EISDIR;
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
return -ENODEV;
/* Check for wraparound */
if (check_add_overflow(offset, len, &sum))
return -EFBIG;
if (sum > inode->i_sb->s_maxbytes)
return -EFBIG;
if (!file->f_op->fallocate)
return -EOPNOTSUPP;
file_start_write(file);
ret = file->f_op->fallocate(file, mode, offset, len);
/*
* Create inotify and fanotify events.
*
* To keep the logic simple always create events if fallocate succeeds.
* This implies that events are even created if the file size remains
* unchanged, e.g. when using flag FALLOC_FL_KEEP_SIZE.
*/
if (ret == 0)
fsnotify_modify(file);
file_end_write(file);
return ret;
}
EXPORT_SYMBOL_GPL(vfs_fallocate);
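/*
 * Hedged userspace sketch of the mode rules checked above (not kernel code;
 * punch_hole is a hypothetical helper): FALLOC_FL_PUNCH_HOLE is only valid
 * together with FALLOC_FL_KEEP_SIZE, while collapse/insert/write-zeroes must
 * not set it.
 */
static int punch_hole(int fd, off_t offset, off_t len)
{
	/* assumes _GNU_SOURCE and <fcntl.h> */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}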
int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len)
{
CLASS(fd, f)(fd);
if (fd_empty(f))
return -EBADF;
return vfs_fallocate(fd_file(f), mode, offset, len);
}
SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
{
return ksys_fallocate(fd, mode, offset, len);
}
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FALLOCATE)
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, compat_arg_u64_dual(offset),
compat_arg_u64_dual(len))
{
return ksys_fallocate(fd, mode, compat_arg_u64_glue(offset),
compat_arg_u64_glue(len));
}
#endif
/*
* access() needs to use the real uid/gid, not the effective uid/gid.
* We do this by temporarily clearing all FS-related capabilities and
* switching the fsuid/fsgid around to the real ones.
*
* Creating new credentials is expensive, so we try to skip doing it,
* which we can if the result would match what we already got.
*/
static bool access_need_override_creds(int flags)
{
const struct cred *cred;
if (flags & AT_EACCESS)
return false;
cred = current_cred();
if (!uid_eq(cred->fsuid, cred->uid) ||
!gid_eq(cred->fsgid, cred->gid))
return true;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
kuid_t root_uid = make_kuid(cred->user_ns, 0);
if (!uid_eq(cred->uid, root_uid)) {
if (!cap_isclear(cred->cap_effective))
return true;
} else {
if (!cap_isidentical(cred->cap_effective,
cred->cap_permitted))
return true;
}
}
return false;
}
static const struct cred *access_override_creds(void)
{
struct cred *override_cred;
override_cred = prepare_creds();
if (!override_cred)
return NULL;
/*
* XXX access_need_override_creds performs checks in hopes of skipping
* this work. Make sure it stays in sync if making any changes in this
* routine.
*/
override_cred->fsuid = override_cred->uid;
override_cred->fsgid = override_cred->gid;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
/* Clear the capabilities if we switch to a non-root user */
kuid_t root_uid = make_kuid(override_cred->user_ns, 0);
if (!uid_eq(override_cred->uid, root_uid))
cap_clear(override_cred->cap_effective);
else
override_cred->cap_effective =
override_cred->cap_permitted;
}
/*
* The new set of credentials can *only* be used in
* task-synchronous circumstances, and does not need
* RCU freeing, unless somebody then takes a separate
* reference to it.
*
* NOTE! This is _only_ true because this credential
* is used purely for override_creds() that installs
* it as the subjective cred. Other threads will be
* accessing ->real_cred, not the subjective cred.
*
* If somebody _does_ make a copy of this (using the
* 'get_current_cred()' function), that will clear the
* non_rcu field, because now that other user may be
* expecting RCU freeing. But normal thread-synchronous
* cred accesses will keep things non-racy to avoid RCU
* freeing.
*/
override_cred->non_rcu = 1;
return override_creds(override_cred);
}
static int do_faccessat(int dfd, const char __user *filename, int mode, int flags)
{
struct path path;
struct inode *inode;
int res;
unsigned int lookup_flags = LOOKUP_FOLLOW;
const struct cred *old_cred = NULL;
if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */
return -EINVAL;
if (flags & ~(AT_EACCESS | AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH))
return -EINVAL;
if (flags & AT_SYMLINK_NOFOLLOW)
lookup_flags &= ~LOOKUP_FOLLOW;
if (flags & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
if (access_need_override_creds(flags)) {
old_cred = access_override_creds();
if (!old_cred)
return -ENOMEM;
}
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
if (res)
goto out;
inode = d_backing_inode(path.dentry);
if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
/*
* MAY_EXEC on regular files is denied if the fs is mounted
* with the "noexec" flag.
*/
res = -EACCES;
if (path_noexec(&path))
goto out_path_release;
}
res = inode_permission(mnt_idmap(path.mnt), inode, mode | MAY_ACCESS);
/* SuS v2 requires we report a read only fs too */
if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
goto out_path_release;
/*
* This is a rare case where using __mnt_is_readonly()
* is OK without a mnt_want/drop_write() pair. Since
* no actual write to the fs is performed here, we do
* not need to telegraph that to anyone.
*
* By doing this, we accept that this access is
* inherently racy and know that the fs may change
* state before we even see this result.
*/
if (__mnt_is_readonly(path.mnt))
res = -EROFS;
out_path_release:
path_put(&path);
if (retry_estale(res, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
if (old_cred)
put_cred(revert_creds(old_cred));
return res;
}
SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
{
return do_faccessat(dfd, filename, mode, 0);
}
SYSCALL_DEFINE4(faccessat2, int, dfd, const char __user *, filename, int, mode,
int, flags)
{
return do_faccessat(dfd, filename, mode, flags);
}
SYSCALL_DEFINE2(access, const char __user *, filename, int, mode)
{
return do_faccessat(AT_FDCWD, filename, mode, 0);
}
SYSCALL_DEFINE1(chdir, const char __user *, filename)
{
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
retry:
error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
if (error)
goto out;
error = path_permission(&path, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
set_fs_pwd(current->fs, &path);
dput_and_out:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
return error;
}
SYSCALL_DEFINE1(fchdir, unsigned int, fd)
{
CLASS(fd_raw, f)(fd);
int error;
if (fd_empty(f))
return -EBADF;
if (!d_can_lookup(fd_file(f)->f_path.dentry))
return -ENOTDIR;
error = file_permission(fd_file(f), MAY_EXEC | MAY_CHDIR);
if (!error)
set_fs_pwd(current->fs, &fd_file(f)->f_path);
return error;
}
SYSCALL_DEFINE1(chroot, const char __user *, filename)
{
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
retry:
error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
if (error)
goto out;
error = path_permission(&path, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
error = -EPERM;
if (!ns_capable(current_user_ns(), CAP_SYS_CHROOT))
goto dput_and_out;
error = security_path_chroot(&path);
if (error)
goto dput_and_out;
set_fs_root(current->fs, &path);
error = 0;
dput_and_out:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
return error;
}
int chmod_common(const struct path *path, umode_t mode)
{
struct inode *inode = path->dentry->d_inode;
struct inode *delegated_inode = NULL;
struct iattr newattrs;
int error;
error = mnt_want_write(path->mnt);
if (error)
return error;
retry_deleg:
error = inode_lock_killable(inode);
if (error)
goto out_mnt_unlock;
error = security_path_chmod(path, mode);
if (error)
goto out_unlock;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
error = notify_change(mnt_idmap(path->mnt), path->dentry,
&newattrs, &delegated_inode);
out_unlock:
inode_unlock(inode);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
out_mnt_unlock:
mnt_drop_write(path->mnt);
return error;
}
int vfs_fchmod(struct file *file, umode_t mode)
{
audit_file(file);
return chmod_common(&file->f_path, mode);
}
SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
{
CLASS(fd, f)(fd);
if (fd_empty(f))
return -EBADF;
return vfs_fchmod(fd_file(f), mode);
}
static int do_fchmodat(int dfd, const char __user *filename, umode_t mode,
unsigned int flags)
{
struct path path;
int error;
unsigned int lookup_flags;
if (unlikely(flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)))
return -EINVAL;
lookup_flags = (flags & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
if (flags & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (!error) {
error = chmod_common(&path, mode);
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
}
return error;
}
SYSCALL_DEFINE4(fchmodat2, int, dfd, const char __user *, filename,
umode_t, mode, unsigned int, flags)
{
return do_fchmodat(dfd, filename, mode, flags);
}
SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename,
umode_t, mode)
{
return do_fchmodat(dfd, filename, mode, 0);
}
SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
{
return do_fchmodat(AT_FDCWD, filename, mode, 0);
}
/*
* Check whether @kuid is valid and if so generate and set vfsuid_t in
* ia_vfsuid.
*
* Return: true if @kuid is valid, false if not.
*/
static inline bool setattr_vfsuid(struct iattr *attr, kuid_t kuid)
{
if (!uid_valid(kuid))
return false;
attr->ia_valid |= ATTR_UID;
attr->ia_vfsuid = VFSUIDT_INIT(kuid);
return true;
}
/*
* Check whether @kgid is valid and if so generate and set vfsgid_t in
* ia_vfsgid.
*
* Return: true if @kgid is valid, false if not.
*/
static inline bool setattr_vfsgid(struct iattr *attr, kgid_t kgid)
{
if (!gid_valid(kgid))
return false;
attr->ia_valid |= ATTR_GID;
attr->ia_vfsgid = VFSGIDT_INIT(kgid);
return true;
}
int chown_common(const struct path *path, uid_t user, gid_t group)
{
struct mnt_idmap *idmap;
struct user_namespace *fs_userns;
struct inode *inode = path->dentry->d_inode;
struct inode *delegated_inode = NULL;
int error;
struct iattr newattrs;
kuid_t uid;
kgid_t gid;
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
idmap = mnt_idmap(path->mnt);
fs_userns = i_user_ns(inode);
retry_deleg:
newattrs.ia_vfsuid = INVALID_VFSUID;
newattrs.ia_vfsgid = INVALID_VFSGID;
newattrs.ia_valid = ATTR_CTIME;
if ((user != (uid_t)-1) && !setattr_vfsuid(&newattrs, uid))
return -EINVAL;
if ((group != (gid_t)-1) && !setattr_vfsgid(&newattrs, gid))
return -EINVAL;
error = inode_lock_killable(inode);
if (error)
return error;
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_PRIV |
setattr_should_drop_sgid(idmap, inode);
/* Continue to send actual fs values, not the mount values. */
error = security_path_chown(
path,
from_vfsuid(idmap, fs_userns, newattrs.ia_vfsuid),
from_vfsgid(idmap, fs_userns, newattrs.ia_vfsgid));
if (!error)
error = notify_change(idmap, path->dentry, &newattrs,
&delegated_inode);
inode_unlock(inode);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
return error;
}
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
int flag)
{
struct path path;
int error = -EINVAL;
int lookup_flags;
if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
goto out;
lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
if (flag & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
goto out;
error = mnt_want_write(path.mnt);
if (error)
goto out_release;
error = chown_common(&path, user, group);
mnt_drop_write(path.mnt);
out_release:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out:
return error;
}
SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
gid_t, group, int, flag)
{
return do_fchownat(dfd, filename, user, group, flag);
}
SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
{
return do_fchownat(AT_FDCWD, filename, user, group, 0);
}
SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
{
return do_fchownat(AT_FDCWD, filename, user, group,
AT_SYMLINK_NOFOLLOW);
}
int vfs_fchown(struct file *file, uid_t user, gid_t group)
{
int error;
error = mnt_want_write_file(file);
if (error)
return error;
audit_file(file);
error = chown_common(&file->f_path, user, group);
mnt_drop_write_file(file);
return error;
}
int ksys_fchown(unsigned int fd, uid_t user, gid_t group)
{
CLASS(fd, f)(fd);
if (fd_empty(f))
return -EBADF;
return vfs_fchown(fd_file(f), user, group);
}
SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
{
return ksys_fchown(fd, user, group);
}
static inline int file_get_write_access(struct file *f)
{
int error;
error = get_write_access(f->f_inode);
if (unlikely(error))
return error;
error = mnt_get_write_access(f->f_path.mnt);
if (unlikely(error))
goto cleanup_inode;
if (unlikely(f->f_mode & FMODE_BACKING)) {
error = mnt_get_write_access(backing_file_user_path(f)->mnt);
if (unlikely(error))
goto cleanup_mnt;
}
return 0;
cleanup_mnt:
mnt_put_write_access(f->f_path.mnt);
cleanup_inode:
put_write_access(f->f_inode);
return error;
}
static int do_dentry_open(struct file *f,
int (*open)(struct inode *, struct file *))
{
static const struct file_operations empty_fops = {};
struct inode *inode = f->f_path.dentry->d_inode;
int error;
path_get(&f->f_path);
f->f_inode = inode;
f->f_mapping = inode->i_mapping;
f->f_wb_err = filemap_sample_wb_err(f->f_mapping);
f->f_sb_err = file_sample_sb_err(f);
if (unlikely(f->f_flags & O_PATH)) {
f->f_mode = FMODE_PATH | FMODE_OPENED;
file_set_fsnotify_mode(f, FMODE_NONOTIFY);
f->f_op = &empty_fops;
return 0;
}
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
i_readcount_inc(inode);
} else if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
error = file_get_write_access(f);
if (unlikely(error))
goto cleanup_file;
f->f_mode |= FMODE_WRITER;
}
/* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))
f->f_mode |= FMODE_ATOMIC_POS;
f->f_op = fops_get(inode->i_fop);
if (WARN_ON(!f->f_op)) {
error = -ENODEV;
goto cleanup_all;
}
error = security_file_open(f);
if (error)
goto cleanup_all;
/*
* Call fsnotify open permission hook and set FMODE_NONOTIFY_* bits
* according to existing permission watches.
* If FMODE_NONOTIFY mode was already set for an fanotify fd or for a
* pseudo file, this call will not change the mode.
*/
error = fsnotify_open_perm_and_set_mode(f);
if (error)
goto cleanup_all;
error = break_lease(file_inode(f), f->f_flags);
if (error)
goto cleanup_all;
/* normally all 3 are set; ->open() can clear them if needed */
f->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
if (!open)
open = f->f_op->open;
if (open) {
error = open(inode, f);
if (error)
goto cleanup_all;
}
f->f_mode |= FMODE_OPENED;
if ((f->f_mode & FMODE_READ) &&
likely(f->f_op->read || f->f_op->read_iter))
f->f_mode |= FMODE_CAN_READ;
if ((f->f_mode & FMODE_WRITE) &&
likely(f->f_op->write || f->f_op->write_iter))
f->f_mode |= FMODE_CAN_WRITE;
if ((f->f_mode & FMODE_LSEEK) && !f->f_op->llseek)
f->f_mode &= ~FMODE_LSEEK;
if (f->f_mapping->a_ops && f->f_mapping->a_ops->direct_IO)
f->f_mode |= FMODE_CAN_ODIRECT;
f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
f->f_iocb_flags = iocb_flags(f);
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
if ((f->f_flags & O_DIRECT) && !(f->f_mode & FMODE_CAN_ODIRECT))
return -EINVAL;
/*
* XXX: Huge page cache doesn't support writing yet. Drop all page
* cache for this file before processing writes.
*/
if (f->f_mode & FMODE_WRITE) {
/*
* Depends on full fence from get_write_access() to synchronize
* against collapse_file() regarding i_writecount and nr_thps
* updates. Ensures subsequent insertion of THPs into the page
* cache will fail.
*/
if (filemap_nr_thps(inode->i_mapping)) {
struct address_space *mapping = inode->i_mapping;
filemap_invalidate_lock(inode->i_mapping);
/*
* unmap_mapping_range() only needs to be called once here,
* because private pages do not need to be unmapped (e.g. the
* data segments of dynamically linked shared libraries).
*/
unmap_mapping_range(mapping, 0, 0, 0);
truncate_inode_pages(mapping, 0);
filemap_invalidate_unlock(inode->i_mapping);
}
}
return 0;
cleanup_all:
if (WARN_ON_ONCE(error > 0))
error = -EINVAL;
fops_put(f->f_op);
put_file_access(f);
cleanup_file:
path_put(&f->f_path);
f->__f_path.mnt = NULL;
f->__f_path.dentry = NULL;
f->f_inode = NULL;
return error;
}
/**
* finish_open - finish opening a file
* @file: file pointer
* @dentry: pointer to dentry
* @open: open callback
*
* This can be used to finish opening a file passed to i_op->atomic_open().
*
* If the open callback is set to NULL, then the standard f_op->open()
* filesystem callback is substituted.
*
* NB: the dentry reference is _not_ consumed. If, for example, the dentry is
* the return value of d_splice_alias(), then the caller needs to perform dput()
* on it after finish_open().
*
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *))
{
BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
file->__f_path.dentry = dentry;
return do_dentry_open(file, open);
}
EXPORT_SYMBOL(finish_open);
/**
* finish_no_open - finish ->atomic_open() without opening the file
*
* @file: file pointer
* @dentry: dentry, ERR_PTR(-E...) or NULL (as returned from ->lookup())
*
* This can be used to set the result of a lookup in ->atomic_open().
*
* NB: unlike finish_open() this function does consume the dentry reference and
* the caller need not dput() it.
*
* Returns 0 or -E..., which must be the return value of ->atomic_open() after
* having called this function.
*/
int finish_no_open(struct file *file, struct dentry *dentry)
{
if (IS_ERR(dentry))
return PTR_ERR(dentry);
file->__f_path.dentry = dentry;
return 0;
}
EXPORT_SYMBOL(finish_no_open);
char *file_path(struct file *filp, char *buf, int buflen)
{
return d_path(&filp->f_path, buf, buflen);
}
EXPORT_SYMBOL(file_path);
/**
* vfs_open - open the file at the given path
* @path: path to open
* @file: newly allocated file with f_flags initialized
*/
int vfs_open(const struct path *path, struct file *file)
{
int ret;
file->__f_path = *path;
ret = do_dentry_open(file, NULL);
if (!ret) {
/*
* Once we return a file with FMODE_OPENED, __fput() will call
* fsnotify_close(), so we need fsnotify_open() here for
* symmetry.
*/
fsnotify_open(file);
}
return ret;
}
struct file *dentry_open(const struct path *path, int flags,
const struct cred *cred)
{
int error;
struct file *f;
/* We must always pass in a valid mount pointer. */
BUG_ON(!path->mnt);
f = alloc_empty_file(flags, cred);
if (!IS_ERR(f)) {
error = vfs_open(path, f);
if (error) {
fput(f);
f = ERR_PTR(error);
}
}
return f;
}
EXPORT_SYMBOL(dentry_open);
struct file *dentry_open_nonotify(const struct path *path, int flags,
const struct cred *cred)
{
struct file *f = alloc_empty_file(flags, cred);
if (!IS_ERR(f)) {
int error;
file_set_fsnotify_mode(f, FMODE_NONOTIFY);
error = vfs_open(path, f);
if (error) {
fput(f);
f = ERR_PTR(error);
}
}
return f;
}
/**
* dentry_create - Create and open a file
* @path: path to create
* @flags: O_ flags
* @mode: mode bits for new file
* @cred: credentials to use
*
* Caller must hold the parent directory's lock, and have prepared
* a negative dentry, placed in @path->dentry, for the new file.
*
* Caller sets @path->mnt to the vfsmount of the filesystem where
* the new file is to be created. The parent directory and the
* negative dentry must reside on the same filesystem instance.
*
* On success, returns a "struct file *". Otherwise an ERR_PTR
* is returned.
*/
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred)
{
struct file *f;
int error;
f = alloc_empty_file(flags, cred);
if (IS_ERR(f))
return f;
error = vfs_create(mnt_idmap(path->mnt),
d_inode(path->dentry->d_parent),
path->dentry, mode, true);
if (!error)
error = vfs_open(path, f);
if (unlikely(error)) {
fput(f);
return ERR_PTR(error);
}
return f;
}
EXPORT_SYMBOL(dentry_create);
/**
* kernel_file_open - open a file for kernel internal use
* @path: path of the file to open
* @flags: open flags
* @cred: credentials for open
*
* Open a file for use by in-kernel consumers. The file is not accounted
* against nr_files and must not be installed into the file descriptor
* table.
*
* Return: Opened file on success, an error pointer on failure.
*/
struct file *kernel_file_open(const struct path *path, int flags,
const struct cred *cred)
{
struct file *f;
int error;
f = alloc_empty_file_noaccount(flags, cred);
if (IS_ERR(f))
return f;
error = vfs_open(path, f);
if (error) {
fput(f);
return ERR_PTR(error);
}
return f;
}
EXPORT_SYMBOL_GPL(kernel_file_open);
#define WILL_CREATE(flags) (flags & (O_CREAT | __O_TMPFILE))
#define O_PATH_FLAGS (O_DIRECTORY | O_NOFOLLOW | O_PATH | O_CLOEXEC)
inline struct open_how build_open_how(int flags, umode_t mode)
{
struct open_how how = {
.flags = flags & VALID_OPEN_FLAGS,
.mode = mode & S_IALLUGO,
};
/* O_PATH beats everything else. */
if (how.flags & O_PATH)
how.flags &= O_PATH_FLAGS;
/* Modes should only be set for create-like flags. */
if (!WILL_CREATE(how.flags))
how.mode = 0;
return how;
}
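/*
 * Hedged self-check sketch (hypothetical, not part of open.c) of the
 * normalization above: the mode is dropped unless a create-like flag is
 * present, and O_PATH strips everything outside O_PATH_FLAGS.
 */
static inline void example_build_open_how_rules(void)
{
	struct open_how plain  = build_open_how(O_RDONLY, 0644);
	struct open_how create = build_open_how(O_CREAT | O_WRONLY, 0644);
	struct open_how opath  = build_open_how(O_PATH | O_RDWR, 0);

	WARN_ON(plain.mode != 0);		/* no create flag: mode dropped */
	WARN_ON(create.mode != 0644);		/* O_CREAT: mode kept (masked by S_IALLUGO) */
	WARN_ON(opath.flags & ~O_PATH_FLAGS);	/* O_PATH beats everything else */
}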
inline int build_open_flags(const struct open_how *how, struct open_flags *op)
{
u64 flags = how->flags;
u64 strip = O_CLOEXEC;
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
BUILD_BUG_ON_MSG(upper_32_bits(VALID_OPEN_FLAGS),
"struct open_flags doesn't yet handle flags > 32 bits");
/*
* Strip flags that aren't relevant in determining struct open_flags.
*/
flags &= ~strip;
/*
* Older syscalls implicitly clear all of the invalid flags or argument
* values before calling build_open_flags(), but openat2(2) checks all
* of its arguments.
*/
if (flags & ~VALID_OPEN_FLAGS)
return -EINVAL;
if (how->resolve & ~VALID_RESOLVE_FLAGS)
return -EINVAL;
/* Scoping flags are mutually exclusive. */
if ((how->resolve & RESOLVE_BENEATH) && (how->resolve & RESOLVE_IN_ROOT))
return -EINVAL;
/* Deal with the mode. */
if (WILL_CREATE(flags)) {
if (how->mode & ~S_IALLUGO)
return -EINVAL;
op->mode = how->mode | S_IFREG;
} else {
if (how->mode != 0)
return -EINVAL;
op->mode = 0;
}
/*
* Block bugs where O_DIRECTORY | O_CREAT created regular files.
* Note, that blocking O_DIRECTORY | O_CREAT here also protects
* O_TMPFILE below which requires O_DIRECTORY being raised.
*/
if ((flags & (O_DIRECTORY | O_CREAT)) == (O_DIRECTORY | O_CREAT))
return -EINVAL;
/* Now handle the creative implementation of O_TMPFILE. */
if (flags & __O_TMPFILE) {
/*
* In order to ensure programs get explicit errors when trying
* to use O_TMPFILE on old kernels we enforce that O_DIRECTORY
* is raised alongside __O_TMPFILE.
*/
if (!(flags & O_DIRECTORY))
return -EINVAL;
if (!(acc_mode & MAY_WRITE))
return -EINVAL;
}
if (flags & O_PATH) {
/* O_PATH only permits certain other flags to be set. */
if (flags & ~O_PATH_FLAGS)
return -EINVAL;
acc_mode = 0;
}
/*
* O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
* check for O_DSYNC if they need any syncing at all, we enforce that it is
* always set instead of having to deal with possibly weird behaviour from
* malicious applications setting only __O_SYNC.
*/
if (flags & __O_SYNC)
flags |= O_DSYNC;
op->open_flag = flags;
/* O_TRUNC implies we need access checks for write permissions */
if (flags & O_TRUNC)
acc_mode |= MAY_WRITE;
/* Allow the LSM permission hook to distinguish append
access from general write access. */
if (flags & O_APPEND)
acc_mode |= MAY_APPEND;
op->acc_mode = acc_mode;
op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;
if (flags & O_CREAT) {
op->intent |= LOOKUP_CREATE;
if (flags & O_EXCL) {
op->intent |= LOOKUP_EXCL;
flags |= O_NOFOLLOW;
}
}
if (flags & O_DIRECTORY)
lookup_flags |= LOOKUP_DIRECTORY;
if (!(flags & O_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
if (how->resolve & RESOLVE_NO_XDEV)
lookup_flags |= LOOKUP_NO_XDEV;
if (how->resolve & RESOLVE_NO_MAGICLINKS)
lookup_flags |= LOOKUP_NO_MAGICLINKS;
if (how->resolve & RESOLVE_NO_SYMLINKS)
lookup_flags |= LOOKUP_NO_SYMLINKS;
if (how->resolve & RESOLVE_BENEATH)
lookup_flags |= LOOKUP_BENEATH;
if (how->resolve & RESOLVE_IN_ROOT)
lookup_flags |= LOOKUP_IN_ROOT;
if (how->resolve & RESOLVE_CACHED) {
/* Don't bother even trying for create/truncate/tmpfile open */
if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE))
return -EAGAIN;
lookup_flags |= LOOKUP_CACHED;
}
op->lookup_flags = lookup_flags;
return 0;
}
/**
* file_open_name - open file and return file pointer
*
* @name: struct filename containing path to open
* @flags: open flags as per the open(2) second argument
* @mode: mode for the new file if O_CREAT is set, else ignored
*
* This is the helper to open a file from kernelspace if you really
* have to. But in general you should not do this, so please move
* along, nothing to see here..
*/
struct file *file_open_name(struct filename *name, int flags, umode_t mode)
{
struct open_flags op;
struct open_how how = build_open_how(flags, mode);
int err = build_open_flags(&how, &op);
if (err)
return ERR_PTR(err);
return do_filp_open(AT_FDCWD, name, &op);
}
/**
* filp_open - open file and return file pointer
*
* @filename: path to open
* @flags: open flags as per the open(2) second argument
* @mode: mode for the new file if O_CREAT is set, else ignored
*
* This is the helper to open a file from kernelspace if you really
* have to. But in general you should not do this, so please move
* along, nothing to see here..
*/
struct file *filp_open(const char *filename, int flags, umode_t mode)
{
struct filename *name = getname_kernel(filename);
struct file *file = ERR_CAST(name);
if (!IS_ERR(name)) {
file = file_open_name(name, flags, mode);
putname(name);
}
return file;
}
EXPORT_SYMBOL(filp_open);
struct file *file_open_root(const struct path *root,
const char *filename, int flags, umode_t mode)
{
struct open_flags op;
struct open_how how = build_open_how(flags, mode);
int err = build_open_flags(&how, &op);
if (err)
return ERR_PTR(err);
return do_file_open_root(root, filename, &op);
}
EXPORT_SYMBOL(file_open_root);
static int do_sys_openat2(int dfd, const char __user *filename,
struct open_how *how)
{
struct open_flags op;
struct filename *tmp;
int err, fd;
err = build_open_flags(how, &op);
if (unlikely(err))
return err;
tmp = getname(filename);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
fd = get_unused_fd_flags(how->flags);
if (likely(fd >= 0)) {
struct file *f = do_filp_open(dfd, tmp, &op);
if (IS_ERR(f)) {
put_unused_fd(fd);
fd = PTR_ERR(f);
} else {
fd_install(fd, f);
}
}
putname(tmp);
return fd;
}
int do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_how how = build_open_how(flags, mode);
return do_sys_openat2(dfd, filename, &how);
}
SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
{
if (force_o_largefile())
flags |= O_LARGEFILE;
return do_sys_open(AT_FDCWD, filename, flags, mode);
}
SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
umode_t, mode)
{
if (force_o_largefile())
flags |= O_LARGEFILE;
return do_sys_open(dfd, filename, flags, mode);
}
SYSCALL_DEFINE4(openat2, int, dfd, const char __user *, filename,
struct open_how __user *, how, size_t, usize)
{
int err;
struct open_how tmp;
BUILD_BUG_ON(sizeof(struct open_how) < OPEN_HOW_SIZE_VER0);
BUILD_BUG_ON(sizeof(struct open_how) != OPEN_HOW_SIZE_LATEST);
if (unlikely(usize < OPEN_HOW_SIZE_VER0))
return -EINVAL;
if (unlikely(usize > PAGE_SIZE))
return -E2BIG;
err = copy_struct_from_user(&tmp, sizeof(tmp), how, usize);
if (err)
return err;
audit_openat2_how(&tmp);
/* O_LARGEFILE is only allowed for non-O_PATH. */
if (!(tmp.flags & O_PATH) && force_o_largefile())
tmp.flags |= O_LARGEFILE;
return do_sys_openat2(dfd, filename, &tmp);
}
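/*
 * Hedged userspace sketch (not kernel code; open_beneath is a hypothetical
 * wrapper): exercising openat2(2) with a scoping resolve flag. The size
 * argument is sizeof(struct open_how), matching the checks above.
 */
#include <linux/openat2.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>

static int open_beneath(int dirfd, const char *name)
{
	struct open_how how = {
		.flags   = O_RDONLY,
		.resolve = RESOLVE_BENEATH,	/* refuse escapes from dirfd */
	};

	return syscall(SYS_openat2, dirfd, name, &how, sizeof(how));
}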
#ifdef CONFIG_COMPAT
/*
* Exactly like sys_open(), except that it doesn't set the
* O_LARGEFILE flag.
*/
COMPAT_SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
{
return do_sys_open(AT_FDCWD, filename, flags, mode);
}
/*
* Exactly like sys_openat(), except that it doesn't set the
* O_LARGEFILE flag.
*/
COMPAT_SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, umode_t, mode)
{
return do_sys_open(dfd, filename, flags, mode);
}
#endif
#ifndef __alpha__
/*
* For backward compatibility? Maybe this should be moved
* into arch/i386 instead?
*/
SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
{
int flags = O_CREAT | O_WRONLY | O_TRUNC;
if (force_o_largefile())
flags |= O_LARGEFILE;
return do_sys_open(AT_FDCWD, pathname, flags, mode);
}
#endif
/*
* "id" is the POSIX thread ID. We use the
* files pointer for this..
*/
static int filp_flush(struct file *filp, fl_owner_t id)
{
int retval = 0;
if (CHECK_DATA_CORRUPTION(file_count(filp) == 0, filp,
"VFS: Close: file count is 0 (f_op=%ps)",
filp->f_op)) {
return 0;
}
if (filp->f_op->flush)
retval = filp->f_op->flush(filp, id);
if (likely(!(filp->f_mode & FMODE_PATH))) {
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
}
return retval;
}
int filp_close(struct file *filp, fl_owner_t id)
{
int retval;
retval = filp_flush(filp, id);
fput_close(filp);
return retval;
}
EXPORT_SYMBOL(filp_close);
/*
* Careful here! We test whether the file pointer is NULL before
* releasing the fd. This ensures that one clone task can't release
* an fd while another clone is opening it.
*/
SYSCALL_DEFINE1(close, unsigned int, fd)
{
int retval;
struct file *file;
file = file_close_fd(fd);
if (!file)
return -EBADF;
retval = filp_flush(file, current->files);
/*
* We're returning to user space. Don't bother
* with any delayed fput() cases.
*/
fput_close_sync(file);
if (likely(retval == 0))
return 0;
/* can't restart close syscall because file table entry was cleared */
if (retval == -ERESTARTSYS ||
retval == -ERESTARTNOINTR ||
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK)
retval = -EINTR;
return retval;
}
/*
* This routine simulates a hangup on the tty, to arrange that users
* are given clean terminals at login time.
*/
SYSCALL_DEFINE0(vhangup)
{
if (capable(CAP_SYS_TTY_CONFIG)) {
tty_vhangup_self();
return 0;
}
return -EPERM;
}
/*
* Called when an inode is about to be open.
* We use this to disallow opening large files on 32bit systems if
* the caller didn't specify O_LARGEFILE. On 64bit systems we force
* on this flag in sys_open.
*/
int generic_file_open(struct inode * inode, struct file * filp)
{
if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EOVERFLOW;
return 0;
}
EXPORT_SYMBOL(generic_file_open);
/*
* This is used by subsystems that don't want seekable
* file descriptors. The function is not supposed to ever fail; the only
* reason it returns an 'int' and not 'void' is so that it can be plugged
* directly into the file_operations structure.
*/
int nonseekable_open(struct inode *inode, struct file *filp)
{
filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
return 0;
}
EXPORT_SYMBOL(nonseekable_open);
/*
* stream_open is used by subsystems that want stream-like file descriptors.
* Such file descriptors are not seekable and don't have a notion of position
* (file.f_pos is always 0 and ppos passed to .read()/.write() is always NULL).
* Contrary to file descriptors of other regular files, .read() and .write()
* can run simultaneously.
*
* stream_open never fails and is marked to return int so that it can be
* used directly as file_operations.open.
*/
int stream_open(struct inode *inode, struct file *filp)
{
filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
filp->f_mode |= FMODE_STREAM;
return 0;
}
EXPORT_SYMBOL(stream_open);
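/*
 * Hedged driver-side sketch (hypothetical character device, not part of
 * open.c): wiring stream_open() as the .open callback gives the fd stream
 * semantics, so *ppos is NULL in .read()/.write() and they may run
 * concurrently.
 */
static ssize_t example_stream_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	/* ppos is NULL for stream fds; there is no file position to honour */
	return 0;
}

static const struct file_operations example_stream_fops = {
	.owner	= THIS_MODULE,
	.open	= stream_open,
	.read	= example_stream_read,
};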
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*
* Note: inlines with more than a single statement should be marked
* __always_inline to avoid problems with older gcc's inlining heuristics.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif
#define BIT_64(n) (U64_C(1) << (n))
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
#define ADDR RLONG_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
asm_inline volatile(LOCK_PREFIX "orb %b1,%0" : CONST_MASK_ADDR(nr, addr)
: "iq" (CONST_MASK(nr))
: "memory");
} else {
asm_inline volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
asm_inline volatile(LOCK_PREFIX "andb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (~CONST_MASK(nr)));
} else {
asm_inline volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
arch_clear_bit(nr, addr);
}
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
bool negative;
asm_inline volatile(LOCK_PREFIX "xorb %2,%1"
: "=@ccs" (negative), WBYTE_ADDR(addr)
: "iq" ((char)mask) : "memory");
return negative;
}
#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
arch___clear_bit(nr, addr);
}
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
asm_inline volatile(LOCK_PREFIX "xorb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (CONST_MASK(nr)));
} else {
asm_inline volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return arch_test_and_set_bit(nr, addr);
}
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
asm(__ASM_SIZE(bts) " %2,%1"
: "=@ccc" (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
/*
* Note: the operation is performed atomically with respect to
* the local CPU, but not other CPUs. Portable code should not
* rely on this behaviour.
* KVM relies on this behaviour on x86 for modifying memory that is also
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
asm volatile(__ASM_SIZE(btr) " %2,%1"
: "=@ccc" (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
asm volatile(__ASM_SIZE(btc) " %2,%1"
: "=@ccc" (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
{
bool oldbit;
asm volatile("testb %2,%1"
: "=@ccnz" (oldbit)
: "m" (((unsigned char *)addr)[nr >> 3]),
"i" (1 << (nr & 7))
:"memory");
return oldbit;
}
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
bool oldbit;
asm volatile(__ASM_SIZE(bt) " %2,%1"
: "=@ccc" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
variable_test_bit(nr, addr);
}
static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
variable_test_bit(nr, addr);
}
static __always_inline __attribute_const__ unsigned long variable__ffs(unsigned long word)
{
asm("tzcnt %1,%0"
: "=r" (word)
: ASM_INPUT_RM (word));
return word;
}
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
#define __ffs(word) \
(__builtin_constant_p(word) ? \
(unsigned long)__builtin_ctzl(word) : \
variable__ffs(word))
static __always_inline __attribute_const__ unsigned long variable_ffz(unsigned long word)
{
return variable__ffs(~word);
}
/**
* ffz - find first zero bit in word
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
#define ffz(word) \
(__builtin_constant_p(word) ? \
(unsigned long)__builtin_ctzl(~word) : \
variable_ffz(word))
/*
* __fls: find last set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static __always_inline __attribute_const__ unsigned long __fls(unsigned long word)
{
if (__builtin_constant_p(word))
return BITS_PER_LONG - 1 - __builtin_clzl(word);
asm("bsr %1,%0"
: "=r" (word)
: ASM_INPUT_RM (word));
return word;
}
#undef ADDR
#ifdef __KERNEL__
static __always_inline __attribute_const__ int variable_ffs(int x)
{
int r;
#ifdef CONFIG_X86_64
/*
* AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before, except that the
* top 32 bits will be cleared.
*
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
asm("bsfl %1,%0"
: "=r" (r)
: ASM_INPUT_RM (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
/**
* ffs - find first set bit in word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs
* routines, therefore differs in spirit from the other bitops.
*
* ffs(value) returns 0 if value is 0 or the position of the first
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
/**
* fls - find last set bit in word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffs, but returns the position of the most significant set bit.
*
* fls(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static __always_inline __attribute_const__ int fls(unsigned int x)
{
int r;
if (__builtin_constant_p(x))
return x ? 32 - __builtin_clz(x) : 0;
#ifdef CONFIG_X86_64
/*
* AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* previous value is written back unchanged, except that the
* top 32 bits will be cleared.
*
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
asm("bsrl %1,%0"
: "=r" (r)
: ASM_INPUT_RM (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
#else
asm("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
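/*
* Illustrative sketch, not part of the original header: unlike __ffs() and
* __fls(), ffs() and fls() use 1-based bit positions and accept zero input.
*
* ffs(0)    == 0
* ffs(0x10) == 5	// bit 4 set, reported as position 5
* fls(0)    == 0
* fls(0x10) == 5	// most-significant set bit is bit 4
*/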
/**
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
#ifdef CONFIG_X86_64
static __always_inline __attribute_const__ int fls64(__u64 x)
{
int bitpos = -1;
if (__builtin_constant_p(x))
return x ? 64 - __builtin_clzll(x) : 0;
/*
* AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* previous value is written back unchanged.
*/
asm("bsrq %1,%q0"
: "+r" (bitpos)
: ASM_INPUT_RM (x));
return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion
*
* Copyright IBM Corporation, 2001
*
* Author: Dipankar Sarma <dipankar@in.ibm.com>
*
* Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
*
* For detailed explanation of Read-Copy Update mechanism see -
* http://lse.sourceforge.net/locking/rcupdate.html
*
*/
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/processor.h>
#include <linux/context_tracking_irq.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
#define RCU_SEQ_CTR_SHIFT 2
#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void synchronize_rcu(void);
struct rcu_gp_oldstate;
unsigned long get_completed_synchronize_rcu(void);
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
// Maximum number of unsigned long values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2
/**
* same_state_synchronize_rcu - Are two old-state values identical?
* @oldstate1: First old-state value.
* @oldstate2: Second old-state value.
*
* The two old-state values must have been obtained from either
* get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
* get_completed_synchronize_rcu(). Returns @true if the two values are
* identical and @false otherwise. This allows structures whose lifetimes
* are tracked by old-state values to push these values to a list header,
* allowing those structures to be slightly smaller.
*/
static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
{
return oldstate1 == oldstate2;
}
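/*
* Illustrative sketch, not part of the original header: one way old-state
* values are used together with the polled grace-period API declared in
* rcutiny.h/rcutree.h (get_state_synchronize_rcu() and
* poll_state_synchronize_rcu()). The structure and field names are
* hypothetical.
*
* struct foo {
* 	unsigned long rcu_gp_state;
* 	...
* };
*
* // Updater, at removal time: record the current grace-period state.
* foo->rcu_gp_state = get_state_synchronize_rcu();
*
* // Later: free only once that grace period has completed.
* if (poll_state_synchronize_rcu(foo->rcu_gp_state))
* 	kfree(foo);
*/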
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
/*
* Defined as a macro as it is a very low level header included from
* areas that don't even know about current. This gives the rcu_read_lock()
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TINY_RCU
#define rcu_read_unlock_strict() do { } while (0)
#else
void rcu_read_unlock_strict(void);
#endif
static inline void __rcu_read_lock(void)
{
preempt_disable();
}
static inline void __rcu_read_unlock(void)
{
if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
rcu_read_unlock_strict();
preempt_enable();
}
static inline int rcu_preempt_depth(void)
{
return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_RCU_LAZY
void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
#else
static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
{
call_rcu(head, func);
}
#endif
/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
static __always_inline void rcu_irq_work_resched(void) { }
#endif
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);
#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#define RCU_NOCB_LOCKDEP_WARN(c, s)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
* Note a quasi-voluntary context switch for RCU-tasks's benefit.
* This is a macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU_GENERIC
# ifdef CONFIG_TASKS_RCU
# define rcu_tasks_classic_qs(t, preempt) \
do { \
if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_tasks_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
# define synchronize_rcu_tasks synchronize_rcu
# endif
# ifdef CONFIG_TASKS_TRACE_RCU
// Bits for ->trc_reader_special.b.need_qs field.
#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
void rcu_tasks_trace_qs_blkd(struct task_struct *t);
# define rcu_tasks_trace_qs(t) \
do { \
int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
\
if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
likely(!___rttq_nesting)) { \
rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
} else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
!READ_ONCE((t)->trc_reader_special.b.blocked)) { \
rcu_tasks_trace_qs_blkd(t); \
} \
} while (0)
void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif
#define rcu_tasks_qs(t, preempt) \
do { \
rcu_tasks_classic_qs((t), (preempt)); \
rcu_tasks_trace_qs(t); \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
void synchronize_rcu_tasks_rude(void);
void rcu_tasks_rude_torture_stats_print(char *tt, char *tf);
# endif
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
/**
* rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
*
* As an accident of implementation, an RCU Tasks Trace grace period also
* acts as an RCU grace period. However, this could change at any time.
* Code relying on this accident must call this function to verify that
* this accident is still happening.
*
* You have been warned!
*/
static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
/**
* cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
* This macro resembles cond_resched(), except that it is defined to
* report potential quiescent states to RCU-tasks even if the cond_resched()
* machinery were to be shut off, as some advocate for PREEMPTION kernels.
*/
#define cond_resched_tasks_rcu_qs() \
do { \
rcu_tasks_qs(current, false); \
cond_resched(); \
} while (0)
/**
* rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
* @old_ts: jiffies at start of processing.
*
* This helper is for long-running softirq handlers, such as NAPI threads in
* networking. The caller should initialize the variable passed in as @old_ts
* at the beginning of the softirq handler. When invoked frequently, this macro
* will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
* provide both RCU and RCU-Tasks quiescent states. Note that this macro
* modifies its old_ts argument.
*
* Because regions of code that have disabled softirq act as RCU read-side
* critical sections, this macro should be invoked with softirq (and
* preemption) enabled.
*
* The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels have
* more opportunities to invoke schedule() and thus to provide the necessary
* quiescent states. In contrast, calling cond_resched() alone does not achieve
* the same effect, because cond_resched() does not provide RCU-Tasks quiescent
* states.
*/
#define rcu_softirq_qs_periodic(old_ts) \
do { \
if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
time_after(jiffies, (old_ts) + HZ / 10)) { \
preempt_disable(); \
rcu_softirq_qs(); \
preempt_enable(); \
(old_ts) = jiffies; \
} \
} while (0)
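/*
* Illustrative sketch, not part of the original header, of the pattern the
* comment above describes; napi_has_more_work() and napi_process_one() are
* hypothetical stand-ins for the handler's real work loop.
*
* unsigned long old_ts = jiffies;
*
* while (napi_has_more_work(napi)) {
* 	napi_process_one(napi);
* 	rcu_softirq_qs_periodic(old_ts);
* }
*/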
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif
/*
* The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
* are needed for dynamic initialization and destruction of rcu_head
* on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
* dynamic initialization and destruction of statically allocated rcu_head
* structures. However, rcu_head structures allocated dynamically in the
* heap don't need any initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head) { }
static inline void destroy_rcu_head(struct rcu_head *head) { }
static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void rcu_lock_acquire(struct lockdep_map *map)
{
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}
static inline void rcu_try_lock_acquire(struct lockdep_map *map)
{
lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
}
static inline void rcu_lock_release(struct lockdep_map *map)
{
lock_release(map, _THIS_IP_);
}
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
int rcu_read_lock_any_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
# define rcu_lock_acquire(a) do { } while (0)
# define rcu_try_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)
static inline int rcu_read_lock_held(void)
{
return 1;
}
static inline int rcu_read_lock_bh_held(void)
{
return 1;
}
static inline int rcu_read_lock_sched_held(void)
{
return !preemptible();
}
static inline int rcu_read_lock_any_held(void)
{
return !preemptible();
}
static inline int debug_lockdep_rcu_enabled(void)
{
return 0;
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
/**
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
*
* This checks debug_lockdep_rcu_enabled() before checking (c) to
* prevent early boot splats due to lockdep not yet being initialized,
* and rechecks it after checking (c) to prevent false-positive splats
* due to races with lockdep being disabled. See commit 3066820034b5dd
* ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(".data..unlikely") __warned; \
if (debug_lockdep_rcu_enabled() && (c) && \
debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
#ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void)
{
RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
"Illegal context switch in RCU read-side critical section");
}
#else // #ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void) { }
#endif // #else // #ifndef CONFIG_PREEMPT_RCU
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh read-side critical section"); \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
"Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
// See RCU_LOCKDEP_WARN() for an explanation of the double call to
// debug_lockdep_rcu_enabled().
static inline bool lockdep_assert_rcu_helper(bool c)
{
return debug_lockdep_rcu_enabled() &&
(c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
debug_lockdep_rcu_enabled();
}
/**
* lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock()
*
* Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
*/
#define lockdep_assert_in_rcu_read_lock() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
/**
* lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
*
* Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect.
* Note that local_bh_disable() and friends do not suffice here, instead an
* actual rcu_read_lock_bh() is required.
*/
#define lockdep_assert_in_rcu_read_lock_bh() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
/**
* lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
*
* Splats if lockdep is enabled and there is no rcu_read_lock_sched()
* in effect. Note that preempt_disable() and friends do not suffice here,
* instead an actual rcu_read_lock_sched() is required.
*/
#define lockdep_assert_in_rcu_read_lock_sched() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
/**
* lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
*
* Splats if lockdep is enabled and there is no RCU reader of any
* type in effect. Note that regions of code protected by things like
* preempt_disable, local_bh_disable(), and local_irq_disable() all qualify
* as RCU readers.
*
* Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY
* kernels that are not also built with PREEMPT_COUNT. But if you have
* lockdep enabled, you might as well also enable PREEMPT_COUNT.
*/
#define lockdep_assert_in_rcu_reader() \
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
!lock_is_held(&rcu_bh_lock_map) && \
!lock_is_held(&rcu_sched_lock_map) && \
preemptible()))
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
#define lockdep_assert_in_rcu_read_lock() do { } while (0)
#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
#define lockdep_assert_in_rcu_reader() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
/*
* Helper functions for rcu_dereference_check(), rcu_dereference_protected()
* and rcu_assign_pointer(). Some of these could be folded into their
* callers, but they are left separate in order to ease introduction of
* multiple pointers markings to match different RCU implementations
* (e.g., __srcu), should this make sense in the future.
*/
#ifdef __CHECKER__
#define rcu_check_sparse(p, space) \
((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
#define __unrcu_pointer(p, local) \
({ \
typeof(*p) *local = (typeof(*p) *__force)(p); \
rcu_check_sparse(p, __rcu); \
((typeof(*p) __force __kernel *)(local)); \
})
/**
* unrcu_pointer - mark a pointer as not being RCU protected
* @p: pointer needing to lose its __rcu property
*
* Converts @p from an __rcu pointer to a __kernel pointer.
* This allows an __rcu pointer to be used with xchg() and friends.
*/
#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))
#define __rcu_access_pointer(p, local, space) \
({ \
typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_check(p, local, c, space) \
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_protected(p, local, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
#define __rcu_dereference_raw(p, local) \
({ \
/* Dependency order vs. p above. */ \
typeof(p) local = READ_ONCE(p); \
((typeof(*p) __force __kernel *)(local)); \
})
#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
*/
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
*
* Assigns the specified value to the specified RCU-protected
* pointer, ensuring that any concurrent RCU readers will see
* any prior initialization.
*
* Inserts memory barriers on architectures that require them
* (which is most of them), and also prevents the compiler from
* reordering the code that initializes the structure after the pointer
* assignment. More importantly, this call documents which pointers
* will be dereferenced by RCU read-side code.
*
* In some special cases, you may use RCU_INIT_POINTER() instead
* of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
* to the fact that it does not constrain either the CPU or the compiler.
* That said, using RCU_INIT_POINTER() when you should have used
* rcu_assign_pointer() is a very bad thing that results in
* impossible-to-diagnose memory corruption. So please be careful.
* See the RCU_INIT_POINTER() comment header for details.
*
* Note that rcu_assign_pointer() evaluates each of its arguments only
* once, appearances notwithstanding. One of the "extra" evaluations
* is in typeof() and the other visible only to sparse (__CHECKER__),
* neither of which actually execute the argument. As with most cpp
* macros, this execute-arguments-only-once property is important, so
* please be careful when making changes to rcu_assign_pointer() and the
* other macros that it invokes.
*/
#define rcu_assign_pointer(p, v) \
do { \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
rcu_check_sparse(p, __rcu); \
\
if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
else \
smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
} while (0)
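/*
* Illustrative sketch, not part of the original header, of the usual publish
* pattern; struct foo, gp, and the field names are hypothetical.
*
* struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
*
* if (!p)
* 	return -ENOMEM;
* p->a = 1;
* p->b = 2;
* rcu_assign_pointer(gp, p);	// readers now see a fully initialized *p
*/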
/**
* rcu_replace_pointer() - replace an RCU pointer, returning its old value
* @rcu_ptr: RCU pointer, whose old value is returned
* @ptr: regular pointer
* @c: the lockdep conditions under which the dereference will take place
*
* Perform a replacement, where @rcu_ptr is an RCU-annotated
* pointer and @c is the lockdep argument that is passed to the
* rcu_dereference_protected() call used to read that pointer. The old
* value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
*/
#define rcu_replace_pointer(rcu_ptr, ptr, c) \
({ \
typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
rcu_assign_pointer((rcu_ptr), (ptr)); \
__tmp; \
})
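/*
* Illustrative sketch, not part of the original header: replacing an
* RCU-protected structure under the update-side lock and freeing the old
* version after a grace period. gp, gp_lock, and struct foo (which embeds
* "struct rcu_head rh") are hypothetical.
*
* spin_lock(&gp_lock);
* old = rcu_replace_pointer(gp, new, lockdep_is_held(&gp_lock));
* spin_unlock(&gp_lock);
* if (old)
* 	kfree_rcu(old, rh);
*/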
/**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
* lockdep checks for being in an RCU read-side critical section. This is
* useful when the value of this pointer is accessed, but the pointer is
* not dereferenced, for example, when testing an RCU-protected pointer
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
* Within an RCU read-side critical section, there is little reason to
* use rcu_access_pointer().
*
* It is usually best to test the rcu_access_pointer() return value
* directly in order to avoid accidental dereferences being introduced
* by later inattentive changes. In other words, assigning the
* rcu_access_pointer() return value to a local variable results in an
* accident waiting to happen.
*
* It is also permissible to use rcu_access_pointer() when read-side
* access to the pointer was removed at least one grace period ago, as is
* the case in the context of the RCU callback that is freeing up the data,
* or after a synchronize_rcu() returns. This can be useful when tearing
* down multi-linked structures after a grace period has elapsed. However,
* rcu_dereference_protected() is normally preferred for this use case.
*/
#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
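/*
* Illustrative sketch, not part of the original header: testing an
* RCU-protected pointer against NULL without dereferencing it; gp is
* hypothetical.
*
* if (!rcu_access_pointer(gp))
* 	return;		// nothing published yet, no dereference needed
*/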
/**
* rcu_dereference_check() - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Do an rcu_dereference(), but check that the conditions under which the
* dereference will take place are correct. Typically the conditions
* indicate the various locking conditions that should be held at that
* point. The check should return true if the conditions are satisfied.
* An implicit check for being in an RCU read-side critical section
* (rcu_read_lock()) is included.
*
* For example:
*
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
*
* could be used to indicate to lockdep that foo->bar may only be dereferenced
* if either rcu_read_lock() is held or the lock required to replace
* the bar struct at foo->bar is held.
*
* Note that the list of conditions may also include indications of when a lock
* need not be held, for example during initialisation or destruction of the
* target struct:
*
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
* atomic_read(&foo->usage) == 0);
*
* Inserts memory barriers on architectures that require them
* (currently only the Alpha), prevents the compiler from refetching
* (and from merging fetches), and, more importantly, documents exactly
* which pointers are protected by RCU and checks that the pointer is
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* This is the RCU-bh counterpart to rcu_dereference_check(). However,
* please note that starting in v5.0 kernels, vanilla RCU grace periods
* wait for local_bh_disable() regions of code in addition to regions of
* code demarked by rcu_read_lock() and rcu_read_unlock(). This means
* that synchronize_rcu(), call_rcu(), and friends all take not only
* rcu_read_lock() but also rcu_read_lock_bh() into account.
*/
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
* However, please note that starting in v5.0 kernels, vanilla RCU grace
* periods wait for preempt_disable() regions of code in addition to
* regions of code demarked by rcu_read_lock() and rcu_read_unlock().
* This means that synchronize_rcu(), call_rcu(), and friends all take not
* only rcu_read_lock() but also rcu_read_lock_sched() into account.
*/
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || rcu_read_lock_sched_held(), \
__rcu)
/**
* rcu_dereference_all_check() - rcu_dereference_all with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* This is similar to rcu_dereference_check(), but allows protection
* by all forms of vanilla RCU readers, including preemption disabled,
* bh-disabled, and interrupt-disabled regions of code. Note that "vanilla
* RCU" excludes SRCU and the various Tasks RCU flavors. Please note
* that this macro should not be backported to any Linux-kernel version
* preceding v5.0 due to changes in synchronize_rcu() semantics prior
* to that version.
*/
#define rcu_dereference_all_check(p, c) \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || rcu_read_lock_any_held(), \
__rcu)
/*
* The tracing infrastructure traces RCU (we want that), but unfortunately
* some of the RCU checks causes tracing to lock up the system.
*
* The no-tracing version of rcu_dereference_raw() must not call
* rcu_read_lock_held().
*/
#define rcu_dereference_raw_check(p) \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* the READ_ONCE(). This is useful in cases where update-side locks
* prevent the value of the pointer from changing. Please note that this
* primitive does *not* prevent the compiler from repeating this reference
* or combining it with other references, so it should not be used without
* protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent
* but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
__rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)
/**
* rcu_dereference() - fetch RCU-protected pointer for dereferencing
* @p: The pointer to read, prior to dereferencing
*
* This is a simple wrapper around rcu_dereference_check().
*/
#define rcu_dereference(p) rcu_dereference_check(p, 0)
/**
* rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
* @p: The pointer to read, prior to dereferencing
*
* Makes rcu_dereference_check() do the dirty work.
*/
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
/**
* rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
* @p: The pointer to read, prior to dereferencing
*
* Makes rcu_dereference_check() do the dirty work.
*/
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
/**
* rcu_dereference_all() - fetch RCU-all-protected pointer for dereferencing
* @p: The pointer to read, prior to dereferencing
*
* Makes rcu_dereference_check() do the dirty work.
*/
#define rcu_dereference_all(p) rcu_dereference_all_check(p, 0)
/**
* rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
* @p: The pointer to hand off
*
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
* kill_dependency(). It could be used as follows::
*
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
* if (long_lived) {
* if (!atomic_inc_not_zero(p->refcnt))
* long_lived = false;
* else
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
*/
#define rcu_pointer_handoff(p) (p)
/**
* rcu_read_lock() - mark the beginning of an RCU read-side critical section
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
* are within RCU read-side critical sections, then the
* synchronize_rcu() is guaranteed to block until after all the other
* CPUs exit their critical sections. Similarly, if call_rcu() is invoked
* on one CPU while other CPUs are within RCU read-side critical
* sections, invocation of the corresponding RCU callback is deferred
* until after all the other CPUs exit their critical sections.
*
* Both synchronize_rcu() and call_rcu() also wait for regions of code
* with preemption disabled, including regions of code with interrupts or
* softirqs disabled.
*
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
* read-side critical section, (2) CPU 1 invokes call_rcu() to register
* an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
* (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
* callback is invoked. This is legal, because the RCU read-side critical
* section that was running concurrently with the call_rcu() (and which
* therefore might be referencing something that the corresponding RCU
* callback would free up) has completed before the corresponding
* RCU callback is invoked.
*
* RCU read-side critical sections may be nested. Any deferred actions
* will be deferred until the outermost RCU read-side critical section
* completes.
*
* You can avoid reading and understanding the next paragraph by
* following this rule: don't put anything in an rcu_read_lock() RCU
* read-side critical section that would block in a !PREEMPTION kernel.
* But if you want the full story, read on!
*
* In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
* it is illegal to block while in an RCU read-side critical section.
* In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
* kernel builds, RCU read-side critical sections may be preempted,
* but explicit blocking is illegal. Finally, in preemptible RCU
* implementations in real-time (with -rt patchset) kernel builds, RCU
* read-side critical sections may be preempted and they may also block, but
* only when acquiring spinlocks that are subject to priority inheritance.
*/
static __always_inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
}
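/*
* Illustrative sketch, not part of the original header, of a typical reader;
* gp, struct foo, and do_something_with() are hypothetical.
*
* rcu_read_lock();
* p = rcu_dereference(gp);
* if (p)
* 	do_something_with(p->a);	// no blocking in here (see above)
* rcu_read_unlock();
*/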
/*
* So where is rcu_write_lock()? It does not exist, as there is no
* way for writers to lock out RCU readers. This is a feature, not
* a bug -- this property is what provides RCU's performance benefits.
* Of course, writers must coordinate with each other. The normal
* spinlock primitives work well for this, but any other technique may be
* used as well. RCU does not care how the writers keep out of each
* others' way, as long as they do so.
*/
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
* In almost all situations, rcu_read_unlock() is immune from deadlock.
* This deadlock immunity also extends to the scheduler's runqueue
* and priority-inheritance spinlocks, courtesy of the quiescent-state
* deferral that is carried out when rcu_read_unlock() is invoked with
* interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
static inline void rcu_read_unlock(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
__release(RCU);
__rcu_read_unlock();
}
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
* This is equivalent to rcu_read_lock(), but also disables softirqs.
* Note that anything else that disables softirqs can also serve as an RCU
* read-side critical section. However, please note that this equivalence
* applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
* rcu_read_lock_bh() were unrelated.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
* rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
* was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
}
/**
* rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
static inline void rcu_read_unlock_bh(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
}
/**
* rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
*
* This is equivalent to rcu_read_lock(), but also disables preemption.
* Read-side critical sections can also be introduced by anything else that
* disables preemption, including local_irq_disable() and friends. However,
* please note that the equivalence to rcu_read_lock() applies only to
* v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
* were unrelated.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
* rcu_read_unlock_sched() from process context if the matching
* rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
{
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
preempt_disable_notrace();
__acquire(RCU_SCHED);
}
/**
* rcu_read_unlock_sched() - marks the end of an RCU-classic critical section
*
* See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
__release(RCU_SCHED);
preempt_enable_notrace();
}
static __always_inline void rcu_read_lock_dont_migrate(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
migrate_disable();
rcu_read_lock();
}
static inline void rcu_read_unlock_migrate(void)
{
rcu_read_unlock();
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
migrate_enable();
}
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
* @p: The pointer to be initialized.
* @v: The value to initialize the pointer to.
*
* Initialize an RCU-protected pointer in special cases where readers
* do not need ordering constraints on the CPU or the compiler. These
* special cases are:
*
* 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
* 2. The caller has taken whatever steps are required to prevent
* RCU readers from concurrently accessing this pointer *or*
* 3. The referenced data structure has already been exposed to
* readers either at compile time or via rcu_assign_pointer() *and*
*
* a. You have not made *any* reader-visible changes to
* this structure since then *or*
* b. It is OK for readers accessing this structure from its
* new location to see the old state of the structure. (For
* example, the changes were to statistical counters or to
* other state where exact synchronization is not required.)
*
* Failure to follow these rules governing use of RCU_INIT_POINTER() will
* result in impossible-to-diagnose memory corruption: the structures will
* look fine in crash dumps, but concurrent RCU readers might see
* pre-initialization values of the referenced data structure. So
* please be very careful how you use RCU_INIT_POINTER()!!!
*
* If you are creating an RCU-protected linked structure that is accessed
* by a single external-to-structure RCU-protected pointer, then you may
* use RCU_INIT_POINTER() to initialize the internal RCU-protected
* pointers, but you must use rcu_assign_pointer() to initialize the
* external-to-structure pointer *after* you have completely initialized
* the reader-accessible portions of the linked structure.
*
* Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
* ordering guarantees for either the CPU or the compiler.
*/
#define RCU_INIT_POINTER(p, v) \
do { \
rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
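/*
* Illustrative sketch, not part of the original header, of the permitted
* uses listed above; gp, struct foo, and the field names are hypothetical.
*
* // Rule 1: NULLing out an RCU-protected pointer.
* RCU_INIT_POINTER(gp, NULL);
*
* // Pre-publication initialization of internal pointers, as described in
* // the final paragraph above; the external pointer is still published
* // with rcu_assign_pointer().
* p = kzalloc(sizeof(*p), GFP_KERNEL);
* if (!p)
* 	return -ENOMEM;
* RCU_INIT_POINTER(p->child, c);
* rcu_assign_pointer(gp, p);
*/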
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
* @p: The pointer to be initialized.
* @v: The value to initialize the pointer to.
*
* GCC-style initialization for an RCU-protected pointer in a structure field.
*/
#define RCU_POINTER_INITIALIZER(p, v) \
.p = RCU_INITIALIZER(v)
/**
* kfree_rcu() - kfree an object after a grace period.
* @ptr: pointer to kfree for double-argument invocations.
* @rhf: the name of the struct rcu_head within the type of @ptr.
*
* Many RCU callback functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
* when they are used in a kernel module, that module must invoke the
* high-latency rcu_barrier() function at module-unload time.
*
* The kfree_rcu() function handles this issue. In order to have a universal
* callback function handling different offsets of rcu_head, the callback needs
* to determine the starting address of the freed object, which can be a large
* kmalloc or vmalloc allocation. To allow simply aligning the pointer down to
* page boundary for those, only offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
* be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
* The object to be freed can be allocated either by kmalloc() or
* kmem_cache_alloc().
*
* Note that the allowable offset might decrease in the future.
*
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
#define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
#define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
/**
* kfree_rcu_mightsleep() - kfree an object after a grace period.
* @ptr: pointer to kfree for single-argument invocations.
*
* In the head-less variant, only one argument is passed,
* namely a pointer that is to be freed after a grace
* period. The invocation is therefore simply
*
* kfree_rcu_mightsleep(ptr);
*
* where @ptr is the pointer to be freed by kvfree().
*
* Please note that head-less freeing may be used only from
* a context that is allowed to sleep, that is, one in which
* a might_sleep() annotation would be legal. Otherwise, please
* embed an rcu_head structure within the type of @ptr and use
* kfree_rcu() instead.
*/
#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
/*
* In mm/slab_common.c, no suitable header to include here.
*/
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
/*
* The BUILD_BUG_ON() makes sure the rcu_head offset can be handled. See the
* comment of kfree_rcu() for details.
*/
#define kvfree_rcu_arg_2(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
\
if (___p) { \
BUILD_BUG_ON(offsetof(typeof(*(ptr)), rhf) >= 4096); \
kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
} \
} while (0)
#define kvfree_rcu_arg_1(ptr) \
do { \
typeof(ptr) ___p = (ptr); \
\
if (___p) \
kvfree_call_rcu(NULL, (void *) (___p)); \
} while (0)
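/*
* Illustrative sketch, not part of the original header; struct foo and the
* field names are hypothetical.
*
* struct foo {
* 	int a;
* 	struct rcu_head rh;	// must lie within the first 4096 bytes
* };
*
* kfree_rcu(p, rh);		// frees *p after a grace period
* kfree_rcu_mightsleep(q);	// head-less form, sleepable context only
*/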
/*
* Place this after a lock-acquisition primitive to guarantee that
* an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock() do { } while (0)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
/* Has the specified rcu_head structure been handed to call_rcu()? */
/**
* rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
* @rhp: The rcu_head structure to initialize.
*
* If you intend to invoke rcu_head_after_call_rcu() to test whether a
* given rcu_head structure has already been passed to call_rcu(), then
* you must also invoke this rcu_head_init() function on it just after
* allocating that structure. Calls to this function must not race with
* calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
*/
static inline void rcu_head_init(struct rcu_head *rhp)
{
rhp->func = (rcu_callback_t)~0L;
}
/**
* rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
* @rhp: The rcu_head structure to test.
* @f: The function passed to call_rcu() along with @rhp.
*
* Returns @true if the @rhp has been passed to call_rcu() with @func,
* and @false otherwise. Emits a warning in any other case, including
* the case where @rhp has already been invoked after a grace period.
* Calls to this function must not race with callback invocation. One way
* to avoid such races is to enclose the call to rcu_head_after_call_rcu()
* in an RCU read-side critical section that includes a read-side fetch
* of the pointer to the structure containing @rhp.
*/
static inline bool
rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
{
rcu_callback_t func = READ_ONCE(rhp->func);
if (func == f)
return true;
WARN_ON_ONCE(func != (rcu_callback_t)~0L);
return false;
}
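/*
* Illustrative sketch, not part of the original header; struct foo,
* foo_free_rcu(), and the field names are hypothetical.
*
* p = kmalloc(sizeof(*p), GFP_KERNEL);
* if (!p)
* 	return -ENOMEM;
* rcu_head_init(&p->rh);		// just after allocation, see above
* ...
* if (!rcu_head_after_call_rcu(&p->rh, foo_free_rcu))
* 	call_rcu(&p->rh, foo_free_rcu);	// hand the structure off once
*/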
/* kernel/ksysfs.c definitions */
extern int rcu_expedited;
extern int rcu_normal;
DEFINE_LOCK_GUARD_0(rcu,
do {
rcu_read_lock();
/*
* sparse doesn't call the cleanup function,
* so just release immediately and don't track
* the context. We don't need to anyway, since
* the whole point of the guard is to not need
* the explicit unlock.
*/
__release(RCU);
} while (0),
rcu_read_unlock())
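/*
* Illustrative sketch, not part of the original header, of the guard defined
* above, assuming the scoped_guard() helper from <linux/cleanup.h>; gp and
* do_something_with() are hypothetical.
*
* scoped_guard(rcu) {
* 	p = rcu_dereference(gp);
* 	if (p)
* 		do_something_with(p);
* }	// rcu_read_unlock() runs automatically on exit from the scope
*/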
#endif /* __LINUX_RCUPDATE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2012 Novell Inc.
* Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
* Copyright (c) 2012-2019 Linux Foundation
*
* Core driver model functions and structures that should not be
* shared outside of the drivers/base/ directory.
*
*/
#include <linux/notifier.h>
/**
* struct subsys_private - structure to hold the portions of the bus_type/class structure that are private to the driver core.
*
* @subsys: the struct kset that defines this subsystem
* @devices_kset: the subsystem's 'devices' directory
* @interfaces: list of subsystem interfaces associated
* @mutex: protect the devices, and interfaces lists.
*
* @drivers_kset: the list of drivers associated
* @klist_devices: the klist to iterate over the @devices_kset
* @klist_drivers: the klist to iterate over the @drivers_kset
* @bus_notifier: the bus notifier list for anything that cares about things
* on this bus.
* @bus: pointer back to the struct bus_type that this structure is associated
* with.
* @dev_root: Default device to use as the parent.
*
* @glue_dirs: "glue" directory to put in-between the parent device to
* avoid namespace conflicts
* @class: pointer back to the struct class that this structure is associated
* with.
* @lock_key: Lock class key for use by the lock validator
*
* This structure is the one that is the actual kobject allowing struct
* bus_type/class to be statically allocated safely. Nothing outside of the
* driver core should ever touch these fields.
*/
struct subsys_private {
struct kset subsys;
struct kset *devices_kset;
struct list_head interfaces;
struct mutex mutex;
struct kset *drivers_kset;
struct klist klist_devices;
struct klist klist_drivers;
struct blocking_notifier_head bus_notifier;
unsigned int drivers_autoprobe:1;
const struct bus_type *bus;
struct device *dev_root;
struct kset glue_dirs;
const struct class *class;
struct lock_class_key lock_key;
};
#define to_subsys_private(obj) container_of_const(obj, struct subsys_private, subsys.kobj)
static inline struct subsys_private *subsys_get(struct subsys_private *sp)
{
if (sp)
kset_get(&sp->subsys);
return sp;
}
static inline void subsys_put(struct subsys_private *sp)
{
if (sp)
kset_put(&sp->subsys);
}
struct subsys_private *bus_to_subsys(const struct bus_type *bus);
struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
struct kobject kobj;
struct klist klist_devices;
struct klist_node knode_bus;
struct module_kobject *mkobj;
struct device_driver *driver;
};
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
/**
* struct device_private - structure to hold the portions of the device structure that are private to the driver core.
*
* @klist_children: klist containing all children of this device
* @knode_parent: node in sibling list
* @knode_driver: node in driver list
* @knode_bus: node in bus list
* @knode_class: node in class list
* @deferred_probe: entry in deferred_probe_list which is used to retry the
* binding of drivers which were unable to get all the resources needed by
* the device; typically because it depends on another driver getting
* probed first.
* @async_driver: pointer to device driver awaiting probe via async_probe
* @device: pointer back to the struct device that this structure is
* associated with.
* @dead: This device is currently either in the process of or has been
* removed from the system. Any asynchronous events scheduled for this
* device should exit without taking any action.
*
* Nothing outside of the driver core should ever touch these fields.
*/
struct device_private {
struct klist klist_children;
struct klist_node knode_parent;
struct klist_node knode_driver;
struct klist_node knode_bus;
struct klist_node knode_class;
struct list_head deferred_probe;
const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
u8 dead:1;
};
#define to_device_private_parent(obj) \
container_of(obj, struct device_private, knode_parent)
#define to_device_private_driver(obj) \
container_of(obj, struct device_private, knode_driver)
#define to_device_private_bus(obj) \
container_of(obj, struct device_private, knode_bus)
#define to_device_private_class(obj) \
container_of(obj, struct device_private, knode_class)
/* initialisation functions */
int devices_init(void);
int buses_init(void);
int classes_init(void);
int firmware_init(void);
#ifdef CONFIG_SYS_HYPERVISOR
int hypervisor_init(void);
#else
static inline int hypervisor_init(void) { return 0; }
#endif
int platform_bus_init(void);
int faux_bus_init(void);
void cpu_dev_init(void);
void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
void auxiliary_bus_init(void);
#else
static inline void auxiliary_bus_init(void) { }
#endif
struct kobject *virtual_device_parent(void);
int bus_add_device(struct device *dev);
void bus_probe_device(struct device *dev);
void bus_remove_device(struct device *dev);
void bus_notify(struct device *dev, enum bus_notifier_event value);
bool bus_is_registered(const struct bus_type *bus);
int bus_add_driver(struct device_driver *drv);
void bus_remove_driver(struct device_driver *drv);
void device_release_driver_internal(struct device *dev, const struct device_driver *drv,
struct device *parent);
void driver_detach(const struct device_driver *drv);
void driver_deferred_probe_del(struct device *dev);
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf);
static inline int driver_match_device(const struct device_driver *drv,
struct device *dev)
{
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}
static inline void dev_sync_state(struct device *dev)
{
if (dev->bus->sync_state)
dev->bus->sync_state(dev);
else if (dev->driver && dev->driver->sync_state)
dev->driver->sync_state(dev);
}
int driver_add_groups(const struct device_driver *drv, const struct attribute_group **groups);
void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
{
/*
* The majority of (all?) read accesses to dev->driver happen either
* while holding the device lock or in bus/driver code that is only
* invoked when the device is bound to a driver, so there is no
* concern about the pointer changing while it is being read.
* However, when reading a device's uevent file we read the driver
* pointer without taking the device lock (so that we do not block
* there for an arbitrary amount of time). We use WRITE_ONCE() here
* to prevent tearing so that READ_ONCE() can safely be used in the
* uevent code.
*/
// FIXME - this cast should not be needed "soon"
WRITE_ONCE(dev->driver, (struct device_driver *)drv);
}
int devres_release_all(struct device *dev);
void device_block_probing(void);
void device_unblock_probing(void);
void deferred_probe_extend_timeout(void);
void driver_deferred_probe_trigger(void);
const char *device_get_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid, const char **tmp);
/* /sys/devices directory */
extern struct kset *devices_kset;
void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
int module_add_driver(struct module *mod, const struct device_driver *drv);
void module_remove_driver(const struct device_driver *drv);
#else
static inline int module_add_driver(struct module *mod,
struct device_driver *drv)
{
return 0;
}
static inline void module_remove_driver(struct device_driver *drv) { }
#endif
#ifdef CONFIG_DEVTMPFS
int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
#ifdef CONFIG_BLOCK
extern const struct class block_class;
static inline bool is_blockdev(struct device *dev)
{
return dev->class == &block_class;
}
#else
static inline bool is_blockdev(struct device *dev) { return false; }
#endif
/* Device links support */
int device_links_read_lock(void);
void device_links_read_unlock(int idx);
int device_links_read_lock_held(void);
int device_links_check_suppliers(struct device *dev);
void device_links_force_bind(struct device *dev);
void device_links_driver_bound(struct device *dev);
void device_links_driver_cleanup(struct device *dev);
void device_links_no_driver(struct device *dev);
bool device_links_busy(struct device *dev);
void device_links_unbind_consumers(struct device *dev);
bool device_link_flag_is_sync_state_only(u32 flags);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
#define dev_for_each_link_to_supplier(__link, __dev) \
list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
device_links_read_lock_held())
#define dev_for_each_link_to_consumer(__link, __dev) \
list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
device_links_read_lock_held())
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
#ifdef CONFIG_DEVTMPFS
int devtmpfs_create_node(struct device *dev);
int devtmpfs_delete_node(struct device *dev);
#else
static inline int devtmpfs_create_node(struct device *dev) { return 0; }
static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
#endif
void software_node_notify(struct device *dev);
void software_node_notify_remove(struct device *dev);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common values for AES algorithms
*/
#ifndef _CRYPTO_AES_H
#define _CRYPTO_AES_H
#include <linux/types.h>
#include <linux/crypto.h>
#define AES_MIN_KEY_SIZE 16
#define AES_MAX_KEY_SIZE 32
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
#define AES_BLOCK_SIZE 16
#define AES_MAX_KEYLENGTH (15 * 16)
#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
/*
* Please ensure that the first two fields are 16-byte aligned
* relative to the start of the structure, i.e., don't move them!
*/
struct crypto_aes_ctx {
u32 key_enc[AES_MAX_KEYLENGTH_U32];
u32 key_dec[AES_MAX_KEYLENGTH_U32];
u32 key_length;
};
extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
/*
* validate key length for AES algorithms
*/
static inline int aes_check_keylen(unsigned int keylen)
{
switch (keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_192:
case AES_KEYSIZE_256:
break;
default:
return -EINVAL;
}
return 0;
}
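/*
* Illustrative sketch, not part of the original header: validating the key
* length in a setkey path before expanding the key; my_aes_setkey() is a
* hypothetical caller of the helpers declared in this header.
*
* static int my_aes_setkey(struct crypto_aes_ctx *ctx,
* 			 const u8 *key, unsigned int keylen)
* {
* 	int err = aes_check_keylen(keylen);
*
* 	if (err)
* 		return err;
* 	return aes_expandkey(ctx, key, keylen);
* }
*/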
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len);
/**
* aes_expandkey - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
* @in_key: The supplied key.
* @key_len: The length of the supplied key.
*
* Returns 0 on success. The function fails only if an invalid key size (or
* pointer) is supplied.
* The expanded key size is 240 bytes (a maximum of 14 rounds, each with a
* unique 16-byte round key, plus a 16-byte key which is used before the first
* round).
* The decryption key is prepared for the "Equivalent Inverse Cipher" as
* described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
* for the initial combination, the second slot for the first round and so on.
*/
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
/**
* aes_encrypt - Encrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the ciphertext
* @in: Buffer containing the plaintext
*/
void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
/**
* aes_decrypt - Decrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the plaintext
* @in: Buffer containing the ciphertext
*/
void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
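/*
 * Example usage (an illustrative sketch, not part of this header): a caller
 * would typically validate the key length, expand the key schedule once and
 * then process one 16-byte block at a time.  "key", "key_len", "in" and
 * "out" are hypothetical buffers owned by the caller.
 *
 *	struct crypto_aes_ctx ctx;
 *	int err;
 *
 *	err = aes_check_keylen(key_len);
 *	if (err)
 *		return err;
 *	err = aes_expandkey(&ctx, key, key_len);
 *	if (err)
 *		return err;
 *	aes_encrypt(&ctx, out, in);	(encrypt one AES_BLOCK_SIZE block)
 *	aes_decrypt(&ctx, in, out);	(round-trips back to the plaintext)
 */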
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
#endif
// SPDX-License-Identifier: GPL-2.0-or-later
/* Task credentials management - see Documentation/security/credentials.rst
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define pr_fmt(fmt) "CRED: " fmt
#include <linux/export.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/init_task.h>
#include <linux/security.h>
#include <linux/binfmts.h>
#include <linux/cn_proc.h>
#include <linux/uidgid.h>
#if 0
#define kdebug(FMT, ...) \
printk("[%-5.5s%5u] " FMT "\n", \
current->comm, current->pid, ##__VA_ARGS__)
#else
#define kdebug(FMT, ...) \
do { \
if (0) \
no_printk("[%-5.5s%5u] " FMT "\n", \
current->comm, current->pid, ##__VA_ARGS__); \
} while (0)
#endif
static struct kmem_cache *cred_jar;
/* init to 2 - one for init_task, one to ensure it is never freed */
static struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
/*
* The initial credentials for the initial task
*/
struct cred init_cred = {
.usage = ATOMIC_LONG_INIT(4),
.uid = GLOBAL_ROOT_UID,
.gid = GLOBAL_ROOT_GID,
.suid = GLOBAL_ROOT_UID,
.sgid = GLOBAL_ROOT_GID,
.euid = GLOBAL_ROOT_UID,
.egid = GLOBAL_ROOT_GID,
.fsuid = GLOBAL_ROOT_UID,
.fsgid = GLOBAL_ROOT_GID,
.securebits = SECUREBITS_DEFAULT,
.cap_inheritable = CAP_EMPTY_SET,
.cap_permitted = CAP_FULL_SET,
.cap_effective = CAP_FULL_SET,
.cap_bset = CAP_FULL_SET,
.user = INIT_USER,
.user_ns = &init_user_ns,
.group_info = &init_groups,
.ucounts = &init_ucounts,
};
/*
* The RCU callback to actually dispose of a set of credentials
*/
static void put_cred_rcu(struct rcu_head *rcu)
{
struct cred *cred = container_of(rcu, struct cred, rcu);
kdebug("put_cred_rcu(%p)", cred);
if (atomic_long_read(&cred->usage) != 0)
panic("CRED: put_cred_rcu() sees %p with usage %ld\n",
cred, atomic_long_read(&cred->usage));
security_cred_free(cred);
key_put(cred->session_keyring);
key_put(cred->process_keyring);
key_put(cred->thread_keyring);
key_put(cred->request_key_auth);
if (cred->group_info)
put_group_info(cred->group_info);
free_uid(cred->user);
if (cred->ucounts)
put_ucounts(cred->ucounts);
put_user_ns(cred->user_ns);
kmem_cache_free(cred_jar, cred);
}
/**
* __put_cred - Destroy a set of credentials
* @cred: The record to release
*
* Destroy a set of credentials on which no references remain.
*/
void __put_cred(struct cred *cred)
{
kdebug("__put_cred(%p{%ld})", cred,
atomic_long_read(&cred->usage));
BUG_ON(atomic_long_read(&cred->usage) != 0);
BUG_ON(cred == current->cred);
BUG_ON(cred == current->real_cred);
if (cred->non_rcu)
put_cred_rcu(&cred->rcu);
else
call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);
/*
* Clean up a task's credentials when it exits
*/
void exit_creds(struct task_struct *tsk)
{
struct cred *real_cred, *cred;
kdebug("exit_creds(%u,%p,%p,{%ld})", tsk->pid, tsk->real_cred, tsk->cred,
atomic_long_read(&tsk->cred->usage));
real_cred = (struct cred *) tsk->real_cred;
tsk->real_cred = NULL;
cred = (struct cred *) tsk->cred;
tsk->cred = NULL;
if (real_cred == cred) {
put_cred_many(cred, 2);
} else {
put_cred(real_cred);
put_cred(cred);
}
#ifdef CONFIG_KEYS_REQUEST_CACHE
key_put(tsk->cached_requested_key);
tsk->cached_requested_key = NULL;
#endif
}
/**
* get_task_cred - Get another task's objective credentials
* @task: The task to query
*
* Get the objective credentials of a task, pinning them so that they can't go
* away. Accessing a task's credentials directly is not permitted.
*
* The caller must also make sure task doesn't get deleted, either by holding a
* ref on task or by holding tasklist_lock to prevent it from being unlinked.
*/
const struct cred *get_task_cred(struct task_struct *task)
{
const struct cred *cred;
rcu_read_lock();
do {
cred = __task_cred((task));
BUG_ON(!cred);
} while (!get_cred_rcu(cred));
rcu_read_unlock();
return cred;
}
EXPORT_SYMBOL(get_task_cred);
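/*
 * Illustrative sketch (not used by this file): a typical caller pins another
 * task's objective credentials, inspects them and drops the reference again.
 * "task" is a hypothetical task_struct pointer the caller already holds.
 *
 *	const struct cred *cred = get_task_cred(task);
 *	kuid_t uid = cred->uid;
 *	put_cred(cred);
 */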
/*
* Allocate blank credentials, such that the credentials can be filled in at a
* later date without risk of ENOMEM.
*/
struct cred *cred_alloc_blank(void)
{
struct cred *new;
new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
atomic_long_set(&new->usage, 1);
if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
return new;
error:
abort_creds(new);
return NULL;
}
/**
* prepare_creds - Prepare a new set of credentials for modification
*
* Prepare a new set of task credentials for modification. A task's creds
* shouldn't generally be modified directly, therefore this function is used to
* prepare a new copy, which the caller then modifies and then commits by
* calling commit_creds().
*
* Preparation involves making a copy of the objective creds for modification.
*
* Returns a pointer to the new creds-to-be if successful, NULL otherwise.
*
* Call commit_creds() or abort_creds() to clean up.
*/
struct cred *prepare_creds(void)
{
struct task_struct *task = current;
const struct cred *old;
struct cred *new;
new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
kdebug("prepare_creds() alloc %p", new);
old = task->cred;
memcpy(new, old, sizeof(struct cred));
new->non_rcu = 0;
atomic_long_set(&new->usage, 1);
get_group_info(new->group_info);
get_uid(new->user);
get_user_ns(new->user_ns);
#ifdef CONFIG_KEYS
key_get(new->session_keyring);
key_get(new->process_keyring);
key_get(new->thread_keyring);
key_get(new->request_key_auth);
#endif
#ifdef CONFIG_SECURITY
new->security = NULL;
#endif
new->ucounts = get_ucounts(new->ucounts);
if (!new->ucounts)
goto error;
if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
return new;
error:
abort_creds(new);
return NULL;
}
EXPORT_SYMBOL(prepare_creds);
/*
* Prepare credentials for current to perform an execve()
* - The caller must hold ->cred_guard_mutex
*/
struct cred *prepare_exec_creds(void)
{
struct cred *new;
new = prepare_creds();
if (!new)
return new;
#ifdef CONFIG_KEYS
/* newly exec'd tasks don't get a thread keyring */
key_put(new->thread_keyring);
new->thread_keyring = NULL;
/* inherit the session keyring; new process keyring */
key_put(new->process_keyring);
new->process_keyring = NULL;
#endif
new->suid = new->fsuid = new->euid;
new->sgid = new->fsgid = new->egid;
return new;
}
/*
* Copy credentials for the new process created by fork()
*
* We share if we can, but under some circumstances we have to generate a new
* set.
*
* The new process gets the current process's subjective credentials as its
* objective and subjective credentials
*/
int copy_creds(struct task_struct *p, u64 clone_flags)
{
struct cred *new;
int ret;
#ifdef CONFIG_KEYS_REQUEST_CACHE
p->cached_requested_key = NULL;
#endif
if (
#ifdef CONFIG_KEYS
!p->cred->thread_keyring &&
#endif
clone_flags & CLONE_THREAD
) {
p->real_cred = get_cred_many(p->cred, 2);
kdebug("share_creds(%p{%ld})",
p->cred, atomic_long_read(&p->cred->usage));
inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
return 0;
}
new = prepare_creds();
if (!new)
return -ENOMEM;
if (clone_flags & CLONE_NEWUSER) {
ret = create_user_ns(new);
if (ret < 0)
goto error_put;
ret = set_cred_ucounts(new);
if (ret < 0)
goto error_put;
}
#ifdef CONFIG_KEYS
/* new threads get their own thread keyrings if their parent already
* had one */
if (new->thread_keyring) {
key_put(new->thread_keyring);
new->thread_keyring = NULL;
if (clone_flags & CLONE_THREAD)
install_thread_keyring_to_cred(new);
}
/* The process keyring is only shared between the threads in a process;
* anything outside of those threads doesn't inherit.
*/
if (!(clone_flags & CLONE_THREAD)) {
key_put(new->process_keyring);
new->process_keyring = NULL;
}
#endif
p->cred = p->real_cred = get_cred(new);
inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
return 0;
error_put:
put_cred(new);
return ret;
}
static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
{
const struct user_namespace *set_ns = set->user_ns;
const struct user_namespace *subset_ns = subset->user_ns;
/* If the two credentials are in the same user namespace see if
* the capabilities of subset are a subset of set.
*/
if (set_ns == subset_ns)
return cap_issubset(subset->cap_permitted, set->cap_permitted);
/* The credentials are in different user namespaces, therefore
* one is a subset of the other only if set is an ancestor of
* subset and set->euid is the owner of subset or of one of
* subset's ancestors.
*/
for (; subset_ns != &init_user_ns; subset_ns = subset_ns->parent) {
if ((set_ns == subset_ns->parent) &&
uid_eq(subset_ns->owner, set->euid))
return true;
}
return false;
}
/**
* commit_creds - Install new credentials upon the current task
* @new: The credentials to be assigned
*
* Install a new set of credentials to the current task, using RCU to replace
* the old set. Both the objective and the subjective credentials pointers are
* updated. This function may not be called if the subjective credentials are
* in an overridden state.
*
* This function eats the caller's reference to the new credentials.
*
* Always returns 0 thus allowing this function to be tail-called at the end
* of, say, sys_setgid().
*/
int commit_creds(struct cred *new)
{
struct task_struct *task = current;
const struct cred *old = task->real_cred;
kdebug("commit_creds(%p{%ld})", new,
atomic_long_read(&new->usage));
BUG_ON(task->cred != old);
BUG_ON(atomic_long_read(&new->usage) < 1);
get_cred(new); /* we will require a ref for the subj creds too */
/* dumpability changes */
if (!uid_eq(old->euid, new->euid) ||
!gid_eq(old->egid, new->egid) ||
!uid_eq(old->fsuid, new->fsuid) ||
!gid_eq(old->fsgid, new->fsgid) ||
!cred_cap_issubset(old, new)) {
if (task->mm)
set_dumpable(task->mm, suid_dumpable);
task->pdeath_signal = 0;
/*
* If a task drops privileges and becomes nondumpable,
* the dumpability change must become visible before
* the credential change; otherwise, a __ptrace_may_access()
* racing with this change may be able to attach to a task it
* shouldn't be able to attach to (as if the task had dropped
* privileges without becoming nondumpable).
* Pairs with a read barrier in __ptrace_may_access().
*/
smp_wmb();
}
/* alter the thread keyring */
if (!uid_eq(new->fsuid, old->fsuid))
key_fsuid_changed(new);
if (!gid_eq(new->fsgid, old->fsgid))
key_fsgid_changed(new);
/* do it
* RLIMIT_NPROC limits on user->processes have already been checked
* in set_user().
*/
if (new->user != old->user || new->user_ns != old->user_ns)
inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
rcu_assign_pointer(task->real_cred, new);
rcu_assign_pointer(task->cred, new);
if (new->user != old->user || new->user_ns != old->user_ns)
dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
/* send notifications */
if (!uid_eq(new->uid, old->uid) ||
!uid_eq(new->euid, old->euid) ||
!uid_eq(new->suid, old->suid) ||
!uid_eq(new->fsuid, old->fsuid))
proc_id_connector(task, PROC_EVENT_UID);
if (!gid_eq(new->gid, old->gid) ||
!gid_eq(new->egid, old->egid) ||
!gid_eq(new->sgid, old->sgid) ||
!gid_eq(new->fsgid, old->fsgid))
proc_id_connector(task, PROC_EVENT_GID);
/* release the old obj and subj refs both */
put_cred_many(old, 2);
return 0;
}
EXPORT_SYMBOL(commit_creds);
/**
* abort_creds - Discard a set of credentials and unlock the current task
* @new: The credentials that were going to be applied
*
* Discard a set of credentials that were under construction and unlock the
* current task.
*/
void abort_creds(struct cred *new)
{
kdebug("abort_creds(%p{%ld})", new,
atomic_long_read(&new->usage));
BUG_ON(atomic_long_read(&new->usage) < 1);
put_cred(new);
}
EXPORT_SYMBOL(abort_creds);
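/*
 * Illustrative sketch of the prepare/commit pattern implemented above (an
 * example, not code used by this file): a syscall that changes an ID
 * prepares a copy of the credentials, edits it and then either commits or
 * aborts it.  "some_kuid" and "some_check_fails" are hypothetical.
 *
 *	struct cred *new = prepare_creds();
 *	if (!new)
 *		return -ENOMEM;
 *	new->fsuid = some_kuid;
 *	if (some_check_fails) {
 *		abort_creds(new);
 *		return -EPERM;
 *	}
 *	return commit_creds(new);	(consumes the reference to "new")
 */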
/**
* cred_fscmp - Compare two credentials with respect to filesystem access.
* @a: The first credential
* @b: The second credential
*
* cred_fscmp() will return zero if both credentials have the same
* fsuid, fsgid, and supplementary groups. That is, if they will both
* provide the same access to files based on mode/uid/gid.
* If the credentials are different, then either -1 or 1 will
* be returned depending on whether @a comes before or after @b
* respectively in an arbitrary, but stable, ordering of credentials.
*
* Return: -1, 0, or 1 depending on comparison
*/
int cred_fscmp(const struct cred *a, const struct cred *b)
{
struct group_info *ga, *gb;
int g;
if (a == b)
return 0;
if (uid_lt(a->fsuid, b->fsuid))
return -1;
if (uid_gt(a->fsuid, b->fsuid))
return 1;
if (gid_lt(a->fsgid, b->fsgid))
return -1;
if (gid_gt(a->fsgid, b->fsgid))
return 1;
ga = a->group_info;
gb = b->group_info;
if (ga == gb)
return 0;
if (ga == NULL)
return -1;
if (gb == NULL)
return 1;
if (ga->ngroups < gb->ngroups)
return -1;
if (ga->ngroups > gb->ngroups)
return 1;
for (g = 0; g < ga->ngroups; g++) {
if (gid_lt(ga->gid[g], gb->gid[g]))
return -1;
if (gid_gt(ga->gid[g], gb->gid[g]))
return 1;
}
return 0;
}
EXPORT_SYMBOL(cred_fscmp);
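/*
 * Illustrative sketch (not used by this file): because cred_fscmp() defines
 * a stable total order, callers can use it both as an equality test and as a
 * comparator for sorted structures.  "cached_cred" is hypothetical.
 *
 *	if (cred_fscmp(cached_cred, current_cred()) == 0)
 *		(the cached entry grants the same file access as the caller)
 */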
int set_cred_ucounts(struct cred *new)
{
struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
/*
* This optimization is needed because alloc_ucounts() uses locks
* for table lookups.
*/
if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid))
return 0;
if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
return -EAGAIN;
new->ucounts = new_ucounts;
put_ucounts(old_ucounts);
return 0;
}
/*
* initialise the credentials stuff
*/
void __init cred_init(void)
{
/* allocate a slab in which we can store credentials */
cred_jar = KMEM_CACHE(cred,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
/**
* prepare_kernel_cred - Prepare a set of credentials for a kernel service
* @daemon: A userspace daemon to be used as a reference
*
* Prepare a set of credentials for a kernel service. This can then be used to
* override a task's own credentials so that work can be done on behalf of that
* task that requires a different subjective context.
*
* @daemon is used to provide a base cred, with the security data derived from
* that; if this is "&init_task", they'll be set to 0, no groups, full
* capabilities, and no keys.
*
* The caller may change these controls afterwards if desired.
*
* Returns the new credentials or NULL if out of memory.
*/
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
const struct cred *old;
struct cred *new;
if (WARN_ON_ONCE(!daemon))
return NULL;
new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
kdebug("prepare_kernel_cred() alloc %p", new);
old = get_task_cred(daemon);
*new = *old;
new->non_rcu = 0;
atomic_long_set(&new->usage, 1);
get_uid(new->user);
get_user_ns(new->user_ns);
get_group_info(new->group_info);
#ifdef CONFIG_KEYS
new->session_keyring = NULL;
new->process_keyring = NULL;
new->thread_keyring = NULL;
new->request_key_auth = NULL;
new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
#endif
#ifdef CONFIG_SECURITY
new->security = NULL;
#endif
new->ucounts = get_ucounts(new->ucounts);
if (!new->ucounts)
goto error;
if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
put_cred(old);
return new;
error:
put_cred(new);
put_cred(old);
return NULL;
}
EXPORT_SYMBOL(prepare_kernel_cred);
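/*
 * Illustrative sketch (not code from this file): kernel services commonly
 * pair prepare_kernel_cred() with override_creds()/revert_creds() to perform
 * work under a different subjective context.  The work itself is elided.
 *
 *	const struct cred *saved;
 *	struct cred *kcred = prepare_kernel_cred(&init_task);
 *	if (!kcred)
 *		return -ENOMEM;
 *	saved = override_creds(kcred);
 *	(... do the privileged work ...)
 *	revert_creds(saved);
 *	put_cred(kcred);
 */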
/**
* set_security_override - Set the security ID in a set of credentials
* @new: The credentials to alter
* @secid: The LSM security ID to set
*
* Set the LSM security ID in a set of credentials so that the subjective
* security is overridden when an alternative set of credentials is used.
*/
int set_security_override(struct cred *new, u32 secid)
{
return security_kernel_act_as(new, secid);
}
EXPORT_SYMBOL(set_security_override);
/**
* set_security_override_from_ctx - Set the security ID in a set of credentials
* @new: The credentials to alter
* @secctx: The LSM security context to generate the security ID from.
*
* Set the LSM security ID in a set of credentials so that the subjective
* security is overridden when an alternative set of credentials is used. The
* security ID is specified in string form as a security context to be
* interpreted by the LSM.
*/
int set_security_override_from_ctx(struct cred *new, const char *secctx)
{
u32 secid;
int ret;
ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
if (ret < 0)
return ret;
return set_security_override(new, secid);
}
EXPORT_SYMBOL(set_security_override_from_ctx);
/**
* set_create_files_as - Set the LSM file create context in a set of credentials
* @new: The credentials to alter
* @inode: The inode to take the context from
*
* Change the LSM file creation context in a set of credentials to be the same
* as the object context of the specified inode, so that the new inodes have
* the same MAC context as that inode.
*/
int set_create_files_as(struct cred *new, struct inode *inode)
{
if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
return -EINVAL;
new->fsuid = inode->i_uid;
new->fsgid = inode->i_gid;
return security_kernel_create_files_as(new, inode);
}
EXPORT_SYMBOL(set_create_files_as);
// SPDX-License-Identifier: GPL-2.0
/*
* kobject.c - library routines for handling generic kernel objects
*
* Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2007 Novell Inc.
*
* Please see the file Documentation/core-api/kobject.rst for critical information
* about using the kobject interface.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/random.h>
/**
* kobject_namespace() - Return @kobj's namespace tag.
* @kobj: kobject in question
*
* Returns namespace tag of @kobj if its parent has namespace ops enabled
* and thus @kobj should have a namespace tag associated with it. Returns
* %NULL otherwise.
*/
const void *kobject_namespace(const struct kobject *kobj)
{
const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj);
if (!ns_ops || ns_ops->type == KOBJ_NS_TYPE_NONE)
return NULL;
return kobj->ktype->namespace(kobj);
}
/**
* kobject_get_ownership() - Get sysfs ownership data for @kobj.
* @kobj: kobject in question
* @uid: kernel user ID for sysfs objects
* @gid: kernel group ID for sysfs objects
*
* Returns initial uid/gid pair that should be used when creating sysfs
* representation of given kobject. Normally used to adjust ownership of
* objects in a container.
*/
void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
*uid = GLOBAL_ROOT_UID;
*gid = GLOBAL_ROOT_GID;
if (kobj->ktype->get_ownership)
kobj->ktype->get_ownership(kobj, uid, gid);
}
static bool kobj_ns_type_is_valid(enum kobj_ns_type type)
{
if ((type <= KOBJ_NS_TYPE_NONE) || (type >= KOBJ_NS_TYPES))
return false;
return true;
}
static int create_dir(struct kobject *kobj)
{
const struct kobj_type *ktype = get_ktype(kobj);
const struct kobj_ns_type_operations *ops;
int error;
error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
if (error)
return error;
if (ktype) {
error = sysfs_create_groups(kobj, ktype->default_groups);
if (error) {
sysfs_remove_dir(kobj);
return error;
}
}
/*
* @kobj->sd may be deleted by an ancestor going away. Hold an
* extra reference so that it stays until @kobj is gone.
*/
sysfs_get(kobj->sd);
/*
* If @kobj has ns_ops, its children need to be filtered based on
* their namespace tags. Enable namespace support on @kobj->sd.
*/
ops = kobj_child_ns_ops(kobj);
if (ops) {
BUG_ON(!kobj_ns_type_is_valid(ops->type));
BUG_ON(!kobj_ns_type_registered(ops->type));
sysfs_enable_ns(kobj->sd);
}
return 0;
}
static int get_kobj_path_length(const struct kobject *kobj)
{
int length = 1;
const struct kobject *parent = kobj;
/* walk up the ancestors until we hit the one pointing to the
* root.
* Add 1 to strlen for leading '/' of each level.
*/
do {
if (kobject_name(parent) == NULL)
return 0;
length += strlen(kobject_name(parent)) + 1;
parent = parent->parent;
} while (parent);
return length;
}
static int fill_kobj_path(const struct kobject *kobj, char *path, int length)
{
const struct kobject *parent;
--length;
for (parent = kobj; parent; parent = parent->parent) {
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
if (length <= 0)
return -EINVAL;
memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
pr_debug("'%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
kobj, __func__, path);
return 0;
}
/**
* kobject_get_path() - Allocate memory and fill in the path for @kobj.
* @kobj: kobject in question, with which to build the path
* @gfp_mask: the allocation type used to allocate the path
*
* Return: The newly allocated memory, caller must free with kfree().
*/
char *kobject_get_path(const struct kobject *kobj, gfp_t gfp_mask)
{
char *path;
int len;
retry:
len = get_kobj_path_length(kobj);
if (len == 0)
return NULL;
path = kzalloc(len, gfp_mask);
if (!path)
return NULL;
if (fill_kobj_path(kobj, path, len)) {
kfree(path);
goto retry;
}
return path;
}
EXPORT_SYMBOL_GPL(kobject_get_path);
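/*
 * Illustrative sketch (not used by this file): callers typically use the
 * returned path for logging or uevent data and must free it with kfree().
 *
 *	char *path = kobject_get_path(kobj, GFP_KERNEL);
 *	if (path) {
 *		pr_info("object lives at %s\n", path);
 *		kfree(path);
 *	}
 */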
/* add the kobject to its kset's list */
static void kobj_kset_join(struct kobject *kobj)
{
if (!kobj->kset)
return;
kset_get(kobj->kset);
spin_lock(&kobj->kset->list_lock);
list_add_tail(&kobj->entry, &kobj->kset->list);
spin_unlock(&kobj->kset->list_lock);
}
/* remove the kobject from its kset's list */
static void kobj_kset_leave(struct kobject *kobj)
{
if (!kobj->kset)
return;
spin_lock(&kobj->kset->list_lock);
list_del_init(&kobj->entry);
spin_unlock(&kobj->kset->list_lock);
kset_put(kobj->kset);
}
static void kobject_init_internal(struct kobject *kobj)
{
if (!kobj)
return;
kref_init(&kobj->kref);
INIT_LIST_HEAD(&kobj->entry);
kobj->state_in_sysfs = 0;
kobj->state_add_uevent_sent = 0;
kobj->state_remove_uevent_sent = 0;
kobj->state_initialized = 1;
}
static int kobject_add_internal(struct kobject *kobj)
{
int error = 0;
struct kobject *parent;
if (!kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
WARN(1,
"kobject: (%p): attempted to be registered with empty name!\n",
kobj);
return -EINVAL;
}
parent = kobject_get(kobj->parent);
/* join kset if set, use it as parent if we do not already have one */
if (kobj->kset) {
if (!parent)
parent = kobject_get(&kobj->kset->kobj);
kobj_kset_join(kobj);
kobj->parent = parent;
}
pr_debug("'%s' (%p): %s: parent: '%s', set: '%s'\n",
kobject_name(kobj), kobj, __func__,
parent ? kobject_name(parent) : "<NULL>",
kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
error = create_dir(kobj);
if (error) {
kobj_kset_leave(kobj);
kobject_put(parent);
kobj->parent = NULL;
/* be noisy on error issues */
if (error == -EEXIST)
pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
__func__, kobject_name(kobj));
else
pr_err("%s failed for %s (error: %d parent: %s)\n",
__func__, kobject_name(kobj), error,
parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
return error;
}
/**
* kobject_set_name_vargs() - Set the name of a kobject.
* @kobj: struct kobject to set the name of
* @fmt: format string used to build the name
* @vargs: vargs to format the string.
*/
int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
const char *s;
if (kobj->name && !fmt)
return 0;
s = kvasprintf_const(GFP_KERNEL, fmt, vargs);
if (!s)
return -ENOMEM;
/*
* ewww... some of these buggers have '/' in the name ... If
* that's the case, we need to make sure we have an actual
* allocated copy to modify, since kvasprintf_const may have
* returned something from .rodata.
*/
if (strchr(s, '/')) {
char *t;
t = kstrdup(s, GFP_KERNEL);
kfree_const(s);
if (!t)
return -ENOMEM;
s = strreplace(t, '/', '!');
}
kfree_const(kobj->name);
kobj->name = s;
return 0;
}
/**
* kobject_set_name() - Set the name of a kobject.
* @kobj: struct kobject to set the name of
* @fmt: format string used to build the name
*
* This sets the name of the kobject. If you have already added the
* kobject to the system, you must call kobject_rename() in order to
* change the name of the kobject.
*/
int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
va_list vargs;
int retval;
va_start(vargs, fmt);
retval = kobject_set_name_vargs(kobj, fmt, vargs);
va_end(vargs);
return retval;
}
EXPORT_SYMBOL(kobject_set_name);
/**
* kobject_init() - Initialize a kobject structure.
* @kobj: pointer to the kobject to initialize
* @ktype: pointer to the ktype for this kobject.
*
* This function will properly initialize a kobject such that it can then
* be passed to the kobject_add() call.
*
* After this function is called, the kobject MUST be cleaned up by a call
* to kobject_put(), not by a call to kfree directly to ensure that all of
* the memory is cleaned up properly.
*/
void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
{
char *err_str;
if (!kobj) {
err_str = "invalid kobject pointer!";
goto error;
}
if (!ktype) {
err_str = "must have a ktype to be initialized properly!\n";
goto error;
}
if (kobj->state_initialized) {
/* do not error out as sometimes we can recover */
pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
kobj);
dump_stack_lvl(KERN_ERR);
}
kobject_init_internal(kobj);
kobj->ktype = ktype;
return;
error:
pr_err("kobject (%p): %s\n", kobj, err_str); dump_stack_lvl(KERN_ERR);}
EXPORT_SYMBOL(kobject_init);
static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
struct kobject *parent,
const char *fmt, va_list vargs)
{
int retval;
retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
pr_err("can not set name properly!\n");
return retval;
}
kobj->parent = parent;
return kobject_add_internal(kobj);
}
/**
* kobject_add() - The main kobject add function.
* @kobj: the kobject to add
* @parent: pointer to the parent of the kobject.
* @fmt: format to name the kobject with.
*
* The kobject name is set and added to the kobject hierarchy in this
* function.
*
* If @parent is set, then the parent of the @kobj will be set to it.
* If @parent is NULL, then the parent of the @kobj will be set to the
* kobject associated with the kset assigned to this kobject. If no kset
* is assigned to the kobject, then the kobject will be located in the
* root of the sysfs tree.
*
* Note, no "add" uevent will be created with this call, the caller should set
* up all of the necessary sysfs files for the object and then call
* kobject_uevent() with the UEVENT_ADD parameter to ensure that
* userspace is properly notified of this kobject's creation.
*
* Return: If this function returns an error, kobject_put() must be
* called to properly clean up the memory associated with the
* object. Under no instance should the kobject that is passed
* to this function be directly freed with a call to kfree(),
* that can leak memory.
*
* If this function returns success, kobject_put() must also be called
* in order to properly clean up the memory associated with the object.
*
* In short, once this function is called, kobject_put() MUST be called
* when the use of the object is finished in order to properly free
* everything.
*/
int kobject_add(struct kobject *kobj, struct kobject *parent,
const char *fmt, ...)
{
va_list args;
int retval;
if (!kobj)
return -EINVAL;
if (!kobj->state_initialized) {
pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
kobject_name(kobj), kobj);
dump_stack_lvl(KERN_ERR);
return -EINVAL;
}
va_start(args, fmt);
retval = kobject_add_varg(kobj, parent, fmt, args);
va_end(args);
return retval;
}
EXPORT_SYMBOL(kobject_add);
/**
* kobject_init_and_add() - Initialize a kobject structure and add it to
* the kobject hierarchy.
* @kobj: pointer to the kobject to initialize
* @ktype: pointer to the ktype for this kobject.
* @parent: pointer to the parent of this kobject.
* @fmt: the name of the kobject.
*
* This function combines the call to kobject_init() and kobject_add().
*
* If this function returns an error, kobject_put() must be called to
* properly clean up the memory associated with the object. This is the
* same type of error handling after a call to kobject_add() and kobject
* lifetime rules are the same here.
*/
int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
struct kobject *parent, const char *fmt, ...)
{
va_list args;
int retval;
kobject_init(kobj, ktype);
va_start(args, fmt);
retval = kobject_add_varg(kobj, parent, fmt, args);
va_end(args);
return retval;
}
EXPORT_SYMBOL_GPL(kobject_init_and_add);
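/*
 * Illustrative sketch (not code from this file): a typical embedded-kobject
 * user defines a release function and a ktype, then initializes and adds the
 * object in one call.  "struct foo", foo_release() and foo_ktype are
 * hypothetical names.
 *
 *	struct foo {
 *		struct kobject kobj;
 *	};
 *
 *	static void foo_release(struct kobject *kobj)
 *	{
 *		kfree(container_of(kobj, struct foo, kobj));
 *	}
 *
 *	static const struct kobj_type foo_ktype = {
 *		.release = foo_release,
 *		.sysfs_ops = &kobj_sysfs_ops,
 *	};
 *
 *	err = kobject_init_and_add(&foo->kobj, &foo_ktype, parent, "foo%d", id);
 *	if (err)
 *		kobject_put(&foo->kobj);
 */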
/**
* kobject_rename() - Change the name of an object.
* @kobj: object in question.
* @new_name: object's new name
*
* It is the responsibility of the caller to provide mutual
* exclusion between two different calls of kobject_rename
* on the same kobject and to ensure that new_name is valid and
* won't conflict with other kobjects.
*/
int kobject_rename(struct kobject *kobj, const char *new_name)
{
int error = 0;
const char *devpath = NULL;
const char *dup_name = NULL, *name;
char *devpath_string = NULL;
char *envp[2];
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
if (!kobj->parent) {
kobject_put(kobj);
return -EINVAL;
}
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
error = -ENOMEM;
goto out;
}
devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
if (!devpath_string) {
error = -ENOMEM;
goto out;
}
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
name = dup_name = kstrdup_const(new_name, GFP_KERNEL);
if (!name) {
error = -ENOMEM;
goto out;
}
error = sysfs_rename_dir_ns(kobj, new_name, kobject_namespace(kobj));
if (error)
goto out;
/* Install the new kobject name */
dup_name = kobj->name;
kobj->name = name;
/* This function is mostly/only used for network interfaces.
* Some hotplug packages track interfaces by their name and
* therefore want to know when the name is changed by the user. */
kobject_uevent_env(kobj, KOBJ_MOVE, envp);
out:
kfree_const(dup_name);
kfree(devpath_string);
kfree(devpath);
kobject_put(kobj);
return error;
}
EXPORT_SYMBOL_GPL(kobject_rename);
/**
* kobject_move() - Move object to another parent.
* @kobj: object in question.
* @new_parent: object's new parent (can be NULL)
*/
int kobject_move(struct kobject *kobj, struct kobject *new_parent)
{
int error;
struct kobject *old_parent;
const char *devpath = NULL;
char *devpath_string = NULL;
char *envp[2];
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
new_parent = kobject_get(new_parent);
if (!new_parent) {
if (kobj->kset)
new_parent = kobject_get(&kobj->kset->kobj);
}
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
error = -ENOMEM;
goto out;
}
devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
if (!devpath_string) {
error = -ENOMEM;
goto out;
}
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
error = sysfs_move_dir_ns(kobj, new_parent, kobject_namespace(kobj));
if (error)
goto out;
old_parent = kobj->parent;
kobj->parent = new_parent;
new_parent = NULL;
kobject_put(old_parent);
kobject_uevent_env(kobj, KOBJ_MOVE, envp);
out:
kobject_put(new_parent);
kobject_put(kobj);
kfree(devpath_string);
kfree(devpath);
return error;
}
EXPORT_SYMBOL_GPL(kobject_move);
static void __kobject_del(struct kobject *kobj)
{
struct kernfs_node *sd;
const struct kobj_type *ktype;
sd = kobj->sd;
ktype = get_ktype(kobj);
if (ktype)
sysfs_remove_groups(kobj, ktype->default_groups);
/* send "remove" if the caller did not do it but sent "add" */
if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
pr_debug("'%s' (%p): auto cleanup 'remove' event\n",
kobject_name(kobj), kobj);
kobject_uevent(kobj, KOBJ_REMOVE);
}
sysfs_remove_dir(kobj);
sysfs_put(sd);
kobj->state_in_sysfs = 0;
kobj_kset_leave(kobj);
kobj->parent = NULL;
}
/**
* kobject_del() - Unlink kobject from hierarchy.
* @kobj: object.
*
* This is the function that should be called to delete an object
* successfully added via kobject_add().
*/
void kobject_del(struct kobject *kobj)
{
struct kobject *parent;
if (!kobj)
return;
parent = kobj->parent;
__kobject_del(kobj);
kobject_put(parent);
}
EXPORT_SYMBOL(kobject_del);
/**
* kobject_get() - Increment refcount for object.
* @kobj: object.
*/
struct kobject *kobject_get(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
WARN(1, KERN_WARNING
"kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n",
kobject_name(kobj), kobj);
kref_get(&kobj->kref);
}
return kobj;
}
EXPORT_SYMBOL(kobject_get);
struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
{
if (!kobj)
return NULL;
if (!kref_get_unless_zero(&kobj->kref))
kobj = NULL;
return kobj;
}
EXPORT_SYMBOL(kobject_get_unless_zero);
/*
* kobject_cleanup - free kobject resources.
* @kobj: object to cleanup
*/
static void kobject_cleanup(struct kobject *kobj)
{
struct kobject *parent = kobj->parent;
const struct kobj_type *t = get_ktype(kobj);
const char *name = kobj->name;
pr_debug("'%s' (%p): %s, parent %p\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
kobject_name(kobj), kobj);
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("'%s' (%p): auto cleanup kobject_del\n",
kobject_name(kobj), kobj);
__kobject_del(kobj);
} else {
/* avoid dropping the parent reference unnecessarily */
parent = NULL;
}
if (t && t->release) {
pr_debug("'%s' (%p): calling ktype release\n",
kobject_name(kobj), kobj);
t->release(kobj);
}
/* free name if we allocated it */
if (name) {
pr_debug("'%s': free name\n", name);
kfree_const(name);
}
kobject_put(parent);
}
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
static void kobject_delayed_cleanup(struct work_struct *work)
{
kobject_cleanup(container_of(to_delayed_work(work),
struct kobject, release));
}
#endif
static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
unsigned long delay = HZ + HZ * get_random_u32_below(4);
pr_info("'%s' (%p): %s, parent %p (delayed %ld)\n",
kobject_name(kobj), kobj, __func__, kobj->parent, delay);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
schedule_delayed_work(&kobj->release, delay);
#else
kobject_cleanup(kobj);
#endif
}
/**
* kobject_put() - Decrement refcount for object.
* @kobj: object.
*
* Decrement the refcount, and if 0, call kobject_cleanup().
*/
void kobject_put(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
WARN(1, KERN_WARNING
"kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n",
kobject_name(kobj), kobj);
kref_put(&kobj->kref, kobject_release);
}
}
EXPORT_SYMBOL(kobject_put);
static void dynamic_kobj_release(struct kobject *kobj)
{
pr_debug("(%p): %s\n", kobj, __func__);
kfree(kobj);
}
static const struct kobj_type dynamic_kobj_ktype = {
.release = dynamic_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
/**
* kobject_create() - Create a struct kobject dynamically.
*
* This function creates a kobject structure dynamically and sets it up
* to be a "dynamic" kobject with a default release function set up.
*
* If the kobject was not able to be created, NULL will be returned.
* The kobject structure returned from here must be cleaned up with a
* call to kobject_put() and not kfree(), as kobject_init() has
* already been called on this structure.
*/
static struct kobject *kobject_create(void)
{
struct kobject *kobj;
kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
if (!kobj)
return NULL;
kobject_init(kobj, &dynamic_kobj_ktype);
return kobj;
}
/**
* kobject_create_and_add() - Create a struct kobject dynamically and
* register it with sysfs.
* @name: the name for the kobject
* @parent: the parent kobject of this kobject, if any.
*
* This function creates a kobject structure dynamically and registers it
* with sysfs. When you are finished with this structure, call
* kobject_put() and the structure will be dynamically freed when
* it is no longer being used.
*
* If the kobject was not able to be created, NULL will be returned.
*/
struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
{
struct kobject *kobj;
int retval;
kobj = kobject_create();
if (!kobj)
return NULL;
retval = kobject_add(kobj, parent, "%s", name);
if (retval) {
pr_warn("%s: kobject_add error: %d\n", __func__, retval);
kobject_put(kobj);
kobj = NULL;
}
return kobj;
}
EXPORT_SYMBOL_GPL(kobject_create_and_add);
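/*
 * Illustrative sketch (not code from this file): creating a plain directory
 * under /sys/kernel and tearing it down again.  "example" is a hypothetical
 * name; kernel_kobj is the /sys/kernel kobject exported by the core.
 *
 *	struct kobject *example_kobj;
 *
 *	example_kobj = kobject_create_and_add("example", kernel_kobj);
 *	if (!example_kobj)
 *		return -ENOMEM;
 *	(... create attribute files below example_kobj ...)
 *	kobject_put(example_kobj);
 */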
/**
* kset_init() - Initialize a kset for use.
* @k: kset
*/
void kset_init(struct kset *k)
{
kobject_init_internal(&k->kobj);
INIT_LIST_HEAD(&k->list);
spin_lock_init(&k->list_lock);
}
/* default kobject attribute operations */
static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->show)
ret = kattr->show(kobj, kattr, buf);
return ret;
}
static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->store)
ret = kattr->store(kobj, kattr, buf, count);
return ret;
}
const struct sysfs_ops kobj_sysfs_ops = {
.show = kobj_attr_show,
.store = kobj_attr_store,
};
EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
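/*
 * Illustrative sketch (not code from this file): attributes served through
 * kobj_sysfs_ops are declared as struct kobj_attribute with show/store
 * callbacks, often via the __ATTR_RO()/__ATTR_RW() helpers.  "example" and
 * example_value are hypothetical.
 *
 *	static ssize_t example_show(struct kobject *kobj,
 *				    struct kobj_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", example_value);
 *	}
 *
 *	static struct kobj_attribute example_attr = __ATTR_RO(example);
 */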
/**
* kset_register() - Initialize and add a kset.
* @k: kset.
*
* NOTE: On error, the kset.kobj.name allocated by kobject_set_name()
* is freed; it cannot be used any more.
*/
int kset_register(struct kset *k)
{
int err;
if (!k)
return -EINVAL;
if (!k->kobj.ktype) {
pr_err("must have a ktype to be initialized properly!\n");
return -EINVAL;
}
kset_init(k);
err = kobject_add_internal(&k->kobj);
if (err) {
kfree_const(k->kobj.name);
/* Set it to NULL to avoid accessing bad pointer in callers. */
k->kobj.name = NULL;
return err;
}
kobject_uevent(&k->kobj, KOBJ_ADD);
return 0;
}
EXPORT_SYMBOL(kset_register);
/**
* kset_unregister() - Remove a kset.
* @k: kset.
*/
void kset_unregister(struct kset *k)
{
if (!k)
return;
kobject_del(&k->kobj);
kobject_put(&k->kobj);
}
EXPORT_SYMBOL(kset_unregister);
/**
* kset_find_obj() - Search for object in kset.
* @kset: kset we're looking in.
* @name: object's name.
*
* Lock kset via @kset->subsys, and iterate over @kset->list,
* looking for a matching kobject. If matching object is found
* take a reference and return the object.
*/
struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
struct kobject *k;
struct kobject *ret = NULL;
spin_lock(&kset->list_lock);
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
ret = kobject_get_unless_zero(k);
break;
}
}
spin_unlock(&kset->list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(kset_find_obj);
static void kset_release(struct kobject *kobj)
{
struct kset *kset = container_of(kobj, struct kset, kobj);
pr_debug("'%s' (%p): %s\n",
kobject_name(kobj), kobj, __func__);
kfree(kset);
}
static void kset_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
if (kobj->parent)
kobject_get_ownership(kobj->parent, uid, gid);
}
static const struct kobj_type kset_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = kset_release,
.get_ownership = kset_get_ownership,
};
/**
* kset_create() - Create a struct kset dynamically.
*
* @name: the name for the kset
* @uevent_ops: a struct kset_uevent_ops for the kset
* @parent_kobj: the parent kobject of this kset, if any.
*
* This function creates a kset structure dynamically. This structure can
* then be registered with the system and show up in sysfs with a call to
* kset_register(). When you are finished with this structure, if
* kset_register() has been called, call kset_unregister() and the
* structure will be dynamically freed when it is no longer being used.
*
* If the kset was not able to be created, NULL will be returned.
*/
static struct kset *kset_create(const char *name,
const struct kset_uevent_ops *uevent_ops,
struct kobject *parent_kobj)
{
struct kset *kset;
int retval;
kset = kzalloc(sizeof(*kset), GFP_KERNEL);
if (!kset)
return NULL;
retval = kobject_set_name(&kset->kobj, "%s", name);
if (retval) {
kfree(kset);
return NULL;
}
kset->uevent_ops = uevent_ops;
kset->kobj.parent = parent_kobj;
/*
* The kobject of this kset will have a type of kset_ktype and belong to
* no kset itself. That way we can properly free it when it is
* finished being used.
*/
kset->kobj.ktype = &kset_ktype;
kset->kobj.kset = NULL;
return kset;
}
/**
* kset_create_and_add() - Create a struct kset dynamically and add it to sysfs.
*
* @name: the name for the kset
* @uevent_ops: a struct kset_uevent_ops for the kset
* @parent_kobj: the parent kobject of this kset, if any.
*
* This function creates a kset structure dynamically and registers it
* with sysfs. When you are finished with this structure, call
* kset_unregister() and the structure will be dynamically freed when it
* is no longer being used.
*
* If the kset was not able to be created, NULL will be returned.
*/
struct kset *kset_create_and_add(const char *name,
const struct kset_uevent_ops *uevent_ops,
struct kobject *parent_kobj)
{
struct kset *kset;
int error;
kset = kset_create(name, uevent_ops, parent_kobj);
if (!kset)
return NULL;
error = kset_register(kset);
if (error) {
kfree(kset);
return NULL;
}
return kset;
}
EXPORT_SYMBOL_GPL(kset_create_and_add);
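/*
 * Illustrative sketch (not code from this file): registering a kset so that
 * child kobjects appear underneath it in sysfs.  The name and parent are
 * hypothetical choices.
 *
 *	struct kset *example_kset;
 *
 *	example_kset = kset_create_and_add("example_set", NULL, kernel_kobj);
 *	if (!example_kset)
 *		return -ENOMEM;
 *	(... add kobjects with kobj.kset = example_kset ...)
 *	kset_unregister(example_kset);
 */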
static DEFINE_SPINLOCK(kobj_ns_type_lock);
static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
{
enum kobj_ns_type type = ops->type;
int error;
spin_lock(&kobj_ns_type_lock);
error = -EINVAL;
if (!kobj_ns_type_is_valid(type))
goto out;
error = -EBUSY;
if (kobj_ns_ops_tbl[type])
goto out;
error = 0;
kobj_ns_ops_tbl[type] = ops;
out:
spin_unlock(&kobj_ns_type_lock);
return error;
}
int kobj_ns_type_registered(enum kobj_ns_type type)
{
int registered = 0;
spin_lock(&kobj_ns_type_lock);
if (kobj_ns_type_is_valid(type))
registered = kobj_ns_ops_tbl[type] != NULL;
spin_unlock(&kobj_ns_type_lock);
return registered;
}
const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent)
{
const struct kobj_ns_type_operations *ops = NULL;
if (parent && parent->ktype && parent->ktype->child_ns_type)
ops = parent->ktype->child_ns_type(parent);
return ops;
}
const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj)
{
return kobj_child_ns_ops(kobj->parent);
}
bool kobj_ns_current_may_mount(enum kobj_ns_type type)
{
bool may_mount = true;
spin_lock(&kobj_ns_type_lock);
if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
may_mount = kobj_ns_ops_tbl[type]->current_may_mount();
spin_unlock(&kobj_ns_type_lock);
return may_mount;
}
void *kobj_ns_grab_current(enum kobj_ns_type type)
{
void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->grab_current_ns();
spin_unlock(&kobj_ns_type_lock);
return ns;
}
EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
spin_lock(&kobj_ns_type_lock);
if (kobj_ns_type_is_valid(type) &&
kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
kobj_ns_ops_tbl[type]->drop_ns(ns);
spin_unlock(&kobj_ns_type_lock);
}
EXPORT_SYMBOL_GPL(kobj_ns_drop);
// SPDX-License-Identifier: GPL-2.0
/*
* linux/ipc/namespace.c
* Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
*/
#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/nstree.h>
#include <linux/sched/task.h>
#include "util.h"
/*
* The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
*/
static void free_ipc(struct work_struct *unused);
static DECLARE_WORK(free_ipc_work, free_ipc);
static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}
static void dec_ipc_namespaces(struct ucounts *ucounts)
{
dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}
static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
struct ipc_namespace *old_ns)
{
struct ipc_namespace *ns;
struct ucounts *ucounts;
int err;
err = -ENOSPC;
again:
ucounts = inc_ipc_namespaces(user_ns);
if (!ucounts) {
/*
* IPC namespaces are freed asynchronously, by free_ipc_work.
* If frees were pending, flush_work will wait, and
* return true. Fail the allocation if no frees are pending.
*/
if (flush_work(&free_ipc_work))
goto again;
goto fail;
}
err = -ENOMEM;
ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
if (ns == NULL)
goto fail_dec;
err = ns_common_init(ns);
if (err)
goto fail_free;
ns->user_ns = get_user_ns(user_ns);
ns->ucounts = ucounts;
err = mq_init_ns(ns);
if (err)
goto fail_put;
err = -ENOMEM;
if (!setup_mq_sysctls(ns))
goto fail_put;
if (!setup_ipc_sysctls(ns))
goto fail_mq;
err = msg_init_ns(ns);
if (err)
goto fail_ipc;
sem_init_ns(ns);
shm_init_ns(ns);
ns_tree_add(ns);
return ns;
fail_ipc:
retire_ipc_sysctls(ns);
fail_mq:
retire_mq_sysctls(ns);
fail_put:
put_user_ns(ns->user_ns);
ns_common_free(ns);
fail_free:
kfree(ns);
fail_dec:
dec_ipc_namespaces(ucounts);
fail:
return ERR_PTR(err);
}
struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns)
{
if (!(flags & CLONE_NEWIPC))
return get_ipc_ns(ns);
return create_ipc_ns(user_ns, ns);
}
/*
* free_ipcs - free all ipcs of one type
* @ns: the namespace to remove the ipcs from
* @ids: the table of ipcs to free
* @free: the function called to free each individual ipc
*
* Called for each kind of ipc when an ipc_namespace exits.
*/
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
struct kern_ipc_perm *perm;
int next_id;
int total, in_use;
down_write(&ids->rwsem);
in_use = ids->in_use;
for (total = 0, next_id = 0; total < in_use; next_id++) {
perm = idr_find(&ids->ipcs_idr, next_id);
if (perm == NULL)
continue;
rcu_read_lock();
ipc_lock_object(perm);
free(ns, perm);
total++;
}
up_write(&ids->rwsem);
}
static void free_ipc_ns(struct ipc_namespace *ns)
{
/*
* Caller needs to wait for an RCU grace period to have passed
* after making the mount point inaccessible to new accesses.
*/
mntput(ns->mq_mnt);
sem_exit_ns(ns);
msg_exit_ns(ns);
shm_exit_ns(ns);
retire_mq_sysctls(ns);
retire_ipc_sysctls(ns);
dec_ipc_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
ns_common_free(ns);
kfree(ns);
}
static LLIST_HEAD(free_ipc_list);
static void free_ipc(struct work_struct *unused)
{
struct llist_node *node = llist_del_all(&free_ipc_list);
struct ipc_namespace *n, *t;
llist_for_each_entry_safe(n, t, node, mnt_llist)
mnt_make_shortterm(n->mq_mnt);
/* Wait for any last users to have gone away. */
synchronize_rcu();
llist_for_each_entry_safe(n, t, node, mnt_llist)
free_ipc_ns(n);
}
/*
* put_ipc_ns - drop a reference to an ipc namespace.
* @ns: the namespace to put
*
* If this is the last task in the namespace exiting, and
* it is dropping the refcount to 0, then it can race with
* a task in another ipc namespace but in a mounts namespace
* which has this ipcns's mqueuefs mounted, doing some action
* with one of the mqueuefs files. That can raise the refcount.
* So dropping the refcount, and raising the refcount when
* accessing it through the VFS, are protected with mq_lock.
*
* (Clearly, a task raising the refcount on its own ipc_ns
* needn't take mq_lock since it can't race with the last task
* in the ipcns exiting).
*/
void put_ipc_ns(struct ipc_namespace *ns)
{
if (ns_ref_put_and_lock(ns, &mq_lock)) {
mq_clear_sbinfo(ns);
spin_unlock(&mq_lock);
ns_tree_remove(ns);
if (llist_add(&ns->mnt_llist, &free_ipc_list))
schedule_work(&free_ipc_work);
}
}
static struct ns_common *ipcns_get(struct task_struct *task)
{
struct ipc_namespace *ns = NULL;
struct nsproxy *nsproxy;
task_lock(task);
nsproxy = task->nsproxy;
if (nsproxy)
ns = get_ipc_ns(nsproxy->ipc_ns);
task_unlock(task);
return ns ? &ns->ns : NULL;
}
static void ipcns_put(struct ns_common *ns)
{
return put_ipc_ns(to_ipc_ns(ns));
}
static int ipcns_install(struct nsset *nsset, struct ns_common *new)
{
struct nsproxy *nsproxy = nsset->nsproxy;
struct ipc_namespace *ns = to_ipc_ns(new);
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
return -EPERM;
put_ipc_ns(nsproxy->ipc_ns);
nsproxy->ipc_ns = get_ipc_ns(ns);
return 0;
}
static struct user_namespace *ipcns_owner(struct ns_common *ns)
{
return to_ipc_ns(ns)->user_ns;
}
const struct proc_ns_operations ipcns_operations = {
.name = "ipc",
.get = ipcns_get,
.put = ipcns_put,
.install = ipcns_install,
.owner = ipcns_owner,
};
// SPDX-License-Identifier: GPL-2.0
/*
* x86 single-step support code, common to 32-bit and 64-bit.
*/
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/mmu_context.h>
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
unsigned long addr, seg;
addr = regs->ip;
seg = regs->cs;
if (v8086_mode(regs)) {
addr = (addr & 0xffff) + (seg << 4);
return addr;
}
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
* We'll assume that the code segments in the GDT
* are all zero-based. That is largely true: the
* TLS segments are used for data, and the PNPBIOS
* and APM bios ones we just ignore here.
*/
if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
struct desc_struct *desc;
unsigned long base;
seg >>= 3;
mutex_lock(&child->mm->context.lock);
if (unlikely(!child->mm->context.ldt ||
seg >= child->mm->context.ldt->nr_entries))
addr = -1L; /* bogus selector, access would fault */
else {
desc = &child->mm->context.ldt->entries[seg];
base = get_desc_base(desc);
/* 16-bit code segment? */
if (!desc->d)
addr &= 0xffff;
addr += base;
}
mutex_unlock(&child->mm->context.lock);
}
#endif
return addr;
}
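/*
 * Worked example for the vm86 branch above (illustrative only): with
 * regs->cs == 0x1234 and regs->ip == 0x0010, the linear address is
 * (0x1234 << 4) + (0x0010 & 0xffff) = 0x12340 + 0x10 = 0x12350.
 */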
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
int i, copied;
unsigned char opcode[15];
unsigned long addr = convert_ip_to_linear(child, regs);
copied = access_process_vm(child, addr, opcode, sizeof(opcode),
FOLL_FORCE);
for (i = 0; i < copied; i++) {
switch (opcode[i]) {
/* popf and iret */
case 0x9d: case 0xcf:
return 1;
/* CHECKME: 64 65 */
/* opcode and address size prefixes */
case 0x66: case 0x67:
continue;
/* irrelevant prefixes (segment overrides and repeats) */
case 0x26: case 0x2e:
case 0x36: case 0x3e:
case 0x64: case 0x65:
case 0xf0: case 0xf2: case 0xf3:
continue;
#ifdef CONFIG_X86_64
case 0x40 ... 0x4f:
if (!user_64bit_mode(regs))
/* 32-bit mode: register increment */
return 0;
/* 64-bit mode: REX prefix */
continue;
#endif
/* CHECKME: f2, f3 */
/*
* pushf: NOTE! We should probably not let
* the user see the TF bit being set. But
* it's more pain than it's worth to avoid
* it, and a debugger could emulate this
* all in user space if it _really_ cares.
*/
case 0x9c:
default:
return 0;
}
}
return 0;
}
/*
* Enable single-stepping. Return nonzero if user mode is not using TF itself.
*/
static int enable_single_step(struct task_struct *child)
{
struct pt_regs *regs = task_pt_regs(child);
unsigned long oflags;
/*
* If we stepped into a sysenter/syscall insn, it trapped in
* kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
* If user-mode had set TF itself, then it's still clear from
* do_debug() and we need to set it again to restore the user
* state so we don't wrongly set TIF_FORCED_TF below.
* If enable_single_step() was used last and that is what
* set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
* already set and our bookkeeping is fine.
*/
if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
regs->flags |= X86_EFLAGS_TF;
/*
* Always set TIF_SINGLESTEP. This will also
* cause us to set TF when returning to user mode.
*/
set_tsk_thread_flag(child, TIF_SINGLESTEP);
/*
* Ensure that a trap is triggered once stepping out of a system
* call prior to executing any user instruction.
*/
set_task_syscall_work(child, SYSCALL_EXIT_TRAP);
oflags = regs->flags;
/* Set TF on the kernel stack.. */
regs->flags |= X86_EFLAGS_TF;
/*
* ..but if TF is changed by the instruction we will trace,
* don't mark it as being "us" that set it, so that we
* won't clear it by hand later.
*
* Note that if we don't actually execute the popf because
* of a signal arriving right now or suchlike, we will lose
* track of the fact that it really was "us" that set it.
*/
if (is_setting_trap_flag(child, regs)) {
clear_tsk_thread_flag(child, TIF_FORCED_TF);
return 0;
}
/*
* If TF was already set, check whether it was us who set it.
* If not, we should never attempt a block step.
*/
if (oflags & X86_EFLAGS_TF)
return test_tsk_thread_flag(child, TIF_FORCED_TF);
set_tsk_thread_flag(child, TIF_FORCED_TF);
return 1;
}
void set_task_blockstep(struct task_struct *task, bool on)
{
unsigned long debugctl;
/*
* Ensure irq/preemption can't change debugctl in between.
* Note also that both TIF_BLOCKSTEP and debugctl should
* be changed atomically wrt preemption.
*
* NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
* task is current or it can't be running, otherwise we can race
* with __switch_to_xtra(). We rely on ptrace_freeze_traced().
*/
local_irq_disable();
debugctl = get_debugctlmsr();
if (on) {
debugctl |= DEBUGCTLMSR_BTF;
set_tsk_thread_flag(task, TIF_BLOCKSTEP);
} else {
debugctl &= ~DEBUGCTLMSR_BTF;
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
}
if (task == current)
update_debugctlmsr(debugctl);
local_irq_enable();
}
/*
* Enable single or block step.
*/
static void enable_step(struct task_struct *child, bool block)
{
/*
* Make sure block stepping (BTF) is not enabled unless it should be.
* Note that we don't try to worry about any is_setting_trap_flag()
* instructions after the first when using block stepping.
* So no one should try to use debugger block stepping in a program
* that uses user-mode single stepping itself.
*/
if (enable_single_step(child) && block)
set_task_blockstep(child, true);
else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
set_task_blockstep(child, false);
}
void user_enable_single_step(struct task_struct *child)
{
enable_step(child, 0);
}
void user_enable_block_step(struct task_struct *child)
{
enable_step(child, 1);
}
void user_disable_single_step(struct task_struct *child)
{
/*
* Make sure block stepping (BTF) is disabled.
*/
if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
set_task_blockstep(child, false);
/* Always clear TIF_SINGLESTEP... */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
clear_task_syscall_work(child, SYSCALL_EXIT_TRAP);
/* But touch TF only if it was set by us.. */
if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/nstree.h>
static struct kmem_cache *user_ns_cachep __ro_after_init;
static DEFINE_MUTEX(userns_state_mutex);
static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);
static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
{
return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
}
static void dec_user_namespaces(struct ucounts *ucounts)
{
return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
}
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
/* Start with the same capabilities as init but useless for doing
* anything as the capabilities are bound to the new user namespace.
*/
cred->securebits = SECUREBITS_DEFAULT;
cred->cap_inheritable = CAP_EMPTY_SET;
cred->cap_permitted = CAP_FULL_SET;
cred->cap_effective = CAP_FULL_SET;
cred->cap_ambient = CAP_EMPTY_SET;
cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
key_put(cred->request_key_auth);
cred->request_key_auth = NULL;
#endif
/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
cred->user_ns = user_ns;
}
static unsigned long enforced_nproc_rlimit(void)
{
unsigned long limit = RLIM_INFINITY;
/* Is RLIMIT_NPROC currently enforced? */
if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) ||
(current_user_ns() != &init_user_ns))
limit = rlimit(RLIMIT_NPROC);
return limit;
}
/*
* Create a new user namespace, deriving the creator from the user in the
* passed credentials, and replacing that user with the new root user for the
* new namespace.
*
* This is called by copy_creds(), which will finish setting the target task's
* credentials.
*/
int create_user_ns(struct cred *new)
{
struct user_namespace *ns, *parent_ns = new->user_ns;
kuid_t owner = new->euid;
kgid_t group = new->egid;
struct ucounts *ucounts;
int ret, i;
ret = -ENOSPC;
if (parent_ns->level > 32)
goto fail;
ucounts = inc_user_namespaces(parent_ns, owner);
if (!ucounts)
goto fail;
/*
* Verify that we can not violate the policy of which files
* may be accessed that is specified by the root directory,
* by verifying that the root directory is at the root of the
* mount namespace which allows all files to be accessed.
*/
ret = -EPERM;
if (current_chrooted())
goto fail_dec;
/* The creator needs a mapping in the parent user namespace
* or else we won't be able to reasonably tell userspace who
* created a user_namespace.
*/
ret = -EPERM;
if (!kuid_has_mapping(parent_ns, owner) ||
!kgid_has_mapping(parent_ns, group))
goto fail_dec;
ret = security_create_user_ns(new);
if (ret < 0)
goto fail_dec;
ret = -ENOMEM;
ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
if (!ns)
goto fail_dec;
ns->parent_could_setfcap = cap_raised(new->cap_effective, CAP_SETFCAP);
ret = ns_common_init(ns);
if (ret)
goto fail_free;
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
ns->level = parent_ns->level + 1;
ns->owner = owner;
ns->group = group;
INIT_WORK(&ns->work, free_user_ns);
for (i = 0; i < UCOUNT_COUNTS; i++) {
ns->ucount_max[i] = INT_MAX;
}
set_userns_rlimit_max(ns, UCOUNT_RLIMIT_NPROC, enforced_nproc_rlimit());
set_userns_rlimit_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE));
set_userns_rlimit_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING));
set_userns_rlimit_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK));
ns->ucounts = ucounts;
/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
mutex_lock(&userns_state_mutex);
ns->flags = parent_ns->flags;
mutex_unlock(&userns_state_mutex);
#ifdef CONFIG_KEYS
INIT_LIST_HEAD(&ns->keyring_name_list);
init_rwsem(&ns->keyring_sem);
#endif
ret = -ENOMEM;
if (!setup_userns_sysctls(ns))
goto fail_keyring;
set_cred_user_ns(new, ns);
ns_tree_add(ns);
return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
key_put(ns->persistent_keyring_register);
#endif
ns_common_free(ns);
fail_free:
kmem_cache_free(user_ns_cachep, ns);
fail_dec:
dec_user_namespaces(ucounts);
fail:
return ret;
}
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
struct cred *cred;
int err = -ENOMEM;
if (!(unshare_flags & CLONE_NEWUSER))
return 0;
cred = prepare_creds();
if (cred) {
err = create_user_ns(cred);
if (err)
put_cred(cred);
else
*new_cred = cred;
}
return err;
}
static void free_user_ns(struct work_struct *work)
{
struct user_namespace *parent, *ns =
container_of(work, struct user_namespace, work);
do {
struct ucounts *ucounts = ns->ucounts;
parent = ns->parent;
ns_tree_remove(ns);
if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
kfree(ns->gid_map.forward);
kfree(ns->gid_map.reverse);
}
if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
kfree(ns->uid_map.forward);
kfree(ns->uid_map.reverse);
}
if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
kfree(ns->projid_map.forward);
kfree(ns->projid_map.reverse);
}
#if IS_ENABLED(CONFIG_BINFMT_MISC)
kfree(ns->binfmt_misc);
#endif
retire_userns_sysctls(ns);
key_free_user_ns(ns);
ns_common_free(ns);
/* Concurrent nstree traversal depends on a grace period. */
kfree_rcu(ns, ns.ns_rcu);
dec_user_namespaces(ucounts);
ns = parent;
} while (ns_ref_put(parent));
}
void __put_user_ns(struct user_namespace *ns)
{
schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);
/*
* struct idmap_key - holds the information necessary to find an idmapping in a
* sorted idmap array. It is passed to cmp_map_id() as first argument.
*/
struct idmap_key {
bool map_up; /* true -> id from kid; false -> kid from id */
u32 id; /* id to find */
u32 count;
};
/*
* cmp_map_id - Function to be passed to bsearch() to find the requested
* idmapping. Expects struct idmap_key to be passed via @k.
*/
static int cmp_map_id(const void *k, const void *e)
{
u32 first, last, id2;
const struct idmap_key *key = k;
const struct uid_gid_extent *el = e;
id2 = key->id + key->count - 1;
/* handle map_id_{down,up}() */
if (key->map_up)
first = el->lower_first;
else
first = el->first;
last = first + el->count - 1;
if (key->id >= first && key->id <= last &&
(id2 >= first && id2 <= last))
return 0;
if (key->id < first || id2 < first)
return -1;
return 1;
}
/*
* map_id_range_down_max - Find idmap via binary search in ordered idmap array.
* Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static struct uid_gid_extent *
map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
struct idmap_key key;
key.map_up = false;
key.count = count;
key.id = id;
return bsearch(&key, map->forward, extents,
sizeof(struct uid_gid_extent), cmp_map_id);
}
/*
* map_id_range_down_base - Find idmap via binary search in static extent array.
* Can only be called if number of mappings is less than or equal to
* UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static struct uid_gid_extent *
map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
unsigned idx;
u32 first, last, id2;
id2 = id + count - 1;
/* Find the matching extent */
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].first;
last = first + map->extent[idx].count - 1;
if (id >= first && id <= last &&
(id2 >= first && id2 <= last))
return &map->extent[idx];
}
return NULL;
}
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
struct uid_gid_extent *extent;
unsigned extents = map->nr_extents;
smp_rmb();
if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
extent = map_id_range_down_base(extents, map, id, count);
else
extent = map_id_range_down_max(extents, map, id, count);
/* Map the id or note failure */
if (extent)
id = (id - extent->first) + extent->lower_first;
else
id = (u32) -1;
return id;
}
u32 map_id_down(struct uid_gid_map *map, u32 id)
{
return map_id_range_down(map, id, 1);
}
/*
* map_id_range_up_base - Find idmap via binary search in static extent array.
* Can only be called if number of mappings is less than or equal to
* UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static struct uid_gid_extent *
map_id_range_up_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
unsigned idx;
u32 first, last, id2;
id2 = id + count - 1;
/* Find the matching extent */
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].lower_first;
last = first + map->extent[idx].count - 1;
if (id >= first && id <= last &&
(id2 >= first && id2 <= last))
return &map->extent[idx];
}
return NULL;
}
/*
* map_id_range_up_max - Find idmap via binary search in ordered idmap array.
* Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static struct uid_gid_extent *
map_id_range_up_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
struct idmap_key key;
key.map_up = true;
key.count = count;
key.id = id;
return bsearch(&key, map->reverse, extents,
sizeof(struct uid_gid_extent), cmp_map_id);
}
u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
{
struct uid_gid_extent *extent;
unsigned extents = map->nr_extents;
smp_rmb();
if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
extent = map_id_range_up_base(extents, map, id, count);
else
extent = map_id_range_up_max(extents, map, id, count);
/* Map the id or note failure */
if (extent)
id = (id - extent->lower_first) + extent->first;
else
id = (u32) -1;
return id;
}
u32 map_id_up(struct uid_gid_map *map, u32 id)
{
return map_id_range_up(map, id, 1);
}
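/*
 * Illustrative sketch (kept under #if 0, not part of the build): how a single
 * extent translates ids in both directions. The numbers are made up; an
 * extent maps "count" contiguous ids starting at "first" inside the namespace
 * onto "count" ids starting at "lower_first" in the parent/kernel id space.
 */
#if 0
static void idmap_extent_example(void)
{
	struct uid_gid_map map = {};

	map.extent[0].first = 1000;		/* id inside the namespace */
	map.extent[0].lower_first = 100000;	/* id in the parent/kernel space */
	map.extent[0].count = 65536;
	map.nr_extents = 1;

	WARN_ON(map_id_down(&map, 1000) != 100000);	/* ns -> kernel */
	WARN_ON(map_id_up(&map, 100000) != 1000);	/* kernel -> ns */
	WARN_ON(map_id_down(&map, 70000) != (u32)-1);	/* outside any extent */
}
#endif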
/**
* make_kuid - Map a user-namespace uid pair into a kuid.
* @ns: User namespace that the uid is in
* @uid: User identifier
*
* Maps a user-namespace uid pair into a kernel internal kuid,
* and returns that kuid.
*
* When there is no mapping defined for the user-namespace uid
* pair INVALID_UID is returned. Callers are expected to test
* for and handle INVALID_UID being returned. INVALID_UID
* may be tested for using uid_valid().
*/
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
/* Map the uid to a global kernel uid */
return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);
/**
* from_kuid - Create a uid from a kuid user-namespace pair.
* @targ: The user namespace we want a uid in.
* @kuid: The kernel internal uid to start with.
*
* Map @kuid into the user-namespace specified by @targ and
* return the resulting uid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kuid has no mapping in @targ (uid_t)-1 is returned.
*/
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
/* Map the uid from a global kernel uid */
return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);
/**
* from_kuid_munged - Create a uid from a kuid user-namespace pair.
* @targ: The user namespace we want a uid in.
* @kuid: The kernel internal uid to start with.
*
* Map @kuid into the user-namespace specified by @targ and
* return the resulting uid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kuid, from_kuid_munged never fails and always
* returns a valid uid. This makes from_kuid_munged appropriate
* for use in syscalls like stat and getuid where failing the
* system call and failing to provide a valid uid are not
* options.
*
* If @kuid has no mapping in @targ overflowuid is returned.
*/
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
uid_t uid;
uid = from_kuid(targ, kuid);
if (uid == (uid_t) -1)
uid = overflowuid;
return uid;
}
EXPORT_SYMBOL(from_kuid_munged);
/**
* make_kgid - Map a user-namespace gid pair into a kgid.
* @ns: User namespace that the gid is in
* @gid: group identifier
*
* Maps a user-namespace gid pair into a kernel internal kgid,
* and returns that kgid.
*
* When there is no mapping defined for the user-namespace gid
* pair INVALID_GID is returned. Callers are expected to test
* for and handle INVALID_GID being returned. INVALID_GID may be
* tested for using gid_valid().
*/
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
/* Map the gid to a global kernel gid */
return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);
/**
* from_kgid - Create a gid from a kgid user-namespace pair.
* @targ: The user namespace we want a gid in.
* @kgid: The kernel internal gid to start with.
*
* Map @kgid into the user-namespace specified by @targ and
* return the resulting gid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kgid has no mapping in @targ (gid_t)-1 is returned.
*/
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
/* Map the gid from a global kernel gid */
return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);
/**
* from_kgid_munged - Create a gid from a kgid user-namespace pair.
* @targ: The user namespace we want a gid in.
* @kgid: The kernel internal gid to start with.
*
* Map @kgid into the user-namespace specified by @targ and
* return the resulting gid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kgid, from_kgid_munged never fails and always
* returns a valid gid. This makes from_kgid_munged appropriate
* for use in syscalls like stat and getgid where failing the
* system call and failing to provide a valid gid are not options.
*
* If @kgid has no mapping in @targ overflowgid is returned.
*/
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
gid_t gid;
gid = from_kgid(targ, kgid);
if (gid == (gid_t) -1)
gid = overflowgid;
return gid;
}
EXPORT_SYMBOL(from_kgid_munged);
/**
* make_kprojid - Map a user-namespace projid pair into a kprojid.
* @ns: User namespace that the projid is in
* @projid: Project identifier
*
* Maps a user-namespace projid pair into a kernel internal kprojid,
* and returns that kprojid.
*
* When there is no mapping defined for the user-namespace projid
* pair INVALID_PROJID is returned. Callers are expected to test
* for and handle INVALID_PROJID being returned. INVALID_PROJID
* may be tested for using projid_valid().
*/
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
/* Map the projid to a global kernel projid */
return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);
/**
* from_kprojid - Create a projid from a kprojid user-namespace pair.
* @targ: The user namespace we want a projid in.
* @kprojid: The kernel internal project identifier to start with.
*
* Map @kprojid into the user-namespace specified by @targ and
* return the resulting projid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kprojid has no mapping in @targ (projid_t)-1 is returned.
*/
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
/* Map the projid from a global kernel projid */
return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);
/**
* from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
* @targ: The user namespace we want a projid in.
* @kprojid: The kernel internal projid to start with.
*
* Map @kprojid into the user-namespace specified by @targ and
* return the resulting projid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kprojid, from_kprojid_munged never fails and always
* returns a valid projid. This makes from_kprojid_munged
* appropriate for use in syscalls like stat where
* failing the system call and failing to provide a valid projid are
* not options.
*
* If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
*/
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
projid_t projid;
projid = from_kprojid(targ, kprojid);
if (projid == (projid_t) -1)
projid = OVERFLOW_PROJID;
return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);
static int uid_m_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
struct uid_gid_extent *extent = v;
struct user_namespace *lower_ns;
uid_t lower;
lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
seq_printf(seq, "%10u %10u %10u\n",
extent->first,
lower,
extent->count);
return 0;
}
static int gid_m_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
struct uid_gid_extent *extent = v;
struct user_namespace *lower_ns;
gid_t lower;
lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
seq_printf(seq, "%10u %10u %10u\n",
extent->first,
lower,
extent->count);
return 0;
}
static int projid_m_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
struct uid_gid_extent *extent = v;
struct user_namespace *lower_ns;
projid_t lower;
lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
seq_printf(seq, "%10u %10u %10u\n",
extent->first,
lower,
extent->count);
return 0;
}
static void *m_start(struct seq_file *seq, loff_t *ppos,
struct uid_gid_map *map)
{
loff_t pos = *ppos;
unsigned extents = map->nr_extents;
smp_rmb();
if (pos >= extents)
return NULL;
if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
return &map->extent[pos];
return &map->forward[pos];
}
static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
struct user_namespace *ns = seq->private;
return m_start(seq, ppos, &ns->uid_map);
}
static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
struct user_namespace *ns = seq->private;
return m_start(seq, ppos, &ns->gid_map);
}
static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
struct user_namespace *ns = seq->private;
return m_start(seq, ppos, &ns->projid_map);
}
static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
(*pos)++;
return seq->op->start(seq, pos);
}
static void m_stop(struct seq_file *seq, void *v)
{
return;
}
const struct seq_operations proc_uid_seq_operations = {
.start = uid_m_start,
.stop = m_stop,
.next = m_next,
.show = uid_m_show,
};
const struct seq_operations proc_gid_seq_operations = {
.start = gid_m_start,
.stop = m_stop,
.next = m_next,
.show = gid_m_show,
};
const struct seq_operations proc_projid_seq_operations = {
.start = projid_m_start,
.stop = m_stop,
.next = m_next,
.show = projid_m_show,
};
static bool mappings_overlap(struct uid_gid_map *new_map,
struct uid_gid_extent *extent)
{
u32 upper_first, lower_first, upper_last, lower_last;
unsigned idx;
upper_first = extent->first;
lower_first = extent->lower_first;
upper_last = upper_first + extent->count - 1;
lower_last = lower_first + extent->count - 1;
for (idx = 0; idx < new_map->nr_extents; idx++) {
u32 prev_upper_first, prev_lower_first;
u32 prev_upper_last, prev_lower_last;
struct uid_gid_extent *prev;
if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
prev = &new_map->extent[idx];
else
prev = &new_map->forward[idx];
prev_upper_first = prev->first;
prev_lower_first = prev->lower_first;
prev_upper_last = prev_upper_first + prev->count - 1;
prev_lower_last = prev_lower_first + prev->count - 1;
/* Does the upper range intersect a previous extent? */
if ((prev_upper_first <= upper_last) &&
(prev_upper_last >= upper_first))
return true;
/* Does the lower range intersect a previous extent? */
if ((prev_lower_first <= lower_last) &&
(prev_lower_last >= lower_first))
return true;
}
return false;
}
/*
* insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
* Takes care to allocate a 4K block of memory if the number of mappings exceeds
* UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
{
struct uid_gid_extent *dest;
if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
struct uid_gid_extent *forward;
/* Allocate memory for 340 mappings. */
forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
sizeof(struct uid_gid_extent),
GFP_KERNEL);
if (!forward)
return -ENOMEM;
/* Copy over memory. Only set up memory for the forward pointer.
* Defer the memory setup for the reverse pointer.
*/
memcpy(forward, map->extent,
map->nr_extents * sizeof(map->extent[0]));
map->forward = forward;
map->reverse = NULL;
}
if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
dest = &map->extent[map->nr_extents];
else
dest = &map->forward[map->nr_extents];
*dest = *extent;
map->nr_extents++;
return 0;
}
/* cmp function to sort() forward mappings */
static int cmp_extents_forward(const void *a, const void *b)
{
const struct uid_gid_extent *e1 = a;
const struct uid_gid_extent *e2 = b;
if (e1->first < e2->first)
return -1;
if (e1->first > e2->first)
return 1;
return 0;
}
/* cmp function to sort() reverse mappings */
static int cmp_extents_reverse(const void *a, const void *b)
{
const struct uid_gid_extent *e1 = a;
const struct uid_gid_extent *e2 = b;
if (e1->lower_first < e2->lower_first)
return -1;
if (e1->lower_first > e2->lower_first)
return 1;
return 0;
}
/*
* sort_idmaps - Sorts an array of idmap entries.
* Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static int sort_idmaps(struct uid_gid_map *map)
{
if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
return 0;
/* Sort forward array. */
sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
cmp_extents_forward, NULL);
/* Only copy the memory from forward we actually need. */
map->reverse = kmemdup_array(map->forward, map->nr_extents,
sizeof(struct uid_gid_extent), GFP_KERNEL);
if (!map->reverse)
return -ENOMEM;
/* Sort reverse array. */
sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
cmp_extents_reverse, NULL);
return 0;
}
/**
* verify_root_map() - check the uid 0 mapping
* @file: idmapping file
* @map_ns: user namespace of the target process
* @new_map: requested idmap
*
* If a process requests mapping parent uid 0 into the new ns, verify that the
* process writing the map had the CAP_SETFCAP capability as the target process
* will be able to write fscaps that are valid in ancestor user namespaces.
*
* Return: true if the mapping is allowed, false if not.
*/
static bool verify_root_map(const struct file *file,
struct user_namespace *map_ns,
struct uid_gid_map *new_map)
{
int idx;
const struct user_namespace *file_ns = file->f_cred->user_ns;
struct uid_gid_extent *extent0 = NULL;
for (idx = 0; idx < new_map->nr_extents; idx++) {
if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
extent0 = &new_map->extent[idx];
else
extent0 = &new_map->forward[idx];
if (extent0->lower_first == 0)
break;
extent0 = NULL;
}
if (!extent0)
return true;
if (map_ns == file_ns) {
/* The process unshared its ns and is writing to its own
* /proc/self/uid_map. User already has full capabilities in
* the new namespace. Verify that the parent had CAP_SETFCAP
* when it unshared.
*/
if (!file_ns->parent_could_setfcap)
return false;
} else {
/* Process p1 is writing to uid_map of p2, who is in a child
* user namespace to p1's. Verify that the opener of the map
* file has CAP_SETFCAP against the parent of the new map
* namespace */
if (!file_ns_capable(file, map_ns->parent, CAP_SETFCAP))
return false;
}
return true;
}
static ssize_t map_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos,
int cap_setid,
struct uid_gid_map *map,
struct uid_gid_map *parent_map)
{
struct seq_file *seq = file->private_data;
struct user_namespace *map_ns = seq->private;
struct uid_gid_map new_map;
unsigned idx;
struct uid_gid_extent extent;
char *kbuf, *pos, *next_line;
ssize_t ret;
/* Only allow < page size writes at the beginning of the file */
if ((*ppos != 0) || (count >= PAGE_SIZE))
return -EINVAL;
/* Slurp in the user data */
kbuf = memdup_user_nul(buf, count);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
/*
* The userns_state_mutex serializes all writes to any given map.
*
* Any map is only ever written once.
*
* An id map fits within 1 cache line on most architectures.
*
* On read nothing needs to be done unless you are on an
* architecture with a crazy cache coherency model like alpha.
*
* There is a one time data dependency between reading the
* count of the extents and the values of the extents. The
* desired behavior is to see the values of the extents that
* were written before the count of the extents.
*
* To achieve this smp_wmb() is used to guarantee the write
* order and smp_rmb() guarantees that we don't have crazy
* architectures returning stale data.
*/
mutex_lock(&userns_state_mutex);
memset(&new_map, 0, sizeof(struct uid_gid_map));
ret = -EPERM;
/* Only allow one successful write to the map */
if (map->nr_extents != 0)
goto out;
/*
* Adjusting namespace settings requires capabilities on the target.
*/
if (cap_valid(cap_setid) && !file_ns_capable(file, map_ns, CAP_SYS_ADMIN))
goto out;
/* Parse the user data */
ret = -EINVAL;
pos = kbuf;
for (; pos; pos = next_line) {
/* Find the end of line and ensure I don't look past it */
next_line = strchr(pos, '\n');
if (next_line) {
*next_line = '\0';
next_line++;
if (*next_line == '\0')
next_line = NULL;
}
pos = skip_spaces(pos);
extent.first = simple_strtoul(pos, &pos, 10);
if (!isspace(*pos))
goto out;
pos = skip_spaces(pos);
extent.lower_first = simple_strtoul(pos, &pos, 10);
if (!isspace(*pos))
goto out;
pos = skip_spaces(pos);
extent.count = simple_strtoul(pos, &pos, 10);
if (*pos && !isspace(*pos))
goto out;
/* Verify there is no trailing junk on the line */
pos = skip_spaces(pos);
if (*pos != '\0')
goto out;
/* Verify we have been given valid starting values */
if ((extent.first == (u32) -1) ||
(extent.lower_first == (u32) -1))
goto out;
/* Verify count is not zero and does not cause the
* extent to wrap
*/
if ((extent.first + extent.count) <= extent.first)
goto out;
if ((extent.lower_first + extent.count) <=
extent.lower_first)
goto out;
/* Do the ranges in extent overlap any previous extents? */
if (mappings_overlap(&new_map, &extent))
goto out;
if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
(next_line != NULL))
goto out;
ret = insert_extent(&new_map, &extent);
if (ret < 0)
goto out;
ret = -EINVAL;
}
/* Be very certain the new map actually exists */
if (new_map.nr_extents == 0)
goto out;
ret = -EPERM;
/* Validate that the user is allowed to use the user ids being mapped to. */
if (!new_idmap_permitted(file, map_ns, cap_setid, &new_map))
goto out;
ret = -EPERM;
/* Map the lower ids from the parent user namespace to the
* kernel global id space.
*/
for (idx = 0; idx < new_map.nr_extents; idx++) {
struct uid_gid_extent *e;
u32 lower_first;
if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
e = &new_map.extent[idx];
else
e = &new_map.forward[idx];
lower_first = map_id_range_down(parent_map,
e->lower_first,
e->count);
/* Fail if we can not map the specified extent to
* the kernel global id space.
*/
if (lower_first == (u32) -1)
goto out;
e->lower_first = lower_first;
}
/*
* If we want to use binary search for lookup, this clones the extent
* array and sorts both copies.
*/
ret = sort_idmaps(&new_map);
if (ret < 0)
goto out;
/* Install the map */
if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
memcpy(map->extent, new_map.extent,
new_map.nr_extents * sizeof(new_map.extent[0]));
} else {
map->forward = new_map.forward;
map->reverse = new_map.reverse;
}
smp_wmb();
map->nr_extents = new_map.nr_extents;
*ppos = count;
ret = count;
out:
if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
kfree(new_map.forward);
kfree(new_map.reverse);
map->forward = NULL;
map->reverse = NULL;
map->nr_extents = 0;
}
mutex_unlock(&userns_state_mutex);
kfree(kbuf);
return ret;
}
ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
return map_write(file, buf, size, ppos, CAP_SETUID,
&ns->uid_map, &ns->parent->uid_map);
}
ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
return map_write(file, buf, size, ppos, CAP_SETGID,
&ns->gid_map, &ns->parent->gid_map);
}
ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
/* Anyone can set any valid project id, no capability needed */
return map_write(file, buf, size, ppos, -1,
&ns->projid_map, &ns->parent->projid_map);
}
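/*
 * Usage sketch (userspace, kept under #if 0 and not part of this file): the
 * *_map_write() handlers above take one or more "<first> <lower_first>
 * <count>" lines in a single write() at offset 0, and only the first
 * successful write to a given map sticks. The pid and ids below are made up.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *line = "0 1000 1\n";	/* ns uid 0 backed by parent uid 1000 */
	int fd = open("/proc/1234/uid_map", O_WRONLY);	/* hypothetical pid */

	if (fd < 0 || write(fd, line, strlen(line)) < 0)
		perror("uid_map");
	if (fd >= 0)
		close(fd);
	return 0;
}
#endif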
static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *new_map)
{
const struct cred *cred = file->f_cred;
if (cap_setid == CAP_SETUID && !verify_root_map(file, ns, new_map))
return false;
/* Don't allow mappings that would allow anything that wouldn't
* be allowed without the establishment of unprivileged mappings.
*/
if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
uid_eq(ns->owner, cred->euid)) {
u32 id = new_map->extent[0].lower_first;
if (cap_setid == CAP_SETUID) {
kuid_t uid = make_kuid(ns->parent, id);
if (uid_eq(uid, cred->euid))
return true;
} else if (cap_setid == CAP_SETGID) {
kgid_t gid = make_kgid(ns->parent, id);
if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
gid_eq(gid, cred->egid))
return true;
}
}
/* Allow anyone to set a mapping that doesn't require privilege */
if (!cap_valid(cap_setid))
return true;
/* Allow the specified ids if we have the appropriate capability
* (CAP_SETUID or CAP_SETGID) over the parent user namespace,
* and the opener of the id file also has the appropriate capability.
*/
if (ns_capable(ns->parent, cap_setid) &&
file_ns_capable(file, ns->parent, cap_setid))
return true;
return false;
}
int proc_setgroups_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
unsigned long userns_flags = READ_ONCE(ns->flags);
seq_printf(seq, "%s\n",
(userns_flags & USERNS_SETGROUPS_ALLOWED) ?
"allow" : "deny");
return 0;
}
ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
char kbuf[8], *pos;
bool setgroups_allowed;
ssize_t ret;
/* Only allow a very narrow range of strings to be written */
ret = -EINVAL;
if ((*ppos != 0) || (count >= sizeof(kbuf)))
goto out;
/* What was written? */
ret = -EFAULT;
if (copy_from_user(kbuf, buf, count))
goto out;
kbuf[count] = '\0';
pos = kbuf;
/* What is being requested? */
ret = -EINVAL;
if (strncmp(pos, "allow", 5) == 0) {
pos += 5;
setgroups_allowed = true;
}
else if (strncmp(pos, "deny", 4) == 0) {
pos += 4;
setgroups_allowed = false;
}
else
goto out;
/* Verify there is no trailing junk on the line */
pos = skip_spaces(pos);
if (*pos != '\0')
goto out;
ret = -EPERM;
mutex_lock(&userns_state_mutex);
if (setgroups_allowed) {
/* Enabling setgroups after setgroups has been disabled
* is not allowed.
*/
if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
goto out_unlock;
} else {
/* Permanently disabling setgroups after setgroups has
* been enabled by writing the gid_map is not allowed.
*/
if (ns->gid_map.nr_extents != 0)
goto out_unlock;
ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
}
mutex_unlock(&userns_state_mutex);
/* Report a successful write */
*ppos = count;
ret = count;
out:
return ret;
out_unlock:
mutex_unlock(&userns_state_mutex);
goto out;
}
bool userns_may_setgroups(const struct user_namespace *ns)
{
bool allowed;
mutex_lock(&userns_state_mutex);
/* It is not safe to use setgroups until a gid mapping in
* the user namespace has been established.
*/
allowed = ns->gid_map.nr_extents != 0;
/* Is setgroups allowed? */
allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
mutex_unlock(&userns_state_mutex);
return allowed;
}
/*
* Returns true if @child is the same namespace or a descendant of
* @ancestor.
*/
bool in_userns(const struct user_namespace *ancestor,
const struct user_namespace *child)
{
const struct user_namespace *ns;
for (ns = child; ns->level > ancestor->level; ns = ns->parent)
;
return (ns == ancestor);
}
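/*
 * Illustrative sketch (kept under #if 0): the level-based ancestry walk.
 * With init_user_ns at level 0 and each child namespace one level deeper,
 * in_userns() follows ->parent until the levels match, then compares
 * pointers.
 */
#if 0
static bool example_is_descendant_of_init(const struct user_namespace *ns)
{
	/* true for init_user_ns itself and for everything created below it */
	return in_userns(&init_user_ns, ns);
}
#endif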
bool current_in_userns(const struct user_namespace *target_ns)
{
return in_userns(target_ns, current_user_ns());
}
EXPORT_SYMBOL(current_in_userns);
static struct ns_common *userns_get(struct task_struct *task)
{
struct user_namespace *user_ns;
rcu_read_lock();
user_ns = get_user_ns(__task_cred(task)->user_ns);
rcu_read_unlock();
return user_ns ? &user_ns->ns : NULL;
}
static void userns_put(struct ns_common *ns)
{
put_user_ns(to_user_ns(ns));
}
static int userns_install(struct nsset *nsset, struct ns_common *ns)
{
struct user_namespace *user_ns = to_user_ns(ns);
struct cred *cred;
/* Don't allow gaining capabilities by reentering
* the same user namespace.
*/
if (user_ns == current_user_ns())
return -EINVAL;
/* Tasks that share a thread group must share a user namespace */
if (!thread_group_empty(current))
return -EINVAL;
if (current->fs->users != 1)
return -EINVAL;
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
cred = nsset_cred(nsset);
if (!cred)
return -EINVAL;
put_user_ns(cred->user_ns);
set_cred_user_ns(cred, get_user_ns(user_ns));
if (set_cred_ucounts(cred) < 0)
return -EINVAL;
return 0;
}
struct ns_common *ns_get_owner(struct ns_common *ns)
{
struct user_namespace *my_user_ns = current_user_ns();
struct user_namespace *owner, *p;
/* See if the owner is in the current user namespace */
owner = p = ns->ops->owner(ns);
for (;;) {
if (!p)
return ERR_PTR(-EPERM);
if (p == my_user_ns)
break;
p = p->parent;
}
return &get_user_ns(owner)->ns;
}
static struct user_namespace *userns_owner(struct ns_common *ns)
{
return to_user_ns(ns)->parent;
}
const struct proc_ns_operations userns_operations = {
.name = "user",
.get = userns_get,
.put = userns_put,
.install = userns_install,
.owner = userns_owner,
.get_parent = ns_get_owner,
};
static __init int user_namespaces_init(void)
{
user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC | SLAB_ACCOUNT);
ns_tree_add(&init_user_ns);
return 0;
}
subsys_initcall(user_namespaces_init);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header provides generic wrappers for memory access instrumentation that
* the compiler cannot emit for: KASAN, KCSAN, KMSAN.
*/
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
#include <linux/kmsan-checks.h>
#include <linux/types.h>
/**
* instrument_read - instrument regular read access
* @v: address of access
* @size: size of access
*
* Instrument a regular read access. The instrumentation should be inserted
* before the actual read happens.
*/
static __always_inline void instrument_read(const volatile void *v, size_t size)
{
kasan_check_read(v, size);
kcsan_check_read(v, size);
}
/**
* instrument_write - instrument regular write access
* @v: address of access
* @size: size of access
*
* Instrument a regular write access. The instrumentation should be inserted
* before the actual write happens.
*/
static __always_inline void instrument_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_write(v, size);
}
/**
* instrument_read_write - instrument regular read-write access
* @v: address of access
* @size: size of access
*
* Instrument a regular read-write access. The instrumentation should be
* inserted before the actual access happens.
*/
static __always_inline void instrument_read_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_read_write(v, size);
}
/**
* instrument_atomic_read - instrument atomic read access
* @v: address of access
* @size: size of access
*
* Instrument an atomic read access. The instrumentation should be inserted
* before the actual read happens.
*/
static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
kasan_check_read(v, size);
kcsan_check_atomic_read(v, size);
}
/**
* instrument_atomic_write - instrument atomic write access
* @v: address of access
* @size: size of access
*
* Instrument an atomic write access. The instrumentation should be inserted
* before the actual write happens.
*/
static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_atomic_write(v, size);
}
/**
* instrument_atomic_read_write - instrument atomic read-write access
* @v: address of access
* @size: size of access
*
* Instrument an atomic read-write access. The instrumentation should be
* inserted before the actual write happens.
*/
static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_atomic_read_write(v, size);
}
/**
* instrument_copy_to_user - instrument reads of copy_to_user
* @to: destination address
* @from: source address
* @n: number of bytes to copy
*
* Instrument reads from kernel memory that are due to copy_to_user (and
* variants). The instrumentation must be inserted before the accesses.
*/
static __always_inline void
instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
kcsan_check_read(from, n);
kmsan_copy_to_user(to, from, n, 0);
}
/**
* instrument_copy_from_user_before - add instrumentation before copy_from_user
* @to: destination address
* @from: source address
* @n: number of bytes to copy
*
* Instrument writes to kernel memory that are due to copy_from_user (and
* variants). The instrumentation should be inserted before the accesses.
*/
static __always_inline void
instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
kcsan_check_write(to, n);
}
/**
* instrument_copy_from_user_after - add instrumentation after copy_from_user
* @to: destination address
* @from: source address
* @n: number of bytes to copy
* @left: number of bytes not copied (as returned by copy_from_user)
*
* Instrument writes to kernel memory that are due to copy_from_user (and
* variants). The instrumentation should be inserted after the accesses.
*/
static __always_inline void
instrument_copy_from_user_after(const void *to, const void __user *from,
unsigned long n, unsigned long left)
{
kmsan_unpoison_memory(to, n - left);
}
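/*
 * Usage sketch (kept under #if 0, not part of this header): how a
 * copy_from_user()-style wrapper brackets the uninstrumented copy with the
 * two hooks above. raw_copy_from_user() stands in for the arch primitive
 * (declared in <linux/uaccess.h>, not included here).
 */
#if 0
static unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	instrument_copy_from_user_before(to, from, n);
	left = raw_copy_from_user(to, from, n);	/* returns bytes NOT copied */
	instrument_copy_from_user_after(to, from, n, left);
	return left;
}
#endif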
/**
* instrument_memcpy_before - add instrumentation before non-instrumented memcpy
* @to: destination address
* @from: source address
* @n: number of bytes to copy
*
* Instrument memory accesses that happen in custom memcpy implementations. The
* instrumentation should be inserted before the memcpy call.
*/
static __always_inline void instrument_memcpy_before(void *to, const void *from,
unsigned long n)
{
kasan_check_write(to, n);
kasan_check_read(from, n);
kcsan_check_write(to, n);
kcsan_check_read(from, n);
}
/**
* instrument_memcpy_after - add instrumentation after non-instrumented memcpy
* @to: destination address
* @from: source address
* @n: number of bytes to copy
* @left: number of bytes not copied (if known)
*
* Instrument memory accesses that happen in custom memcpy implementations. The
* instrumentation should be inserted after the memcpy call.
*/
static __always_inline void instrument_memcpy_after(void *to, const void *from,
unsigned long n,
unsigned long left)
{
kmsan_memmove(to, from, n - left);
}
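/*
 * Usage sketch (kept under #if 0): a custom, non-instrumented copy routine
 * wrapped with the hooks above. __memcpy() stands in for an uninstrumented
 * copy; 0 bytes are assumed left uncopied.
 */
#if 0
static void *example_instrumented_memcpy(void *to, const void *from,
					  unsigned long n)
{
	instrument_memcpy_before(to, from, n);
	__memcpy(to, from, n);
	instrument_memcpy_after(to, from, n, 0);
	return to;
}
#endif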
/**
* instrument_get_user() - add instrumentation to get_user()-like macros
* @to: destination variable, may not be address-taken
*
* get_user() and friends are fragile, so it may depend on the implementation
* whether the instrumentation happens before or after the data is copied from
* the userspace.
*/
#define instrument_get_user(to) \
({ \
u64 __tmp = (u64)(to); \
kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
to = __tmp; \
})
/**
* instrument_put_user() - add instrumentation to put_user()-like macros
* @from: source address
* @ptr: userspace pointer to copy to
* @size: number of bytes to copy
*
* put_user() and friends are fragile, so it may depend on the implementation
* whether the instrumentation happens before or after the data is copied to
* the userspace.
*/
#define instrument_put_user(from, ptr, size) \
({ \
kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
})
#endif /* _LINUX_INSTRUMENTED_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Latched RB-trees
*
* Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org>
*
* Since RB-trees have non-atomic modifications they're not immediately suited
* for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
* lockless lookups, we cannot guarantee they return a correct result.
*
* The simplest solution is a seqlock + RB-tree, this will allow lockless
* lookups; but has the constraint (inherent to the seqlock) that read sides
* cannot nest in write sides.
*
* If we need to allow unconditional lookups (say as required for NMI context
* usage) we need a more complex setup; this data structure provides this by
* employing the latch technique -- see @write_seqcount_latch_begin -- to
* implement a latched RB-tree which does allow for unconditional lookups by
* virtue of always having (at least) one stable copy of the tree.
*
* However, while we have the guarantee that there is at all times one stable
* copy, this does not guarantee an iteration will not observe modifications.
* What might have been a stable copy at the start of the iteration, need not
* remain so for the duration of the iteration.
*
* Therefore, this does require a lockless RB-tree iteration to be non-fatal;
* see the comment in lib/rbtree.c. Note however that we only require the first
* condition -- not seeing partial stores -- because the latch thing isolates
* us from loops. If we were to interrupt a modification the lookup would be
* pointed at the stable tree and complete while the modification was halted.
*/
#ifndef RB_TREE_LATCH_H
#define RB_TREE_LATCH_H
#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
struct latch_tree_node {
struct rb_node node[2];
};
struct latch_tree_root {
seqcount_latch_t seq;
struct rb_root tree[2];
};
/**
* latch_tree_ops - operators to define the tree order
* @less: used for insertion; provides the (partial) order between two elements.
* @comp: used for lookups; provides the order between the search key and an element.
*
* The operators are related like:
*
* comp(a->key,b) < 0 := less(a,b)
* comp(a->key,b) > 0 := less(b,a)
* comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
*
* If these operators define a partial order on the elements we make no
* guarantee on which of the elements matching the key is found. See
* latch_tree_find().
*/
struct latch_tree_ops {
bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
int (*comp)(void *key, struct latch_tree_node *b);
};
static __always_inline struct latch_tree_node *
__lt_from_rb(struct rb_node *node, int idx)
{
return container_of(node, struct latch_tree_node, node[idx]);
}
static __always_inline void
__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
{
struct rb_root *root = <r->tree[idx];
struct rb_node **link = &root->rb_node;
struct rb_node *node = <n->node[idx];
struct rb_node *parent = NULL;
struct latch_tree_node *ltp;
while (*link) {
parent = *link;
ltp = __lt_from_rb(parent, idx);
if (less(ltn, ltp))
link = &parent->rb_left;
else
link = &parent->rb_right;
}
rb_link_node_rcu(node, parent, link);
rb_insert_color(node, root);
}
static __always_inline void
__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
{
rb_erase(<n->node[idx], <r->tree[idx]);
}
static __always_inline struct latch_tree_node *
__lt_find(void *key, struct latch_tree_root *ltr, int idx,
int (*comp)(void *key, struct latch_tree_node *node))
{
struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
struct latch_tree_node *ltn;
int c;
while (node) {
ltn = __lt_from_rb(node, idx);
c = comp(key, ltn);
if (c < 0)
node = rcu_dereference_raw(node->rb_left);
else if (c > 0)
node = rcu_dereference_raw(node->rb_right);
else
return ltn;
}
return NULL;
}
/**
* latch_tree_insert() - insert @node into the trees @root
* @node: node to insert
* @root: trees to insert @node into
* @ops: operators defining the node order
*
* It inserts @node into @root in an ordered fashion such that we can always
* observe one complete tree. See the comment for write_seqcount_latch_begin().
*
* The inserts use rcu_assign_pointer() to publish the element such that the
* tree structure is stored before we can observe the new @node.
*
* All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
* serialized.
*/
static __always_inline void
latch_tree_insert(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
write_seqcount_latch_begin(&root->seq);
__lt_insert(node, root, 0, ops->less);
write_seqcount_latch(&root->seq);
__lt_insert(node, root, 1, ops->less);
write_seqcount_latch_end(&root->seq);
}
/**
* latch_tree_erase() - removes @node from the trees @root
* @node: node to remove
* @root: trees to remove @node from
* @ops: operators defining the node order
*
* Removes @node from the trees @root in an ordered fashion such that we can
* always observe one complete tree. See the comment for
* write_seqcount_latch_begin().
*
* It is assumed that @node will observe one RCU quiescent state before being
* reused or freed.
*
* All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
* serialized.
*/
static __always_inline void
latch_tree_erase(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
write_seqcount_latch_begin(&root->seq);
__lt_erase(node, root, 0);
write_seqcount_latch(&root->seq);
__lt_erase(node, root, 1);
write_seqcount_latch_end(&root->seq);
}
/**
* latch_tree_find() - find the node matching @key in the trees @root
* @key: search key
* @root: trees to search for @key
* @ops: operators defining the node order
*
* Does a lockless lookup in the trees @root for the node matching @key.
*
* It is assumed that this is called while holding the appropriate RCU read
* side lock.
*
* If the operators define a partial order on the elements (there are multiple
* elements which have the same key value) it is undefined which of these
* elements will be found. Nor is it possible to iterate the tree to find
* further elements with the same key value.
*
* Returns: a pointer to the node matching @key or NULL.
*/
static __always_inline struct latch_tree_node *
latch_tree_find(void *key, struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
struct latch_tree_node *node;
unsigned int seq;
do {
seq = read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
} while (read_seqcount_latch_retry(&root->seq, seq));
return node;
}
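/*
 * Illustrative sketch (kept under #if 0, not part of this header): a minimal
 * user of the latched RB-tree. The demo_* names are made up. Writers must be
 * serialized externally (e.g. by a mutex); readers only need rcu_read_lock().
 */
#if 0
struct demo_node {
	unsigned long key;
	struct latch_tree_node lt;
};

static bool demo_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return container_of(a, struct demo_node, lt)->key <
	       container_of(b, struct demo_node, lt)->key;
}

static int demo_comp(void *key, struct latch_tree_node *b)
{
	unsigned long k = *(unsigned long *)key;
	unsigned long nk = container_of(b, struct demo_node, lt)->key;

	if (k < nk)
		return -1;
	if (k > nk)
		return 1;
	return 0;
}

static const struct latch_tree_ops demo_ops = {
	.less = demo_less,
	.comp = demo_comp,
};

static struct latch_tree_root demo_root;

static void demo_insert(struct demo_node *n)
{
	/* Caller serializes all modifications. */
	latch_tree_insert(&n->lt, &demo_root, &demo_ops);
}

static bool demo_contains(unsigned long key)
{
	struct latch_tree_node *ltn;

	rcu_read_lock();
	ltn = latch_tree_find(&key, &demo_root, &demo_ops);
	rcu_read_unlock();
	return ltn != NULL;
}
#endif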
#endif /* RB_TREE_LATCH_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Skb ref helpers.
*
*/
#ifndef _LINUX_SKBUFF_REF_H
#define _LINUX_SKBUFF_REF_H
#include <linux/skbuff.h>
/**
* __skb_frag_ref - take an additional reference on a paged fragment.
* @frag: the paged fragment
*
* Takes an additional reference on the paged fragment @frag.
*/
static inline void __skb_frag_ref(skb_frag_t *frag)
{
get_netmem(skb_frag_netmem(frag));
}
/**
* skb_frag_ref - take an additional reference on a paged fragment of an skb.
* @skb: the buffer
* @f: the fragment offset.
*
* Takes an additional reference on the @f'th paged fragment of @skb.
*/
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
bool napi_pp_put_page(netmem_ref netmem);
static inline void skb_page_unref(netmem_ref netmem, bool recycle)
{
#ifdef CONFIG_PAGE_POOL
if (recycle && napi_pp_put_page(netmem))
return;
#endif
put_netmem(netmem);
}
/**
* __skb_frag_unref - release a reference on a paged fragment.
* @frag: the paged fragment
* @recycle: recycle the page if allocated via page_pool
*
* Releases a reference on the paged fragment @frag
* or recycles the page via the page_pool API.
*/
static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
skb_page_unref(skb_frag_netmem(frag), recycle);
}
/**
* skb_frag_unref - release a reference on a paged fragment of an skb.
* @skb: the buffer
* @f: the fragment offset
*
* Releases a reference on the @f'th paged fragment of @skb.
*/
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (!skb_zcopy_managed(skb))
__skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);
}
#endif /* _LINUX_SKBUFF_REF_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
* Declarations for Reverse Mapping functions in mm/rmap.c
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/bit_spinlock.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
* an anonymous page pointing to this anon_vma needs to be unmapped:
* the vmas on the list will be related by forking, or by splitting.
*
* Since vmas come and go as they are split and merged (particularly
* in mprotect), the mapping field of an anonymous page cannot point
* directly to a vma: instead it points to an anon_vma, on whose list
* the related vmas can be easily linked or unlinked.
*
* After unlinking the last vma on the list, we must garbage collect
* the anon_vma object itself: we're guaranteed no page can be
* pointing to this anon_vma once its vma list is empty.
*/
struct anon_vma {
struct anon_vma *root; /* Root of this anon_vma tree */
struct rw_semaphore rwsem; /* W: modification, R: walking the list */
/*
* The refcount is taken on an anon_vma when there is no
* guarantee that the vma of page tables will exist for
* the duration of the operation. A caller that takes
* the reference is responsible for clearing up the
* anon_vma if they are the last user on release
*/
atomic_t refcount;
/*
* Count of child anon_vmas. Equals the count of all anon_vmas that
* have ->parent pointing to this one, including itself.
*
* This counter is used for making decisions about reusing an anon_vma
* instead of forking a new one. See comments in function anon_vma_clone.
*/
unsigned long num_children;
/* Count of VMAs whose ->anon_vma pointer points to this object. */
unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
/*
* NOTE: the LSB of the rb_root.rb_node is set by
* mm_take_all_locks() _after_ taking the above lock. So the
* rb_root must only be read/written after taking the above lock
* to be sure to see a valid next pointer. The LSB bit itself
* is serialized by a system wide lock only visible to
* mm_take_all_locks() (mm_all_locks_mutex).
*/
/* Interval tree of private "related" vmas */
struct rb_root_cached rb_root;
};
/*
* The copy-on-write semantics of fork mean that an anon_vma
* can become associated with multiple processes. Furthermore,
* each child process will have its own anon_vma, where new
* pages for that process are instantiated.
*
* This structure allows us to find the anon_vmas associated
* with a VMA, or the VMAs associated with an anon_vma.
* The "same_vma" list contains the anon_vma_chains linking
* all the anon_vmas associated with this VMA.
* The "rb" field indexes on an interval tree the anon_vma_chains
* which link all the VMAs associated with this anon_vma.
*/
struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
struct list_head same_vma; /* locked by mmap_lock & page_table_lock */
struct rb_node rb; /* locked by anon_vma->rwsem */
unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
unsigned long cached_vma_start, cached_vma_last;
#endif
};
enum ttu_flags {
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
TTU_HWPOISON = 0x20, /* do convert pte to hwpoison entry */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
};
#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
atomic_inc(&anon_vma->refcount);
}
void __put_anon_vma(struct anon_vma *anon_vma);
static inline void put_anon_vma(struct anon_vma *anon_vma)
{
if (atomic_dec_and_test(&anon_vma->refcount))
__put_anon_vma(anon_vma);
}
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);
}
static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
{
return down_write_trylock(&anon_vma->root->rwsem);
}
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
up_write(&anon_vma->root->rwsem);
}
static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
down_read(&anon_vma->root->rwsem);
}
static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
{
return down_read_trylock(&anon_vma->root->rwsem);
}
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
up_read(&anon_vma->root->rwsem);
}
/*
* anon_vma helper functions.
*/
void anon_vma_init(void); /* create anon_vma_cachep */
int __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
if (likely(vma->anon_vma))
return 0;
return __anon_vma_prepare(vma);
}
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
{
VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
unlink_anon_vmas(next);
}
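/*
 * Usage sketch (kept under #if 0): the usual pattern in an anonymous fault
 * path. anon_vma_prepare() runs before the first anonymous page is added to
 * the VMA so that rmap has an anon_vma to hang the mapping off; the rest of
 * the fault handling is elided.
 */
#if 0
static vm_fault_t example_anon_fault(struct vm_area_struct *vma)
{
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	/* ... allocate a folio and add it to the rmap ... */
	return 0;
}
#endif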
struct anon_vma *folio_get_anon_vma(const struct folio *folio);
#ifdef CONFIG_MM_ID
static __always_inline void folio_lock_large_mapcount(struct folio *folio)
{
bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
}
static __always_inline void folio_unlock_large_mapcount(struct folio *folio)
{
__bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
}
static inline unsigned int folio_mm_id(const struct folio *folio, int idx)
{
VM_WARN_ON_ONCE(idx != 0 && idx != 1);
return folio->_mm_id[idx] & MM_ID_MASK;
}
static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id)
{
VM_WARN_ON_ONCE(idx != 0 && idx != 1);
folio->_mm_id[idx] &= ~MM_ID_MASK;
folio->_mm_id[idx] |= id;
}
static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio,
int diff, mm_id_t mm_id)
{
VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio));
VM_WARN_ON_ONCE(diff <= 0);
VM_WARN_ON_ONCE(mm_id < MM_ID_MIN || mm_id > MM_ID_MAX);
/*
* Make sure we can detect at least one complete PTE mapping of the
* folio in a single MM as "exclusively mapped". This is primarily
* a check on 32bit, where we currently reduce the size of the per-MM
* mapcount to a short.
*/
VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio));
VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX);
VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY &&
folio->_mm_id_mapcount[0] != -1);
VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY &&
folio->_mm_id_mapcount[0] < 0);
VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY &&
folio->_mm_id_mapcount[1] != -1);
VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY &&
folio->_mm_id_mapcount[1] < 0);
VM_WARN_ON_ONCE(!folio_mapped(folio) &&
test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids));
}
static __always_inline void folio_set_large_mapcount(struct folio *folio,
int mapcount, struct vm_area_struct *vma)
{
__folio_large_mapcount_sanity_checks(folio, mapcount, vma->vm_mm->mm_id);
VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY);
VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY);
/* Note: mapcounts start at -1. */
atomic_set(&folio->_large_mapcount, mapcount - 1);
folio->_mm_id_mapcount[0] = mapcount - 1;
folio_set_mm_id(folio, 0, vma->vm_mm->mm_id);
}
static __always_inline int folio_add_return_large_mapcount(struct folio *folio,
int diff, struct vm_area_struct *vma)
{
const mm_id_t mm_id = vma->vm_mm->mm_id;
int new_mapcount_val;
folio_lock_large_mapcount(folio);
__folio_large_mapcount_sanity_checks(folio, diff, mm_id);
new_mapcount_val = atomic_read(&folio->_large_mapcount) + diff;
atomic_set(&folio->_large_mapcount, new_mapcount_val);
/*
* If a folio is mapped more than once into an MM on 32bit, we
* can in theory overflow the per-MM mapcount (although only for
* fairly large folios), turning it negative. In that case, just
* free up the slot and mark the folio "mapped shared", otherwise
* we might be in trouble when unmapping pages later.
*/
if (folio_mm_id(folio, 0) == mm_id) {
folio->_mm_id_mapcount[0] += diff;
if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[0] < 0)) {
folio->_mm_id_mapcount[0] = -1;
folio_set_mm_id(folio, 0, MM_ID_DUMMY);
folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
}
} else if (folio_mm_id(folio, 1) == mm_id) {
folio->_mm_id_mapcount[1] += diff;
if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[1] < 0)) {
folio->_mm_id_mapcount[1] = -1;
folio_set_mm_id(folio, 1, MM_ID_DUMMY);
folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
}
} else if (folio_mm_id(folio, 0) == MM_ID_DUMMY) {
folio_set_mm_id(folio, 0, mm_id);
folio->_mm_id_mapcount[0] = diff - 1;
/* We might have other mappings already. */
if (new_mapcount_val != diff - 1)
folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
} else if (folio_mm_id(folio, 1) == MM_ID_DUMMY) {
folio_set_mm_id(folio, 1, mm_id);
folio->_mm_id_mapcount[1] = diff - 1;
/* Slot 0 certainly has mappings as well. */
folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
}
folio_unlock_large_mapcount(folio);
return new_mapcount_val + 1;
}
#define folio_add_large_mapcount folio_add_return_large_mapcount
static __always_inline int folio_sub_return_large_mapcount(struct folio *folio,
int diff, struct vm_area_struct *vma)
{
const mm_id_t mm_id = vma->vm_mm->mm_id;
int new_mapcount_val;
folio_lock_large_mapcount(folio);
__folio_large_mapcount_sanity_checks(folio, diff, mm_id);
new_mapcount_val = atomic_read(&folio->_large_mapcount) - diff;
atomic_set(&folio->_large_mapcount, new_mapcount_val);
/*
* There are valid corner cases where we might underflow a per-MM
* mapcount (some mappings added when no slot was free, some mappings
* added once a slot was free), so we always set it to -1 once we go
* negative.
*/
if (folio_mm_id(folio, 0) == mm_id) {
folio->_mm_id_mapcount[0] -= diff;
if (folio->_mm_id_mapcount[0] >= 0)
goto out;
folio->_mm_id_mapcount[0] = -1;
folio_set_mm_id(folio, 0, MM_ID_DUMMY);
} else if (folio_mm_id(folio, 1) == mm_id) {
folio->_mm_id_mapcount[1] -= diff;
if (folio->_mm_id_mapcount[1] >= 0)
goto out;
folio->_mm_id_mapcount[1] = -1;
folio_set_mm_id(folio, 1, MM_ID_DUMMY);
}
/*
* If one MM slot owns all mappings, the folio is mapped exclusively.
* Note that if the folio is now unmapped (new_mapcount_val == -1), both
* slots must be free (mapcount == -1), and we'll also mark it as
* exclusive.
*/
if (folio->_mm_id_mapcount[0] == new_mapcount_val ||
folio->_mm_id_mapcount[1] == new_mapcount_val)
folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT;
out:
folio_unlock_large_mapcount(folio);
return new_mapcount_val + 1;
}
#define folio_sub_large_mapcount folio_sub_return_large_mapcount
#else /* !CONFIG_MM_ID */
/*
* See __folio_rmap_sanity_checks(), we might map large folios even without
* CONFIG_TRANSPARENT_HUGEPAGE. We'll keep that working for now.
*/
static inline void folio_set_large_mapcount(struct folio *folio, int mapcount,
struct vm_area_struct *vma)
{
/* Note: mapcounts start at -1. */
atomic_set(&folio->_large_mapcount, mapcount - 1);
}
static inline void folio_add_large_mapcount(struct folio *folio,
int diff, struct vm_area_struct *vma)
{
atomic_add(diff, &folio->_large_mapcount);
}
static inline int folio_add_return_large_mapcount(struct folio *folio,
int diff, struct vm_area_struct *vma)
{
BUILD_BUG();
}
static inline void folio_sub_large_mapcount(struct folio *folio,
int diff, struct vm_area_struct *vma)
{
atomic_sub(diff, &folio->_large_mapcount);
}
static inline int folio_sub_return_large_mapcount(struct folio *folio,
int diff, struct vm_area_struct *vma)
{
BUILD_BUG();
}
#endif /* CONFIG_MM_ID */
#define folio_inc_large_mapcount(folio, vma) \
folio_add_large_mapcount(folio, 1, vma)
#define folio_inc_return_large_mapcount(folio, vma) \
folio_add_return_large_mapcount(folio, 1, vma)
#define folio_dec_large_mapcount(folio, vma) \
folio_sub_large_mapcount(folio, 1, vma)
#define folio_dec_return_large_mapcount(folio, vma) \
folio_sub_return_large_mapcount(folio, 1, vma)
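/*
 * Illustrative usage sketch (not part of the API; the real callers live in
 * mm/rmap.c and run under the page table lock): bump the counters when
 * mapping @nr_pages PTEs of a large folio into @vma and drop them again on
 * unmap. The "_return" variants (available with CONFIG_MM_ID) additionally
 * report the total large mapcount after the update.
 *
 *	folio_add_large_mapcount(folio, nr_pages, vma);
 *	// ... install the PTEs ...
 *
 *	// later, when tearing the mappings down again:
 *	folio_sub_large_mapcount(folio, nr_pages, vma);
 */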
/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;
/*
* No special request: A mapped anonymous (sub)page is possibly shared between
* processes.
*/
#define RMAP_NONE ((__force rmap_t)0)
/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
static __always_inline void __folio_rmap_sanity_checks(const struct folio *folio,
const struct page *page, int nr_pages, enum pgtable_level level)
{
/* hugetlb folios are handled separately. */
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
/* When (un)mapping zeropages, we should never touch ref+mapcount. */
VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);
/*
* TODO: we get driver-allocated folios that have nothing to do with
* the rmap using vm_insert_page(); therefore, we cannot assume that
* folio_test_large_rmappable() holds for large folios. We should
* handle any desired mapcount+stats accounting for these folios in
* VM_MIXEDMAP VMAs separately, and then sanity-check here that
* we really only get rmappable folios.
*/
VM_WARN_ON_ONCE(nr_pages <= 0);
VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
switch (level) {
case PGTABLE_LEVEL_PTE:
break;
case PGTABLE_LEVEL_PMD:
/*
* We don't support folios larger than a single PMD yet. So
* when PGTABLE_LEVEL_PMD is set, we assume that we are creating
* a single "entire" mapping of the folio.
*/
VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
break;
case PGTABLE_LEVEL_PUD:
/*
* Assume that we are creating a single "entire" mapping of the
* folio.
*/
VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PUD_NR, folio);
VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
break;
default:
BUILD_BUG();
}
/*
* Anon folios must have an associated live anon_vma as long as they're
* mapped into userspace.
* Note that the atomic_read() mainly does two things:
*
* 1. In KASAN builds with CONFIG_SLUB_RCU_DEBUG, it causes KASAN to
* check that the associated anon_vma has not yet been freed (subject
* to KASAN's usual limitations). This check will pass if the
* anon_vma's refcount has already dropped to 0 but an RCU grace
* period hasn't passed since then.
* 2. If the anon_vma has not yet been freed, it checks that the
* anon_vma still has a nonzero refcount (as opposed to being in the
* middle of an RCU delay for getting freed).
*/
if (folio_test_anon(folio) && !folio_test_ksm(folio)) {
unsigned long mapping = (unsigned long)folio->mapping;
struct anon_vma *anon_vma;
anon_vma = (void *)(mapping - FOLIO_MAPPING_ANON);
VM_WARN_ON_FOLIO(atomic_read(&anon_vma->refcount) == 0, folio);
}
}
/*
* rmap interfaces called when adding or removing pte of page
*/
void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
struct vm_area_struct *, unsigned long address, rmap_t flags);
#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
void folio_add_anon_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
struct vm_area_struct *);
#define folio_add_file_rmap_pte(folio, page, vma) \
folio_add_file_rmap_ptes(folio, page, 1, vma)
void folio_add_file_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *);
void folio_add_file_rmap_pud(struct folio *, struct page *,
struct vm_area_struct *);
void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
struct vm_area_struct *);
#define folio_remove_rmap_pte(folio, page, vma) \
folio_remove_rmap_ptes(folio, page, 1, vma)
void folio_remove_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *);
void folio_remove_rmap_pud(struct folio *, struct page *,
struct vm_area_struct *);
void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
/* See folio_try_dup_anon_rmap_*() */
static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
struct vm_area_struct *vma)
{
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
if (PageAnonExclusive(&folio->page)) {
if (unlikely(folio_needs_cow_for_dma(vma, folio)))
return -EBUSY;
ClearPageAnonExclusive(&folio->page);
}
atomic_inc(&folio->_entire_mapcount);
atomic_inc(&folio->_large_mapcount);
return 0;
}
/* See folio_try_share_anon_rmap_*() */
static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
{
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
/* Paired with the memory barrier in try_grab_folio(). */
if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
smp_mb();
if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
ClearPageAnonExclusive(&folio->page);
/*
* This is conceptually a smp_wmb() paired with the smp_rmb() in
* gup_must_unshare().
*/
if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
smp_mb__after_atomic();
return 0;
}
static inline void hugetlb_add_file_rmap(struct folio *folio)
{
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
atomic_inc(&folio->_entire_mapcount);
atomic_inc(&folio->_large_mapcount);
}
static inline void hugetlb_remove_rmap(struct folio *folio)
{
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
atomic_dec(&folio->_entire_mapcount);
atomic_dec(&folio->_large_mapcount);
}
static __always_inline void __folio_dup_file_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
enum pgtable_level level)
{
const int orig_nr_pages = nr_pages;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
case PGTABLE_LEVEL_PTE:
if (!folio_test_large(folio)) {
atomic_inc(&folio->_mapcount);
break;
}
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
do {
atomic_inc(&page->_mapcount);
} while (page++, --nr_pages > 0);
}
folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
case PGTABLE_LEVEL_PMD:
case PGTABLE_LEVEL_PUD:
atomic_inc(&folio->_entire_mapcount);
folio_inc_large_mapcount(folio, dst_vma);
break;
default:
BUILD_BUG();
}
}
/**
* folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
* @folio: The folio to duplicate the mappings of
* @page: The first page to duplicate the mappings of
* @nr_pages: The number of pages of which the mapping will be duplicated
* @dst_vma: The destination vm area
*
* The page range of the folio is defined by [page, page + nr_pages)
*
* The caller needs to hold the page table lock.
*/
static inline void folio_dup_file_rmap_ptes(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
{
__folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);
}
static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
struct page *page, struct vm_area_struct *dst_vma)
{
__folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);
}
/**
* folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
* @folio: The folio to duplicate the mapping of
* @page: The first page to duplicate the mapping of
* @dst_vma: The destination vm area
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock.
*/
static inline void folio_dup_file_rmap_pmd(struct folio *folio,
struct page *page, struct vm_area_struct *dst_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
#endif
}
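/*
 * Usage sketch (simplified; compare the fork()-time PTE copy path in
 * mm/memory.c): when duplicating a file-backed mapping for a child process,
 * the rmap state of the copied range is duplicated alongside the PTEs.
 * The page table lock is assumed to be held.
 *
 *	folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
 *	// ... the corresponding PTEs are then installed in the child ...
 */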
static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, enum pgtable_level level)
{
const int orig_nr_pages = nr_pages;
bool maybe_pinned;
int i;
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
/*
* If this folio may have been pinned by the parent process,
* don't allow duplicating the mappings; instead require, e.g.,
* copying the subpage immediately for the child so that we'll always
* guarantee the pinned folio won't be randomly replaced in the
* future on write faults.
*/
maybe_pinned = likely(!folio_is_device_private(folio)) &&
unlikely(folio_needs_cow_for_dma(src_vma, folio));
/*
* No need to check+clear for already shared PTEs/PMDs of the
* folio. But if any page is PageAnonExclusive, we must fall back to
* copying if the folio may be pinned.
*/
switch (level) {
case PGTABLE_LEVEL_PTE:
if (unlikely(maybe_pinned)) {
for (i = 0; i < nr_pages; i++)
if (PageAnonExclusive(page + i))
return -EBUSY;
}
if (!folio_test_large(folio)) {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
atomic_inc(&folio->_mapcount);
break;
}
do {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
atomic_inc(&page->_mapcount);
} while (page++, --nr_pages > 0);
folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
case PGTABLE_LEVEL_PMD:
case PGTABLE_LEVEL_PUD:
if (PageAnonExclusive(page)) {
if (unlikely(maybe_pinned))
return -EBUSY;
ClearPageAnonExclusive(page);
}
atomic_inc(&folio->_entire_mapcount);
folio_inc_large_mapcount(folio, dst_vma);
break;
default:
BUILD_BUG();
}
return 0;
}
/**
* folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
* of a folio
* @folio: The folio to duplicate the mappings of
* @page: The first page to duplicate the mappings of
* @nr_pages: The number of pages of which the mapping will be duplicated
* @dst_vma: The destination vm area
* @src_vma: The vm area from which the mappings are duplicated
*
* The page range of the folio is defined by [page, page + nr_pages)
*
* The caller needs to hold the page table lock and the
* vma->vma_mm->write_protect_seq.
*
* Duplicating the mappings can only fail if the folio may be pinned; device
* private folios cannot get pinned and consequently this function cannot fail
* for them.
*
* If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in
* the parent and the child. They must *not* be writable after this call
* succeeded.
*
* Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
*/
static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{
return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
src_vma, PGTABLE_LEVEL_PTE);
}
static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
struct page *page, struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{
return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
PGTABLE_LEVEL_PTE);
}
/**
* folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
* of a folio
* @folio: The folio to duplicate the mapping of
* @page: The first page to duplicate the mapping of
* @dst_vma: The destination vm area
* @src_vma: The vm area from which the mapping is duplicated
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock and the
* vma->vma_mm->write_protect_seq.
*
* Duplicating the mapping can only fail if the folio may be pinned; device
* private folios cannot get pinned and consequently this function cannot fail
* for them.
*
* If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
* the parent and the child. They must *not* be writable after this call
* succeeded.
*
* Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
*/
static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
struct page *page, struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
src_vma, PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
return -EBUSY;
#endif
}
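/*
 * Fork-time usage sketch (simplified; compare the anon PTE copy path in
 * mm/memory.c): try to duplicate the mapping for the child and fall back
 * to copying the page when the folio may be pinned. copy_for_child() is a
 * hypothetical stand-in for that fallback.
 *
 *	if (folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma)) {
 *		// -EBUSY: folio may be DMA-pinned, give the child its own copy
 *		return copy_for_child(...);
 *	}
 *	// success: the PTE must stay read-only in both parent and child
 */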
static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
struct page *page, int nr_pages, enum pgtable_level level)
{
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
/* device private folios cannot get pinned via GUP. */
if (unlikely(folio_is_device_private(folio))) {
ClearPageAnonExclusive(page);
return 0;
}
/*
* We have to make sure that when we clear PageAnonExclusive, that
* the page is not pinned and that concurrent GUP-fast won't succeed in
* concurrently pinning the page.
*
* Conceptually, PageAnonExclusive clearing consists of:
* (A1) Clear PTE
* (A2) Check if the page is pinned; back off if so.
* (A3) Clear PageAnonExclusive
* (A4) Restore PTE (optional, but certainly not writable)
*
* When clearing PageAnonExclusive, we cannot possibly map the page
* writable again, because anon pages that may be shared must never
* be writable. So in any case, if the PTE was writable it cannot
* be writable anymore afterwards and there would be a PTE change. Only
* if the PTE wasn't writable, there might not be a PTE change.
*
* Conceptually, GUP-fast pinning of an anon page consists of:
* (B1) Read the PTE
* (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
* (B3) Pin the mapped page
* (B4) Check if the PTE changed by re-reading it; back off if so.
* (B5) If the original PTE is not writable, check if
* PageAnonExclusive is not set; back off if so.
*
* If the PTE was writable, we only have to make sure that GUP-fast
* observes a PTE change and properly backs off.
*
* If the PTE was not writable, we have to make sure that GUP-fast either
* detects a (temporary) PTE change or that PageAnonExclusive is cleared
* and properly backs off.
*
* Consequently, when clearing PageAnonExclusive(), we have to make
* sure that (A1), (A2)/(A3) and (A4) happen in the right memory
* order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
* and (B5) happen in the right memory order.
*
* We assume that there might not be a memory barrier after
* clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
* so we use explicit ones here.
*/
/* Paired with the memory barrier in try_grab_folio(). */
if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
smp_mb();
if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
ClearPageAnonExclusive(page);
/*
* This is conceptually a smp_wmb() paired with the smp_rmb() in
* gup_must_unshare().
*/
if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
smp_mb__after_atomic();
return 0;
}
/**
* folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
* mapped by a PTE possibly shared to prepare
* for KSM or temporary unmapping
* @folio: The folio to share a mapping of
* @page: The mapped exclusive page
*
* The caller needs to hold the page table lock and has to have the page table
* entries cleared/invalidated.
*
* This is similar to folio_try_dup_anon_rmap_pte(), however, not used during
* fork() to duplicate mappings, but instead to prepare for KSM or temporarily
* unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte().
*
* Marking the mapped page shared can only fail if the folio may be pinned;
* device private folios cannot get pinned and consequently this function cannot
* fail.
*
* Returns 0 if marking the mapped page possibly shared succeeded. Returns
* -EBUSY otherwise.
*/
static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
struct page *page)
{
return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
}
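/*
 * Usage sketch (simplified; compare the unmap paths in mm/rmap.c): before
 * replacing an exclusive anon PTE with a swap or migration entry, the PTE
 * is cleared first, then the page is marked possibly shared; -EBUSY means
 * the folio may be pinned and the unmap has to be aborted.
 *
 *	pteval = ptep_clear_flush(vma, address, ptep);
 *	if (folio_try_share_anon_rmap_pte(folio, page)) {
 *		set_pte_at(mm, address, ptep, pteval);	// restore and bail out
 *		goto abort;
 *	}
 */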
/**
* folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
* range mapped by a PMD possibly shared to
* prepare for temporary unmapping
* @folio: The folio to share the mapping of
* @page: The first page to share the mapping of
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock and has to have the page table
* entries cleared/invalidated.
*
* This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during
* fork() to duplicate a mapping, but instead to prepare for temporarily
* unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
*
* Marking the mapped pages shared can only fail if the folio may be pinned;
* device private folios cannot get pinned and consequently this function cannot
* fail.
*
* Returns 0 if marking the mapped pages possibly shared succeeded. Returns
* -EBUSY otherwise.
*/
static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
return -EBUSY;
#endif
}
/*
* Called from mm/vmscan.c to handle paging out
*/
int folio_referenced(struct folio *, int is_locked,
struct mem_cgroup *memcg, vm_flags_t *vm_flags);
void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);
struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
void *owner, struct folio **foliop);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
/* Result flags */
/* The page is mapped across page table boundary */
#define PVMW_PGTABLE_CROSSED (1 << 16)
struct page_vma_mapped_walk {
unsigned long pfn;
unsigned long nr_pages;
pgoff_t pgoff;
struct vm_area_struct *vma;
unsigned long address;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
unsigned int flags;
};
#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
struct page_vma_mapped_walk name = { \
.pfn = folio_pfn(_folio), \
.nr_pages = folio_nr_pages(_folio), \
.pgoff = folio_pgoff(_folio), \
.vma = _vma, \
.address = _address, \
.flags = _flags, \
}
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
}
/**
* page_vma_mapped_walk_restart - Restart the page table walk.
* @pvmw: Pointer to struct page_vma_mapped_walk.
*
* It restarts the page table walk when changes occur in the page
* table, such as splitting a PMD. Ensures that the PTL held during
* the previous walk is released and resets the state to allow for
* a new walk starting at the current address stored in pvmw->address.
*/
static inline void
page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
{
WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte);
if (likely(pvmw->ptl))
spin_unlock(pvmw->ptl);
else
WARN_ON_ONCE(1);
pvmw->ptl = NULL;
pvmw->pmd = NULL;
pvmw->pte = NULL;
}
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
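/*
 * Typical walk pattern (sketch; see the callers in mm/rmap.c): visit every
 * page table entry that maps @folio within @vma, starting at @address. When
 * page_vma_mapped_walk() returns false it has already dropped the PTL, so
 * page_vma_mapped_walk_done() is only needed when breaking out early.
 *
 *	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		// pvmw.pte (or pvmw.pmd) and pvmw.address describe one mapping
 *		if (done_early) {	// hypothetical stop condition
 *			page_vma_mapped_walk_done(&pvmw);
 *			break;
 *		}
 *	}
 */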
unsigned long page_address_in_vma(const struct folio *folio,
const struct page *, const struct vm_area_struct *);
/*
* Cleans the PTEs of shared mappings.
* (and since clean PTEs should also be readonly, write protects them too)
*
* returns the number of cleaned PTEs.
*/
int folio_mkclean(struct folio *);
int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
unsigned long pfn, unsigned long nr_pages);
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
enum rmp_flags {
RMP_LOCKED = 1 << 0,
RMP_USE_SHARED_ZEROPAGE = 1 << 1,
};
void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
*
* arg: passed to rmap_one() and invalid_vma()
* try_lock: bail out if the rmap lock is contended
* contended: indicate the rmap traversal bailed out due to lock contention
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
* anon_lock: for getting anon_lock by optimized way rather than default
* invalid_vma: for skipping uninterested vma
*/
struct rmap_walk_control {
void *arg;
bool try_lock;
bool contended;
/*
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct folio *folio);
struct anon_vma *(*anon_lock)(const struct folio *folio,
struct rmap_walk_control *rwc);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
struct rmap_walk_control *rwc);
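/*
 * Minimal rmap walk sketch (hypothetical callback and cookie names): visit
 * every VMA that currently maps @folio; returning false from the callback
 * stops the traversal.
 *
 *	static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
 *				unsigned long addr, void *arg)
 *	{
 *		// inspect the mapping of @folio at @addr in @vma
 *		return true;	// keep walking
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = my_cookie,
 *	};
 *	rmap_walk(folio, &rwc);
 */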
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
vm_flags_t *vm_flags)
{
*vm_flags = 0;
return 0;
}
static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
}
static inline int folio_mkclean(struct folio *folio)
{
return 0;
}
#endif /* CONFIG_MMU */
#endif /* _LINUX_RMAP_H */
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Some corrections by tytso.
*/
/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
* lookup logic.
*/
/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/wordpart.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/init_task.h>
#include <linux/uaccess.h>
#include "internal.h"
#include "mount.h"
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
* were necessary because of omirr. The reason is that omirr needs
* to know the _real_ pathname, not the user-supplied one, in case
* of symlinks (and also when transname replacements occur).
*
* The new code replaces the old recursive symlink resolution with
* an iterative one (in case of non-nested symlink chains). It does
* this with calls to <fs>_follow_link().
* As a side effect, dir_namei(), _namei() and follow_link() are now
* replaced with a single function lookup_dentry() that can handle all
* the special cases of the former code.
*
* With the new dcache, the pathname is stored at each inode, at least as
* long as the refcount of the inode is positive. As a side effect, the
* size of the dcache depends on the inode cache and thus is dynamic.
*
* [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
* resolution to correspond with current state of the code.
*
* Note that the symlink resolution is not *completely* iterative.
* There is still a significant amount of tail- and mid- recursion in
* the algorithm. Also, note that <fs>_readlink() is not used in
* lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
* may return different results than <fs>_follow_link(). Many virtual
* filesystems (including /proc) exhibit this behavior.
*/
/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
* New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
* and the name already exists in form of a symlink, try to create the new
* name indicated by the symlink. The old code always complained that the
* name already exists, due to not following the symlink even if its target
* is nonexistent. The new semantics affects also mknod() and link() when
* the name is a symlink pointing to a non-existent name.
*
* I don't know which semantics is the right one, since I have no access
* to standards. But I found by trial that HP-UX 9.0 has the full "new"
* semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
* "old" one. Personally, I think the new semantics is much more logical.
* Note that "ln old new" where "new" is a symlink pointing to a non-existing
* file does succeed in both HP-UX and SunOs, but not in Solaris
* and in the old Linux semantics.
*/
/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
* semantics. See the comments in "open_namei" and "do_link" below.
*
* [10-Sep-98 Alan Modra] Another symlink change.
*/
/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
* inside the path - always follow.
* in the last component in creation/removal/renaming - never follow.
* if LOOKUP_FOLLOW passed - follow.
* if the pathname has trailing slashes - follow.
* otherwise - don't follow.
* (applied in that order).
*
* [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
* restored for 2.4. This is the last surviving part of old 4.2BSD bug.
* During the 2.4 we need to fix the userland stuff depending on it -
* hopefully we will be able to get rid of that wart in 2.5. So far only
* XEmacs seems to be relying on it...
*/
/*
* [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
* implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
* any extra contention...
*/
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
* kernel data space before using them..
*
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
* PATH_MAX includes the nul terminator --RR.
*/
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
static inline void initname(struct filename *name, const char __user *uptr)
{
name->uptr = uptr;
name->aname = NULL;
atomic_set(&name->refcnt, 1);
}
struct filename *
getname_flags(const char __user *filename, int flags)
{
struct filename *result;
char *kname;
int len;
result = audit_reusename(filename);
if (result)
return result;
result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
/*
* First, try to embed the struct filename inside the names_cache
* allocation
*/
kname = (char *)result->iname;
result->name = kname;
len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
/*
* Handle both empty path and copy failure in one go.
*/
if (unlikely(len <= 0)) {
if (unlikely(len < 0)) {
__putname(result);
return ERR_PTR(len);
}
/* The empty path is special. */
if (!(flags & LOOKUP_EMPTY)) {
__putname(result);
return ERR_PTR(-ENOENT);
}
}
/*
* Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
* separate struct filename so we can dedicate the entire
* names_cache allocation for the pathname, and re-do the copy from
* userland.
*/
if (unlikely(len == EMBEDDED_NAME_MAX)) {
const size_t size = offsetof(struct filename, iname[1]);
kname = (char *)result;
/*
* size is chosen so that we can guarantee that
* result->iname[0] is within the same object and that
* kname can't be equal to result->iname, no matter what.
*/
result = kzalloc(size, GFP_KERNEL);
if (unlikely(!result)) {
__putname(kname);
return ERR_PTR(-ENOMEM);
}
result->name = kname;
len = strncpy_from_user(kname, filename, PATH_MAX);
if (unlikely(len < 0)) {
__putname(kname);
kfree(result);
return ERR_PTR(len);
}
/* The empty path is special. */
if (unlikely(!len) && !(flags & LOOKUP_EMPTY)) {
__putname(kname);
kfree(result);
return ERR_PTR(-ENOENT);
}
if (unlikely(len == PATH_MAX)) {
__putname(kname);
kfree(result);
return ERR_PTR(-ENAMETOOLONG);
}
}
initname(result, filename);
audit_getname(result);
return result;
}
struct filename *getname_uflags(const char __user *filename, int uflags)
{
int flags = (uflags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
return getname_flags(filename, flags);
}
struct filename *__getname_maybe_null(const char __user *pathname)
{
struct filename *name;
char c;
/* try to save on allocations; loss on um, though */
if (get_user(c, pathname))
return ERR_PTR(-EFAULT);
if (!c)
return NULL;
name = getname_flags(pathname, LOOKUP_EMPTY);
if (!IS_ERR(name) && !(name->name[0])) {
putname(name);
name = NULL;
}
return name;
}
struct filename *getname_kernel(const char * filename)
{
struct filename *result;
int len = strlen(filename) + 1;
result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
if (len <= EMBEDDED_NAME_MAX) {
result->name = (char *)result->iname;
} else if (len <= PATH_MAX) {
const size_t size = offsetof(struct filename, iname[1]);
struct filename *tmp;
tmp = kmalloc(size, GFP_KERNEL);
if (unlikely(!tmp)) {
__putname(result);
return ERR_PTR(-ENOMEM);
}
tmp->name = (char *)result;
result = tmp;
} else {
__putname(result);
return ERR_PTR(-ENAMETOOLONG);
}
memcpy((char *)result->name, filename, len);
initname(result, NULL);
audit_getname(result);
return result;
}
EXPORT_SYMBOL(getname_kernel);
void putname(struct filename *name)
{
int refcnt;
if (IS_ERR_OR_NULL(name))
return;
refcnt = atomic_read(&name->refcnt);
if (refcnt != 1) {
if (WARN_ON_ONCE(!refcnt))
return;
if (!atomic_dec_and_test(&name->refcnt))
return;
}
if (name->name != name->iname) {
__putname(name->name);
kfree(name);
} else
__putname(name);
}
EXPORT_SYMBOL(putname);
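/*
 * Illustrative pairing (sketch): callers copy a user-supplied pathname once
 * via getname_flags() and release it with putname() when done, mirroring
 * the syscall entry points later in this file.
 *
 *	struct filename *name = getname_flags(pathname, lookup_flags);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	// ... work with name->name ...
 *	putname(name);
 */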
/**
* check_acl - perform ACL permission checking
* @idmap: idmap of the mount the inode was found from
* @inode: inode to check permissions on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
*
* This function performs the ACL permission checking. Since this function
* retrieves POSIX acls it needs to know whether it is called from a blocking or
* non-blocking context and thus cares about the MAY_NOT_BLOCK bit.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
static int check_acl(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *acl;
if (mask & MAY_NOT_BLOCK) {
acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
if (!acl)
return -EAGAIN;
/* no ->get_inode_acl() calls in RCU mode... */
if (is_uncached_acl(acl))
return -ECHILD;
return posix_acl_permission(idmap, inode, acl, mask);
}
acl = get_inode_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
int error = posix_acl_permission(idmap, inode, acl, mask);
posix_acl_release(acl);
return error;
}
#endif
return -EAGAIN;
}
/*
* Very quick optimistic "we know we have no ACL's" check.
*
* Note that this is purely for ACL_TYPE_ACCESS, and purely
* for the "we have cached that there are no ACLs" case.
*
* If this returns true, we know there are no ACLs. But if
* it returns false, we might still not have ACLs (it could
* be the is_uncached_acl() case).
*/
static inline bool no_acl_inode(struct inode *inode)
{
#ifdef CONFIG_FS_POSIX_ACL
return likely(!READ_ONCE(inode->i_acl));
#else
return true;
#endif
}
/**
* acl_permission_check - perform basic UNIX permission checking
* @idmap: idmap of the mount the inode was found from
* @inode: inode to check permissions on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
*
* This function performs the basic UNIX permission checking. Since this
* function may retrieve POSIX acls it needs to know whether it is called from a
* blocking or non-blocking context and thus cares about the MAY_NOT_BLOCK bit.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
static int acl_permission_check(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
vfsuid_t vfsuid;
/*
* Common cheap case: everybody has the requested
* rights, and there are no ACLs to check. No need
* to do any owner/group checks in that case.
*
* - 'mask&7' is the requested permission bit set
* - multiplying by 0111 spreads them out to all of ugo
* - '& ~mode' looks for missing inode permission bits
* - the '!' is for "no missing permissions"
*
* After that, we just need to check that there are no
* ACL's on the inode - do the 'IS_POSIXACL()' check last
* because it will dereference the ->i_sb pointer and we
* want to avoid that if at all possible.
*/
if (!((mask & 7) * 0111 & ~mode)) {
if (no_acl_inode(inode))
return 0;
if (!IS_POSIXACL(inode))
return 0;
}
/* Are we the owner? If so, ACL's don't matter */
vfsuid = i_uid_into_vfsuid(idmap, inode);
if (likely(vfsuid_eq_kuid(vfsuid, current_fsuid()))) {
mask &= 7;
mode >>= 6;
return (mask & ~mode) ? -EACCES : 0;
}
/* Do we have ACL's? */
if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
int error = check_acl(idmap, inode, mask);
if (error != -EAGAIN)
return error;
}
/* Only RWX matters for group/other mode bits */
mask &= 7;
/*
* Are the group permissions different from
* the other permissions in the bits we care
* about? Need to check group ownership if so.
*/
if (mask & (mode ^ (mode >> 3))) {
vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
if (vfsgid_in_group_p(vfsgid))
mode >>= 3;
}
/* Bits in 'mode' clear that we require? */
return (mask & ~mode) ? -EACCES : 0;
}
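/*
 * Worked example of the cheap fast path above (illustrative numbers): a
 * MAY_READ (04) request against a 0644 inode.
 *
 *	mask & 7           = 004
 *	(mask & 7) * 0111  = 0444	(request spread to user/group/other)
 *	0444 & ~0644       = 0		(no requested bit is missing anywhere)
 *
 * so, provided the inode has no ACLs, access is granted without any
 * owner/group lookup. For MAY_WRITE (02) the product is 0222 and
 * 0222 & ~0644 = 0022, which is non-zero, so the full owner/group
 * checks in acl_permission_check() run instead.
 */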
/**
* generic_permission - check for access rights on a Posix-like filesystem
* @idmap: idmap of the mount the inode was found from
* @inode: inode to check access rights for
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC,
* %MAY_NOT_BLOCK ...)
*
* Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things.
*
* generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
* request cannot be satisfied (eg. requires blocking or too much complexity).
* It would then be called again in ref-walk mode.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int generic_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
int ret;
/*
* Do the basic permission checks.
*/
ret = acl_permission_check(idmap, inode, mask);
if (ret != -EACCES)
return ret;
if (S_ISDIR(inode->i_mode)) {
/* DACs are overridable for directories */
if (!(mask & MAY_WRITE))
if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_READ_SEARCH))
return 0;
if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_OVERRIDE))
return 0;
return -EACCES;
}
/*
* Searching includes executable on directories, else just read.
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ)
if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_READ_SEARCH))
return 0;
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable when there is
* at least one exec bit set.
*/
if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
if (capable_wrt_inode_uidgid(idmap, inode,
CAP_DAC_OVERRIDE))
return 0;
return -EACCES;
}
EXPORT_SYMBOL(generic_permission);
/**
* do_inode_permission - UNIX permission checking
* @idmap: idmap of the mount the inode was found from
* @inode: inode to check permissions on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
*
* We _really_ want to just do "generic_permission()" without
* even looking at the inode->i_op values. So we keep a cache
* flag in inode->i_opflags, that says "this has no special
* permission function, use the fast case".
*/
static inline int do_inode_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
if (likely(inode->i_op->permission))
return inode->i_op->permission(idmap, inode, mask);
/* This gets set once for the inode lifetime */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_FASTPERM;
spin_unlock(&inode->i_lock);
}
return generic_permission(idmap, inode, mask);
}
/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
*/
static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
{
if (unlikely(mask & MAY_WRITE)) {
umode_t mode = inode->i_mode;
/* Nobody gets write access to a read-only fs. */
if (sb_rdonly(sb) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
}
return 0;
}
/**
* inode_permission - Check for access rights to a given inode
* @idmap: idmap of the mount the inode was found from
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Check for read/write/execute permissions on an inode. We use fs[ug]id for
* this, letting us set arbitrary permissions for filesystem access without
* changing the "normal" UIDs which are used for other things.
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
int inode_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
int retval;
retval = sb_permission(inode->i_sb, inode, mask);
if (unlikely(retval))
return retval;
if (unlikely(mask & MAY_WRITE)) {
/*
* Nobody gets write access to an immutable file.
*/
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
/*
* Updating mtime will likely cause i_uid and i_gid to be
* written back improperly if their true value is unknown
* to the vfs.
*/
if (unlikely(HAS_UNMAPPED_ID(idmap, inode)))
return -EACCES;
}
retval = do_inode_permission(idmap, inode, mask);
if (unlikely(retval))
return retval;
retval = devcgroup_inode_permission(inode, mask);
if (unlikely(retval))
return retval;
return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);
/**
* path_get - get a reference to a path
* @path: path to get the reference to
*
* Given a path increment the reference count to the dentry and the vfsmount.
*/
void path_get(const struct path *path)
{
mntget(path->mnt);
dget(path->dentry);
}
EXPORT_SYMBOL(path_get);
/**
* path_put - put a reference to a path
* @path: path to put the reference to
*
* Given a path decrement the reference count to the dentry and the vfsmount.
*/
void path_put(const struct path *path)
{
dput(path->dentry);
mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);
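/*
 * Illustrative pairing (sketch): every path_get() must eventually be
 * balanced by a path_put() once the dentry/vfsmount pair is no longer
 * needed.
 *
 *	path_get(&nd->path);	// take extra references
 *	// ... use nd->path.dentry / nd->path.mnt ...
 *	path_put(&nd->path);	// drop them again
 */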
#define EMBEDDED_LEVELS 2
struct nameidata {
struct path path;
struct qstr last;
struct path root;
struct inode *inode; /* path.dentry.d_inode */
unsigned int flags, state;
unsigned seq, next_seq, m_seq, r_seq;
int last_type;
unsigned depth;
int total_link_count;
struct saved {
struct path link;
struct delayed_call done;
const char *name;
unsigned seq;
} *stack, internal[EMBEDDED_LEVELS];
struct filename *name;
const char *pathname;
struct nameidata *saved;
unsigned root_seq;
int dfd;
vfsuid_t dir_vfsuid;
umode_t dir_mode;
} __randomize_layout;
#define ND_ROOT_PRESET 1
#define ND_ROOT_GRABBED 2
#define ND_JUMPED 4
static void __set_nameidata(struct nameidata *p, int dfd, struct filename *name)
{
struct nameidata *old = current->nameidata;
p->stack = p->internal;
p->depth = 0;
p->dfd = dfd;
p->name = name;
p->pathname = likely(name) ? name->name : "";
p->path.mnt = NULL;
p->path.dentry = NULL;
p->total_link_count = old ? old->total_link_count : 0;
p->saved = old;
current->nameidata = p;
}
static inline void set_nameidata(struct nameidata *p, int dfd, struct filename *name,
const struct path *root)
{
__set_nameidata(p, dfd, name);
p->state = 0;
if (unlikely(root)) {
p->state = ND_ROOT_PRESET;
p->root = *root;
}
}
static void restore_nameidata(void)
{
struct nameidata *now = current->nameidata, *old = now->saved;
current->nameidata = old;
if (old)
old->total_link_count = now->total_link_count;
if (now->stack != now->internal)
kfree(now->stack);
}
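/*
 * Sketch of how the lookup entry points later in this file pair these
 * helpers: a struct nameidata lives on the caller's stack and is pushed
 * onto / popped off current->nameidata around the walk itself
 * (do_the_walk() stands in for the actual path_*at() helper used).
 *
 *	struct nameidata nd;
 *
 *	set_nameidata(&nd, dfd, name, root);
 *	retval = do_the_walk(&nd, flags, path);
 *	restore_nameidata();
 */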
static bool nd_alloc_stack(struct nameidata *nd)
{
struct saved *p;
p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
nd->flags & LOOKUP_RCU ? GFP_ATOMIC : GFP_KERNEL);
if (unlikely(!p))
return false;
memcpy(p, nd->internal, sizeof(nd->internal));
nd->stack = p;
return true;
}
/**
* path_connected - Verify that a dentry is below mnt.mnt_root
* @mnt: The mountpoint to check.
* @dentry: The dentry to check.
*
* Rename can sometimes move a file or directory outside of a bind
* mount, path_connected allows those cases to be detected.
*/
static bool path_connected(struct vfsmount *mnt, struct dentry *dentry)
{
struct super_block *sb = mnt->mnt_sb;
/* Bind mounts can have disconnected paths */
if (mnt->mnt_root == sb->s_root)
return true;
return is_subdir(dentry, mnt->mnt_root);
}
static void drop_links(struct nameidata *nd)
{
int i = nd->depth;
while (i--) {
struct saved *last = nd->stack + i;
do_delayed_call(&last->done);
clear_delayed_call(&last->done);
}
}
static void leave_rcu(struct nameidata *nd)
{
nd->flags &= ~LOOKUP_RCU;
nd->seq = nd->next_seq = 0;
rcu_read_unlock();
}
static void terminate_walk(struct nameidata *nd)
{
drop_links(nd);
if (!(nd->flags & LOOKUP_RCU)) {
int i;
path_put(&nd->path);
for (i = 0; i < nd->depth; i++)
path_put(&nd->stack[i].link);
if (nd->state & ND_ROOT_GRABBED) {
path_put(&nd->root);
nd->state &= ~ND_ROOT_GRABBED;
}
} else {
leave_rcu(nd);
}
nd->depth = 0;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
/* path_put is needed afterwards regardless of success or failure */
static bool __legitimize_path(struct path *path, unsigned seq, unsigned mseq)
{
int res = __legitimize_mnt(path->mnt, mseq);
if (unlikely(res)) {
if (res > 0)
path->mnt = NULL;
path->dentry = NULL;
return false;
}
if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) {
path->dentry = NULL;
return false;
}
return !read_seqcount_retry(&path->dentry->d_seq, seq);
}
static inline bool legitimize_path(struct nameidata *nd,
struct path *path, unsigned seq)
{
return __legitimize_path(path, seq, nd->m_seq);
}
static bool legitimize_links(struct nameidata *nd)
{
int i;
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
return false;
}
for (i = 0; i < nd->depth; i++) {
struct saved *last = nd->stack + i;
if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
drop_links(nd);
nd->depth = i + 1;
return false;
}
}
return true;
}
static bool legitimize_root(struct nameidata *nd)
{
/* Nothing to do if nd->root is zero or is managed by the VFS user. */
if (!nd->root.mnt || (nd->state & ND_ROOT_PRESET))
return true;
nd->state |= ND_ROOT_GRABBED;
return legitimize_path(nd, &nd->root, nd->root_seq);
}
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
* continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
* normal reference counts on dentries and vfsmounts to transition to ref-walk
* mode. Refcounts are grabbed at the last known good point before rcu-walk
* got stuck, so ref-walk may continue from there. If this is not successful
* (eg. a seqcount has changed), then failure is returned and it's up to caller
* to restart the path walk from the beginning in ref-walk mode.
*/
/**
* try_to_unlazy - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* Returns: true on success, false on failure
*
* try_to_unlazy attempts to legitimize the current nd->path and nd->root
* for ref-walk mode.
* Must be called from rcu-walk context.
* Nothing should touch nameidata between try_to_unlazy() failure and
* terminate_walk().
*/
static bool try_to_unlazy(struct nameidata *nd)
{
struct dentry *parent = nd->path.dentry;
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(!legitimize_links(nd)))
goto out1;
if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
goto out;
if (unlikely(!legitimize_root(nd)))
goto out;
leave_rcu(nd);
BUG_ON(nd->inode != parent->d_inode);
return true;
out1:
nd->path.mnt = NULL;
nd->path.dentry = NULL;
out:
leave_rcu(nd);
return false;
}
/**
* try_to_unlazy_next - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* @dentry: next dentry to step into
* Returns: true on success, false on failure
*
* Similar to try_to_unlazy(), but here we have the next dentry already
* picked by rcu-walk and want to legitimize that in addition to the current
* nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context.
* Nothing should touch nameidata between try_to_unlazy_next() failure and
* terminate_walk().
*/
static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry)
{
int res;
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(!legitimize_links(nd)))
goto out2;
res = __legitimize_mnt(nd->path.mnt, nd->m_seq);
if (unlikely(res)) {
if (res > 0)
goto out2;
goto out1;
}
if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref)))
goto out1;
/*
* We need to move both the parent and the dentry from the RCU domain
* to be properly refcounted. And the sequence number in the dentry
* validates *both* dentry counters, since we checked the sequence
* number of the parent after we got the child sequence number. So we
* know the parent must still be valid if the child sequence number is
* still valid.
*/
if (unlikely(!lockref_get_not_dead(&dentry->d_lockref)))
goto out;
if (read_seqcount_retry(&dentry->d_seq, nd->next_seq))
goto out_dput;
/*
* Sequence counts matched. Now make sure that the root is
* still valid and get it if required.
*/
if (unlikely(!legitimize_root(nd)))
goto out_dput;
leave_rcu(nd);
return true;
out2:
nd->path.mnt = NULL;
out1:
nd->path.dentry = NULL;
out:
leave_rcu(nd);
return false;
out_dput:
leave_rcu(nd);
dput(dentry);
return false;
}
static inline int d_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *dentry, unsigned int flags)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
return dentry->d_op->d_revalidate(dir, name, dentry, flags);
else
return 1;
}
/**
* complete_walk - successful completion of path walk
* @nd: pointer nameidata
*
* If we had been in RCU mode, drop out of it and legitimize nd->path.
* Revalidate the final result, unless we'd already done that during
* the path walk or the filesystem doesn't ask for it. Return 0 on
* success, -error on failure. In case of failure caller does not
* need to drop nd->path.
*/
static int complete_walk(struct nameidata *nd)
{
struct dentry *dentry = nd->path.dentry;
int status;
if (nd->flags & LOOKUP_RCU) {
/*
* We don't want to zero nd->root for scoped-lookups or
* externally-managed nd->root.
*/
if (!(nd->state & ND_ROOT_PRESET))
if (!(nd->flags & LOOKUP_IS_SCOPED))
nd->root.mnt = NULL;
nd->flags &= ~LOOKUP_CACHED;
if (!try_to_unlazy(nd))
return -ECHILD;
}
if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
/*
* While the guarantee of LOOKUP_IS_SCOPED is (roughly) "don't
* ever step outside the root during lookup" and should already
* be guaranteed by the rest of namei, we want to avoid a namei
* BUG resulting in userspace being given a path that was not
* scoped within the root at some point during the lookup.
*
* So, do a final sanity-check to make sure that in the
* worst-case scenario (a complete bypass of LOOKUP_IS_SCOPED)
* we won't silently return an fd completely outside of the
* requested root to userspace.
*
* Userspace could move the path outside the root after this
* check, but as discussed elsewhere this is not a concern (the
* resolved file was inside the root at some point).
*/
if (!path_is_under(&nd->path, &nd->root))
return -EXDEV;
}
if (likely(!(nd->state & ND_JUMPED)))
return 0;
if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
return 0;
status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
if (status > 0)
return 0;
if (!status)
status = -ESTALE;
return status;
}
static int set_root(struct nameidata *nd)
{
struct fs_struct *fs = current->fs;
/*
* Jumping to the real root in a scoped-lookup is a BUG in namei, but we
* still have to ensure it doesn't happen because it will cause a breakout
* from the dirfd.
*/
if (WARN_ON(nd->flags & LOOKUP_IS_SCOPED))
return -ENOTRECOVERABLE;
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
do {
seq = read_seqbegin(&fs->seq);
nd->root = fs->root;
nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
} while (read_seqretry(&fs->seq, seq));
} else {
get_fs_root(fs, &nd->root);
nd->state |= ND_ROOT_GRABBED;
}
return 0;
}
static int nd_jump_root(struct nameidata *nd)
{
if (unlikely(nd->flags & LOOKUP_BENEATH))
return -EXDEV;
if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
/* Absolute path arguments to path_init() are allowed. */
if (nd->path.mnt != NULL && nd->path.mnt != nd->root.mnt)
return -EXDEV;
}
if (!nd->root.mnt) {
int error = set_root(nd);
if (error)
return error;
}
if (nd->flags & LOOKUP_RCU) {
struct dentry *d;
nd->path = nd->root;
d = nd->path.dentry;
nd->inode = d->d_inode;
nd->seq = nd->root_seq;
if (read_seqcount_retry(&d->d_seq, nd->seq))
return -ECHILD;
} else {
path_put(&nd->path);
nd->path = nd->root;
path_get(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
nd->state |= ND_JUMPED;
return 0;
}
/*
* Helper to directly jump to a known parsed path from ->get_link;
* the caller must have taken a reference to the path beforehand.
*/
int nd_jump_link(const struct path *path)
{
int error = -ELOOP;
struct nameidata *nd = current->nameidata;
if (unlikely(nd->flags & LOOKUP_NO_MAGICLINKS))
goto err;
error = -EXDEV;
if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
if (nd->path.mnt != path->mnt)
goto err;
}
/* Not currently safe for scoped-lookups. */
if (unlikely(nd->flags & LOOKUP_IS_SCOPED))
goto err;
path_put(&nd->path);
nd->path = *path;
nd->inode = nd->path.dentry->d_inode;
nd->state |= ND_JUMPED;
return 0;
err:
path_put(path);
return error;
}
static inline void put_link(struct nameidata *nd)
{
struct saved *last = nd->stack + --nd->depth;
do_delayed_call(&last->done);
if (!(nd->flags & LOOKUP_RCU))
path_put(&last->link);
}
static int sysctl_protected_symlinks __read_mostly;
static int sysctl_protected_hardlinks __read_mostly;
static int sysctl_protected_fifos __read_mostly;
static int sysctl_protected_regular __read_mostly;
#ifdef CONFIG_SYSCTL
static const struct ctl_table namei_sysctls[] = {
{
.procname = "protected_symlinks",
.data = &sysctl_protected_symlinks,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "protected_hardlinks",
.data = &sysctl_protected_hardlinks,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "protected_fifos",
.data = &sysctl_protected_fifos,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{
.procname = "protected_regular",
.data = &sysctl_protected_regular,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
};
static int __init init_fs_namei_sysctls(void)
{
register_sysctl_init("fs", namei_sysctls);
return 0;
}
fs_initcall(init_fs_namei_sysctls);
#endif /* CONFIG_SYSCTL */
/**
* may_follow_link - Check symlink following for unsafe situations
* @nd: nameidata pathwalk data
* @inode: Used for idmapping.
*
* In the case of the sysctl_protected_symlinks sysctl being enabled,
* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
* in a sticky world-writable directory. This is to protect privileged
* processes from failing races against path names that may change out
* from under them by way of other users creating malicious symlinks.
* It will permit symlinks to be followed only when outside a sticky
* world-writable directory, or when the uid of the symlink and follower
* match, or when the directory owner matches the symlink's owner.
*
* Returns 0 if following the symlink is allowed, -ve on error.
*/
static inline int may_follow_link(struct nameidata *nd, const struct inode *inode)
{
struct mnt_idmap *idmap;
vfsuid_t vfsuid;
if (!sysctl_protected_symlinks)
return 0;
idmap = mnt_idmap(nd->path.mnt);
vfsuid = i_uid_into_vfsuid(idmap, inode);
/* Allowed if owner and follower match. */
if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
return 0;
/* Allowed if parent directory not sticky and world-writable. */
if ((nd->dir_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
return 0;
/* Allowed if parent directory and link owner match. */
if (vfsuid_valid(nd->dir_vfsuid) && vfsuid_eq(nd->dir_vfsuid, vfsuid))
return 0;
if (nd->flags & LOOKUP_RCU)
return -ECHILD;
audit_inode(nd->name, nd->stack[0].link.dentry, 0);
audit_log_path_denied(AUDIT_ANOM_LINK, "follow_link");
return -EACCES;
}
/**
* safe_hardlink_source - Check for safe hardlink conditions
* @idmap: idmap of the mount the inode was found from
* @inode: the source inode to hardlink from
*
* Return false if at least one of the following conditions holds:
* - inode is not a regular file
* - inode is setuid
* - inode is setgid and group-exec
* - access failure for read and write
*
* Otherwise returns true.
*/
static bool safe_hardlink_source(struct mnt_idmap *idmap,
struct inode *inode)
{
umode_t mode = inode->i_mode;
/* Special files should not get pinned to the filesystem. */
if (!S_ISREG(mode))
return false;
/* Setuid files should not get pinned to the filesystem. */
if (mode & S_ISUID)
return false;
/* Executable setgid files should not get pinned to the filesystem. */
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
return false;
/* Hardlinking to unreadable or unwritable sources is dangerous. */
if (inode_permission(idmap, inode, MAY_READ | MAY_WRITE))
return false;
return true;
}
/**
* may_linkat - Check permissions for creating a hardlink
* @idmap: idmap of the mount the inode was found from
* @link: the source to hardlink from
*
* Block hardlink when all of:
* - sysctl_protected_hardlinks enabled
* - fsuid does not match inode
* - hardlink source is unsafe (see safe_hardlink_source() above)
* - not CAP_FOWNER in a namespace with the inode owner uid mapped
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*
* Returns 0 if successful, -ve on error.
*/
int may_linkat(struct mnt_idmap *idmap, const struct path *link)
{
struct inode *inode = link->dentry->d_inode;
/* Inode writeback is not safe when the uid or gid are invalid. */
if (!vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
!vfsgid_valid(i_gid_into_vfsgid(idmap, inode)))
return -EOVERFLOW;
if (!sysctl_protected_hardlinks)
return 0;
/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
* otherwise, it must be a safe source.
*/
if (safe_hardlink_source(idmap, inode) ||
inode_owner_or_capable(idmap, inode))
return 0;
audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
return -EPERM;
}
/**
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
* @idmap: idmap of the mount the inode was found from
* @nd: nameidata pathwalk data
* @inode: the inode of the file to open
*
* Block an O_CREAT open of a FIFO (or a regular file) when:
* - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
* - the file already exists
* - we are in a sticky directory
* - we don't own the file
* - the owner of the directory doesn't own the file
* - the directory is world writable
* If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
* the directory doesn't have to be world writable: being group writable will
* be enough.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*
* Returns 0 if the open is allowed, -ve on error.
*/
static int may_create_in_sticky(struct mnt_idmap *idmap, struct nameidata *nd,
struct inode *const inode)
{
umode_t dir_mode = nd->dir_mode;
vfsuid_t dir_vfsuid = nd->dir_vfsuid, i_vfsuid;
if (likely(!(dir_mode & S_ISVTX)))
return 0;
if (S_ISREG(inode->i_mode) && !sysctl_protected_regular)
return 0;
if (S_ISFIFO(inode->i_mode) && !sysctl_protected_fifos)
return 0;
i_vfsuid = i_uid_into_vfsuid(idmap, inode);
if (vfsuid_eq(i_vfsuid, dir_vfsuid))
return 0;
if (vfsuid_eq_kuid(i_vfsuid, current_fsuid()))
return 0;
if (likely(dir_mode & 0002)) {
audit_log_path_denied(AUDIT_ANOM_CREAT, "sticky_create");
return -EACCES;
}
if (dir_mode & 0020) {
if (sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) {
audit_log_path_denied(AUDIT_ANOM_CREAT,
"sticky_create_fifo");
return -EACCES;
}
if (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode)) {
audit_log_path_denied(AUDIT_ANOM_CREAT,
"sticky_create_regular");
return -EACCES;
}
}
return 0;
}
/*
* follow_up - Find the mountpoint of path's vfsmount
*
* Given a path, find the mountpoint of its source file system.
* Replace @path with the path of the mountpoint in the parent mount.
* Up is towards /.
*
* Return 1 if we went up a level and 0 if we were already at the
* root.
*/
int follow_up(struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
struct mount *parent;
struct dentry *mountpoint;
read_seqlock_excl(&mount_lock);
parent = mnt->mnt_parent;
if (parent == mnt) {
read_sequnlock_excl(&mount_lock);
return 0;
}
mntget(&parent->mnt);
mountpoint = dget(mnt->mnt_mountpoint);
read_sequnlock_excl(&mount_lock);
dput(path->dentry);
path->dentry = mountpoint;
mntput(path->mnt);
path->mnt = &parent->mnt;
return 1;
}
EXPORT_SYMBOL(follow_up);
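/*
* Illustrative sketch (hypothetical caller, not taken from this file): a
* caller that wants to climb all the way up the mount tree can simply keep
* calling follow_up() until it reports that the path is already at the root.
* Each successful step swaps the references held in @path for references to
* the parent mount and its mountpoint dentry.
*
*	static void climb_to_topmost_mountpoint(struct path *path)
*	{
*		while (follow_up(path))
*			;	/* stop once there is no parent mount left */
*	}
*/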
static bool choose_mountpoint_rcu(struct mount *m, const struct path *root,
struct path *path, unsigned *seqp)
{
while (mnt_has_parent(m)) {
struct dentry *mountpoint = m->mnt_mountpoint;
m = m->mnt_parent;
if (unlikely(root->dentry == mountpoint &&
root->mnt == &m->mnt))
break;
if (mountpoint != m->mnt.mnt_root) {
path->mnt = &m->mnt;
path->dentry = mountpoint;
*seqp = read_seqcount_begin(&mountpoint->d_seq);
return true;
}
}
return false;
}
static bool choose_mountpoint(struct mount *m, const struct path *root,
struct path *path)
{
bool found;
rcu_read_lock();
while (1) {
unsigned seq, mseq = read_seqbegin(&mount_lock);
found = choose_mountpoint_rcu(m, root, path, &seq);
if (unlikely(!found)) {
if (!read_seqretry(&mount_lock, mseq))
break;
} else {
if (likely(__legitimize_path(path, seq, mseq)))
break;
rcu_read_unlock();
path_put(path);
rcu_read_lock();
}
}
rcu_read_unlock();
return found;
}
/*
* Perform an automount
* - return -EISDIR to tell follow_managed() to stop and return the path we
* were called with.
*/
static int follow_automount(struct path *path, int *count, unsigned lookup_flags)
{
struct dentry *dentry = path->dentry;
/* We don't want to mount if someone's just doing a stat -
* unless they're stat'ing a directory and appended a '/' to
* the name.
*
* We do, however, want to mount if someone wants to open or
* create a file of any type under the mountpoint, wants to
* traverse through the mountpoint or wants to open the
* mounted directory. Also, autofs may mark negative dentries
* as being automount points. These will need the attentions
* of the daemon to instantiate them before they can be used.
*/
if (!(lookup_flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
dentry->d_inode)
return -EISDIR;
/* No need to trigger automounts if mountpoint crossing is disabled. */
if (lookup_flags & LOOKUP_NO_XDEV)
return -EXDEV;
if (count && (*count)++ >= MAXSYMLINKS)
return -ELOOP;
return finish_automount(dentry->d_op->d_automount(path), path);
}
/*
* mount traversal - out-of-line part. One note on ->d_flags accesses -
* dentries are pinned but not locked here, so negative dentry can go
* positive right under us. Use of smp_load_acquire() provides a barrier
* sufficient for ->d_inode and ->d_flags consistency.
*/
static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
int *count, unsigned lookup_flags)
{
struct vfsmount *mnt = path->mnt;
bool need_mntput = false;
int ret = 0;
while (flags & DCACHE_MANAGED_DENTRY) {
/* Allow the filesystem to manage the transit without i_rwsem
* being held. */
if (flags & DCACHE_MANAGE_TRANSIT) {
if (lookup_flags & LOOKUP_NO_XDEV) {
ret = -EXDEV;
break;
}
ret = path->dentry->d_op->d_manage(path, false);
flags = smp_load_acquire(&path->dentry->d_flags);
if (ret < 0)
break;
}
if (flags & DCACHE_MOUNTED) { // something's mounted on it..
struct vfsmount *mounted = lookup_mnt(path);
if (mounted) { // ... in our namespace
dput(path->dentry);
if (need_mntput)
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
// here we know it's positive
flags = path->dentry->d_flags;
need_mntput = true;
if (unlikely(lookup_flags & LOOKUP_NO_XDEV)) {
ret = -EXDEV;
break;
}
continue;
}
}
if (!(flags & DCACHE_NEED_AUTOMOUNT))
break;
// uncovered automount point
ret = follow_automount(path, count, lookup_flags);
flags = smp_load_acquire(&path->dentry->d_flags);
if (ret < 0)
break;
}
if (ret == -EISDIR)
ret = 0;
// possible if you race with several mount --move
if (need_mntput && path->mnt == mnt)
mntput(path->mnt);
if (!ret && unlikely(d_flags_negative(flags)))
ret = -ENOENT;
*jumped = need_mntput;
return ret;
}
static inline int traverse_mounts(struct path *path, bool *jumped,
int *count, unsigned lookup_flags)
{
unsigned flags = smp_load_acquire(&path->dentry->d_flags);
/* fastpath */
if (likely(!(flags & DCACHE_MANAGED_DENTRY))) {
*jumped = false;
if (unlikely(d_flags_negative(flags)))
return -ENOENT;
return 0;
}
return __traverse_mounts(path, flags, jumped, count, lookup_flags);
}
int follow_down_one(struct path *path)
{
struct vfsmount *mounted;
mounted = lookup_mnt(path);
if (mounted) {
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
return 1;
}
return 0;
}
EXPORT_SYMBOL(follow_down_one);
/*
* Follow down to the covering mount currently visible to userspace. At each
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
*/
int follow_down(struct path *path, unsigned int flags)
{
struct vfsmount *mnt = path->mnt;
bool jumped;
int ret = traverse_mounts(path, &jumped, NULL, flags);
if (path->mnt != mnt)
mntput(mnt);
return ret;
}
EXPORT_SYMBOL(follow_down);
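/*
* Illustrative sketch (hypothetical caller): given a struct path holding
* counted references, follow_down() advances it onto whatever is currently
* mounted there in the caller's namespace, dropping the old vfsmount
* reference if the path actually moved:
*
*	err = follow_down(&path, 0);
*	if (err)
*		;	/* e.g. -EXDEV if LOOKUP_NO_XDEV was passed in flags */
*
* Unlike follow_down_one(), this also handles automount points and
* DCACHE_MANAGE_TRANSIT dentries via traverse_mounts().
*/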
/*
* Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
* we meet a managed dentry that would need blocking.
*/
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path)
{
struct dentry *dentry = path->dentry;
unsigned int flags = dentry->d_flags;
if (likely(!(flags & DCACHE_MANAGED_DENTRY)))
return true;
if (unlikely(nd->flags & LOOKUP_NO_XDEV))
return false;
for (;;) {
/*
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
*/
if (unlikely(flags & DCACHE_MANAGE_TRANSIT)) {
int res = dentry->d_op->d_manage(path, true);
if (res)
return res == -EISDIR;
flags = dentry->d_flags;
}
if (flags & DCACHE_MOUNTED) {
struct mount *mounted = __lookup_mnt(path->mnt, dentry);
if (mounted) {
path->mnt = &mounted->mnt;
dentry = path->dentry = mounted->mnt.mnt_root;
nd->state |= ND_JUMPED;
nd->next_seq = read_seqcount_begin(&dentry->d_seq);
flags = dentry->d_flags;
// makes sure that non-RCU pathwalk could reach
// this state.
if (read_seqretry(&mount_lock, nd->m_seq))
return false;
continue;
}
if (read_seqretry(&mount_lock, nd->m_seq))
return false;
}
return !(flags & DCACHE_NEED_AUTOMOUNT);
}
}
static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
struct path *path)
{
bool jumped;
int ret;
path->mnt = nd->path.mnt;
path->dentry = dentry;
if (nd->flags & LOOKUP_RCU) {
unsigned int seq = nd->next_seq;
if (likely(__follow_mount_rcu(nd, path)))
return 0;
// *path and nd->next_seq might've been clobbered
path->mnt = nd->path.mnt;
path->dentry = dentry;
nd->next_seq = seq;
if (!try_to_unlazy_next(nd, dentry))
return -ECHILD;
}
ret = traverse_mounts(path, &jumped, &nd->total_link_count, nd->flags);
if (jumped)
nd->state |= ND_JUMPED;
if (unlikely(ret)) {
dput(path->dentry);
if (path->mnt != nd->path.mnt)
mntput(path->mnt);
}
return ret;
}
/*
* This looks up the name in dcache and possibly revalidates the found dentry.
* NULL is returned if the dentry does not exist in the cache.
*/
static struct dentry *lookup_dcache(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct dentry *dentry = d_lookup(dir, name);
if (dentry) {
int error = d_revalidate(dir->d_inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error)
d_invalidate(dentry);
dput(dentry);
return ERR_PTR(error);
}
}
return dentry;
}
/*
* Parent directory has inode locked exclusive. This is the one
* and only case when ->lookup() gets called on non-in-lookup
* dentries - as a matter of fact, this only gets called
* when the directory is guaranteed to have no in-lookup children
* at all.
* Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed.
* Will return -EEXIST if name is found and LOOKUP_EXCL was passed.
*/
struct dentry *lookup_one_qstr_excl(const struct qstr *name,
struct dentry *base, unsigned int flags)
{
struct dentry *dentry;
struct dentry *old;
struct inode *dir;
dentry = lookup_dcache(name, base, flags);
if (dentry)
goto found;
/* Don't create child dentry for a dead directory. */
dir = base->d_inode;
if (unlikely(IS_DEADDIR(dir)))
return ERR_PTR(-ENOENT);
dentry = d_alloc(base, name);
if (unlikely(!dentry))
return ERR_PTR(-ENOMEM);
old = dir->i_op->lookup(dir, dentry, flags);
if (unlikely(old)) {
dput(dentry);
dentry = old;
}
found:
if (IS_ERR(dentry))
return dentry;
if (d_is_negative(dentry) && !(flags & LOOKUP_CREATE)) {
dput(dentry);
return ERR_PTR(-ENOENT);
}
if (d_is_positive(dentry) && (flags & LOOKUP_EXCL)) {
dput(dentry);
return ERR_PTR(-EEXIST);
}
return dentry;
}
EXPORT_SYMBOL(lookup_one_qstr_excl);
/**
* lookup_fast - do fast lockless (but racy) lookup of a dentry
* @nd: current nameidata
*
* Do a fast, but racy lookup in the dcache for the given dentry, and
* revalidate it. Returns a valid dentry pointer or NULL if one wasn't
* found. On error, an ERR_PTR will be returned.
*
* If this function returns a valid dentry and the walk is no longer
* lazy, the dentry will carry a reference that must later be put. If
* RCU mode is still in force, then this is not the case and the dentry
* must be legitimized before use. If this returns NULL, then the walk
* will no longer be in RCU mode.
*/
static struct dentry *lookup_fast(struct nameidata *nd)
{
struct dentry *dentry, *parent = nd->path.dentry;
int status = 1;
/*
* Rename seqlock is not required here because on the off chance
* of a false negative due to a concurrent rename, the caller is
* going to fall back to a non-racy lookup.
*/
if (nd->flags & LOOKUP_RCU) {
dentry = __d_lookup_rcu(parent, &nd->last, &nd->next_seq);
if (unlikely(!dentry)) {
if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
return NULL;
}
/*
* This sequence count validates that the parent had no
* changes while we did the lookup of the dentry above.
*/
if (read_seqcount_retry(&parent->d_seq, nd->seq))
return ERR_PTR(-ECHILD);
status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
if (likely(status > 0))
return dentry;
if (!try_to_unlazy_next(nd, dentry))
return ERR_PTR(-ECHILD);
if (status == -ECHILD)
/* we'd been told to redo it in non-rcu mode */
status = d_revalidate(nd->inode, &nd->last,
dentry, nd->flags);
} else {
dentry = __d_lookup(parent, &nd->last);
if (unlikely(!dentry))
return NULL;
status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
}
if (unlikely(status <= 0)) {
if (!status)
d_invalidate(dentry);
dput(dentry);
return ERR_PTR(status);
}
return dentry;
}
/* Fast lookup failed, do it the slow way */
static struct dentry *__lookup_slow(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
return ERR_PTR(-ENOENT);
again:
dentry = d_alloc_parallel(dir, name, &wq);
if (IS_ERR(dentry))
return dentry;
if (unlikely(!d_in_lookup(dentry))) {
int error = d_revalidate(inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error) {
d_invalidate(dentry);
dput(dentry);
goto again;
}
dput(dentry);
dentry = ERR_PTR(error);
}
} else {
old = inode->i_op->lookup(inode, dentry, flags);
d_lookup_done(dentry);
if (unlikely(old)) {
dput(dentry);
dentry = old;
}
}
return dentry;
}
static struct dentry *lookup_slow(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct inode *inode = dir->d_inode;
struct dentry *res;
inode_lock_shared(inode);
res = __lookup_slow(name, dir, flags);
inode_unlock_shared(inode);
return res;
}
static struct dentry *lookup_slow_killable(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct inode *inode = dir->d_inode;
struct dentry *res;
if (inode_lock_shared_killable(inode))
return ERR_PTR(-EINTR);
res = __lookup_slow(name, dir, flags);
inode_unlock_shared(inode);
return res;
}
static inline int may_lookup(struct mnt_idmap *idmap,
struct nameidata *restrict nd)
{
int err, mask;
mask = nd->flags & LOOKUP_RCU ? MAY_NOT_BLOCK : 0;
err = inode_permission(idmap, nd->inode, mask | MAY_EXEC);
if (likely(!err))
return 0;
// If we failed, and we weren't in LOOKUP_RCU, it's final
if (!(nd->flags & LOOKUP_RCU))
return err;
// Drop out of RCU mode to make sure it wasn't transient
if (!try_to_unlazy(nd))
return -ECHILD; // redo it all non-lazy
if (err != -ECHILD) // hard error
return err;
return inode_permission(idmap, nd->inode, MAY_EXEC);
}
static int reserve_stack(struct nameidata *nd, struct path *link)
{
if (unlikely(nd->total_link_count++ >= MAXSYMLINKS))
return -ELOOP;
if (likely(nd->depth != EMBEDDED_LEVELS))
return 0;
if (likely(nd->stack != nd->internal))
return 0;
if (likely(nd_alloc_stack(nd)))
return 0;
if (nd->flags & LOOKUP_RCU) {
// we need to grab link before we do unlazy. And we can't skip
// unlazy even if we fail to grab the link - cleanup needs it
bool grabbed_link = legitimize_path(nd, link, nd->next_seq);
if (!try_to_unlazy(nd) || !grabbed_link)
return -ECHILD;
if (nd_alloc_stack(nd))
return 0;
}
return -ENOMEM;
}
enum {WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4};
static const char *pick_link(struct nameidata *nd, struct path *link,
struct inode *inode, int flags)
{
struct saved *last;
const char *res;
int error = reserve_stack(nd, link);
if (unlikely(error)) {
if (!(nd->flags & LOOKUP_RCU))
path_put(link);
return ERR_PTR(error);
}
last = nd->stack + nd->depth++;
last->link = *link;
clear_delayed_call(&last->done);
last->seq = nd->next_seq;
if (flags & WALK_TRAILING) {
error = may_follow_link(nd, inode);
if (unlikely(error))
return ERR_PTR(error);
}
if (unlikely(nd->flags & LOOKUP_NO_SYMLINKS) ||
unlikely(link->mnt->mnt_flags & MNT_NOSYMFOLLOW))
return ERR_PTR(-ELOOP);
if (unlikely(atime_needs_update(&last->link, inode))) {
if (nd->flags & LOOKUP_RCU) {
if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
}
touch_atime(&last->link);
cond_resched();
}
error = security_inode_follow_link(link->dentry, inode,
nd->flags & LOOKUP_RCU);
if (unlikely(error))
return ERR_PTR(error);
res = READ_ONCE(inode->i_link);
if (!res) {
const char * (*get)(struct dentry *, struct inode *,
struct delayed_call *);
get = inode->i_op->get_link;
if (nd->flags & LOOKUP_RCU) {
res = get(NULL, inode, &last->done);
if (res == ERR_PTR(-ECHILD) && try_to_unlazy(nd))
res = get(link->dentry, inode, &last->done);
} else {
res = get(link->dentry, inode, &last->done);
}
if (!res)
goto all_done;
if (IS_ERR(res))
return res;
}
if (*res == '/') {
error = nd_jump_root(nd);
if (unlikely(error))
return ERR_PTR(error);
while (unlikely(*++res == '/'))
;
}
if (*res)
return res;
all_done: // pure jump
put_link(nd);
return NULL;
}
/*
* Do we need to follow links? We _really_ want to be able
* to do this check without having to look at inode->i_op,
* so we keep a cache of "no, this doesn't need follow_link"
* for the common case.
*
* NOTE: dentry must be what nd->next_seq had been sampled from.
*/
static const char *step_into(struct nameidata *nd, int flags,
struct dentry *dentry)
{
struct path path;
struct inode *inode;
int err = handle_mounts(nd, dentry, &path);
if (err < 0)
return ERR_PTR(err);
inode = path.dentry->d_inode;
if (likely(!d_is_symlink(path.dentry)) ||
((flags & WALK_TRAILING) && !(nd->flags & LOOKUP_FOLLOW)) ||
(flags & WALK_NOFOLLOW)) {
/* not a symlink or should not follow */
if (nd->flags & LOOKUP_RCU) {
if (read_seqcount_retry(&path.dentry->d_seq, nd->next_seq))
return ERR_PTR(-ECHILD);
if (unlikely(!inode))
return ERR_PTR(-ENOENT);
} else {
dput(nd->path.dentry);
if (nd->path.mnt != path.mnt)
mntput(nd->path.mnt);
}
nd->path = path;
nd->inode = inode;
nd->seq = nd->next_seq;
return NULL;
}
if (nd->flags & LOOKUP_RCU) {
/* make sure that d_is_symlink above matches inode */
if (read_seqcount_retry(&path.dentry->d_seq, nd->next_seq))
return ERR_PTR(-ECHILD);
} else {
if (path.mnt == nd->path.mnt)
mntget(path.mnt);
}
return pick_link(nd, &path, inode, flags);
}
static struct dentry *follow_dotdot_rcu(struct nameidata *nd)
{
struct dentry *parent, *old;
if (path_equal(&nd->path, &nd->root))
goto in_root;
if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
struct path path;
unsigned seq;
if (!choose_mountpoint_rcu(real_mount(nd->path.mnt),
&nd->root, &path, &seq))
goto in_root;
if (unlikely(nd->flags & LOOKUP_NO_XDEV))
return ERR_PTR(-ECHILD);
nd->path = path;
nd->inode = path.dentry->d_inode;
nd->seq = seq;
// makes sure that non-RCU pathwalk could reach this state
if (read_seqretry(&mount_lock, nd->m_seq))
return ERR_PTR(-ECHILD);
/* we know that mountpoint was pinned */
}
old = nd->path.dentry;
parent = old->d_parent;
nd->next_seq = read_seqcount_begin(&parent->d_seq);
// makes sure that non-RCU pathwalk could reach this state
if (read_seqcount_retry(&old->d_seq, nd->seq))
return ERR_PTR(-ECHILD);
if (unlikely(!path_connected(nd->path.mnt, parent)))
return ERR_PTR(-ECHILD);
return parent;
in_root:
if (read_seqretry(&mount_lock, nd->m_seq))
return ERR_PTR(-ECHILD);
if (unlikely(nd->flags & LOOKUP_BENEATH))
return ERR_PTR(-ECHILD);
nd->next_seq = nd->seq;
return nd->path.dentry;
}
static struct dentry *follow_dotdot(struct nameidata *nd)
{
struct dentry *parent;
if (path_equal(&nd->path, &nd->root))
goto in_root;
if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
struct path path;
if (!choose_mountpoint(real_mount(nd->path.mnt),
&nd->root, &path))
goto in_root;
path_put(&nd->path);
nd->path = path;
nd->inode = path.dentry->d_inode;
if (unlikely(nd->flags & LOOKUP_NO_XDEV))
return ERR_PTR(-EXDEV);
}
/* rare case of legitimate dget_parent()... */
parent = dget_parent(nd->path.dentry);
if (unlikely(!path_connected(nd->path.mnt, parent))) {
dput(parent);
return ERR_PTR(-ENOENT);
}
return parent;
in_root:
if (unlikely(nd->flags & LOOKUP_BENEATH))
return ERR_PTR(-EXDEV);
return dget(nd->path.dentry);
}
static const char *handle_dots(struct nameidata *nd, int type)
{
if (type == LAST_DOTDOT) {
const char *error = NULL;
struct dentry *parent;
if (!nd->root.mnt) {
error = ERR_PTR(set_root(nd));
if (error)
return error;
}
if (nd->flags & LOOKUP_RCU)
parent = follow_dotdot_rcu(nd);
else
parent = follow_dotdot(nd);
if (IS_ERR(parent))
return ERR_CAST(parent);
error = step_into(nd, WALK_NOFOLLOW, parent);
if (unlikely(error))
return error;
if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
/*
* If there was a racing rename or mount along our
* path, then we can't be sure that ".." hasn't jumped
* above nd->root (and so userspace should retry or use
* some fallback).
*/
smp_rmb();
if (__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq))
return ERR_PTR(-EAGAIN);
if (__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq))
return ERR_PTR(-EAGAIN);
}
}
return NULL;
}
static const char *walk_component(struct nameidata *nd, int flags)
{
struct dentry *dentry;
/*
* "." and ".." are special - ".." especially so because it has
* to be able to know about the current root directory and
* parent relationships.
*/
if (unlikely(nd->last_type != LAST_NORM)) {
if (!(flags & WALK_MORE) && nd->depth)
put_link(nd);
return handle_dots(nd, nd->last_type);
}
dentry = lookup_fast(nd);
if (IS_ERR(dentry))
return ERR_CAST(dentry);
if (unlikely(!dentry)) {
dentry = lookup_slow(&nd->last, nd->path.dentry, nd->flags);
if (IS_ERR(dentry))
return ERR_CAST(dentry);
}
if (!(flags & WALK_MORE) && nd->depth)
put_link(nd);
return step_into(nd, flags, dentry);
}
/*
* We can do the critical dentry name comparison and hashing
* operations one word at a time, but we are limited to:
*
* - Architectures with fast unaligned word accesses. We could
* do a "get_unaligned()" if this helps and is sufficiently
* fast.
*
* - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
* do not trap on the (extremely unlikely) case of a page
* crossing operation).
*
* - Furthermore, we need an efficient 64-bit compile for the
* 64-bit case in order to generate the "number of bytes in
* the final mask". Again, that could be replaced with an
* efficient population count instruction or similar.
*/
#ifdef CONFIG_DCACHE_WORD_ACCESS
#include <asm/word-at-a-time.h>
#ifdef HASH_MIX
/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
#elif defined(CONFIG_64BIT)
/*
* Register pressure in the mixing function is an issue, particularly
* on 32-bit x86, but almost any function requires one state value and
* one temporary. Instead, use a function designed for two state values
* and no temporaries.
*
* This function cannot create a collision in only two iterations, so
* we have two iterations to achieve avalanche. In those two iterations,
* we have six layers of mixing, which is enough to spread one bit's
* influence out to 2^6 = 64 state bits.
*
* Rotate constants are scored by considering either 64 one-bit input
* deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
* probability of that delta causing a change to each of the 128 output
* bits, using a sample of random initial states.
*
* The Shannon entropy of the computed probabilities is then summed
* to produce a score. Ideally, any input change has a 50% chance of
* toggling any given output bit.
*
* Mixing scores (in bits) for (12,45):
* Input delta: 1-bit 2-bit
* 1 round: 713.3 42542.6
* 2 rounds: 2753.7 140389.8
* 3 rounds: 5954.1 233458.2
* 4 rounds: 7862.6 256672.2
* Perfect: 8192 258048
* (64*128) (64*63/2 * 128)
*/
#define HASH_MIX(x, y, a) \
( x ^= (a), \
y ^= x, x = rol64(x,12),\
x += y, y = rol64(y,45),\
y *= 9 )
/*
* Fold two longs into one 32-bit hash value. This must be fast, but
* latency isn't quite as critical, as there is a fair bit of additional
* work done before the hash value is used.
*/
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
y ^= x * GOLDEN_RATIO_64; y *= GOLDEN_RATIO_64;
return y >> 32;
}
#else /* 32-bit case */
/*
* Mixing scores (in bits) for (7,20):
* Input delta: 1-bit 2-bit
* 1 round: 330.3 9201.6
* 2 rounds: 1246.4 25475.4
* 3 rounds: 1907.1 31295.1
* 4 rounds: 2042.3 31718.6
* Perfect: 2048 31744
* (32*64) (32*31/2 * 64)
*/
#define HASH_MIX(x, y, a) \
( x ^= (a), \
y ^= x, x = rol32(x, 7),\
x += y, y = rol32(y,20),\
y *= 9 )
static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
/* Use arch-optimized multiply if one exists */
return __hash_32(y ^ __hash_32(x));
}
#endif
/*
* Return the hash of a string of known length. This is carefully
* designed to match hash_name(), which is the more critical function.
* In particular, we must end by hashing a final word containing 0..7
* payload bytes, to match the way that hash_name() iterates until it
* finds the delimiter after the name.
*/
unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
{
unsigned long a, x = 0, y = (unsigned long)salt;
for (;;) {
if (!len)
goto done;
a = load_unaligned_zeropad(name);
if (len < sizeof(unsigned long))
break;
HASH_MIX(x, y, a);
name += sizeof(unsigned long);
len -= sizeof(unsigned long);
}
x ^= a & bytemask_from_count(len);
done:
return fold_hash(x, y);
}
EXPORT_SYMBOL(full_name_hash);
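/*
* Illustrative sketch (hypothetical caller): code that wants to do its own
* dcache lookup fills a qstr and hashes it against the parent dentry, the
* same way lookup_noperm_common() below does:
*
*	struct qstr this = QSTR_INIT("config", 6);
*
*	this.hash = full_name_hash(parent, this.name, this.len);
*	child = d_lookup(parent, &this);
*/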
/* Return the "hash_len" (hash and length) of a null-terminated string */
u64 hashlen_string(const void *salt, const char *name)
{
unsigned long a = 0, x = 0, y = (unsigned long)salt;
unsigned long adata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
len = 0;
goto inside;
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
inside:
a = load_unaligned_zeropad(name+len);
} while (!has_zero(a, &adata, &constants));
adata = prep_zero_mask(a, adata, &constants);
mask = create_zero_mask(adata);
x ^= a & zero_bytemask(mask);
return hashlen_create(fold_hash(x, y), len + find_zero(mask));
}
EXPORT_SYMBOL(hashlen_string);
/*
* Calculate the length and hash of the path component, and
* return the length as the result.
*/
static inline const char *hash_name(struct nameidata *nd,
const char *name,
unsigned long *lastword)
{
unsigned long a, b, x, y = (unsigned long)nd->path.dentry;
unsigned long adata, bdata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
/*
* The first iteration is special, because it can result in
* '.' and '..' and has no mixing other than the final fold.
*/
a = load_unaligned_zeropad(name);
b = a ^ REPEAT_BYTE('/');
if (has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)) {
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
mask = create_zero_mask(adata | bdata);
a &= zero_bytemask(mask);
*lastword = a;
len = find_zero(mask);
nd->last.hash = fold_hash(a, y);
nd->last.len = len;
return name + len;
}
len = 0;
x = 0;
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
mask = create_zero_mask(adata | bdata);
a &= zero_bytemask(mask);
x ^= a;
len += find_zero(mask);
*lastword = 0; // Multi-word components cannot be DOT or DOTDOT
nd->last.hash = fold_hash(x, y);
nd->last.len = len;
return name + len;
}
/*
* Note that the 'last' word is always zero-masked, but
* was loaded as a possibly big-endian word.
*/
#ifdef __BIG_ENDIAN
#define LAST_WORD_IS_DOT (0x2eul << (BITS_PER_LONG-8))
#define LAST_WORD_IS_DOTDOT (0x2e2eul << (BITS_PER_LONG-16))
#endif
#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
/* Return the hash of a string of known length */
unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
{
unsigned long hash = init_name_hash(salt);
while (len--)
hash = partial_name_hash((unsigned char)*name++, hash);
return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/* Return the "hash_len" (hash and length) of a null-terminated string */
u64 hashlen_string(const void *salt, const char *name)
{
unsigned long hash = init_name_hash(salt);
unsigned long len = 0, c;
c = (unsigned char)*name;
while (c) {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
}
return hashlen_create(end_name_hash(hash), len);
}
EXPORT_SYMBOL(hashlen_string);
/*
* We know there's a real path component here of at least
* one character.
*/
static inline const char *hash_name(struct nameidata *nd, const char *name, unsigned long *lastword)
{
unsigned long hash = init_name_hash(nd->path.dentry);
unsigned long len = 0, c, last = 0;
c = (unsigned char)*name;
do {
last = (last << 8) + c;
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
// This is reliable for DOT or DOTDOT, since the component
// cannot contain NUL characters - top bits being zero means
// we cannot have had any other pathnames.
*lastword = last;
nd->last.hash = end_name_hash(hash);
nd->last.len = len;
return name + len;
}
#endif
#ifndef LAST_WORD_IS_DOT
#define LAST_WORD_IS_DOT 0x2e
#define LAST_WORD_IS_DOTDOT 0x2e2e
#endif
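/*
* Note (added for clarity): 0x2e is ASCII '.', so a one-byte component of
* "." leaves 0x2e in *lastword and ".." leaves 0x2e2e; that is what the
* switch in link_path_walk() below matches against, with the big-endian
* variants shifted up to the top of the word.
*/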
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
*
* Returns 0 and nd will have valid dentry and mnt on success.
* Returns error and drops reference to input namei data on failure.
*/
static int link_path_walk(const char *name, struct nameidata *nd)
{
int depth = 0; // depth <= nd->depth
int err;
nd->last_type = LAST_ROOT;
nd->flags |= LOOKUP_PARENT;
if (IS_ERR(name))
return PTR_ERR(name);
if (*name == '/') {
do {
name++;
} while (unlikely(*name == '/'));
}
if (unlikely(!*name)) {
nd->dir_mode = 0; // short-circuit the 'hardening' idiocy
return 0;
}
/* At this point we know we have a real path component. */
for(;;) {
struct mnt_idmap *idmap;
const char *link;
unsigned long lastword;
idmap = mnt_idmap(nd->path.mnt);
err = may_lookup(idmap, nd);
if (unlikely(err))
return err;
nd->last.name = name;
name = hash_name(nd, name, &lastword);
switch(lastword) {
case LAST_WORD_IS_DOTDOT:
nd->last_type = LAST_DOTDOT;
nd->state |= ND_JUMPED;
break;
case LAST_WORD_IS_DOT:
nd->last_type = LAST_DOT;
break;
default:
nd->last_type = LAST_NORM;
nd->state &= ~ND_JUMPED;
struct dentry *parent = nd->path.dentry;
if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
err = parent->d_op->d_hash(parent, &nd->last);
if (err < 0)
return err;
}
}
if (!*name)
goto OK;
/*
* If it wasn't NUL, we know it was '/'. Skip that
* slash, and continue until no more slashes.
*/
do {
name++;
} while (unlikely(*name == '/'));
if (unlikely(!*name)) {
OK:
/* pathname or trailing symlink, done */
if (!depth) {
nd->dir_vfsuid = i_uid_into_vfsuid(idmap, nd->inode);
nd->dir_mode = nd->inode->i_mode;
nd->flags &= ~LOOKUP_PARENT;
return 0;
}
/* last component of nested symlink */
name = nd->stack[--depth].name;
link = walk_component(nd, 0);
} else {
/* not the last component */
link = walk_component(nd, WALK_MORE);
}
if (unlikely(link)) {
if (IS_ERR(link))
return PTR_ERR(link);
/* a symlink to follow */
nd->stack[depth++].name = name;
name = link;
continue;
}
if (unlikely(!d_can_lookup(nd->path.dentry))) {
if (nd->flags & LOOKUP_RCU) {
if (!try_to_unlazy(nd))
return -ECHILD;
}
return -ENOTDIR;
}
}
}
/* must be paired with terminate_walk() */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
int error;
const char *s = nd->pathname;
/* LOOKUP_CACHED requires RCU, ask caller to retry */
if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED)
return ERR_PTR(-EAGAIN);
if (!*s)
flags &= ~LOOKUP_RCU;
if (flags & LOOKUP_RCU)
rcu_read_lock();
else
nd->seq = nd->next_seq = 0;
nd->flags = flags;
nd->state |= ND_JUMPED;
nd->m_seq = __read_seqcount_begin(&mount_lock.seqcount);
nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
smp_rmb();
if (nd->state & ND_ROOT_PRESET) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
if (*s && unlikely(!d_can_lookup(root)))
return ERR_PTR(-ENOTDIR);
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
nd->root_seq = nd->seq;
} else {
path_get(&nd->path);
}
return s;
}
nd->root.mnt = NULL;
/* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
error = nd_jump_root(nd);
if (unlikely(error))
return ERR_PTR(error);
return s;
}
/* Relative pathname -- get the starting-point it is relative to. */
if (nd->dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
do {
seq = read_seqbegin(&fs->seq);
nd->path = fs->pwd;
nd->inode = nd->path.dentry->d_inode;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
} while (read_seqretry(&fs->seq, seq));
} else {
get_fs_pwd(current->fs, &nd->path);
nd->inode = nd->path.dentry->d_inode;
}
} else {
/* Caller must check execute permissions on the starting path component */
CLASS(fd_raw, f)(nd->dfd);
struct dentry *dentry;
if (fd_empty(f))
return ERR_PTR(-EBADF);
if (flags & LOOKUP_LINKAT_EMPTY) {
if (fd_file(f)->f_cred != current_cred() &&
!ns_capable(fd_file(f)->f_cred->user_ns, CAP_DAC_READ_SEARCH))
return ERR_PTR(-ENOENT);
}
dentry = fd_file(f)->f_path.dentry;
if (*s && unlikely(!d_can_lookup(dentry)))
return ERR_PTR(-ENOTDIR);
nd->path = fd_file(f)->f_path;
if (flags & LOOKUP_RCU) {
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
path_get(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
}
/* For scoped-lookups we need to set the root to the dirfd as well. */
if (flags & LOOKUP_IS_SCOPED) {
nd->root = nd->path;
if (flags & LOOKUP_RCU) {
nd->root_seq = nd->seq;
} else {
path_get(&nd->root);
nd->state |= ND_ROOT_GRABBED;
}
}
return s;
}
static inline const char *lookup_last(struct nameidata *nd)
{
if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
return walk_component(nd, WALK_TRAILING);
}
static int handle_lookup_down(struct nameidata *nd)
{
if (!(nd->flags & LOOKUP_RCU))
dget(nd->path.dentry);
nd->next_seq = nd->seq;
return PTR_ERR(step_into(nd, WALK_NOFOLLOW, nd->path.dentry));
}
/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
err = handle_lookup_down(nd);
if (unlikely(err < 0))
s = ERR_PTR(err);
}
while (!(err = link_path_walk(s, nd)) &&
(s = lookup_last(nd)) != NULL)
;
if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
err = handle_lookup_down(nd);
nd->state &= ~ND_JUMPED; // no d_weak_revalidate(), please...
}
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY)
if (!d_can_lookup(nd->path.dentry))
err = -ENOTDIR;
if (!err) {
*path = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
terminate_walk(nd);
return err;
}
int filename_lookup(int dfd, struct filename *name, unsigned flags,
struct path *path, const struct path *root)
{
int retval;
struct nameidata nd;
if (IS_ERR(name))
return PTR_ERR(name);
set_nameidata(&nd, dfd, name, root);
retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
if (unlikely(retval == -ECHILD))
retval = path_lookupat(&nd, flags, path);
if (unlikely(retval == -ESTALE))
retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
if (likely(!retval))
audit_inode(name, path->dentry,
flags & LOOKUP_MOUNTPOINT ? AUDIT_INODE_NOEVAL : 0);
restore_nameidata();
return retval;
}
/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
const char *s = path_init(nd, flags);
int err = link_path_walk(s, nd);
if (!err)
err = complete_walk(nd);
if (!err) {
*parent = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
terminate_walk(nd);
return err;
}
/* Note: this does not consume "name" */
static int __filename_parentat(int dfd, struct filename *name,
unsigned int flags, struct path *parent,
struct qstr *last, int *type,
const struct path *root)
{
int retval;
struct nameidata nd;
if (IS_ERR(name))
return PTR_ERR(name);
set_nameidata(&nd, dfd, name, root);
retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
if (unlikely(retval == -ECHILD))
retval = path_parentat(&nd, flags, parent);
if (unlikely(retval == -ESTALE))
retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
if (likely(!retval)) {
*last = nd.last;
*type = nd.last_type;
audit_inode(name, parent->dentry, AUDIT_INODE_PARENT);
}
restore_nameidata();
return retval;
}
static int filename_parentat(int dfd, struct filename *name,
unsigned int flags, struct path *parent,
struct qstr *last, int *type)
{
return __filename_parentat(dfd, name, flags, parent, last, type, NULL);
}
/* does lookup, returns the object with parent locked */
static struct dentry *__start_removing_path(int dfd, struct filename *name,
struct path *path)
{
struct path parent_path __free(path_put) = {};
struct dentry *d;
struct qstr last;
int type, error;
error = filename_parentat(dfd, name, 0, &parent_path, &last, &type);
if (error)
return ERR_PTR(error);
if (unlikely(type != LAST_NORM))
return ERR_PTR(-EINVAL);
/* don't fail immediately if it's r/o, at least try to report other errors */
error = mnt_want_write(parent_path.mnt);
inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT);
d = lookup_one_qstr_excl(&last, parent_path.dentry, 0);
if (IS_ERR(d))
goto unlock;
if (error)
goto fail;
path->dentry = no_free_ptr(parent_path.dentry);
path->mnt = no_free_ptr(parent_path.mnt);
return d;
fail:
dput(d);
d = ERR_PTR(error);
unlock:
inode_unlock(parent_path.dentry->d_inode);
if (!error)
mnt_drop_write(parent_path.mnt);
return d;
}
/**
* kern_path_parent - lookup path returning parent and target
* @name: path name
* @path: path to store parent in
*
* The path @name should end with a normal component, not "." or ".." or "/".
* A lookup is performed and, if successful, the parent information
* is stored in @path and the dentry is returned.
*
* The dentry may be negative; the parent will be positive.
*
* Returns: dentry or error.
*/
struct dentry *kern_path_parent(const char *name, struct path *path)
{
struct path parent_path __free(path_put) = {};
struct filename *filename __free(putname) = getname_kernel(name);
struct dentry *d;
struct qstr last;
int type, error;
error = filename_parentat(AT_FDCWD, filename, 0, &parent_path, &last, &type);
if (error)
return ERR_PTR(error);
if (unlikely(type != LAST_NORM))
return ERR_PTR(-EINVAL);
d = lookup_noperm_unlocked(&last, parent_path.dentry);
if (IS_ERR(d))
return d;
path->dentry = no_free_ptr(parent_path.dentry);
path->mnt = no_free_ptr(parent_path.mnt);
return d;
}
struct dentry *start_removing_path(const char *name, struct path *path)
{
struct filename *filename = getname_kernel(name);
struct dentry *res = __start_removing_path(AT_FDCWD, filename, path);
putname(filename);
return res;
}
struct dentry *start_removing_user_path_at(int dfd,
const char __user *name,
struct path *path)
{
struct filename *filename = getname(name);
struct dentry *res = __start_removing_path(dfd, filename, path);
putname(filename);
return res;
}
EXPORT_SYMBOL(start_removing_user_path_at);
int kern_path(const char *name, unsigned int flags, struct path *path)
{
struct filename *filename = getname_kernel(name);
int ret = filename_lookup(AT_FDCWD, filename, flags, path, NULL);
putname(filename);
return ret;
}
EXPORT_SYMBOL(kern_path);
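/*
* Illustrative sketch (hypothetical in-kernel caller; the pathname is just an
* example): resolve a pathname to a struct path and drop the references when
* done.
*
*	struct path path;
*	int err = kern_path("/etc/fstab", LOOKUP_FOLLOW, &path);
*
*	if (!err) {
*		/* use path.mnt and path.dentry ... */
*		path_put(&path);
*	}
*/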
/**
* vfs_path_parent_lookup - lookup a parent path relative to a dentry-vfsmount pair
* @filename: filename structure
* @flags: lookup flags
* @parent: pointer to struct path to fill
* @last: last component
* @type: type of the last component
* @root: pointer to struct path of the base directory
*/
int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
struct path *parent, struct qstr *last, int *type,
const struct path *root)
{
return __filename_parentat(AT_FDCWD, filename, flags, parent, last,
type, root);
}
EXPORT_SYMBOL(vfs_path_parent_lookup);
/**
* vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
* @dentry: pointer to dentry of the base directory
* @mnt: pointer to vfs mount of the base directory
* @name: pointer to file name
* @flags: lookup flags
* @path: pointer to struct path to fill
*/
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
const char *name, unsigned int flags,
struct path *path)
{
struct filename *filename;
struct path root = {.mnt = mnt, .dentry = dentry};
int ret;
filename = getname_kernel(name);
/* the first argument of filename_lookup() is ignored with root */
ret = filename_lookup(AT_FDCWD, filename, flags, path, &root);
putname(filename);
return ret;
}
EXPORT_SYMBOL(vfs_path_lookup);
static int lookup_noperm_common(struct qstr *qname, struct dentry *base)
{
const char *name = qname->name;
u32 len = qname->len;
qname->hash = full_name_hash(base, name, len);
if (!len)
return -EACCES;
if (is_dot_dotdot(name, len))
return -EACCES;
while (len--) {
unsigned int c = *(const unsigned char *)name++;
if (c == '/' || c == '\0')
return -EACCES;
}
/*
* See if the low-level filesystem might want
* to use its own hash..
*/
if (base->d_flags & DCACHE_OP_HASH) {
int err = base->d_op->d_hash(base, qname);
if (err < 0)
return err;
}
return 0;
}
static int lookup_one_common(struct mnt_idmap *idmap,
struct qstr *qname, struct dentry *base)
{
int err;
err = lookup_noperm_common(qname, base);
if (err < 0)
return err;
return inode_permission(idmap, base->d_inode, MAY_EXEC);
}
/**
* try_lookup_noperm - filesystem helper to lookup single pathname component
* @name: qstr storing pathname component to lookup
* @base: base directory to lookup from
*
* Look up a dentry by name in the dcache, returning NULL if it does not
* currently exist. The function does not try to create a dentry and if one
* is found it doesn't try to revalidate it.
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code. It does no permission checking.
*
* No locks need be held - only a counted reference to @base is needed.
*
*/
struct dentry *try_lookup_noperm(struct qstr *name, struct dentry *base)
{
int err;
err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
return d_lookup(base, name);
}
EXPORT_SYMBOL(try_lookup_noperm);
/**
* lookup_noperm - filesystem helper to lookup single pathname component
* @name: qstr storing pathname component to lookup
* @base: base directory to lookup from
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code. It does no permission checking.
*
* The caller must hold base->i_rwsem.
*/
struct dentry *lookup_noperm(struct qstr *name, struct dentry *base)
{
struct dentry *dentry;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
dentry = lookup_dcache(name, base, 0);
return dentry ? dentry : __lookup_slow(name, base, 0);
}
EXPORT_SYMBOL(lookup_noperm);
/**
* lookup_one - lookup single pathname component
* @idmap: idmap of the mount the lookup is performed from
* @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
*
* This can be used for in-kernel filesystem clients such as file servers.
*
* The caller must hold base->i_rwsem.
*/
struct dentry *lookup_one(struct mnt_idmap *idmap, struct qstr *name,
struct dentry *base)
{
struct dentry *dentry;
int err;
WARN_ON_ONCE(!inode_is_locked(base->d_inode));
err = lookup_one_common(idmap, name, base);
if (err)
return ERR_PTR(err);
dentry = lookup_dcache(name, base, 0);
return dentry ? dentry : __lookup_slow(name, base, 0);
}
EXPORT_SYMBOL(lookup_one);
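/*
* Illustrative sketch (hypothetical file-server style caller; the names are
* made up): the parent's i_rwsem must already be held, and the child dentry
* (or ERR_PTR) comes back with a reference the caller must drop.
*
*	struct qstr name = QSTR_INIT("data", 4);
*	struct dentry *child;
*
*	inode_lock(d_inode(parent));
*	child = lookup_one(idmap, &name, parent);
*	inode_unlock(d_inode(parent));
*	if (!IS_ERR(child)) {
*		/* ... */
*		dput(child);
*	}
*/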
/**
* lookup_one_unlocked - lookup single pathname component
* @idmap: idmap of the mount the lookup is performed from
* @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
*
* This can be used for in-kernel filesystem clients such as file servers.
*
* Unlike lookup_one, it should be called without the parent
* i_rwsem held, and will take the i_rwsem itself if necessary.
*/
struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap, struct qstr *name,
struct dentry *base)
{
int err;
struct dentry *ret;
err = lookup_one_common(idmap, name, base);
if (err)
return ERR_PTR(err);
ret = lookup_dcache(name, base, 0);
if (!ret)
ret = lookup_slow(name, base, 0);
return ret;
}
EXPORT_SYMBOL(lookup_one_unlocked);
/**
* lookup_one_positive_killable - lookup single pathname component
* @idmap: idmap of the mount the lookup is performed from
* @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
*
* This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
* known positive or ERR_PTR(). This is what most of the users want.
*
* Note that pinned negative with unlocked parent _can_ become positive at any
* time, so callers of lookup_one_unlocked() need to be very careful; pinned
* positives have ->d_inode stable, so this one avoids such problems.
*
* This can be used for in-kernel filesystem clients such as file servers.
*
* It should be called without the parent i_rwsem held, and will take
* the i_rwsem itself if necessary. If a fatal signal is pending or
* delivered, it will return %-EINTR if the lock is needed.
*/
struct dentry *lookup_one_positive_killable(struct mnt_idmap *idmap,
struct qstr *name,
struct dentry *base)
{
int err;
struct dentry *ret;
err = lookup_one_common(idmap, name, base);
if (err)
return ERR_PTR(err);
ret = lookup_dcache(name, base, 0);
if (!ret)
ret = lookup_slow_killable(name, base, 0);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
dput(ret);
ret = ERR_PTR(-ENOENT);
}
return ret;
}
EXPORT_SYMBOL(lookup_one_positive_killable);
/**
* lookup_one_positive_unlocked - lookup single pathname component
* @idmap: idmap of the mount the lookup is performed from
* @name: qstr holding pathname component to lookup
* @base: base directory to lookup from
*
* This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
* known positive or ERR_PTR(). This is what most of the users want.
*
* Note that pinned negative with unlocked parent _can_ become positive at any
* time, so callers of lookup_one_unlocked() need to be very careful; pinned
* positives have ->d_inode stable, so this one avoids such problems.
*
* This can be used for in-kernel filesystem clients such as file servers.
*
* The helper should be called without i_rwsem held.
*/
struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
struct qstr *name,
struct dentry *base)
{
struct dentry *ret = lookup_one_unlocked(idmap, name, base);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
dput(ret);
ret = ERR_PTR(-ENOENT);
}
return ret;
}
EXPORT_SYMBOL(lookup_one_positive_unlocked);
/**
* lookup_noperm_unlocked - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code. It does no permission checking.
*
* Unlike lookup_noperm(), it should be called without the parent
* i_rwsem held, and will take the i_rwsem itself if necessary.
*
* Unlike try_lookup_noperm() it *does* revalidate the dentry if it already
* existed.
*/
struct dentry *lookup_noperm_unlocked(struct qstr *name, struct dentry *base)
{
struct dentry *ret;
int err;
err = lookup_noperm_common(name, base);
if (err)
return ERR_PTR(err);
ret = lookup_dcache(name, base, 0);
if (!ret)
ret = lookup_slow(name, base, 0);
return ret;
}
EXPORT_SYMBOL(lookup_noperm_unlocked);
/*
* Like lookup_noperm_unlocked(), except that it yields ERR_PTR(-ENOENT)
* on negatives. Returns known positive or ERR_PTR(); that's what
* most of the users want. Note that pinned negative with unlocked parent
* _can_ become positive at any time, so callers of lookup_noperm_unlocked()
* need to be very careful; pinned positives have ->d_inode stable, so
* this one avoids such problems.
*/
struct dentry *lookup_noperm_positive_unlocked(struct qstr *name,
struct dentry *base)
{
struct dentry *ret;
ret = lookup_noperm_unlocked(name, base);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
dput(ret);
ret = ERR_PTR(-ENOENT);
}
return ret;
}
EXPORT_SYMBOL(lookup_noperm_positive_unlocked);
#ifdef CONFIG_UNIX98_PTYS
int path_pts(struct path *path)
{
/* Find something mounted on "pts" in the same directory as
* the input path.
*/
struct dentry *parent = dget_parent(path->dentry);
struct dentry *child;
struct qstr this = QSTR_INIT("pts", 3);
if (unlikely(!path_connected(path->mnt, parent))) {
dput(parent);
return -ENOENT;
}
dput(path->dentry);
path->dentry = parent;
child = d_hash_and_lookup(parent, &this);
if (IS_ERR_OR_NULL(child))
return -ENOENT;
path->dentry = child;
dput(parent);
follow_down(path, 0);
return 0;
}
#endif
int user_path_at(int dfd, const char __user *name, unsigned flags,
struct path *path)
{
struct filename *filename = getname_flags(name, flags);
int ret = filename_lookup(dfd, filename, flags, path, NULL);
putname(filename);
return ret;
}
EXPORT_SYMBOL(user_path_at);
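/*
* Illustrative sketch (hypothetical syscall-style caller): @pathname is a
* user pointer; on success the caller owns the references in @path.
*
*	struct path path;
*	int err = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
*
*	if (!err) {
*		/* ... */
*		path_put(&path);
*	}
*/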
int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode)
{
kuid_t fsuid = current_fsuid();
if (vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), fsuid))
return 0;
if (vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, dir), fsuid))
return 0;
return !capable_wrt_inode_uidgid(idmap, inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);
/*
* Check whether we can remove a link victim from directory dir, check
* whether the type of victim is right.
* 1. We can't do it if dir is read-only (done in permission())
* 2. We should have write and exec permissions on dir
* 3. We can't remove anything from append-only dir
* 4. We can't do anything with immutable dir (done in permission())
* 5. If the sticky bit on dir is set we should either
* a. be owner of dir, or
* b. be owner of victim, or
* c. have CAP_FOWNER capability
* 6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
* 7. If the victim has an unknown uid or gid we can't change the inode.
* 8. If we were asked to remove a directory and victim isn't one - ENOTDIR.
* 9. If we were asked to remove a non-directory and victim isn't one - EISDIR.
* 10. We can't remove a root or mountpoint.
* 11. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
static int may_delete(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *victim, bool isdir)
{
struct inode *inode = d_backing_inode(victim);
int error;
if (d_is_negative(victim))
return -ENOENT;
BUG_ON(!inode);
BUG_ON(victim->d_parent->d_inode != dir);
/* Inode writeback is not safe when the uid or gid are invalid. */
if (!vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
!vfsgid_valid(i_gid_into_vfsgid(idmap, inode)))
return -EOVERFLOW;
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
error = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
if (check_sticky(idmap, dir, inode) || IS_APPEND(inode) ||
IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) ||
HAS_UNMAPPED_ID(idmap, inode))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
return -ENOTDIR;
if (IS_ROOT(victim))
return -EBUSY;
} else if (d_is_dir(victim))
return -EISDIR;
if (IS_DEADDIR(dir))
return -ENOENT;
if (victim->d_flags & DCACHE_NFSFS_RENAMED)
return -EBUSY;
return 0;
}
/* Check whether we can create an object with dentry child in directory
* dir.
* 1. We can't do it if child already exists (open has special treatment for
* this case, but since we are inlined it's OK)
* 2. We can't do it if dir is read-only (done in permission())
* 3. We can't do it if the fs can't represent the fsuid or fsgid.
* 4. We should have write and exec permissions on dir
* 5. We can't do it if dir is immutable (done in permission())
*/
static inline int may_create(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *child)
{
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
if (!fsuidgid_has_mapping(dir->i_sb, idmap))
return -EOVERFLOW;
return inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
}
// p1 != p2, both are on the same filesystem, ->s_vfs_rename_mutex is held
static struct dentry *lock_two_directories(struct dentry *p1, struct dentry *p2)
{
struct dentry *p = p1, *q = p2, *r;
while ((r = p->d_parent) != p2 && r != p)
p = r;
if (r == p2) {
// p is a child of p2 and an ancestor of p1 or p1 itself
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT2);
return p;
}
// p is the root of connected component that contains p1
// p2 does not occur on the path from p to p1
while ((r = q->d_parent) != p1 && r != p && r != q)
q = r;
if (r == p1) {
// q is a child of p1 and an ancestor of p2 or p2 itself
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
return q;
} else if (likely(r == p)) {
// both p2 and p1 are descendants of p
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
return NULL;
} else { // no common ancestor at the time we'd been called
mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
return ERR_PTR(-EXDEV);
}
}
/*
* p1 and p2 should be directories on the same fs.
*/
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
if (p1 == p2) {
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
return NULL;
}
mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
return lock_two_directories(p1, p2);
}
EXPORT_SYMBOL(lock_rename);
/*
* c1 and p2 should be on the same fs.
*/
struct dentry *lock_rename_child(struct dentry *c1, struct dentry *p2)
{
if (READ_ONCE(c1->d_parent) == p2) {
/*
* hopefully won't need to touch ->s_vfs_rename_mutex at all.
*/
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
/*
* now that p2 is locked, nobody can move in or out of it,
* so the test below is safe.
*/
if (likely(c1->d_parent == p2))
return NULL;
/*
* c1 got moved out of p2 while we'd been taking locks;
* unlock and fall back to slow case.
*/
inode_unlock(p2->d_inode);
}
mutex_lock(&c1->d_sb->s_vfs_rename_mutex);
/*
* nobody can move out of any directories on this fs.
*/
if (likely(c1->d_parent != p2))
return lock_two_directories(c1->d_parent, p2);
/*
* c1 got moved into p2 while we were taking locks;
* we need p2 locked and ->s_vfs_rename_mutex unlocked,
* for consistency with lock_rename().
*/
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
mutex_unlock(&c1->d_sb->s_vfs_rename_mutex);
return NULL;
}
EXPORT_SYMBOL(lock_rename_child);
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
inode_unlock(p1->d_inode);
if (p1 != p2) {
inode_unlock(p2->d_inode);
mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
}
}
EXPORT_SYMBOL(unlock_rename);
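/*
 * Illustrative sketch of the expected calling pattern (condensed; see
 * do_renameat2() further down for the real in-tree sequence):
 *
 *	trap = lock_rename(new_parent, old_parent);
 *	if (IS_ERR(trap))
 *		return PTR_ERR(trap);
 *	... look up source and target; if either equals "trap" the rename
 *	    would create a loop, so bail out ...
 *	... fill in a struct renamedata and call vfs_rename() ...
 *	unlock_rename(new_parent, old_parent);
 *
 * The returned "trap" dentry is non-NULL when one parent is an ancestor
 * of the other; it is the dentry that must be neither source nor target.
 */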
/**
* vfs_prepare_mode - prepare the mode to be used for a new inode
* @idmap: idmap of the mount the inode was found from
* @dir: parent directory of the new inode
* @mode: mode of the new inode
* @mask_perms: allowed permission by the vfs
* @type: type of file to be created
*
* This helper consolidates and enforces vfs restrictions on the @mode of a new
* object to be created.
*
* Umask stripping depends on whether the filesystem supports POSIX ACLs (see
* the kernel documentation for mode_strip_umask()). Moving umask stripping
* after setgid stripping allows the same ordering for both non-POSIX ACL and
* POSIX ACL supporting filesystems.
*
* Note that it's currently valid for @type to be 0 if a directory is created.
* Filesystems raise that flag individually and we need to check whether each
* filesystem can deal with receiving S_IFDIR from the vfs before we enforce a
* non-zero type.
*
* Returns: mode to be passed to the filesystem
*/
static inline umode_t vfs_prepare_mode(struct mnt_idmap *idmap,
const struct inode *dir, umode_t mode,
umode_t mask_perms, umode_t type)
{
mode = mode_strip_sgid(idmap, dir, mode);
mode = mode_strip_umask(dir, mode);
/*
* Apply the vfs mandated allowed permission mask and set the type of
* file to be created before we call into the filesystem.
*/
mode &= (mask_perms & ~S_IFMT);
mode |= (type & S_IFMT);
return mode;
}
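/*
 * Worked example (illustrative only): for a regular file requested with
 * mode 0666 while the caller's umask is 022, on a filesystem without
 * POSIX ACL support, vfs_create() below effectively does
 *
 *	mode = vfs_prepare_mode(idmap, dir, 0666, S_IALLUGO, S_IFREG);
 *
 * and ends up with S_IFREG | 0644: mode_strip_sgid() drops an unwarranted
 * setgid bit, mode_strip_umask() applies the umask, and the vfs permission
 * mask plus the file type are applied last.
 */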
/**
* vfs_create - create new file
* @idmap: idmap of the mount the inode was found from
* @dir: inode of the parent directory
* @dentry: dentry of the child file
* @mode: mode of the child file
* @want_excl: whether the file must not yet exist
*
* Create a new file.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool want_excl)
{
int error;
error = may_create(idmap, dir, dentry);
if (error)
return error;
if (!dir->i_op->create)
return -EACCES; /* shouldn't it be ENOSYS? */
mode = vfs_prepare_mode(idmap, dir, mode, S_IALLUGO, S_IFREG);
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
error = dir->i_op->create(idmap, dir, dentry, mode, want_excl);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_create);
int vfs_mkobj(struct dentry *dentry, umode_t mode,
int (*f)(struct dentry *, umode_t, void *),
void *arg)
{
struct inode *dir = dentry->d_parent->d_inode;
int error = may_create(&nop_mnt_idmap, dir, dentry);
if (error)
return error;
mode &= S_IALLUGO;
mode |= S_IFREG;
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
error = f(dentry, mode, arg);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_mkobj);
bool may_open_dev(const struct path *path)
{
return !(path->mnt->mnt_flags & MNT_NODEV) &&
!(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
}
static int may_open(struct mnt_idmap *idmap, const struct path *path,
int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
struct inode *inode = dentry->d_inode;
int error;
if (!inode)
return -ENOENT;
switch (inode->i_mode & S_IFMT) {
case S_IFLNK:
return -ELOOP;
case S_IFDIR:
if (acc_mode & MAY_WRITE)
return -EISDIR;
if (acc_mode & MAY_EXEC)
return -EACCES;
break;
case S_IFBLK:
case S_IFCHR:
if (!may_open_dev(path))
return -EACCES;
fallthrough;
case S_IFIFO:
case S_IFSOCK:
if (acc_mode & MAY_EXEC)
return -EACCES;
flag &= ~O_TRUNC;
break;
case S_IFREG:
if ((acc_mode & MAY_EXEC) && path_noexec(path))
return -EACCES;
break;
default:
VFS_BUG_ON_INODE(!IS_ANON_FILE(inode), inode);
}
error = inode_permission(idmap, inode, MAY_OPEN | acc_mode);
if (error)
return error;
/*
* An append-only file must be opened in append mode for writing.
*/
if (IS_APPEND(inode)) {
if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
return -EPERM;
if (flag & O_TRUNC)
return -EPERM;
}
/* O_NOATIME can only be set by the owner or superuser */
if (flag & O_NOATIME && !inode_owner_or_capable(idmap, inode))
return -EPERM;
return 0;
}
static int handle_truncate(struct mnt_idmap *idmap, struct file *filp)
{
const struct path *path = &filp->f_path;
struct inode *inode = path->dentry->d_inode;
int error = get_write_access(inode);
if (error)
return error;
error = security_file_truncate(filp);
if (!error) {
error = do_truncate(idmap, path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
filp);
}
put_write_access(inode);
return error;
}
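/*
 * open(2) also accepts the nonstandard access mode 3 (both O_ACCMODE bits
 * set), which some drivers use for "ioctl-only" descriptors and which
 * requires both read and write permission. Fold it down to O_RDWR here so
 * that ->atomic_open() implementations only ever see a conventional
 * access mode.
 */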
static inline int open_to_namei_flags(int flag)
{
if ((flag & O_ACCMODE) == 3)
flag--;
return flag;
}
static int may_o_create(struct mnt_idmap *idmap,
const struct path *dir, struct dentry *dentry,
umode_t mode)
{
int error = security_path_mknod(dir, dentry, mode, 0);
if (error)
return error;
if (!fsuidgid_has_mapping(dir->dentry->d_sb, idmap))
return -EOVERFLOW;
error = inode_permission(idmap, dir->dentry->d_inode,
MAY_WRITE | MAY_EXEC);
if (error)
return error;
return security_inode_create(dir->dentry->d_inode, dentry, mode);
}
/*
* Attempt to atomically look up, create and open a file from a negative
* dentry.
*
* Returns the dentry that ended up being used if successful. The file will
* have been created and attached to @file by the filesystem calling
* finish_open().
*
* If the file was looked up only or didn't need creating, FMODE_OPENED won't
* be set. The caller will need to perform the open themselves. The returned
* dentry may be negative.
*
* Returns an ERR_PTR on failure.
*/
static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
struct file *file,
int open_flag, umode_t mode)
{
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
struct inode *dir = nd->path.dentry->d_inode;
int error;
if (nd->flags & LOOKUP_DIRECTORY)
open_flag |= O_DIRECTORY;
file->__f_path.dentry = DENTRY_NOT_SET;
file->__f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file,
open_to_namei_flags(open_flag), mode);
d_lookup_done(dentry);
if (!error) {
if (file->f_mode & FMODE_OPENED) {
if (unlikely(dentry != file->f_path.dentry)) {
dput(dentry);
dentry = dget(file->f_path.dentry);
}
} else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
} else {
if (file->f_path.dentry) {
dput(dentry);
dentry = file->f_path.dentry;
}
if (unlikely(d_is_negative(dentry)))
error = -ENOENT;
}
}
if (error) {
dput(dentry);
dentry = ERR_PTR(error);
}
return dentry;
}
/*
* Look up and maybe create and open the last component.
*
* Must be called with the parent locked (exclusive in the O_CREAT case).
*
* Returns the looked-up dentry on success, that is, if
* the file was successfully atomically created (if necessary) and opened, or
* the file was not completely opened at this time, though lookups and
* creations were performed.
* These cases are distinguished by the presence of FMODE_OPENED in
* file->f_mode. In the latter case the returned dentry may be negative if
* O_CREAT hadn't been specified.
*
* An ERR_PTR is returned on failure.
*/
static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
const struct open_flags *op,
bool got_write)
{
struct mnt_idmap *idmap;
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
int open_flag = op->open_flag;
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
if (unlikely(IS_DEADDIR(dir_inode)))
return ERR_PTR(-ENOENT);
file->f_mode &= ~FMODE_CREATED;
dentry = d_lookup(dir, &nd->last);
for (;;) {
if (!dentry) {
dentry = d_alloc_parallel(dir, &nd->last, &wq);
if (IS_ERR(dentry))
return dentry;
}
if (d_in_lookup(dentry))
break;
error = d_revalidate(dir_inode, &nd->last, dentry, nd->flags);
if (likely(error > 0))
break;
if (error)
goto out_dput;
d_invalidate(dentry);
dput(dentry);
dentry = NULL;
}
if (dentry->d_inode) {
/* Cached positive dentry: will open in f_op->open */
return dentry;
}
if (open_flag & O_CREAT)
audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
/*
* Checking write permission is tricky, because we don't know if we are
* going to actually need it: O_CREAT opens should work as long as the
* file exists. But checking existence breaks atomicity. The trick is
* to check access and if not granted clear O_CREAT from the flags.
*
* Another problem is returning the "right" error value (e.g. for an
* O_EXCL open we want to return EEXIST not EROFS).
*/
if (unlikely(!got_write))
open_flag &= ~O_TRUNC;
idmap = mnt_idmap(nd->path.mnt);
if (open_flag & O_CREAT) {
if (open_flag & O_EXCL)
open_flag &= ~O_TRUNC;
mode = vfs_prepare_mode(idmap, dir->d_inode, mode, mode, mode);
if (likely(got_write))
create_error = may_o_create(idmap, &nd->path,
dentry, mode);
else
create_error = -EROFS;
}
if (create_error)
open_flag &= ~O_CREAT;
if (dir_inode->i_op->atomic_open) {
dentry = atomic_open(nd, dentry, file, open_flag, mode);
if (unlikely(create_error) && dentry == ERR_PTR(-ENOENT))
dentry = ERR_PTR(create_error);
return dentry;
}
if (d_in_lookup(dentry)) {
struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry,
nd->flags);
d_lookup_done(dentry);
if (unlikely(res)) {
if (IS_ERR(res)) {
error = PTR_ERR(res);
goto out_dput;
}
dput(dentry);
dentry = res;
}
}
/* Negative dentry, just create the file */
if (!dentry->d_inode && (open_flag & O_CREAT)) {
file->f_mode |= FMODE_CREATED;
audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
if (!dir_inode->i_op->create) {
error = -EACCES;
goto out_dput;
}
error = dir_inode->i_op->create(idmap, dir_inode, dentry,
mode, open_flag & O_EXCL);
if (error)
goto out_dput;
}
if (unlikely(create_error) && !dentry->d_inode) {
error = create_error;
goto out_dput;
}
return dentry;
out_dput:
dput(dentry);
return ERR_PTR(error);
}
static inline bool trailing_slashes(struct nameidata *nd)
{
return (bool)nd->last.name[nd->last.len];
}
static struct dentry *lookup_fast_for_open(struct nameidata *nd, int open_flag)
{
struct dentry *dentry;
if (open_flag & O_CREAT) {
if (trailing_slashes(nd))
return ERR_PTR(-EISDIR);
/* Don't bother on an O_EXCL create */
if (open_flag & O_EXCL)
return NULL;
}
if (trailing_slashes(nd))
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
dentry = lookup_fast(nd);
if (IS_ERR_OR_NULL(dentry))
return dentry;
if (open_flag & O_CREAT) {
/* Discard negative dentries. Need inode_lock to do the create */
if (!dentry->d_inode) {
if (!(nd->flags & LOOKUP_RCU))
dput(dentry);
dentry = NULL;
}
}
return dentry;
}
static const char *open_last_lookups(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
bool got_write = false;
struct dentry *dentry;
const char *res;
nd->flags |= op->intent;
if (nd->last_type != LAST_NORM) {
if (nd->depth)
put_link(nd);
return handle_dots(nd, nd->last_type);
}
/* We _can_ be in RCU mode here */
dentry = lookup_fast_for_open(nd, open_flag);
if (IS_ERR(dentry))
return ERR_CAST(dentry);
if (likely(dentry))
goto finish_lookup;
if (!(open_flag & O_CREAT)) {
if (WARN_ON_ONCE(nd->flags & LOOKUP_RCU))
return ERR_PTR(-ECHILD);
} else {
if (nd->flags & LOOKUP_RCU) {
if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
}
}
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
got_write = !mnt_want_write(nd->path.mnt);
/*
* do _not_ fail yet - we might not need that or fail with
* a different error; let lookup_open() decide; we'll be
* dropping this one anyway.
*/
}
if (open_flag & O_CREAT)
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
dentry = lookup_open(nd, file, op, got_write);
if (!IS_ERR(dentry)) {
if (file->f_mode & FMODE_CREATED)
fsnotify_create(dir->d_inode, dentry);
if (file->f_mode & FMODE_OPENED)
fsnotify_open(file);
}
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
inode_unlock_shared(dir->d_inode);
if (got_write)
mnt_drop_write(nd->path.mnt);
if (IS_ERR(dentry))
return ERR_CAST(dentry);
if (file->f_mode & (FMODE_OPENED | FMODE_CREATED)) {
dput(nd->path.dentry);
nd->path.dentry = dentry;
return NULL;
}
finish_lookup:
if (nd->depth)
put_link(nd);
res = step_into(nd, WALK_TRAILING, dentry);
if (unlikely(res))
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
return res;
}
/*
* Handle the last step of open()
*/
static int do_open(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
struct mnt_idmap *idmap;
int open_flag = op->open_flag;
bool do_truncate;
int acc_mode;
int error;
if (!(file->f_mode & (FMODE_OPENED | FMODE_CREATED))) {
error = complete_walk(nd);
if (error)
return error;
}
if (!(file->f_mode & FMODE_CREATED))
audit_inode(nd->name, nd->path.dentry, 0);
idmap = mnt_idmap(nd->path.mnt);
if (open_flag & O_CREAT) {
if ((open_flag & O_EXCL) && !(file->f_mode & FMODE_CREATED))
return -EEXIST;
if (d_is_dir(nd->path.dentry))
return -EISDIR;
error = may_create_in_sticky(idmap, nd,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
return error;
}
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
return -ENOTDIR;
do_truncate = false;
acc_mode = op->acc_mode;
if (file->f_mode & FMODE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
acc_mode = 0;
} else if (d_is_reg(nd->path.dentry) && open_flag & O_TRUNC) {
error = mnt_want_write(nd->path.mnt);
if (error)
return error;
do_truncate = true;
}
error = may_open(idmap, &nd->path, acc_mode, open_flag);
if (!error && !(file->f_mode & FMODE_OPENED))
error = vfs_open(&nd->path, file);
if (!error)
error = security_file_post_open(file, op->acc_mode);
if (!error && do_truncate)
error = handle_truncate(idmap, file);
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
}
if (do_truncate)
mnt_drop_write(nd->path.mnt);
return error;
}
/**
* vfs_tmpfile - create tmpfile
* @idmap: idmap of the mount the inode was found from
* @parentpath: pointer to the path of the base directory
* @file: file descriptor of the new tmpfile
* @mode: mode of the new tmpfile
*
* Create a temporary file.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_tmpfile(struct mnt_idmap *idmap,
const struct path *parentpath,
struct file *file, umode_t mode)
{
struct dentry *child;
struct inode *dir = d_inode(parentpath->dentry);
struct inode *inode;
int error;
int open_flag = file->f_flags;
/* we want directory to be writable */
error = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (!dir->i_op->tmpfile)
return -EOPNOTSUPP;
child = d_alloc(parentpath->dentry, &slash_name);
if (unlikely(!child))
return -ENOMEM;
file->__f_path.mnt = parentpath->mnt;
file->__f_path.dentry = child;
mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
error = dir->i_op->tmpfile(idmap, dir, file, mode);
dput(child);
if (file->f_mode & FMODE_OPENED)
fsnotify_open(file);
if (error)
return error;
/* Don't check for other permissions, the inode was just created */
error = may_open(idmap, &file->f_path, 0, file->f_flags);
if (error)
return error;
inode = file_inode(file);
if (!(open_flag & O_EXCL)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
spin_unlock(&inode->i_lock);
}
security_inode_post_create_tmpfile(idmap, inode);
return 0;
}
/**
* kernel_tmpfile_open - open a tmpfile for kernel internal use
* @idmap: idmap of the mount the inode was found from
* @parentpath: path of the base directory
* @mode: mode of the new tmpfile
* @open_flag: flags
* @cred: credentials for open
*
* Create and open a temporary file. The file is not accounted in nr_files,
* hence this is only for kernel internal use, and must not be installed into
* file tables or such.
*/
struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
const struct path *parentpath,
umode_t mode, int open_flag,
const struct cred *cred)
{
struct file *file;
int error;
file = alloc_empty_file_noaccount(open_flag, cred);
if (IS_ERR(file))
return file;
error = vfs_tmpfile(idmap, parentpath, file, mode);
if (error) {
fput(file);
file = ERR_PTR(error);
}
return file;
}
EXPORT_SYMBOL(kernel_tmpfile_open);
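/*
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * kernel subsystem holding a struct path for a writable directory could
 * create a scratch file with
 *
 *	file = kernel_tmpfile_open(mnt_idmap(path->mnt), path,
 *				   S_IFREG | 0600, O_RDWR, current_cred());
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	fput(file);
 *
 * As noted above, the file is unaccounted and must never be installed
 * into a file descriptor table.
 */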
static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
struct file *file)
{
struct path path;
int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
if (unlikely(error))
return error;
error = mnt_want_write(path.mnt);
if (unlikely(error))
goto out;
error = vfs_tmpfile(mnt_idmap(path.mnt), &path, file, op->mode);
if (error)
goto out2;
audit_inode(nd->name, file->f_path.dentry, 0);
out2:
mnt_drop_write(path.mnt);
out:
path_put(&path);
return error;
}
static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
{
struct path path;
int error = path_lookupat(nd, flags, &path);
if (!error) {
audit_inode(nd->name, path.dentry, 0);
error = vfs_open(&path, file);
path_put(&path);
}
return error;
}
static struct file *path_openat(struct nameidata *nd,
const struct open_flags *op, unsigned flags)
{
struct file *file;
int error;
file = alloc_empty_file(op->open_flag, current_cred());
if (IS_ERR(file))
return file;
if (unlikely(file->f_flags & __O_TMPFILE)) {
error = do_tmpfile(nd, flags, op, file);
} else if (unlikely(file->f_flags & O_PATH)) {
error = do_o_path(nd, flags, file);
} else {
const char *s = path_init(nd, flags);
while (!(error = link_path_walk(s, nd)) &&
(s = open_last_lookups(nd, file, op)) != NULL)
;
if (!error)
error = do_open(nd, file, op);
terminate_walk(nd);
}
if (likely(!error)) {
if (likely(file->f_mode & FMODE_OPENED))
return file;
WARN_ON(1);
error = -EINVAL;
}
fput_close(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
else
error = -ESTALE;
}
return ERR_PTR(error);
}
struct file *do_filp_open(int dfd, struct filename *pathname,
const struct open_flags *op)
{
struct nameidata nd;
int flags = op->lookup_flags;
struct file *filp;
set_nameidata(&nd, dfd, pathname, NULL);
filp = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(filp == ERR_PTR(-ECHILD)))
filp = path_openat(&nd, op, flags);
if (unlikely(filp == ERR_PTR(-ESTALE)))
filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
restore_nameidata();
return filp;
}
struct file *do_file_open_root(const struct path *root,
const char *name, const struct open_flags *op)
{
struct nameidata nd;
struct file *file;
struct filename *filename;
int flags = op->lookup_flags;
if (d_is_symlink(root->dentry) && op->intent & LOOKUP_OPEN)
return ERR_PTR(-ELOOP);
filename = getname_kernel(name);
if (IS_ERR(filename))
return ERR_CAST(filename);
set_nameidata(&nd, -1, filename, root);
file = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(file == ERR_PTR(-ECHILD)))
file = path_openat(&nd, op, flags);
if (unlikely(file == ERR_PTR(-ESTALE)))
file = path_openat(&nd, op, flags | LOOKUP_REVAL);
restore_nameidata();
putname(filename);
return file;
}
static struct dentry *filename_create(int dfd, struct filename *name,
struct path *path, unsigned int lookup_flags)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct qstr last;
bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
int type;
int error;
error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
if (error)
return ERR_PTR(error);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
*/
if (unlikely(type != LAST_NORM))
goto out;
/* don't fail immediately if it's r/o, at least try to report other errors */
error = mnt_want_write(path->mnt);
/*
* Do the final lookup. Suppress 'create' if there is a trailing
* '/', and a directory wasn't requested.
*/
if (last.name[last.len] && !want_dir)
create_flags &= ~LOOKUP_CREATE;
inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
dentry = lookup_one_qstr_excl(&last, path->dentry,
reval_flag | create_flags);
if (IS_ERR(dentry))
goto unlock;
if (unlikely(error))
goto fail;
return dentry;
fail:
dput(dentry);
dentry = ERR_PTR(error);
unlock:
inode_unlock(path->dentry->d_inode);
if (!error)
mnt_drop_write(path->mnt);
out:
path_put(path);
return dentry;
}
struct dentry *start_creating_path(int dfd, const char *pathname,
struct path *path, unsigned int lookup_flags)
{
struct filename *filename = getname_kernel(pathname);
struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
putname(filename);
return res;
}
EXPORT_SYMBOL(start_creating_path);
void end_creating_path(const struct path *path, struct dentry *dentry)
{
if (!IS_ERR(dentry))
dput(dentry);
inode_unlock(path->dentry->d_inode);
mnt_drop_write(path->mnt);
path_put(path);
}
EXPORT_SYMBOL(end_creating_path);
inline struct dentry *start_creating_user_path(
int dfd, const char __user *pathname,
struct path *path, unsigned int lookup_flags)
{
struct filename *filename = getname(pathname);
struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
putname(filename);
return res;
}
EXPORT_SYMBOL(start_creating_user_path);
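/*
 * Illustrative sketch (hypothetical path and device numbers, not an
 * in-tree caller): code creating a device node at a kernel-chosen
 * location pairs start_creating_path() with end_creating_path():
 *
 *	struct path path;
 *	struct dentry *dentry;
 *	int err;
 *
 *	dentry = start_creating_path(AT_FDCWD, "/dev/example_node", &path, 0);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	err = vfs_mknod(mnt_idmap(path.mnt), d_inode(path.dentry), dentry,
 *			S_IFCHR | 0600, MKDEV(10, 200));
 *	end_creating_path(&path, dentry);
 *	return err;
 *
 * On success the parent directory is locked and write access to the
 * mount is held until end_creating_path() drops both.
 */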
/**
* vfs_mknod - create device node or file
* @idmap: idmap of the mount the inode was found from
* @dir: inode of the parent directory
* @dentry: dentry of the child device node
* @mode: mode of the child device node
* @dev: device number of device to create
*
* Create a device node or file.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
bool is_whiteout = S_ISCHR(mode) && dev == WHITEOUT_DEV;
int error = may_create(idmap, dir, dentry);
if (error)
return error;
if ((S_ISCHR(mode) || S_ISBLK(mode)) && !is_whiteout &&
!capable(CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
return -EPERM;
mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
error = devcgroup_inode_mknod(mode, dev);
if (error)
return error;
error = security_inode_mknod(dir, dentry, mode, dev);
if (error)
return error;
error = dir->i_op->mknod(idmap, dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_mknod);
static int may_mknod(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFREG:
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
case 0: /* zero mode translates to S_IFREG */
return 0;
case S_IFDIR:
return -EPERM;
default:
return -EINVAL;
}
}
static int do_mknodat(int dfd, struct filename *name, umode_t mode,
unsigned int dev)
{
struct mnt_idmap *idmap;
struct dentry *dentry;
struct path path;
int error;
unsigned int lookup_flags = 0;
error = may_mknod(mode);
if (error)
goto out1;
retry:
dentry = filename_create(dfd, name, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out1;
error = security_path_mknod(&path, dentry,
mode_strip_umask(path.dentry->d_inode, mode), dev);
if (error)
goto out2;
idmap = mnt_idmap(path.mnt);
switch (mode & S_IFMT) {
case 0: case S_IFREG:
error = vfs_create(idmap, path.dentry->d_inode,
dentry, mode, true);
if (!error)
security_path_post_mknod(idmap, dentry);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(idmap, path.dentry->d_inode,
dentry, mode, new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
error = vfs_mknod(idmap, path.dentry->d_inode,
dentry, mode, 0);
break;
}
out2:
end_creating_path(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out1:
putname(name);
return error;
}
SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
unsigned int, dev)
{
return do_mknodat(dfd, getname(filename), mode, dev);
}
SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
return do_mknodat(AT_FDCWD, getname(filename), mode, dev);
}
/**
* vfs_mkdir - create directory returning correct dentry if possible
* @idmap: idmap of the mount the inode was found from
* @dir: inode of the parent directory
* @dentry: dentry of the child directory
* @mode: mode of the child directory
*
* Create a directory.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*
* In the event that the filesystem does not use the *@dentry but leaves it
* negative or unhashes it and possibly splices a different one returning it,
* the original dentry is dput() and the alternate is returned.
*
* In case of an error the dentry is dput() and an ERR_PTR() is returned.
*/
struct dentry *vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
int error;
unsigned max_links = dir->i_sb->s_max_links;
struct dentry *de;
error = may_create(idmap, dir, dentry);
if (error)
goto err;
error = -EPERM;
if (!dir->i_op->mkdir)
goto err;
mode = vfs_prepare_mode(idmap, dir, mode, S_IRWXUGO | S_ISVTX, 0);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
goto err;
error = -EMLINK;
if (max_links && dir->i_nlink >= max_links)
goto err;
de = dir->i_op->mkdir(idmap, dir, dentry, mode);
error = PTR_ERR(de);
if (IS_ERR(de))
goto err;
if (de) {
dput(dentry);
dentry = de;
}
fsnotify_mkdir(dir, dentry);
return dentry;
err:
dput(dentry);
return ERR_PTR(error);
}
EXPORT_SYMBOL(vfs_mkdir);
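/*
 * Note for callers: continue with the dentry returned by vfs_mkdir()
 * rather than the one passed in, and do not dput() the passed-in dentry;
 * vfs_mkdir() either hands it back as the return value or has already
 * consumed it. do_mkdirat() below shows the expected pattern.
 */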
int do_mkdirat(int dfd, struct filename *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_DIRECTORY;
retry:
dentry = filename_create(dfd, name, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putname;
error = security_path_mkdir(&path, dentry,
mode_strip_umask(path.dentry->d_inode, mode));
if (!error) {
dentry = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
dentry, mode);
if (IS_ERR(dentry))
error = PTR_ERR(dentry);
}
end_creating_path(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out_putname:
putname(name);
return error;
}
SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
return do_mkdirat(dfd, getname(pathname), mode);
}
SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
return do_mkdirat(AT_FDCWD, getname(pathname), mode);
}
/**
* vfs_rmdir - remove directory
* @idmap: idmap of the mount the inode was found from
* @dir: inode of the parent directory
* @dentry: dentry of the child directory
*
* Remove a directory.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_rmdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry)
{
int error = may_delete(idmap, dir, dentry, 1);
if (error)
return error;
if (!dir->i_op->rmdir)
return -EPERM;
dget(dentry);
inode_lock(dentry->d_inode);
error = -EBUSY;
if (is_local_mountpoint(dentry) ||
(dentry->d_inode->i_flags & S_KERNEL_FILE))
goto out;
error = security_inode_rmdir(dir, dentry);
if (error)
goto out;
error = dir->i_op->rmdir(dir, dentry);
if (error)
goto out;
shrink_dcache_parent(dentry);
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
detach_mounts(dentry);
out:
inode_unlock(dentry->d_inode);
dput(dentry);
if (!error)
d_delete_notify(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_rmdir);
int do_rmdir(int dfd, struct filename *name)
{
int error;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
unsigned int lookup_flags = 0;
retry:
error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
goto exit1;
switch (type) {
case LAST_DOTDOT:
error = -ENOTEMPTY;
goto exit2;
case LAST_DOT:
error = -EINVAL;
goto exit2;
case LAST_ROOT:
error = -EBUSY;
goto exit2;
}
error = mnt_want_write(path.mnt);
if (error)
goto exit2;
inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
dentry = lookup_one_qstr_excl(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit3;
error = security_path_rmdir(&path, dentry);
if (error)
goto exit4;
error = vfs_rmdir(mnt_idmap(path.mnt), path.dentry->d_inode, dentry);
exit4:
dput(dentry);
exit3:
inode_unlock(path.dentry->d_inode);
mnt_drop_write(path.mnt);
exit2:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
exit1:
putname(name);
return error;
}
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
return do_rmdir(AT_FDCWD, getname(pathname));
}
/**
* vfs_unlink - unlink a filesystem object
* @idmap: idmap of the mount the inode was found from
* @dir: parent directory
* @dentry: victim
* @delegated_inode: returns victim inode, if the inode is delegated.
*
* The caller must hold dir->i_rwsem exclusively.
*
* If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
* return a reference to the inode in delegated_inode. The caller
* should then break the delegation on that inode and retry. Because
* breaking a delegation may take a long time, the caller should drop
* dir->i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_unlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, struct inode **delegated_inode)
{
struct inode *target = dentry->d_inode;
int error = may_delete(idmap, dir, dentry, 0);
if (error)
return error;
if (!dir->i_op->unlink)
return -EPERM;
inode_lock(target);
if (IS_SWAPFILE(target))
error = -EPERM;
else if (is_local_mountpoint(dentry))
error = -EBUSY;
else {
error = security_inode_unlink(dir, dentry);
if (!error) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
error = dir->i_op->unlink(dir, dentry);
if (!error) {
dont_mount(dentry);
detach_mounts(dentry);
}
}
}
out:
inode_unlock(target);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
fsnotify_unlink(dir, dentry);
} else if (!error) {
fsnotify_link_count(target);
d_delete_notify(dir, dentry);
}
return error;
}
EXPORT_SYMBOL(vfs_unlink);
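/*
 * Condensed sketch of the delegation-break protocol described above
 * (do_unlinkat() below is the complete in-tree version):
 *
 *	struct inode *delegated_inode = NULL;
 * retry:
 *	... lock the parent, look up the victim ...
 *	error = vfs_unlink(idmap, dir, dentry, &delegated_inode);
 *	... unlock the parent, drop the victim ...
 *	if (delegated_inode) {
 *		error = break_deleg_wait(&delegated_inode);
 *		if (!error)
 *			goto retry;
 *	}
 */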
/*
* Make sure that the actual truncation of the file will occur outside its
* directory's i_rwsem. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
int do_unlinkat(int dfd, struct filename *name)
{
int error;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
unsigned int lookup_flags = 0;
retry:
error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
goto exit1;
error = -EISDIR;
if (type != LAST_NORM)
goto exit2;
error = mnt_want_write(path.mnt);
if (error)
goto exit2;
retry_deleg:
inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
dentry = lookup_one_qstr_excl(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
if (last.name[last.len])
goto slashes;
inode = dentry->d_inode;
ihold(inode);
error = security_path_unlink(&path, dentry);
if (error)
goto exit3;
error = vfs_unlink(mnt_idmap(path.mnt), path.dentry->d_inode,
dentry, &delegated_inode);
exit3:
dput(dentry);
}
inode_unlock(path.dentry->d_inode);
if (inode)
iput(inode); /* truncate the inode here */
inode = NULL;
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
mnt_drop_write(path.mnt);
exit2:
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
inode = NULL;
goto retry;
}
exit1:
putname(name);
return error;
slashes:
if (d_is_dir(dentry))
error = -EISDIR;
else
error = -ENOTDIR;
goto exit3;
}
SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
if ((flag & ~AT_REMOVEDIR) != 0)
return -EINVAL;
if (flag & AT_REMOVEDIR)
return do_rmdir(dfd, getname(pathname));
return do_unlinkat(dfd, getname(pathname));
}
SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
return do_unlinkat(AT_FDCWD, getname(pathname));
}
/**
* vfs_symlink - create symlink
* @idmap: idmap of the mount the inode was found from
* @dir: inode of the parent directory
* @dentry: dentry of the child symlink file
* @oldname: name of the file to link to
*
* Create a symlink.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *oldname)
{
int error;
error = may_create(idmap, dir, dentry);
if (error)
return error;
if (!dir->i_op->symlink)
return -EPERM;
error = security_inode_symlink(dir, dentry, oldname);
if (error)
return error;
error = dir->i_op->symlink(idmap, dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_symlink);
int do_symlinkat(struct filename *from, int newdfd, struct filename *to)
{
int error;
struct dentry *dentry;
struct path path;
unsigned int lookup_flags = 0;
if (IS_ERR(from)) {
error = PTR_ERR(from);
goto out_putnames;
}
retry:
dentry = filename_create(newdfd, to, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putnames;
error = security_path_symlink(&path, dentry, from->name);
if (!error)
error = vfs_symlink(mnt_idmap(path.mnt), path.dentry->d_inode,
dentry, from->name);
end_creating_path(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out_putnames:
putname(to);
putname(from);
return error;
}
SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
return do_symlinkat(getname(oldname), newdfd, getname(newname));
}
SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
return do_symlinkat(getname(oldname), AT_FDCWD, getname(newname));
}
/**
* vfs_link - create a new link
* @old_dentry: object to be linked
* @idmap: idmap of the mount
* @dir: new parent
* @new_dentry: where to create the new link
* @delegated_inode: returns inode needing a delegation break
*
* The caller must hold dir->i_rwsem exclusively.
*
* If vfs_link discovers a delegation on the to-be-linked file in need
* of breaking, it will return -EWOULDBLOCK and return a reference to the
* inode in delegated_inode. The caller should then break the delegation
* and retry. Because breaking a delegation may take a long time, the
* caller should drop the i_rwsem before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then take
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
*/
int vfs_link(struct dentry *old_dentry, struct mnt_idmap *idmap,
struct inode *dir, struct dentry *new_dentry,
struct inode **delegated_inode)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
int error;
if (!inode)
return -ENOENT;
error = may_create(idmap, dir, new_dentry);
if (error)
return error;
if (dir->i_sb != inode->i_sb)
return -EXDEV;
/*
* A link to an append-only or immutable file cannot be created.
*/
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
/*
* Updating the link count will likely cause i_uid and i_gid to
* be written back improperly if their true value is unknown to
* the vfs.
*/
if (HAS_UNMAPPED_ID(idmap, inode))
return -EPERM;
if (!dir->i_op->link)
return -EPERM;
if (S_ISDIR(inode->i_mode))
return -EPERM;
error = security_inode_link(old_dentry, dir, new_dentry);
if (error)
return error;
inode_lock(inode);
/* Make sure we don't allow creating hardlink to an unlinked file */
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
error = -ENOENT;
else if (max_links && inode->i_nlink >= max_links)
error = -EMLINK;
else {
error = try_break_deleg(inode, delegated_inode);
if (!error)
error = dir->i_op->link(old_dentry, dir, new_dentry);
}
if (!error && (inode->i_state & I_LINKABLE)) {
spin_lock(&inode->i_lock);
inode->i_state &= ~I_LINKABLE;
spin_unlock(&inode->i_lock);
}
inode_unlock(inode);
if (!error)
fsnotify_link(dir, inode, new_dentry);
return error;
}
EXPORT_SYMBOL(vfs_link);
/*
* Hardlinks are often used in delicate situations. We avoid
* security-related surprises by not following symlinks on the
* newname. --KAB
*
* We don't follow them on the oldname either to be compatible
* with linux 2.0, and to avoid hard-linking to directories
* and other special files. --ADM
*/
int do_linkat(int olddfd, struct filename *old, int newdfd,
struct filename *new, int flags)
{
struct mnt_idmap *idmap;
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
int how = 0;
int error;
if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) {
error = -EINVAL;
goto out_putnames;
}
/*
* To use null names we require CAP_DAC_READ_SEARCH or
* that the open-time creds of the dfd matches current.
* This ensures that not everyone will be able to create
* a hardlink using the passed file descriptor.
*/
if (flags & AT_EMPTY_PATH)
how |= LOOKUP_LINKAT_EMPTY;
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
retry:
error = filename_lookup(olddfd, old, how, &old_path, NULL);
if (error)
goto out_putnames;
new_dentry = filename_create(newdfd, new, &new_path,
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out_putpath;
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
idmap = mnt_idmap(new_path.mnt);
error = may_linkat(idmap, &old_path);
if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
error = vfs_link(old_path.dentry, idmap, new_path.dentry->d_inode,
new_dentry, &delegated_inode);
out_dput:
end_creating_path(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error) {
path_put(&old_path);
goto retry;
}
}
if (retry_estale(error, how)) {
path_put(&old_path);
how |= LOOKUP_REVAL;
goto retry;
}
out_putpath:
path_put(&old_path);
out_putnames:
putname(old);
putname(new);
return error;
}
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname, int, flags)
{
return do_linkat(olddfd, getname_uflags(oldname, flags),
newdfd, getname(newname), flags);
}
SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
return do_linkat(AT_FDCWD, getname(oldname), AT_FDCWD, getname(newname), 0);
}
/**
* vfs_rename - rename a filesystem object
* @rd: pointer to &struct renamedata info
*
* The caller must hold multiple mutexes--see lock_rename().
*
* If vfs_rename discovers a delegation in need of breaking at either
* the source or destination, it will return -EWOULDBLOCK and return a
* reference to the inode in delegated_inode. The caller should then
* break the delegation and retry. Because breaking a delegation may
* take a long time, the caller should drop all locks before doing
* so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*
* The worst of all namespace operations - renaming directory. "Perverted"
* doesn't even start to describe it. Somebody in UCB had a heck of a trip...
* Problems:
*
* a) we can get into loop creation.
* b) race potential - two innocent renames can create a loop together.
* That's where 4.4BSD screws up. Current fix: serialization on
* sb->s_vfs_rename_mutex. We might be more accurate, but that's another
* story.
* c) we may have to lock up to _four_ objects - parents and victim (if it exists),
* and source (if it's a non-directory or a subdirectory that moves to
* different parent).
* And that - after we got ->i_rwsem on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_mutex _and_ that parent of the object we
* move will be locked. Thus we can rank directories by the tree
* (ancestors first) and rank all non-directories after them.
* That works since everybody except rename does "lock parent, lookup,
* lock child" and rename is under ->s_vfs_rename_mutex.
* HOWEVER, it relies on the assumption that any object with ->lookup()
* has no more than 1 dentry. If "hybrid" objects ever appear,
* we'd better make sure that there's no link(2) for them.
* d) conversion from fhandle to dentry may come at the wrong moment - when
* we are removing the target. Solution: we will have to grab ->i_rwsem
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
* ->i_rwsem on parents, which works but leads to some truly excessive
* locking].
*/
int vfs_rename(struct renamedata *rd)
{
int error;
struct inode *old_dir = d_inode(rd->old_parent);
struct inode *new_dir = d_inode(rd->new_parent);
struct dentry *old_dentry = rd->old_dentry;
struct dentry *new_dentry = rd->new_dentry;
struct inode **delegated_inode = rd->delegated_inode;
unsigned int flags = rd->flags;
bool is_dir = d_is_dir(old_dentry);
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
struct name_snapshot old_name;
bool lock_old_subdir, lock_new_subdir;
if (source == target)
return 0;
error = may_delete(rd->mnt_idmap, old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
error = may_create(rd->mnt_idmap, new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
error = may_delete(rd->mnt_idmap, new_dir,
new_dentry, is_dir);
else
error = may_delete(rd->mnt_idmap, new_dir,
new_dentry, new_is_dir);
}
if (error)
return error;
if (!old_dir->i_op->rename)
return -EPERM;
/*
* If we are going to change the parent - check write permissions,
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
if (is_dir) {
error = inode_permission(rd->mnt_idmap, source,
MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
error = inode_permission(rd->mnt_idmap, target,
MAY_WRITE);
if (error)
return error;
}
}
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (error)
return error;
take_dentry_name_snapshot(&old_name, old_dentry);
dget(new_dentry);
/*
* Lock children.
* The source subdirectory needs to be locked on cross-directory
* rename or cross-directory exchange since its parent changes.
* The target subdirectory needs to be locked on cross-directory
* exchange due to parent change and on any rename due to becoming
* a victim.
* Non-directories need locking in all cases (for NFS reasons);
* they get locked after any subdirectories (in inode address order).
*
* NOTE: WE ONLY LOCK UNRELATED DIRECTORIES IN CROSS-DIRECTORY CASE.
* NEVER, EVER DO THAT WITHOUT ->s_vfs_rename_mutex.
*/
lock_old_subdir = new_dir != old_dir;
lock_new_subdir = new_dir != old_dir || !(flags & RENAME_EXCHANGE);
if (is_dir) {
if (lock_old_subdir)
inode_lock_nested(source, I_MUTEX_CHILD);
if (target && (!new_is_dir || lock_new_subdir))
inode_lock(target);
} else if (new_is_dir) {
if (lock_new_subdir)
inode_lock_nested(target, I_MUTEX_CHILD);
inode_lock(source);
} else {
lock_two_nondirectories(source, target);
}
error = -EPERM;
if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target)))
goto out;
error = -EBUSY;
if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
goto out;
if (max_links && new_dir != old_dir) {
error = -EMLINK;
if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
goto out;
if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
old_dir->i_nlink >= max_links)
goto out;
}
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
goto out;
}
if (target && !new_is_dir) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
}
error = old_dir->i_op->rename(rd->mnt_idmap, old_dir, old_dentry,
new_dir, new_dentry, flags);
if (error)
goto out;
if (!(flags & RENAME_EXCHANGE) && target) {
if (is_dir) {
shrink_dcache_parent(new_dentry);
target->i_flags |= S_DEAD;
}
dont_mount(new_dentry);
detach_mounts(new_dentry);
}
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
if (!(flags & RENAME_EXCHANGE))
d_move(old_dentry, new_dentry);
else
d_exchange(old_dentry, new_dentry);
}
out:
if (!is_dir || lock_old_subdir)
inode_unlock(source);
if (target && (!new_is_dir || lock_new_subdir))
inode_unlock(target);
dput(new_dentry);
if (!error) {
fsnotify_move(old_dir, new_dir, &old_name.name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, &old_dentry->d_name,
new_is_dir, NULL, new_dentry);
}
}
release_dentry_name_snapshot(&old_name);
return error;
}
EXPORT_SYMBOL(vfs_rename);
int do_renameat2(int olddfd, struct filename *from, int newdfd,
struct filename *to, unsigned int flags)
{
struct renamedata rd;
struct dentry *old_dentry, *new_dentry;
struct dentry *trap;
struct path old_path, new_path;
struct qstr old_last, new_last;
int old_type, new_type;
struct inode *delegated_inode = NULL;
unsigned int lookup_flags = 0, target_flags =
LOOKUP_RENAME_TARGET | LOOKUP_CREATE;
bool should_retry = false;
int error = -EINVAL;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
goto put_names;
if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
(flags & RENAME_EXCHANGE))
goto put_names;
if (flags & RENAME_EXCHANGE)
target_flags = 0;
if (flags & RENAME_NOREPLACE)
target_flags |= LOOKUP_EXCL;
retry:
error = filename_parentat(olddfd, from, lookup_flags, &old_path,
&old_last, &old_type);
if (error)
goto put_names;
error = filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last,
&new_type);
if (error)
goto exit1;
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto exit2;
error = -EBUSY;
if (old_type != LAST_NORM)
goto exit2;
if (flags & RENAME_NOREPLACE)
error = -EEXIST;
if (new_type != LAST_NORM)
goto exit2;
error = mnt_want_write(old_path.mnt);
if (error)
goto exit2;
retry_deleg:
trap = lock_rename(new_path.dentry, old_path.dentry);
if (IS_ERR(trap)) {
error = PTR_ERR(trap);
goto exit_lock_rename;
}
old_dentry = lookup_one_qstr_excl(&old_last, old_path.dentry,
lookup_flags);
error = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
goto exit3;
new_dentry = lookup_one_qstr_excl(&new_last, new_path.dentry,
lookup_flags | target_flags);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto exit4;
if (flags & RENAME_EXCHANGE) {
if (!d_is_dir(new_dentry)) {
error = -ENOTDIR;
if (new_last.name[new_last.len])
goto exit5;
}
}
/* unless the source is a directory, trailing slashes give -ENOTDIR */
if (!d_is_dir(old_dentry)) {
error = -ENOTDIR;
if (old_last.name[old_last.len])
goto exit5;
if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
goto exit5;
}
/* source should not be ancestor of target */
error = -EINVAL;
if (old_dentry == trap)
goto exit5;
/* target should not be an ancestor of source */
if (!(flags & RENAME_EXCHANGE))
error = -ENOTEMPTY;
if (new_dentry == trap)
goto exit5;
error = security_path_rename(&old_path, old_dentry,
&new_path, new_dentry, flags);
if (error)
goto exit5;
rd.old_parent = old_path.dentry;
rd.old_dentry = old_dentry;
rd.mnt_idmap = mnt_idmap(old_path.mnt);
rd.new_parent = new_path.dentry;
rd.new_dentry = new_dentry;
rd.delegated_inode = &delegated_inode;
rd.flags = flags;
error = vfs_rename(&rd);
exit5:
dput(new_dentry);
exit4:
dput(old_dentry);
exit3:
unlock_rename(new_path.dentry, old_path.dentry);
exit_lock_rename:
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
mnt_drop_write(old_path.mnt);
exit2:
if (retry_estale(error, lookup_flags))
should_retry = true;
path_put(&new_path);
exit1:
path_put(&old_path);
if (should_retry) {
should_retry = false;
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
put_names:
putname(from);
putname(to);
return error;
}
SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname, unsigned int, flags)
{
return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname),
flags);
}
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname),
0);
}
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
return do_renameat2(AT_FDCWD, getname(oldname), AT_FDCWD,
getname(newname), 0);
}
int readlink_copy(char __user *buffer, int buflen, const char *link, int linklen)
{
int copylen;
copylen = linklen;
if (unlikely(copylen > (unsigned) buflen))
copylen = buflen;
if (copy_to_user(buffer, link, copylen))
copylen = -EFAULT;
return copylen;
}
/**
* vfs_readlink - copy symlink body into userspace buffer
* @dentry: dentry on which to get symbolic link
* @buffer: user memory pointer
* @buflen: size of buffer
*
* Does not touch atime. That's up to the caller if necessary
*
* Does not call security hook.
*/
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct inode *inode = d_inode(dentry);
DEFINE_DELAYED_CALL(done);
const char *link;
int res;
if (inode->i_opflags & IOP_CACHED_LINK)
return readlink_copy(buffer, buflen, inode->i_link, inode->i_linklen);
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
return inode->i_op->readlink(dentry, buffer, buflen);
if (!d_is_symlink(dentry))
return -EINVAL;
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_DEFAULT_READLINK;
spin_unlock(&inode->i_lock);
}
link = READ_ONCE(inode->i_link);
if (!link) {
link = inode->i_op->get_link(dentry, inode, &done);
if (IS_ERR(link))
return PTR_ERR(link);
}
res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
EXPORT_SYMBOL(vfs_readlink);
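/*
 * Illustrative sketch (hypothetical "examplefs" names, assumed stacking
 * filesystem context): a layered filesystem can implement ->readlink()
 * by forwarding to the underlying dentry:
 *
 *	static int examplefs_readlink(struct dentry *dentry,
 *				      char __user *buf, int bufsiz)
 *	{
 *		struct dentry *lower = examplefs_lower_dentry(dentry);
 *
 *		return vfs_readlink(lower, buf, bufsiz);
 *	}
 */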
/**
* vfs_get_link - get symlink body
* @dentry: dentry on which to get symbolic link
* @done: caller needs to free returned data with this
*
* Calls security hook and i_op->get_link() on the supplied inode.
*
* It does not touch atime. That's up to the caller if necessary.
*
* Does not work on "special" symlinks like /proc/$$/fd/N
*/
const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
{
const char *res = ERR_PTR(-EINVAL);
struct inode *inode = d_inode(dentry);
if (d_is_symlink(dentry)) {
res = ERR_PTR(security_inode_readlink(dentry));
if (!res)
res = inode->i_op->get_link(dentry, inode, done);
}
return res;
}
EXPORT_SYMBOL(vfs_get_link);
/* get the link contents into pagecache */
static char *__page_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *callback)
{
struct folio *folio;
struct address_space *mapping = inode->i_mapping;
if (!dentry) {
folio = filemap_get_folio(mapping, 0);
if (IS_ERR(folio))
return ERR_PTR(-ECHILD);
if (!folio_test_uptodate(folio)) {
folio_put(folio);
return ERR_PTR(-ECHILD);
}
} else {
folio = read_mapping_folio(mapping, 0, NULL);
if (IS_ERR(folio))
return ERR_CAST(folio);
}
set_delayed_call(callback, page_put_link, folio);
BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
return folio_address(folio);
}
const char *page_get_link_raw(struct dentry *dentry, struct inode *inode,
struct delayed_call *callback)
{
return __page_get_link(dentry, inode, callback);
}
EXPORT_SYMBOL_GPL(page_get_link_raw);
/**
* page_get_link() - An implementation of the get_link inode_operation.
* @dentry: The directory entry which is the symlink.
* @inode: The inode for the symlink.
* @callback: Used to drop the reference to the symlink.
*
* Filesystems which store their symlinks in the page cache should use
* this to implement the get_link() member of their inode_operations.
*
* Return: A pointer to the NUL-terminated symlink.
*/
const char *page_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *callback)
{
char *kaddr = __page_get_link(dentry, inode, callback);
if (!IS_ERR(kaddr))
nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
return kaddr;
}
EXPORT_SYMBOL(page_get_link);
/**
* page_put_link() - Drop the reference to the symlink.
* @arg: The folio which contains the symlink.
*
* This is used internally by page_get_link(). It is exported for use
* by filesystems which need to implement a variant of page_get_link()
* themselves. Despite the apparent symmetry, filesystems which use
* page_get_link() do not need to call page_put_link().
*
* The argument, while it has a void pointer type, must be a pointer to
* the folio which was retrieved from the page cache. The delayed_call
* infrastructure is used to drop the reference count once the caller
* is done with the symlink.
*/
void page_put_link(void *arg)
{
folio_put(arg);
}
EXPORT_SYMBOL(page_put_link);
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
const char *link;
int res;
DEFINE_DELAYED_CALL(done);
link = page_get_link(dentry, d_inode(dentry), &done);
res = PTR_ERR(link);
if (!IS_ERR(link))
res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
EXPORT_SYMBOL(page_readlink);
int page_symlink(struct inode *inode, const char *symname, int len)
{
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
struct folio *folio;
void *fsdata = NULL;
int err;
unsigned int flags;
retry:
if (nofs)
flags = memalloc_nofs_save();
err = aops->write_begin(NULL, mapping, 0, len-1, &folio, &fsdata);
if (nofs)
memalloc_nofs_restore(flags);
if (err)
goto fail;
memcpy(folio_address(folio), symname, len - 1);
err = aops->write_end(NULL, mapping, 0, len - 1, len - 1,
folio, fsdata);
if (err < 0)
goto fail;
if (err < len-1)
goto retry;
mark_inode_dirty(inode);
return 0;
fail:
return err;
}
EXPORT_SYMBOL(page_symlink);
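/*
 * Illustrative sketch (modelled on how simple disk filesystems use these
 * helpers; error labels are hypothetical): a ->symlink() method that
 * stores the target in the page cache typically does
 *
 *	err = page_symlink(inode, symname, strlen(symname) + 1);
 *	if (err)
 *		goto out_fail;
 *	inode->i_op = &page_symlink_inode_operations;
 *	inode_nohighmem(inode);
 *
 * after which page_get_link() (wired up just below) serves readlink and
 * symlink traversal from the same pages.
 */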
const struct inode_operations page_symlink_inode_operations = {
.get_link = page_get_link,
};
EXPORT_SYMBOL(page_symlink_inode_operations);
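/*
 * Illustrative sketch (not part of the original file): how a pagecache-backed
 * filesystem typically wires up the helpers above. myfs_new_inode() and
 * myfs_add_entry() are hypothetical placeholders for the filesystem's own
 * inode allocation and directory-entry code; compare e.g. ext2's symlink
 * handling for the real pattern.
 */
#if 0
static int myfs_make_symlink(struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	int len = strlen(symname) + 1;
	struct inode *inode = myfs_new_inode(dir, S_IFLNK | 0777); /* hypothetical */
	int err;

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Symlink body lives in the page cache; page_get_link() reads it back. */
	inode->i_op = &page_symlink_inode_operations;
	inode_nohighmem(inode);
	err = page_symlink(inode, symname, len);
	if (err) {
		iput(inode);
		return err;
	}
	return myfs_add_entry(dentry, inode); /* hypothetical */
}
#endif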
// SPDX-License-Identifier: GPL-2.0
/*
* linux/ipc/util.c
* Copyright (C) 1992 Krishna Balasubramanian
*
* Sep 1997 - Call suser() last after "normal" permission checks so we
* get BSD style process accounting right.
* Occurs in several places in the IPC code.
* Chris Evans, <chris@ferret.lmh.ox.ac.uk>
* Nov 1999 - ipc helper functions, unified SMP locking
* Manfred Spraul <manfred@colorfullife.com>
* Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
* Mingming Cao <cmm@us.ibm.com>
* Mar 2006 - support for audit of ipc object properties
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*
* General sysv ipc locking scheme:
* rcu_read_lock()
* obtain the ipc object (kern_ipc_perm) by looking up the id in an idr
* tree.
* - perform initial checks (capabilities, auditing and permission,
* etc).
* - perform read-only operations, such as INFO command, that
* do not demand atomicity
* acquire the ipc lock (kern_ipc_perm.lock) through
* ipc_lock_object()
* - perform read-only operations that demand atomicity,
* such as STAT command.
* - perform data updates, such as SET, RMID commands and
* mechanism-specific operations (semop/semtimedop,
* msgsnd/msgrcv, shmat/shmdt).
* drop the ipc lock, through ipc_unlock_object().
* rcu_read_unlock()
*
* The ids->rwsem must be taken when:
* - creating, removing and iterating the existing entries in ipc
* identifier sets.
* - iterating through files under /proc/sysvipc/
*
* Note that sems have a special fast path that avoids kern_ipc_perm.lock -
* see sem_lock().
*/
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>
#include <linux/rwsem.h>
#include <linux/memory.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>
#include <linux/log2.h>
#include <asm/unistd.h>
#include "util.h"
struct ipc_proc_iface {
const char *path;
const char *header;
int ids;
int (*show)(struct seq_file *, void *);
};
/**
* ipc_init - initialise ipc subsystem
*
* The various sysv ipc resources (semaphores, messages and shared
* memory) are initialised.
*
* A callback routine is registered into the memory hotplug notifier
* chain: since msgmni scales to lowmem this callback routine will be
 * called upon successful memory add / remove to recompute msgmni.
*/
static int __init ipc_init(void)
{
proc_mkdir("sysvipc", NULL);
sem_init();
msg_init();
shm_init();
return 0;
}
device_initcall(ipc_init);
static const struct rhashtable_params ipc_kht_params = {
.head_offset = offsetof(struct kern_ipc_perm, khtnode),
.key_offset = offsetof(struct kern_ipc_perm, key),
.key_len = sizeof_field(struct kern_ipc_perm, key),
.automatic_shrinking = true,
};
/**
* ipc_init_ids - initialise ipc identifiers
* @ids: ipc identifier set
*
* Set up the sequence range to use for the ipc identifier range (limited
* below ipc_mni) then initialise the keys hashtable and ids idr.
*/
void ipc_init_ids(struct ipc_ids *ids)
{
ids->in_use = 0;
ids->seq = 0;
init_rwsem(&ids->rwsem);
rhashtable_init(&ids->key_ht, &ipc_kht_params);
idr_init(&ids->ipcs_idr);
ids->max_idx = -1;
ids->last_idx = -1;
#ifdef CONFIG_CHECKPOINT_RESTORE
ids->next_id = -1;
#endif
}
#ifdef CONFIG_PROC_FS
static const struct proc_ops sysvipc_proc_ops;
/**
* ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface.
* @path: Path in procfs
* @header: Banner to be printed at the beginning of the file.
* @ids: ipc id table to iterate.
* @show: show routine.
*/
void __init ipc_init_proc_interface(const char *path, const char *header,
int ids, int (*show)(struct seq_file *, void *))
{
struct proc_dir_entry *pde;
struct ipc_proc_iface *iface;
iface = kmalloc(sizeof(*iface), GFP_KERNEL);
if (!iface)
return;
iface->path = path;
iface->header = header;
iface->ids = ids;
iface->show = show;
pde = proc_create_data(path,
S_IRUGO, /* world readable */
NULL, /* parent dir */
&sysvipc_proc_ops,
iface);
if (!pde)
kfree(iface);
}
#endif
/**
* ipc_findkey - find a key in an ipc identifier set
* @ids: ipc identifier set
* @key: key to find
*
 * Returns the pointer to the locked ipc structure owning @key if found,
 * or NULL otherwise.
*
* Called with writer ipc_ids.rwsem held.
*/
static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
struct kern_ipc_perm *ipcp;
ipcp = rhashtable_lookup_fast(&ids->key_ht, &key,
ipc_kht_params);
if (!ipcp)
return NULL;
rcu_read_lock();
ipc_lock_object(ipcp);
return ipcp;
}
/*
* Insert new IPC object into idr tree, and set sequence number and id
* in the correct order.
* Especially:
* - the sequence number must be set before inserting the object into the idr,
* because the sequence number is accessed without a lock.
* - the id can/must be set after inserting the object into the idr.
* All accesses must be done after getting kern_ipc_perm.lock.
*
 * The caller must own kern_ipc_perm.lock of the new object.
* On error, the function returns a (negative) error code.
*
* To conserve sequence number space, especially with extended ipc_mni,
* the sequence number is incremented only when the returned ID is less than
* the last one.
*/
static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new)
{
int idx, next_id = -1;
#ifdef CONFIG_CHECKPOINT_RESTORE
next_id = ids->next_id;
ids->next_id = -1;
#endif
/*
* As soon as a new object is inserted into the idr,
* ipc_obtain_object_idr() or ipc_obtain_object_check() can find it,
* and the lockless preparations for ipc operations can start.
* This means especially: permission checks, audit calls, allocation
* of undo structures, ...
*
* Thus the object must be fully initialized, and if something fails,
* then the full tear-down sequence must be followed.
* (i.e.: set new->deleted, reduce refcount, call_rcu())
*/
if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */
int max_idx;
max_idx = max(ids->in_use*3/2, ipc_min_cycle);
max_idx = min(max_idx, ipc_mni);
/* allocate the idx, with a NULL struct kern_ipc_perm */
idx = idr_alloc_cyclic(&ids->ipcs_idr, NULL, 0, max_idx,
GFP_NOWAIT);
if (idx >= 0) {
/*
* idx got allocated successfully.
* Now calculate the sequence number and set the
* pointer for real.
*/
if (idx <= ids->last_idx) {
ids->seq++;
if (ids->seq >= ipcid_seq_max())
ids->seq = 0;
}
ids->last_idx = idx;
new->seq = ids->seq;
/* no need for smp_wmb(), this is done
* inside idr_replace, as part of
* rcu_assign_pointer
*/
idr_replace(&ids->ipcs_idr, new, idx);
}
} else {
new->seq = ipcid_to_seqx(next_id);
idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id),
0, GFP_NOWAIT);
}
if (idx >= 0)
new->id = (new->seq << ipcmni_seq_shift()) + idx;
return idx;
}
/**
* ipc_addid - add an ipc identifier
* @ids: ipc identifier set
* @new: new ipc permission set
* @limit: limit for the number of used ids
*
 * Add an entry 'new' to the ipc ids idr. The permissions object is
 * initialised, the first free entry is set up, and the assigned index is
 * returned. The 'new' entry is returned in a locked state on success.
*
* On failure the entry is not locked and a negative err-code is returned.
* The caller must use ipc_rcu_putref() to free the identifier.
*
* Called with writer ipc_ids.rwsem held.
*/
int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
{
kuid_t euid;
kgid_t egid;
int idx, err;
/* 1) Initialize the refcount so that ipc_rcu_putref works */
refcount_set(&new->refcount, 1);
if (limit > ipc_mni)
limit = ipc_mni;
if (ids->in_use >= limit)
return -ENOSPC;
idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock);
rcu_read_lock();
spin_lock(&new->lock);
current_euid_egid(&euid, &egid);
new->cuid = new->uid = euid;
new->gid = new->cgid = egid;
new->deleted = false;
idx = ipc_idr_alloc(ids, new);
idr_preload_end();
if (idx >= 0 && new->key != IPC_PRIVATE) {
err = rhashtable_insert_fast(&ids->key_ht, &new->khtnode,
ipc_kht_params);
if (err < 0) {
idr_remove(&ids->ipcs_idr, idx);
idx = err;
}
}
if (idx < 0) {
new->deleted = true;
spin_unlock(&new->lock);
rcu_read_unlock();
return idx;
}
ids->in_use++;
if (idx > ids->max_idx)
ids->max_idx = idx;
return idx;
}
/**
* ipcget_new - create a new ipc object
* @ns: ipc namespace
* @ids: ipc identifier set
* @ops: the actual creation routine to call
* @params: its parameters
*
* This routine is called by sys_msgget, sys_semget() and sys_shmget()
* when the key is IPC_PRIVATE.
*/
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
const struct ipc_ops *ops, struct ipc_params *params)
{
int err;
down_write(&ids->rwsem);
err = ops->getnew(ns, params);
up_write(&ids->rwsem);
return err;
}
/**
* ipc_check_perms - check security and permissions for an ipc object
* @ns: ipc namespace
* @ipcp: ipc permission set
* @ops: the actual security routine to call
* @params: its parameters
*
* This routine is called by sys_msgget(), sys_semget() and sys_shmget()
* when the key is not IPC_PRIVATE and that key already exists in the
 * given ipc identifier set.
*
* On success, the ipc id is returned.
*
* It is called with ipc_ids.rwsem and ipcp->lock held.
*/
static int ipc_check_perms(struct ipc_namespace *ns,
struct kern_ipc_perm *ipcp,
const struct ipc_ops *ops,
struct ipc_params *params)
{
int err;
if (ipcperms(ns, ipcp, params->flg))
err = -EACCES;
else {
err = ops->associate(ipcp, params->flg);
if (!err)
err = ipcp->id;
}
return err;
}
/**
* ipcget_public - get an ipc object or create a new one
* @ns: ipc namespace
* @ids: ipc identifier set
* @ops: the actual creation routine to call
* @params: its parameters
*
* This routine is called by sys_msgget, sys_semget() and sys_shmget()
* when the key is not IPC_PRIVATE.
* It adds a new entry if the key is not found and does some permission
 * / security checks if the key is found.
*
* On success, the ipc id is returned.
*/
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
const struct ipc_ops *ops, struct ipc_params *params)
{
struct kern_ipc_perm *ipcp;
int flg = params->flg;
int err;
/*
* Take the lock as a writer since we are potentially going to add
* a new entry + read locks are not "upgradable"
*/
down_write(&ids->rwsem);
ipcp = ipc_findkey(ids, params->key);
if (ipcp == NULL) {
/* key not used */
if (!(flg & IPC_CREAT))
err = -ENOENT;
else
err = ops->getnew(ns, params);
} else {
/* ipc object has been locked by ipc_findkey() */
if (flg & IPC_CREAT && flg & IPC_EXCL)
err = -EEXIST;
else {
err = 0;
if (ops->more_checks)
err = ops->more_checks(ipcp, params);
if (!err)
/*
* ipc_check_perms returns the IPC id on
* success
*/
err = ipc_check_perms(ns, ipcp, ops, params);
}
ipc_unlock(ipcp);
}
up_write(&ids->rwsem);
return err;
}
/**
* ipc_kht_remove - remove an ipc from the key hashtable
* @ids: ipc identifier set
* @ipcp: ipc perm structure containing the key to remove
*
* ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
* before this function is called, and remain locked on the exit.
*/
static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
if (ipcp->key != IPC_PRIVATE)
WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
ipc_kht_params));
}
/**
* ipc_search_maxidx - search for the highest assigned index
* @ids: ipc identifier set
* @limit: known upper limit for highest assigned index
*
* The function determines the highest assigned index in @ids. It is intended
* to be called when ids->max_idx needs to be updated.
* Updating ids->max_idx is necessary when the current highest index ipc
* object is deleted.
* If no ipc object is allocated, then -1 is returned.
*
* ipc_ids.rwsem needs to be held by the caller.
*/
static int ipc_search_maxidx(struct ipc_ids *ids, int limit)
{
int tmpidx;
int i;
int retval;
i = ilog2(limit+1);
retval = 0;
for (; i >= 0; i--) {
tmpidx = retval | (1<<i);
/*
* "0" is a possible index value, thus search using
* e.g. 15,7,3,1,0 instead of 16,8,4,2,1.
*/
tmpidx = tmpidx-1;
if (idr_get_next(&ids->ipcs_idr, &tmpidx))
retval |= (1<<i);
}
return retval - 1;
}
/**
* ipc_rmid - remove an ipc identifier
* @ids: ipc identifier set
* @ipcp: ipc perm structure containing the identifier to remove
*
* ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
* before this function is called, and remain locked on the exit.
*/
void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
int idx = ipcid_to_idx(ipcp->id);
WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, idx) != ipcp);
ipc_kht_remove(ids, ipcp);
ids->in_use--;
ipcp->deleted = true;
if (unlikely(idx == ids->max_idx)) {
idx = ids->max_idx-1;
if (idx >= 0)
idx = ipc_search_maxidx(ids, idx);
ids->max_idx = idx;
}
}
/**
* ipc_set_key_private - switch the key of an existing ipc to IPC_PRIVATE
* @ids: ipc identifier set
* @ipcp: ipc perm structure containing the key to modify
*
* ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
* before this function is called, and remain locked on the exit.
*/
void ipc_set_key_private(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
ipc_kht_remove(ids, ipcp);
ipcp->key = IPC_PRIVATE;
}
bool ipc_rcu_getref(struct kern_ipc_perm *ptr)
{
return refcount_inc_not_zero(&ptr->refcount);
}
void ipc_rcu_putref(struct kern_ipc_perm *ptr,
void (*func)(struct rcu_head *head))
{
if (!refcount_dec_and_test(&ptr->refcount))
return;
call_rcu(&ptr->rcu, func);
}
/**
* ipcperms - check ipc permissions
* @ns: ipc namespace
* @ipcp: ipc permission set
* @flag: desired permission set
*
* Check user, group, other permissions for access
 * to ipc resources. Return 0 if allowed.
*
* @flag will most probably be 0 or ``S_...UGO`` from <linux/stat.h>
*/
int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{
kuid_t euid = current_euid();
int requested_mode, granted_mode;
audit_ipc_obj(ipcp);
requested_mode = (flag >> 6) | (flag >> 3) | flag;
granted_mode = ipcp->mode;
if (uid_eq(euid, ipcp->cuid) ||
uid_eq(euid, ipcp->uid))
granted_mode >>= 6;
else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
if ((requested_mode & ~granted_mode & 0007) &&
!ns_capable(ns->user_ns, CAP_IPC_OWNER))
return -1;
return security_ipc_permission(ipcp, flag);
}
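/*
 * Illustrative sketch (not part of the original file): how the mode folding in
 * ipcperms() behaves for a caller that only matches the "group" class. With
 * flag = S_IRUGO | S_IWUGO (0666) the requested bits collapse to 06, while an
 * object mode of 0640 grants only 04 to the group class, so write access is
 * refused unless the caller has CAP_IPC_OWNER.
 */
#if 0
static bool example_group_access_ok(short flag, umode_t obj_mode)
{
	int requested_mode = (flag >> 6) | (flag >> 3) | flag;
	int granted_mode = obj_mode >> 3;	/* caller matches cgid/gid only */

	/* true when every requested bit is also granted */
	return !(requested_mode & ~granted_mode & 0007);
}
#endif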
/*
* Functions to convert between the kern_ipc_perm structure and the
* old/new ipc_perm structures
*/
/**
* kernel_to_ipc64_perm - convert kernel ipc permissions to user
* @in: kernel permissions
* @out: new style ipc permissions
*
* Turn the kernel object @in into a set of permissions descriptions
* for returning to userspace (@out).
*/
void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
{
out->key = in->key;
out->uid = from_kuid_munged(current_user_ns(), in->uid);
out->gid = from_kgid_munged(current_user_ns(), in->gid);
out->cuid = from_kuid_munged(current_user_ns(), in->cuid);
out->cgid = from_kgid_munged(current_user_ns(), in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
/**
* ipc64_perm_to_ipc_perm - convert new ipc permissions to old
* @in: new style ipc permissions
* @out: old style ipc permissions
*
* Turn the new style permissions object @in into a compatibility
* object and store it into the @out pointer.
*/
void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
{
out->key = in->key;
SET_UID(out->uid, in->uid);
SET_GID(out->gid, in->gid);
SET_UID(out->cuid, in->cuid);
SET_GID(out->cgid, in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
/**
* ipc_obtain_object_idr - Look for an id in the ipc ids idr and
* return associated ipc object.
* @ids: ipc identifier set
* @id: ipc id to look for
*
* Call inside the RCU critical section.
* The ipc object is *not* locked on exit.
*/
struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
int idx = ipcid_to_idx(id);
out = idr_find(&ids->ipcs_idr, idx);
if (!out)
return ERR_PTR(-EINVAL);
return out;
}
/**
* ipc_obtain_object_check - Similar to ipc_obtain_object_idr() but
* also checks the ipc object sequence number.
* @ids: ipc identifier set
* @id: ipc id to look for
*
* Call inside the RCU critical section.
* The ipc object is *not* locked on exit.
*/
struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id);
if (IS_ERR(out))
goto out;
if (ipc_checkid(out, id))
return ERR_PTR(-EINVAL);
out:
return out;
}
/**
* ipcget - Common sys_*get() code
* @ns: namespace
* @ids: ipc identifier set
* @ops: operations to be called on ipc object creation, permission checks
* and further checks
* @params: the parameters needed by the previous operations.
*
* Common routine called by sys_msgget(), sys_semget() and sys_shmget().
*/
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
const struct ipc_ops *ops, struct ipc_params *params)
{
if (params->key == IPC_PRIVATE)
return ipcget_new(ns, ids, ops, params);
else
return ipcget_public(ns, ids, ops, params);
}
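/*
 * Illustrative sketch (not part of the original file): the shape of a
 * sys_*get() caller. example_getnew() and example_associate() are
 * hypothetical stand-ins for the per-mechanism callbacks (e.g. the msg queue
 * creation and security hooks in ipc/msg.c); ipcget() then routes to
 * ipcget_new() or ipcget_public() depending on the key.
 */
#if 0
static long example_get(struct ipc_namespace *ns, key_t key, int flg)
{
	static const struct ipc_ops example_ops = {
		.getnew		= example_getnew,	/* hypothetical */
		.associate	= example_associate,	/* hypothetical */
	};
	struct ipc_params params = {
		.key = key,
		.flg = flg,
	};

	return ipcget(ns, &ns->ids[IPC_MSG_IDS], &example_ops, &params);
}
#endif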
/**
* ipc_update_perm - update the permissions of an ipc object
* @in: the permission given as input.
* @out: the permission of the ipc to set.
*/
int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
kuid_t uid = make_kuid(current_user_ns(), in->uid);
kgid_t gid = make_kgid(current_user_ns(), in->gid);
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
out->uid = uid;
out->gid = gid;
out->mode = (out->mode & ~S_IRWXUGO)
| (in->mode & S_IRWXUGO);
return 0;
}
/**
* ipcctl_obtain_check - retrieve an ipc object and check permissions
* @ns: ipc namespace
* @ids: the table of ids where to look for the ipc
* @id: the id of the ipc to retrieve
* @cmd: the cmd to check
* @perm: the permission to set
* @extra_perm: one extra permission parameter used by msq
*
* This function does some common audit and permissions check for some IPC_XXX
* cmd and is called from semctl_down, shmctl_down and msgctl_down.
*
* It:
* - retrieves the ipc object with the given id in the given table.
* - performs some audit and permission check, depending on the given cmd
* - returns a pointer to the ipc object or otherwise, the corresponding
* error.
*
* Call holding the both the rwsem and the rcu read lock.
*/
struct kern_ipc_perm *ipcctl_obtain_check(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
kuid_t euid;
int err = -EPERM;
struct kern_ipc_perm *ipcp;
ipcp = ipc_obtain_object_check(ids, id);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto err;
}
audit_ipc_obj(ipcp);
if (cmd == IPC_SET)
audit_ipc_set_perm(extra_perm, perm->uid,
perm->gid, perm->mode);
euid = current_euid();
if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) ||
ns_capable(ns->user_ns, CAP_SYS_ADMIN))
return ipcp; /* successful lookup */
err:
return ERR_PTR(err);
}
#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/**
* ipc_parse_version - ipc call version
* @cmd: pointer to command
*
* Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The @cmd value is turned from an encoded command-and-version value into
* just the command code.
*/
int ipc_parse_version(int *cmd)
{
if (*cmd & IPC_64) {
*cmd ^= IPC_64;
return IPC_64;
} else {
return IPC_OLD;
}
}
#endif /* CONFIG_ARCH_WANT_IPC_PARSE_VERSION */
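/*
 * Illustrative sketch (not part of the original file): a *ctl() entry point
 * first strips the IPC_64 flag from the command so the switch below only sees
 * the bare command code, then uses the returned version to pick the old or
 * new userspace layout. example_stat()/example_rmid() are hypothetical
 * helpers standing in for the per-mechanism handlers.
 */
#if 0
static long example_ctl(int id, int cmd, void __user *buf)
{
	int version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_STAT:
		return example_stat(id, version, buf);	/* hypothetical */
	case IPC_RMID:
		return example_rmid(id);		/* hypothetical */
	default:
		return -EINVAL;
	}
}
#endif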
#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
struct ipc_namespace *ns;
struct pid_namespace *pid_ns;
struct ipc_proc_iface *iface;
};
struct pid_namespace *ipc_seq_pid_ns(struct seq_file *s)
{
struct ipc_proc_iter *iter = s->private;
return iter->pid_ns;
}
/**
* sysvipc_find_ipc - Find and lock the ipc structure based on seq pos
* @ids: ipc identifier set
* @pos: expected position
*
* The function finds an ipc structure, based on the sequence file
* position @pos. If there is no ipc structure at position @pos, then
* the successor is selected.
* If a structure is found, then it is locked (both rcu_read_lock() and
* ipc_lock_object()) and @pos is set to the position needed to locate
* the found ipc structure.
* If nothing is found (i.e. EOF), @pos is not modified.
*
* The function returns the found ipc structure, or NULL at EOF.
*/
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t *pos)
{
int tmpidx;
struct kern_ipc_perm *ipc;
/* convert from position to idr index -> "-1" */
tmpidx = *pos - 1;
ipc = idr_get_next(&ids->ipcs_idr, &tmpidx);
if (ipc != NULL) {
rcu_read_lock();
ipc_lock_object(ipc);
/* convert from idr index to position -> "+1" */
*pos = tmpidx + 1;
}
return ipc;
}
static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct kern_ipc_perm *ipc = it;
/* If we had an ipc id locked before, unlock it */
if (ipc && ipc != SEQ_START_TOKEN)
ipc_unlock(ipc);
/* Next -> search for *pos+1 */
(*pos)++;
return sysvipc_find_ipc(&iter->ns->ids[iface->ids], pos);
}
/*
* File positions: pos 0 -> header, pos n -> ipc idx = n - 1.
 * SeqFile iterator: the iterator value is a locked ipc pointer or SEQ_START_TOKEN.
*/
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
ids = &iter->ns->ids[iface->ids];
/*
* Take the lock - this will be released by the corresponding
* call to stop().
*/
down_read(&ids->rwsem);
/* pos < 0 is invalid */
if (*pos < 0)
return NULL;
/* pos == 0 means header */
if (*pos == 0)
return SEQ_START_TOKEN;
/* Otherwise return the correct ipc structure */
return sysvipc_find_ipc(ids, pos);
}
static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
struct kern_ipc_perm *ipc = it;
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
/* If we had a locked structure, release it */
if (ipc && ipc != SEQ_START_TOKEN)
ipc_unlock(ipc);
ids = &iter->ns->ids[iface->ids];
/* Release the lock we took in start() */
up_read(&ids->rwsem);
}
static int sysvipc_proc_show(struct seq_file *s, void *it)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
if (it == SEQ_START_TOKEN) {
seq_puts(s, iface->header);
return 0;
}
return iface->show(s, it);
}
static const struct seq_operations sysvipc_proc_seqops = {
.start = sysvipc_proc_start,
.stop = sysvipc_proc_stop,
.next = sysvipc_proc_next,
.show = sysvipc_proc_show,
};
static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
struct ipc_proc_iter *iter;
iter = __seq_open_private(file, &sysvipc_proc_seqops, sizeof(*iter));
if (!iter)
return -ENOMEM;
iter->iface = pde_data(inode);
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
iter->pid_ns = get_pid_ns(task_active_pid_ns(current));
return 0;
}
static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct ipc_proc_iter *iter = seq->private;
put_ipc_ns(iter->ns);
put_pid_ns(iter->pid_ns);
return seq_release_private(inode, file);
}
static const struct proc_ops sysvipc_proc_ops = {
.proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = sysvipc_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/file.c
*
* Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
*
* Manage the dynamic fd arrays in the process files_struct.
*/
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <linux/file_ref.h>
#include <net/sock.h>
#include <linux/init_task.h>
#include "internal.h"
static noinline bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
{
/*
* If the reference count was already in the dead zone, then this
* put() operation is imbalanced. Warn, put the reference count back to
* DEAD and tell the caller to not deconstruct the object.
*/
if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
return false;
}
/*
* This is a put() operation on a saturated refcount. Restore the
* mean saturation value and tell the caller to not deconstruct the
* object.
*/
if (cnt > FILE_REF_MAXREF)
atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
return false;
}
/**
* __file_ref_put - Slowpath of file_ref_put()
* @ref: Pointer to the reference count
* @cnt: Current reference count
*
* Invoked when the reference count is outside of the valid zone.
*
* Return:
* True if this was the last reference with no future references
* possible. This signals the caller that it can safely schedule the
* object, which is protected by the reference counter, for
* deconstruction.
*
* False if there are still active references or the put() raced
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
{
/* Did this drop the last reference? */
if (likely(cnt == FILE_REF_NOREF)) {
/*
* Carefully try to set the reference count to FILE_REF_DEAD.
*
* This can fail if a concurrent get() operation has
* elevated it again or the corresponding put() even marked
* it dead already. Both are valid situations and do not
* require a retry. If this fails the caller is not
* allowed to deconstruct the object.
*/
if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD))
return false;
/*
* The caller can safely schedule the object for
* deconstruction. Provide acquire ordering.
*/
smp_acquire__after_ctrl_dep();
return true;
}
return __file_ref_put_badval(ref, cnt);
}
EXPORT_SYMBOL_GPL(__file_ref_put);
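/*
 * Illustrative sketch (not part of the original file): the expected pairing
 * around __file_ref_put(). The fast path and file_ref_init() are assumed to
 * come from <linux/file_ref.h>; only counts that left the valid zone fall
 * through to the slowpath above. struct example_obj is a hypothetical
 * refcounted object, not a kernel type.
 */
#if 0
struct example_obj {
	file_ref_t ref;
};

static struct example_obj *example_obj_new(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		file_ref_init(&obj->ref, 1);	/* one initial reference */
	return obj;
}

static void example_obj_put(struct example_obj *obj)
{
	/* file_ref_put() returns true once the last reference is gone. */
	if (file_ref_put(&obj->ref))
		kfree(obj);
}
#endif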
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
static void __free_fdtable(struct fdtable *fdt)
{
kvfree(fdt->fd);
kvfree(fdt->open_fds);
kfree(fdt);
}
static void free_fdtable_rcu(struct rcu_head *rcu)
{
__free_fdtable(container_of(rcu, struct fdtable, rcu));
}
#define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
/*
 * Copy 'copy_words' words of the fd bitmaps from the old table to the new table
 * and clear the extra space if any. This does not copy the file pointers.
 * Called with the files spinlock held for write.
*/
static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
unsigned int copy_words)
{
unsigned int nwords = fdt_words(nfdt);
bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
copy_words, nwords);
}
/*
* Copy all file descriptors from the old table to the new, expanded table and
* clear the extra space. Called with the files spinlock held for write.
*/
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
size_t cpy, set;
BUG_ON(nfdt->max_fds < ofdt->max_fds);
cpy = ofdt->max_fds * sizeof(struct file *);
set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
memcpy(nfdt->fd, ofdt->fd, cpy);
memset((char *)nfdt->fd + cpy, 0, set);
copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}
/*
* Note how the fdtable bitmap allocations very much have to be a multiple of
* BITS_PER_LONG. This is not only because we walk those things in chunks of
* 'unsigned long' in some places, but simply because that is how the Linux
* kernel bitmaps are defined to work: they are not "bits in an array of bytes",
* they are very much "bits in an array of unsigned long".
*/
static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
{
struct fdtable *fdt;
unsigned int nr;
void *data;
/*
* Figure out how many fds we actually want to support in this fdtable.
* Allocation steps are keyed to the size of the fdarray, since it
* grows far faster than any of the other dynamic data. We try to fit
* the fdarray into comfortable page-tuned chunks: starting at 1024B
 * and growing in powers of two from there on. Since we are only called
 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab
* already gives BITS_PER_LONG slots), the above boils down to
* 1. use the smallest power of two large enough to give us that many
* slots.
* 2. on 32bit skip 64 and 128 - the minimal capacity we want there is
* 256 slots (i.e. 1Kb fd array).
* 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there
* and we are never going to be asked for 64 or less.
*/
if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
nr = 256;
else
nr = roundup_pow_of_two(slots_wanted);
/*
* Note that this can drive nr *below* what we had passed if sysctl_nr_open
* had been set lower between the check in expand_files() and here.
*
* We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
* bitmaps handling below becomes unpleasant, to put it mildly...
*/
if (unlikely(nr > sysctl_nr_open)) {
nr = round_down(sysctl_nr_open, BITS_PER_LONG);
if (nr < slots_wanted)
return ERR_PTR(-EMFILE);
}
/*
* Check if the allocation size would exceed INT_MAX. kvmalloc_array()
* and kvmalloc() will warn if the allocation size is greater than
* INT_MAX, as filp_cachep objects are not __GFP_NOWARN.
*
* This can happen when sysctl_nr_open is set to a very high value and
* a process tries to use a file descriptor near that limit. For example,
* if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what
* systemd typically sets it to - then trying to use a file descriptor
* close to that value will require allocating a file descriptor table
* that exceeds 8GB in size.
*/
if (unlikely(nr > INT_MAX / sizeof(struct file *)))
return ERR_PTR(-EMFILE);
	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
fdt->max_fds = nr;
data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
fdt->fd = data;
data = kvmalloc(max_t(size_t,
2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
GFP_KERNEL_ACCOUNT);
if (!data)
goto out_arr;
fdt->open_fds = data;
data += nr / BITS_PER_BYTE;
fdt->close_on_exec = data;
data += nr / BITS_PER_BYTE;
fdt->full_fds_bits = data;
return fdt;
out_arr:
kvfree(fdt->fd);
out_fdt:
kfree(fdt);
out:
return ERR_PTR(-ENOMEM);
}
/*
* Expand the file descriptor table.
* This function will allocate a new fdtable and both fd array and fdset, of
* the given size.
* Return <0 error code on error; 0 on successful completion.
* The files->file_lock should be held on entry, and will be held on exit.
*/
static int expand_fdtable(struct files_struct *files, unsigned int nr)
__releases(files->file_lock)
__acquires(files->file_lock)
{
struct fdtable *new_fdt, *cur_fdt;
spin_unlock(&files->file_lock);
new_fdt = alloc_fdtable(nr + 1);
/* make sure all fd_install() have seen resize_in_progress
* or have finished their rcu_read_lock_sched() section.
*/
if (atomic_read(&files->count) > 1)
synchronize_rcu();
spin_lock(&files->file_lock);
if (IS_ERR(new_fdt))
return PTR_ERR(new_fdt);
cur_fdt = files_fdtable(files);
BUG_ON(nr < cur_fdt->max_fds);
copy_fdtable(new_fdt, cur_fdt);
rcu_assign_pointer(files->fdt, new_fdt);
if (cur_fdt != &files->fdtab)
call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
/* coupled with smp_rmb() in fd_install() */
smp_wmb();
return 0;
}
/*
* Expand files.
* This function will expand the file structures, if the requested size exceeds
* the current capacity and there is room for expansion.
* Return <0 error code on error; 0 on success.
* The files->file_lock should be held on entry, and will be held on exit.
*/
static int expand_files(struct files_struct *files, unsigned int nr)
__releases(files->file_lock)
__acquires(files->file_lock)
{
struct fdtable *fdt;
int error;
repeat:
fdt = files_fdtable(files);
/* Do we need to expand? */
if (nr < fdt->max_fds)
return 0;
if (unlikely(files->resize_in_progress)) {
spin_unlock(&files->file_lock);
wait_event(files->resize_wait, !files->resize_in_progress);
spin_lock(&files->file_lock);
goto repeat;
}
/* Can we expand? */
if (unlikely(nr >= sysctl_nr_open))
return -EMFILE;
/* All good, so we try */
files->resize_in_progress = true;
error = expand_fdtable(files, nr);
files->resize_in_progress = false;
wake_up_all(&files->resize_wait);
return error;
}
static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt,
bool set)
{
if (set) {
__set_bit(fd, fdt->close_on_exec);
} else {
		if (test_bit(fd, fdt->close_on_exec))
			__clear_bit(fd, fdt->close_on_exec);
}
}
static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set)
{
__set_bit(fd, fdt->open_fds);
__set_close_on_exec(fd, fdt, set);
fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}
static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
__clear_bit(fd, fdt->open_fds);
fd /= BITS_PER_LONG;
if (test_bit(fd, fdt->full_fds_bits))
		__clear_bit(fd, fdt->full_fds_bits);
}
static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
return test_bit(fd, fdt->open_fds);
}
/*
* Note that a sane fdtable size always has to be a multiple of
* BITS_PER_LONG, since we have bitmaps that are sized by this.
*
* punch_hole is optional - when close_range() is asked to unshare
* and close, we don't need to copy descriptors in that range, so
* a smaller cloned descriptor table might suffice if the last
* currently opened descriptor falls into that range.
*/
static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);
if (last == fdt->max_fds)
return NR_OPEN_DEFAULT;
	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
		last = find_last_bit(fdt->open_fds, punch_hole->from);
		if (last == punch_hole->from)
			return NR_OPEN_DEFAULT;
	}
	return ALIGN(last + 1, BITS_PER_LONG);
}
/*
* Allocate a new descriptor table and copy contents from the passed in
* instance. Returns a pointer to cloned table on success, ERR_PTR()
* on failure. For 'punch_hole' see sane_fdtable_size().
*/
struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
struct files_struct *newf;
struct file **old_fds, **new_fds;
unsigned int open_files, i;
struct fdtable *old_fdt, *new_fdt;
newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
if (!newf)
return ERR_PTR(-ENOMEM);
atomic_set(&newf->count, 1);
spin_lock_init(&newf->file_lock);
newf->resize_in_progress = false;
init_waitqueue_head(&newf->resize_wait);
newf->next_fd = 0;
new_fdt = &newf->fdtab;
new_fdt->max_fds = NR_OPEN_DEFAULT;
new_fdt->close_on_exec = newf->close_on_exec_init;
new_fdt->open_fds = newf->open_fds_init;
new_fdt->full_fds_bits = newf->full_fds_bits_init;
new_fdt->fd = &newf->fd_array[0];
spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, punch_hole);
/*
* Check whether we need to allocate a larger fd array and fd set.
*/
while (unlikely(open_files > new_fdt->max_fds)) {
spin_unlock(&oldf->file_lock);
if (new_fdt != &newf->fdtab)
__free_fdtable(new_fdt);
		new_fdt = alloc_fdtable(open_files);
		if (IS_ERR(new_fdt)) {
kmem_cache_free(files_cachep, newf);
return ERR_CAST(new_fdt);
}
/*
		 * Reacquire the oldf lock and a pointer to its fd table:
		 * it may have grown a new, bigger fd table in the meantime,
		 * so we need the latest pointer.
*/
spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, punch_hole);
}
copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
/*
* We may be racing against fd allocation from other threads using this
* files_struct, despite holding ->file_lock.
*
* alloc_fd() might have already claimed a slot, while fd_install()
* did not populate it yet. Note the latter operates locklessly, so
* the file can show up as we are walking the array below.
*
* At the same time we know no files will disappear as all other
* operations take the lock.
*
* Instead of trying to placate userspace racing with itself, we
* ref the file if we see it and mark the fd slot as unused otherwise.
*/
	for (i = open_files; i != 0; i--) {
		struct file *f = rcu_dereference_raw(*old_fds++);

		if (f) {
			get_file(f);
		} else {
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
spin_unlock(&oldf->file_lock);
/* clear the remainder */
memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
	rcu_assign_pointer(newf->fdt, new_fdt);
	return newf;
}
static struct fdtable *close_files(struct files_struct * files)
{
/*
* It is safe to dereference the fd table without RCU or
* ->file_lock because this is the last reference to the
* files structure.
*/
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
unsigned int i, j = 0;
for (;;) {
unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
break;
set = fdt->open_fds[j++];
while (set) {
			if (set & 1) {
				struct file *file = fdt->fd[i];

				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
i++;
set >>= 1;
}
}
return fdt;
}
void put_files_struct(struct files_struct *files)
{
if (atomic_dec_and_test(&files->count)) {
struct fdtable *fdt = close_files(files);
/* free the arrays if they are not embedded */
if (fdt != &files->fdtab)
__free_fdtable(fdt);
kmem_cache_free(files_cachep, files);
}
}
void exit_files(struct task_struct *tsk)
{
struct files_struct * files = tsk->files;
if (files) {
task_lock(tsk);
tsk->files = NULL;
task_unlock(tsk);
put_files_struct(files);
}
}
struct files_struct init_files = {
.count = ATOMIC_INIT(1),
.fdt = &init_files.fdtab,
.fdtab = {
.max_fds = NR_OPEN_DEFAULT,
.fd = &init_files.fd_array[0],
.close_on_exec = init_files.close_on_exec_init,
.open_fds = init_files.open_fds_init,
.full_fds_bits = init_files.full_fds_bits_init,
},
.file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
.resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
unsigned int maxbit = maxfd / BITS_PER_LONG;
unsigned int bitbit = start / BITS_PER_LONG;
unsigned int bit;
/*
* Try to avoid looking at the second level bitmap
*/
bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG,
start & (BITS_PER_LONG - 1));
if (bit < BITS_PER_LONG)
return bit + bitbit * BITS_PER_LONG;
bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
if (bitbit >= maxfd)
return maxfd;
if (bitbit > start)
start = bitbit;
return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
/*
* allocate a file descriptor, mark it busy.
*/
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
struct files_struct *files = current->files;
unsigned int fd;
int error;
struct fdtable *fdt;
spin_lock(&files->file_lock);
repeat:
fdt = files_fdtable(files);
fd = start;
if (fd < files->next_fd)
fd = files->next_fd;
	if (likely(fd < fdt->max_fds))
		fd = find_next_fd(fdt, fd);
/*
* N.B. For clone tasks sharing a files structure, this test
* will limit the total number of files that can be opened.
*/
error = -EMFILE;
if (unlikely(fd >= end))
goto out;
if (unlikely(fd >= fdt->max_fds)) {
error = expand_files(files, fd);
if (error < 0)
			goto out;

		/*
		 * Expanding the table may have dropped the lock and blocked,
		 * so start over with a fresh fdtable pointer.
		 */
		goto repeat;
}
if (start <= files->next_fd)
files->next_fd = fd + 1;
__set_open_fd(fd, fdt, flags & O_CLOEXEC);
error = fd;
VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
out:
spin_unlock(&files->file_lock);
return error;
}
int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
return alloc_fd(0, nofile, flags);
}
int get_unused_fd_flags(unsigned flags)
{
return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = files_fdtable(files);
__clear_open_fd(fd, fdt);
if (fd < files->next_fd)
files->next_fd = fd;
}
void put_unused_fd(unsigned int fd)
{
struct files_struct *files = current->files;
spin_lock(&files->file_lock);
__put_unused_fd(files, fd);
spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(put_unused_fd);
/**
* fd_install - install a file pointer in the fd array
* @fd: file descriptor to install the file in
* @file: the file to install
*
* This consumes the "file" refcount, so callers should treat it
* as if they had called fput(file).
*/
void fd_install(unsigned int fd, struct file *file)
{
struct files_struct *files = current->files;
struct fdtable *fdt;
	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
rcu_read_unlock_sched();
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
spin_unlock(&files->file_lock);
return;
}
/* coupled with smp_wmb() in expand_fdtable() */
smp_rmb();
fdt = rcu_dereference_sched(files->fdt);
VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}
EXPORT_SYMBOL(fd_install);
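/*
 * Illustrative sketch (not part of the original file): the canonical
 * reserve-then-install pattern the helpers above are meant for. Until
 * fd_install() runs, the reserved slot stays NULL in ->fd[], which is the
 * invariant do_dup2() below relies on. example_open_file() is a hypothetical
 * stand-in for whatever produces the struct file.
 */
#if 0
static int example_install(void)
{
	struct file *file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;
	file = example_open_file();	/* hypothetical */
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* release the reserved slot */
		return PTR_ERR(file);
	}
	fd_install(fd, file);		/* consumes the file reference */
	return fd;
}
#endif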
/**
* file_close_fd_locked - return file associated with fd
* @files: file struct to retrieve file from
* @fd: file descriptor to retrieve file for
*
* Doesn't take a separate reference count.
*
* Context: files_lock must be held.
*
* Returns: The file associated with @fd (NULL if @fd is not open)
*/
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
struct fdtable *fdt = files_fdtable(files);
struct file *file;
lockdep_assert_held(&files->file_lock);
if (fd >= fdt->max_fds)
return NULL;
fd = array_index_nospec(fd, fdt->max_fds);
file = rcu_dereference_raw(fdt->fd[fd]);
if (file) {
rcu_assign_pointer(fdt->fd[fd], NULL);
__put_unused_fd(files, fd);
}
return file;
}
int close_fd(unsigned fd)
{
struct files_struct *files = current->files;
struct file *file;
spin_lock(&files->file_lock);
file = file_close_fd_locked(files, fd);
spin_unlock(&files->file_lock);
if (!file)
return -EBADF;
return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd);
/**
* last_fd - return last valid index into fd table
* @fdt: File descriptor table.
*
* Context: Either rcu read lock or files_lock must be held.
*
* Returns: Last valid index into fdtable.
*/
static inline unsigned last_fd(struct fdtable *fdt)
{
return fdt->max_fds - 1;
}
static inline void __range_cloexec(struct files_struct *cur_fds,
unsigned int fd, unsigned int max_fd)
{
struct fdtable *fdt;
/* make sure we're using the correct maximum value */
spin_lock(&cur_fds->file_lock);
fdt = files_fdtable(cur_fds);
max_fd = min(last_fd(fdt), max_fd);
if (fd <= max_fd)
bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
spin_unlock(&cur_fds->file_lock);
}
static inline void __range_close(struct files_struct *files, unsigned int fd,
unsigned int max_fd)
{
struct file *file;
unsigned n;
spin_lock(&files->file_lock);
n = last_fd(files_fdtable(files));
max_fd = min(max_fd, n);
for (; fd <= max_fd; fd++) {
file = file_close_fd_locked(files, fd);
if (file) {
spin_unlock(&files->file_lock);
filp_close(file, files);
cond_resched();
spin_lock(&files->file_lock);
} else if (need_resched()) {
spin_unlock(&files->file_lock);
cond_resched();
spin_lock(&files->file_lock);
}
}
spin_unlock(&files->file_lock);
}
/**
* sys_close_range() - Close all file descriptors in a given range.
*
* @fd: starting file descriptor to close
* @max_fd: last file descriptor to close
* @flags: CLOSE_RANGE flags.
*
* This closes a range of file descriptors. All file descriptors
* from @fd up to and including @max_fd are closed.
 * Currently, errors from closing a given file descriptor are ignored.
*/
SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd,
unsigned int, flags)
{
struct task_struct *me = current;
struct files_struct *cur_fds = me->files, *fds = NULL;
if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
return -EINVAL;
if (fd > max_fd)
return -EINVAL;
if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
struct fd_range range = {fd, max_fd}, *punch_hole = ⦥
/*
* If the caller requested all fds to be made cloexec we always
* copy all of the file descriptors since they still want to
* use them.
*/
if (flags & CLOSE_RANGE_CLOEXEC)
punch_hole = NULL;
fds = dup_fd(cur_fds, punch_hole);
if (IS_ERR(fds))
return PTR_ERR(fds);
/*
* We used to share our file descriptor table, and have now
* created a private one, make sure we're using it below.
*/
swap(cur_fds, fds);
}
if (flags & CLOSE_RANGE_CLOEXEC)
__range_cloexec(cur_fds, fd, max_fd);
else
__range_close(cur_fds, fd, max_fd);
if (fds) {
/*
* We're done closing the files we were supposed to. Time to install
* the new file descriptor table and drop the old one.
*/
task_lock(me);
me->files = cur_fds;
task_unlock(me);
put_files_struct(fds);
}
return 0;
}
/**
* file_close_fd - return file associated with fd
* @fd: file descriptor to retrieve file for
*
* Doesn't take a separate reference count.
*
* Returns: The file associated with @fd (NULL if @fd is not open)
*/
struct file *file_close_fd(unsigned int fd)
{
struct files_struct *files = current->files;
struct file *file;
spin_lock(&files->file_lock);
file = file_close_fd_locked(files, fd);
spin_unlock(&files->file_lock);
return file;
}
void do_close_on_exec(struct files_struct *files)
{
unsigned i;
struct fdtable *fdt;
/* exec unshares first */
spin_lock(&files->file_lock);
for (i = 0; ; i++) {
unsigned long set;
unsigned fd = i * BITS_PER_LONG;
fdt = files_fdtable(files);
if (fd >= fdt->max_fds)
break;
set = fdt->close_on_exec[i];
if (!set)
continue;
fdt->close_on_exec[i] = 0;
for ( ; set ; fd++, set >>= 1) {
struct file *file;
if (!(set & 1))
continue;
file = fdt->fd[fd];
if (!file)
continue;
rcu_assign_pointer(fdt->fd[fd], NULL);
__put_unused_fd(files, fd);
spin_unlock(&files->file_lock);
filp_close(file, files);
cond_resched();
spin_lock(&files->file_lock);
}
}
spin_unlock(&files->file_lock);
}
static struct file *__get_file_rcu(struct file __rcu **f)
{
struct file __rcu *file;
struct file __rcu *file_reloaded;
struct file __rcu *file_reloaded_cmp;
	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;
if (unlikely(!file_ref_get(&file->f_ref)))
return ERR_PTR(-EAGAIN);
file_reloaded = rcu_dereference_raw(*f);
/*
* Ensure that all accesses have a dependency on the load from
* rcu_dereference_raw() above so we get correct ordering
* between reuse/allocation and the pointer check below.
*/
file_reloaded_cmp = file_reloaded;
OPTIMIZER_HIDE_VAR(file_reloaded_cmp);
/*
* file_ref_get() above provided a full memory barrier when we
* acquired a reference.
*
* This is paired with the write barrier from assigning to the
* __rcu protected file pointer so that if that pointer still
* matches the current file, we know we have successfully
* acquired a reference to the right file.
*
* If the pointers don't match the file has been reallocated by
* SLAB_TYPESAFE_BY_RCU.
*/
if (file == file_reloaded_cmp)
return file_reloaded;
fput(file);
	return ERR_PTR(-EAGAIN);
}
/**
 * get_file_rcu - try to get a reference to a file under rcu
* @f: the file to get a reference on
*
* This function tries to get a reference on @f carefully verifying that
* @f hasn't been reused.
*
* This function should rarely have to be used and only by users who
* understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
*
* Return: Returns @f with the reference count increased or NULL.
*/
struct file *get_file_rcu(struct file __rcu **f)
{
for (;;) {
struct file __rcu *file;
		file = __get_file_rcu(f);
		if (!IS_ERR(file))
return file;
}
}
EXPORT_SYMBOL_GPL(get_file_rcu);
/**
 * get_file_active - try to get a reference to a file
* @f: the file to get a reference on
*
 * In contrast to get_file_rcu() the pointer itself isn't part of the
* reference counting.
*
* This function should rarely have to be used and only by users who
* understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
*
* Return: Returns @f with the reference count increased or NULL.
*/
struct file *get_file_active(struct file **f)
{
struct file __rcu *file;
rcu_read_lock();
file = __get_file_rcu(f);
rcu_read_unlock();
if (IS_ERR(file))
file = NULL;
return file;
}
EXPORT_SYMBOL_GPL(get_file_active);
static inline struct file *__fget_files_rcu(struct files_struct *files,
unsigned int fd, fmode_t mask)
{
for (;;) {
struct file *file;
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
struct file __rcu **fdentry;
unsigned long nospec_mask;
		/* Mask is 0 for invalid fds, ~0 for valid ones */
nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);
/*
* fdentry points to the 'fd' offset, or fdt->fd[0].
* Loading from fdt->fd[0] is always safe, because the
* array always exists.
*/
fdentry = fdt->fd + (fd & nospec_mask);
/* Do the load, then mask any invalid result */
file = rcu_dereference_raw(*fdentry);
file = (void *)(nospec_mask & (unsigned long)file);
if (unlikely(!file))
return NULL;
/*
* Ok, we have a file pointer that was valid at
* some point, but it might have become stale since.
*
* We need to confirm it by incrementing the refcount
* and then check the lookup again.
*
* file_ref_get() gives us a full memory barrier. We
* only really need an 'acquire' one to protect the
* loads below, but we don't have that.
*/
if (unlikely(!file_ref_get(&file->f_ref)))
continue;
/*
* Such a race can take two forms:
*
* (a) the file ref already went down to zero and the
* file hasn't been reused yet or the file count
* isn't zero but the file has already been reused.
*
* (b) the file table entry has changed under us.
* Note that we don't need to re-check the 'fdt->fd'
* pointer having changed, because it always goes
* hand-in-hand with 'fdt'.
*
* If so, we need to put our ref and try again.
*/
		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}
/*
* This isn't the file we're looking for or we're not
* allowed to get a reference to it.
*/
if (unlikely(file->f_mode & mask)) {
fput(file);
return NULL;
}
/*
* Ok, we have a ref to the file, and checked that it
* still exists.
*/
return file;
}
}
static struct file *__fget_files(struct files_struct *files, unsigned int fd,
fmode_t mask)
{
struct file *file;
	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();
return file;
}
static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
return __fget_files(current->files, fd, mask);
}
struct file *fget(unsigned int fd)
{
return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);
struct file *fget_raw(unsigned int fd)
{
return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
struct file *fget_task(struct task_struct *task, unsigned int fd)
{
struct file *file = NULL;
task_lock(task);
if (task->files)
file = __fget_files(task->files, fd, 0);
task_unlock(task);
return file;
}
struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd)
{
/* Must be called with rcu_read_lock held */
struct files_struct *files;
unsigned int fd = *ret_fd;
struct file *file = NULL;
task_lock(task);
files = task->files;
if (files) {
rcu_read_lock();
for (; fd < files_fdtable(files)->max_fds; fd++) {
file = __fget_files_rcu(files, fd, 0);
if (file)
break;
}
rcu_read_unlock();
}
task_unlock(task);
*ret_fd = fd;
return file;
}
EXPORT_SYMBOL(fget_task_next);
/*
* Lightweight file lookup - no refcnt increment if fd table isn't shared.
*
* You can use this instead of fget if you satisfy all of the following
* conditions:
* 1) You must call fput_light before exiting the syscall and returning control
* to userspace (i.e. you cannot remember the returned struct file * after
* returning to userspace).
* 2) You must not call filp_close on the returned struct file * in between
* calls to fget_light and fput_light.
* 3) You must not clone the current task in between the calls to fget_light
* and fput_light.
*
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
*
* (As an exception to rule 2, you can call filp_close between fget_light and
* fput_light provided that you capture a real refcount with get_file before
* the call to filp_close, and ensure that this real refcount is fput *after*
* the fput_light call.)
*
* See also the documentation in rust/kernel/file.rs.
*/
static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
/*
* If another thread is concurrently calling close_fd() followed
* by put_files_struct(), we must not observe the old table
* entry combined with the new refcount - otherwise we could
* return a file that is concurrently being freed.
*
* atomic_read_acquire() pairs with atomic_dec_and_test() in
* put_files_struct().
*/
	if (likely(atomic_read_acquire(&files->count) == 1)) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return EMPTY_FD;
		return BORROWED_FD(file);
	} else {
		file = __fget_files(files, fd, mask);
		if (!file)
			return EMPTY_FD;
		return CLONED_FD(file);
	}
}
struct fd fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fdget);
struct fd fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
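/*
 * Illustrative sketch (not part of the original file): a typical borrowing
 * lookup built on fdget(). fd_empty() and fdput() are assumed to come from
 * <linux/file.h>; fd_file() extracts the struct file from the word-packed
 * struct fd.
 */
#if 0
static loff_t example_query_size(unsigned int fd)
{
	struct fd f = fdget(fd);
	loff_t size;

	if (fd_empty(f))
		return -EBADF;
	size = i_size_read(file_inode(fd_file(f)));
	fdput(f);
	return size;
}
#endif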
/*
* Try to avoid f_pos locking. We only need it if the
* file is marked for FMODE_ATOMIC_POS, and it can be
* accessed multiple ways.
*
* Always do it for directories, because pidfd_getfd()
* can make a file accessible even if it otherwise would
* not be, and for directories this is a correctness
* issue, not a "POSIX requirement".
*/
static inline bool file_needs_f_pos_lock(struct file *file)
{
if (!(file->f_mode & FMODE_ATOMIC_POS))
return false;
if (__file_ref_read_raw(&file->f_ref) != FILE_REF_ONEREF)
return true;
if (file->f_op->iterate_shared)
return true;
return false;
}
bool file_seek_cur_needs_f_lock(struct file *file)
{
if (!(file->f_mode & FMODE_ATOMIC_POS) && !file->f_op->iterate_shared)
return false;
/*
* Note that we are not guaranteed to be called after fdget_pos() on
* this file obj, in which case the caller is expected to provide the
* appropriate locking.
*/
return true;
}
struct fd fdget_pos(unsigned int fd)
{
struct fd f = fdget(fd);
struct file *file = fd_file(f);
if (likely(file) && file_needs_f_pos_lock(file)) {
f.word |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
return f;
}
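/*
 * Illustrative sketch (not part of the original file): read/write style
 * callers pair fdget_pos() with fdput_pos() (assumed from <linux/file.h>),
 * which drops f_pos_lock via __f_unlock_pos() below when FDPUT_POS_UNLOCK
 * was set. FMODE_STREAM handling is deliberately omitted here.
 */
#if 0
static ssize_t example_positioned_read(unsigned int fd, char __user *buf,
				       size_t count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (!fd_empty(f)) {
		loff_t pos = fd_file(f)->f_pos;

		ret = vfs_read(fd_file(f), buf, count, &pos);
		if (ret >= 0)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}
	return ret;
}
#endif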
void __f_unlock_pos(struct file *f)
{
mutex_unlock(&f->f_pos_lock);
}
/*
* We only lock f_pos if we have threads or if the file might be
* shared with another process. In both cases we'll have an elevated
* file count (done either by fdget() or by fork()).
*/
void set_close_on_exec(unsigned int fd, int flag)
{
struct files_struct *files = current->files;
spin_lock(&files->file_lock);
__set_close_on_exec(fd, files_fdtable(files), flag);
spin_unlock(&files->file_lock);
}
bool get_close_on_exec(unsigned int fd)
{
bool res;
rcu_read_lock();
res = close_on_exec(fd, current->files);
rcu_read_unlock();
return res;
}
static int do_dup2(struct files_struct *files,
struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
struct file *tofree;
struct fdtable *fdt;
/*
* dup2() is expected to close the file installed in the target fd slot
* (if any). However, userspace hand-picking a fd may be racing against
* its own threads which happened to allocate it in open() et al but did
* not populate it yet.
*
* Broadly speaking we may be racing against the following:
* fd = get_unused_fd_flags(); // fd slot reserved, ->fd[fd] == NULL
* file = hard_work_goes_here();
* fd_install(fd, file); // only now ->fd[fd] == file
*
* It is an invariant that a successfully allocated fd has a NULL entry
* in the array until the matching fd_install().
*
* If we fit the window, we have the fd to populate, yet no target file
* to close. Trying to ignore it and install our new file would violate
* the invariant and make fd_install() overwrite our file.
*
* Things can be done(tm) to handle this. However, the issue does not
* concern legitimate programs and we only need to make sure the kernel
* does not trip over it.
*
* The simplest way out is to return an error if we find ourselves here.
*
* POSIX is silent on the issue, we return -EBUSY.
*/
fdt = files_fdtable(files);
fd = array_index_nospec(fd, fdt->max_fds);
tofree = rcu_dereference_raw(fdt->fd[fd]);
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
get_file(file);
rcu_assign_pointer(fdt->fd[fd], file);
__set_open_fd(fd, fdt, flags & O_CLOEXEC);
spin_unlock(&files->file_lock);
if (tofree)
filp_close(tofree, files);
return fd;
Ebusy:
spin_unlock(&files->file_lock);
return -EBUSY;
}
int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
int err;
struct files_struct *files = current->files;
if (!file)
return close_fd(fd);
if (fd >= rlimit(RLIMIT_NOFILE))
return -EBADF;
spin_lock(&files->file_lock);
err = expand_files(files, fd);
if (unlikely(err < 0))
goto out_unlock;
err = do_dup2(files, file, fd, flags);
if (err < 0)
return err;
return 0;
out_unlock:
spin_unlock(&files->file_lock);
return err;
}
/**
* receive_fd() - Install received file into file descriptor table
* @file: struct file that was received from another process
* @ufd: __user pointer to write new fd number to
* @o_flags: the O_* flags to apply to the new fd entry
*
* Installs a received file into the file descriptor table, with appropriate
* checks and count updates. Optionally writes the fd number to userspace, if
* @ufd is non-NULL.
*
* This helper handles its own reference counting of the incoming
* struct file.
*
* Returns the newly installed fd or a negative errno on error.
*/
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
int new_fd;
int error;
error = security_file_receive(file);
if (error)
return error;
new_fd = get_unused_fd_flags(o_flags);
if (new_fd < 0)
return new_fd;
if (ufd) {
error = put_user(new_fd, ufd);
if (error) {
put_unused_fd(new_fd);
return error;
}
}
fd_install(new_fd, get_file(file));
__receive_sock(file);
return new_fd;
}
EXPORT_SYMBOL_GPL(receive_fd);
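/*
 * Example (illustrative sketch, not part of the original file): installing a
 * struct file received from another process (e.g. via SCM_RIGHTS) into the
 * current task. receive_fd() takes its own reference on @file, so the caller
 * keeps (and later drops) its original reference. Names are hypothetical.
 */
static __maybe_unused int example_install_received(struct file *file, int __user *ufdp)
{
int fd = receive_fd(file, ufdp, O_CLOEXEC);
/* the caller still owns its reference to @file and must fput() it itself */
return fd;
}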
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
int error;
error = security_file_receive(file);
if (error)
return error;
error = replace_fd(new_fd, file, o_flags);
if (error)
return error;
__receive_sock(file);
return new_fd;
}
static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
int err = -EBADF;
struct file *file;
struct files_struct *files = current->files;
if ((flags & ~O_CLOEXEC) != 0)
return -EINVAL;
if (unlikely(oldfd == newfd))
return -EINVAL;
if (newfd >= rlimit(RLIMIT_NOFILE))
return -EBADF;
spin_lock(&files->file_lock);
err = expand_files(files, newfd);
file = files_lookup_fd_locked(files, oldfd);
if (unlikely(!file))
goto Ebadf;
if (unlikely(err < 0)) {
if (err == -EMFILE)
goto Ebadf;
goto out_unlock;
}
return do_dup2(files, file, newfd, flags);
Ebadf:
err = -EBADF;
out_unlock:
spin_unlock(&files->file_lock);
return err;
}
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
return ksys_dup3(oldfd, newfd, flags);
}
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
if (unlikely(newfd == oldfd)) { /* corner case */
struct files_struct *files = current->files;
struct file *f;
int retval = oldfd;
rcu_read_lock();
f = __fget_files_rcu(files, oldfd, 0);
if (!f)
retval = -EBADF;
rcu_read_unlock();
if (f)
fput(f);
return retval;
}
return ksys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
int ret = -EBADF;
struct file *file = fget_raw(fildes);
if (file) {
ret = get_unused_fd_flags(0);
if (ret >= 0)
fd_install(ret, file);
else
fput(file);
}
return ret;
}
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
unsigned long nofile = rlimit(RLIMIT_NOFILE);
int err;
if (from >= nofile)
return -EINVAL;
err = alloc_fd(from, nofile, flags);
if (err >= 0) {
get_file(file);
fd_install(err, file);
}
return err;
}
int iterate_fd(struct files_struct *files, unsigned n,
int (*f)(const void *, struct file *, unsigned),
const void *p)
{
struct fdtable *fdt;
int res = 0;
if (!files)
return 0;
spin_lock(&files->file_lock);
for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
struct file *file;
file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
if (!file)
continue;
res = f(p, file, n);
if (res)
break;
}
spin_unlock(&files->file_lock);
return res;
}
EXPORT_SYMBOL(iterate_fd);
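/*
 * Example (illustrative sketch, not part of the original file): using
 * iterate_fd() to find the first descriptor referring to a given file. A
 * non-zero return from the callback stops the walk and is passed back, so the
 * fd is offset by one to distinguish fd 0 from "not found". Names are
 * hypothetical.
 */
static __maybe_unused int example_match_file(const void *p, struct file *file, unsigned int fd)
{
return file == p ? (int)fd + 1 : 0;
}
static __maybe_unused int example_find_fd(struct files_struct *files, struct file *file)
{
int res = iterate_fd(files, 0, example_match_file, file);
return res ? res - 1 : -ENOENT;
}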
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/mmdebug.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/vmalloc.h>
#include "physaddr.h"
#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
unsigned long y = x - __START_KERNEL_map;
/* use the carry flag to determine if x was < __START_KERNEL_map */
if (unlikely(x > y)) {
x = y + phys_base;
VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
} else {
x = y + (__START_KERNEL_map - PAGE_OFFSET);
/* carry flag will be set if starting x was >= PAGE_OFFSET */
VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
}
return x;
}
EXPORT_SYMBOL(__phys_addr);
unsigned long __phys_addr_symbol(unsigned long x)
{
unsigned long y = x - __START_KERNEL_map;
/* only check upper bounds since lower bounds will trigger carry */
VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
return y + phys_base;
}
EXPORT_SYMBOL(__phys_addr_symbol);
#endif
bool __virt_addr_valid(unsigned long x)
{
unsigned long y = x - __START_KERNEL_map;
/* use the carry flag to determine if x was < __START_KERNEL_map */
if (unlikely(x > y)) {
x = y + phys_base;
if (y >= KERNEL_IMAGE_SIZE)
return false;
} else {
x = y + (__START_KERNEL_map - PAGE_OFFSET);
/* carry flag will be set if starting x was >= PAGE_OFFSET */
if ((x > y) || !phys_addr_valid(x))
return false;
}
return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);
#else
#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
unsigned long phys_addr = x - PAGE_OFFSET;
/* VMALLOC_* aren't constants */
VIRTUAL_BUG_ON(x < PAGE_OFFSET);
VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
/* max_low_pfn is set early, but not _that_ early */
if (max_low_pfn) {
VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn);
BUG_ON(slow_virt_to_phys((void *)x) != phys_addr);
}
return phys_addr;
}
EXPORT_SYMBOL(__phys_addr);
#endif
bool __virt_addr_valid(unsigned long x)
{
if (x < PAGE_OFFSET)
return false;
if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
return false;
if (x >= FIXADDR_START)
return false;
return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);
#endif /* CONFIG_X86_64 */
// SPDX-License-Identifier: GPL-2.0
/*
* bus.c - bus driver management
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007 Novell Inc.
* Copyright (c) 2023 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
*/
#include <linux/async.h>
#include <linux/device/bus.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include "base.h"
#include "power/power.h"
/* /sys/devices/system */
static struct kset *system_kset;
/* /sys/bus */
static struct kset *bus_kset;
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
/*
* sysfs bindings for drivers
*/
#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct driver_attribute driver_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
/**
* bus_to_subsys - Turn a struct bus_type into a struct subsys_private
*
* @bus: pointer to the struct bus_type to look up
*
* The driver core internals need to work on the subsys_private structure, not
* the external struct bus_type pointer. This function walks the list of
* registered busses in the system and finds the matching one and returns the
* internal struct subsys_private that relates to that bus.
*
* Note, the reference count of the return value is INCREMENTED if it is not
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
if (!bus || !bus_kset)
return NULL;
spin_lock(&bus_kset->list_lock);
if (list_empty(&bus_kset->list))
goto done;
list_for_each_entry(kobj, &bus_kset->list, entry) {
struct kset *kset = container_of(kobj, struct kset, kobj);
sp = container_of_const(kset, struct subsys_private, subsys);
if (sp->bus == bus)
goto done;
}
sp = NULL;
done:
sp = subsys_get(sp);
spin_unlock(&bus_kset->list_lock);
return sp;
}
static const struct bus_type *bus_get(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
if (sp)
return bus;
return NULL;
}
static void bus_put(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
/* two puts are required as the call to bus_to_subsys incremented it again */
subsys_put(sp);
subsys_put(sp);
}
static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct driver_attribute *drv_attr = to_drv_attr(attr);
struct driver_private *drv_priv = to_driver(kobj);
ssize_t ret = -EIO;
if (drv_attr->show)
ret = drv_attr->show(drv_priv->driver, buf);
return ret;
}
static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct driver_attribute *drv_attr = to_drv_attr(attr);
struct driver_private *drv_priv = to_driver(kobj);
ssize_t ret = -EIO;
if (drv_attr->store)
ret = drv_attr->store(drv_priv->driver, buf, count);
return ret;
}
static const struct sysfs_ops driver_sysfs_ops = {
.show = drv_attr_show,
.store = drv_attr_store,
};
static void driver_release(struct kobject *kobj)
{
struct driver_private *drv_priv = to_driver(kobj);
pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__);
kfree(drv_priv);
}
static const struct kobj_type driver_ktype = {
.sysfs_ops = &driver_sysfs_ops,
.release = driver_release,
};
/*
* sysfs bindings for buses
*/
static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
/* return -EIO for reading a bus attribute without show() */
ssize_t ret = -EIO;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
return ret;
}
static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
/* return -EIO for writing a bus attribute without store() */
ssize_t ret = -EIO;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);
return ret;
}
static const struct sysfs_ops bus_sysfs_ops = {
.show = bus_attr_show,
.store = bus_attr_store,
};
int bus_create_file(const struct bus_type *bus, struct bus_attribute *attr)
{
struct subsys_private *sp = bus_to_subsys(bus);
int error;
if (!sp)
return -EINVAL;
error = sysfs_create_file(&sp->subsys.kobj, &attr->attr);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_create_file);
void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr)
{
struct subsys_private *sp = bus_to_subsys(bus);
if (!sp)
return;
sysfs_remove_file(&sp->subsys.kobj, &attr->attr);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_remove_file);
static void bus_release(struct kobject *kobj)
{
struct subsys_private *priv = to_subsys_private(kobj);
lockdep_unregister_key(&priv->lock_key);
kfree(priv);
}
static const struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
.release = bus_release,
};
static int bus_uevent_filter(const struct kobject *kobj)
{
const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &bus_ktype)
return 1;
return 0;
}
static const struct kset_uevent_ops bus_uevent_ops = {
.filter = bus_uevent_filter,
};
/* Manually detach a device from its associated driver. */
static ssize_t unbind_store(struct device_driver *drv, const char *buf,
size_t count)
{
const struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
int err = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == drv) {
device_driver_detach(dev);
err = count;
}
put_device(dev);
bus_put(bus);
return err;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
/*
* Manually attach a device to a driver.
* Note: the driver must want to bind to the device,
* it is not possible to override the driver's id table.
*/
static ssize_t bind_store(struct device_driver *drv, const char *buf,
size_t count)
{
const struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
int err = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && driver_match_device(drv, dev)) {
err = device_driver_attach(drv, dev);
if (!err) {
/* success */
err = count;
}
}
put_device(dev);
bus_put(bus);
return err;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
static ssize_t drivers_autoprobe_show(const struct bus_type *bus, char *buf)
{
struct subsys_private *sp = bus_to_subsys(bus);
int ret;
if (!sp)
return -EINVAL;
ret = sysfs_emit(buf, "%d\n", sp->drivers_autoprobe);
subsys_put(sp);
return ret;
}
static ssize_t drivers_autoprobe_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct subsys_private *sp = bus_to_subsys(bus);
if (!sp)
return -EINVAL;
if (buf[0] == '0')
sp->drivers_autoprobe = 0;
else
sp->drivers_autoprobe = 1;
subsys_put(sp);
return count;
}
static ssize_t drivers_probe_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct device *dev;
int err = -EINVAL;
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev)
return -ENODEV;
if (bus_rescan_devices_helper(dev, NULL) == 0)
err = count;
put_device(dev);
return err;
}
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
struct device *dev = NULL;
struct device_private *dev_prv;
if (n) {
dev_prv = to_device_private_bus(n);
dev = dev_prv->device;
}
return dev;
}
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
* @start: device to start iterating from.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @bus's list of devices, and call @fn for each,
* passing it @data. If @start is not NULL, we use that device to
* begin iterating from.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*
* NOTE: The device that returns a non-zero value is not retained
* in any way, nor is its refcount incremented. If the caller needs
* to retain this data, it should do so, and increment the reference
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
void *data, device_iter_t fn)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
int error = 0;
if (!sp)
return -EINVAL;
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while (!error && (dev = next_device(&i)))
error = fn(dev, data);
klist_iter_exit(&i);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_dev);
/**
* bus_find_device - device iterator for locating a particular device.
* @bus: bus type
* @start: Device to begin with
* @data: Data to pass to match function
* @match: Callback function to check device
*
* This is similar to the bus_for_each_dev() function above, but it
* returns a reference to a device that is 'found' for later use, as
* determined by the @match callback.
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
device_match_t match)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
if (!sp)
return NULL;
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while ((dev = next_device(&i))) {
if (match(dev, data)) {
get_device(dev);
break;
}
}
klist_iter_exit(&i);
subsys_put(sp);
return dev;
}
EXPORT_SYMBOL_GPL(bus_find_device);
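/*
 * Example (illustrative sketch, not part of the original file): looking up a
 * device on a bus by name with the generic device_match_name() helper. The
 * reference taken by bus_find_device() must be dropped with put_device() when
 * the caller is done. The helper name is hypothetical.
 */
static __maybe_unused struct device *example_find_by_name(const struct bus_type *bus, const char *name)
{
return bus_find_device(bus, NULL, name, device_match_name);
}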
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
struct driver_private *drv_priv;
if (n) {
drv_priv = container_of(n, struct driver_private, knode_bus);
return drv_priv->driver;
}
return NULL;
}
/**
* bus_for_each_drv - driver iterator
* @bus: bus we're dealing with.
* @start: driver to start iterating on.
* @data: data to pass to the callback.
* @fn: function to call for each driver.
*
* This is nearly identical to the device iterator above.
* We iterate over each driver that belongs to @bus, and call
* @fn for each. If @fn returns anything but 0, we break out
* and return it. If @start is not NULL, we use it as the head
* of the list.
*
* NOTE: we don't return the driver that returns a non-zero
* value, nor do we leave the reference count incremented for that
* driver. If the caller needs to know that info, it must set it
* in the callback. It must also be sure to increment the refcount
* so it doesn't disappear before returning to the caller.
*/
int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *))
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device_driver *drv;
int error = 0;
if (!sp)
return -EINVAL;
klist_iter_init_node(&sp->klist_drivers, &i,
start ? &start->p->knode_bus : NULL);
while ((drv = next_driver(&i)) && !error)
error = fn(drv, data);
klist_iter_exit(&i);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_drv);
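/*
 * Example (illustrative sketch, not part of the original file): counting the
 * devices currently registered on a bus with bus_for_each_dev(). Returning 0
 * from the callback keeps the iteration going. Names are hypothetical.
 */
static __maybe_unused int example_count_one(struct device *dev, void *data)
{
(*(unsigned int *)data)++;
return 0;
}
static __maybe_unused unsigned int example_count_devices(const struct bus_type *bus)
{
unsigned int count = 0;
bus_for_each_dev(bus, NULL, &count, example_count_one);
return count;
}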
/**
* bus_add_device - add device to bus
* @dev: device being added
*
* - Add device's bus attributes.
* - Create links to device's bus.
* - Add the device to its bus's list of devices.
*/
int bus_add_device(struct device *dev)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
int error;
if (!sp) {
/*
* This is a normal operation for many devices that do not
* have a bus assigned to them, just say that all went
* well.
*/
return 0;
}
/*
* Reference in sp is now incremented and will be dropped when
* the device is removed from the bus
*/
pr_debug("bus: '%s': add device %s\n", sp->bus->name, dev_name(dev));
error = device_add_groups(dev, sp->bus->dev_groups);
if (error)
goto out_put;
error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev));
if (error)
goto out_groups;
error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
if (error)
goto out_subsys;
klist_add_tail(&dev->p->knode_bus, &sp->klist_devices);
return 0;
out_subsys:
sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
out_groups:
device_remove_groups(dev, sp->bus->dev_groups);
out_put:
subsys_put(sp);
return error;
}
/**
* bus_probe_device - probe drivers for a new device
* @dev: device to probe
*
* - Automatically probe for a driver if the bus allows it.
*/
void bus_probe_device(struct device *dev)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
if (!sp)
return;
if (sp->drivers_autoprobe)
device_initial_probe(dev);
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
if (sif->add_dev)
sif->add_dev(dev, sif);
mutex_unlock(&sp->mutex);
subsys_put(sp);
}
/**
* bus_remove_device - remove device from bus
* @dev: device to be removed
*
* - Remove device from all interfaces.
* - Remove symlink from bus' directory.
* - Delete device from bus's list.
* - Detach from its driver.
* - Drop reference taken in bus_add_device().
*/
void bus_remove_device(struct device *dev)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
if (!sp)
return;
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
if (sif->remove_dev)
sif->remove_dev(dev, sif);
mutex_unlock(&sp->mutex);
sysfs_remove_link(&dev->kobj, "subsystem");
sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
device_remove_groups(dev, dev->bus->dev_groups);
if (klist_node_attached(&dev->p->knode_bus))
klist_del(&dev->p->knode_bus);
pr_debug("bus: '%s': remove device %s\n",
dev->bus->name, dev_name(dev));
device_release_driver(dev);
/*
* Decrement the reference count twice, once for the bus_to_subsys()
* call in the start of this function, and the second one from the
* reference increment in bus_add_device()
*/
subsys_put(sp);
subsys_put(sp);
}
static int __must_check add_bind_files(struct device_driver *drv)
{
int ret;
ret = driver_create_file(drv, &driver_attr_unbind);
if (ret == 0) {
ret = driver_create_file(drv, &driver_attr_bind);
if (ret)
driver_remove_file(drv, &driver_attr_unbind);
}
return ret;
}
static void remove_bind_files(struct device_driver *drv)
{
driver_remove_file(drv, &driver_attr_bind);
driver_remove_file(drv, &driver_attr_unbind);
}
static BUS_ATTR_WO(drivers_probe);
static BUS_ATTR_RW(drivers_autoprobe);
static int add_probe_files(const struct bus_type *bus)
{
int retval;
retval = bus_create_file(bus, &bus_attr_drivers_probe);
if (retval)
goto out;
retval = bus_create_file(bus, &bus_attr_drivers_autoprobe);
if (retval)
bus_remove_file(bus, &bus_attr_drivers_probe);
out:
return retval;
}
static void remove_probe_files(const struct bus_type *bus)
{
bus_remove_file(bus, &bus_attr_drivers_autoprobe);
bus_remove_file(bus, &bus_attr_drivers_probe);
}
static ssize_t uevent_store(struct device_driver *drv, const char *buf,
size_t count)
{
int rc;
rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
return rc ? rc : count;
}
static DRIVER_ATTR_WO(uevent);
/**
* bus_add_driver - Add a driver to the bus.
* @drv: driver.
*/
int bus_add_driver(struct device_driver *drv)
{
struct subsys_private *sp = bus_to_subsys(drv->bus);
struct driver_private *priv;
int error = 0;
if (!sp)
return -EINVAL;
/*
* Reference in sp is now incremented and will be dropped when
* the driver is removed from the bus
*/
pr_debug("bus: '%s': add driver %s\n", sp->bus->name, drv->name);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
error = -ENOMEM;
goto out_put_bus;
}
klist_init(&priv->klist_devices, NULL, NULL);
priv->driver = drv;
drv->p = priv;
priv->kobj.kset = sp->drivers_kset;
error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
"%s", drv->name);
if (error)
goto out_unregister;
klist_add_tail(&priv->knode_bus, &sp->klist_drivers);
if (sp->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_del_list;
}
error = module_add_driver(drv->owner, drv);
if (error) {
printk(KERN_ERR "%s: failed to create module links for %s\n",
__func__, drv->name);
goto out_detach;
}
error = driver_create_file(drv, &driver_attr_uevent);
if (error) {
printk(KERN_ERR "%s: uevent attr (%s) failed\n",
__func__, drv->name);
}
error = driver_add_groups(drv, sp->bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
printk(KERN_ERR "%s: driver_add_groups(%s) failed\n",
__func__, drv->name);
}
if (!drv->suppress_bind_attrs) {
error = add_bind_files(drv);
if (error) {
/* Ditto */
printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
__func__, drv->name);
}
}
return 0;
out_detach:
driver_detach(drv);
out_del_list:
klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
subsys_put(sp);
return error;
}
/**
* bus_remove_driver - delete driver from bus's knowledge.
* @drv: driver.
*
* Detach the driver from the devices it controls, and remove
* it from its bus's list of drivers. Finally, we drop the reference
* to the bus we took in bus_add_driver().
*/
void bus_remove_driver(struct device_driver *drv)
{
struct subsys_private *sp = bus_to_subsys(drv->bus);
if (!sp)
return;
pr_debug("bus: '%s': remove driver %s\n", sp->bus->name, drv->name);
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
driver_remove_groups(drv, sp->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
driver_detach(drv);
module_remove_driver(drv);
kobject_put(&drv->p->kobj);
/*
* Decrement the reference count twice, once for the bus_to_subsys()
* call in the start of this function, and the second one from the
* reference increment in bus_add_driver()
*/
subsys_put(sp);
subsys_put(sp);
}
/* Helper for bus_rescan_devices's iter */
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data)
{
int ret = 0;
if (!dev->driver) {
if (dev->parent && dev->bus->need_parent_lock)
device_lock(dev->parent);
ret = device_attach(dev);
if (dev->parent && dev->bus->need_parent_lock)
device_unlock(dev->parent);
}
return ret < 0 ? ret : 0;
}
/**
* bus_rescan_devices - rescan devices on the bus for possible drivers
* @bus: the bus to scan.
*
* This function will look for devices on the bus with no driver
* attached and rescan them against existing drivers by calling
* device_attach() for each unbound device to see if any driver matches.
*/
int bus_rescan_devices(const struct bus_type *bus)
{
return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
}
EXPORT_SYMBOL_GPL(bus_rescan_devices);
/**
* device_reprobe - remove driver for a device and probe for a new driver
* @dev: the device to reprobe
*
* This function detaches the attached driver (if any) for the given
* device and restarts the driver probing process. It is intended
* for use when probing criteria have changed during a device's lifetime
* and driver attachment should change accordingly.
*/
int device_reprobe(struct device *dev)
{
if (dev->driver)
device_driver_detach(dev);
return bus_rescan_devices_helper(dev, NULL);
}
EXPORT_SYMBOL_GPL(device_reprobe);
static void klist_devices_get(struct klist_node *n)
{
struct device_private *dev_prv = to_device_private_bus(n);
struct device *dev = dev_prv->device;
get_device(dev);
}
static void klist_devices_put(struct klist_node *n)
{
struct device_private *dev_prv = to_device_private_bus(n);
struct device *dev = dev_prv->device;
put_device(dev);
}
static ssize_t bus_uevent_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct subsys_private *sp = bus_to_subsys(bus);
int ret;
if (!sp)
return -EINVAL;
ret = kobject_synth_uevent(&sp->subsys.kobj, buf, count);
subsys_put(sp);
if (ret)
return ret;
return count;
}
/*
* "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO()
* here, but can not use it as earlier in the file we have
* DRIVER_ATTR_WO(uevent), which would cause a clash with the store
* function name.
*/
static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL,
bus_uevent_store);
/**
* bus_register - register a driver-core subsystem
* @bus: bus to register
*
* Once we have that, we register the bus with the kobject
* infrastructure, then register the children subsystems it has:
* the devices and drivers that belong to the subsystem.
*/
int bus_register(const struct bus_type *bus)
{
int retval;
struct subsys_private *priv;
struct kobject *bus_kobj;
struct lock_class_key *key;
priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = bus;
BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
bus_kobj = &priv->subsys.kobj;
retval = kobject_set_name(bus_kobj, "%s", bus->name);
if (retval)
goto out;
bus_kobj->kset = bus_kset;
bus_kobj->ktype = &bus_ktype;
priv->drivers_autoprobe = 1;
retval = kset_register(&priv->subsys);
if (retval)
goto out;
retval = bus_create_file(bus, &bus_attr_uevent);
if (retval)
goto bus_uevent_fail;
priv->devices_kset = kset_create_and_add("devices", NULL, bus_kobj);
if (!priv->devices_kset) {
retval = -ENOMEM;
goto bus_devices_fail;
}
priv->drivers_kset = kset_create_and_add("drivers", NULL, bus_kobj);
if (!priv->drivers_kset) {
retval = -ENOMEM;
goto bus_drivers_fail;
}
INIT_LIST_HEAD(&priv->interfaces);
key = &priv->lock_key;
lockdep_register_key(key);
__mutex_init(&priv->mutex, "subsys mutex", key);
klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
klist_init(&priv->klist_drivers, NULL, NULL);
retval = add_probe_files(bus);
if (retval)
goto bus_probe_files_fail;
retval = sysfs_create_groups(bus_kobj, bus->bus_groups);
if (retval)
goto bus_groups_fail;
pr_debug("bus: '%s': registered\n", bus->name);
return 0;
bus_groups_fail:
remove_probe_files(bus);
bus_probe_files_fail:
kset_unregister(priv->drivers_kset);
bus_drivers_fail:
kset_unregister(priv->devices_kset);
bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&priv->subsys);
/* Above kset_unregister() will kfree @priv */
priv = NULL;
out:
kfree(priv);
return retval;
}
EXPORT_SYMBOL_GPL(bus_register);
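/*
 * Example (illustrative sketch, not part of the original file): a minimal bus
 * definition with registration/unregistration helpers. A real user would call
 * these from module_init()/module_exit() or from core init code. All names
 * here are hypothetical.
 */
static const struct bus_type example_bus_type = {
.name = "example_bus",
};
static __maybe_unused int example_bus_init(void)
{
return bus_register(&example_bus_type);
}
static __maybe_unused void example_bus_exit(void)
{
bus_unregister(&example_bus_type);
}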
/**
* bus_unregister - remove a bus from the system
* @bus: bus.
*
* Unregister the child subsystems and the bus itself.
* Finally, we call bus_put() to release the refcount
*/
void bus_unregister(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct kobject *bus_kobj;
if (!sp)
return;
pr_debug("bus: '%s': unregistering\n", bus->name);
if (sp->dev_root)
device_unregister(sp->dev_root);
bus_kobj = &sp->subsys.kobj;
sysfs_remove_groups(bus_kobj, bus->bus_groups);
remove_probe_files(bus);
bus_remove_file(bus, &bus_attr_uevent);
kset_unregister(sp->drivers_kset);
kset_unregister(sp->devices_kset);
kset_unregister(&sp->subsys);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_unregister);
int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
struct subsys_private *sp = bus_to_subsys(bus);
int retval;
if (!sp)
return -EINVAL;
retval = blocking_notifier_chain_register(&sp->bus_notifier, nb);
subsys_put(sp);
return retval;
}
EXPORT_SYMBOL_GPL(bus_register_notifier);
int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
struct subsys_private *sp = bus_to_subsys(bus);
int retval;
if (!sp)
return -EINVAL;
retval = blocking_notifier_chain_unregister(&sp->bus_notifier, nb);
subsys_put(sp);
return retval;
}
EXPORT_SYMBOL_GPL(bus_unregister_notifier);
void bus_notify(struct device *dev, enum bus_notifier_event value)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
if (!sp)
return;
blocking_notifier_call_chain(&sp->bus_notifier, value, dev);
subsys_put(sp);
}
struct kset *bus_get_kset(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct kset *kset;
if (!sp)
return NULL;
kset = &sp->subsys;
subsys_put(sp);
return kset;
}
EXPORT_SYMBOL_GPL(bus_get_kset);
/*
* Yes, this forcibly breaks the klist abstraction temporarily. It
* just wants to sort the klist, not change reference counts and
* take/drop locks rapidly in the process. It does all this while
* holding the lock for the list, so objects can't otherwise be
* added/removed while we're swizzling.
*/
static void device_insertion_sort_klist(struct device *a, struct list_head *list,
int (*compare)(const struct device *a,
const struct device *b))
{
struct klist_node *n;
struct device_private *dev_prv;
struct device *b;
list_for_each_entry(n, list, n_node) {
dev_prv = to_device_private_bus(n);
b = dev_prv->device;
if (compare(a, b) <= 0) {
list_move_tail(&a->p->knode_bus.n_node,
&b->p->knode_bus.n_node);
return;
}
}
list_move_tail(&a->p->knode_bus.n_node, list);
}
void bus_sort_breadthfirst(const struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b))
{
struct subsys_private *sp = bus_to_subsys(bus);
LIST_HEAD(sorted_devices);
struct klist_node *n, *tmp;
struct device_private *dev_prv;
struct device *dev;
struct klist *device_klist;
if (!sp)
return;
device_klist = &sp->klist_devices;
spin_lock(&device_klist->k_lock);
list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
dev_prv = to_device_private_bus(n);
dev = dev_prv->device;
device_insertion_sort_klist(dev, &sorted_devices, compare);
}
list_splice(&sorted_devices, &device_klist->k_list);
spin_unlock(&device_klist->k_lock);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
struct subsys_dev_iter {
struct klist_iter ki;
const struct device_type *type;
};
/**
* subsys_dev_iter_init - initialize subsys device iterator
* @iter: subsys iterator to initialize
* @sp: the subsys private (i.e. bus) we want to iterate over
* @start: the device to start iterating from, if any
* @type: device_type of the devices to iterate over, NULL for all
*
* Initialize subsys iterator @iter such that it iterates over devices
* of @subsys. If @start is set, the list iteration will start there,
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp,
struct device *start, const struct device_type *type)
{
struct klist_node *start_knode = NULL;
if (start)
start_knode = &start->p->knode_bus;
klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
}
/**
* subsys_dev_iter_next - iterate to the next device
* @iter: subsys iterator to proceed
*
* Proceed @iter to the next device and return it. Returns NULL if
* iteration is complete.
*
* The returned device is referenced and won't be released until the
* iterator proceeds to the next device or is exited. The caller is
* free to do whatever it wants to do with the device including
* calling back into subsys code.
*/
static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
{
struct klist_node *knode;
struct device *dev;
for (;;) {
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
dev = to_device_private_bus(knode)->device;
if (!iter->type || iter->type == dev->type)
return dev;
}
}
/**
* subsys_dev_iter_exit - finish iteration
* @iter: subsys iterator to finish
*
* Finish an iteration. Always call this function after iteration is
* complete whether the iteration ran till the end or not.
*/
static void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
}
int subsys_interface_register(struct subsys_interface *sif)
{
struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return -ENODEV;
sp = bus_to_subsys(sif->subsys);
if (!sp)
return -EINVAL;
/*
* Reference in sp is now incremented and will be dropped when
* the interface is removed from the bus
*/
mutex_lock(&sp->mutex);
list_add_tail(&sif->node, &sp->interfaces);
if (sif->add_dev) {
subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->add_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(subsys_interface_register);
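/*
 * Example (illustrative sketch, not part of the original file): a subsys
 * interface that is told about every device already on, or later added to,
 * the bus it watches. All names are hypothetical; .subsys would point at the
 * watched bus and registration happens after that bus is itself registered.
 */
static int example_sif_add(struct device *dev, struct subsys_interface *sif)
{
dev_dbg(dev, "added under interface %s\n", sif->name);
return 0;
}
static void example_sif_remove(struct device *dev, struct subsys_interface *sif)
{
dev_dbg(dev, "removed under interface %s\n", sif->name);
}
static __maybe_unused struct subsys_interface example_sif = {
.name = "example_sif",
/* .subsys = pointer to the watched bus_type (hypothetical) */
.add_dev = example_sif_add,
.remove_dev = example_sif_remove,
};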
void subsys_interface_unregister(struct subsys_interface *sif)
{
struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return;
sp = bus_to_subsys(sif->subsys);
if (!sp)
return;
mutex_lock(&sp->mutex);
list_del_init(&sif->node);
if (sif->remove_dev) {
subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->remove_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
/*
* Decrement the reference count twice, once for the bus_to_subsys()
* call in the start of this function, and the second one from the
* reference increment in subsys_interface_register()
*/
subsys_put(sp);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(subsys_interface_unregister);
static void system_root_device_release(struct device *dev)
{
kfree(dev);
}
static int subsys_register(const struct bus_type *subsys,
const struct attribute_group **groups,
struct kobject *parent_of_root)
{
struct subsys_private *sp;
struct device *dev;
int err;
err = bus_register(subsys);
if (err < 0)
return err;
sp = bus_to_subsys(subsys);
if (!sp) {
err = -EINVAL;
goto err_sp;
}
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!dev) {
err = -ENOMEM;
goto err_dev;
}
err = dev_set_name(dev, "%s", subsys->name);
if (err < 0)
goto err_name;
dev->kobj.parent = parent_of_root;
dev->groups = groups;
dev->release = system_root_device_release;
err = device_register(dev);
if (err < 0)
goto err_dev_reg;
sp->dev_root = dev;
subsys_put(sp);
return 0;
err_dev_reg:
put_device(dev);
dev = NULL;
err_name:
kfree(dev);
err_dev:
subsys_put(sp);
err_sp:
bus_unregister(subsys);
return err;
}
/**
* subsys_system_register - register a subsystem at /sys/devices/system/
* @subsys: system subsystem
* @groups: default attributes for the root device
*
* All 'system' subsystems have a /sys/devices/system/<name> root device
* with the name of the subsystem. The root device can carry subsystem-
* wide attributes. All registered devices are below this single root
* device and are named after the subsystem with a simple enumeration
* number appended. The registered devices are not explicitly named;
* only 'id' in the device needs to be set.
*
* Do not use this interface for anything new, it exists for compatibility
* with bad ideas only. New subsystems should use plain subsystems, and
* subsystem-wide attributes should be added to the subsystem directory
* itself rather than to some fake root device placed in
* /sys/devices/system/<name>.
*/
int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups)
{
return subsys_register(subsys, groups, &system_kset->kobj);
}
EXPORT_SYMBOL_GPL(subsys_system_register);
/**
* subsys_virtual_register - register a subsystem at /sys/devices/virtual/
* @subsys: virtual subsystem
* @groups: default attributes for the root device
*
* All 'virtual' subsystems have a /sys/devices/virtual/<name> root device
* with the name of the subsystem. The root device can carry subsystem-wide
* attributes. All registered devices are below this single root device.
* There's no restriction on device naming. This is for kernel software
* constructs which need sysfs interface.
*/
int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups)
{
struct kobject *virtual_dir;
virtual_dir = virtual_device_parent();
if (!virtual_dir)
return -ENOMEM;
return subsys_register(subsys, groups, virtual_dir);
}
EXPORT_SYMBOL_GPL(subsys_virtual_register);
/**
* driver_find - locate driver on a bus by its name.
* @name: name of the driver.
* @bus: bus to scan for the driver.
*
* Call kset_find_obj() to iterate over list of drivers on
* a bus to find driver by name. Return driver if found.
*
* This routine provides no locking to prevent the driver it returns
* from being unregistered or unloaded while the caller is using it.
* The caller is responsible for preventing this.
*/
struct device_driver *driver_find(const char *name, const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct kobject *k;
struct driver_private *priv;
if (!sp)
return NULL;
k = kset_find_obj(sp->drivers_kset, name);
subsys_put(sp);
if (!k)
return NULL;
priv = to_driver(k);
/* Drop reference added by kset_find_obj() */
kobject_put(k);
return priv->driver;
}
EXPORT_SYMBOL_GPL(driver_find);
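/*
 * Example (illustrative sketch, not part of the original file): checking
 * whether a driver with a given name is currently registered on a bus. As
 * noted above, nothing pins the returned driver, so the answer is only a
 * snapshot. The helper name is hypothetical.
 */
static __maybe_unused bool example_driver_registered(const char *name, const struct bus_type *bus)
{
return driver_find(name, bus) != NULL;
}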
/*
* Warning, the value could go to "removed" instantly after calling this function, so be very
* careful when calling it...
*/
bool bus_is_registered(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
bool is_initialized = false;
if (sp) {
is_initialized = true;
subsys_put(sp);
}
return is_initialized;
}
/**
* bus_get_dev_root - return a pointer to the "device root" of a bus
* @bus: bus to return the device root of.
*
* If a bus has a "device root" structure, return it, WITH THE REFERENCE
* COUNT INCREMENTED.
*
* Note, when finished with the device, a call to put_device() is required.
*
* If the device root is not present (or bus is not a valid pointer), NULL
* will be returned.
*/
struct device *bus_get_dev_root(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct device *dev_root;
if (!sp)
return NULL;
dev_root = get_device(sp->dev_root);
subsys_put(sp);
return dev_root;
}
EXPORT_SYMBOL_GPL(bus_get_dev_root);
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
if (!bus_kset)
return -ENOMEM;
system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
if (!system_kset) {
/* Do error handling here as devices_init() do */
kset_unregister(bus_kset);
bus_kset = NULL;
pr_err("%s: failed to create and add kset 'bus'\n", __func__);
return -ENOMEM;
}
return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>
#include <linux/fsnotify.h>
#include <linux/page_idle.h>
#include <linux/uaccess.h>
#include <kunit/visibility.h>
#include "internal.h"
#include "swap.h"
/**
* kfree_const - conditionally free memory
* @x: pointer to the memory
*
* This function calls kfree() only if @x is not in the .rodata section.
*/
void kfree_const(const void *x)
{
if (!is_kernel_rodata((unsigned long)x))
kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
* __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated.
* @s: The data to copy
* @len: The size of the data, not including the NUL terminator
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
* Return: newly allocated copy of @s with NUL-termination or %NULL in
* case of error
*/
static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
char *buf;
/* '+1' for the NUL terminator */
buf = kmalloc_track_caller(len + 1, gfp);
if (!buf)
return NULL;
memcpy(buf, s, len);
/* Ensure the buf is always NUL-terminated, regardless of @s. */
buf[len] = '\0';
return buf;
}
/**
* kstrdup - allocate space for and copy an existing string
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
* Return: newly allocated copy of @s or %NULL in case of error
*/
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL;
}
EXPORT_SYMBOL(kstrdup);
/**
* kstrdup_const - conditionally duplicate an existing const string
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
* Note: Strings allocated by kstrdup_const should be freed by kfree_const and
* must not be passed to krealloc().
*
* Return: source string if it is in .rodata section otherwise
* fallback to kstrdup.
*/
const char *kstrdup_const(const char *s, gfp_t gfp)
{
if (is_kernel_rodata((unsigned long)s))
return s;
return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
/**
* kstrndup - allocate space for and copy an existing string
* @s: the string to duplicate
* @max: read at most @max chars from @s
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
* Note: Use kmemdup_nul() instead if the size is known exactly.
*
* Return: newly allocated copy of @s or %NULL in case of error
*/
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL;
}
EXPORT_SYMBOL(kstrndup);
/**
* kmemdup - duplicate region of memory
*
* @src: memory region to duplicate
* @len: memory region length
* @gfp: GFP mask to use
*
* Return: newly allocated copy of @src or %NULL in case of error,
* result is physically contiguous. Use kfree() to free.
*/
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
void *p;
p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
if (p)
memcpy(p, src, len);
return p;
}
EXPORT_SYMBOL(kmemdup_noprof);
/**
* kmemdup_array - duplicate a given array.
*
* @src: array to duplicate.
* @count: number of elements to duplicate from array.
* @element_size: size of each element of array.
* @gfp: GFP mask to use.
*
* Return: duplicated array of @src or %NULL in case of error,
* result is physically contiguous. Use kfree() to free.
*/
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);
/**
* kvmemdup - duplicate region of memory
*
* @src: memory region to duplicate
* @len: memory region length
* @gfp: GFP mask to use
*
* Return: newly allocated copy of @src or %NULL in case of error,
* result may be not physically contiguous. Use kvfree() to free.
*/
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
void *p;
p = kvmalloc(len, gfp);
if (p)
memcpy(p, src, len);
return p;
}
EXPORT_SYMBOL(kvmemdup);
/**
* kmemdup_nul - Create a NUL-terminated string from unterminated data
* @s: The data to stringify
* @len: The size of the data
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
* Return: newly allocated copy of @s with NUL-termination or %NULL in
* case of error
*/
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
return s ? __kmemdup_nul(s, len, gfp) : NULL;
}
EXPORT_SYMBOL(kmemdup_nul);
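/*
 * Example (illustrative sketch, not part of the original file): taking an
 * owned, always NUL-terminated copy of a label whose source buffer may not be
 * terminated. The copy is freed with kfree(). Names are hypothetical.
 */
static __maybe_unused char *example_copy_label(const char *raw, size_t raw_len)
{
return kmemdup_nul(raw, raw_len, GFP_KERNEL);
}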
static kmem_buckets *user_buckets __ro_after_init;
static int __init init_user_buckets(void)
{
user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);
return 0;
}
subsys_initcall(init_user_buckets);
/**
* memdup_user - duplicate memory region from user space
*
* @src: source address in user space
* @len: number of bytes to copy
*
* Return: an ERR_PTR() on failure. Result is physically
* contiguous, to be freed by kfree().
*/
void *memdup_user(const void __user *src, size_t len)
{
void *p;
p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
if (copy_from_user(p, src, len)) {
kfree(p);
return ERR_PTR(-EFAULT);
}
return p;
}
EXPORT_SYMBOL(memdup_user);
/**
* vmemdup_user - duplicate memory region from user space
*
* @src: source address in user space
* @len: number of bytes to copy
*
* Return: an ERR_PTR() on failure. Result may be not
* physically contiguous. Use kvfree() to free.
*/
void *vmemdup_user(const void __user *src, size_t len)
{
void *p;
p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
if (!p)
return ERR_PTR(-ENOMEM);
if (copy_from_user(p, src, len)) {
kvfree(p);
return ERR_PTR(-EFAULT);
}
return p;
}
EXPORT_SYMBOL(vmemdup_user);
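/*
 * Example (illustrative sketch, not part of the original file): copying a
 * fixed-size argument block from user space in an ioctl-style handler.
 * struct example_args and the handler name are hypothetical.
 */
struct example_args {
u32 flags;
u64 addr;
};
static __maybe_unused int example_handle_args(const void __user *uarg)
{
struct example_args *args;
args = memdup_user(uarg, sizeof(*args));
if (IS_ERR(args))
return PTR_ERR(args);
/* ... use args->flags and args->addr ... */
kfree(args);
return 0;
}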
/**
* strndup_user - duplicate an existing string from user space
* @s: The string to duplicate
* @n: Maximum number of bytes to copy, including the trailing NUL.
*
* Return: newly allocated copy of @s or an ERR_PTR() in case of error
*/
char *strndup_user(const char __user *s, long n)
{
char *p;
long length;
length = strnlen_user(s, n);
if (!length)
return ERR_PTR(-EFAULT);
if (length > n)
return ERR_PTR(-EINVAL);
p = memdup_user(s, length);
if (IS_ERR(p))
return p;
p[length - 1] = '\0';
return p;
}
EXPORT_SYMBOL(strndup_user);
/**
* memdup_user_nul - duplicate memory region from user space and NUL-terminate
*
* @src: source address in user space
* @len: number of bytes to copy
*
* Return: an ERR_PTR() on failure.
*/
void *memdup_user_nul(const void __user *src, size_t len)
{
char *p;
p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
if (copy_from_user(p, src, len)) {
kfree(p);
return ERR_PTR(-EFAULT);
}
p[len] = '\0';
return p;
}
EXPORT_SYMBOL(memdup_user_nul);
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(const struct vm_area_struct *vma)
{
struct task_struct * __maybe_unused t = current;
return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
/*
* Change backing file, only valid to use during initial VMA setup.
*/
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
/* Changing an anonymous vma with this is illegal */
get_file(file);
swap(vma->vm_file, file);
fput(file);
}
EXPORT_SYMBOL(vma_set_file);
#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
#endif
unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned long random_variable = 0;
if (current->flags & PF_RANDOMIZE) {
random_variable = get_random_long();
random_variable &= STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
#ifdef CONFIG_STACK_GROWSUP
return PAGE_ALIGN(stack_top) + random_variable;
#else
return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
/**
* randomize_page - Generate a random, page aligned address
* @start: The smallest acceptable address the caller will take.
* @range: The size of the area, starting at @start, within which the
* random address must fall.
*
* If @start + @range would overflow, @range is capped.
*
* NOTE: Historical use of randomize_range, which this replaces, presumed that
* @start was already page aligned. We now align it regardless.
*
* Return: A page aligned address within [start, start + range). On error,
* @start is returned.
*/
unsigned long randomize_page(unsigned long start, unsigned long range)
{
if (!PAGE_ALIGNED(start)) {
range -= PAGE_ALIGN(start) - start;
start = PAGE_ALIGN(start);
}
if (start > ULONG_MAX - range)
range = ULONG_MAX - start;
range >>= PAGE_SHIFT;
if (range == 0)
return start;
return start + (get_random_long() % range << PAGE_SHIFT);
}
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
/* Is the current task 32bit ? */
if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
return randomize_page(mm->brk, SZ_32M);
return randomize_page(mm->brk, SZ_1G);
}
unsigned long arch_mmap_rnd(void)
{
unsigned long rnd;
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
if (is_compat_task())
rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
static int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
/* On parisc the stack always grows up - so an unlimited stack should
* not be an indicator to use the legacy memory layout. */
if (rlim_stack->rlim_cur == RLIM_INFINITY &&
!IS_ENABLED(CONFIG_STACK_GROWSUP))
return 1;
return sysctl_legacy_va_layout;
}
/*
* Leave enough space between the mmap area and the stack to honour ulimit in
* the face of randomisation.
*/
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)
static unsigned long mmap_base(const unsigned long rnd, const struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
/*
* For an upwards growing stack the calculation is much simpler.
* Memory for the maximum stack size is reserved at the top of the
* task. mmap_base starts directly below the stack and grows
* downwards.
*/
return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_guard_gap;
/* Account for stack randomization if necessary */
if (current->flags & PF_RANDOMIZE)
pad += (STACK_RND_MASK << PAGE_SHIFT);
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
gap += pad;
if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}
void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
if (current->flags & PF_RANDOMIZE)
random_factor = arch_mmap_rnd();
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm_flags_clear(MMF_TOPDOWN, mm);
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm_flags_set(MMF_TOPDOWN, mm);
}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
mm_flags_clear(MMF_TOPDOWN, mm);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif
/**
* __account_locked_vm - account locked pages to an mm's locked_vm
* @mm: mm to account against
* @pages: number of pages to account
* @inc: %true if @pages should be considered positive, %false if not
* @task: task used to check RLIMIT_MEMLOCK
* @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
*
* Assumes @task and @mm are valid (i.e. at least one reference on each), and
* that mmap_lock is held as writer.
*
* Return:
* * 0 on success
* * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
*/
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
const struct task_struct *task, bool bypass_rlim)
{
unsigned long locked_vm, limit;
int ret = 0;
mmap_assert_write_locked(mm);
locked_vm = mm->locked_vm;
if (inc) {
if (!bypass_rlim) {
limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked_vm + pages > limit)
ret = -ENOMEM;
}
if (!ret)
mm->locked_vm = locked_vm + pages;
} else {
WARN_ON_ONCE(pages > locked_vm);
mm->locked_vm = locked_vm - pages;
}
pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
(void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
/**
* account_locked_vm - account locked pages to an mm's locked_vm
* @mm: mm to account against, may be NULL
* @pages: number of pages to account
* @inc: %true if @pages should be considered positive, %false if not
*
* Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
*
* Return:
* * 0 on success, or if mm is NULL
* * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
*/
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
int ret;
if (pages == 0 || !mm)
return 0;
mmap_write_lock(mm);
ret = __account_locked_vm(mm, pages, inc, current,
capable(CAP_IPC_LOCK));
mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
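/*
 * Example (illustrative sketch, not part of the original file): charging a
 * pinned buffer against RLIMIT_MEMLOCK before pinning it, and undoing the
 * charge when the buffer is released. Names are hypothetical.
 */
static __maybe_unused int example_charge_pinned(struct mm_struct *mm, unsigned long npages)
{
return account_locked_vm(mm, npages, true);
}
static __maybe_unused void example_uncharge_pinned(struct mm_struct *mm, unsigned long npages)
{
account_locked_vm(mm, npages, false);
}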
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff)
{
loff_t off = (loff_t)pgoff << PAGE_SHIFT;
unsigned long ret;
struct mm_struct *mm = current->mm;
unsigned long populate;
LIST_HEAD(uf);
ret = security_mmap_file(file, prot, flag);
if (!ret)
ret = fsnotify_mmap_perm(file, prot, off, len);
if (!ret) {
if (mmap_write_lock_killable(mm))
return -EINTR;
ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
&uf);
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(ret, populate);
}
return ret;
}
/*
* Perform a userland memory mapping into the current process address space. See
* the comment for do_mmap() for more details on this operation in general.
*
* This differs from do_mmap() in that:
*
* a. An offset parameter is provided rather than pgoff, which is both checked
* for overflow and page alignment.
* b. mmap locking is performed on the caller's behalf.
* c. Userfaultfd unmap events and memory population are handled.
*
* This means that this function performs essentially the same work as if
* userland were invoking mmap (2).
*
* Returns either an error, or the address at which the requested mapping has
* been performed.
*/
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
if (unlikely(offset + PAGE_ALIGN(len) < offset))
return -EINVAL;
if (unlikely(offset_in_page(offset)))
return -EINVAL;
return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
/**
* __vmalloc_array - allocate memory for a virtually contiguous array.
* @n: number of elements.
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);
/**
* vmalloc_array - allocate memory for a virtually contiguous array.
* @n: number of elements.
* @size: element size.
*/
void *vmalloc_array_noprof(size_t n, size_t size)
{
return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);
/**
* __vcalloc - allocate and zero memory for a virtually contiguous array.
* @n: number of elements.
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);
/**
* vcalloc - allocate and zero memory for a virtually contiguous array.
* @n: number of elements.
* @size: element size.
*/
void *vcalloc_noprof(size_t n, size_t size)
{
return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);
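/*
 * Illustrative sketch (not part of this file): callers normally use the
 * vmalloc_array()/vcalloc() wrappers (which resolve to the *_noprof variants
 * above) and release the memory with vfree(). The table below is
 * hypothetical.
 */
#if 0
static u64 *example_alloc_table(size_t nr_entries)
{
	/* Overflow-checked, zeroed, virtually contiguous allocation. */
	u64 *table = vcalloc(nr_entries, sizeof(*table));

	if (!table)
		return NULL;
	/* ... use table[0 .. nr_entries - 1] ... */
	return table;		/* caller frees with vfree(table) */
}
#endif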
struct anon_vma *folio_anon_vma(const struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;
if ((mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
return NULL;
return (void *)(mapping - FOLIO_MAPPING_ANON);
}
/**
* folio_mapping - Find the mapping where this folio is stored.
* @folio: The folio.
*
* For folios which are in the page cache, return the mapping that this
* page belongs to. Folios in the swap cache return the swap mapping
* this page is stored in (which is different from the mapping for the
* swap file or swap device where the data is stored).
*
* You can call this for folios which aren't in the swap cache or page
* cache and it will return NULL.
*/
struct address_space *folio_mapping(const struct folio *folio)
{
struct address_space *mapping;
/* This happens if someone calls flush_dcache_page on slab page */
if (unlikely(folio_test_slab(folio)))
return NULL;
if (unlikely(folio_test_swapcache(folio)))
return swap_address_space(folio->swap);
mapping = folio->mapping;
if ((unsigned long)mapping & FOLIO_MAPPING_FLAGS)
return NULL;
return mapping;
}
EXPORT_SYMBOL(folio_mapping);
/**
* folio_copy - Copy the contents of one folio to another.
* @dst: Folio to copy to.
* @src: Folio to copy from.
*
* The bytes in the folio represented by @src are copied to @dst.
* Assumes the caller has validated that @dst is at least as large as @src.
* Can be called in atomic context for order-0 folios, but if the folio is
* larger, it may sleep.
*/
void folio_copy(struct folio *dst, struct folio *src)
{
long i = 0;
long nr = folio_nr_pages(src);
for (;;) {
copy_highpage(folio_page(dst, i), folio_page(src, i));
if (++i == nr)
break;
cond_resched();
}
}
EXPORT_SYMBOL(folio_copy);
int folio_mc_copy(struct folio *dst, struct folio *src)
{
long nr = folio_nr_pages(src);
long i = 0;
for (;;) {
if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
return -EHWPOISON;
if (++i == nr)
break;
cond_resched();
}
return 0;
}
EXPORT_SYMBOL(folio_mc_copy);
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
static int sysctl_overcommit_ratio __read_mostly = 50;
static unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
#ifdef CONFIG_SYSCTL
static int overcommit_ratio_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
sysctl_overcommit_kbytes = 0;
return ret;
}
static void sync_overcommit_as(struct work_struct *dummy)
{
percpu_counter_sync(&vm_committed_as);
}
static int overcommit_policy_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int new_policy = -1;
int ret;
/*
* The deviation of sync_overcommit_as could be big with loose policies
* like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to the
* strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
* with the strict "NEVER", and to avoid a possible race condition (even
* though users usually won't switch to OVERCOMMIT_NEVER frequently),
* the switch is done in the following order:
* 1. changing the batch
* 2. sync percpu count on each CPU
* 3. switch the policy
*/
if (write) {
t = *table;
t.data = &new_policy;
ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
if (ret || new_policy == -1)
return ret;
mm_compute_batch(new_policy);
if (new_policy == OVERCOMMIT_NEVER)
schedule_on_each_cpu(sync_overcommit_as);
sysctl_overcommit_memory = new_policy;
} else {
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
return ret;
}
static int overcommit_kbytes_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
sysctl_overcommit_ratio = 0;
return ret;
}
static const struct ctl_table util_sysctl_table[] = {
{
.procname = "overcommit_memory",
.data = &sysctl_overcommit_memory,
.maxlen = sizeof(sysctl_overcommit_memory),
.mode = 0644,
.proc_handler = overcommit_policy_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
.maxlen = sizeof(sysctl_overcommit_ratio),
.mode = 0644,
.proc_handler = overcommit_ratio_handler,
},
{
.procname = "overcommit_kbytes",
.data = &sysctl_overcommit_kbytes,
.maxlen = sizeof(sysctl_overcommit_kbytes),
.mode = 0644,
.proc_handler = overcommit_kbytes_handler,
},
{
.procname = "user_reserve_kbytes",
.data = &sysctl_user_reserve_kbytes,
.maxlen = sizeof(sysctl_user_reserve_kbytes),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "admin_reserve_kbytes",
.data = &sysctl_admin_reserve_kbytes,
.maxlen = sizeof(sysctl_admin_reserve_kbytes),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
};
static int __init init_vm_util_sysctls(void)
{
register_sysctl_init("vm", util_sysctl_table);
return 0;
}
subsys_initcall(init_vm_util_sysctls);
#endif /* CONFIG_SYSCTL */
/*
* Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
*/
unsigned long vm_commit_limit(void)
{
unsigned long allowed;
if (sysctl_overcommit_kbytes)
allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
else
allowed = ((totalram_pages() - hugetlb_total_pages())
* sysctl_overcommit_ratio / 100);
allowed += total_swap_pages;
return allowed;
}
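/*
 * Worked example (illustrative, not part of this file): with the default
 * overcommit_ratio of 50, no overcommit_kbytes set, 8 GiB of non-hugetlb RAM
 * and 2 GiB of swap, the OVERCOMMIT_NEVER commit limit is
 * 8 GiB * 50 / 100 + 2 GiB = 6 GiB worth of pages.
 */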
/*
* Make sure vm_committed_as in one cacheline and not cacheline shared with
* other variables. It can be updated by several CPUs frequently.
*/
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
/*
* The global memory commitment made in the system can be a metric
* that can be used to drive ballooning decisions when Linux is hosted
* as a guest. On Hyper-V, the host implements a policy engine for dynamically
* balancing memory across competing virtual machines that are hosted.
* Several metrics drive this policy engine including the guest reported
* memory commitment.
*
* The time cost of this is very low for small platforms, and for a big
* platform like a 2S/36C/72T Skylake server, in the worst case where
* vm_committed_as's spinlock is under severe contention, the time cost
* could be about 30~40 microseconds.
*/
unsigned long vm_memory_committed(void)
{
return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
/*
* Check that a process has enough memory to allocate a new virtual
* mapping. 0 means there is enough memory for the allocation to
* succeed and -ENOMEM implies there is not.
*
* We currently support three overcommit policies, which are set via the
* vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
*
* Strict overcommit modes added 2002 Feb 26 by Alan Cox.
* Additional code 2002 Jul 20 by Robert Love.
*
* cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
*
* Note this is a helper function intended to be used by LSMs which
* wish to use this logic.
*/
int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin)
{
long allowed;
unsigned long bytes_failed;
vm_acct_memory(pages);
/*
* Sometimes we want to use more memory than we have
*/
if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
return 0;
if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
if (pages > totalram_pages() + total_swap_pages)
goto error;
return 0;
}
allowed = vm_commit_limit();
/*
* Reserve some for root
*/
if (!cap_sys_admin)
allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
/*
* Don't let a single process grow so big a user can't recover
*/
if (mm) {
long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
allowed -= min_t(long, mm->total_vm / 32, reserve);
}
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
return 0;
error:
bytes_failed = pages << PAGE_SHIFT;
pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
__func__, current->pid, current->comm, bytes_failed);
vm_unacct_memory(pages);
return -ENOMEM;
}
/**
* get_cmdline() - copy the cmdline value to a buffer.
* @task: the task whose cmdline value to copy.
* @buffer: the buffer to copy to.
* @buflen: the length of the buffer. Larger cmdline values are truncated
* to this length.
*
* Return: the size of the cmdline field copied. Note that the copy does
* not guarantee an ending NULL byte.
*/
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
int res = 0;
unsigned int len;
struct mm_struct *mm = get_task_mm(task);
unsigned long arg_start, arg_end, env_start, env_end;
if (!mm)
goto out;
if (!mm->arg_end)
goto out_mm; /* Shh! No looking before we're done */
spin_lock(&mm->arg_lock);
arg_start = mm->arg_start;
arg_end = mm->arg_end;
env_start = mm->env_start;
env_end = mm->env_end;
spin_unlock(&mm->arg_lock);
len = arg_end - arg_start;
if (len > buflen)
len = buflen;
res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
/*
* If the nul at the end of args has been overwritten, then
* assume application is using setproctitle(3).
*/
if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
len = strnlen(buffer, res);
if (len < res) {
res = len;
} else {
len = env_end - env_start;
if (len > buflen - res)
len = buflen - res;
res += access_process_vm(task, env_start,
buffer+res, len,
FOLL_FORCE);
res = strnlen(buffer, res);
}
}
out_mm:
mmput(mm);
out:
return res;
}
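/*
 * Illustrative sketch (not part of this file): get_cmdline() does not
 * guarantee NUL termination, so a hypothetical caller copies into a local
 * buffer and terminates it explicitly before printing. Note that argv
 * strings are NUL-separated within the buffer, so %s prints only argv[0].
 */
#if 0
static void example_print_cmdline(struct task_struct *task)
{
	char buf[128];
	int len;

	len = get_cmdline(task, buf, sizeof(buf) - 1);
	buf[len] = '\0';
	pr_info("pid %d cmdline: %s\n", task->pid, buf);
}
#endif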
int __weak memcmp_pages(struct page *page1, struct page *page2)
{
char *addr1, *addr2;
int ret;
addr1 = kmap_local_page(page1);
addr2 = kmap_local_page(page2);
ret = memcmp(addr1, addr2, PAGE_SIZE);
kunmap_local(addr2);
kunmap_local(addr1);
return ret;
}
#ifdef CONFIG_PRINTK
/**
* mem_dump_obj - Print available provenance information
* @object: object for which to find provenance information.
*
* This function uses pr_cont(), so that the caller is expected to have
* printed out whatever preamble is appropriate. The provenance information
* depends on the type of object and on how much debugging is enabled.
* For example, for a slab-cache object, the slab name is printed, and,
* if available, the return address and stack trace from the allocation
* and last free path of that object.
*/
void mem_dump_obj(void *object)
{
const char *type;
if (kmem_dump_obj(object))
return;
if (vmalloc_dump_obj(object))
return;
if (is_vmalloc_addr(object))
type = "vmalloc memory";
else if (virt_addr_valid(object))
type = "non-slab/vmalloc memory";
else if (object == NULL)
type = "NULL pointer";
else if (object == ZERO_SIZE_PTR)
type = "zero-size pointer";
else
type = "non-paged memory";
pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
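/*
 * Illustrative sketch (not part of this file): because mem_dump_obj() uses
 * pr_cont(), the (hypothetical) caller prints its own preamble first.
 */
#if 0
static void example_report_bad_object(void *obj)
{
	pr_err("unexpected object %px:", obj);
	mem_dump_obj(obj);	/* appends provenance information and a newline */
}
#endif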
#endif
/*
* A driver might set a page logically offline -- PageOffline() -- and
* turn the page inaccessible in the hypervisor; after that, access to page
* content can be fatal.
*
* Some special PFN walkers -- i.e., /proc/kcore -- read content of random
* pages after checking PageOffline(); however, these PFN walkers can race
* with drivers that set PageOffline().
*
* page_offline_freeze()/page_offline_thaw() allows for a subsystem to
* synchronize with such drivers, achieving that a page cannot be set
* PageOffline() while frozen.
*
* page_offline_begin()/page_offline_end() is used by drivers that care about
* such races when setting a page PageOffline().
*/
static DECLARE_RWSEM(page_offline_rwsem);
void page_offline_freeze(void)
{
down_read(&page_offline_rwsem);
}
void page_offline_thaw(void)
{
up_read(&page_offline_rwsem);
}
void page_offline_begin(void)
{
down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);
void page_offline_end(void)
{
up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
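/*
 * Illustrative sketch (not part of this file): a PFN walker brackets its
 * PageOffline() checks with page_offline_freeze()/page_offline_thaw() so
 * that no page can become PageOffline() while it is being read, while a
 * driver brackets the act of setting pages offline with
 * page_offline_begin()/page_offline_end(). The bodies of the critical
 * sections are only sketched.
 */
#if 0
static void example_pfn_walker(void)
{
	page_offline_freeze();
	/* ... check PageOffline() and read page content ... */
	page_offline_thaw();
}

static void example_driver_offline_pages(void)
{
	page_offline_begin();
	/* ... set PageOffline() and make the range inaccessible ... */
	page_offline_end();
}
#endif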
#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
long i, nr = folio_nr_pages(folio);
for (i = 0; i < nr; i++)
flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif
/**
* __compat_vma_mmap_prepare() - See description for compat_vma_mmap_prepare()
* for details. This is the same operation, only with a specific file operations
* struct which may or may not be the same as vma->vm_file->f_op.
* @f_op: The file operations whose .mmap_prepare() hook is specified.
* @file: The file which backs or will back the mapping.
* @vma: The VMA to apply the .mmap_prepare() hook to.
* Returns: 0 on success or error.
*/
int __compat_vma_mmap_prepare(const struct file_operations *f_op,
struct file *file, struct vm_area_struct *vma)
{
struct vm_area_desc desc = {
.mm = vma->vm_mm,
.file = file,
.start = vma->vm_start,
.end = vma->vm_end,
.pgoff = vma->vm_pgoff,
.vm_file = vma->vm_file,
.vm_flags = vma->vm_flags,
.page_prot = vma->vm_page_prot,
};
int err;
err = f_op->mmap_prepare(&desc);
if (err)
return err;
set_vma_from_desc(vma, &desc);
return 0;
}
EXPORT_SYMBOL(__compat_vma_mmap_prepare);
/**
* compat_vma_mmap_prepare() - Apply the file's .mmap_prepare() hook to an
* existing VMA.
* @file: The file which possesses an f_op->mmap_prepare() hook.
* @vma: The VMA to apply the .mmap_prepare() hook to.
*
* Ordinarily, .mmap_prepare() is invoked directly upon mmap(). However, certain
* stacked filesystems invoke a nested mmap hook of an underlying file.
*
* Until all filesystems are converted to use .mmap_prepare(), we must be
* conservative and continue to invoke these stacked filesystems using the
* deprecated .mmap() hook.
*
* However we have a problem if the underlying file system possesses an
* .mmap_prepare() hook, as we are in a different context when we invoke the
* .mmap() hook, already having a VMA to deal with.
*
* compat_vma_mmap_prepare() is a compatibility function that takes VMA state,
* establishes a struct vm_area_desc descriptor, passes it to the underlying
* .mmap_prepare() hook and applies any changes performed by it.
*
* Once the conversion of filesystems is complete this function will no longer
* be required and will be removed.
*
* Returns: 0 on success or error.
*/
int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma)
{
return __compat_vma_mmap_prepare(file->f_op, file, vma);
}
EXPORT_SYMBOL(compat_vma_mmap_prepare);
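/*
 * Illustrative sketch (not part of this file): a stacked filesystem whose
 * own ->mmap() hook forwards to a backing file can use
 * compat_vma_mmap_prepare() when that backing file has already been
 * converted to ->mmap_prepare(). All names other than
 * compat_vma_mmap_prepare() are hypothetical.
 */
#if 0
static int example_stacked_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *backing = example_get_backing_file(file); /* hypothetical */

	if (backing->f_op->mmap_prepare)
		return compat_vma_mmap_prepare(backing, vma);

	/* Legacy backing filesystem: invoke the deprecated hook directly. */
	return backing->f_op->mmap(backing, vma);
}
#endif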
static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio,
const struct page *page)
{
/*
* Only the first page of a high-order buddy page has PageBuddy() set.
* So we have to check manually whether this page is part of a high-
* order buddy page.
*/
if (PageBuddy(page))
ps->flags |= PAGE_SNAPSHOT_PG_BUDDY;
else if (page_count(page) == 0 && is_free_buddy_page(page))
ps->flags |= PAGE_SNAPSHOT_PG_BUDDY;
if (folio_test_idle(folio))
ps->flags |= PAGE_SNAPSHOT_PG_IDLE;
}
/**
* snapshot_page() - Create a snapshot of a struct page
* @ps: Pointer to a struct page_snapshot to store the page snapshot
* @page: The page to snapshot
*
* Create a snapshot of the page and store both its struct page and struct
* folio representations in @ps.
*
* A snapshot is marked as "faithful" if the compound state of @page was
* stable and allowed safe reconstruction of the folio representation. In
* rare cases where this is not possible (e.g. due to folio splitting),
* snapshot_page() falls back to treating @page as a single page and the
* snapshot is marked as "unfaithful". The snapshot_page_is_faithful()
* helper can be used to check for this condition.
*/
void snapshot_page(struct page_snapshot *ps, const struct page *page)
{
unsigned long head, nr_pages = 1;
struct folio *foliop;
int loops = 5;
ps->pfn = page_to_pfn(page);
ps->flags = PAGE_SNAPSHOT_FAITHFUL;
again:
memset(&ps->folio_snapshot, 0, sizeof(struct folio));
memcpy(&ps->page_snapshot, page, sizeof(*page));
head = ps->page_snapshot.compound_head;
if ((head & 1) == 0) {
ps->idx = 0;
foliop = (struct folio *)&ps->page_snapshot;
if (!folio_test_large(foliop)) {
set_ps_flags(ps, page_folio(page), page);
memcpy(&ps->folio_snapshot, foliop,
sizeof(struct page));
return;
}
foliop = (struct folio *)page;
} else {
foliop = (struct folio *)(head - 1);
ps->idx = folio_page_idx(foliop, page);
}
if (ps->idx < MAX_FOLIO_NR_PAGES) {
memcpy(&ps->folio_snapshot, foliop, 2 * sizeof(struct page));
nr_pages = folio_nr_pages(&ps->folio_snapshot);
if (nr_pages > 1)
memcpy(&ps->folio_snapshot.__page_2, &foliop->__page_2,
sizeof(struct page));
set_ps_flags(ps, foliop, page);
}
if (ps->idx > nr_pages) {
if (loops-- > 0)
goto again;
clear_compound_head(&ps->page_snapshot);
foliop = (struct folio *)&ps->page_snapshot;
memcpy(&ps->folio_snapshot, foliop, sizeof(struct page));
ps->flags = 0;
ps->idx = 0;
}
}
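/*
 * Illustrative sketch (not part of this file): a (hypothetical) dumping
 * helper takes a snapshot and only trusts the folio view when the snapshot
 * is faithful.
 */
#if 0
static void example_dump_page_state(const struct page *page)
{
	struct page_snapshot ps;

	snapshot_page(&ps, page);

	if (snapshot_page_is_faithful(&ps))
		pr_info("pfn %lx: folio of %ld pages\n", ps.pfn,
			folio_nr_pages(&ps.folio_snapshot));
	else
		pr_info("pfn %lx: unstable compound state, treated as a single page\n",
			ps.pfn);
}
#endif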
#ifdef CONFIG_MMU
/**
* folio_pte_batch - detect a PTE batch for a large folio
* @folio: The large folio to detect a PTE batch for.
* @ptep: Page table pointer for the first entry.
* @pte: Page table entry for the first page.
* @max_nr: The maximum number of table entries to consider.
*
* This is a simplified variant of folio_pte_batch_flags().
*
* Detect a PTE batch: consecutive (present) PTEs that map consecutive
* pages of the same large folio in a single VMA and a single page table.
*
* All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
* the accessed bit, writable bit, dirty bit and soft-dirty bit.
*
* ptep must map any page of the folio. max_nr must be at least one and
* must be limited by the caller so scanning cannot exceed a single VMA and
* a single page table.
*
* Return: the number of table entries in the batch.
*/
unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
unsigned int max_nr)
{
return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, 0);
}
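/*
 * Illustrative sketch (not part of this file): a PTE scan over a large folio
 * can consume a whole batch at once instead of visiting each entry. The
 * surrounding walk (obtaining @ptep, @pte and @max_nr for a single VMA and a
 * single page table) is assumed and not shown.
 */
#if 0
static unsigned int example_consume_batch(struct folio *folio, pte_t *ptep,
					  pte_t pte, unsigned int max_nr)
{
	unsigned int nr = folio_pte_batch(folio, ptep, pte, max_nr);

	/* ... operate on the nr consecutive PTEs / folio pages ... */
	return nr;	/* advance the scan by nr entries */
}
#endif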
#endif /* CONFIG_MMU */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
/**
* page_range_contiguous - test whether the page range is contiguous
* @page: the start of the page range.
* @nr_pages: the number of pages in the range.
*
* Test whether the page range is contiguous, such that they can be iterated
* naively, corresponding to iterating a contiguous PFN range.
*
* This function should primarily be used for debug checks, or when
* working with page ranges that are not naturally contiguous (unlike,
* e.g., pages within a folio, which always are).
*
* Returns true if contiguous, otherwise false.
*/
bool page_range_contiguous(const struct page *page, unsigned long nr_pages)
{
const unsigned long start_pfn = page_to_pfn(page);
const unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn;
/*
* The memmap is allocated per memory section, so no need to check
* within the first section. However, we need to check each other
* spanned memory section once, making sure the first page in a
* section could similarly be reached by just iterating pages.
*/
for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
pfn < end_pfn; pfn += PAGES_PER_SECTION)
if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn)))
return false;
return true;
}
EXPORT_SYMBOL(page_range_contiguous);
#endif
// SPDX-License-Identifier: GPL-2.0
#include <kunit/visibility.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include "printk_ringbuffer.h"
#include "internal.h"
/**
* DOC: printk_ringbuffer overview
*
* Data Structure
* --------------
* The printk_ringbuffer is made up of 2 internal ringbuffers:
*
* desc_ring
* A ring of descriptors and their meta data (such as sequence number,
* timestamp, loglevel, etc.) as well as internal state information about
* the record and logical positions specifying where in the other
* ringbuffer the text strings are located.
*
* text_data_ring
* A ring of data blocks. A data block consists of an unsigned long
* integer (ID) that maps to a desc_ring index followed by the text
* string of the record.
*
* The internal state information of a descriptor is the key element to allow
* readers and writers to locklessly synchronize access to the data.
*
* Implementation
* --------------
*
* Descriptor Ring
* ~~~~~~~~~~~~~~~
* The descriptor ring is an array of descriptors. A descriptor contains
* essential meta data to track the data of a printk record using
* blk_lpos structs pointing to associated text data blocks (see
* "Data Rings" below). Each descriptor is assigned an ID that maps
* directly to index values of the descriptor array and has a state. The ID
* and the state are bitwise combined into a single descriptor field named
* @state_var, allowing ID and state to be synchronously and atomically
* updated.
*
* Descriptors have four states:
*
* reserved
* A writer is modifying the record.
*
* committed
* The record and all its data are written. A writer can reopen the
* descriptor (transitioning it back to reserved), but in the committed
* state the data is consistent.
*
* finalized
* The record and all its data are complete and available for reading. A
* writer cannot reopen the descriptor.
*
* reusable
* The record exists, but its text and/or meta data may no longer be
* available.
*
* Querying the @state_var of a record requires providing the ID of the
* descriptor to query. This can yield a possible fifth (pseudo) state:
*
* miss
* The descriptor being queried has an unexpected ID.
*
* The descriptor ring has a @tail_id that contains the ID of the oldest
* descriptor and @head_id that contains the ID of the newest descriptor.
*
* When a new descriptor should be created (and the ring is full), the tail
* descriptor is invalidated by first transitioning to the reusable state and
* then invalidating all tail data blocks up to and including the data blocks
* associated with the tail descriptor (for the text ring). Then
* @tail_id is advanced, followed by advancing @head_id. And finally the
* @state_var of the new descriptor is initialized to the new ID and reserved
* state.
*
* The @tail_id can only be advanced if the new @tail_id would be in the
* committed or reusable queried state. This makes it possible that a valid
* sequence number of the tail is always available.
*
* Descriptor Finalization
* ~~~~~~~~~~~~~~~~~~~~~~~
* When a writer calls the commit function prb_commit(), record data is
* fully stored and is consistent within the ringbuffer. However, a writer can
* reopen that record, claiming exclusive access (as with prb_reserve()), and
* modify that record. When finished, the writer must again commit the record.
*
* In order for a record to be made available to readers (and also become
* recyclable for writers), it must be finalized. A finalized record cannot be
* reopened and can never become "unfinalized". Record finalization can occur
* in three different scenarios:
*
* 1) A writer can simultaneously commit and finalize its record by calling
* prb_final_commit() instead of prb_commit().
*
* 2) When a new record is reserved and the previous record has been
* committed via prb_commit(), that previous record is automatically
* finalized.
*
* 3) When a record is committed via prb_commit() and a newer record
* already exists, the record being committed is automatically finalized.
*
* Data Ring
* ~~~~~~~~~
* The text data ring is a byte array composed of data blocks. Data blocks are
* referenced by blk_lpos structs that point to the logical position of the
* beginning of a data block and the beginning of the next adjacent data
* block. Logical positions are mapped directly to index values of the byte
* array ringbuffer.
*
* Each data block consists of an ID followed by the writer data. The ID is
* the identifier of a descriptor that is associated with the data block. A
* given data block is considered valid if all of the following conditions
* are met:
*
* 1) The descriptor associated with the data block is in the committed
* or finalized queried state.
*
* 2) The blk_lpos struct within the descriptor associated with the data
* block references back to the same data block.
*
* 3) The data block is within the head/tail logical position range.
*
* If the writer data of a data block would extend beyond the end of the
* byte array, only the ID of the data block is stored at the logical
* position and the full data block (ID and writer data) is stored at the
* beginning of the byte array. The referencing blk_lpos will point to the
* ID before the wrap and the next data block will be at the logical
* position adjacent the full data block after the wrap.
*
* Data rings have a @tail_lpos that points to the beginning of the oldest
* data block and a @head_lpos that points to the logical position of the
* next (not yet existing) data block.
*
* When a new data block should be created (and the ring is full), tail data
* blocks will first be invalidated by putting their associated descriptors
* into the reusable state and then pushing the @tail_lpos forward beyond
* them. Then the @head_lpos is pushed forward and is associated with a new
* descriptor. If a data block is not valid, the @tail_lpos cannot be
* advanced beyond it.
*
* Info Array
* ~~~~~~~~~~
* The general meta data of printk records are stored in printk_info structs,
* stored in an array with the same number of elements as the descriptor ring.
* Each info corresponds to the descriptor of the same index in the
* descriptor ring. Info validity is confirmed by evaluating the corresponding
* descriptor before and after loading the info.
*
* Usage
* -----
* Here are some simple examples demonstrating writers and readers. For the
* examples a global ringbuffer (test_rb) is available (which is not the
* actual ringbuffer used by printk)::
*
* DEFINE_PRINTKRB(test_rb, 15, 5);
*
* This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
* 1 MiB (2 ^ (15 + 5)) for text data.
*
* Sample writer code::
*
* const char *textstr = "message text";
* struct prb_reserved_entry e;
* struct printk_record r;
*
* // specify how much to allocate
* prb_rec_init_wr(&r, strlen(textstr) + 1);
*
* if (prb_reserve(&e, &test_rb, &r)) {
* snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
*
* r.info->text_len = strlen(textstr);
* r.info->ts_nsec = local_clock();
* r.info->caller_id = printk_caller_id();
*
* // commit and finalize the record
* prb_final_commit(&e);
* }
*
* Note that additional writer functions are available to extend a record
* after it has been committed but not yet finalized. This can be done as
* long as no new records have been reserved and the caller is the same.
*
* Sample writer code (record extending)::
*
* // alternate rest of previous example
*
* r.info->text_len = strlen(textstr);
* r.info->ts_nsec = local_clock();
* r.info->caller_id = printk_caller_id();
*
* // commit the record (but do not finalize yet)
* prb_commit(&e);
* }
*
* ...
*
* // specify additional 5 bytes text space to extend
* prb_rec_init_wr(&r, 5);
*
* // try to extend, but only if it does not exceed 32 bytes
* if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id(), 32)) {
* snprintf(&r.text_buf[r.info->text_len],
* r.text_buf_size - r.info->text_len, "hello");
*
* r.info->text_len += 5;
*
* // commit and finalize the record
* prb_final_commit(&e);
* }
*
* Sample reader code::
*
* struct printk_info info;
* struct printk_record r;
* char text_buf[32];
* u64 seq;
*
* prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
*
* prb_for_each_record(0, &test_rb, &seq, &r) {
* if (info.seq != seq)
* pr_warn("lost %llu records\n", info.seq - seq);
*
* if (info.text_len > r.text_buf_size) {
* pr_warn("record %llu text truncated\n", info.seq);
* text_buf[r.text_buf_size - 1] = 0;
* }
*
* pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec,
* &text_buf[0]);
* }
*
* Note that additional less convenient reader functions are available to
* allow complex record access.
*
* ABA Issues
* ~~~~~~~~~~
* To help avoid ABA issues, descriptors are referenced by IDs (array index
* values combined with tagged bits counting array wraps) and data blocks are
* referenced by logical positions (array index values combined with tagged
* bits counting array wraps). However, on 32-bit systems the number of
* tagged bits is relatively small such that an ABA incident is (at least
* theoretically) possible. For example, if 4 million maximally sized (1KiB)
* printk messages were to occur in NMI context on a 32-bit system, the
* interrupted context would not be able to recognize that the 32-bit integer
* completely wrapped and thus represents a different data block than the one
* the interrupted context expects.
*
* To help combat this possibility, additional state checking is performed
* (such as using cmpxchg() even though set() would suffice). These extra
* checks are commented as such and will hopefully catch any ABA issue that
* a 32-bit system might experience.
*
* Memory Barriers
* ~~~~~~~~~~~~~~~
* Multiple memory barriers are used. To simplify proving correctness and
* generating litmus tests, lines of code related to memory barriers
* (loads, stores, and the associated memory barriers) are labeled::
*
* LMM(function:letter)
*
* Comments reference the labels using only the "function:letter" part.
*
* The memory barrier pairs and their ordering are:
*
* desc_reserve:D / desc_reserve:B
* push descriptor tail (id), then push descriptor head (id)
*
* desc_reserve:D / data_push_tail:B
* push data tail (lpos), then set new descriptor reserved (state)
*
* desc_reserve:D / desc_push_tail:C
* push descriptor tail (id), then set new descriptor reserved (state)
*
* desc_reserve:D / prb_first_seq:C
* push descriptor tail (id), then set new descriptor reserved (state)
*
* desc_reserve:F / desc_read:D
* set new descriptor id and reserved (state), then allow writer changes
*
* data_alloc:A (or data_realloc:A) / desc_read:D
* set old descriptor reusable (state), then modify new data block area
*
* data_alloc:A (or data_realloc:A) / data_push_tail:B
* push data tail (lpos), then modify new data block area
*
* _prb_commit:B / desc_read:B
* store writer changes, then set new descriptor committed (state)
*
* desc_reopen_last:A / _prb_commit:B
* set descriptor reserved (state), then read descriptor data
*
* _prb_commit:B / desc_reserve:D
* set new descriptor committed (state), then check descriptor head (id)
*
* data_push_tail:D / data_push_tail:A
* set descriptor reusable (state), then push data tail (lpos)
*
* desc_push_tail:B / desc_reserve:D
* set descriptor reusable (state), then push descriptor tail (id)
*
* desc_update_last_finalized:A / desc_last_finalized_seq:A
* store finalized record, then set new highest finalized sequence number
*/
#define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits)
#define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1)
#define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits)
#define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1)
/* Determine the data array index from a logical position. */
#define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring))
/* Determine the desc array index from an ID or sequence number. */
#define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring))
/* Determine how many times the data array has wrapped. */
#define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits)
/* Determine if a logical position refers to a data-less block. */
#define LPOS_DATALESS(lpos) ((lpos) & 1UL)
#define BLK_DATALESS(blk) (LPOS_DATALESS((blk)->begin) && \
LPOS_DATALESS((blk)->next))
/* Get the logical position at index 0 of the current wrap. */
#define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
((lpos) & ~DATA_SIZE_MASK(data_ring))
/* Get the ID for the same index of the previous wrap as the given ID. */
#define DESC_ID_PREV_WRAP(desc_ring, id) \
DESC_ID((id) - DESCS_COUNT(desc_ring))
/*
* A data block: mapped directly to the beginning of the data block area
* specified as a logical position within the data ring.
*
* @id: the ID of the associated descriptor
* @data: the writer data
*
* Note that the size of a data block is only known by its associated
* descriptor.
*/
struct prb_data_block {
unsigned long id;
char data[];
};
/*
* Return the descriptor associated with @n. @n can be either a
* descriptor ID or a sequence number.
*/
static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
{
return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
}
/*
* Return the printk_info associated with @n. @n can be either a
* descriptor ID or a sequence number.
*/
static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n)
{
return &desc_ring->infos[DESC_INDEX(desc_ring, n)];
}
static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
unsigned long begin_lpos)
{
return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
}
/*
* Increase the data size to account for data block meta data plus any
* padding so that the adjacent data block is aligned on the ID size.
*/
static unsigned int to_blk_size(unsigned int size)
{
struct prb_data_block *db = NULL;
size += sizeof(*db);
size = ALIGN(size, sizeof(db->id));
return size;
}
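/*
 * Worked example (illustrative): on a 64-bit system, sizeof(db->id) == 8,
 * so a 13-byte message yields to_blk_size(13) == ALIGN(13 + 8, 8) == 24
 * bytes: 8 bytes of block ID, 13 bytes of writer data and 3 bytes of
 * padding so that the adjacent data block starts ID-aligned.
 */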
/*
* Sanity checker for reserve size. The ringbuffer code assumes that a data
* block does not exceed the maximum possible size that could fit within the
* ringbuffer. This function provides that basic size check so that the
* assumption is safe. In particular, it guarantees that data_push_tail() will
* never attempt to push the tail beyond the head.
*/
static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
{
/* Data-less blocks take no space. */
if (size == 0)
return true;
/*
* If data blocks were allowed to be larger than half the data ring
* size, a wrapping data block could require more space than the full
* ringbuffer.
*/
return to_blk_size(size) <= DATA_SIZE(data_ring) / 2;
}
/* Query the state of a descriptor. */
static enum desc_state get_desc_state(unsigned long id,
unsigned long state_val)
{
if (id != DESC_ID(state_val))
return desc_miss;
return DESC_STATE(state_val);
}
/*
* Get a copy of a specified descriptor and return its queried state. If the
* descriptor is in an inconsistent state (miss or reserved), the caller can
* only expect the descriptor's @state_var field to be valid.
*
* The sequence number and caller_id can be optionally retrieved. Like all
* non-state_var data, they are only valid if the descriptor is in a
* consistent state.
*/
static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
unsigned long id, struct prb_desc *desc_out,
u64 *seq_out, u32 *caller_id_out)
{
struct printk_info *info = to_info(desc_ring, id);
struct prb_desc *desc = to_desc(desc_ring, id);
atomic_long_t *state_var = &desc->state_var;
enum desc_state d_state;
unsigned long state_val;
/* Check the descriptor state. */
state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
d_state = get_desc_state(id, state_val);
if (d_state == desc_miss || d_state == desc_reserved) {
/*
* The descriptor is in an inconsistent state. Set at least
* @state_var so that the caller can see the details of
* the inconsistent state.
*/
goto out;
}
/*
* Guarantee the state is loaded before copying the descriptor
* content. This avoids copying obsolete descriptor content that might
* not apply to the descriptor state. This pairs with _prb_commit:B.
*
* Memory barrier involvement:
*
* If desc_read:A reads from _prb_commit:B, then desc_read:C reads
* from _prb_commit:A.
*
* Relies on:
*
* WMB from _prb_commit:A to _prb_commit:B
* matching
* RMB from desc_read:A to desc_read:C
*/
smp_rmb(); /* LMM(desc_read:B) */
/*
* Copy the descriptor data. The data is not valid until the
* state has been re-checked. A memcpy() for all of @desc
* cannot be used because of the atomic_t @state_var field.
*/
if (desc_out) {
memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
}
if (seq_out)
*seq_out = info->seq; /* also part of desc_read:C */
if (caller_id_out)
*caller_id_out = info->caller_id; /* also part of desc_read:C */
/*
* 1. Guarantee the descriptor content is loaded before re-checking
* the state. This avoids reading an obsolete descriptor state
* that may not apply to the copied content. This pairs with
* desc_reserve:F.
*
* Memory barrier involvement:
*
* If desc_read:C reads from desc_reserve:G, then desc_read:E
* reads from desc_reserve:F.
*
* Relies on:
*
* WMB from desc_reserve:F to desc_reserve:G
* matching
* RMB from desc_read:C to desc_read:E
*
* 2. Guarantee the record data is loaded before re-checking the
* state. This avoids reading an obsolete descriptor state that may
* not apply to the copied data. This pairs with data_alloc:A and
* data_realloc:A.
*
* Memory barrier involvement:
*
* If copy_data:A reads from data_alloc:B, then desc_read:E
* reads from desc_make_reusable:A.
*
* Relies on:
*
* MB from desc_make_reusable:A to data_alloc:B
* matching
* RMB from desc_read:C to desc_read:E
*
* Note: desc_make_reusable:A and data_alloc:B can be different
* CPUs. However, the data_alloc:B CPU (which performs the
* full memory barrier) must have previously seen
* desc_make_reusable:A.
*/
smp_rmb(); /* LMM(desc_read:D) */
/*
* The data has been copied. Return the current descriptor state,
* which may have changed since the load above.
*/
state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
d_state = get_desc_state(id, state_val);
out:
if (desc_out)
atomic_long_set(&desc_out->state_var, state_val);
return d_state;
}
/*
* Take a specified descriptor out of the finalized state by attempting
* the transition from finalized to reusable. Either this context or some
* other context will have been successful.
*/
static void desc_make_reusable(struct prb_desc_ring *desc_ring,
unsigned long id)
{
unsigned long val_finalized = DESC_SV(id, desc_finalized);
unsigned long val_reusable = DESC_SV(id, desc_reusable);
struct prb_desc *desc = to_desc(desc_ring, id);
atomic_long_t *state_var = &desc->state_var;
atomic_long_cmpxchg_relaxed(state_var, val_finalized,
val_reusable); /* LMM(desc_make_reusable:A) */
}
/*
* Given the text data ring, put the associated descriptor of each
* data block from @lpos_begin until @lpos_end into the reusable state.
*
* If there is any problem making the associated descriptor reusable, either
* the descriptor has not yet been finalized or another writer context has
* already pushed the tail lpos past the problematic data block. Regardless,
* on error the caller can re-load the tail lpos to determine the situation.
*/
static bool data_make_reusable(struct printk_ringbuffer *rb,
unsigned long lpos_begin,
unsigned long lpos_end,
unsigned long *lpos_out)
{
struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_desc_ring *desc_ring = &rb->desc_ring;
struct prb_data_block *blk;
enum desc_state d_state;
struct prb_desc desc;
struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos;
unsigned long id;
/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
blk = to_block(data_ring, lpos_begin);
/*
* Load the block ID from the data block. This is a data race
* against a writer that may have newly reserved this data
* area. If the loaded value matches a valid descriptor ID,
* the blk_lpos of that descriptor will be checked to make
* sure it points back to this data block. If the check fails,
* the data area has been recycled by another writer.
*/
id = blk->id; /* LMM(data_make_reusable:A) */
d_state = desc_read(desc_ring, id, &desc,
NULL, NULL); /* LMM(data_make_reusable:B) */
switch (d_state) {
case desc_miss:
case desc_reserved:
case desc_committed:
return false;
case desc_finalized:
/*
* This data block is invalid if the descriptor
* does not point back to it.
*/
if (blk_lpos->begin != lpos_begin)
return false;
desc_make_reusable(desc_ring, id);
break;
case desc_reusable:
/*
* This data block is invalid if the descriptor
* does not point back to it.
*/
if (blk_lpos->begin != lpos_begin)
return false;
break;
}
/* Advance @lpos_begin to the next data block. */
lpos_begin = blk_lpos->next;
}
*lpos_out = lpos_begin;
return true;
}
/*
* Advance the data ring tail to at least @lpos. This function puts
* descriptors into the reusable state if the tail is pushed beyond
* their associated data block.
*/
static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
{
struct prb_data_ring *data_ring = &rb->text_data_ring;
unsigned long tail_lpos_new;
unsigned long tail_lpos;
unsigned long next_lpos;
/* If @lpos is from a data-less block, there is nothing to do. */
if (LPOS_DATALESS(lpos))
return true;
/*
* Any descriptor states that have transitioned to reusable due to the
* data tail being pushed to this loaded value will be visible to this
* CPU. This pairs with data_push_tail:D.
*
* Memory barrier involvement:
*
* If data_push_tail:A reads from data_push_tail:D, then this CPU can
* see desc_make_reusable:A.
*
* Relies on:
*
* MB from desc_make_reusable:A to data_push_tail:D
* matches
* READFROM from data_push_tail:D to data_push_tail:A
* thus
* READFROM from desc_make_reusable:A to this CPU
*/
tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */
/*
* Loop until the tail lpos is at or beyond @lpos. This condition
* may already be satisfied, resulting in no full memory barrier
* from data_push_tail:D being performed. However, since this CPU
* sees the new tail lpos, any descriptor states that transitioned to
* the reusable state must already be visible.
*/
while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
/*
* Make all descriptors reusable that are associated with
* data blocks before @lpos.
*/
if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) {
/*
* 1. Guarantee the block ID loaded in
* data_make_reusable() is performed before
* reloading the tail lpos. The failed
* data_make_reusable() may be due to a newly
* recycled data area causing the tail lpos to
* have been previously pushed. This pairs with
* data_alloc:A and data_realloc:A.
*
* Memory barrier involvement:
*
* If data_make_reusable:A reads from data_alloc:B,
* then data_push_tail:C reads from
* data_push_tail:D.
*
* Relies on:
*
* MB from data_push_tail:D to data_alloc:B
* matching
* RMB from data_make_reusable:A to
* data_push_tail:C
*
* Note: data_push_tail:D and data_alloc:B can be
* different CPUs. However, the data_alloc:B
* CPU (which performs the full memory
* barrier) must have previously seen
* data_push_tail:D.
*
* 2. Guarantee the descriptor state loaded in
* data_make_reusable() is performed before
* reloading the tail lpos. The failed
* data_make_reusable() may be due to a newly
* recycled descriptor causing the tail lpos to
* have been previously pushed. This pairs with
* desc_reserve:D.
*
* Memory barrier involvement:
*
* If data_make_reusable:B reads from
* desc_reserve:F, then data_push_tail:C reads
* from data_push_tail:D.
*
* Relies on:
*
* MB from data_push_tail:D to desc_reserve:F
* matching
* RMB from data_make_reusable:B to
* data_push_tail:C
*
* Note: data_push_tail:D and desc_reserve:F can
* be different CPUs. However, the
* desc_reserve:F CPU (which performs the
* full memory barrier) must have previously
* seen data_push_tail:D.
*/
smp_rmb(); /* LMM(data_push_tail:B) */
tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
); /* LMM(data_push_tail:C) */
if (tail_lpos_new == tail_lpos)
return false;
/* Another CPU pushed the tail. Try again. */
tail_lpos = tail_lpos_new;
continue;
}
/*
* Guarantee any descriptor states that have transitioned to
* reusable are stored before pushing the tail lpos. A full
* memory barrier is needed since other CPUs may have made
* the descriptor states reusable. This pairs with
* data_push_tail:A.
*/
if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
next_lpos)) { /* LMM(data_push_tail:D) */
break;
}
}
return true;
}
/*
* Advance the desc ring tail. This function advances the tail by one
* descriptor, thus invalidating the oldest descriptor. Before advancing
* the tail, the tail descriptor is made reusable and all data blocks up to
* and including the descriptor's data block are invalidated (i.e. the data
* ring tail is pushed past the data block of the descriptor being made
* reusable).
*/
static bool desc_push_tail(struct printk_ringbuffer *rb,
unsigned long tail_id)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
enum desc_state d_state;
struct prb_desc desc;
d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL);
switch (d_state) {
case desc_miss:
/*
* If the ID is exactly 1 wrap behind the expected, it is
* in the process of being reserved by another writer and
* must be considered reserved.
*/
if (DESC_ID(atomic_long_read(&desc.state_var)) ==
DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
return false;
}
/*
* The ID has changed. Another writer must have pushed the
* tail and recycled the descriptor already. Success is
* returned because the caller is only interested in the
* specified tail being pushed, which it was.
*/
return true;
case desc_reserved:
case desc_committed:
return false;
case desc_finalized:
desc_make_reusable(desc_ring, tail_id);
break;
case desc_reusable:
break;
}
/*
* Data blocks must be invalidated before their associated
* descriptor can be made available for recycling. Invalidating
* them later is not possible because there is no way to trust
* data blocks once their associated descriptor is gone.
*/
if (!data_push_tail(rb, desc.text_blk_lpos.next))
return false;
/*
* Check the next descriptor after @tail_id before pushing the tail
* to it because the tail must always be in a finalized or reusable
* state. The implementation of prb_first_seq() relies on this.
*
* A successful read implies that the next descriptor is less than or
* equal to @head_id so there is no risk of pushing the tail past the
* head.
*/
d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc,
NULL, NULL); /* LMM(desc_push_tail:A) */
if (d_state == desc_finalized || d_state == desc_reusable) {
/*
* Guarantee any descriptor states that have transitioned to
* reusable are stored before pushing the tail ID. This allows
* verifying the recycled descriptor state. A full memory
* barrier is needed since other CPUs may have made the
* descriptor states reusable. This pairs with desc_reserve:D.
*/
atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
} else {
/*
* Guarantee the last state load from desc_read() is before
* reloading @tail_id in order to see a new tail ID in the
* case that the descriptor has been recycled. This pairs
* with desc_reserve:D.
*
* Memory barrier involvement:
*
* If desc_push_tail:A reads from desc_reserve:F, then
* desc_push_tail:D reads from desc_push_tail:B.
*
* Relies on:
*
* MB from desc_push_tail:B to desc_reserve:F
* matching
* RMB from desc_push_tail:A to desc_push_tail:D
*
* Note: desc_push_tail:B and desc_reserve:F can be different
* CPUs. However, the desc_reserve:F CPU (which performs
* the full memory barrier) must have previously seen
* desc_push_tail:B.
*/
smp_rmb(); /* LMM(desc_push_tail:C) */
/*
* Re-check the tail ID. The descriptor following @tail_id is
* not in an allowed tail state. But if the tail has since
* been moved by another CPU, then it does not matter.
*/
if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
return false;
}
return true;
}
/* Reserve a new descriptor, invalidating the oldest if necessary. */
static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
unsigned long prev_state_val;
unsigned long id_prev_wrap;
struct prb_desc *desc;
unsigned long head_id;
unsigned long id;
head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
do {
id = DESC_ID(head_id + 1);
id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
/*
* Guarantee the head ID is read before reading the tail ID.
* Since the tail ID is updated before the head ID, this
* guarantees that @id_prev_wrap is never ahead of the tail
* ID. This pairs with desc_reserve:D.
*
* Memory barrier involvement:
*
* If desc_reserve:A reads from desc_reserve:D, then
* desc_reserve:C reads from desc_push_tail:B.
*
* Relies on:
*
* MB from desc_push_tail:B to desc_reserve:D
* matching
* RMB from desc_reserve:A to desc_reserve:C
*
* Note: desc_push_tail:B and desc_reserve:D can be different
* CPUs. However, the desc_reserve:D CPU (which performs
* the full memory barrier) must have previously seen
* desc_push_tail:B.
*/
smp_rmb(); /* LMM(desc_reserve:B) */
if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
)) { /* LMM(desc_reserve:C) */
/*
* Make space for the new descriptor by
* advancing the tail.
*/
if (!desc_push_tail(rb, id_prev_wrap))
return false;
}
/*
* 1. Guarantee the tail ID is read before validating the
* recycled descriptor state. A read memory barrier is
* sufficient for this. This pairs with desc_push_tail:B.
*
* Memory barrier involvement:
*
* If desc_reserve:C reads from desc_push_tail:B, then
* desc_reserve:E reads from desc_make_reusable:A.
*
* Relies on:
*
* MB from desc_make_reusable:A to desc_push_tail:B
* matching
* RMB from desc_reserve:C to desc_reserve:E
*
* Note: desc_make_reusable:A and desc_push_tail:B can be
* different CPUs. However, the desc_push_tail:B CPU
* (which performs the full memory barrier) must have
* previously seen desc_make_reusable:A.
*
* 2. Guarantee the tail ID is stored before storing the head
* ID. This pairs with desc_reserve:B.
*
* 3. Guarantee any data ring tail changes are stored before
* recycling the descriptor. Data ring tail changes can
* happen via desc_push_tail()->data_push_tail(). A full
* memory barrier is needed since another CPU may have
* pushed the data ring tails. This pairs with
* data_push_tail:B.
*
* 4. Guarantee a new tail ID is stored before recycling the
* descriptor. A full memory barrier is needed since
* another CPU may have pushed the tail ID. This pairs
* with desc_push_tail:C and this also pairs with
* prb_first_seq:C.
*
* 5. Guarantee the head ID is stored before trying to
* finalize the previous descriptor. This pairs with
* _prb_commit:B.
*/
} while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
id)); /* LMM(desc_reserve:D) */
desc = to_desc(desc_ring, id);
/*
* If the descriptor has been recycled, verify the old state val.
* See "ABA Issues" about why this verification is performed.
*/
prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
if (prev_state_val &&
get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) {
WARN_ON_ONCE(1);
return false;
}
/*
* Assign the descriptor a new ID and set its state to reserved.
* See "ABA Issues" about why cmpxchg() instead of set() is used.
*
* Guarantee the new descriptor ID and state is stored before making
* any other changes. A write memory barrier is sufficient for this.
* This pairs with desc_read:D.
*/
if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */
WARN_ON_ONCE(1);
return false;
}
/* Now data in @desc can be modified: LMM(desc_reserve:G) */
*id_out = id;
return true;
}
/* Determine the end of a data block. */
static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
unsigned long lpos, unsigned int size)
{
unsigned long begin_lpos;
unsigned long next_lpos;
begin_lpos = lpos;
next_lpos = lpos + size;
/* First check if the data block does not wrap. */
if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
return next_lpos;
/* Wrapping data blocks store their data at the beginning. */
return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
}
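/*
 * Worked example (illustrative): with DATA_SIZE() == 64, an ID-aligned data
 * block of (already to_blk_size()-adjusted) size 16 starting at lpos 56
 * would cross the array boundary, so get_next_lpos() returns
 * DATA_THIS_WRAP_START_LPOS(72) + 16 == 64 + 16 == 80. The full block is
 * stored at indexes 0..15 of the new wrap, only the ID is stored at index
 * 56 of the old wrap, and the trailing 8 bytes are wasted space that
 * space_used() also counts.
 */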
/*
* Allocate a new data block, invalidating the oldest data block(s)
* if necessary. This function also associates the data block with
* a specified descriptor.
*/
static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_data_block *blk;
unsigned long begin_lpos;
unsigned long next_lpos;
if (size == 0) {
/*
* Data blocks are not created for empty lines. Instead, the
* reader will recognize these special lpos values and handle
* them appropriately.
*/
blk_lpos->begin = EMPTY_LINE_LPOS;
blk_lpos->next = EMPTY_LINE_LPOS;
return NULL;
}
size = to_blk_size(size);
begin_lpos = atomic_long_read(&data_ring->head_lpos);
do {
next_lpos = get_next_lpos(data_ring, begin_lpos, size);
/*
* data_check_size() prevents data block allocation that could
* cause illegal ringbuffer states. But double check that the
* used space will not be bigger than the ring buffer. Wrapped
* messages need to reserve more space, see get_next_lpos().
*
* Specify a data-less block when the check or the allocation
* fails.
*/
if (WARN_ON_ONCE(next_lpos - begin_lpos > DATA_SIZE(data_ring)) ||
!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
blk_lpos->begin = FAILED_LPOS;
blk_lpos->next = FAILED_LPOS;
return NULL;
}
/*
* 1. Guarantee any descriptor states that have transitioned
* to reusable are stored before modifying the newly
* allocated data area. A full memory barrier is needed
* since other CPUs may have made the descriptor states
* reusable. See data_push_tail:A about why the reusable
* states are visible. This pairs with desc_read:D.
*
* 2. Guarantee any updated tail lpos is stored before
* modifying the newly allocated data area. Another CPU may
* be in data_make_reusable() and is reading a block ID
* from this area. data_make_reusable() can handle reading
* a garbage block ID value, but then it must be able to
* load a new tail lpos. A full memory barrier is needed
* since other CPUs may have updated the tail lpos. This
* pairs with data_push_tail:B.
*/
} while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
next_lpos)); /* LMM(data_alloc:A) */
blk = to_block(data_ring, begin_lpos);
blk->id = id; /* LMM(data_alloc:B) */
if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
/* Wrapping data blocks store their data at the beginning. */
blk = to_block(data_ring, 0);
/*
* Store the ID on the wrapped block for consistency.
* The printk_ringbuffer does not actually use it.
*/
blk->id = id;
}
blk_lpos->begin = begin_lpos;
blk_lpos->next = next_lpos;
return &blk->data[0];
}
/*
* Try to resize an existing data block associated with the descriptor
* specified by @id. If the resized data block should become wrapped, it
* copies the old data to the new data block. If @size yields a data block
* with the same or less size, the data block is left as is.
*
* Fail if this is not the last allocated data block or if there is not
* enough space or it is not possible to make enough space.
*
* Return a pointer to the beginning of the entire data buffer or NULL on
* failure.
*/
static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
struct prb_data_blk_lpos *blk_lpos, unsigned long id)
{
struct prb_data_ring *data_ring = &rb->text_data_ring;
struct prb_data_block *blk;
unsigned long head_lpos;
unsigned long next_lpos;
bool wrapped;
/* Reallocation only works if @blk_lpos is the newest data block. */
head_lpos = atomic_long_read(&data_ring->head_lpos);
if (head_lpos != blk_lpos->next)
return NULL;
/* Keep track if @blk_lpos was a wrapping data block. */
wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
size = to_blk_size(size);
next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
/* If the data block does not increase, there is nothing to do. */
if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
if (wrapped)
blk = to_block(data_ring, 0);
else
blk = to_block(data_ring, blk_lpos->begin);
return &blk->data[0];
}
/*
* data_check_size() prevents data block reallocation that could
* cause illegal ringbuffer states. But double check that the
* new used space will not be bigger than the ring buffer. Wrapped
* messages need to reserve more space, see get_next_lpos().
*
* Specify failure when the check or the allocation fails.
*/
if (WARN_ON_ONCE(next_lpos - blk_lpos->begin > DATA_SIZE(data_ring)) ||
!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
return NULL;
}
/* The memory barrier involvement is the same as data_alloc:A. */
if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
next_lpos)) { /* LMM(data_realloc:A) */
return NULL;
}
blk = to_block(data_ring, blk_lpos->begin);
if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
struct prb_data_block *old_blk = blk;
/* Wrapping data blocks store their data at the beginning. */
blk = to_block(data_ring, 0);
/*
* Store the ID on the wrapped block for consistency.
* The printk_ringbuffer does not actually use it.
*/
blk->id = id;
if (!wrapped) {
/*
* Since the allocated space is now in the newly
* created wrapping data block, copy the content
* from the old data block.
*/
memcpy(&blk->data[0], &old_blk->data[0],
(blk_lpos->next - blk_lpos->begin) - sizeof(blk->id));
}
}
blk_lpos->next = next_lpos;
return &blk->data[0];
}
/* Return the number of bytes used by a data block. */
static unsigned int space_used(struct prb_data_ring *data_ring,
struct prb_data_blk_lpos *blk_lpos)
{
/* Data-less blocks take no space. */
if (BLK_DATALESS(blk_lpos))
return 0;
if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
/* Data block does not wrap. */
return (DATA_INDEX(data_ring, blk_lpos->next) -
DATA_INDEX(data_ring, blk_lpos->begin));
}
/*
* For wrapping data blocks, the trailing (wasted) space is
* also counted.
*/
return (DATA_INDEX(data_ring, blk_lpos->next) +
DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
}
/*
* Given @blk_lpos, return a pointer to the writer data from the data block
* and calculate the size of the data part. A NULL pointer is returned if
* @blk_lpos specifies values that could never be legal.
*
* This function (used by readers) performs strict validation on the lpos
* values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
* triggered if an internal error is detected.
*/
static const char *get_data(struct prb_data_ring *data_ring,
struct prb_data_blk_lpos *blk_lpos,
unsigned int *data_size)
{
struct prb_data_block *db;
/* Data-less data block description. */
if (BLK_DATALESS(blk_lpos)) {
/*
* Records that are just empty lines are also valid, even
* though they do not have a data block. For such records
* explicitly return empty string data to signify success.
*/
if (blk_lpos->begin == EMPTY_LINE_LPOS &&
blk_lpos->next == EMPTY_LINE_LPOS) {
*data_size = 0;
return "";
}
/* Data lost, invalid, or otherwise unavailable. */
return NULL;
}
/* Regular data block: @begin less than @next and in same wrap. */
if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
blk_lpos->begin < blk_lpos->next) {
db = to_block(data_ring, blk_lpos->begin);
*data_size = blk_lpos->next - blk_lpos->begin;
/* Wrapping data block: @begin is one wrap behind @next. */
} else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
DATA_WRAPS(data_ring, blk_lpos->next)) {
db = to_block(data_ring, 0);
*data_size = DATA_INDEX(data_ring, blk_lpos->next);
/* Illegal block description. */
} else {
WARN_ON_ONCE(1);
return NULL;
}
/* A valid data block will always be aligned to the ID size. */
if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
return NULL;
}
/* A valid data block will always have at least an ID. */
if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
return NULL;
/* Subtract block ID space from size to reflect data size. */
*data_size -= sizeof(db->id);
return &db->data[0];
}
/*
* Attempt to transition the newest descriptor from committed back to reserved
* so that the record can be modified by a writer again. This is only possible
* if the descriptor is not yet finalized and the provided @caller_id matches.
*/
static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring,
u32 caller_id, unsigned long *id_out)
{
unsigned long prev_state_val;
enum desc_state d_state;
struct prb_desc desc;
struct prb_desc *d;
unsigned long id;
u32 cid;
id = atomic_long_read(&desc_ring->head_id);
/*
* To reduce unnecessary reopening, first check if the descriptor
* state and caller ID are correct.
*/
d_state = desc_read(desc_ring, id, &desc, NULL, &cid);
if (d_state != desc_committed || cid != caller_id)
return NULL;
d = to_desc(desc_ring, id);
prev_state_val = DESC_SV(id, desc_committed);
/*
* Guarantee the reserved state is stored before reading any
* record data. A full memory barrier is needed because @state_var
* modification is followed by reading. This pairs with _prb_commit:B.
*
* Memory barrier involvement:
*
* If desc_reopen_last:A reads from _prb_commit:B, then
* prb_reserve_in_last:A reads from _prb_commit:A.
*
* Relies on:
*
* WMB from _prb_commit:A to _prb_commit:B
* matching
* MB from desc_reopen_last:A to prb_reserve_in_last:A
*/
if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */
return NULL;
}
*id_out = id;
return d;
}
/**
* prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer
* used by the newest record.
*
* @e: The entry structure to setup.
* @rb: The ringbuffer to re-reserve and extend data in.
* @r: The record structure to allocate buffers for.
* @caller_id: The caller ID of the caller (reserving writer).
* @max_size: Fail if the extended size would be greater than this.
*
* This is the public function available to writers to re-reserve and extend
* data.
*
* The writer specifies the text size to extend (not the new total size) by
* setting the @text_buf_size field of @r. To ensure proper initialization
* of @r, prb_rec_init_wr() should be used.
*
* This function will fail if @caller_id does not match the caller ID of the
* newest record. In that case the caller must reserve new data using
* prb_reserve().
*
* Context: Any context. Disables local interrupts on success.
* Return: true if text data could be extended, otherwise false.
*
* On success:
*
* - @r->text_buf points to the beginning of the entire text buffer.
*
* - @r->text_buf_size is set to the new total size of the buffer.
*
* - @r->info is not touched so that @r->info->text_len could be used
* to append the text.
*
* - prb_record_text_space() can be used on @e to query the new
* actually used space.
*
* Important: All @r->info fields will already be set with the current values
* for the record. I.e. @r->info->text_len will be less than
* @text_buf_size. Writers can use @r->info->text_len to know
* where concatenation begins and writers should update
* @r->info->text_len after concatenating.
*/
bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
struct printk_record *r, u32 caller_id, unsigned int max_size)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
struct printk_info *info;
unsigned int data_size;
struct prb_desc *d;
unsigned long id;
local_irq_save(e->irqflags);
/* Transition the newest descriptor back to the reserved state. */
d = desc_reopen_last(desc_ring, caller_id, &id);
if (!d) {
local_irq_restore(e->irqflags);
goto fail_reopen;
}
/* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */
info = to_info(desc_ring, id);
/*
* Set the @e fields here so that prb_commit() can be used if
* anything fails from now on.
*/
e->rb = rb;
e->id = id;
/*
* desc_reopen_last() checked the caller_id, but there was no
* exclusive access at that point. The descriptor may have
* changed since then.
*/
if (caller_id != info->caller_id)
goto fail;
if (BLK_DATALESS(&d->text_blk_lpos)) {
if (WARN_ON_ONCE(info->text_len != 0)) {
pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
info->text_len);
info->text_len = 0;
}
if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
goto fail;
if (r->text_buf_size > max_size)
goto fail;
r->text_buf = data_alloc(rb, r->text_buf_size,
&d->text_blk_lpos, id);
} else {
if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
goto fail;
/*
* Increase the buffer size to include the original size. If
* the meta data (@text_len) is not sane, use the full data
* block size.
*/
if (WARN_ON_ONCE(info->text_len > data_size)) {
pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
info->text_len, data_size);
info->text_len = data_size;
}
r->text_buf_size += info->text_len;
if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
goto fail;
if (r->text_buf_size > max_size)
goto fail;
r->text_buf = data_realloc(rb, r->text_buf_size,
&d->text_blk_lpos, id);
}
if (r->text_buf_size && !r->text_buf)
goto fail;
r->info = info;
e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
return true;
fail:
prb_commit(e);
/* prb_commit() re-enabled interrupts. */
fail_reopen:
/* Make it clear to the caller that the re-reserve failed. */
memset(r, 0, sizeof(*r));
return false;
}
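/*
 * A minimal sketch of extending the newest record (not taken from this
 * file; the ringbuffer instance @rb, the caller ID variable and the
 * 32-byte limit are illustrative assumptions):
 *
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// request 5 additional bytes of text space
 *	prb_rec_init_wr(&r, 5);
 *
 *	// try to extend, but only if the total stays within 32 bytes
 *	if (prb_reserve_in_last(&e, &rb, &r, caller_id, 32)) {
 *		snprintf(&r.text_buf[r.info->text_len],
 *			 r.text_buf_size - r.info->text_len, "hello");
 *		r.info->text_len += 5;
 *		prb_final_commit(&e);
 *	} else {
 *		// extension not possible: reserve a new record via prb_reserve()
 *	}
 */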
/*
* @last_finalized_seq value guarantees that all records up to and including
* this sequence number are finalized and can be read. The only exception are
* too old records which have already been overwritten.
*
* It is also guaranteed that @last_finalized_seq only increases.
*
* Be aware that finalized records following non-finalized records are not
* reported because they are not yet available to the reader. For example,
* a new record stored via printk() will not be available to a printer if
* it follows a record that has not been finalized yet. However, once that
* non-finalized record becomes finalized, @last_finalized_seq will be
* appropriately updated and the full set of finalized records will be
* available to the printer. And since each printk() caller will either
* directly print or trigger deferred printing of all available unprinted
* records, all printk() messages will get printed.
*/
static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
unsigned long ulseq;
/*
* Guarantee the sequence number is loaded before loading the
* associated record in order to guarantee that the record can be
* seen by this CPU. This pairs with desc_update_last_finalized:A.
*/
ulseq = atomic_long_read_acquire(&desc_ring->last_finalized_seq
); /* LMM(desc_last_finalized_seq:A) */
return __ulseq_to_u64seq(rb, ulseq);
}
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
struct printk_record *r, unsigned int *line_count);
/*
* Check if there are records directly following @last_finalized_seq that are
* finalized. If so, update @last_finalized_seq to the latest of these
* records. It is not allowed to skip over records that are not yet finalized.
*/
static void desc_update_last_finalized(struct printk_ringbuffer *rb)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
u64 old_seq = desc_last_finalized_seq(rb);
unsigned long oldval;
unsigned long newval;
u64 finalized_seq;
u64 try_seq;
try_again:
finalized_seq = old_seq;
try_seq = finalized_seq + 1;
/* Try to find later finalized records. */
while (_prb_read_valid(rb, &try_seq, NULL, NULL)) {
finalized_seq = try_seq;
try_seq++;
}
/* No update needed if no later finalized record was found. */
if (finalized_seq == old_seq)
return;
oldval = __u64seq_to_ulseq(old_seq);
newval = __u64seq_to_ulseq(finalized_seq);
/*
* Set the sequence number of a later finalized record that has been
* seen.
*
* Guarantee the record data is visible to other CPUs before storing
* its sequence number. This pairs with desc_last_finalized_seq:A.
*
* Memory barrier involvement:
*
* If desc_last_finalized_seq:A reads from
* desc_update_last_finalized:A, then desc_read:A reads from
* _prb_commit:B.
*
* Relies on:
*
* RELEASE from _prb_commit:B to desc_update_last_finalized:A
* matching
* ACQUIRE from desc_last_finalized_seq:A to desc_read:A
*
* Note: _prb_commit:B and desc_update_last_finalized:A can be
* different CPUs. However, the desc_update_last_finalized:A
* CPU (which performs the release) must have previously seen
* _prb_commit:B.
*/
if (!atomic_long_try_cmpxchg_release(&desc_ring->last_finalized_seq,
&oldval, newval)) { /* LMM(desc_update_last_finalized:A) */
old_seq = __ulseq_to_u64seq(rb, oldval);
goto try_again;
}
}
/*
* Attempt to finalize a specified descriptor. If this fails, the descriptor
* is either already final or it will finalize itself when the writer commits.
*/
static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
unsigned long prev_state_val = DESC_SV(id, desc_committed);
struct prb_desc *d = to_desc(desc_ring, id);
if (atomic_long_try_cmpxchg_relaxed(&d->state_var, &prev_state_val,
DESC_SV(id, desc_finalized))) { /* LMM(desc_make_final:A) */
desc_update_last_finalized(rb);
}
}
/**
* prb_reserve() - Reserve space in the ringbuffer.
*
* @e: The entry structure to setup.
* @rb: The ringbuffer to reserve data in.
* @r: The record structure to allocate buffers for.
*
* This is the public function available to writers to reserve data.
*
* The writer specifies the text size to reserve by setting the
* @text_buf_size field of @r. To ensure proper initialization of @r,
* prb_rec_init_wr() should be used.
*
* Context: Any context. Disables local interrupts on success.
* Return: true if at least text data could be allocated, otherwise false.
*
* On success, the fields @info and @text_buf of @r will be set by this
* function and should be filled in by the writer before committing. Also
* on success, prb_record_text_space() can be used on @e to query the actual
* space used for the text data block.
*
* Important: @info->text_len needs to be set correctly by the writer in
* order for data to be readable and/or extended. Its value
* is initialized to 0.
*/
bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
struct printk_record *r)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
struct printk_info *info;
struct prb_desc *d;
unsigned long id;
u64 seq;
if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
goto fail;
/*
* Descriptors in the reserved state act as blockers to all further
* reservations once the desc_ring has fully wrapped. Disable
* interrupts during the reserve/commit window in order to minimize
* the likelihood of this happening.
*/
local_irq_save(e->irqflags);
if (!desc_reserve(rb, &id)) {
/* Descriptor reservation failures are tracked. */
atomic_long_inc(&rb->fail);
local_irq_restore(e->irqflags);
goto fail;
}
d = to_desc(desc_ring, id);
info = to_info(desc_ring, id);
/*
* All @info fields (except @seq) are cleared and must be filled in
* by the writer. Save @seq before clearing because it is used to
* determine the new sequence number.
*/
seq = info->seq;
memset(info, 0, sizeof(*info));
/*
* Set the @e fields here so that prb_commit() can be used if
* text data allocation fails.
*/
e->rb = rb;
e->id = id;
/*
* Initialize the sequence number if it has "never been set".
* Otherwise just increment it by a full wrap.
*
* @seq is considered "never been set" if it has a value of 0,
* _except_ for @infos[0], which was specially setup by the ringbuffer
* initializer and therefore is always considered as set.
*
* See the "Bootstrap" comment block in printk_ringbuffer.h for
* details about how the initializer bootstraps the descriptors.
*/
if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
info->seq = DESC_INDEX(desc_ring, id);
else
info->seq = seq + DESCS_COUNT(desc_ring);
/*
* New data is about to be reserved. Once that happens, previous
* descriptors are no longer able to be extended. Finalize the
* previous descriptor now so that it can be made available to
* readers. (For seq==0 there is no previous descriptor.)
*/
if (info->seq > 0)
desc_make_final(rb, DESC_ID(id - 1));
r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
/* If text data allocation fails, a data-less record is committed. */
if (r->text_buf_size && !r->text_buf) {
prb_commit(e);
/* prb_commit() re-enabled interrupts. */
goto fail;
}
r->info = info;
/* Record full text space used by record. */
e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
return true;
fail:
/* Make it clear to the caller that the reserve failed. */
memset(r, 0, sizeof(*r));
return false;
}
EXPORT_SYMBOL_IF_KUNIT(prb_reserve);
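/*
 * A minimal writer sketch using prb_reserve()/prb_commit() (the ringbuffer
 * instance @rb and the message text are illustrative assumptions):
 *
 *	const char *text = "hello, ringbuffer";
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// specify how much text space to reserve
 *	prb_rec_init_wr(&r, strlen(text) + 1);
 *
 *	if (prb_reserve(&e, &rb, &r)) {
 *		snprintf(r.text_buf, r.text_buf_size, "%s", text);
 *		// @text_len must be set for readers and for later extension
 *		r.info->text_len = strlen(text);
 *		// commit; the record is finalized once the next one is reserved
 *		prb_commit(&e);
 *	}
 */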
/* Commit the data (possibly finalizing it) and restore interrupts. */
static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
{
struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
struct prb_desc *d = to_desc(desc_ring, e->id);
unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);
/* Now the writer has finished all writing: LMM(_prb_commit:A) */
/*
* Set the descriptor as committed. See "ABA Issues" about why
* cmpxchg() instead of set() is used.
*
* 1. Guarantee all record data is stored before the descriptor state
* is stored as committed. A write memory barrier is sufficient
* for this. This pairs with desc_read:B and desc_reopen_last:A.
*
* 2. Guarantee the descriptor state is stored as committed before
* re-checking the head ID in order to possibly finalize this
* descriptor. This pairs with desc_reserve:D.
*
* Memory barrier involvement:
*
* If prb_commit:A reads from desc_reserve:D, then
* desc_make_final:A reads from _prb_commit:B.
*
* Relies on:
*
* MB from _prb_commit:B to prb_commit:A
* matching
* MB from desc_reserve:D to desc_make_final:A
*/
if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
WARN_ON_ONCE(1);
}
/* Restore interrupts, the reserve/commit window is finished. */
local_irq_restore(e->irqflags);
}
/**
* prb_commit() - Commit (previously reserved) data to the ringbuffer.
*
* @e: The entry containing the reserved data information.
*
* This is the public function available to writers to commit data.
*
* Note that the data is not yet available to readers until it is finalized.
* Finalizing happens automatically when space for the next record is
* reserved.
*
* See prb_final_commit() for a version of this function that finalizes
* immediately.
*
* Context: Any context. Enables local interrupts.
*/
void prb_commit(struct prb_reserved_entry *e)
{
struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
unsigned long head_id;
_prb_commit(e, desc_committed);
/*
* If this descriptor is no longer the head (i.e. a new record has
* been allocated), extending the data for this record is no longer
* allowed and therefore it must be finalized.
*/
head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
if (head_id != e->id)
desc_make_final(e->rb, e->id);
}
EXPORT_SYMBOL_IF_KUNIT(prb_commit);
/**
* prb_final_commit() - Commit and finalize (previously reserved) data to
* the ringbuffer.
*
* @e: The entry containing the reserved data information.
*
* This is the public function available to writers to commit+finalize data.
*
* By finalizing, the data is made immediately available to readers.
*
* This function should only be used if there are no intentions of extending
* this data using prb_reserve_in_last().
*
* Context: Any context. Enables local interrupts.
*/
void prb_final_commit(struct prb_reserved_entry *e)
{
_prb_commit(e, desc_finalized);
desc_update_last_finalized(e->rb);
}
/*
* Count the number of lines in provided text. All text has at least 1 line
* (even if @text_size is 0). Each '\n' processed is counted as an additional
* line.
*/
static unsigned int count_lines(const char *text, unsigned int text_size)
{
unsigned int next_size = text_size;
unsigned int line_count = 1;
const char *next = text;
while (next_size) {
next = memchr(next, '\n', next_size);
if (!next)
break;
line_count++;
next++;
next_size = text_size - (next - text);
}
return line_count;
}
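/*
 * Worked example (illustrative values): count_lines("a\nb", 3) returns 2,
 * count_lines("a\n", 2) also returns 2 (the trailing '\n' starts a new,
 * empty line), and count_lines("", 0) returns 1.
 */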
/*
* Given @blk_lpos, copy an expected @len of data into the provided buffer.
* If @line_count is provided, count the number of lines in the data.
*
* This function (used by readers) performs strict validation on the data
* size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
* triggered if an internal error is detected.
*/
static bool copy_data(struct prb_data_ring *data_ring,
struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
unsigned int buf_size, unsigned int *line_count)
{
unsigned int data_size;
const char *data;
/* Caller might not want any data. */
if ((!buf || !buf_size) && !line_count)
return true;
data = get_data(data_ring, blk_lpos, &data_size);
if (!data)
return false;
/*
* Actual cannot be less than expected. It can be more than expected
* because of the trailing alignment padding.
*
* Note that invalid @len values can occur because the caller loads
* the value during an allowed data race.
*/
if (data_size < (unsigned int)len)
return false;
/* Caller interested in the line count? */
if (line_count)
*line_count = count_lines(data, len);
/* Caller interested in the data content? */
if (!buf || !buf_size)
return true;
data_size = min_t(unsigned int, buf_size, len);
memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
return true;
}
/*
* This is an extended version of desc_read(). It gets a copy of a specified
* descriptor. However, it also verifies that the record is finalized and has
* the sequence number @seq. On success, 0 is returned.
*
* Error return values:
* -EINVAL: A finalized record with sequence number @seq does not exist.
* -ENOENT: A finalized record with sequence number @seq exists, but its data
* is not available. This is a valid record, so readers should
* continue with the next record.
*/
static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
unsigned long id, u64 seq,
struct prb_desc *desc_out)
{
struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
enum desc_state d_state;
u64 s;
d_state = desc_read(desc_ring, id, desc_out, &s, NULL);
/*
* An unexpected @id (desc_miss) or @seq mismatch means the record
* does not exist. A descriptor in the reserved or committed state
* means the record does not yet exist for the reader.
*/
if (d_state == desc_miss ||
d_state == desc_reserved ||
d_state == desc_committed ||
s != seq) {
return -EINVAL;
}
/*
* A descriptor in the reusable state may no longer have its data
* available; report it as existing but with lost data. Or the record
* may actually be a record with lost data.
*/
if (d_state == desc_reusable ||
(blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
return -ENOENT;
}
return 0;
}
/*
* Copy the ringbuffer data from the record with @seq to the provided
* @r buffer. On success, 0 is returned.
*
* See desc_read_finalized_seq() for error return values.
*/
static int prb_read(struct printk_ringbuffer *rb, u64 seq,
struct printk_record *r, unsigned int *line_count)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
struct printk_info *info = to_info(desc_ring, seq);
struct prb_desc *rdesc = to_desc(desc_ring, seq);
atomic_long_t *state_var = &rdesc->state_var;
struct prb_desc desc;
unsigned long id;
int err;
/* Extract the ID, used to specify the descriptor to read. */
id = DESC_ID(atomic_long_read(state_var));
/* Get a local copy of the correct descriptor (if available). */
err = desc_read_finalized_seq(desc_ring, id, seq, &desc);
/*
* If @r is NULL, the caller is only interested in the availability
* of the record.
*/
if (err || !r)
return err;
/* If requested, copy meta data. */
if (r->info)
memcpy(r->info, info, sizeof(*(r->info)));
/* Copy text data. If it fails, this is a data-less record. */
if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
r->text_buf, r->text_buf_size, line_count)) {
return -ENOENT;
}
/* Ensure the record is still finalized and has the same @seq. */
return desc_read_finalized_seq(desc_ring, id, seq, &desc);
}
/* Get the sequence number of the tail descriptor. */
u64 prb_first_seq(struct printk_ringbuffer *rb)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
enum desc_state d_state;
struct prb_desc desc;
unsigned long id;
u64 seq;
for (;;) {
id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */
/*
* This loop will not be infinite because the tail is
* _always_ in the finalized or reusable state.
*/
if (d_state == desc_finalized || d_state == desc_reusable)
break;
/*
* Guarantee the last state load from desc_read() is before
* reloading @tail_id in order to see a new tail in the case
* that the descriptor has been recycled. This pairs with
* desc_reserve:D.
*
* Memory barrier involvement:
*
* If prb_first_seq:B reads from desc_reserve:F, then
* prb_first_seq:A reads from desc_push_tail:B.
*
* Relies on:
*
* MB from desc_push_tail:B to desc_reserve:F
* matching
* RMB from prb_first_seq:B to prb_first_seq:A
*/
smp_rmb(); /* LMM(prb_first_seq:C) */
}
return seq;
}
/**
* prb_next_reserve_seq() - Get the sequence number after the most recently
* reserved record.
*
* @rb: The ringbuffer to get the sequence number from.
*
* This is the public function available to readers to see what sequence
* number will be assigned to the next reserved record.
*
* Note that depending on the situation, this value can be equal to or
* higher than the sequence number returned by prb_next_seq().
*
* Context: Any context.
* Return: The sequence number that will be assigned to the next record
* reserved.
*/
u64 prb_next_reserve_seq(struct printk_ringbuffer *rb)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
unsigned long last_finalized_id;
atomic_long_t *state_var;
u64 last_finalized_seq;
unsigned long head_id;
struct prb_desc desc;
unsigned long diff;
struct prb_desc *d;
int err;
/*
* It may not be possible to read a sequence number for @head_id.
* So the ID of @last_finalized_seq is used to calculate what the
* sequence number of @head_id will be.
*/
try_again:
last_finalized_seq = desc_last_finalized_seq(rb);
/*
* @head_id is loaded after @last_finalized_seq to ensure that
* it points to the record with @last_finalized_seq or newer.
*
* Memory barrier involvement:
*
* If desc_last_finalized_seq:A reads from
* desc_update_last_finalized:A, then
* prb_next_reserve_seq:A reads from desc_reserve:D.
*
* Relies on:
*
* RELEASE from desc_reserve:D to desc_update_last_finalized:A
* matching
* ACQUIRE from desc_last_finalized_seq:A to prb_next_reserve_seq:A
*
* Note: desc_reserve:D and desc_update_last_finalized:A can be
* different CPUs. However, the desc_update_last_finalized:A CPU
* (which performs the release) must have previously seen
* desc_read:C, which implies desc_reserve:D can be seen.
*/
head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_next_reserve_seq:A) */
d = to_desc(desc_ring, last_finalized_seq);
state_var = &d->state_var;
/* Extract the ID, used to specify the descriptor to read. */
last_finalized_id = DESC_ID(atomic_long_read(state_var));
/* Ensure @last_finalized_id is correct. */
err = desc_read_finalized_seq(desc_ring, last_finalized_id, last_finalized_seq, &desc);
if (err == -EINVAL) {
if (last_finalized_seq == 0) {
/*
* No record has been finalized or even reserved yet.
*
* The @head_id is initialized such that the first
* increment will yield the first record (seq=0).
* Handle it separately to avoid a negative @diff
* below.
*/
if (head_id == DESC0_ID(desc_ring->count_bits))
return 0;
/*
* One or more descriptors are already reserved. Use
* the descriptor ID of the first one (@seq=0) for
* the @diff below.
*/
last_finalized_id = DESC0_ID(desc_ring->count_bits) + 1;
} else {
/* Record must have been overwritten. Try again. */
goto try_again;
}
}
/* Diff of known descriptor IDs to compute related sequence numbers. */
diff = head_id - last_finalized_id;
/*
* @head_id points to the most recently reserved record, but this
* function returns the sequence number that will be assigned to the
* next (not yet reserved) record. Thus +1 is needed.
*/
return (last_finalized_seq + diff + 1);
}
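/*
 * Worked example (illustrative values): if @last_finalized_seq is 5 and
 * @head_id is three descriptors past the one holding sequence number 5,
 * then sequence numbers 6, 7 and 8 are already reserved and the function
 * returns 5 + 3 + 1 = 9 as the next sequence number to be assigned.
 */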
/*
* Non-blocking read of a record.
*
* On success @seq is updated to the record that was read and (if provided)
* @r and @line_count will contain the read/calculated data.
*
* On failure @seq is updated to a record that is not yet available to the
* reader, but it will be the next record available to the reader.
*
* Note: When the current CPU is in panic, this function will skip over any
* non-existent/non-finalized records in order to allow the panic CPU
* to print any and all records that have been finalized.
*/
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
struct printk_record *r, unsigned int *line_count)
{
u64 tail_seq;
int err;
while ((err = prb_read(rb, *seq, r, line_count))) {
tail_seq = prb_first_seq(rb);
if (*seq < tail_seq) {
/*
* Behind the tail. Catch up and try again. This
* can happen for -ENOENT and -EINVAL cases.
*/
*seq = tail_seq;
} else if (err == -ENOENT) {
/* Record exists, but the data was lost. Skip. */
(*seq)++;
} else {
/*
* Non-existent/non-finalized record. Must stop.
*
* For panic situations it cannot be expected that
* non-finalized records will become finalized. But
* there may be other finalized records beyond that
* need to be printed for a panic situation. If this
* is the panic CPU, skip this
* non-existent/non-finalized record unless non-panic
* CPUs are still running and their debugging is
* explicitly enabled.
*
* Note that new messages printed on panic CPU are
* finalized when we are here. The only exception
* might be the last message without trailing newline.
* But it would have the sequence number returned
* by "prb_next_reserve_seq() - 1".
*/
if (panic_on_this_cpu() &&
(!debug_non_panic_cpus || legacy_allow_panic_sync) &&
((*seq + 1) < prb_next_reserve_seq(rb))) {
(*seq)++;
} else {
return false;
}
}
}
return true;
}
/**
* prb_read_valid() - Non-blocking read of a requested record or (if gone)
* the next available record.
*
* @rb: The ringbuffer to read from.
* @seq: The sequence number of the record to read.
* @r: A record data buffer to store the read record to.
*
* This is the public function available to readers to read a record.
*
* The reader provides the @info and @text_buf buffers of @r to be
* filled in. Any of the buffer pointers can be set to NULL if the reader
* is not interested in that data. To ensure proper initialization of @r,
* prb_rec_init_rd() should be used.
*
* Context: Any context.
* Return: true if a record was read, otherwise false.
*
* On success, the reader must check r->info.seq to see which record was
* actually read. This allows the reader to detect dropped records.
*
* Failure means @seq refers to a record not yet available to the reader.
*/
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
struct printk_record *r)
{
return _prb_read_valid(rb, &seq, r, NULL);
}
EXPORT_SYMBOL_IF_KUNIT(prb_read_valid);
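/*
 * A minimal reader sketch (the ringbuffer instance @rb and the 64-byte text
 * buffer size are illustrative assumptions):
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text[64];
 *	u64 seq = 0;
 *
 *	prb_rec_init_rd(&r, &info, &text[0], sizeof(text));
 *
 *	while (prb_read_valid(&rb, seq, &r)) {
 *		if (info.seq != seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *		// consume min(info.text_len, sizeof(text)) bytes of text[]
 *		seq = info.seq + 1;
 *	}
 */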
/**
* prb_read_valid_info() - Non-blocking read of meta data for a requested
* record or (if gone) the next available record.
*
* @rb: The ringbuffer to read from.
* @seq: The sequence number of the record to read.
* @info: A buffer to store the read record meta data to.
* @line_count: A buffer to store the number of lines in the record text.
*
* This is the public function available to readers to read only the
* meta data of a record.
*
* The reader provides the @info, @line_count buffers to be filled in.
* Either of the buffer pointers can be set to NULL if the reader is not
* interested in that data.
*
* Context: Any context.
* Return: true if a record's meta data was read, otherwise false.
*
* On success, the reader must check info->seq to see which record meta data
* was actually read. This allows the reader to detect dropped records.
*
* Failure means @seq refers to a record not yet available to the reader.
*/
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
struct printk_info *info, unsigned int *line_count)
{
struct printk_record r;
prb_rec_init_rd(&r, info, NULL, 0);
return _prb_read_valid(rb, &seq, &r, line_count);
}
/**
* prb_first_valid_seq() - Get the sequence number of the oldest available
* record.
*
* @rb: The ringbuffer to get the sequence number from.
*
* This is the public function available to readers to see what the
* first/oldest valid sequence number is.
*
* This provides readers a starting point to begin iterating the ringbuffer.
*
* Context: Any context.
* Return: The sequence number of the first/oldest record or, if the
* ringbuffer is empty, 0 is returned.
*/
u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
{
u64 seq = 0;
if (!_prb_read_valid(rb, &seq, NULL, NULL))
return 0;
return seq;
}
/**
* prb_next_seq() - Get the sequence number after the last available record.
*
* @rb: The ringbuffer to get the sequence number from.
*
* This is the public function available to readers to see what the next
* newest sequence number available to readers will be.
*
* This provides readers a sequence number to jump to if all currently
* available records should be skipped. It is guaranteed that all records
* previous to the returned value have been finalized and are (or were)
* available to the reader.
*
* Context: Any context.
* Return: The sequence number of the next newest (not yet available) record
* for readers.
*/
u64 prb_next_seq(struct printk_ringbuffer *rb)
{
u64 seq;
seq = desc_last_finalized_seq(rb);
/*
* Begin searching after the last finalized record.
*
* On 0, the search must begin at 0 because, due to hack#2
* of the bootstrapping phase, it is not known if a
* record at index 0 exists.
*/
if (seq != 0)
seq++;
/*
* The information about the last finalized @seq might be inaccurate.
* Search forward to find the current one.
*/
while (_prb_read_valid(rb, &seq, NULL, NULL))
seq++;
return seq;
}
/**
* prb_init() - Initialize a ringbuffer to use provided external buffers.
*
* @rb: The ringbuffer to initialize.
* @text_buf: The data buffer for text data.
* @textbits: The size of @text_buf as a power-of-2 value.
* @descs: The descriptor buffer for ringbuffer records.
* @descbits: The count of @descs items as a power-of-2 value.
* @infos: The printk_info buffer for ringbuffer records.
*
* This is the public function available to writers to setup a ringbuffer
* during runtime using provided buffers.
*
* This must match the initialization of DEFINE_PRINTKRB().
*
* Context: Any context.
*/
void prb_init(struct printk_ringbuffer *rb,
char *text_buf, unsigned int textbits,
struct prb_desc *descs, unsigned int descbits,
struct printk_info *infos)
{
memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0]));
rb->desc_ring.count_bits = descbits;
rb->desc_ring.descs = descs;
rb->desc_ring.infos = infos;
atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
atomic_long_set(&rb->desc_ring.last_finalized_seq, 0);
rb->text_data_ring.size_bits = textbits;
rb->text_data_ring.data = text_buf;
atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
atomic_long_set(&rb->fail, 0);
atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
infos[0].seq = -(u64)_DESCS_COUNT(descbits);
infos[_DESCS_COUNT(descbits) - 1].seq = 0;
}
EXPORT_SYMBOL_IF_KUNIT(prb_init);
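/*
 * A minimal sketch of a runtime setup with caller-provided buffers. The
 * sizes (2^12 bytes of text, 2^5 descriptors) and the static storage are
 * illustrative assumptions:
 *
 *	#define RB_TEXTBITS 12
 *	#define RB_DESCBITS 5
 *
 *	static char rb_text[1 << RB_TEXTBITS];
 *	static struct prb_desc rb_descs[1 << RB_DESCBITS];
 *	static struct printk_info rb_infos[1 << RB_DESCBITS];
 *	static struct printk_ringbuffer rb;
 *
 *	prb_init(&rb, rb_text, RB_TEXTBITS, rb_descs, RB_DESCBITS, rb_infos);
 */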
/**
* prb_record_text_space() - Query the full actual used ringbuffer space for
* the text data of a reserved entry.
*
* @e: The successfully reserved entry to query.
*
* This is the public function available to writers to see how much actual
* space is used in the ringbuffer to store the text data of the specified
* entry.
*
* This function is only valid if @e has been successfully reserved using
* prb_reserve().
*
* Context: Any context.
* Return: The size in bytes used by the text data of the associated record.
*/
unsigned int prb_record_text_space(struct prb_reserved_entry *e)
{
return e->text_space;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Implement CPU time clocks for the POSIX clock interface.
*/
#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>
#include <linux/task_work.h>
#include "posix-timers.h"
static void posix_cpu_timer_rearm(struct k_itimer *timer);
void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
posix_cputimers_init(pct);
if (cpu_limit != RLIM_INFINITY) {
pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
pct->timers_active = true;
}
}
/*
* Called after updating RLIMIT_CPU to run cpu timer and update
* tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
* necessary. Needs siglock protection since other code may update the
* expiration cache as well.
*
* Returns 0 on success, -ESRCH on failure. Can fail if the task is exiting and
* we cannot lock_task_sighand. Cannot fail if task is current.
*/
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
u64 nsecs = rlim_new * NSEC_PER_SEC;
unsigned long irq_fl;
if (!lock_task_sighand(task, &irq_fl))
return -ESRCH;
set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
unlock_task_sighand(task, &irq_fl);
return 0;
}
/*
* Functions for validating access to tasks.
*/
static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
{
const bool thread = !!CPUCLOCK_PERTHREAD(clock);
const pid_t upid = CPUCLOCK_PID(clock);
struct pid *pid;
if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
return NULL;
/*
* If the encoded PID is 0, then the timer is targeted at current
* or the process to which current belongs.
*/
if (upid == 0)
return thread ? task_pid(current) : task_tgid(current);
pid = find_vpid(upid);
if (!pid)
return NULL;
if (thread) {
struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
}
/*
* For clock_gettime(PROCESS) allow finding the process with
* the pid of the current task. The code needs the tgid
* of the process so that pid_task(pid, PIDTYPE_TGID) can be
* used to find the process.
*/
if (gettime && (pid == task_pid(current)))
return task_tgid(current);
/*
* For process clocks, require that the pid identifies a process.
*/
return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
}
static inline int validate_clock_permissions(const clockid_t clock)
{
int ret;
rcu_read_lock();
ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
rcu_read_unlock();
return ret;
}
static inline enum pid_type clock_pid_type(const clockid_t clock)
{
return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
}
static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{
return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
}
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
u64 delta, incr, expires = timer->it.cpu.node.expires;
int i;
if (!timer->it_interval)
return expires;
if (now < expires)
return expires;
incr = timer->it_interval;
delta = now + incr - expires;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
if (delta < incr)
continue;
timer->it.cpu.node.expires += incr;
timer->it_overrun += 1LL << i;
delta -= incr;
}
return timer->it.cpu.node.expires;
}
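/*
 * Worked example (illustrative values): with expires = 10, it_interval = 3
 * and now = 18, the expiry times 10, 13 and 16 have already passed. The
 * doubling loop grows @incr to 6 (i = 1); the second loop then adds 6
 * (overrun += 2) and 3 (overrun += 1), leaving expires = 19 > now and a
 * total of 3 overruns, i.e. the next expiry after @now.
 */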
/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
return !(~pct->bases[CPUCLOCK_PROF].nextevt |
~pct->bases[CPUCLOCK_VIRT].nextevt |
~pct->bases[CPUCLOCK_SCHED].nextevt);
}
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
int error = validate_clock_permissions(which_clock);
if (!error) {
tp->tv_sec = 0;
tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
/*
* If sched_clock is using a cycle counter, we
* have no idea of its true resolution to export,
* but it is much finer than 1s/HZ.
*/
tp->tv_nsec = 1;
}
}
return error;
}
static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
int error = validate_clock_permissions(clock);
/*
* You can never reset a CPU clock, but we check for other errors
* in the call before failing with EPERM.
*/
return error ? : -EPERM;
}
/*
* Sample a per-thread clock for the given task. clkid is validated.
*/
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
u64 utime, stime;
if (clkid == CPUCLOCK_SCHED)
return task_sched_runtime(p);
task_cputime(p, &utime, &stime);
switch (clkid) {
case CPUCLOCK_PROF:
return utime + stime;
case CPUCLOCK_VIRT:
return utime;
default:
WARN_ON_ONCE(1);
}
return 0;
}
static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
samples[CPUCLOCK_PROF] = stime + utime;
samples[CPUCLOCK_VIRT] = utime;
samples[CPUCLOCK_SCHED] = rtime;
}
static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
u64 stime, utime;
task_cputime(p, &utime, &stime);
store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}
static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
u64 *samples)
{
u64 stime, utime, rtime;
utime = atomic64_read(&at->utime);
stime = atomic64_read(&at->stime);
rtime = atomic64_read(&at->sum_exec_runtime);
store_samples(samples, stime, utime, rtime);
}
/*
* Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
* to avoid race conditions with concurrent updates to cputime.
*/
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
u64 curr_cputime = atomic64_read(cputime);
do {
if (sum_cputime <= curr_cputime)
return;
} while (!atomic64_try_cmpxchg(cputime, &curr_cputime, sum_cputime));
}
static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
struct task_cputime *sum)
{
__update_gt_cputime(&cputime_atomic->utime, sum->utime);
__update_gt_cputime(&cputime_atomic->stime, sum->stime);
__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}
/**
* thread_group_sample_cputime - Sample cputime for a given task
* @tsk: Task for which cputime needs to be started
* @samples: Storage for time samples
*
* Called from sys_getitimer() to calculate the expiry time of an active
* timer. That means group cputime accounting is already active. Called
* with task sighand lock held.
*
* Updates @samples with an up-to-date sample of the thread group cputimes.
*/
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
WARN_ON_ONCE(!pct->timers_active);
proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}
/**
* thread_group_start_cputime - Start cputime and return a sample
* @tsk: Task for which cputime needs to be started
* @samples: Storage for time samples
*
* The thread group cputime accounting is avoided when there are no posix
* CPU timers armed. Before starting a timer it's required to check whether
* the time accounting is active. If not, a full update of the atomic
* accounting store needs to be done and the accounting enabled.
*
* Updates @samples with an up-to-date sample of the thread group cputimes.
*/
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
lockdep_assert_task_sighand_held(tsk);
/* Check if cputimer isn't running. This is accessed without locking. */
if (!READ_ONCE(pct->timers_active)) {
struct task_cputime sum;
/*
* The POSIX timer interface allows for absolute time expiry
* values through the TIMER_ABSTIME flag, therefore we have
* to synchronize the timer to the clock every time we start it.
*/
thread_group_cputime(tsk, &sum);
update_gt_cputime(&cputimer->cputime_atomic, &sum);
/*
* We're setting timers_active without a lock. Ensure this
* only gets written to in one operation. We set it after
* update_gt_cputime() as a small optimization, but
* barriers are not required because update_gt_cputime()
* can handle concurrent updates.
*/
WRITE_ONCE(pct->timers_active, true);
}
proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}
static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
struct task_cputime ct;
thread_group_cputime(tsk, &ct);
store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}
/*
* Sample a process (thread group) clock for the given task clkid. If the
* group's cputime accounting is already enabled, read the atomic
* store. Otherwise a full update is required. clkid is already validated.
*/
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
bool start)
{
struct thread_group_cputimer *cputimer = &p->signal->cputimer;
struct posix_cputimers *pct = &p->signal->posix_cputimers;
u64 samples[CPUCLOCK_MAX];
if (!READ_ONCE(pct->timers_active)) {
if (start)
thread_group_start_cputime(p, samples);
else
__thread_group_cputime(p, samples);
} else {
proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}
return samples[clkid];
}
static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
const clockid_t clkid = CPUCLOCK_WHICH(clock);
struct task_struct *tsk;
u64 t;
rcu_read_lock();
tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
if (!tsk) {
rcu_read_unlock();
return -EINVAL;
}
if (CPUCLOCK_PERTHREAD(clock))
t = cpu_clock_sample(clkid, tsk);
else
t = cpu_clock_sample_group(clkid, tsk, false);
rcu_read_unlock();
*tp = ns_to_timespec64(t);
return 0;
}
/*
* Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
* This is called from sys_timer_create() and do_cpu_nanosleep() with the
* new timer already all-zeros initialized.
*/
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
static struct lock_class_key posix_cpu_timers_key;
struct pid *pid;
rcu_read_lock();
pid = pid_for_clock(new_timer->it_clock, false);
if (!pid) {
rcu_read_unlock();
return -EINVAL;
}
/*
* If posix timer expiry is handled in task work context then
* timer::it_lock can be taken without disabling interrupts as all
* other locking happens in task context. This requires a separate
* lock class key otherwise regular posix timer expiry would record
* the lock class being taken in interrupt context and generate a
* false positive warning.
*/
if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
new_timer->kclock = &clock_posix_cpu;
timerqueue_init(&new_timer->it.cpu.node);
new_timer->it.cpu.pid = get_pid(pid);
rcu_read_unlock();
return 0;
}
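/*
 * For reference, a minimal userspace sketch that reaches this path via
 * sys_timer_create() (the one-second values and the SIGALRM choice are
 * illustrative assumptions):
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGALRM,
 *	};
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1 },	// fire after 1s of CPU time
 *		.it_interval = { .tv_sec = 1 },	// then every additional 1s
 *	};
 *
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */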
static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
struct task_struct *tsk)
{
int clkidx = CPUCLOCK_WHICH(timer->it_clock);
if (CPUCLOCK_PERTHREAD(timer->it_clock))
return tsk->posix_cputimers.bases + clkidx;
else
return tsk->signal->posix_cputimers.bases + clkidx;
}
/*
* Force recalculating the base earliest expiration on the next tick.
* This will also re-evaluate the need to keep around the process wide
* cputime counter and tick dependency and eventually shut these down
* if necessary.
*/
static void trigger_base_recalc_expires(struct k_itimer *timer,
struct task_struct *tsk)
{
struct posix_cputimer_base *base = timer_base(timer, tsk);
base->nextevt = 0;
}
/*
* Dequeue the timer and reset the base if it was its earliest expiration.
* It makes sure the next tick recalculates the base next expiration so we
* don't keep the costly process wide cputime counter around for a random
* amount of time, along with the tick dependency.
*
* If another timer gets queued between this and the next tick, its
* expiration will update the base next event if necessary on the next
* tick.
*/
static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
{
struct cpu_timer *ctmr = &timer->it.cpu;
struct posix_cputimer_base *base;
if (!cpu_timer_dequeue(ctmr))
return;
base = timer_base(timer, p);
if (cpu_timer_getexpires(ctmr) == base->nextevt)
trigger_base_recalc_expires(timer, p);
}
/*
* Clean up a CPU-clock timer that is about to be destroyed.
* This is called from timer deletion with the timer already locked.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
static int posix_cpu_timer_del(struct k_itimer *timer)
{
struct cpu_timer *ctmr = &timer->it.cpu;
struct sighand_struct *sighand;
struct task_struct *p;
unsigned long flags;
int ret = 0;
rcu_read_lock();
p = cpu_timer_task_rcu(timer);
if (!p)
goto out;
/*
* Protect against sighand release/switch in exit/exec and process/
* thread timer list entry concurrent read/writes.
*/
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL)) {
/*
* This raced with the reaping of the task. The exit cleanup
* should have removed this timer from the timer queue.
*/
WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
} else {
if (timer->it.cpu.firing) {
/*
* Prevent signal delivery. The timer cannot be dequeued
* because it is on the firing list which is not protected
* by sighand->lock. The delivery path is waiting for
* the timer lock. So go back, unlock and retry.
*/
timer->it.cpu.firing = false;
ret = TIMER_RETRY;
} else {
disarm_timer(timer, p);
}
unlock_task_sighand(p, &flags);
}
out:
rcu_read_unlock();
if (!ret) {
put_pid(ctmr->pid);
timer->it_status = POSIX_TIMER_DISARMED;
}
return ret;
}
static void cleanup_timerqueue(struct timerqueue_head *head)
{
struct timerqueue_node *node;
struct cpu_timer *ctmr;
while ((node = timerqueue_getnext(head))) {
timerqueue_del(head, node);
ctmr = container_of(node, struct cpu_timer, node);
ctmr->head = NULL;
}
}
/*
* Clean out CPU timers which are still armed when a thread exits. The
* timers are only removed from the list. No other updates are done. The
* corresponding posix timers are still accessible, but cannot be rearmed.
*
* This must be called with the siglock held.
*/
static void cleanup_timers(struct posix_cputimers *pct)
{
cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
}
/*
* These are both called with the siglock held, when the current thread
* is being reaped. When the final (leader) thread in the group is reaped,
* posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
cleanup_timers(&tsk->posix_cputimers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
cleanup_timers(&tsk->signal->posix_cputimers);
}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the sighand lock held.
*/
static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{
struct posix_cputimer_base *base = timer_base(timer, p);
struct cpu_timer *ctmr = &timer->it.cpu;
u64 newexp = cpu_timer_getexpires(ctmr);
timer->it_status = POSIX_TIMER_ARMED;
if (!cpu_timer_enqueue(&base->tqhead, ctmr))
return;
/*
* We are the new earliest-expiring POSIX 1.b timer, hence
* need to update expiration cache. Take into account that
* for process timers we share expiration cache with itimers
* and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
*/
if (newexp < base->nextevt)
base->nextevt = newexp;
if (CPUCLOCK_PERTHREAD(timer->it_clock))
tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
else
tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
}
/*
* The timer is locked, fire it and arrange for its reload.
*/
static void cpu_timer_fire(struct k_itimer *timer)
{
struct cpu_timer *ctmr = &timer->it.cpu;
timer->it_status = POSIX_TIMER_DISARMED;
if (unlikely(ctmr->nanosleep)) {
/*
* This is a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
*/
wake_up_process(timer->it_process);
cpu_timer_setexpires(ctmr, 0);
} else {
posix_timer_queue_signal(timer);
/* Disable oneshot timers */
if (!timer->it_interval)
cpu_timer_setexpires(ctmr, 0);
}
}
static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now);
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
struct itimerspec64 *new, struct itimerspec64 *old)
{
bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
struct cpu_timer *ctmr = &timer->it.cpu;
u64 old_expires, new_expires, now;
struct sighand_struct *sighand;
struct task_struct *p;
unsigned long flags;
int ret = 0;
rcu_read_lock();
p = cpu_timer_task_rcu(timer);
if (!p) {
/*
* If p has just been reaped, we can no
* longer get any information about it at all.
*/
rcu_read_unlock();
return -ESRCH;
}
/*
* Use the to_ktime conversion because that clamps the maximum
* value to KTIME_MAX and avoids multiplication overflows.
*/
new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
/*
* Protect against sighand release/switch in exit/exec and p->cpu_timers
* and p->signal->cpu_timers read/write in arm_timer()
*/
sighand = lock_task_sighand(p, &flags);
/*
* If p has just been reaped, we can no
* longer get any information about it at all.
*/
if (unlikely(sighand == NULL)) {
rcu_read_unlock();
return -ESRCH;
}
/* Retrieve the current expiry time before disarming the timer */
old_expires = cpu_timer_getexpires(ctmr);
if (unlikely(timer->it.cpu.firing)) {
/*
* Prevent signal delivery. The timer cannot be dequeued
* because it is on the firing list which is not protected
* by sighand->lock. The delivery path is waiting for
* the timer lock. So go back, unlock and retry.
*/
timer->it.cpu.firing = false;
ret = TIMER_RETRY;
} else {
cpu_timer_dequeue(ctmr);
timer->it_status = POSIX_TIMER_DISARMED;
}
/*
* Sample the current clock for saving the previous setting
* and for rearming the timer.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
else
now = cpu_clock_sample_group(clkid, p, !sigev_none);
/* Retrieve the previous expiry value if requested. */
if (old) {
old->it_value = (struct timespec64){ };
if (old_expires)
__posix_cpu_timer_get(timer, old, now);
}
/* Retry if the timer expiry is running concurrently */
if (unlikely(ret)) {
unlock_task_sighand(p, &flags);
goto out;
}
/* Convert relative expiry time to absolute */
if (new_expires && !(timer_flags & TIMER_ABSTIME))
new_expires += now;
/* Set the new expiry time (might be 0) */
cpu_timer_setexpires(ctmr, new_expires);
/*
* Arm the timer if it is not disabled, the new expiry value has
* not yet expired and the timer requires signal delivery.
* SIGEV_NONE timers are never armed. In case the timer is not
* armed, enforce the reevaluation of the timer base so that the
* process wide cputime counter can be disabled eventually.
*/
if (likely(!sigev_none)) {
if (new_expires && now < new_expires)
arm_timer(timer, p);
else
trigger_base_recalc_expires(timer, p);
}
unlock_task_sighand(p, &flags);
posix_timer_set_common(timer, new);
/*
* If the new expiry time was already in the past the timer was not
* queued. Fire it immediately even if the thread never runs to
* accumulate more time on this clock.
*/
if (!sigev_none && new_expires && now >= new_expires)
cpu_timer_fire(timer);
out:
rcu_read_unlock();
return ret;
}
static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now)
{
bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
u64 expires, iv = timer->it_interval;
/*
* Make sure that interval timers are moved forward for the
* following cases:
* - SIGEV_NONE timers which are never armed
* - Timers which expired, but the signal has not yet been
* delivered
*/
if (iv && timer->it_status != POSIX_TIMER_ARMED)
expires = bump_cpu_timer(timer, now);
else
expires = cpu_timer_getexpires(&timer->it.cpu);
/*
* Expired interval timers cannot have a remaining time <= 0.
* The kernel has to move them forward so that the next
* timer expiry is > @now.
*/
if (now < expires) {
itp->it_value = ns_to_timespec64(expires - now);
} else {
/*
* A single shot SIGEV_NONE timer must return 0, when it is
* expired! Timers which have a real signal delivery mode
* must return a remaining time greater than 0 because the
* signal has not yet been delivered.
*/
if (!sigev_none)
itp->it_value.tv_nsec = 1;
}
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
struct task_struct *p;
u64 now;
rcu_read_lock();
p = cpu_timer_task_rcu(timer);
if (p && cpu_timer_getexpires(&timer->it.cpu)) {
itp->it_interval = ktime_to_timespec64(timer->it_interval);
if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
else
now = cpu_clock_sample_group(clkid, p, false);
__posix_cpu_timer_get(timer, itp, now);
}
rcu_read_unlock();
}
#define MAX_COLLECTED 20
static u64 collect_timerqueue(struct timerqueue_head *head,
struct list_head *firing, u64 now)
{
struct timerqueue_node *next;
int i = 0;
while ((next = timerqueue_getnext(head))) {
struct cpu_timer *ctmr;
u64 expires;
ctmr = container_of(next, struct cpu_timer, node);
expires = cpu_timer_getexpires(ctmr);
/* Limit the number of timers to expire at once */
if (++i == MAX_COLLECTED || now < expires)
return expires;
ctmr->firing = true;
/* See posix_cpu_timer_wait_running() */
rcu_assign_pointer(ctmr->handling, current);
cpu_timer_dequeue(ctmr);
list_add_tail(&ctmr->elist, firing);
}
return U64_MAX;
}
static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
struct list_head *firing)
{
struct posix_cputimer_base *base = pct->bases;
int i;
for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
base->nextevt = collect_timerqueue(&base->tqhead, firing,
samples[i]);
}
}
static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
tsk->dl.dl_overrun = 0;
send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
}
}
static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
if (time < limit)
return false;
if (print_fatal_signals) {
pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
rt ? "RT" : "CPU", hard ? "hard" : "soft",
current->comm, task_pid_nr(current));
}
send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID);
return true;
}
/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
* tsk->it_*_expires values to reflect the remaining thread CPU timers.
*/
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
struct posix_cputimers *pct = &tsk->posix_cputimers;
u64 samples[CPUCLOCK_MAX];
unsigned long soft;
if (dl_task(tsk))
check_dl_overrun(tsk);
if (expiry_cache_is_inactive(pct))
return;
task_sample_cputime(tsk, samples);
collect_posix_cputimers(pct, samples, firing);
/*
* Check for the special case thread timers.
*/
soft = task_rlimit(tsk, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
/* Task RT timeout is accounted in jiffies. RTTIME is usec */
unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
/* At the hard limit, send SIGKILL. No further action. */
if (hard != RLIM_INFINITY &&
check_rlimit(rttime, hard, SIGKILL, true, true))
return;
/* At the soft limit, send a SIGXCPU every second */
if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
soft += USEC_PER_SEC;
tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
}
}
if (expiry_cache_is_inactive(pct))
tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
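/*
* Worked example for the RLIMIT_RTTIME conversion above (a sketch): with
* HZ == 1000 each jiffy is USEC_PER_SEC / HZ == 1000 us, so a task that has
* accumulated tsk->rt.timeout == 2500 jiffies of realtime runtime is
* compared against the soft/hard limits as rttime == 2,500,000 us (2.5 s).
*/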
static inline void stop_process_timers(struct signal_struct *sig)
{
struct posix_cputimers *pct = &sig->posix_cputimers;
/* Turn off the active flag. This is done without locking. */
WRITE_ONCE(pct->timers_active, false);
tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
u64 *expires, u64 cur_time, int signo)
{
if (!it->expires)
return;
if (cur_time >= it->expires) {
if (it->incr)
it->expires += it->incr;
else
it->expires = 0;
trace_itimer_expire(signo == SIGPROF ?
ITIMER_PROF : ITIMER_VIRTUAL,
task_tgid(tsk), cur_time);
send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
}
if (it->expires && it->expires < *expires)
*expires = it->expires;
}
/*
* Check for any process-wide CPU timers that have fired and move them
* off the signal struct's timer queues onto the firing list. Per-thread
* timers have already been taken off by check_thread_timers().
*/
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
struct signal_struct *const sig = tsk->signal;
struct posix_cputimers *pct = &sig->posix_cputimers;
u64 samples[CPUCLOCK_MAX];
unsigned long soft;
/*
* If there are no active process wide timers (POSIX 1.b, itimers,
* RLIMIT_CPU) nothing to check. Also skip the process wide timer
* processing when there is already another task handling them.
*/
if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
return;
/*
* Signify that a thread is checking for process timers.
* Write access to this field is protected by the sighand lock.
*/
pct->expiry_active = true;
/*
* Collect the current process totals. Group accounting is active
* so the sample can be taken directly.
*/
proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
collect_posix_cputimers(pct, samples, firing);
/*
* Check for the special case process timers.
*/
check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
&pct->bases[CPUCLOCK_PROF].nextevt,
samples[CPUCLOCK_PROF], SIGPROF);
check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
&pct->bases[CPUCLOCK_VIRT].nextevt,
samples[CPUCLOCK_VIRT], SIGVTALRM);
soft = task_rlimit(tsk, RLIMIT_CPU);
if (soft != RLIM_INFINITY) {
/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
u64 ptime = samples[CPUCLOCK_PROF];
u64 softns = (u64)soft * NSEC_PER_SEC;
u64 hardns = (u64)hard * NSEC_PER_SEC;
/* At the hard limit, send SIGKILL. No further action. */
if (hard != RLIM_INFINITY &&
check_rlimit(ptime, hardns, SIGKILL, false, true))
return;
/* At the soft limit, send a SIGXCPU every second */
if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
softns += NSEC_PER_SEC;
}
/* Update the expiry cache */
if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
pct->bases[CPUCLOCK_PROF].nextevt = softns;
}
if (expiry_cache_is_inactive(pct))
stop_process_timers(sig);
pct->expiry_active = false;
}
/*
* This is called from the signal code (via posixtimer_rearm)
* when the last timer signal was delivered and we have to reload the timer.
*/
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
struct task_struct *p;
struct sighand_struct *sighand;
unsigned long flags;
u64 now;
rcu_read_lock();
p = cpu_timer_task_rcu(timer);
if (!p)
goto out;
/* Protect timer list r/w in arm_timer() */
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL))
goto out;
/*
* Fetch the current sample and update the timer's expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
else
now = cpu_clock_sample_group(clkid, p, true);
bump_cpu_timer(timer, now);
/*
* Now re-arm for the new expiry time.
*/
arm_timer(timer, p);
unlock_task_sighand(p, &flags);
out:
rcu_read_unlock();
}
/**
* task_cputimers_expired - Check whether posix CPU timers are expired
*
* @samples: Array of current samples for the CPUCLOCK clocks
* @pct: Pointer to a posix_cputimers container
*
* Returns true if any member of @samples is greater than the corresponding
* member of @pct->bases[CLK].nextevt. False otherwise
*/
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
int i;
for (i = 0; i < CPUCLOCK_MAX; i++) {
if (samples[i] >= pct->bases[i].nextevt)
return true;
}
return false;
}
/**
* fastpath_timer_check - POSIX CPU timers fast path.
*
* @tsk: The task (thread) being checked.
*
* Check the task and thread group timers. If both are zero (there are no
* timers set) return false. Otherwise snapshot the task and thread group
* timers and compare them with the corresponding expiration times. Return
* true if a timer has expired, else return false.
*/
static inline bool fastpath_timer_check(struct task_struct *tsk)
{
struct posix_cputimers *pct = &tsk->posix_cputimers;
struct signal_struct *sig;
if (!expiry_cache_is_inactive(pct)) {
u64 samples[CPUCLOCK_MAX];
task_sample_cputime(tsk, samples);
if (task_cputimers_expired(samples, pct))
return true;
}
sig = tsk->signal;
pct = &sig->posix_cputimers;
/*
* Check if thread group timers expired when timers are active and
* no other thread in the group is already handling expiry for
* thread group cputimers. These fields are read without the
* sighand lock. However, this is fine because this is meant to be
* a fastpath heuristic to determine whether we should try to
* acquire the sighand lock to handle timer expiry.
*
* In the worst case scenario, if concurrently timers_active is set
* or expiry_active is cleared, but the current thread doesn't see
* the change yet, the timer checks are delayed until the next
* thread in the group gets a scheduler interrupt to handle the
* timer. This isn't an issue in practice because these types of
* delays with signals actually getting sent are expected.
*/
if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
u64 samples[CPUCLOCK_MAX];
proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
samples);
if (task_cputimers_expired(samples, pct))
return true;
}
if (dl_task(tsk) && tsk->dl.dl_overrun)
return true;
return false;
}
static void handle_posix_cpu_timers(struct task_struct *tsk);
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
static void posix_cpu_timers_work(struct callback_head *work)
{
struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
mutex_lock(&cw->mutex);
handle_posix_cpu_timers(current);
mutex_unlock(&cw->mutex);
}
/*
* Invoked from the posix-timer core when a cancel operation failed because
* the timer is marked firing. The caller holds rcu_read_lock(), which
* protects the timer and the task which is expiring it from being freed.
*/
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
/* Has the handling task completed expiry already? */
if (!tsk)
return;
/* Ensure that the task cannot go away */
get_task_struct(tsk);
/* Now drop the RCU protection so the mutex can be locked */
rcu_read_unlock();
/* Wait on the expiry mutex */
mutex_lock(&tsk->posix_cputimers_work.mutex);
/* Release it immediately again. */
mutex_unlock(&tsk->posix_cputimers_work.mutex);
/* Drop the task reference. */
put_task_struct(tsk);
/* Relock RCU so the callsite is balanced */
rcu_read_lock();
}
static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
/* Ensure that timr->it.cpu.handling task cannot go away */
rcu_read_lock();
spin_unlock_irq(&timr->it_lock);
posix_cpu_timer_wait_running(timr);
rcu_read_unlock();
/* @timr is on stack and is valid */
spin_lock_irq(&timr->it_lock);
}
/*
* Clear existing posix CPU timers task work.
*/
void clear_posix_cputimers_work(struct task_struct *p)
{
/*
* A copied work entry from the old task is not meaningful, clear it.
* N.B. init_task_work will not do this.
*/
memset(&p->posix_cputimers_work.work, 0,
sizeof(p->posix_cputimers_work.work));
init_task_work(&p->posix_cputimers_work.work,
posix_cpu_timers_work);
mutex_init(&p->posix_cputimers_work.mutex);
p->posix_cputimers_work.scheduled = false;
}
/*
* Initialize posix CPU timers task work in init task. Out of line to
* keep the callback static and to avoid header recursion hell.
*/
void __init posix_cputimers_init_work(void)
{
clear_posix_cputimers_work(current);
}
/*
* Note: All operations on tsk->posix_cputimers_work.scheduled happen either
* in hard interrupt context or in task context with interrupts
* disabled. Aside from that, the writer/reader interaction is always in the
* context of the current task, which means it is strictly per CPU.
*/
static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
return tsk->posix_cputimers_work.scheduled;
}
static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{
if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
return;
/* Schedule task work to actually expire the timers */
tsk->posix_cputimers_work.scheduled = true;
task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
}
static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
unsigned long start)
{
bool ret = true;
/*
* On !RT kernels interrupts are disabled while collecting expired
* timers, so no tick can happen and the fast path check can be
* reenabled without further checks.
*/
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
tsk->posix_cputimers_work.scheduled = false;
return true;
}
/*
* On RT enabled kernels ticks can happen while the expired timers
* are collected under sighand lock. But any tick which observes
* the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
* checks. So reenabling the tick work has to be done carefully:
*
* Disable interrupts and run the fast path check if jiffies have
* advanced since the collecting of expired timers started. If
* jiffies have not advanced or the fast path check did not find
* newly expired timers, reenable the fast path check in the timer
* interrupt. If there are newly expired timers, return false and
* let the collection loop repeat.
*/
local_irq_disable();
if (start != jiffies && fastpath_timer_check(tsk))
ret = false;
else
tsk->posix_cputimers_work.scheduled = false;
local_irq_enable();
return ret;
}
#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{
lockdep_posixtimer_enter();
handle_posix_cpu_timers(tsk);
lockdep_posixtimer_exit();
}
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
cpu_relax();
}
static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
spin_unlock_irq(&timr->it_lock);
cpu_relax();
spin_lock_irq(&timr->it_lock);
}
static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
return false;
}
static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
unsigned long start)
{
return true;
}
#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
static void handle_posix_cpu_timers(struct task_struct *tsk)
{
struct k_itimer *timer, *next;
unsigned long flags, start;
LIST_HEAD(firing);
if (!lock_task_sighand(tsk, &flags))
return;
do {
/*
* On RT locking sighand lock does not disable interrupts,
* so this needs to be careful vs. ticks. Store the current
* jiffies value.
*/
start = READ_ONCE(jiffies);
barrier();
/*
* Here we take all the timers that are firing off the
* per-thread and process-wide timer queues and put
* them on the firing list.
*/
check_thread_timers(tsk, &firing);
check_process_timers(tsk, &firing);
/*
* The above timer checks have updated the expiry cache and
* because nothing can have queued or modified timers after
* sighand lock was taken above it is guaranteed to be
* consistent. So the next timer interrupt fastpath check
* will find valid data.
*
* If timer expiry runs in the timer interrupt context then
* the loop is not relevant as timers will be directly
* expired in interrupt context. The stub function below
* always returns true, which allows the compiler to
* optimize the loop out.
*
* If timer expiry is deferred to task work context then
* the following rules apply:
*
* - On !RT kernels no tick can have happened on this CPU
* after sighand lock was acquired because interrupts are
* disabled. So reenabling task work before dropping
* sighand lock and reenabling interrupts is race free.
*
* - On RT kernels ticks might have happened but the tick
* work ignored posix CPU timer handling because the
* CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
* must be done very carefully including a check whether
* ticks have happened since the start of the timer
* expiry checks. posix_cpu_timers_enable_work() takes
* care of that and eventually lets the expiry checks
* run again.
*/
} while (!posix_cpu_timers_enable_work(tsk, start));
/*
* We must release sighand lock before taking any timer's lock.
* There is a potential race with timer deletion here, as the
* siglock now protects our private firing list. We have set
* the firing flag in each timer, so that a deletion attempt
* that gets the timer lock before we do will give it up and
* spin until we've taken care of that timer below.
*/
unlock_task_sighand(tsk, &flags);
/*
* Now that all the timers on our list have the firing flag,
* no one will touch their list entries but us. We'll take
* each timer's lock before clearing its firing flag, so no
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
bool cpu_firing;
/*
* spin_lock() is sufficient here even independent of the
* expiry context. If expiry happens in hard interrupt
* context it's obvious. For task work context it's safe
* because all other operations on timer::it_lock happen in
* task context (syscall or exit).
*/
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.elist);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = false;
/*
* If the firing flag is cleared then this raced with a
* timer rearm/delete operation. So don't generate an
* event.
*/
if (likely(cpu_firing))
cpu_timer_fire(timer);
/* See posix_cpu_timer_wait_running() */
rcu_assign_pointer(timer->it.cpu.handling, NULL);
spin_unlock(&timer->it_lock);
}
}
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
void run_posix_cpu_timers(void)
{
struct task_struct *tsk = current;
lockdep_assert_irqs_disabled();
/*
* Ensure that release_task(tsk) can't happen while
* handle_posix_cpu_timers() is running. Otherwise, a concurrent
* posix_cpu_timer_del() may fail to lock_task_sighand(tsk) and
* miss timer->it.cpu.firing != 0.
*/
if (tsk->exit_state)
return;
/*
* If the actual expiry is deferred to task work context and the
* work is already scheduled there is no point to do anything here.
*/
if (posix_cpu_timers_work_scheduled(tsk))
return;
/*
* The fast path checks that there are no expired thread or thread
* group timers. If that's so, just return.
*/
if (!fastpath_timer_check(tsk))
return;
__run_posix_cpu_timers(tsk);
}
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
*/
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
u64 *newval, u64 *oldval)
{
u64 now, *nextevt;
if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
return;
nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
now = cpu_clock_sample_group(clkid, tsk, true);
if (oldval) {
/*
* We are setting an itimer. *oldval is absolute, so convert it
* to relative; the *newval argument is relative, so convert it
* to absolute.
*/
if (*oldval) {
if (*oldval <= now) {
/* Just about to fire. */
*oldval = TICK_NSEC;
} else {
*oldval -= now;
}
}
if (*newval)
*newval += now;
}
/*
* Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
* expiry cache is also used by RLIMIT_CPU!
*/
if (*newval < *nextevt)
*nextevt = *newval;
tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
const struct timespec64 *rqtp)
{
struct itimerspec64 it;
struct k_itimer timer;
u64 expires;
int error;
/*
* Set up a temporary timer and then wait for it to go off.
*/
memset(&timer, 0, sizeof timer);
spin_lock_init(&timer.it_lock);
timer.it_clock = which_clock;
timer.it_overrun = -1;
error = posix_cpu_timer_create(&timer);
timer.it_process = current;
timer.it.cpu.nanosleep = true;
if (!error) {
static struct itimerspec64 zero_it;
struct restart_block *restart;
memset(&it, 0, sizeof(it));
it.it_value = *rqtp;
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_set(&timer, flags, &it, NULL);
if (error) {
spin_unlock_irq(&timer.it_lock);
return error;
}
while (!signal_pending(current)) {
if (!cpu_timer_getexpires(&timer.it.cpu)) {
/*
* Our timer fired and was reset; the
* deletion below cannot fail.
*/
posix_cpu_timer_del(&timer);
spin_unlock_irq(&timer.it_lock);
return 0;
}
/*
* Block until cpu_timer_fire (or a signal) wakes us.
*/
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&timer.it_lock);
schedule();
spin_lock_irq(&timer.it_lock);
}
/*
* We were interrupted by a signal.
*/
expires = cpu_timer_getexpires(&timer.it.cpu);
error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
if (!error) {
/* Timer is now unarmed, deletion can not fail. */
posix_cpu_timer_del(&timer);
} else {
while (error == TIMER_RETRY) {
posix_cpu_timer_wait_running_nsleep(&timer);
error = posix_cpu_timer_del(&timer);
}
}
spin_unlock_irq(&timer.it_lock);
if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
/*
* It actually did fire already.
*/
return 0;
}
error = -ERESTART_RESTARTBLOCK;
/*
* Report back to the user the time still remaining.
*/
restart = &current->restart_block;
restart->nanosleep.expires = expires;
if (restart->nanosleep.type != TT_NONE)
error = nanosleep_copyout(restart, &it.it_value);
}
return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
const struct timespec64 *rqtp)
{
struct restart_block *restart_block = &current->restart_block;
int error;
/*
* Diagnose required errors first.
*/
if (CPUCLOCK_PERTHREAD(which_clock) &&
(CPUCLOCK_PID(which_clock) == 0 ||
CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
return -EINVAL;
error = do_cpu_nanosleep(which_clock, flags, rqtp);
if (error == -ERESTART_RESTARTBLOCK) {
if (flags & TIMER_ABSTIME)
return -ERESTARTNOHAND;
restart_block->nanosleep.clockid = which_clock;
set_restart_fn(restart_block, posix_cpu_nsleep_restart);
}
return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
clockid_t which_clock = restart_block->nanosleep.clockid;
struct timespec64 t;
t = ns_to_timespec64(restart_block->nanosleep.expires);
return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
#define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = PROCESS_CLOCK;
return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
const struct timespec64 *rqtp)
{
return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = THREAD_CLOCK;
return posix_cpu_timer_create(timer);
}
const struct k_clock clock_posix_cpu = {
.clock_getres = posix_cpu_clock_getres,
.clock_set = posix_cpu_clock_set,
.clock_get_timespec = posix_cpu_clock_get,
.timer_create = posix_cpu_timer_create,
.nsleep = posix_cpu_nsleep,
.timer_set = posix_cpu_timer_set,
.timer_del = posix_cpu_timer_del,
.timer_get = posix_cpu_timer_get,
.timer_rearm = posix_cpu_timer_rearm,
.timer_wait_running = posix_cpu_timer_wait_running,
};
const struct k_clock clock_process = {
.clock_getres = process_cpu_clock_getres,
.clock_get_timespec = process_cpu_clock_get,
.timer_create = process_cpu_timer_create,
.nsleep = process_cpu_nsleep,
};
const struct k_clock clock_thread = {
.clock_getres = thread_cpu_clock_getres,
.clock_get_timespec = thread_cpu_clock_get,
.timer_create = thread_cpu_timer_create,
};
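/*
* Userspace view (a hedged sketch, not part of this file): the k_clock
* tables above back CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID.
* A process CPU-time timer that raises SIGPROF after two seconds of
* consumed CPU time could be armed like this:
*
*   timer_t tid;
*   struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
*                           .sigev_signo  = SIGPROF };
*   struct itimerspec its = { .it_value = { .tv_sec = 2 } };
*
*   timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
*   timer_settime(tid, 0, &its, NULL);
*
* clock_nanosleep() on these clocks is routed through posix_cpu_nsleep()
* above; note that sleeping on the calling thread's own per-thread clock
* is rejected with -EINVAL.
*/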
/*
* linux/include/linux/console.h
*
* Copyright (C) 1993 Hamish Macdonald
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Changed:
* 10-Mar-94: Arno Griffioen: Conversion for vt100 emulator port from PC LINUX
*/
#ifndef _LINUX_CONSOLE_H_
#define _LINUX_CONSOLE_H_ 1
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/irq_work.h>
#include <linux/rculist.h>
#include <linux/rcuwait.h>
#include <linux/types.h>
#include <linux/vesa.h>
struct vc_data;
struct console_font_op;
struct console_font;
struct module;
struct tty_struct;
struct notifier_block;
enum con_scroll {
SM_UP,
SM_DOWN,
};
enum vc_intensity;
/**
* struct consw - callbacks for consoles
*
* @owner: the module to get references of when this console is used
* @con_startup: set up the console and return its name (like VGA, EGA, ...)
* @con_init: initialize the console on @vc. @init is true for the very first
* call on this @vc.
* @con_deinit: deinitialize the console from @vc.
* @con_clear: erase @count characters at [@x, @y] on @vc. @count >= 1.
* @con_putc: emit one character with attributes @ca to [@x, @y] on @vc.
* (optional -- @con_putcs would be called instead)
* @con_putcs: emit @count characters with attributes @s to [@x, @y] on @vc.
* @con_cursor: enable/disable cursor depending on @enable
* @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
* Return true if no generic handling should be done.
* Invoked by csi_M and printing to the console.
* @con_switch: notifier about the console switch; it is supposed to return
* true if a redraw is needed.
* @con_blank: blank/unblank the console. The target mode is passed in @blank.
* @mode_switch is set if changing from/to text/graphics. The hook
* is supposed to return true if a redraw is needed.
* @con_font_set: set console @vc font to @font with height @vpitch. @flags can
* be %KD_FONT_FLAG_DONT_RECALC. (optional)
* @con_font_get: fetch the current font on @vc of height @vpitch into @font.
* (optional)
* @con_font_default: set default font on @vc. @name can be %NULL or font name
* to search for. @font can be filled back. (optional)
* @con_resize: resize the @vc console to @width x @height. @from_user is true
* when this change comes from the user space.
* @con_set_palette: sets the palette of the console @vc to @table (optional)
* @con_scrolldelta: the contents of the console should be scrolled by @lines.
* Invoked by user. (optional)
* @con_set_origin: set origin (see &vc_data::vc_origin) of the @vc. If not
* provided or returns false, the origin is set to
* @vc->vc_screenbuf. (optional)
* @con_save_screen: save screen content into @vc->vc_screenbuf. Called e.g.
* upon entering graphics. (optional)
* @con_build_attr: build attributes based on @color, @intensity and other
* parameters. The result is used for both normal and erase
* characters. (optional)
* @con_invert_region: invert a region of length @count on @vc starting at @p.
* (optional)
* @con_debug_enter: prepare the console for the debugger. This includes, but
* is not limited to, unblanking the console, loading an
* appropriate palette, and allowing debugger generated output.
* (optional)
* @con_debug_leave: restore the console to its pre-debug state as closely as
* possible. (optional)
*/
struct consw {
struct module *owner;
const char *(*con_startup)(void);
void (*con_init)(struct vc_data *vc, bool init);
void (*con_deinit)(struct vc_data *vc);
void (*con_clear)(struct vc_data *vc, unsigned int y,
unsigned int x, unsigned int count);
void (*con_putc)(struct vc_data *vc, u16 ca, unsigned int y,
unsigned int x);
void (*con_putcs)(struct vc_data *vc, const u16 *s,
unsigned int count, unsigned int ypos,
unsigned int xpos);
void (*con_cursor)(struct vc_data *vc, bool enable);
bool (*con_scroll)(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int lines);
bool (*con_switch)(struct vc_data *vc);
bool (*con_blank)(struct vc_data *vc, enum vesa_blank_mode blank,
bool mode_switch);
int (*con_font_set)(struct vc_data *vc,
const struct console_font *font,
unsigned int vpitch, unsigned int flags);
int (*con_font_get)(struct vc_data *vc, struct console_font *font,
unsigned int vpitch);
int (*con_font_default)(struct vc_data *vc,
struct console_font *font, const char *name);
int (*con_resize)(struct vc_data *vc, unsigned int width,
unsigned int height, bool from_user);
void (*con_set_palette)(struct vc_data *vc,
const unsigned char *table);
void (*con_scrolldelta)(struct vc_data *vc, int lines);
bool (*con_set_origin)(struct vc_data *vc);
void (*con_save_screen)(struct vc_data *vc);
u8 (*con_build_attr)(struct vc_data *vc, u8 color,
enum vc_intensity intensity,
bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
void (*con_debug_enter)(struct vc_data *vc);
void (*con_debug_leave)(struct vc_data *vc);
};
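/*
* A minimal sketch of a consw implementation (hypothetical driver, names
* are illustrative; only a subset of the callbacks is shown):
*
*   static const char *mycon_startup(void)
*   {
*           return "MYCON";
*   }
*
*   static void mycon_init(struct vc_data *vc, bool init) { }
*   static void mycon_deinit(struct vc_data *vc) { }
*   static void mycon_clear(struct vc_data *vc, unsigned int y,
*                           unsigned int x, unsigned int count) { }
*   static void mycon_putcs(struct vc_data *vc, const u16 *s,
*                           unsigned int count, unsigned int ypos,
*                           unsigned int xpos) { }
*   static void mycon_cursor(struct vc_data *vc, bool enable) { }
*   static bool mycon_switch(struct vc_data *vc) { return true; }
*   static bool mycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
*                           bool mode_switch) { return false; }
*
*   static const struct consw my_con = {
*           .owner       = THIS_MODULE,
*           .con_startup = mycon_startup,
*           .con_init    = mycon_init,
*           .con_deinit  = mycon_deinit,
*           .con_clear   = mycon_clear,
*           .con_putcs   = mycon_putcs,
*           .con_cursor  = mycon_cursor,
*           .con_switch  = mycon_switch,
*           .con_blank   = mycon_blank,
*   };
*
* Such a driver would typically be bound with do_take_over_console()
* (declared below).
*/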
extern const struct consw *conswitchp;
extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
struct screen_info;
#ifdef CONFIG_VGA_CONSOLE
void vgacon_register_screen(struct screen_info *si);
#else
static inline void vgacon_register_screen(struct screen_info *si) { }
#endif
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
#ifdef CONFIG_VT
void con_debug_enter(struct vc_data *vc);
void con_debug_leave(void);
#else
static inline void con_debug_enter(struct vc_data *vc) { }
static inline void con_debug_leave(void) { }
#endif
/*
* The interface for a console, or any other device that wants to capture
* console messages (printer driver?)
*/
/**
* enum cons_flags - General console flags
* @CON_PRINTBUFFER: Used by newly registered consoles to avoid duplicate
* output of messages that were already shown by boot
* consoles or read by userspace via syslog() syscall.
* @CON_CONSDEV: Indicates that the console driver is backing
* /dev/console.
* @CON_ENABLED: Indicates if a console is allowed to print records. If
* false, the console also will not advance to later
* records.
* @CON_BOOT: Marks the console driver as early console driver which
* is used during boot before the real driver becomes
* available. It will be automatically unregistered
* when the real console driver is registered unless
* "keep_bootcon" parameter is used.
* @CON_ANYTIME: A misnamed historical flag which tells the core code
* that the legacy @console::write callback can be invoked
* on a CPU which is marked OFFLINE. That is misleading as
* it suggests that there is no contextual limit for
* invoking the callback. The original motivation was
* readiness of the per-CPU areas.
* @CON_BRL: Indicates a braille device which is exempt from
* receiving the printk spam for obvious reasons.
* @CON_EXTENDED: The console supports the extended output format of
* /dev/kmesg which requires a larger output buffer.
* @CON_SUSPENDED: Indicates if a console is suspended. If true, the
* printing callbacks must not be called.
* @CON_NBCON: Console can operate outside of the legacy style console_lock
* constraints.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
CON_CONSDEV = BIT(1),
CON_ENABLED = BIT(2),
CON_BOOT = BIT(3),
CON_ANYTIME = BIT(4),
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
CON_SUSPENDED = BIT(7),
CON_NBCON = BIT(8),
};
/**
* struct nbcon_state - console state for nbcon consoles
* @atom: Compound of the state fields for atomic operations
*
* @req_prio: The priority of a handover request
* @prio: The priority of the current owner
* @unsafe: Console is busy in a non takeover region
* @unsafe_takeover: A hostile takeover in an unsafe state happened in the
* past. The console cannot be safe until re-initialized.
* @cpu: The CPU on which the owner runs
*
* To be used for reading and preparing of the value stored in the nbcon
* state variable @console::nbcon_state.
*
* The @prio and @req_prio fields are particularly important to allow
* spin-waiting to timeout and give up without the risk of a waiter being
* assigned the lock after giving up.
*/
struct nbcon_state {
union {
unsigned int atom;
struct {
unsigned int prio : 2;
unsigned int req_prio : 2;
unsigned int unsafe : 1;
unsigned int unsafe_takeover : 1;
unsigned int cpu : 24;
};
};
};
/*
* The nbcon_state struct is used to easily create and interpret values that
* are stored in the @console::nbcon_state variable. Ensure this struct stays
* within the size boundaries of the atomic variable's underlying type in
* order to avoid any accidental truncation.
*/
static_assert(sizeof(struct nbcon_state) <= sizeof(int));
/**
* enum nbcon_prio - console owner priority for nbcon consoles
* @NBCON_PRIO_NONE: Unused
* @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
* @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
* @NBCON_PRIO_PANIC: Panic output
* @NBCON_PRIO_MAX: The number of priority levels
*
* A higher priority context can takeover the console when it is
* in the safe state. The final attempt to flush consoles in panic()
* can be allowed to do so even in an unsafe state (Hope and pray).
*/
enum nbcon_prio {
NBCON_PRIO_NONE = 0,
NBCON_PRIO_NORMAL,
NBCON_PRIO_EMERGENCY,
NBCON_PRIO_PANIC,
NBCON_PRIO_MAX,
};
struct console;
struct printk_buffers;
/**
* struct nbcon_context - Context for console acquire/release
* @console: The associated console
* @spinwait_max_us: Limit for spin-wait acquire
* @prio: Priority of the context
* @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
* be used only with NBCON_PRIO_PANIC @prio. It
* might cause a system freeze when the console
* is used later.
* @backlog: Ringbuffer has pending records
* @pbufs: Pointer to the text buffer for this context
* @seq: The sequence number to print for this context
*/
struct nbcon_context {
/* members set by caller */
struct console *console;
unsigned int spinwait_max_us;
enum nbcon_prio prio;
unsigned int allow_unsafe_takeover : 1;
/* members set by emit */
unsigned int backlog : 1;
/* members set by acquire */
struct printk_buffers *pbufs;
u64 seq;
};
/**
* struct nbcon_write_context - Context handed to the nbcon write callbacks
* @ctxt: The core console context
* @outbuf: Pointer to the text buffer for output
* @len: Length to write
* @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
*/
struct nbcon_write_context {
struct nbcon_context __private ctxt;
char *outbuf;
unsigned int len;
bool unsafe_takeover;
};
/**
* struct console - The console descriptor structure
* @name: The name of the console driver
* @write: Legacy write callback to output messages (Optional)
* @read: Read callback for console input (Optional)
* @device: The underlying TTY device driver (Optional)
* @unblank: Callback to unblank the console (Optional)
* @setup: Callback for initializing the console (Optional)
* @exit: Callback for teardown of the console (Optional)
* @match: Callback for matching a console (Optional)
* @flags: Console flags. See enum cons_flags
* @index: Console index, e.g. port number
* @cflag: TTY control mode flags
* @ispeed: TTY input speed
* @ospeed: TTY output speed
* @seq: Sequence number of the next ringbuffer record to print
* @dropped: Number of unreported dropped ringbuffer records
* @data: Driver private data
* @node: hlist node for the console list
*
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
* @nbcon_device_ctxt: Context available for non-printing operations
* @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
* @pbufs: Pointer to nbcon private buffer
* @kthread: Printer kthread for this console
* @rcuwait: RCU-safe wait object for @kthread waking
* @irq_work: Defer @kthread waking to IRQ work context
*/
struct console {
char name[16];
void (*write)(struct console *co, const char *s, unsigned int count);
int (*read)(struct console *co, char *s, unsigned int count);
struct tty_driver *(*device)(struct console *co, int *index);
void (*unblank)(void);
int (*setup)(struct console *co, char *options);
int (*exit)(struct console *co);
int (*match)(struct console *co, char *name, int idx, char *options);
short flags;
short index;
int cflag;
uint ispeed;
uint ospeed;
u64 seq;
unsigned long dropped;
void *data;
struct hlist_node node;
/* nbcon console specific members */
/**
* @write_atomic:
*
* NBCON callback to write out text in any context. (Optional)
*
* This callback is called with the console already acquired. However,
* a higher priority context is allowed to take it over by default.
*
* The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe()
* around any code where the takeover is not safe, for example, when
* manipulating the serial port registers.
*
* nbcon_enter_unsafe() will fail if the context has lost the console
* ownership in the meantime. In this case, the callback is no longer
* allowed to go forward. It must back out immediately and carefully.
* The buffer content is also no longer trusted since it no longer
* belongs to the context.
*
* The callback should allow the takeover whenever it is safe. It
* increases the chance to see messages when the system is in trouble.
* If the driver must reacquire ownership in order to finalize or
* revert hardware changes, nbcon_reacquire_nobuf() can be used.
* However, on reacquire the buffer content is no longer available. A
* reacquire cannot be used to resume printing.
*
* The callback can be called from any context (including NMI).
* Therefore it must avoid usage of any locking and instead rely
* on the console ownership for synchronization.
*/
void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
/**
* @write_thread:
*
* NBCON callback to write out text in task context.
*
* This callback must be called only in task context with both
* device_lock() and the nbcon console acquired with
* NBCON_PRIO_NORMAL.
*
* The same rules for console ownership verification and unsafe
* sections handling apply as with write_atomic().
*
* The console ownership handling is necessary for synchronization
* against write_atomic() which is synchronized only via the context.
*
* The device_lock() provides the primary serialization for operations
* on the device. It might be as relaxed (mutex)[*] or as tight
* (disabled preemption and interrupts) as needed. It allows
* the kthread to operate in the least restrictive mode[**].
*
* [*] Standalone nbcon_context_try_acquire() is not safe with
* preemption enabled, see nbcon_owner_matches(). But it
* can be safe when always called in preemptible context
* under the device_lock().
*
* [**] The device_lock() makes sure that nbcon_context_try_acquire()
* would never need to spin which is important especially with
* PREEMPT_RT.
*/
void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
/**
* @device_lock:
*
* NBCON callback to begin synchronization with driver code.
*
* Console drivers typically must deal with access to the hardware
* via user input/output (such as an interactive login shell) and
* output of kernel messages via printk() calls. This callback is
* called by the printk-subsystem whenever it needs to synchronize
* with hardware access by the driver. It should be implemented to
* use whatever synchronization mechanism the driver is using for
* itself (for example, the port lock for uart serial consoles).
*
* The callback is always called from task context. It may use any
* synchronization method required by the driver.
*
* IMPORTANT: The callback MUST disable migration. The console driver
* may be using a synchronization mechanism that already takes
* care of this (such as spinlocks). Otherwise this function must
* explicitly call migrate_disable().
*
* The flags argument is provided as a convenience to the driver. It
* will be passed again to device_unlock(). It can be ignored if the
* driver does not need it.
*/
void (*device_lock)(struct console *con, unsigned long *flags);
/**
* @device_unlock:
*
* NBCON callback to finish synchronization with driver code.
*
* It is the counterpart to device_lock().
*
* This callback is always called from task context. It must
* appropriately re-enable migration (depending on how device_lock()
* disabled migration).
*
* The flags argument is the value of the same variable that was
* passed to device_lock().
*/
void (*device_unlock)(struct console *con, unsigned long flags);
atomic_t __private nbcon_state;
atomic_long_t __private nbcon_seq;
struct nbcon_context __private nbcon_device_ctxt;
atomic_long_t __private nbcon_prev_seq;
struct printk_buffers *pbufs;
struct task_struct *kthread;
struct rcuwait rcuwait;
struct irq_work irq_work;
};
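/*
* A minimal sketch of the nbcon callback pattern documented above
* (hypothetical driver; my_hw_emit() and my_port_lock are illustrative):
*
*   static void my_write_atomic(struct console *con,
*                               struct nbcon_write_context *wctxt)
*   {
*           if (!nbcon_enter_unsafe(wctxt))
*                   return;         // lost ownership, back out immediately
*
*           // takeover would be unsafe here: touch the hardware
*           my_hw_emit(con->data, wctxt->outbuf, wctxt->len);
*
*           nbcon_exit_unsafe(wctxt);
*   }
*
*   static void my_device_lock(struct console *con, unsigned long *flags)
*   {
*           // the spinlock also disables migration, as required
*           spin_lock_irqsave(&my_port_lock, *flags);
*   }
*
*   static void my_device_unlock(struct console *con, unsigned long flags)
*   {
*           spin_unlock_irqrestore(&my_port_lock, flags);
*   }
*
* nbcon_enter_unsafe()/nbcon_exit_unsafe() are declared further below in
* this header.
*/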
#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_console_list_lock_held(void);
#else
static inline void lockdep_assert_console_list_lock_held(void)
{
}
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern bool console_srcu_read_lock_is_held(void);
#else
static inline bool console_srcu_read_lock_is_held(void)
{
return true;
}
#endif
extern int console_srcu_read_lock(void);
extern void console_srcu_read_unlock(int cookie);
extern void console_list_lock(void) __acquires(console_mutex);
extern void console_list_unlock(void) __releases(console_mutex);
extern struct hlist_head console_list;
/**
* console_srcu_read_flags - Locklessly read flags of a possibly registered
* console
* @con: struct console pointer of console to read flags from
*
* Locklessly reading @con->flags provides a consistent read value because
* there is at most one CPU modifying @con->flags and that CPU is using only
* read-modify-write operations to do so.
*
* Requires console_srcu_read_lock to be held, which implies that @con might
* be a registered console. The purpose of holding console_srcu_read_lock is
* to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED)
* and that no exit/cleanup routines will run if the console is currently
* undergoing unregistration.
*
* If the caller is holding the console_list_lock or it is _certain_ that
* @con is not and will not become registered, the caller may read
* @con->flags directly instead.
*
* Context: Any context.
* Return: The current value of the @con->flags field.
*/
static inline short console_srcu_read_flags(const struct console *con)
{
WARN_ON_ONCE(!console_srcu_read_lock_is_held());
/*
* The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
* for registered consoles with console_srcu_write_flags().
*/
return data_race(READ_ONCE(con->flags));
}
/**
* console_srcu_write_flags - Write flags for a registered console
* @con: struct console pointer of console to write flags to
* @flags: new flags value to write
*
* Only use this function to write flags for registered consoles. It
* requires holding the console_list_lock.
*
* Context: Any context.
*/
static inline void console_srcu_write_flags(struct console *con, short flags)
{
lockdep_assert_console_list_lock_held();
/* This matches the READ_ONCE() in console_srcu_read_flags(). */
WRITE_ONCE(con->flags, flags);
}
/* Variant of console_is_registered() when the console_list_lock is held. */
static inline bool console_is_registered_locked(const struct console *con)
{
lockdep_assert_console_list_lock_held();
return !hlist_unhashed(&con->node);
}
/*
* console_is_registered - Check if the console is registered
* @con: struct console pointer of console to check
*
* Context: Process context. May sleep while acquiring console list lock.
* Return: true if the console is in the console list, otherwise false.
*
* If false is returned for a console that was previously registered, it
* can be assumed that the console's unregistration is fully completed,
* including the exit() callback after console list removal.
*/
static inline bool console_is_registered(const struct console *con)
{
bool ret;
console_list_lock();
ret = console_is_registered_locked(con);
console_list_unlock();
return ret;
}
/**
* for_each_console_srcu() - Iterator over registered consoles
* @con: struct console pointer used as loop cursor
*
* Although SRCU guarantees the console list will be consistent, the
* struct console fields may be updated by other CPUs while iterating.
*
* Requires console_srcu_read_lock to be held. Can be invoked from
* any context.
*/
#define for_each_console_srcu(con) \
hlist_for_each_entry_srcu(con, &console_list, node, \
console_srcu_read_lock_is_held())
/**
* for_each_console() - Iterator over registered consoles
* @con: struct console pointer used as loop cursor
*
* The console list and the &console.flags are immutable while iterating.
*
* Requires console_list_lock to be held.
*/
#define for_each_console(con) \
lockdep_assert_console_list_lock_held(); \
hlist_for_each_entry(con, &console_list, node)
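/*
* A minimal sketch of lockless console inspection using the helpers above:
*
*   struct console *con;
*   int cookie;
*
*   cookie = console_srcu_read_lock();
*   for_each_console_srcu(con) {
*           short flags = console_srcu_read_flags(con);
*
*           if (!(flags & CON_ENABLED))
*                   continue;
*           // inspect @con ...
*   }
*   console_srcu_read_unlock(cookie);
*/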
#ifdef CONFIG_PRINTK
extern void nbcon_cpu_emergency_enter(void);
extern void nbcon_cpu_emergency_exit(void);
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
#else
static inline void nbcon_cpu_emergency_enter(void) { }
static inline void nbcon_cpu_emergency_exit(void) { }
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
#endif
extern int console_set_on_cmdline;
extern struct console *early_console;
enum con_flush_mode {
CONSOLE_FLUSH_PENDING,
CONSOLE_REPLAY_ALL,
};
extern int add_preferred_console(const char *name, const short idx, char *options);
extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
extern void console_flush_on_panic(enum con_flush_mode mode);
extern struct tty_driver *console_device(int *);
extern void console_suspend(struct console *);
extern void console_resume(struct console *);
extern int is_console_locked(void);
extern int braille_register_console(struct console *, int index,
char *console_options, char *braille_options);
extern int braille_unregister_console(struct console *);
#ifdef CONFIG_TTY
extern void console_sysfs_notify(void);
#else
static inline void console_sysfs_notify(void)
{ }
#endif
extern bool console_suspend_enabled;
/* Suspend and resume console messages over PM events */
extern void console_suspend_all(void);
extern void console_resume_all(void);
int mda_console_init(void);
void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
#define WARN_CONSOLE_UNLOCKED() \
WARN_ON(!atomic_read(&ignore_console_lock_warning) && \
!is_console_locked() && !oops_in_progress)
/*
* Increment ignore_console_lock_warning if you need to quiet
* WARN_CONSOLE_UNLOCKED() for debugging purposes.
*/
extern atomic_t ignore_console_lock_warning;
DEFINE_LOCK_GUARD_0(console_lock, console_lock(), console_unlock());
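/*
* A sketch of how the guard above can be used (scope-based form of
* console_lock()/console_unlock(), see linux/cleanup.h):
*
*   guard(console_lock)();
*   // console is locked until the end of the enclosing scope
*/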
extern void console_init(void);
/* For deferred console takeover */
void dummycon_register_output_notifier(struct notifier_block *nb);
void dummycon_unregister_output_notifier(struct notifier_block *nb);
#endif /* _LINUX_CONSOLE_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/vsprintf.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
/*
* Wirzenius wrote this portably, Torvalds fucked it up :-)
*/
/*
* Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* - changed to provide snprintf and vsnprintf functions
* So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
* - scnprintf and vscnprintf
*/
#include <linux/stdarg.h>
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/errname.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/rtc.h>
#include <linux/sprintf.h>
#include <linux/time.h>
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
#include <linux/siphash.h>
#include <linux/compiler.h>
#include <linux/property.h>
#include <linux/notifier.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
#include "../mm/internal.h" /* For the trace_print_flags arrays */
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/byteorder.h> /* cpu_to_le16 */
#include <linux/unaligned.h>
#include <linux/string_helpers.h>
#include "kstrtox.h"
/* Disable pointer hashing if requested */
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
/*
* Hashed pointers policy selected by "hash_pointers=..." boot param
*
* `auto` - Hashed pointers enabled unless disabled by slub_debug_enabled=true
* `always` - Hashed pointers enabled unconditionally
* `never` - Hashed pointers disabled unconditionally
*/
enum hash_pointers_policy {
HASH_PTR_AUTO = 0,
HASH_PTR_ALWAYS,
HASH_PTR_NEVER
};
static enum hash_pointers_policy hash_pointers_mode __initdata;
noinline
static unsigned long long simple_strntoull(const char *startp, char **endp, unsigned int base, size_t max_chars)
{
const char *cp;
unsigned long long result = 0ULL;
size_t prefix_chars;
unsigned int rv;
cp = _parse_integer_fixup_radix(startp, &base);
prefix_chars = cp - startp;
if (prefix_chars < max_chars) {
rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
/* FIXME */
cp += (rv & ~KSTRTOX_OVERFLOW);
} else {
/* Field too short for prefix + digit, skip over without converting */
cp = startp + max_chars;
}
if (endp)
*endp = (char *)cp;
return result;
}
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoull instead.
*/
noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
return simple_strntoull(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoull);
/**
* simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoul instead.
*/
unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
return simple_strtoull(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtoul);
unsigned long simple_strntoul(const char *cp, char **endp, unsigned int base,
size_t max_chars)
{
return simple_strntoull(cp, endp, base, max_chars);
}
EXPORT_SYMBOL(simple_strntoul);
/**
* simple_strtol - convert a string to a signed long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtol instead.
*/
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
if (*cp == '-')
return -simple_strtoul(cp + 1, endp, base);
return simple_strtoul(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtol);
noinline
static long long simple_strntoll(const char *cp, char **endp, unsigned int base, size_t max_chars)
{
/*
* simple_strntoull() safely handles receiving max_chars==0 in the
* case cp[0] == '-' && max_chars == 1.
* If max_chars == 0 we can drop through and pass it to simple_strntoull()
* and the content of *cp is irrelevant.
*/
if (*cp == '-' && max_chars > 0)
return -simple_strntoull(cp + 1, endp, base, max_chars - 1);
return simple_strntoull(cp, endp, base, max_chars);
}
/**
* simple_strtoll - convert a string to a signed long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoll instead.
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
return simple_strntoll(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoll);
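/*
* Preferred replacement (a sketch): the kstrto*() helpers (see
* linux/kstrtox.h) fail instead of silently stopping at the first invalid
* character or overflowing:
*
*   unsigned long long val;
*   int ret;
*
*   ret = kstrtoull(buf, 10, &val);
*   if (ret)
*           return ret;
*/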
static inline int skip_atoi(const char **s)
{
int i = 0;
do {
i = i*10 + *((*s)++) - '0';
} while (isdigit(**s));
return i;
}
/*
* Decimal conversion is by far the most typical, and is used for
* /proc and /sys data. This directly impacts e.g. top performance
* with many processes running. We optimize it for speed by emitting
* two characters at a time, using a 200 byte lookup table. This
* roughly halves the number of multiplications compared to computing
* the digits one at a time. Implementation strongly inspired by the
* previous version, which in turn used ideas described at
* <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
* from the author, Douglas W. Jones).
*
* It turns out there is precisely one 26 bit fixed-point
* approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
* holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
* range happens to be somewhat larger (x <= 1073741898), but that's
* irrelevant for our purpose.
*
* For dividing a number in the range [10^4, 10^6-1] by 100, we still
* need a 32x32->64 bit multiply, so we simply use the same constant.
*
* For dividing a number in the range [100, 10^4-1] by 100, there are
* several options. The simplest is (x * 0x147b) >> 19, which is valid
* for all x <= 43698.
*/
static const u16 decpair[100] = {
#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
_( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
_(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
_(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
_(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
_(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
_(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
_(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
_(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
_(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
_(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
#undef _
};
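/*
* Worked instance of the approximation above (a sketch): with
* a == 0x28f5c29 and x == 12345678,
*
*   ((u64)12345678 * 0x28f5c29) >> 32 == 123456 == 12345678 / 100
*/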
/*
* This will print a single '0' even if r == 0, since we would
* immediately jump to out_r where two 0s would be written but only
* one of them accounted for in buf. This is needed by ip4_string
* below. All other callers pass a non-zero value of r.
*/
static noinline_for_stack
char *put_dec_trunc8(char *buf, unsigned r)
{
unsigned q;
/* 1 <= r < 10^8 */
if (r < 100)
goto out_r;
/* 100 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 1 <= q < 10^6 */
if (q < 100)
goto out_q;
/* 100 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
/* 1 <= r < 10^4 */
if (r < 100)
goto out_r;
/* 100 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
out_q:
/* 1 <= q < 100 */
r = q;
out_r:
/* 1 <= r < 100 */
*((u16 *)buf) = decpair[r];
buf += r < 10 ? 1 : 2;
return buf;
}
#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
static noinline_for_stack
char *put_dec_full8(char *buf, unsigned r)
{
unsigned q;
/* 0 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
buf += 2;
return buf;
}
static noinline_for_stack
char *put_dec(char *buf, unsigned long long n)
{
if (n >= 100*1000*1000)
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
/* 1 <= n <= 1.6e11 */
if (n >= 100*1000*1000)
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
/* 1 <= n < 1e8 */
return put_dec_trunc8(buf, n);
}
#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64
static void
put_dec_full4(char *buf, unsigned r)
{
unsigned q;
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
}
/*
* Call put_dec_full4 on x % 10000, return x / 10000.
* The approximation x/10000 == (x * 0x346DC5D7) >> 43
* holds for all x < 1,128,869,999. The largest value this
* helper will ever be asked to convert is 1,125,520,955.
* (second call in the put_dec code, assuming n is all-ones).
*/
static noinline_for_stack
unsigned put_dec_helper4(char *buf, unsigned x)
{
uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
put_dec_full4(buf, x - q * 10000);
return q;
}
/* Based on code by Douglas W. Jones found at
* <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
* (with permission from the author).
* Performs no 64-bit division and hence should be fast on 32-bit machines.
*/
static
char *put_dec(char *buf, unsigned long long n)
{
uint32_t d3, d2, d1, q, h;
if (n < 100*1000*1000)
return put_dec_trunc8(buf, n);
d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
h = (n >> 32);
d2 = (h ) & 0xffff;
d3 = (h >> 16); /* implicit "& 0xffff" */
/* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
= 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
q = put_dec_helper4(buf, q);
q += 7671 * d3 + 9496 * d2 + 6 * d1;
q = put_dec_helper4(buf+4, q);
q += 4749 * d3 + 42 * d2;
q = put_dec_helper4(buf+8, q);
q += 281 * d3;
buf += 12;
if (q)
buf = put_dec_trunc8(buf, q);
else while (buf[-1] == '0')
--buf;
return buf;
}
#endif
/*
* Convert passed number to decimal string.
* Returns the length of string. On buffer overflow, returns 0.
*
* If speed is not important, use snprintf(). It's easy to read the code.
*/
int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[sizeof(num) * 3] __aligned(2);
int idx, len;
/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
if (num <= 9) {
tmp[0] = '0' + num;
len = 1;
} else {
len = put_dec(tmp, num) - tmp;
}
if (len > size || width > size)
return 0;
if (width > len) {
width = width - len;
for (idx = 0; idx < width; idx++)
buf[idx] = ' ';
} else {
width = 0;
}
for (idx = 0; idx < len; ++idx)
buf[idx + width] = tmp[len - idx - 1];
return len + width;
}
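/*
 * Usage sketch (hypothetical caller): num_to_str() right-aligns the
 * number inside the requested width and does not NUL-terminate:
 *
 *	char out[16];
 *	int len = num_to_str(out, sizeof(out), 42, 4);
 *	// out begins with "  42", len == 4
 */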
#define SIGN 1 /* unsigned/signed */
#define LEFT 2 /* left justified */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define ZEROPAD 16 /* pad with zero, must be 16 == '0' - ' ' */
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
static_assert(ZEROPAD == ('0' - ' '));
static_assert(SMALL == ('a' ^ 'A'));
enum format_state {
FORMAT_STATE_NONE, /* Just a string part */
FORMAT_STATE_NUM,
FORMAT_STATE_WIDTH,
FORMAT_STATE_PRECISION,
FORMAT_STATE_CHAR,
FORMAT_STATE_STR,
FORMAT_STATE_PTR,
FORMAT_STATE_PERCENT_CHAR,
FORMAT_STATE_INVALID,
};
struct printf_spec {
unsigned char flags; /* flags to number() */
unsigned char base; /* number base, 8, 10 or 16 only */
short precision; /* # of digits/chars */
int field_width; /* width of output field */
} __packed;
static_assert(sizeof(struct printf_spec) == 8);
#define FIELD_WIDTH_MAX ((1 << 23) - 1)
#define PRECISION_MAX ((1 << 15) - 1)
static noinline_for_stack
char *number(char *buf, char *end, unsigned long long num,
struct printf_spec spec)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[3 * sizeof(num)] __aligned(2);
char sign;
char locase;
int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
int i;
bool is_zero = num == 0LL;
int field_width = spec.field_width;
int precision = spec.precision;
/* locase = 0 or 0x20. ORing digits or letters with 'locase'
* produces same digits or (maybe lowercased) letters */
locase = (spec.flags & SMALL);
if (spec.flags & LEFT)
spec.flags &= ~ZEROPAD;
sign = 0;
if (spec.flags & SIGN) {
if ((signed long long)num < 0) {
sign = '-';
num = -(signed long long)num;
field_width--;
} else if (spec.flags & PLUS) {
sign = '+';
field_width--;
} else if (spec.flags & SPACE) {
sign = ' ';
field_width--;
}
}
if (need_pfx) {
if (spec.base == 16)
field_width -= 2;
else if (!is_zero)
field_width--;
}
/* generate full string in tmp[], in reverse order */
i = 0;
if (num < spec.base)
tmp[i++] = hex_asc_upper[num] | locase;
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
if (spec.base == 16)
shift = 4;
do {
tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase);
num >>= shift;
} while (num);
} else { /* base 10 */
i = put_dec(tmp, num) - tmp;
}
/* printing 100 using %2d gives "100", not "00" */
if (i > precision)
precision = i;
/* leading space padding */
field_width -= precision;
if (!(spec.flags & (ZEROPAD | LEFT))) {
while (--field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
}
}
/* sign */
if (sign) {
if (buf < end)
*buf = sign;
++buf;
}
/* "0x" / "0" prefix */
if (need_pfx) {
if (spec.base == 16 || !is_zero) {
if (buf < end)
*buf = '0';
++buf;
}
if (spec.base == 16) {
if (buf < end)
*buf = ('X' | locase);
++buf;
}
}
/* zero or space padding */
if (!(spec.flags & LEFT)) {
char c = ' ' + (spec.flags & ZEROPAD);
while (--field_width >= 0) {
if (buf < end)
*buf = c;
++buf;
}
}
/* hmm even more zero padding? */
while (i <= --precision) {
if (buf < end)
*buf = '0';
++buf;
}
/* actual digits of result */
while (--i >= 0) {
if (buf < end)
*buf = tmp[i];
++buf;
}
/* trailing space padding */
while (--field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
static noinline_for_stack
char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
{
struct printf_spec spec;
spec.field_width = 2 + 2 * size; /* 0x + hex */
spec.flags = SPECIAL | SMALL | ZEROPAD;
spec.base = 16;
spec.precision = -1;
return number(buf, end, num, spec);
}
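/*
 * Illustrative example: special_hex_number(buf, end, 0xdead, 4) renders
 * "0x0000dead": a "0x" prefix plus the value zero-padded to 2 * size
 * hex digits.
 */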
static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
{
size_t size;
if (buf >= end) /* nowhere to put anything */
return;
size = end - buf;
if (size <= spaces) {
memset(buf, ' ', size);
return;
}
if (len) {
if (len > size - spaces)
len = size - spaces;
memmove(buf + spaces, buf, len);
}
memset(buf, ' ', spaces);
}
/*
* Handle field width padding for a string.
* @buf: current buffer position
* @n: length of string
* @end: end of output buffer
* @spec: for field width and flags
* Returns: new buffer position after padding.
*/
static noinline_for_stack
char *widen_string(char *buf, int n, char *end, struct printf_spec spec)
{
unsigned spaces;
if (likely(n >= spec.field_width))
return buf;
/* we want to pad the sucker */
spaces = spec.field_width - n;
if (!(spec.flags & LEFT)) {
move_right(buf - n, end, n, spaces);
return buf + spaces;
}
while (spaces--) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
/* Handle string from a well known address. */
static char *string_nocheck(char *buf, char *end, const char *s,
struct printf_spec spec)
{
int len = 0;
int lim = spec.precision;
while (lim--) {
char c = *s++;
if (!c)
break;
if (buf < end)
*buf = c;
++buf;
++len;
}
return widen_string(buf, len, end, spec);
}
static char *err_ptr(char *buf, char *end, void *ptr,
struct printf_spec spec)
{
int err = PTR_ERR(ptr);
const char *sym = errname(err);
if (sym)
return string_nocheck(buf, end, sym, spec);
/*
* Somebody passed ERR_PTR(-1234) or some other non-existing
* Efoo - or perhaps CONFIG_SYMBOLIC_ERRNAME=n. Fall back to
* printing it as its decimal representation.
*/
spec.flags |= SIGN;
spec.base = 10;
return number(buf, end, err, spec);
}
/* Be careful: error messages must fit into the given buffer. */
static char *error_string(char *buf, char *end, const char *s,
struct printf_spec spec)
{
/*
* Hard limit to avoid completely insane messages. It works pretty
* well in practice because the error strings emitted by the many
* pointer format modifiers are short.
*/
if (spec.precision == -1)
spec.precision = 2 * sizeof(void *);
return string_nocheck(buf, end, s, spec);
}
/*
* Do not call any complex external code here. Nested printk()/vsprintf()
* might cause infinite loops. Failures might break printk() and would
* be hard to debug.
*/
static const char *check_pointer_msg(const void *ptr)
{
if (!ptr)
return "(null)";
if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
return "(efault)";
return NULL;
}
static int check_pointer(char **buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *err_msg;
err_msg = check_pointer_msg(ptr);
if (err_msg) {
*buf = error_string(*buf, end, err_msg, spec);
return -EFAULT;
}
return 0;
}
static noinline_for_stack
char *string(char *buf, char *end, const char *s,
struct printf_spec spec)
{
if (check_pointer(&buf, end, s, spec))
return buf;
return string_nocheck(buf, end, s, spec);
}
static char *pointer_string(char *buf, char *end,
const void *ptr,
struct printf_spec spec)
{
spec.base = 16;
spec.flags |= SMALL;
if (spec.field_width == -1) {
spec.field_width = 2 * sizeof(ptr);
spec.flags |= ZEROPAD;
}
return number(buf, end, (unsigned long int)ptr, spec);
}
/* Make pointers available for printing early in the boot sequence. */
static int debug_boot_weak_hash __ro_after_init;
static int __init debug_boot_weak_hash_enable(char *str)
{
debug_boot_weak_hash = 1;
pr_info("debug_boot_weak_hash enabled\n");
return 0;
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
static bool filled_random_ptr_key __read_mostly;
static siphash_key_t ptr_key __read_mostly;
static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data)
{
get_random_bytes(&ptr_key, sizeof(ptr_key));
/* Pairs with smp_rmb() before reading ptr_key. */
smp_wmb();
WRITE_ONCE(filled_random_ptr_key, true);
return NOTIFY_DONE;
}
static int __init vsprintf_init_hashval(void)
{
static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key };
execute_with_initialized_rng(&fill_ptr_key_nb);
return 0;
}
subsys_initcall(vsprintf_init_hashval)
/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
unsigned long hashval;
if (!READ_ONCE(filled_random_ptr_key))
return -EBUSY;
/* Pairs with smp_wmb() after writing ptr_key. */
smp_rmb();
#ifdef CONFIG_64BIT
hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
/*
* Mask off the first 32 bits, this makes explicit that we have
* modified the address (and 32 bits is plenty for a unique ID).
*/
hashval = hashval & 0xffffffff;
#else
hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
#endif
*hashval_out = hashval;
return 0;
}
int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
return __ptr_to_hashval(ptr, hashval_out);
}
static char *ptr_to_id(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
int ret;
/*
* Print the real pointer value for NULL and error pointers,
* as they are not actual addresses.
*/
if (IS_ERR_OR_NULL(ptr))
return pointer_string(buf, end, ptr, spec);
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
hashval = hash_long((unsigned long)ptr, 32);
return pointer_string(buf, end, (const void *)hashval, spec);
}
ret = __ptr_to_hashval(ptr, &hashval);
if (ret) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
return error_string(buf, end, str, spec);
}
return pointer_string(buf, end, (const void *)hashval, spec);
}
static char *default_pointer(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
/*
* default is to _not_ leak addresses, so hash before printing,
* unless no_hash_pointers is specified on the command line.
*/
if (unlikely(no_hash_pointers))
return pointer_string(buf, end, ptr, spec);
return ptr_to_id(buf, end, ptr, spec);
}
int kptr_restrict __read_mostly;
static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
switch (kptr_restrict) {
case 0:
/* Handle as %p, hash and do _not_ leak addresses. */
return default_pointer(buf, end, ptr, spec);
case 1: {
const struct cred *cred;
/*
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
if (in_hardirq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
spec.field_width = 2 * sizeof(ptr);
return error_string(buf, end, "pK-error", spec);
}
/*
* Only print the real pointer value if the current
* process has CAP_SYSLOG and is running with the
* same credentials it started with. This is because
* access to files is checked at open() time, but %pK
* checks permission at read() time. We don't want to
* leak pointer values if a binary opens a file using
* %pK and then elevates privileges before reading it.
*/
cred = current_cred();
if (!has_capability_noaudit(current, CAP_SYSLOG) ||
!uid_eq(cred->euid, cred->uid) ||
!gid_eq(cred->egid, cred->gid))
ptr = NULL;
break;
}
case 2:
default:
/* Always print 0's for %pK */
ptr = NULL;
break;
}
return pointer_string(buf, end, ptr, spec);
}
static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
const char *fmt)
{
const char *array[4], *s;
const struct dentry *p;
int depth;
int i, n;
switch (fmt[1]) {
case '2': case '3': case '4':
depth = fmt[1] - '0';
break;
default:
depth = 1;
}
rcu_read_lock();
for (i = 0; i < depth; i++, d = p) {
if (check_pointer(&buf, end, d, spec)) {
rcu_read_unlock();
return buf;
}
p = READ_ONCE(d->d_parent);
array[i] = READ_ONCE(d->d_name.name);
if (p == d) {
if (i)
array[i] = "";
i++;
break;
}
}
s = array[--i];
for (n = 0; n != spec.precision; n++, buf++) {
char c = *s++;
if (!c) {
if (!i)
break;
c = '/';
s = array[--i];
}
if (buf < end)
*buf = c;
}
rcu_read_unlock();
return widen_string(buf, n, end, spec);
}
static noinline_for_stack
char *file_dentry_name(char *buf, char *end, const struct file *f,
struct printf_spec spec, const char *fmt)
{
if (check_pointer(&buf, end, f, spec))
return buf;
return dentry_name(buf, end, f->f_path.dentry, spec, fmt);
}
#ifdef CONFIG_BLOCK
static noinline_for_stack
char *bdev_name(char *buf, char *end, struct block_device *bdev,
struct printf_spec spec, const char *fmt)
{
struct gendisk *hd;
if (check_pointer(&buf, end, bdev, spec))
return buf;
hd = bdev->bd_disk;
buf = string(buf, end, hd->disk_name, spec);
if (bdev_is_partition(bdev)) {
if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
if (buf < end)
*buf = 'p';
buf++;
}
buf = number(buf, end, bdev_partno(bdev), spec);
}
return buf;
}
#endif
static noinline_for_stack
char *symbol_string(char *buf, char *end, void *ptr,
struct printf_spec spec, const char *fmt)
{
unsigned long value;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
#endif
if (fmt[1] == 'R')
ptr = __builtin_extract_return_addr(ptr);
value = (unsigned long)ptr;
#ifdef CONFIG_KALLSYMS
if (*fmt == 'B' && fmt[1] == 'b')
sprint_backtrace_build_id(sym, value);
else if (*fmt == 'B')
sprint_backtrace(sym, value);
else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
sprint_symbol_build_id(sym, value);
else if (*fmt != 's')
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
return string_nocheck(buf, end, sym, spec);
#else
return special_hex_number(buf, end, value, sizeof(void *));
#endif
}
static const struct printf_spec default_str_spec = {
.field_width = -1,
.precision = -1,
};
static const struct printf_spec default_flag_spec = {
.base = 16,
.precision = -1,
.flags = SPECIAL | SMALL,
};
static const struct printf_spec default_dec_spec = {
.base = 10,
.precision = -1,
};
static const struct printf_spec default_dec02_spec = {
.base = 10,
.field_width = 2,
.precision = -1,
.flags = ZEROPAD,
};
static const struct printf_spec default_dec04_spec = {
.base = 10,
.field_width = 4,
.precision = -1,
.flags = ZEROPAD,
};
static noinline_for_stack
char *hex_range(char *buf, char *end, u64 start_val, u64 end_val,
struct printf_spec spec)
{
buf = number(buf, end, start_val, spec);
if (start_val == end_val)
return buf;
if (buf < end)
*buf = '-';
++buf;
return number(buf, end, end_val, spec);
}
static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
struct printf_spec spec, const char *fmt)
{
#ifndef IO_RSRC_PRINTK_SIZE
#define IO_RSRC_PRINTK_SIZE 6
#endif
#ifndef MEM_RSRC_PRINTK_SIZE
#define MEM_RSRC_PRINTK_SIZE 10
#endif
static const struct printf_spec io_spec = {
.base = 16,
.field_width = IO_RSRC_PRINTK_SIZE,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
static const struct printf_spec mem_spec = {
.base = 16,
.field_width = MEM_RSRC_PRINTK_SIZE,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
static const struct printf_spec bus_spec = {
.base = 16,
.field_width = 2,
.precision = -1,
.flags = SMALL | ZEROPAD,
};
static const struct printf_spec str_spec = {
.field_width = -1,
.precision = 10,
.flags = LEFT,
};
/* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
* 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
char sym[MAX(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
char *p = sym, *pend = sym + sizeof(sym);
int decode = (fmt[0] == 'R') ? 1 : 0;
const struct printf_spec *specp;
if (check_pointer(&buf, end, res, spec))
return buf;
*p++ = '[';
if (res->flags & IORESOURCE_IO) {
p = string_nocheck(p, pend, "io ", str_spec);
specp = &io_spec;
} else if (res->flags & IORESOURCE_MEM) {
p = string_nocheck(p, pend, "mem ", str_spec);
specp = &mem_spec;
} else if (res->flags & IORESOURCE_IRQ) {
p = string_nocheck(p, pend, "irq ", str_spec);
specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_DMA) {
p = string_nocheck(p, pend, "dma ", str_spec);
specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_BUS) {
p = string_nocheck(p, pend, "bus ", str_spec);
specp = &bus_spec;
} else {
p = string_nocheck(p, pend, "??? ", str_spec);
specp = &mem_spec;
decode = 0;
}
if (decode && res->flags & IORESOURCE_UNSET) {
p = string_nocheck(p, pend, "size ", str_spec);
p = number(p, pend, resource_size(res), *specp);
} else {
p = hex_range(p, pend, res->start, res->end, *specp);
}
if (decode) {
if (res->flags & IORESOURCE_MEM_64)
p = string_nocheck(p, pend, " 64bit", str_spec);
if (res->flags & IORESOURCE_PREFETCH)
p = string_nocheck(p, pend, " pref", str_spec);
if (res->flags & IORESOURCE_WINDOW)
p = string_nocheck(p, pend, " window", str_spec);
if (res->flags & IORESOURCE_DISABLED)
p = string_nocheck(p, pend, " disabled", str_spec);
} else {
p = string_nocheck(p, pend, " flags ", str_spec);
p = number(p, pend, res->flags, default_flag_spec);
}
*p++ = ']';
*p = '\0';
return string_nocheck(buf, end, sym, spec);
}
static noinline_for_stack
char *range_string(char *buf, char *end, const struct range *range,
struct printf_spec spec, const char *fmt)
{
char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")];
char *p = sym, *pend = sym + sizeof(sym);
struct printf_spec range_spec = {
.field_width = 2 + 2 * sizeof(range->start), /* 0x + 2 * 8 */
.flags = SPECIAL | SMALL | ZEROPAD,
.base = 16,
.precision = -1,
};
if (check_pointer(&buf, end, range, spec))
return buf;
p = string_nocheck(p, pend, "[range ", default_str_spec);
p = hex_range(p, pend, range->start, range->end, range_spec);
*p++ = ']';
*p = '\0';
return string_nocheck(buf, end, sym, spec);
}
static noinline_for_stack
char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
const char *fmt)
{
int i, len = 1; /* if '%ph[CDN]' has no explicit field width, the width
stays at its default negative value; fall back to one byte */
char separator;
if (spec.field_width == 0)
/* nothing to print */
return buf;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'C':
separator = ':';
break;
case 'D':
separator = '-';
break;
case 'N':
separator = 0;
break;
default:
separator = ' ';
break;
}
if (spec.field_width > 0)
len = min_t(int, spec.field_width, 64);
for (i = 0; i < len; ++i) {
if (buf < end)
*buf = hex_asc_hi(addr[i]);
++buf;
if (buf < end)
*buf = hex_asc_lo(addr[i]);
++buf;
if (separator && i != len - 1) {
if (buf < end)
*buf = separator;
++buf;
}
}
return buf;
}
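/*
 * Illustrative output for a hypothetical u8 buf[] = { 0x1f, 0x2e, 0x3d }:
 *	"%3ph"	-> "1f 2e 3d"
 *	"%3phC"	-> "1f:2e:3d"
 *	"%3phD"	-> "1f-2e-3d"
 *	"%3phN"	-> "1f2e3d"
 */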
static noinline_for_stack
char *bitmap_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
const int CHUNKSZ = 32;
int nr_bits = max_t(int, spec.field_width, 0);
int i, chunksz;
bool first = true;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
/* reused to print numbers */
spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
chunksz = nr_bits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
u32 chunkmask, val;
int word, bit;
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (bitmap[word] >> bit) & chunkmask;
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
spec.field_width = DIV_ROUND_UP(chunksz, 4);
buf = number(buf, end, val, spec);
chunksz = CHUNKSZ;
}
return buf;
}
static noinline_for_stack
char *bitmap_list_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
bool first = true;
int rbot, rtop;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
buf = number(buf, end, rbot, default_dec_spec);
if (rtop == rbot + 1)
continue;
if (buf < end)
*buf = '-';
buf = number(++buf, end, rtop - 1, default_dec_spec);
}
return buf;
}
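/*
 * Illustrative output for a hypothetical bitmap with bits 0-3 and 7 set
 * (nr_bits == 8 taken from the field width):
 *	"%8b"	-> "8f"		(hex chunks, bitmap_string())
 *	"%8bl"	-> "0-3,7"	(range list, bitmap_list_string())
 */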
static noinline_for_stack
char *mac_address_string(char *buf, char *end, u8 *addr,
struct printf_spec spec, const char *fmt)
{
char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
char *p = mac_addr;
int i;
char separator;
bool reversed = false;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'F':
separator = '-';
break;
case 'R':
reversed = true;
fallthrough;
default:
separator = ':';
break;
}
for (i = 0; i < 6; i++) {
if (reversed)
p = hex_byte_pack(p, addr[5 - i]);
else
p = hex_byte_pack(p, addr[i]);
if (fmt[0] == 'M' && i != 5)
*p++ = separator;
}
*p = '\0';
return string_nocheck(buf, end, mac_addr, spec);
}
static noinline_for_stack
char *ip4_string(char *p, const u8 *addr, const char *fmt)
{
int i;
bool leading_zeros = (fmt[0] == 'i');
int index;
int step;
switch (fmt[2]) {
case 'h':
#ifdef __BIG_ENDIAN
index = 0;
step = 1;
#else
index = 3;
step = -1;
#endif
break;
case 'l':
index = 3;
step = -1;
break;
case 'n':
case 'b':
default:
index = 0;
step = 1;
break;
}
for (i = 0; i < 4; i++) {
char temp[4] __aligned(2); /* hold each IP quad in reverse order */
int digits = put_dec_trunc8(temp, addr[index]) - temp;
if (leading_zeros) {
if (digits < 3)
*p++ = '0';
if (digits < 2)
*p++ = '0';
}
/* reverse the digits in the quad */
while (digits--)
*p++ = temp[digits];
if (i < 3)
*p++ = '.';
index += step;
}
*p = '\0';
return p;
}
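/*
 * Illustrative output for a hypothetical address { 192, 168, 0, 1 }:
 *	"%pI4"	-> "192.168.0.1"	(no leading zeros)
 *	"%pi4"	-> "192.168.000.001"	(leading zeros, fixed width)
 */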
static noinline_for_stack
char *ip6_compressed_string(char *p, const char *addr)
{
int i, j, range;
unsigned char zerolength[8];
int longest = 1;
int colonpos = -1;
u16 word;
u8 hi, lo;
bool needcolon = false;
bool useIPv4;
struct in6_addr in6;
memcpy(&in6, addr, sizeof(struct in6_addr));
useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
memset(zerolength, 0, sizeof(zerolength));
if (useIPv4)
range = 6;
else
range = 8;
/* find position of longest 0 run */
for (i = 0; i < range; i++) {
for (j = i; j < range; j++) {
if (in6.s6_addr16[j] != 0)
break;
zerolength[i]++;
}
}
for (i = 0; i < range; i++) {
if (zerolength[i] > longest) {
longest = zerolength[i];
colonpos = i;
}
}
if (longest == 1) /* don't compress a single 0 */
colonpos = -1;
/* emit address */
for (i = 0; i < range; i++) {
if (i == colonpos) {
if (needcolon || i == 0)
*p++ = ':';
*p++ = ':';
needcolon = false;
i += longest - 1;
continue;
}
if (needcolon) {
*p++ = ':';
needcolon = false;
}
/* hex u16 without leading 0s */
word = ntohs(in6.s6_addr16[i]);
hi = word >> 8;
lo = word & 0xff;
if (hi) {
if (hi > 0x0f)
p = hex_byte_pack(p, hi);
else
*p++ = hex_asc_lo(hi);
p = hex_byte_pack(p, lo);
}
else if (lo > 0x0f)
p = hex_byte_pack(p, lo);
else
*p++ = hex_asc_lo(lo);
needcolon = true;
}
if (useIPv4) {
if (needcolon)
*p++ = ':';
p = ip4_string(p, &in6.s6_addr[12], "I4");
}
*p = '\0';
return p;
}
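/*
 * Illustrative example: for the loopback address ::1 the longest run of
 * zero words (seven of them) collapses into "::", giving "::1"; a
 * v4-mapped address such as ::ffff:192.0.2.1 keeps its IPv4 tail and is
 * rendered as "::ffff:192.0.2.1".
 */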
static noinline_for_stack
char *ip6_string(char *p, const char *addr, const char *fmt)
{
int i;
for (i = 0; i < 8; i++) {
p = hex_byte_pack(p, *addr++);
p = hex_byte_pack(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
*p = '\0';
return p;
}
static noinline_for_stack
char *ip6_addr_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
if (fmt[0] == 'I' && fmt[2] == 'c')
ip6_compressed_string(ip6_addr, addr);
else
ip6_string(ip6_addr, addr, fmt);
return string_nocheck(buf, end, ip6_addr, spec);
}
static noinline_for_stack
char *ip4_addr_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char ip4_addr[sizeof("255.255.255.255")];
ip4_string(ip4_addr, addr, fmt);
return string_nocheck(buf, end, ip4_addr, spec);
}
static noinline_for_stack
char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
struct printf_spec spec, const char *fmt)
{
bool have_p = false, have_s = false, have_f = false, have_c = false;
char ip6_addr[sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") +
sizeof(":12345") + sizeof("/123456789") +
sizeof("%1234567890")];
char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr);
const u8 *addr = (const u8 *) &sa->sin6_addr;
char fmt6[2] = { fmt[0], '6' };
u8 off = 0;
fmt++;
while (isalpha(*++fmt)) {
switch (*fmt) {
case 'p':
have_p = true;
break;
case 'f':
have_f = true;
break;
case 's':
have_s = true;
break;
case 'c':
have_c = true;
break;
}
}
if (have_p || have_s || have_f) {
*p = '[';
off = 1;
}
if (fmt6[0] == 'I' && have_c)
p = ip6_compressed_string(ip6_addr + off, addr);
else
p = ip6_string(ip6_addr + off, addr, fmt6);
if (have_p || have_s || have_f)
*p++ = ']';
if (have_p) {
*p++ = ':';
p = number(p, pend, ntohs(sa->sin6_port), spec);
}
if (have_f) {
*p++ = '/';
p = number(p, pend, ntohl(sa->sin6_flowinfo &
IPV6_FLOWINFO_MASK), spec);
}
if (have_s) {
*p++ = '%';
p = number(p, pend, sa->sin6_scope_id, spec);
}
*p = '\0';
return string_nocheck(buf, end, ip6_addr, spec);
}
static noinline_for_stack
char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
struct printf_spec spec, const char *fmt)
{
bool have_p = false;
char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")];
char *pend = ip4_addr + sizeof(ip4_addr);
const u8 *addr = (const u8 *) &sa->sin_addr.s_addr;
char fmt4[3] = { fmt[0], '4', 0 };
fmt++;
while (isalpha(*++fmt)) {
switch (*fmt) {
case 'p':
have_p = true;
break;
case 'h':
case 'l':
case 'n':
case 'b':
fmt4[2] = *fmt;
break;
}
}
p = ip4_string(ip4_addr, addr, fmt4);
if (have_p) {
*p++ = ':';
p = number(p, pend, ntohs(sa->sin_port), spec);
}
*p = '\0';
return string_nocheck(buf, end, ip4_addr, spec);
}
static noinline_for_stack
char *ip_addr_string(char *buf, char *end, const void *ptr,
struct printf_spec spec, const char *fmt)
{
char *err_fmt_msg;
if (check_pointer(&buf, end, ptr, spec))
return buf;
switch (fmt[1]) {
case '6':
return ip6_addr_string(buf, end, ptr, spec, fmt);
case '4':
return ip4_addr_string(buf, end, ptr, spec, fmt);
case 'S': {
const union {
struct sockaddr raw;
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} *sa = ptr;
switch (sa->raw.sa_family) {
case AF_INET:
return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
case AF_INET6:
return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
default:
return error_string(buf, end, "(einval)", spec);
}}
}
err_fmt_msg = fmt[0] == 'i' ? "(%pi?)" : "(%pI?)";
return error_string(buf, end, err_fmt_msg, spec);
}
static noinline_for_stack
char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
const char *fmt)
{
bool found = true;
int count = 1;
unsigned int flags = 0;
int len;
if (spec.field_width == 0)
return buf; /* nothing to print */
if (check_pointer(&buf, end, addr, spec))
return buf;
do {
switch (fmt[count++]) {
case 'a':
flags |= ESCAPE_ANY;
break;
case 'c':
flags |= ESCAPE_SPECIAL;
break;
case 'h':
flags |= ESCAPE_HEX;
break;
case 'n':
flags |= ESCAPE_NULL;
break;
case 'o':
flags |= ESCAPE_OCTAL;
break;
case 'p':
flags |= ESCAPE_NP;
break;
case 's':
flags |= ESCAPE_SPACE;
break;
default:
found = false;
break;
}
} while (found);
if (!flags)
flags = ESCAPE_ANY_NP;
len = spec.field_width < 0 ? 1 : spec.field_width;
/*
* string_escape_mem() writes as many characters as it can to
* the given buffer, and returns the total size of the output
* had the buffer been big enough.
*/
buf += string_escape_mem(addr, len, buf, buf < end ? end - buf : 0, flags, NULL);
return buf;
}
__diag_push();
__diag_ignore(GCC, all, "-Wsuggest-attribute=format",
"Not a valid __printf() conversion candidate.");
static char *va_format(char *buf, char *end, struct va_format *va_fmt,
struct printf_spec spec)
{
va_list va;
if (check_pointer(&buf, end, va_fmt, spec))
return buf;
va_copy(va, *va_fmt->va);
buf += vsnprintf(buf, end > buf ? end - buf : 0, va_fmt->fmt, va);
va_end(va);
return buf;
}
__diag_pop();
static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char uuid[UUID_STRING_LEN + 1];
char *p = uuid;
int i;
const u8 *index = uuid_index;
bool uc = false;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (*(++fmt)) {
case 'L':
uc = true;
fallthrough;
case 'l':
index = guid_index;
break;
case 'B':
uc = true;
break;
}
for (i = 0; i < 16; i++) {
if (uc)
p = hex_byte_pack_upper(p, addr[index[i]]);
else
p = hex_byte_pack(p, addr[index[i]]);
switch (i) {
case 3:
case 5:
case 7:
case 9:
*p++ = '-';
break;
}
}
*p = 0;
return string_nocheck(buf, end, uuid, spec);
}
static noinline_for_stack
char *netdev_bits(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'F':
num = *(const netdev_features_t *)addr;
size = sizeof(netdev_features_t);
break;
default:
return error_string(buf, end, "(%pN?)", spec);
}
return special_hex_number(buf, end, num, size);
}
static noinline_for_stack
char *fourcc_string(char *buf, char *end, const u32 *fourcc,
struct printf_spec spec, const char *fmt)
{
char output[sizeof("0123 little-endian (0x01234567)")];
char *p = output;
unsigned int i;
bool pixel_fmt = false;
u32 orig, val;
if (fmt[1] != 'c')
return error_string(buf, end, "(%p4?)", spec);
if (check_pointer(&buf, end, fourcc, spec))
return buf;
orig = get_unaligned(fourcc);
switch (fmt[2]) {
case 'h':
if (fmt[3] == 'R')
orig = swab32(orig);
break;
case 'l':
orig = (__force u32)cpu_to_le32(orig);
break;
case 'b':
orig = (__force u32)cpu_to_be32(orig);
break;
case 'c':
/* Pixel formats are printed LSB-first */
pixel_fmt = true;
break;
default:
return error_string(buf, end, "(%p4?)", spec);
}
val = pixel_fmt ? swab32(orig & ~BIT(31)) : orig;
for (i = 0; i < sizeof(u32); i++) {
unsigned char c = val >> ((3 - i) * 8);
/* Print non-control ASCII characters as-is, dot otherwise */
*p++ = isascii(c) && isprint(c) ? c : '.';
}
if (pixel_fmt) {
*p++ = ' ';
strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
p += strlen(p);
}
*p++ = ' ';
*p++ = '(';
p = special_hex_number(p, output + sizeof(output) - 2, orig, sizeof(u32));
*p++ = ')';
*p = '\0';
return string(buf, end, output, spec);
}
static noinline_for_stack
char *address_val(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'd':
num = *(const dma_addr_t *)addr;
size = sizeof(dma_addr_t);
break;
case 'p':
default:
num = *(const phys_addr_t *)addr;
size = sizeof(phys_addr_t);
break;
}
return special_hex_number(buf, end, num, size);
}
static noinline_for_stack
char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r)
{
int year = tm->tm_year + (r ? 0 : 1900);
int mon = tm->tm_mon + (r ? 0 : 1);
buf = number(buf, end, year, default_dec04_spec);
if (buf < end)
*buf = '-';
buf++;
buf = number(buf, end, mon, default_dec02_spec);
if (buf < end)
*buf = '-';
buf++;
return number(buf, end, tm->tm_mday, default_dec02_spec);
}
static noinline_for_stack
char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
{
buf = number(buf, end, tm->tm_hour, default_dec02_spec);
if (buf < end)
*buf = ':';
buf++;
buf = number(buf, end, tm->tm_min, default_dec02_spec);
if (buf < end)
*buf = ':';
buf++;
return number(buf, end, tm->tm_sec, default_dec02_spec);
}
static noinline_for_stack
char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
struct printf_spec spec, const char *fmt)
{
bool have_t = true, have_d = true;
bool raw = false, iso8601_separator = true;
bool found = true;
int count = 2;
if (check_pointer(&buf, end, tm, spec))
return buf;
switch (fmt[count]) {
case 'd':
have_t = false;
count++;
break;
case 't':
have_d = false;
count++;
break;
}
do {
switch (fmt[count++]) {
case 'r':
raw = true;
break;
case 's':
iso8601_separator = false;
break;
default:
found = false;
break;
}
} while (found);
if (have_d)
buf = date_str(buf, end, tm, raw);
if (have_d && have_t) {
if (buf < end)
*buf = iso8601_separator ? 'T' : ' ';
buf++;
}
if (have_t)
buf = time_str(buf, end, tm, raw);
return buf;
}
static noinline_for_stack
char *time64_str(char *buf, char *end, const time64_t time,
struct printf_spec spec, const char *fmt)
{
struct rtc_time rtc_time;
struct tm tm;
time64_to_tm(time, 0, &tm);
rtc_time.tm_sec = tm.tm_sec;
rtc_time.tm_min = tm.tm_min;
rtc_time.tm_hour = tm.tm_hour;
rtc_time.tm_mday = tm.tm_mday;
rtc_time.tm_mon = tm.tm_mon;
rtc_time.tm_year = tm.tm_year;
rtc_time.tm_wday = tm.tm_wday;
rtc_time.tm_yday = tm.tm_yday;
rtc_time.tm_isdst = 0;
return rtc_str(buf, end, &rtc_time, spec, fmt);
}
static noinline_for_stack
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
const char *fmt)
{
switch (fmt[1]) {
case 'R':
return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
case 'T':
return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
default:
return error_string(buf, end, "(%pt?)", spec);
}
}
static noinline_for_stack
char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
const char *fmt)
{
if (!IS_ENABLED(CONFIG_HAVE_CLK))
return error_string(buf, end, "(%pC?)", spec);
if (check_pointer(&buf, end, clk, spec))
return buf;
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
return ptr_to_id(buf, end, clk, spec);
#endif
}
static
char *format_flags(char *buf, char *end, unsigned long flags,
const struct trace_print_flags *names)
{
unsigned long mask;
for ( ; flags && names->name; names++) {
mask = names->mask;
if ((flags & mask) != mask)
continue;
buf = string(buf, end, names->name, default_str_spec);
flags &= ~mask;
if (flags) {
if (buf < end)
*buf = '|';
buf++;
}
}
if (flags)
buf = number(buf, end, flags, default_flag_spec);
return buf;
}
struct page_flags_fields {
int width;
int shift;
int mask;
const struct printf_spec *spec;
const char *name;
};
static const struct page_flags_fields pff[] = {
{SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
&default_dec_spec, "section"},
{NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
&default_dec_spec, "node"},
{ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
&default_dec_spec, "zone"},
{LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
&default_flag_spec, "lastcpupid"},
{KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
&default_flag_spec, "kasantag"},
};
static
char *format_page_flags(char *buf, char *end, unsigned long flags)
{
unsigned long main_flags = flags & PAGEFLAGS_MASK;
bool append = false;
int i;
buf = number(buf, end, flags, default_flag_spec);
if (buf < end)
*buf = '(';
buf++;
/* Page flags from the main area. */
if (main_flags) {
buf = format_flags(buf, end, main_flags, pageflag_names);
append = true;
}
/* Page flags from the fields area */
for (i = 0; i < ARRAY_SIZE(pff); i++) {
/* Skip undefined fields. */
if (!pff[i].width)
continue;
/* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */
if (append) {
if (buf < end)
*buf = '|';
buf++;
}
buf = string(buf, end, pff[i].name, default_str_spec);
if (buf < end)
*buf = '=';
buf++;
buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask,
*pff[i].spec);
append = true;
}
if (buf < end)
*buf = ')';
buf++;
return buf;
}
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
{
unsigned long flags;
const struct trace_print_flags *names;
if (check_pointer(&buf, end, flags_ptr, spec))
return buf;
switch (fmt[1]) {
case 'p':
return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
break;
case 'g':
flags = (__force unsigned long)(*(gfp_t *)flags_ptr);
names = gfpflag_names;
break;
default:
return error_string(buf, end, "(%pG?)", spec);
}
return format_flags(buf, end, flags, names);
}
static noinline_for_stack
char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
char *end)
{
int depth;
/* Loop starting from the root node to the current node. */
for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
/*
* Only get a reference for other nodes (i.e. parent nodes).
* fwnode refcount may be 0 here.
*/
struct fwnode_handle *__fwnode = depth ?
fwnode_get_nth_parent(fwnode, depth) : fwnode;
buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
default_str_spec);
buf = string(buf, end, fwnode_get_name(__fwnode),
default_str_spec);
if (depth)
fwnode_handle_put(__fwnode);
}
return buf;
}
static noinline_for_stack
char *device_node_string(char *buf, char *end, struct device_node *dn,
struct printf_spec spec, const char *fmt)
{
char tbuf[sizeof("xxxx") + 1];
const char *p;
int ret;
char *buf_start = buf;
struct property *prop;
bool has_mult, pass;
struct printf_spec str_spec = spec;
str_spec.field_width = -1;
if (fmt[0] != 'F')
return error_string(buf, end, "(%pO?)", spec);
if (!IS_ENABLED(CONFIG_OF))
return error_string(buf, end, "(%pOF?)", spec);
if (check_pointer(&buf, end, dn, spec))
return buf;
/* simple case without any further format specifiers */
fmt++;
if (fmt[0] == '\0' || strcspn(fmt,"fnpPFcC") > 0)
fmt = "f";
for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
int precision;
if (pass) {
if (buf < end)
*buf = ':';
buf++;
}
switch (*fmt) {
case 'f': /* full_name */
buf = fwnode_full_name_string(of_fwnode_handle(dn), buf,
end);
break;
case 'n': /* name */
p = fwnode_get_name(of_fwnode_handle(dn));
precision = str_spec.precision;
str_spec.precision = strchrnul(p, '@') - p;
buf = string(buf, end, p, str_spec);
str_spec.precision = precision;
break;
case 'p': /* phandle */
buf = number(buf, end, (unsigned int)dn->phandle, default_dec_spec);
break;
case 'P': /* path-spec */
p = fwnode_get_name(of_fwnode_handle(dn));
if (!p[1])
p = "/";
buf = string(buf, end, p, str_spec);
break;
case 'F': /* flags */
tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
tbuf[4] = 0;
buf = string_nocheck(buf, end, tbuf, str_spec);
break;
case 'c': /* major compatible string */
ret = of_property_read_string(dn, "compatible", &p);
if (!ret)
buf = string(buf, end, p, str_spec);
break;
case 'C': /* full compatible string */
has_mult = false;
of_property_for_each_string(dn, "compatible", prop, p) {
if (has_mult)
buf = string_nocheck(buf, end, ",", str_spec);
buf = string_nocheck(buf, end, "\"", str_spec);
buf = string(buf, end, p, str_spec);
buf = string_nocheck(buf, end, "\"", str_spec);
has_mult = true;
}
break;
default:
break;
}
}
return widen_string(buf, buf - buf_start, end, spec);
}
static noinline_for_stack
char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
struct printf_spec spec, const char *fmt)
{
struct printf_spec str_spec = spec;
char *buf_start = buf;
str_spec.field_width = -1;
if (*fmt != 'w')
return error_string(buf, end, "(%pf?)", spec);
if (check_pointer(&buf, end, fwnode, spec))
return buf;
fmt++;
switch (*fmt) {
case 'P': /* name */
buf = string(buf, end, fwnode_get_name(fwnode), str_spec);
break;
case 'f': /* full_name */
default:
buf = fwnode_full_name_string(fwnode, buf, end);
break;
}
return widen_string(buf, buf - buf_start, end, spec);
}
static noinline_for_stack
char *resource_or_range(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
if (*fmt == 'r' && fmt[1] == 'a')
return range_string(buf, end, ptr, spec, fmt);
return resource_string(buf, end, ptr, spec, fmt);
}
void __init hash_pointers_finalize(bool slub_debug)
{
switch (hash_pointers_mode) {
case HASH_PTR_ALWAYS:
no_hash_pointers = false;
break;
case HASH_PTR_NEVER:
no_hash_pointers = true;
break;
case HASH_PTR_AUTO:
default:
no_hash_pointers = slub_debug;
break;
}
if (!no_hash_pointers)
return;
pr_warn("**********************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** This system shows unhashed kernel memory addresses **\n");
pr_warn("** via the console, logs, and other interfaces. This **\n");
pr_warn("** might reduce the security of your system. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging **\n");
pr_warn("** the kernel, report this immediately to your system **\n");
pr_warn("** administrator! **\n");
pr_warn("** **\n");
pr_warn("** Use hash_pointers=always to force this mode off **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("**********************************************************\n");
}
static int __init hash_pointers_mode_parse(char *str)
{
if (!str) {
pr_warn("Hash pointers mode empty; falling back to auto.\n");
hash_pointers_mode = HASH_PTR_AUTO;
} else if (strncmp(str, "auto", 4) == 0) {
pr_info("Hash pointers mode set to auto.\n");
hash_pointers_mode = HASH_PTR_AUTO;
} else if (strncmp(str, "never", 5) == 0) {
pr_info("Hash pointers mode set to never.\n");
hash_pointers_mode = HASH_PTR_NEVER;
} else if (strncmp(str, "always", 6) == 0) {
pr_info("Hash pointers mode set to always.\n");
hash_pointers_mode = HASH_PTR_ALWAYS;
} else {
pr_warn("Unknown hash_pointers mode '%s' specified; assuming auto.\n", str);
hash_pointers_mode = HASH_PTR_AUTO;
}
return 0;
}
early_param("hash_pointers", hash_pointers_mode_parse);
static int __init no_hash_pointers_enable(char *str)
{
return hash_pointers_mode_parse("never");
}
early_param("no_hash_pointers", no_hash_pointers_enable);
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
* specifiers.
*
* Please update scripts/checkpatch.pl when adding/removing conversion
* characters. (Search for "check for vsprintf extension").
*
* Right now we handle:
*
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
* - '[Ss]R' as above with __builtin_extract_return_addr() translation
* - 'S[R]b' as above with module build ID (for use in backtraces)
* - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
* %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
* - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'ra' For struct ranges, e.g., [range 0x0000000000000000 - 0x00000000000000ff]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
* width which must be explicitly specified either as part of the
* format string '%32b[l]' or through '%*b[l]', [l] selects
* range-list format instead of hex format
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
* - 'MF' For a 6-byte MAC FDDI address, it prints the address
* with a dash-separated hex notation
* - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
* [S][pfs]
* Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - 'i' [46] for 'raw' IPv4/IPv6 addresses
* IPv6 omits the colons (01020304...0f)
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
* [S][pfs]
* Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I[6S]c' for IPv6 addresses printed as specified by
* https://tools.ietf.org/html/rfc5952
* - 'E[achnops]' For an escaped buffer, where rules are defined by combination
* of the following flags (see string_escape_mem() for the
* details):
* a - ESCAPE_ANY
* c - ESCAPE_SPECIAL
* h - ESCAPE_HEX
* n - ESCAPE_NULL
* o - ESCAPE_OCTAL
* p - ESCAPE_NP
* s - ESCAPE_SPACE
* By default ESCAPE_ANY_NP is used.
* - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
* Options for %pU are:
* b big endian lower case hex (default)
* B big endian UPPER case hex
* l little endian lower case hex
* L little endian UPPER case hex
* big endian output byte order is:
* [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
* little endian output byte order is:
* [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
* - 'V' For a struct va_format which contains a format string * and va_list *,
* call vsnprintf(->format, *->va_list).
* Implements a "recursive vsnprintf".
* Do not use this feature without some mechanism to verify the
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users.
* Use only for procfs, sysfs and similar files, not printk(); please
* read the documentation (path below) first.
* - 'NF' For a netdev_features_t
* - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
* - '4c[h[R]lb]' For generic FourCC code with raw numerical value. Both are
* displayed in the big-endian format. This is the opposite of V4L2 or
* DRM FourCCs.
* The additional specifiers define what endianness is used to load
* the stored bytes. The data might be interpreted using the host,
* reversed host byte order, little-endian, or big-endian.
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
* a certain separator (' ' by default):
* C colon
* D dash
* N no separator
* The maximum supported length is 64 bytes of the input. Consider
* using print_hex_dump() for larger input.
* - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives
* (default assumed to be phys_addr_t, passed by reference)
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
* - 't[RT][dt][r][s]' For time and date as represented by:
* R struct rtc_time
* T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'G' For flags to be printed as a collection of symbolic strings that would
* construct the specific value. Supported flags given by option:
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
* - 'OF[fnpPcCF]' For a device tree object
* Without any optional arguments prints the full_name
* f device node full_name
* n device node name
* p device node phandle
* P device node path spec (name + @unit)
* F device node flags
* c major compatible string
* C full compatible string
* - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer
* Without an option prints the full name of the node
* f full name
* P node name, including a possible unit address
* - 'x' For printing the address unmodified. Equivalent to "%lx".
* Please read the documentation (path below) before using!
* - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of
* bpf_trace_printk() where [ku] prefix specifies either kernel (k)
* or user (u) memory to probe, and:
* s a string, equivalent to "%s" on direct vsnprintf() use
*
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
*
* There is also a '%pA' format specifier, but it is only intended to be used
* from Rust code to format core::fmt::Arguments. Do *not* use it from C.
* See rust/kernel/print.rs for details.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
switch (*fmt) {
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
fallthrough;
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
case 'r':
return resource_or_range(fmt, buf, end, ptr, spec);
case 'h':
return hex_string(buf, end, ptr, spec, fmt);
case 'b':
switch (fmt[1]) {
case 'l':
return bitmap_list_string(buf, end, ptr, spec, fmt);
default:
return bitmap_string(buf, end, ptr, spec, fmt);
}
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
/* [mM]F (FDDI) */
/* [mM]R (Reverse order; Bluetooth) */
return mac_address_string(buf, end, ptr, spec, fmt);
case 'I': /* Formatted IP supported
* 4: 1.2.3.4
* 6: 0001:0203:...:0708
* 6c: 1::708 or 1::1.2.3.4
*/
case 'i': /* Contiguous:
* 4: 001.002.003.004
* 6: 000102...0f
*/
return ip_addr_string(buf, end, ptr, spec, fmt);
case 'E':
return escaped_string(buf, end, ptr, spec, fmt);
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
return va_format(buf, end, ptr, spec);
case 'K':
return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, spec, fmt);
case '4':
return fourcc_string(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, spec, fmt);
case 'd':
return dentry_name(buf, end, ptr, spec, fmt);
case 't':
return time_and_date(buf, end, ptr, spec, fmt);
case 'C':
return clock(buf, end, ptr, spec, fmt);
case 'D':
return file_dentry_name(buf, end, ptr, spec, fmt);
#ifdef CONFIG_BLOCK
case 'g':
return bdev_name(buf, end, ptr, spec, fmt);
#endif
case 'G':
return flags_string(buf, end, ptr, spec, fmt);
case 'O':
return device_node_string(buf, end, ptr, spec, fmt + 1);
case 'f':
return fwnode_string(buf, end, ptr, spec, fmt + 1);
case 'A':
if (!IS_ENABLED(CONFIG_RUST)) {
WARN_ONCE(1, "Please remove %%pA from non-Rust code\n");
return error_string(buf, end, "(%pA?)", spec);
}
return rust_fmt_argument(buf, end, ptr);
case 'x':
return pointer_string(buf, end, ptr, spec);
case 'e':
/* %pe with a non-ERR_PTR gets treated as plain %p */
if (!IS_ERR(ptr))
return default_pointer(buf, end, ptr, spec);
return err_ptr(buf, end, ptr, spec);
case 'u':
case 'k':
switch (fmt[1]) {
case 's':
return string(buf, end, ptr, spec);
default:
return error_string(buf, end, "(einval)", spec);
}
default:
return default_pointer(buf, end, ptr, spec);
}
}
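/*
 * Illustrative uses of the extensions documented above (names and values
 * are hypothetical, shown only to indicate the output shape):
 *
 *	pr_info("%pS\n",  ptr);		// "some_function+0x10/0x80"
 *	pr_info("%pR\n",  &res);	// "[mem 0x00001000-0x00001fff]"
 *	pr_info("%pI4\n", &be32_addr);	// "192.0.2.1"
 *	pr_info("%pUb\n", uuid_bytes);	// "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
 *	pr_info("%p\n",   ptr);		// hashed id, not the raw address
 */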
struct fmt {
const char *str;
unsigned char state; // enum format_state
unsigned char size; // size of numbers
};
#define SPEC_CHAR(x, flag) [(x)-32] = flag
static unsigned char spec_flag(unsigned char c)
{
static const unsigned char spec_flag_array[] = {
SPEC_CHAR(' ', SPACE),
SPEC_CHAR('#', SPECIAL),
SPEC_CHAR('+', PLUS),
SPEC_CHAR('-', LEFT),
SPEC_CHAR('0', ZEROPAD),
};
c -= 32;
return (c < sizeof(spec_flag_array)) ? spec_flag_array[c] : 0;
}
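/*
 * Illustrative behaviour: spec_flag('0') returns ZEROPAD and
 * spec_flag('-') returns LEFT, while any character that is not a flag
 * (e.g. spec_flag('7') or spec_flag('d')) returns 0 and terminates the
 * flag loop in format_decode().
 */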
/*
* Helper function to decode printf style format.
* Each call decodes a token from the format and returns the
* number of characters read (or likely the delta where it wants
* to go on the next call).
* The decoded token is returned through the parameters
*
* 'h', 'l', or 'L' for integer fields
* 'z' support added 23/7/1999 S.H.
* 'z' changed to 'Z' --davidm 1/25/99
* 'Z' changed to 'z' --adobriyan 2017-01-25
* 't' added for ptrdiff_t
*
* @fmt: the format string
* @type of the token returned
* @flags: various flags such as +, -, # tokens..
* @field_width: overwritten width
* @base: base of the number (octal, hex, ...)
* @precision: precision of a number
* @qualifier: qualifier of a number (long, size_t, ...)
*/
static noinline_for_stack
struct fmt format_decode(struct fmt fmt, struct printf_spec *spec)
{
const char *start = fmt.str;
char flag;
/* we finished early by reading the field width */
if (unlikely(fmt.state == FORMAT_STATE_WIDTH)) {
if (spec->field_width < 0) {
spec->field_width = -spec->field_width;
spec->flags |= LEFT;
}
fmt.state = FORMAT_STATE_NONE;
goto precision;
}
/* we finished early by reading the precision */
if (unlikely(fmt.state == FORMAT_STATE_PRECISION)) {
if (spec->precision < 0)
spec->precision = 0;
fmt.state = FORMAT_STATE_NONE;
goto qualifier;
}
/* By default */
fmt.state = FORMAT_STATE_NONE;
for (; *fmt.str ; fmt.str++) {
if (*fmt.str == '%')
break;
}
/* Return the current non-format string */
if (fmt.str != start || !*fmt.str)
return fmt;
/* Process flags. This also skips the first '%' */
spec->flags = 0;
do {
/* this also skips first '%' */
flag = spec_flag(*++fmt.str);
spec->flags |= flag;
} while (flag);
/* get field width */
spec->field_width = -1;
if (isdigit(*fmt.str))
spec->field_width = skip_atoi(&fmt.str);
else if (unlikely(*fmt.str == '*')) {
/* it's the next argument */
fmt.state = FORMAT_STATE_WIDTH;
fmt.str++;
return fmt;
}
precision:
/* get the precision */
spec->precision = -1;
if (unlikely(*fmt.str == '.')) {
fmt.str++;
if (isdigit(*fmt.str)) {
spec->precision = skip_atoi(&fmt.str);
if (spec->precision < 0)
spec->precision = 0;
} else if (*fmt.str == '*') {
/* it's the next argument */
fmt.state = FORMAT_STATE_PRECISION;
fmt.str++;
return fmt;
}
}
qualifier:
/* Set up default numeric format */
spec->base = 10;
fmt.state = FORMAT_STATE_NUM;
fmt.size = sizeof(int);
static const struct format_state {
unsigned char state;
unsigned char size;
unsigned char flags_or_double_size;
unsigned char base;
} lookup_state[256] = {
// Length
['l'] = { 0, sizeof(long), sizeof(long long) },
['L'] = { 0, sizeof(long long) },
['h'] = { 0, sizeof(short), sizeof(char) },
['H'] = { 0, sizeof(char) }, // Questionable historical
['z'] = { 0, sizeof(size_t) },
['t'] = { 0, sizeof(ptrdiff_t) },
// Non-numeric formats
['c'] = { FORMAT_STATE_CHAR },
['s'] = { FORMAT_STATE_STR },
['p'] = { FORMAT_STATE_PTR },
['%'] = { FORMAT_STATE_PERCENT_CHAR },
// Numerics
['o'] = { FORMAT_STATE_NUM, 0, 0, 8 },
['x'] = { FORMAT_STATE_NUM, 0, SMALL, 16 },
['X'] = { FORMAT_STATE_NUM, 0, 0, 16 },
['d'] = { FORMAT_STATE_NUM, 0, SIGN, 10 },
['i'] = { FORMAT_STATE_NUM, 0, SIGN, 10 },
['u'] = { FORMAT_STATE_NUM, 0, 0, 10, },
/*
* Since %n poses a greater security risk than
* utility, treat it as any other invalid or
* unsupported format specifier.
*/
};
const struct format_state *p = lookup_state + (u8)*fmt.str;
if (p->size) {
fmt.size = p->size;
if (p->flags_or_double_size && fmt.str[0] == fmt.str[1]) {
fmt.size = p->flags_or_double_size;
fmt.str++;
}
fmt.str++;
p = lookup_state + *fmt.str;
}
if (p->state) {
if (p->base)
spec->base = p->base;
spec->flags |= p->flags_or_double_size;
fmt.state = p->state;
fmt.str++;
return fmt;
}
WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt.str);
fmt.state = FORMAT_STATE_INVALID;
return fmt;
}
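/*
 * Worked example (hypothetical format string): decoding "%-8.3s" sets
 * spec->flags = LEFT, spec->field_width = 8, spec->precision = 3 and
 * returns with fmt.state == FORMAT_STATE_STR and fmt.str pointing just
 * past the 's'. A "%*d" instead returns early with
 * fmt.state == FORMAT_STATE_WIDTH, so vsnprintf() can fetch the width
 * from the va_list before calling format_decode() again.
 */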
static void
set_field_width(struct printf_spec *spec, int width)
{
spec->field_width = width;
if (WARN_ONCE(spec->field_width != width, "field width %d too large", width)) {
spec->field_width = clamp(width, -FIELD_WIDTH_MAX, FIELD_WIDTH_MAX);
}
}
static void
set_precision(struct printf_spec *spec, int prec)
{
spec->precision = prec;
if (WARN_ONCE(spec->precision != prec, "precision %d too large", prec)) {
spec->precision = clamp(prec, 0, PRECISION_MAX);
}
}
/*
* Turn a 1/2/4-byte value into a 64-bit one for printing: truncate
* as necessary and deal with signedness.
*
* 'size' is the size of the value in bytes.
*/
static unsigned long long convert_num_spec(unsigned int val, int size, struct printf_spec spec)
{
unsigned int shift = 32 - size*8;
val <<= shift;
if (!(spec.flags & SIGN))
return val >> shift;
return (int)val >> shift;
}
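/*
 * Illustrative example: with size == 1, convert_num_spec(0xff, 1, spec)
 * yields 255 for an unsigned conversion but sign-extends to -1 when
 * spec.flags contains SIGN, matching what "%hhu" and "%hhd" should
 * print for the same byte.
 */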
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt_str: The format string to use
* @args: Arguments for the format string
*
* This function generally follows C99 vsnprintf, but has some
* extensions and a few limitations:
*
* - ``%n`` is unsupported
* - ``%p*`` is handled by pointer()
*
* See pointer() or Documentation/core-api/printk-formats.rst for more
* extensive description.
*
* **Please update the documentation in both places when making changes**
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
* (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*
* If you're not already dealing with a va_list consider using snprintf().
*/
int vsnprintf(char *buf, size_t size, const char *fmt_str, va_list args)
{
char *str, *end;
struct printf_spec spec = {0};
struct fmt fmt = {
.str = fmt_str,
.state = FORMAT_STATE_NONE,
};
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
end = buf + size;
/* Make sure end is always >= buf */
if (end < buf) {
end = ((void *)-1);
size = end - buf;
}
while (*fmt.str) {
const char *old_fmt = fmt.str;
		fmt = format_decode(fmt, &spec);

		switch (fmt.state) {
case FORMAT_STATE_NONE: {
int read = fmt.str - old_fmt;
if (str < end) {
int copy = read;
				if (copy > end - str)
					copy = end - str;
				memcpy(str, old_fmt, copy);
}
str += read;
continue;
}
case FORMAT_STATE_NUM: {
unsigned long long num;
			if (fmt.size <= sizeof(int))
				num = convert_num_spec(va_arg(args, int), fmt.size, spec);
			else
				num = va_arg(args, long long);
			str = number(str, end, num, spec);
continue;
}
case FORMAT_STATE_WIDTH:
set_field_width(&spec, va_arg(args, int));
continue;
case FORMAT_STATE_PRECISION:
set_precision(&spec, va_arg(args, int));
continue;
case FORMAT_STATE_CHAR: {
char c;
if (!(spec.flags & LEFT)) {
				while (--spec.field_width > 0) {
					if (str < end)
						*str = ' ';
					++str;
}
}
c = (unsigned char) va_arg(args, int);
if (str < end)
*str = c;
++str;
while (--spec.field_width > 0) {
				if (str < end)
					*str = ' ';
				++str;
}
continue;
}
case FORMAT_STATE_STR:
str = string(str, end, va_arg(args, char *), spec);
continue;
case FORMAT_STATE_PTR:
str = pointer(fmt.str, str, end, va_arg(args, void *),
spec);
while (isalnum(*fmt.str)) fmt.str++;
continue;
case FORMAT_STATE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
continue;
default:
/*
* Presumably the arguments passed gcc's type
* checking, but there is no safe or sane way
* for us to continue parsing the format and
* fetching from the va_list; the remaining
* specifiers and arguments would be out of
* sync.
*/
goto out;
}
}
out:
	if (size > 0) {
		if (str < end)
			*str = '\0';
		else
			end[-1] = '\0';
	}

	/* the trailing null byte doesn't count towards the total */
	return str-buf;
}
EXPORT_SYMBOL(vsnprintf);
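/*
 * Illustrative example (not part of this file): snprintf()/vsnprintf()
 * report the length that *would* have been produced, so truncation is
 * detected by comparing the return value against the buffer size.
 *
 *	char buf[8];
 *	int n = snprintf(buf, sizeof(buf), "%s", "hello, world");
 *	// n == 12, buf now holds "hello, " plus the terminating '\0'
 *	if (n >= sizeof(buf))
 *		;	// output was truncated
 */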
/**
* vscnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The return value is the number of characters which have been written into
* the @buf not including the trailing '\0'. If @size is == 0 the function
* returns 0.
*
* If you're not already dealing with a va_list consider using scnprintf().
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
	if (unlikely(!size))
		return 0;
i = vsnprintf(buf, size, fmt, args);
if (likely(i < size))
return i;
return size - 1;
}
EXPORT_SYMBOL(vscnprintf);
/**
* snprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters which would be
* generated for the given input, excluding the trailing null,
* as per ISO C99. If the return is greater than or equal to
* @size, the resulting string is truncated.
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int snprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsnprintf(buf, size, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(snprintf);
/**
* scnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters written into @buf not including
* the trailing '\0'. If @size is == 0 the function returns 0.
*/
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vscnprintf(buf, size, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(scnprintf);
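/*
 * Illustrative sketch (not taken from this file): because scnprintf()
 * returns the number of characters actually stored, it is the natural
 * choice for incrementally appending to a fixed-size buffer, e.g.
 *
 *	char buf[64];
 *	size_t len = 0;
 *	len += scnprintf(buf + len, sizeof(buf) - len, "pid=%d ", 42);
 *	len += scnprintf(buf + len, sizeof(buf) - len, "comm=%s", "demo");
 *
 * With snprintf() the same pattern could advance 'len' past the end of the
 * buffer once truncation occurs.
 */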
/**
* vsprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use vsnprintf() or vscnprintf() in order to avoid
* buffer overflows.
*
* If you're not already dealing with a va_list consider using sprintf().
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int vsprintf(char *buf, const char *fmt, va_list args)
{
return vsnprintf(buf, INT_MAX, fmt, args);
}
EXPORT_SYMBOL(vsprintf);
/**
* sprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use snprintf() or scnprintf() in order to avoid
* buffer overflows.
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int sprintf(char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sprintf);
#ifdef CONFIG_BINARY_PRINTF
/*
* bprintf service:
* vbin_printf() - VA arguments to binary data
* bstr_printf() - Binary data to text string
*/
/**
* vbin_printf - Parse a format string and place args' binary value in a buffer
* @bin_buf: The buffer to place args' binary value
* @size: The size of the buffer (in 32-bit words, not characters)
* @fmt_str: The format string to use
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
* is skipped.
*
* The return value is the number of 32-bit words which would be generated for
* the given input.
*
* NOTE:
* If the return value is greater than @size, the resulting bin_buf is NOT
* valid for bstr_printf().
*/
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt_str, va_list args)
{
struct fmt fmt = {
.str = fmt_str,
.state = FORMAT_STATE_NONE,
};
struct printf_spec spec = {0};
char *str, *end;
int width;
str = (char *)bin_buf;
end = (char *)(bin_buf + size);
#define save_arg(type) \
({ \
unsigned long long value; \
if (sizeof(type) == 8) { \
unsigned long long val8; \
str = PTR_ALIGN(str, sizeof(u32)); \
val8 = va_arg(args, unsigned long long); \
if (str + sizeof(type) <= end) { \
*(u32 *)str = *(u32 *)&val8; \
*(u32 *)(str + 4) = *((u32 *)&val8 + 1); \
} \
value = val8; \
} else { \
unsigned int val4; \
str = PTR_ALIGN(str, sizeof(type)); \
val4 = va_arg(args, int); \
if (str + sizeof(type) <= end) \
*(typeof(type) *)str = (type)(long)val4; \
value = (unsigned long long)val4; \
} \
str += sizeof(type); \
value; \
})
while (*fmt.str) {
fmt = format_decode(fmt, &spec);
switch (fmt.state) {
case FORMAT_STATE_NONE:
case FORMAT_STATE_PERCENT_CHAR:
break;
case FORMAT_STATE_INVALID:
goto out;
case FORMAT_STATE_WIDTH:
case FORMAT_STATE_PRECISION:
width = (int)save_arg(int);
/* Pointers may require the width */
if (*fmt.str == 'p')
set_field_width(&spec, width);
break;
case FORMAT_STATE_CHAR:
save_arg(char);
break;
case FORMAT_STATE_STR: {
const char *save_str = va_arg(args, char *);
const char *err_msg;
size_t len;
err_msg = check_pointer_msg(save_str);
if (err_msg)
save_str = err_msg;
len = strlen(save_str) + 1;
if (str + len < end)
memcpy(str, save_str, len);
str += len;
break;
}
case FORMAT_STATE_PTR:
/* Dereferenced pointers must be done now */
switch (*fmt.str) {
/* Dereference of functions is still OK */
case 'S':
case 's':
case 'x':
case 'K':
case 'e':
save_arg(void *);
break;
default:
if (!isalnum(*fmt.str)) {
save_arg(void *);
break;
}
str = pointer(fmt.str, str, end, va_arg(args, void *),
spec);
if (str + 1 < end)
*str++ = '\0';
else
end[-1] = '\0'; /* Must be nul terminated */
}
/* skip all alphanumeric pointer suffixes */
while (isalnum(*fmt.str))
fmt.str++;
break;
case FORMAT_STATE_NUM:
if (fmt.size > sizeof(int)) {
save_arg(long long);
} else {
save_arg(int);
}
}
}
out:
return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
#undef save_arg
}
EXPORT_SYMBOL_GPL(vbin_printf);
/**
* bstr_printf - Format a string from binary arguments and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt_str: The format string to use
* @bin_buf: Binary arguments for the format string
*
* This function is like C99 vsnprintf, with the difference that vsnprintf
* fetches its arguments from the stack, while bstr_printf reads them from
* @bin_buf, a binary buffer previously generated by vbin_printf.
*
* The format follows C99 vsnprintf, but has some extensions:
* see vsnprintf comment for details.
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
* (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*/
int bstr_printf(char *buf, size_t size, const char *fmt_str, const u32 *bin_buf)
{
struct fmt fmt = {
.str = fmt_str,
.state = FORMAT_STATE_NONE,
};
struct printf_spec spec = {0};
char *str, *end;
const char *args = (const char *)bin_buf;
if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
end = buf + size;
#define get_arg(type) \
({ \
typeof(type) value; \
if (sizeof(type) == 8) { \
args = PTR_ALIGN(args, sizeof(u32)); \
*(u32 *)&value = *(u32 *)args; \
*((u32 *)&value + 1) = *(u32 *)(args + 4); \
} else { \
args = PTR_ALIGN(args, sizeof(type)); \
value = *(typeof(type) *)args; \
} \
args += sizeof(type); \
value; \
})
/* Make sure end is always >= buf */
if (end < buf) {
end = ((void *)-1);
size = end - buf;
}
while (*fmt.str) {
const char *old_fmt = fmt.str;
unsigned long long num;
fmt = format_decode(fmt, &spec);
switch (fmt.state) {
case FORMAT_STATE_NONE: {
int read = fmt.str - old_fmt;
if (str < end) {
int copy = read;
if (copy > end - str)
copy = end - str;
memcpy(str, old_fmt, copy);
}
str += read;
continue;
}
case FORMAT_STATE_WIDTH:
set_field_width(&spec, get_arg(int));
continue;
case FORMAT_STATE_PRECISION:
set_precision(&spec, get_arg(int));
continue;
case FORMAT_STATE_CHAR: {
char c;
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) get_arg(char);
if (str < end)
*str = c;
++str;
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
continue;
}
case FORMAT_STATE_STR: {
const char *str_arg = args;
args += strlen(str_arg) + 1;
str = string(str, end, (char *)str_arg, spec);
continue;
}
case FORMAT_STATE_PTR: {
bool process = false;
int copy, len;
/* Non function dereferences were already done */
switch (*fmt.str) {
case 'S':
case 's':
case 'x':
case 'K':
case 'e':
process = true;
break;
default:
if (!isalnum(*fmt.str)) {
process = true;
break;
}
/* Pointer dereference was already processed */
if (str < end) {
len = copy = strlen(args);
if (copy > end - str)
copy = end - str;
memcpy(str, args, copy);
str += len;
args += len + 1;
}
}
if (process)
str = pointer(fmt.str, str, end, get_arg(void *), spec);
while (isalnum(*fmt.str))
fmt.str++;
continue;
}
case FORMAT_STATE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
continue;
case FORMAT_STATE_INVALID:
goto out;
case FORMAT_STATE_NUM:
if (fmt.size > sizeof(int)) {
num = get_arg(long long);
} else {
num = convert_num_spec(get_arg(int), fmt.size, spec);
}
str = number(str, end, num, spec);
continue;
}
} /* while(*fmt.str) */
out:
if (size > 0) {
if (str < end)
*str = '\0';
else
end[-1] = '\0';
}
#undef get_arg
/* the trailing null byte doesn't count towards the total */
return str - buf;
}
EXPORT_SYMBOL_GPL(bstr_printf);
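/*
 * Illustrative sketch of the two-step usage (assumes a variadic wrapper
 * such as bprintf() that forwards to vbin_printf(); the ftrace
 * trace_printk() path uses the same pair):
 *
 *	u32 bin[32];
 *	char out[128];
 *	int words = bprintf(bin, ARRAY_SIZE(bin), "%d:%s", 7, "ok");
 *	if (words <= ARRAY_SIZE(bin))
 *		bstr_printf(out, sizeof(out), "%d:%s", bin);
 */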
#endif /* CONFIG_BINARY_PRINTF */
/**
* vsscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: format of buffer
* @args: arguments
*/
int vsscanf(const char *buf, const char *fmt, va_list args)
{
const char *str = buf;
char *next;
char digit;
int num = 0;
u8 qualifier;
unsigned int base;
union {
long long s;
unsigned long long u;
} val;
s16 field_width;
bool is_sign;
while (*fmt) {
/* skip any white space in format */
/* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
fmt = skip_spaces(++fmt);
str = skip_spaces(str);
}
/* anything that is not a conversion must match exactly */
if (*fmt != '%' && *fmt) {
if (*fmt++ != *str++)
break;
continue;
}
if (!*fmt)
break;
++fmt;
/* skip this conversion.
* advance both strings to next white space
*/
if (*fmt == '*') {
if (!*str)
break;
while (!isspace(*fmt) && *fmt != '%' && *fmt) {
/* '%*[' not yet supported, invalid format */
if (*fmt == '[')
return num;
fmt++;
}
while (!isspace(*str) && *str)
str++;
continue;
}
/* get field width */
field_width = -1;
if (isdigit(*fmt)) {
field_width = skip_atoi(&fmt);
if (field_width <= 0)
break;
}
/* get conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
*fmt == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
qualifier = 'H';
fmt++;
} else if (qualifier == 'l') {
qualifier = 'L';
fmt++;
}
}
}
if (!*fmt)
break;
if (*fmt == 'n') {
/* return number of characters read so far */
*va_arg(args, int *) = str - buf;
++fmt;
continue;
}
if (!*str)
break;
base = 10;
is_sign = false;
switch (*fmt++) {
case 'c':
{
char *s = (char *)va_arg(args, char*);
if (field_width == -1)
field_width = 1;
do {
*s++ = *str++;
} while (--field_width > 0 && *str);
num++;
}
continue;
case 's':
{
char *s = (char *)va_arg(args, char *);
if (field_width == -1)
field_width = SHRT_MAX;
/* first, skip leading white space in buffer */
str = skip_spaces(str);
/* now copy until next white space */
while (*str && !isspace(*str) && field_width--)
*s++ = *str++;
*s = '\0';
num++;
}
continue;
/*
* Warning: This implementation of the '[' conversion specifier
* deviates from its glibc counterpart in the following ways:
* (1) It does NOT support ranges i.e. '-' is NOT a special
* character
* (2) It cannot match the closing bracket ']' itself
* (3) A field width is required
* (4) '%*[' (discard matching input) is currently not supported
*
* Example usage:
* ret = sscanf("00:0a:95","%2[^:]:%2[^:]:%2[^:]",
* buf1, buf2, buf3);
* if (ret < 3)
* // etc..
*/
case '[':
{
char *s = (char *)va_arg(args, char *);
DECLARE_BITMAP(set, 256) = {0};
unsigned int len = 0;
bool negate = (*fmt == '^');
/* field width is required */
if (field_width == -1)
return num;
if (negate)
++fmt;
for ( ; *fmt && *fmt != ']'; ++fmt, ++len)
__set_bit((u8)*fmt, set);
/* no ']' or no character set found */
if (!*fmt || !len)
return num;
++fmt;
if (negate) {
bitmap_complement(set, set, 256);
/* exclude null '\0' byte */
__clear_bit(0, set);
}
/* match must be non-empty */
if (!test_bit((u8)*str, set))
return num;
while (test_bit((u8)*str, set) && field_width--)
*s++ = *str++;
*s = '\0';
++num;
}
continue;
case 'o':
base = 8;
break;
case 'x':
case 'X':
base = 16;
break;
case 'i':
base = 0;
fallthrough;
case 'd':
is_sign = true;
fallthrough;
case 'u':
break;
case '%':
/* looking for '%' in str */
if (*str++ != '%')
return num;
continue;
default:
/* invalid format; stop here */
return num;
}
/* have some sort of integer conversion.
* first, skip white space in buffer.
*/
str = skip_spaces(str);
digit = *str;
if (is_sign && digit == '-') {
if (field_width == 1)
break;
digit = *(str + 1);
}
if (!digit
|| (base == 16 && !isxdigit(digit))
|| (base == 10 && !isdigit(digit))
|| (base == 8 && !isodigit(digit))
|| (base == 0 && !isdigit(digit)))
break;
if (is_sign)
val.s = simple_strntoll(str, &next, base,
field_width >= 0 ? field_width : INT_MAX);
else
val.u = simple_strntoull(str, &next, base,
field_width >= 0 ? field_width : INT_MAX);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
if (is_sign)
*va_arg(args, signed char *) = val.s;
else
*va_arg(args, unsigned char *) = val.u;
break;
case 'h':
if (is_sign)
*va_arg(args, short *) = val.s;
else
*va_arg(args, unsigned short *) = val.u;
break;
case 'l':
if (is_sign)
*va_arg(args, long *) = val.s;
else
*va_arg(args, unsigned long *) = val.u;
break;
case 'L':
if (is_sign)
*va_arg(args, long long *) = val.s;
else
*va_arg(args, unsigned long long *) = val.u;
break;
case 'z':
*va_arg(args, size_t *) = val.u;
break;
default:
if (is_sign)
*va_arg(args, int *) = val.s;
else
*va_arg(args, unsigned int *) = val.u;
break;
}
num++;
if (!next)
break;
str = next;
}
return num;
}
EXPORT_SYMBOL(vsscanf);
/**
* sscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: formatting of buffer
* @...: resulting arguments
*/
int sscanf(const char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsscanf(buf, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sscanf);
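/*
 * Illustrative example (not part of this file): parse "eth2 1500" into a
 * name and an MTU, and use %n to learn how many characters were consumed.
 *
 *	char name[8];
 *	int mtu, consumed;
 *	if (sscanf("eth2 1500", "%7s %d%n", name, &mtu, &consumed) == 2)
 *		;	// name == "eth2", mtu == 1500, consumed == 9
 */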
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
*
* 2003-06-02 Jim Houston - Concurrent Computer Corp.
* Changes to use preallocated sigqueue structures
* to allow signals to be sent reliably.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>
#include <uapi/linux/pidfd.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h> /* for syscall_get_* */
#include "time/posix-timers.h"
/*
* SLAB caches for signal bits.
*/
static struct kmem_cache *sigqueue_cachep;
int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
return t->sighand->action[sig - 1].sa.sa_handler;
}
static inline bool sig_handler_ignored(void __user *handler, int sig)
{
/* Is it explicitly or implicitly ignored? */
return handler == SIG_IGN ||
(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
void __user *handler;
handler = sig_handler(t, sig);
/* SIGKILL and SIGSTOP may not be sent to the global init */
if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
return true;
if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
handler == SIG_DFL && !(force && sig_kernel_only(sig)))
return true;
/* Only allow kernel generated signals to this kthread */
if (unlikely((t->flags & PF_KTHREAD) &&
(handler == SIG_KTHREAD_KERNEL) && !force))
return true;
return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
/*
* Blocked signals are never ignored, since the
* signal handler may change by the time it is
* unblocked.
*/
if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
return false;
/*
* Tracers may want to know about even ignored signals, unless the
* signal is SIGKILL, which can't be reported anyway but can be
* ignored by a SIGNAL_UNKILLABLE task.
*/
if (t->ptrace && sig != SIGKILL)
return false;
return sig_task_ignored(t, sig, force);
}
/*
* Re-calculate pending state from the set of locally pending
* signals, globally pending signals, and blocked signals.
*/
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
unsigned long ready;
long i;
switch (_NSIG_WORDS) {
default:
for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
ready |= signal->sig[i] &~ blocked->sig[i];
break;
case 4: ready = signal->sig[3] &~ blocked->sig[3];
ready |= signal->sig[2] &~ blocked->sig[2];
ready |= signal->sig[1] &~ blocked->sig[1];
ready |= signal->sig[0] &~ blocked->sig[0];
break;
case 2: ready = signal->sig[1] &~ blocked->sig[1];
ready |= signal->sig[0] &~ blocked->sig[0];
break;
case 1: ready = signal->sig[0] &~ blocked->sig[0];
}
return ready != 0;
}
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
}
/*
* We must never clear the flag in another thread, or in current
* when it's possible the current syscall is returning -ERESTART*.
* So we don't clear it here; only callers who know it is safe should do so.
*/
return false;
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current)) {
		if (unlikely(test_thread_flag(TIF_SIGPENDING)))
			clear_thread_flag(TIF_SIGPENDING);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
/* Have any signals or users of TIF_SIGPENDING been delayed
* until after fork?
*/
	spin_lock_irq(&current->sighand->siglock);
set_tsk_thread_flag(current, TIF_SIGPENDING);
recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */
#define SYNCHRONOUS_MASK \
(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
int next_signal(struct sigpending *pending, sigset_t *mask)
{
unsigned long i, *s, *m, x;
int sig = 0;
s = pending->signal.sig;
m = mask->sig;
/*
* Handle the first word specially: it contains the
* synchronous signals that need to be dequeued first.
*/
x = *s &~ *m;
if (x) {
if (x & SYNCHRONOUS_MASK)
x &= SYNCHRONOUS_MASK;
sig = ffz(~x) + 1;
return sig;
}
switch (_NSIG_WORDS) {
default:
for (i = 1; i < _NSIG_WORDS; ++i) {
x = *++s &~ *++m;
if (!x)
continue;
sig = ffz(~x) + i*_NSIG_BPW + 1;
break;
}
break;
case 2:
x = s[1] &~ m[1];
if (!x)
break;
sig = ffz(~x) + _NSIG_BPW + 1;
break;
case 1:
/* Nothing to do */
break;
}
return sig;
}
static inline void print_dropped_signal(int sig)
{
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
if (!print_fatal_signals)
return;
if (!__ratelimit(&ratelimit_state))
return;
pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
current->comm, current->pid, sig);
}
/**
* task_set_jobctl_pending - set jobctl pending bits
* @task: target task
* @mask: pending bits to set
*
* Set @mask in @task->jobctl. @mask must be a subset of
* %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
* %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
* cleared. If @task is already being killed or exiting, this function
* becomes noop.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*
* RETURNS:
* %true if @mask is set, %false if made noop because @task was dying.
*/
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
return false;
if (mask & JOBCTL_STOP_SIGMASK)
task->jobctl &= ~JOBCTL_STOP_SIGMASK;
task->jobctl |= mask;
return true;
}
/**
* task_clear_jobctl_trapping - clear jobctl trapping bit
* @task: target task
*
* If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
* Clear it and wake up the ptracer. Note that we don't need any further
* locking. @task->siglock guarantees that @task->parent points to the
* ptracer.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
void task_clear_jobctl_trapping(struct task_struct *task)
{
if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
task->jobctl &= ~JOBCTL_TRAPPING;
smp_mb(); /* advised by wake_up_bit() */
wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
}
}
/**
* task_clear_jobctl_pending - clear jobctl pending bits
* @task: target task
* @mask: pending bits to clear
*
* Clear @mask from @task->jobctl. @mask must be subset of
* %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
* STOP bits are cleared together.
*
* If clearing of @mask leaves no stop or trap pending, this function calls
* task_clear_jobctl_trapping().
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
BUG_ON(mask & ~JOBCTL_PENDING_MASK);
if (mask & JOBCTL_STOP_PENDING)
mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
task->jobctl &= ~mask;
if (!(task->jobctl & JOBCTL_PENDING_MASK))
task_clear_jobctl_trapping(task);
}
/**
* task_participate_group_stop - participate in a group stop
* @task: task participating in a group stop
*
* @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
* Group stop states are cleared and the group stop count is consumed if
* %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
* stop, the appropriate `SIGNAL_*` flags are set.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*
* RETURNS:
* %true if group stop completion should be notified to the parent, %false
* otherwise.
*/
static bool task_participate_group_stop(struct task_struct *task)
{
struct signal_struct *sig = task->signal;
bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
if (!consume)
return false;
if (!WARN_ON_ONCE(sig->group_stop_count == 0))
sig->group_stop_count--;
/*
* Tell the caller to notify completion iff we are entering into a
* fresh group stop. Read comment in do_signal_stop() for details.
*/
if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
return true;
}
return false;
}
void task_join_group_stop(struct task_struct *task)
{
unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
struct signal_struct *sig = current->signal;
if (sig->group_stop_count) {
sig->group_stop_count++;
mask |= JOBCTL_STOP_CONSUME;
} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
return;
/* Have the new thread join an on-going signal group stop */
task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
int override_rlimit)
{
struct ucounts *ucounts;
long sigpending;
/*
* Protect access to @t credentials. This can go away when all
* callers hold rcu read lock.
*
* NOTE! A pending signal will hold on to the user refcount,
* and we get/put the refcount only when the sigpending count
* changes from/to zero.
*/
rcu_read_lock();
ucounts = task_ucounts(t);
sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
override_rlimit);
rcu_read_unlock();
if (!sigpending)
return NULL;
if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
print_dropped_signal(sig);
return NULL;
}
return ucounts;
}
static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
const unsigned int sigqueue_flags)
{
INIT_LIST_HEAD(&q->list);
q->flags = sigqueue_flags;
q->ucounts = ucounts;
}
/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
* appropriate lock must be held to stop the target task from exiting
*/
static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
int override_rlimit)
{
struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
struct sigqueue *q;
if (!ucounts)
return NULL;
q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
if (!q) {
dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
return NULL;
}
__sigqueue_init(q, ucounts, 0);
return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC) {
posixtimer_sigqueue_putref(q);
return;
}
if (q->ucounts) {
dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
q->ucounts = NULL;
}
kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
sigemptyset(&queue->signal);
while (!list_empty(&queue->list)) {
q = list_entry(queue->list.next, struct sigqueue , list);
list_del_init(&q->list);
__sigqueue_free(q);
}
}
/*
* Flush all pending signals for this kthread.
*/
void flush_signals(struct task_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&t->sighand->siglock, flags);
clear_tsk_thread_flag(t, TIF_SIGPENDING);
flush_sigqueue(&t->pending);
flush_sigqueue(&t->signal->shared_pending);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
void ignore_signals(struct task_struct *t)
{
int i;
for (i = 0; i < _NSIG; ++i)
t->sighand->action[i].sa.sa_handler = SIG_IGN;
flush_signals(t);
}
/*
* Flush all handlers for a task.
*/
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
int i;
struct k_sigaction *ka = &t->sighand->action[0];
for (i = _NSIG ; i != 0 ; i--) {
if (force_default || ka->sa.sa_handler != SIG_IGN)
ka->sa.sa_handler = SIG_DFL;
ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
ka->sa.sa_restorer = NULL;
#endif
sigemptyset(&ka->sa.sa_mask);
ka++;
}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
if (is_global_init(tsk))
return true;
if (handler != SIG_IGN && handler != SIG_DFL)
return false;
/* If dying, we handle all new signals by ignoring them */
if (fatal_signal_pending(tsk))
return false;
/* if ptraced, let the tracer determine */
return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
struct sigqueue **timer_sigq)
{
struct sigqueue *q, *first = NULL;
/*
* Collect the siginfo appropriate to this signal. Check if
* there is another siginfo for the same signal.
*/
list_for_each_entry(q, &list->list, list) {
if (q->info.si_signo == sig) {
if (first)
goto still_pending;
first = q;
}
}
sigdelset(&list->signal, sig);
if (first) {
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
/*
* posix-timer signals are preallocated and freed when the last
* reference count is dropped in posixtimer_deliver_signal() or
* immediately on timer deletion when the signal is not pending.
* Spare the extra round through __sigqueue_free() which is
* ignoring preallocated signals.
*/
if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
*timer_sigq = first;
else
__sigqueue_free(first);
} else {
/*
* Ok, it wasn't in the queue. This must be
* a fast-pathed signal or we must have been
* out of queue space. So zero out the info.
*/
clear_siginfo(info);
info->si_signo = sig;
info->si_errno = 0;
info->si_code = SI_USER;
info->si_pid = 0;
info->si_uid = 0;
}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
kernel_siginfo_t *info, struct sigqueue **timer_sigq)
{
int sig = next_signal(pending, mask);
if (sig)
collect_signal(sig, pending, info, timer_sigq);
return sig;
}
/*
* Try to dequeue a signal. If a deliverable signal is found fill in the
* caller provided siginfo and return the signal number. Otherwise return
* 0.
*/
int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
{
struct task_struct *tsk = current;
struct sigqueue *timer_sigq;
int signr;
lockdep_assert_held(&tsk->sighand->siglock);
again:
*type = PIDTYPE_PID;
timer_sigq = NULL;
signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
if (!signr) {
*type = PIDTYPE_TGID;
signr = __dequeue_signal(&tsk->signal->shared_pending,
mask, info, &timer_sigq);
if (unlikely(signr == SIGALRM))
posixtimer_rearm_itimer(tsk);
}
recalc_sigpending();
if (!signr)
return 0;
if (unlikely(sig_kernel_stop(signr))) {
/*
* Set a marker that we have dequeued a stop signal. Our
* caller might release the siglock and then the pending
* stop signal it is about to process is no longer in the
* pending bitmasks, but must still be cleared by a SIGCONT
* (and overruled by a SIGKILL). So those cases clear this
* shared flag after we've set it. Note that this flag may
* remain set after the signal we return is ignored or
* handled. That doesn't matter because its only purpose
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
if (!posixtimer_deliver_signal(info, timer_sigq))
goto again;
}
return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
struct task_struct *tsk = current;
struct sigpending *pending = &tsk->pending;
struct sigqueue *q, *sync = NULL;
/*
* Might a synchronous signal be in the queue?
*/
if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
return 0;
/*
* Return the first synchronous signal in the queue.
*/
list_for_each_entry(q, &pending->list, list) {
/* Synchronous signals have a positive si_code */
if ((q->info.si_code > SI_USER) &&
(sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
sync = q;
goto next;
}
}
return 0;
next:
/*
* Check if there is another siginfo for the same signal.
*/
list_for_each_entry_continue(q, &pending->list, list) {
if (q->info.si_signo == sync->info.si_signo)
goto still_pending;
}
sigdelset(&pending->signal, sync->info.si_signo);
recalc_sigpending();
still_pending:
list_del_init(&sync->list);
copy_siginfo(info, &sync->info);
__sigqueue_free(sync);
return info->si_signo;
}
/*
* Tell a process that it has a new active signal..
*
* NOTE! we rely on the previous spin_lock to
* lock interrupts for us! We can only be called with
* "siglock" held, and the local interrupt must
* have been disabled when that got acquired!
*
* No need to set need_resched since signal event passing
* goes through ->blocked
*/
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
lockdep_assert_held(&t->sighand->siglock);
set_tsk_thread_flag(t, TIF_SIGPENDING);
/*
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable
* case. We don't check t->state here because there is a race with it
* executing on another processor and just now entering stopped state.
* By using wake_up_state, we ensure the process will wake up and
* handle its death signal.
*/
if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
kick_process(t);
}
static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);
static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
{
if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
__sigqueue_free(q);
else
posixtimer_sig_ignore(tsk, q);
}
/* Remove signals in mask from the pending set and queue. */
static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
{
struct sigqueue *q, *n;
sigset_t m;
lockdep_assert_held(&p->sighand->siglock);
sigandsets(&m, mask, &s->signal);
if (sigisemptyset(&m))
return;
sigandnsets(&s->signal, &s->signal, mask);
list_for_each_entry_safe(q, n, &s->list, list) {
if (sigismember(mask, q->info.si_signo)) {
list_del_init(&q->list);
sigqueue_free_ignored(p, q);
}
}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
return info <= SEND_SIG_PRIV;
}
static inline bool si_fromuser(const struct kernel_siginfo *info)
{
return info == SEND_SIG_NOINFO ||
(!is_si_special(info) && SI_FROMUSER(info));
}
/*
* called with RCU read lock from check_kill_permission()
*/
static bool kill_ok_by_cred(struct task_struct *t)
{
const struct cred *cred = current_cred();
const struct cred *tcred = __task_cred(t);
return uid_eq(cred->euid, tcred->suid) ||
uid_eq(cred->euid, tcred->uid) ||
uid_eq(cred->uid, tcred->suid) ||
uid_eq(cred->uid, tcred->uid) ||
ns_capable(tcred->user_ns, CAP_KILL);
}
/*
* Bad permissions for sending the signal
* - the caller must hold the RCU read lock
*/
static int check_kill_permission(int sig, struct kernel_siginfo *info,
struct task_struct *t)
{
struct pid *sid;
int error;
if (!valid_signal(sig))
return -EINVAL;
if (!si_fromuser(info))
return 0;
error = audit_signal_info(sig, t); /* Let audit system see the signal */
if (error)
return error;
if (!same_thread_group(current, t) &&
!kill_ok_by_cred(t)) {
switch (sig) {
case SIGCONT:
sid = task_session(t);
/*
* We don't return the error if sid == NULL. The
* task was unhashed, the caller must notice this.
*/
if (!sid || sid == task_session(current))
break;
fallthrough;
default:
return -EPERM;
}
}
return security_task_kill(t, info, sig, NULL);
}
/**
* ptrace_trap_notify - schedule trap to notify ptracer
* @t: tracee wanting to notify tracer
*
* This function schedules sticky ptrace trap which is cleared on the next
* TRAP_STOP to notify ptracer of an event. @t must have been seized by
* ptracer.
*
* If @t is running, STOP trap will be taken. If trapped for STOP and
* ptracer is listening for events, tracee is woken up so that it can
* re-trap for the new event. If trapped otherwise, STOP trap will be
* eventually taken without returning to userland after the existing traps
* are finished by PTRACE_CONT.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
static void ptrace_trap_notify(struct task_struct *t)
{
WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
lockdep_assert_held(&t->sighand->siglock);
task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
* Handle magic process-wide effects of stop/continue signals. Unlike
* the signal actions, these happen immediately at signal-generation
* time regardless of blocking, ignoring, or handling. This does the
* actual continuing for SIGCONT, but not the actual stopping for stop
* signals. The process stop is done as a signal action for SIG_DFL.
*
* Returns true if the signal should be actually delivered, otherwise
* it should be dropped.
*/
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
struct signal_struct *signal = p->signal;
struct task_struct *t;
sigset_t flush;
if (signal->flags & SIGNAL_GROUP_EXIT) {
if (signal->core_state)
return sig == SIGKILL;
/*
* The process is in the middle of dying, drop the signal.
*/
return false;
} else if (sig_kernel_stop(sig)) {
/*
* This is a stop signal. Remove SIGCONT from all queues.
*/
siginitset(&flush, sigmask(SIGCONT));
flush_sigqueue_mask(p, &flush, &signal->shared_pending);
for_each_thread(p, t)
flush_sigqueue_mask(p, &flush, &t->pending);
} else if (sig == SIGCONT) {
unsigned int why;
/*
* Remove all stop signals from all queues, wake all threads.
*/
siginitset(&flush, SIG_KERNEL_STOP_MASK);
flush_sigqueue_mask(p, &flush, &signal->shared_pending);
for_each_thread(p, t) {
flush_sigqueue_mask(p, &flush, &t->pending);
task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
if (likely(!(t->ptrace & PT_SEIZED))) {
t->jobctl &= ~JOBCTL_STOPPED;
wake_up_state(t, __TASK_STOPPED);
} else
ptrace_trap_notify(t);
}
/*
* Notify the parent with CLD_CONTINUED if we were stopped.
*
* If we were in the middle of a group stop, we pretend it
* was already finished, and then continued. Since SIGCHLD
* doesn't queue we report only CLD_STOPPED, as if the next
* CLD_CONTINUED was dropped.
*/
why = 0;
if (signal->flags & SIGNAL_STOP_STOPPED)
why |= SIGNAL_CLD_CONTINUED;
else if (signal->group_stop_count)
why |= SIGNAL_CLD_STOPPED;
if (why) {
/*
* The first thread which returns from do_signal_stop()
* will take ->siglock, notice SIGNAL_CLD_MASK, and
* notify its parent. See get_signal().
*/
signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
signal->group_exit_code = 0;
}
}
return !sig_ignored(p, sig, force);
}
/*
* Test if P wants to take SIG. After we've checked all threads with this,
* it's equivalent to finding no threads not blocking SIG. Any threads not
* blocking SIG were ruled out because they are not running and already
* have pending signals. Such threads will dequeue from the shared queue
* as soon as they're available, so putting the signal on the shared queue
* will be equivalent to sending it to one such thread.
*/
static inline bool wants_signal(int sig, struct task_struct *p)
{
if (sigismember(&p->blocked, sig))
return false;
if (p->flags & PF_EXITING)
return false;
if (sig == SIGKILL)
return true;
if (task_is_stopped_or_traced(p))
return false;
return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
struct signal_struct *signal = p->signal;
struct task_struct *t;
/*
* Now find a thread we can wake up to take the signal off the queue.
*
* Try the suggested task first (may or may not be the main thread).
*/
if (wants_signal(sig, p))
t = p;
else if ((type == PIDTYPE_PID) || thread_group_empty(p))
/*
* There is just one thread and it does not need to be woken.
* It will dequeue unblocked signals before it runs again.
*/
return;
else {
/*
* Otherwise try to find a suitable thread.
*/
t = signal->curr_target;
while (!wants_signal(sig, t)) {
t = next_thread(t);
if (t == signal->curr_target)
/*
* No thread needs to be woken.
* Any eligible threads will see
* the signal in the queue soon.
*/
return;
}
signal->curr_target = t;
}
/*
* Found a killable thread. If the signal will be fatal,
* then start taking the whole group down immediately.
*/
if (sig_fatal(p, sig) &&
(signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
!sigismember(&t->real_blocked, sig) &&
(sig == SIGKILL || !p->ptrace)) {
/*
* This signal will be fatal to the whole group.
*/
if (!sig_kernel_coredump(sig)) {
/*
* Start a group exit and wake everybody up.
* This way we don't have other threads
* running and doing things after a slower
* thread has the fatal signal pending.
*/
signal->flags = SIGNAL_GROUP_EXIT;
signal->group_exit_code = sig;
signal->group_stop_count = 0;
__for_each_thread(signal, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
}
return;
}
}
/*
* The signal is already in the shared-pending queue.
* Tell the chosen thread to wake up and dequeue it.
*/
signal_wake_up(t, sig == SIGKILL);
return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int __send_signal_locked(int sig, struct kernel_siginfo *info,
struct task_struct *t, enum pid_type type, bool force)
{
struct sigpending *pending;
struct sigqueue *q;
int override_rlimit;
int ret = 0, result;
lockdep_assert_held(&t->sighand->siglock);
result = TRACE_SIGNAL_IGNORED;
if (!prepare_signal(sig, t, force))
goto ret;
pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
/*
* Short-circuit ignored signals and support queuing
* exactly one non-rt signal, so that we can get more
* detailed information about the cause of the signal.
*/
result = TRACE_SIGNAL_ALREADY_PENDING;
if (legacy_queue(pending, sig))
goto ret;
result = TRACE_SIGNAL_DELIVERED;
/*
* Skip useless siginfo allocation for SIGKILL and kernel threads.
*/
if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
goto out_set;
/*
* Real-time signals must be queued if sent by sigqueue, or
* some other real-time mechanism. It is implementation
* defined whether kill() does so. We attempt to do so, on
* the principle of least surprise, but since kill is not
* allowed to fail with EAGAIN when low on memory we just
* make sure at least one signal gets delivered and don't
* pass on the info struct.
*/
if (sig < SIGRTMIN)
override_rlimit = (is_si_special(info) || info->si_code >= 0);
else
override_rlimit = 0;
q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
if (q) {
list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
case (unsigned long) SEND_SIG_NOINFO:
clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
q->info.si_pid = task_tgid_nr_ns(current,
task_active_pid_ns(t));
rcu_read_lock();
q->info.si_uid =
from_kuid_munged(task_cred_xxx(t, user_ns),
current_uid());
rcu_read_unlock();
break;
case (unsigned long) SEND_SIG_PRIV:
clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_KERNEL;
q->info.si_pid = 0;
q->info.si_uid = 0;
break;
default:
copy_siginfo(&q->info, info);
break;
}
} else if (!is_si_special(info) &&
sig >= SIGRTMIN && info->si_code != SI_USER) {
/*
* Queue overflow, abort. We may abort if the
* signal was rt and sent by user using something
* other than kill().
*/
result = TRACE_SIGNAL_OVERFLOW_FAIL;
ret = -EAGAIN;
goto ret;
} else {
/*
* This is a silent loss of information. We still
* send the signal, but the *info bits are lost.
*/
result = TRACE_SIGNAL_LOSE_INFO;
}
out_set:
signalfd_notify(t, sig);
sigaddset(&pending->signal, sig);
/* Let multiprocess signals appear after on-going forks */
if (type > PIDTYPE_TGID) {
struct multiprocess_signals *delayed;
hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
sigset_t *signal = &delayed->signal;
/* Can't queue both a stop and a continue signal */
if (sig == SIGCONT)
sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
else if (sig_kernel_stop(sig))
sigdelset(signal, SIGCONT);
sigaddset(signal, sig);
}
}
complete_signal(sig, t, type);
ret:
trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
bool ret = false;
switch (siginfo_layout(info->si_signo, info->si_code)) {
case SIL_KILL:
case SIL_CHLD:
case SIL_RT:
ret = true;
break;
case SIL_TIMER:
case SIL_POLL:
case SIL_FAULT:
case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
case SIL_FAULT_PERF_EVENT:
case SIL_SYS:
ret = false;
break;
}
return ret;
}
int send_signal_locked(int sig, struct kernel_siginfo *info,
struct task_struct *t, enum pid_type type)
{
/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
bool force = false;
if (info == SEND_SIG_NOINFO) {
/* Force if sent from an ancestor pid namespace */
force = !task_pid_nr_ns(current, task_active_pid_ns(t));
} else if (info == SEND_SIG_PRIV) {
/* Don't ignore kernel generated signals */
force = true;
} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special or have ids */
struct user_namespace *t_user_ns;
rcu_read_lock();
t_user_ns = task_cred_xxx(t, user_ns);
if (current_user_ns() != t_user_ns) {
kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
info->si_uid = from_kuid_munged(t_user_ns, uid);
}
rcu_read_unlock();
/* A kernel generated signal? */
force = (info->si_code == SI_KERNEL);
/* From an ancestor pid namespace? */
if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
info->si_pid = 0;
force = true;
}
}
return __send_signal_locked(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
struct pt_regs *regs = task_pt_regs(current);
struct file *exe_file;
exe_file = get_task_exe_file(current);
if (exe_file) {
pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
exe_file, current->comm, signr);
fput(exe_file);
} else {
pr_info("%s: potentially unexpected fatal signal %d.\n",
current->comm, signr);
}
#if defined(__i386__) && !defined(__arch_um__)
pr_info("code at %08lx: ", regs->ip);
{
int i;
for (i = 0; i < 16; i++) {
unsigned char insn;
if (get_user(insn, (unsigned char *)(regs->ip + i)))
break;
pr_cont("%02x ", insn);
}
}
pr_cont("\n");
#endif
preempt_disable();
show_regs(regs);
preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
get_option (&str, &print_fatal_signals);
return 1;
}
__setup("print-fatal-signals=", setup_print_fatal_signals);
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
enum pid_type type)
{
unsigned long flags;
int ret = -ESRCH;
if (lock_task_sighand(p, &flags)) {
ret = send_signal_locked(sig, info, p, type);
unlock_task_sighand(p, &flags);
}
return ret;
}
enum sig_handler {
HANDLER_CURRENT, /* If reachable use the current handler */
HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
HANDLER_EXIT, /* Only visible as the process exit code */
};
/*
* Force a signal that the process can't ignore: if necessary
* we unblock the signal and change any SIG_IGN to SIG_DFL.
*
* Note: If we unblock the signal, we always reset it to SIG_DFL,
* since we do not want to have a signal handler that was blocked
* be invoked when user space had explicitly blocked it.
*
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
enum sig_handler handler)
{
unsigned long int flags;
int ret, blocked, ignored;
struct k_sigaction *action;
int sig = info->si_signo;
spin_lock_irqsave(&t->sighand->siglock, flags);
action = &t->sighand->action[sig-1];
ignored = action->sa.sa_handler == SIG_IGN;
blocked = sigismember(&t->blocked, sig);
if (blocked || ignored || (handler != HANDLER_CURRENT)) {
action->sa.sa_handler = SIG_DFL;
if (handler == HANDLER_EXIT)
action->sa.sa_flags |= SA_IMMUTABLE;
if (blocked)
sigdelset(&t->blocked, sig);
}
/*
* Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
* debugging to leave init killable. But HANDLER_EXIT is always fatal.
*/
if (action->sa.sa_handler == SIG_DFL &&
(!t->ptrace || (handler == HANDLER_EXIT)))
t->signal->flags &= ~SIGNAL_UNKILLABLE;
ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
/* This can happen if the signal was already pending and blocked */
if (!task_sigpending(t))
signal_wake_up(t, 0);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
return ret;
}
int force_sig_info(struct kernel_siginfo *info)
{
return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}
/*
* Nuke all other threads in the group.
*/
int zap_other_threads(struct task_struct *p)
{
struct task_struct *t;
int count = 0;
p->signal->group_stop_count = 0;
for_other_threads(p, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
count++;
/* Don't bother with already dead threads */
if (t->exit_state)
continue;
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
}
return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
{
struct sighand_struct *sighand;
rcu_read_lock();
for (;;) {
sighand = rcu_dereference(tsk->sighand);
if (unlikely(sighand == NULL))
break;
/*
* This sighand can be already freed and even reused, but
* we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
* initializes ->siglock: this slab can't go away, it has
* the same object type, ->siglock can't be reinitialized.
*
* We need to ensure that tsk->sighand is still the same
* after we take the lock, we can race with de_thread() or
* __exit_signal(). In the latter case the next iteration
* must see ->sighand == NULL.
*/
spin_lock_irqsave(&sighand->siglock, *flags);
if (likely(sighand == rcu_access_pointer(tsk->sighand)))
break;
spin_unlock_irqrestore(&sighand->siglock, *flags);
}
rcu_read_unlock();
return sighand;
}
#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
struct sighand_struct *sighand;
rcu_read_lock();
sighand = rcu_dereference(task->sighand);
if (sighand)
lockdep_assert_held(&sighand->siglock);
else
WARN_ON_ONCE(1);
rcu_read_unlock();
}
#endif
/*
* send signal info to all the members of a thread group or to the
* individual thread if type == PIDTYPE_PID.
*/
int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type)
{
int ret;
rcu_read_lock();
ret = check_kill_permission(sig, info, p);
rcu_read_unlock();
if (!ret && sig)
ret = do_send_sig_info(sig, info, p, type);
return ret;
}
/*
* __kill_pgrp_info() sends a signal to a process group: this is what the tty
* control characters do (^C, ^Z etc)
* - the caller must hold at least a readlock on tasklist_lock
*/
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
struct task_struct *p = NULL;
int ret = -ESRCH;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
/*
* If group_send_sig_info() succeeds at least once ret
* becomes 0 and after that the code below has no effect.
* Otherwise we return the last err or -ESRCH if this
* process group is empty.
*/
if (ret)
ret = err;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return ret;
}
static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
struct pid *pid, enum pid_type type)
{
int error = -ESRCH;
struct task_struct *p;
for (;;) {
rcu_read_lock();
p = pid_task(pid, PIDTYPE_PID);
if (p)
error = group_send_sig_info(sig, info, p, type);
rcu_read_unlock();
if (likely(!p || error != -ESRCH))
return error;
/*
* The task was unhashed in between, try again. If it
* is dead, pid_task() will return NULL, if we race with
* de_thread() it will find the new leader.
*/
}
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
}
static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
int error;
rcu_read_lock();
error = kill_pid_info(sig, info, find_vpid(pid));
rcu_read_unlock();
return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
struct task_struct *target)
{
const struct cred *pcred = __task_cred(target);
return uid_eq(cred->euid, pcred->suid) ||
uid_eq(cred->euid, pcred->uid) ||
uid_eq(cred->uid, pcred->suid) ||
uid_eq(cred->uid, pcred->uid);
}
/*
* The usb asyncio usage of siginfo is wrong. The glibc support
* for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
* AKA after the generic fields:
* kernel_pid_t si_pid;
* kernel_uid32_t si_uid;
* sigval_t si_value;
*
* Unfortunately when usb generates SI_ASYNCIO it assumes the layout
* after the generic fields is:
* void __user *si_addr;
*
* This is a practical problem when there is a 64bit big endian kernel
* and a 32bit userspace. As the 32bit address will be encoded in the low
* 32bits of the pointer. Those low 32bits will be stored at higher
* address than appear in a 32 bit pointer. So userspace will not
* see the address it was expecting for its completions.
*
* There is nothing in the encoding that can allow
* copy_siginfo_to_user32 to detect this confusion of formats, so
* handle this by requiring the caller of kill_pid_usb_asyncio to
* notice when this situation takes place and to store the 32bit
* pointer in sival_int, instead of sival_addr of the sigval_t addr
* parameter.
*/
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
struct pid *pid, const struct cred *cred)
{
struct kernel_siginfo info;
struct task_struct *p;
unsigned long flags;
int ret = -EINVAL;
if (!valid_signal(sig))
return ret;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = errno;
info.si_code = SI_ASYNCIO;
*((sigval_t *)&info.si_pid) = addr;
rcu_read_lock();
p = pid_task(pid, PIDTYPE_PID);
if (!p) {
ret = -ESRCH;
goto out_unlock;
}
if (!kill_as_cred_perm(cred, p)) {
ret = -EPERM;
goto out_unlock;
}
ret = security_task_kill(p, &info, sig, cred);
if (ret)
goto out_unlock;
if (sig) {
if (lock_task_sighand(p, &flags)) {
ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
unlock_task_sighand(p, &flags);
} else
ret = -ESRCH;
}
out_unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
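/*
 * Illustrative caller sketch (hedged: not taken from this file, and the
 * 'userspace_is_32bit' check is a placeholder for however the caller tracks
 * the ABI of the completing task): a driver finishing async I/O for a 32-bit
 * task stores the pointer in sival_int as described above.
 *
 *	sigval_t addr;
 *	if (userspace_is_32bit)		// assumption: caller knows the task ABI
 *		addr.sival_int = lower_32_bits((unsigned long)uptr);
 *	else
 *		addr.sival_ptr = uptr;
 *	kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
 */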
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
*
* POSIX specifies that kill(-1,sig) is unspecified, but what we have
* is probably wrong. Should make it like BSD or SYSV.
*/
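/*
 * Descriptive summary of the pid handling below: pid > 0 signals that one
 * process, pid == 0 signals the caller's own process group, pid == -1
 * signals every process the caller may signal (except init and the caller's
 * own thread group), and pid < -1 signals the process group -pid.
 */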
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
int ret;
if (pid > 0)
return kill_proc_info(sig, info, pid);
/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
if (pid == INT_MIN)
return -ESRCH;
read_lock(&tasklist_lock);
if (pid != -1) {
ret = __kill_pgrp_info(sig, info,
pid ? find_vpid(-pid) : task_pgrp(current));
} else {
int retval = 0, count = 0;
struct task_struct *p;
for_each_process(p) {
if (task_pid_vnr(p) > 1 &&
!same_thread_group(p, current)) {
int err = group_send_sig_info(sig, info, p,
PIDTYPE_MAX);
++count;
if (err != -EPERM)
retval = err;
}
}
ret = count ? retval : -ESRCH;
}
read_unlock(&tasklist_lock);
return ret;
}
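/*
 * Descriptive summary (not from the original file), derived from the
 * code in kill_something_info() above:
 *   pid > 0   signal the single process with that pid
 *   pid == 0  signal every process in the caller's process group
 *   pid == -1 signal every process except init and the caller's own
 *             thread group
 *   pid < -1  signal every process in the process group -pid
 */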
/*
* These are for backward compatibility with the rest of the kernel source.
*/
int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
/*
* Make sure legacy kernel users don't send in bad values
* (normal paths check this in check_kill_permission).
*/
if (!valid_signal(sig))
return -EINVAL;
return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);
#define __si_special(priv) \
((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
int
send_sig(int sig, struct task_struct *p, int priv)
{
return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
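/*
 * Illustrative sketch (not part of the original file): in-kernel users
 * that hold a task reference can use send_sig() directly; the priv
 * argument selects SEND_SIG_PRIV vs SEND_SIG_NOINFO via __si_special()
 * above. The helper name is hypothetical.
 */
static inline int example_notify_task(struct task_struct *task)
{
	/* SIGUSR1, marked as originating from the kernel (priv != 0) */
	return send_sig(SIGUSR1, task, 1);
}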
void force_sig(int sig)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_KERNEL;
info.si_pid = 0;
info.si_uid = 0;
force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
void force_fatal_sig(int sig)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_KERNEL;
info.si_pid = 0;
info.si_uid = 0;
force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}
void force_exit_sig(int sig)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_KERNEL;
info.si_pid = 0;
info.si_uid = 0;
force_sig_info_to_task(&info, current, HANDLER_EXIT);
}
/*
* When things go south during signal handling, we
* will force a SIGSEGV. And if the signal that caused
* the problem was already a SIGSEGV, we'll want to
* make sure we don't even try to deliver the signal..
*/
void force_sigsegv(int sig)
{
if (sig == SIGSEGV)
force_fatal_sig(SIGSEGV);
else
force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr,
struct task_struct *t)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}
int force_sig_fault(int sig, int code, void __user *addr)
{
return force_sig_fault_to_task(sig, code, addr, current);
}
int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
struct kernel_siginfo info;
WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
clear_siginfo(&info);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
info.si_addr_lsb = lsb;
return force_sig_info(&info);
}
int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
struct kernel_siginfo info;
WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
clear_siginfo(&info);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
info.si_addr_lsb = lsb;
return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = SEGV_BNDERR;
info.si_addr = addr;
info.si_lower = lower;
info.si_upper = upper;
return force_sig_info(&info);
}
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = SEGV_PKUERR;
info.si_addr = addr;
info.si_pkey = pkey;
return force_sig_info(&info);
}
#endif
int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_PERF;
info.si_addr = addr;
info.si_perf_data = sig_data;
info.si_perf_type = type;
/*
* Signals generated by perf events should not terminate the whole
* process if SIGTRAP is blocked, however, delivering the signal
* asynchronously is better than not delivering at all. But tell user
* space if the signal was asynchronous, so it can clearly be
* distinguished from normal synchronous ones.
*/
info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
TRAP_PERF_FLAG_ASYNC :
0;
return send_sig_info(info.si_signo, &info, current);
}
/**
* force_sig_seccomp - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
* @reason: filter-supplied reason code to send to userland (via si_errno)
* @force_coredump: true to trigger a coredump
*
* Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
*/
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGSYS;
info.si_code = SYS_SECCOMP;
info.si_call_addr = (void __user *)KSTK_EIP(current);
info.si_errno = reason;
info.si_arch = syscall_get_arch(current);
info.si_syscall = syscall;
return force_sig_info_to_task(&info, current,
force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}
/* For the crazy architectures that include trap information in
* the errno field, instead of an actual errno value.
*/
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGTRAP;
info.si_errno = errno;
info.si_code = TRAP_HWBKPT;
info.si_addr = addr;
return force_sig_info(&info);
}
/* For the rare architectures that include trap information using
* si_trapno.
*/
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
info.si_trapno = trapno;
return force_sig_info(&info);
}
/* For the rare architectures that include trap information using
* si_trapno.
*/
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
struct task_struct *t)
{
struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
info.si_trapno = trapno;
return send_sig_info(info.si_signo, &info, t);
}
static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
int ret;
read_lock(&tasklist_lock);
ret = __kill_pgrp_info(sig, info, pgrp);
read_unlock(&tasklist_lock);
return ret;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);
int kill_pid(struct pid *pid, int sig, int priv)
{
return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
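/*
 * Illustrative sketch (not part of the original file): kill_pgrp() is
 * the typical interface for drivers implementing job control, e.g. a
 * TTY driver hanging up its foreground process group. The caller must
 * hold a reference on the struct pid. The helper name is hypothetical.
 */
static inline int example_hangup_pgrp(struct pid *pgrp)
{
	/* Deliver SIGHUP to every task in the process group. */
	return kill_pgrp(pgrp, SIGHUP, 1);
}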
#ifdef CONFIG_POSIX_TIMERS
/*
* These functions handle POSIX timer signals. POSIX timers use
* preallocated sigqueue structs for sending signals.
*/
static void __flush_itimer_signals(struct sigpending *pending)
{
sigset_t signal, retain;
struct sigqueue *q, *n;
signal = pending->signal;
sigemptyset(&retain);
list_for_each_entry_safe(q, n, &pending->list, list) {
int sig = q->info.si_signo;
if (likely(q->info.si_code != SI_TIMER)) {
sigaddset(&retain, sig);
} else {
sigdelset(&signal, sig);
list_del_init(&q->list);
__sigqueue_free(q);
}
}
sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
struct task_struct *tsk = current;
guard(spinlock_irqsave)(&tsk->sighand->siglock);
__flush_itimer_signals(&tsk->pending);
__flush_itimer_signals(&tsk->signal->shared_pending);
}
bool posixtimer_init_sigqueue(struct sigqueue *q)
{
struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);
if (!ucounts)
return false;
clear_siginfo(&q->info);
__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
return true;
}
static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
{
struct sigpending *pending;
int sig = q->info.si_signo;
signalfd_notify(t, sig);
pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
list_add_tail(&q->list, &pending->list);
sigaddset(&pending->signal, sig);
complete_signal(sig, t, type);
}
/*
* This function is used by POSIX timers to deliver a timer signal.
* Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
* set), the signal must be delivered to the specific thread (queues
* into t->pending).
*
* Where type is not PIDTYPE_PID, signals must be delivered to the
* process. In this case, prefer to deliver to current if it is in
* the same thread group as the target process and its sighand is
* stable, which avoids unnecessarily waking up a potentially idle task.
*/
static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
{
struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
if (t && tmr->it_pid_type != PIDTYPE_PID &&
same_thread_group(t, current) && !current->exit_state)
t = current;
return t;
}
void posixtimer_send_sigqueue(struct k_itimer *tmr)
{
struct sigqueue *q = &tmr->sigq;
int sig = q->info.si_signo;
struct task_struct *t;
unsigned long flags;
int result;
guard(rcu)();
t = posixtimer_get_target(tmr);
if (!t)
return;
if (!likely(lock_task_sighand(t, &flags)))
return;
/*
* Update @tmr::sigqueue_seq for posix timer signals with sighand
* locked to prevent a race against dequeue_signal().
*/
tmr->it_sigqueue_seq = tmr->it_signal_seq;
/*
* Set the signal delivery status under sighand lock, so that the
* ignored signal handling can distinguish between a periodic and a
* non-periodic timer.
*/
tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
if (!prepare_signal(sig, t, false)) {
result = TRACE_SIGNAL_IGNORED;
if (!list_empty(&q->list)) {
/*
* The signal was ignored and blocked. The timer
* expiry queued it because blocked signals are
* queued independent of the ignored state.
*
* The unblocking set SIGPENDING, but the signal
* was not yet dequeued from the pending list.
* So prepare_signal() sees unblocked and ignored,
* which ends up here. Leave it queued like a
* regular signal.
*
* The same happens when the task group is exiting
* and the signal is already queued.
* prepare_signal() treats SIGNAL_GROUP_EXIT as
* ignored independent of its queued state. This
* gets cleaned up in __exit_signal().
*/
goto out;
}
/* Periodic timers with SIG_IGN are queued on the ignored list */
if (tmr->it_sig_periodic) {
/*
* Already queued means the timer was rearmed after
* the previous expiry got it on the ignore list.
* Nothing to do for that case.
*/
if (hlist_unhashed(&tmr->ignored_list)) {
/*
* Take a signal reference and queue it on
* the ignored list.
*/
posixtimer_sigqueue_getref(q);
posixtimer_sig_ignore(t, q);
}
} else if (!hlist_unhashed(&tmr->ignored_list)) {
/*
* Covers the case where a timer was periodic and
* then the signal was ignored. Later it was rearmed
* as a oneshot timer. The previous signal is invalid
* now, and this oneshot signal has to be dropped.
* Remove it from the ignored list and drop the
* reference count as the signal is no longer
* queued.
*/
hlist_del_init(&tmr->ignored_list);
posixtimer_putref(tmr);
}
goto out;
}
if (unlikely(!list_empty(&q->list))) {
/* This holds a reference count already */
result = TRACE_SIGNAL_ALREADY_PENDING;
goto out;
}
/*
* If the signal is on the ignore list, it got blocked after it was
* ignored earlier. But nothing lifted the ignore. Move it back to
* the pending list to be consistent with the regular signal
* handling. This already holds a reference count.
*
* If it's not on the ignore list acquire a reference count.
*/
if (likely(hlist_unhashed(&tmr->ignored_list)))
posixtimer_sigqueue_getref(q);
else
hlist_del_init(&tmr->ignored_list);
posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
result = TRACE_SIGNAL_DELIVERED;
out:
trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
unlock_task_sighand(t, &flags);
}
static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
{
struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
/*
* If the timer is marked deleted already or the signal originates
* from a non-periodic timer, then just drop the reference
* count. Otherwise queue it on the ignored list.
*/
if (posixtimer_valid(tmr) && tmr->it_sig_periodic)
hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
else
posixtimer_putref(tmr);
}
static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
{
struct hlist_head *head = &tsk->signal->ignored_posix_timers;
struct hlist_node *tmp;
struct k_itimer *tmr;
if (likely(hlist_empty(head)))
return;
/*
* Rearming a timer with sighand lock held is not possible due to
* lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
* let the signal delivery path deal with it whether it needs to be
* rearmed or not. This cannot be decided here w/o dropping sighand
* lock and creating a loop retry horror show.
*/
hlist_for_each_entry_safe(tmr, tmp, head, ignored_list) {
struct task_struct *target;
/*
* tmr::sigq.info.si_signo is immutable, so accessing it
* without holding tmr::it_lock is safe.
*/
if (tmr->sigq.info.si_signo != sig)
continue;
hlist_del_init(&tmr->ignored_list);
/* This should never happen and leaks a reference count */
if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
continue;
/*
* Get the target for the signal. If target is a thread and
* has exited by now, drop the reference count.
*/
guard(rcu)();
target = posixtimer_get_target(tmr);
if (target)
posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
else
posixtimer_putref(tmr);
}
}
#else /* CONFIG_POSIX_TIMERS */
static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
#endif /* !CONFIG_POSIX_TIMERS */
void do_notify_pidfd(struct task_struct *task)
{
struct pid *pid = task_pid(task);
WARN_ON(task->exit_state == 0);
__wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
poll_to_key(EPOLLIN | EPOLLRDNORM));
}
/*
* Let a parent know about the death of a child.
* For a stopped/continued status change, use do_notify_parent_cldstop instead.
*
* Returns true if our parent ignored us and so we've switched to
* self-reaping.
*/
bool do_notify_parent(struct task_struct *tsk, int sig)
{
struct kernel_siginfo info;
unsigned long flags;
struct sighand_struct *psig;
bool autoreap = false;
u64 utime, stime;
WARN_ON_ONCE(sig == -1);
/* do_notify_parent_cldstop should have been called instead. */
WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
WARN_ON_ONCE(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
/* ptraced, or group-leader without sub-threads */
do_notify_pidfd(tsk);
if (sig != SIGCHLD) {
/*
* This is only possible if parent == real_parent.
* Check if it has changed security domain.
*/
if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
sig = SIGCHLD;
}
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
/*
* We are under tasklist_lock here so our parent is tied to
* us and cannot change.
*
* task_active_pid_ns will always return the same pid namespace
* until a task passes through release_task.
*
* write_lock() currently calls preempt_disable() which is the
* same as rcu_read_lock(), but according to Oleg, it is not
* correct to rely on this.
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
task_uid(tsk));
rcu_read_unlock();
task_cputime(tsk, &utime, &stime);
info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
info.si_code = CLD_DUMPED;
else if (tsk->exit_code & 0x7f)
info.si_code = CLD_KILLED;
else {
info.si_code = CLD_EXITED;
info.si_status = tsk->exit_code >> 8;
}
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags);
if (!tsk->ptrace && sig == SIGCHLD &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
(psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
/*
* We are exiting and our parent doesn't care. POSIX.1
* defines special semantics for setting SIGCHLD to SIG_IGN
* or setting the SA_NOCLDWAIT flag: we should be reaped
* automatically and not left for our parent's wait4 call.
* Rather than having the parent do it as a magic kind of
* signal handler, we just set this to tell do_exit that we
* can be cleaned up without becoming a zombie. Note that
* we still call __wake_up_parent in this case, because a
* blocked sys_wait4 might now return -ECHILD.
*
* Whether we send SIGCHLD or not for SA_NOCLDWAIT
* is implementation-defined: we do (if you don't want
* it, just use SIG_IGN instead).
*/
autoreap = true;
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
sig = 0;
}
/*
* Send with __send_signal as si_pid and si_uid are in the
* parent's namespaces.
*/
if (valid_signal(sig) && sig)
__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
return autoreap;
}
/**
* do_notify_parent_cldstop - notify parent of stopped/continued state change
* @tsk: task reporting the state change
* @for_ptracer: the notification is for ptracer
* @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
*
* Notify @tsk's parent that the stopped/continued state has changed. If
* @for_ptracer is %false, @tsk's group leader notifies to its real parent.
* If %true, @tsk reports to @tsk->parent which should be the ptracer.
*
* CONTEXT:
* Must be called with tasklist_lock at least read locked.
*/
static void do_notify_parent_cldstop(struct task_struct *tsk,
bool for_ptracer, int why)
{
struct kernel_siginfo info;
unsigned long flags;
struct task_struct *parent;
struct sighand_struct *sighand;
u64 utime, stime;
if (for_ptracer) {
parent = tsk->parent;
} else {
tsk = tsk->group_leader;
parent = tsk->real_parent;
}
clear_siginfo(&info);
info.si_signo = SIGCHLD;
info.si_errno = 0;
/*
* see comment in do_notify_parent() about the following 4 lines
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
rcu_read_unlock();
task_cputime(tsk, &utime, &stime);
info.si_utime = nsec_to_clock_t(utime);
info.si_stime = nsec_to_clock_t(stime);
info.si_code = why;
switch (why) {
case CLD_CONTINUED:
info.si_status = SIGCONT;
break;
case CLD_STOPPED:
info.si_status = tsk->signal->group_exit_code & 0x7f;
break;
case CLD_TRAPPED:
info.si_status = tsk->exit_code & 0x7f;
break;
default:
BUG();
}
sighand = parent->sighand;
spin_lock_irqsave(&sighand->siglock, flags);
if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
!(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
/*
* Even if SIGCHLD is not generated, we must wake up wait4 calls.
*/
__wake_up_parent(tsk, parent);
spin_unlock_irqrestore(&sighand->siglock, flags);
}
/*
* This must be called with current->sighand->siglock held.
*
* This should be the path for all ptrace stops.
* We always set current->last_siginfo while stopped here.
* That makes it a way to test a stopped process for
* being ptrace-stopped vs being job-control-stopped.
*
* Returns the signal the ptracer requested the code resume
* with. If the code did not stop because the tracer is gone,
* the stop signal remains unchanged unless clear_code.
*/
static int ptrace_stop(int exit_code, int why, unsigned long message,
kernel_siginfo_t *info)
__releases(&current->sighand->siglock)
__acquires(&current->sighand->siglock)
{
bool gstop_done = false;
if (arch_ptrace_stop_needed()) {
/*
* The arch code has something special to do before a
* ptrace stop. This is allowed to block, e.g. for faults
* on user stack pages. We can't keep the siglock while
* calling arch_ptrace_stop, so we must release it now.
* To preserve proper semantics, we must do this before
* any signal bookkeeping like checking group_stop_count.
*/
spin_unlock_irq(&current->sighand->siglock);
arch_ptrace_stop();
spin_lock_irq(&current->sighand->siglock);
}
/*
* After this point ptrace_signal_wake_up or signal_wake_up
* will clear TASK_TRACED if ptrace_unlink happens or a fatal
* signal comes in. Handle previous ptrace_unlinks and fatal
* signals here to prevent ptrace_stop sleeping in schedule.
*/
if (!current->ptrace || __fatal_signal_pending(current))
return exit_code;
set_special_state(TASK_TRACED);
current->jobctl |= JOBCTL_TRACED;
/*
* We're committing to trapping. TRACED should be visible before
* TRAPPING is cleared; otherwise, the tracer might fail do_wait().
* Also, transition to TRACED and updates to ->jobctl should be
* atomic with respect to siglock and should be done after the arch
* hook as siglock is released and regrabbed across it.
*
* TRACER TRACEE
*
* ptrace_attach()
* [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
* do_wait()
* set_current_state() smp_wmb();
* ptrace_do_wait()
* wait_task_stopped()
* task_stopped_code()
* [L] task_is_traced() [S] task_clear_jobctl_trapping();
*/
smp_wmb();
current->ptrace_message = message;
current->last_siginfo = info;
current->exit_code = exit_code;
/*
* If @why is CLD_STOPPED, we're trapping to participate in a group
* stop. Do the bookkeeping. Note that if SIGCONT was delivered
* across siglock relocks since INTERRUPT was scheduled, PENDING
* could be clear now. We act as if SIGCONT is received after
* TASK_TRACED is entered - ignore it.
*/
if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
gstop_done = task_participate_group_stop(current);
/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
/* entering a trap, clear TRAPPING */
task_clear_jobctl_trapping(current);
spin_unlock_irq(&current->sighand->siglock);
read_lock(&tasklist_lock);
/*
* Notify parents of the stop.
*
* While ptraced, there are two parents - the ptracer and
* the real_parent of the group_leader. The ptracer should
* know about every stop while the real parent is only
* interested in the completion of group stop. The states
* for the two don't interact with each other. Notify
* separately unless they're gonna be duplicates.
*/
if (current->ptrace)
do_notify_parent_cldstop(current, true, why);
if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
do_notify_parent_cldstop(current, false, why);
/*
* The previous do_notify_parent_cldstop() invocation woke the ptracer.
* On a PREEMPTION kernel this can result in a preemption requirement
* which will be fulfilled after read_unlock() and the ptracer will be
* put on the CPU.
* The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
* this task to wait in schedule(). If this task gets preempted then it
* remains enqueued on the runqueue. The ptracer will observe this and
* then sleep for a delay of one HZ tick. In the meantime this task
* gets scheduled, enters schedule() and will wait for the ptracer.
*
* This preemption point is not bad from a correctness point of
* view but extends the runtime by one HZ tick time due to the
* ptracer's sleep. The preempt-disable section ensures that there
* will be no preemption between unlock and schedule(), which
* improves performance since the ptracer will observe that
* the tracee is scheduled out once it gets on the CPU.
*
* On PREEMPT_RT locking tasklist_lock does not disable preemption.
* Therefore the task can be preempted after do_notify_parent_cldstop()
* before unlocking tasklist_lock so there is no benefit in doing this.
*
* In fact disabling preemption is harmful on PREEMPT_RT because
* the spinlock_t in cgroup_enter_frozen() must not be acquired
* with preemption disabled due to the 'sleeping' spinlock
* substitution of RT.
*/
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_disable();
read_unlock(&tasklist_lock);
cgroup_enter_frozen();
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable_no_resched();
schedule();
cgroup_leave_frozen(true);
/*
* We are back. Now reacquire the siglock before touching
* last_siginfo, so that we are sure to have synchronized with
* any signal-sending on another CPU that wants to examine it.
*/
spin_lock_irq(&current->sighand->siglock);
exit_code = current->exit_code;
current->last_siginfo = NULL;
current->ptrace_message = 0;
current->exit_code = 0;
/* LISTENING can be set only during STOP traps, clear it */
current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
/*
* Queued signals ignored us while we were stopped for tracing.
* So check for any that we should take before resuming user mode.
* This sets TIF_SIGPENDING, but never clears it.
*/
recalc_sigpending_tsk(current);
return exit_code;
}
static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
{
kernel_siginfo_t info;
clear_siginfo(&info);
info.si_signo = signr;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
/* Let the debugger run. */
return ptrace_stop(exit_code, why, message, &info);
}
int ptrace_notify(int exit_code, unsigned long message)
{
int signr;
BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
if (unlikely(task_work_pending(current)))
task_work_run();
spin_lock_irq(&current->sighand->siglock);
signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
spin_unlock_irq(&current->sighand->siglock);
return signr;
}
/**
* do_signal_stop - handle group stop for SIGSTOP and other stop signals
* @signr: signr causing group stop if initiating
*
* If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
* and participate in it. If already set, participate in the existing
* group stop. If participated in a group stop (and thus slept), %true is
* returned with siglock released.
*
* If ptraced, this function doesn't handle stop itself. Instead,
* %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
* untouched. The caller must ensure that INTERRUPT trap handling takes
* place afterwards.
*
* CONTEXT:
* Must be called with @current->sighand->siglock held, which is released
* on %true return.
*
* RETURNS:
* %false if group stop is already cancelled or ptrace trap is scheduled.
* %true if participated in group stop.
*/
static bool do_signal_stop(int signr)
__releases(&current->sighand->siglock)
{
struct signal_struct *sig = current->signal;
if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
struct task_struct *t;
/* signr will be recorded in task->jobctl for retries */
WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
unlikely(sig->group_exec_task))
return false;
/*
* There is no group stop already in progress. We must
* initiate one now.
*
* While ptraced, a task may be resumed while group stop is
* still in effect and then receive a stop signal and
* initiate another group stop. This deviates from the
* usual behavior as two consecutive stop signals can't
* cause two group stops when !ptraced. That is why we
* also check !task_is_stopped(t) below.
*
* The condition can be distinguished by testing whether
* SIGNAL_STOP_STOPPED is already set. Don't generate
* group_exit_code in such case.
*
* This is not necessary for SIGNAL_STOP_CONTINUED because
* an intervening stop signal is required to cause two
* continued events regardless of ptrace.
*/
if (!(sig->flags & SIGNAL_STOP_STOPPED))
sig->group_exit_code = signr;
sig->group_stop_count = 0;
if (task_set_jobctl_pending(current, signr | gstop))
sig->group_stop_count++;
for_other_threads(current, t) {
/*
* Setting state to TASK_STOPPED for a group
* stop is always done with the siglock held,
* so this check has no races.
*/
if (!task_is_stopped(t) &&
task_set_jobctl_pending(t, signr | gstop)) {
sig->group_stop_count++;
if (likely(!(t->ptrace & PT_SEIZED)))
signal_wake_up(t, 0);
else
ptrace_trap_notify(t);
}
}
}
if (likely(!current->ptrace)) {
int notify = 0;
/*
* If there are no other threads in the group, or if there
* is a group stop in progress and we are the last to stop,
* report to the parent.
*/
if (task_participate_group_stop(current))
notify = CLD_STOPPED;
current->jobctl |= JOBCTL_STOPPED;
set_special_state(TASK_STOPPED);
spin_unlock_irq(&current->sighand->siglock);
/*
* Notify the parent of the group stop completion. Because
* we're not holding either the siglock or tasklist_lock
* here, a ptracer may attach in between; however, this is for
* group stop and should always be delivered to the real
* parent of the group leader. The new ptracer will get
* its notification when this task transitions into
* TASK_TRACED.
*/
if (notify) {
read_lock(&tasklist_lock);
do_notify_parent_cldstop(current, false, notify);
read_unlock(&tasklist_lock);
}
/* Now we don't run again until woken by SIGCONT or SIGKILL */
cgroup_enter_frozen();
schedule();
return true;
} else {
/*
* While ptraced, group stop is handled by STOP trap.
* Schedule it and let the caller deal with it.
*/
task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
return false;
}
}
/**
* do_jobctl_trap - take care of ptrace jobctl traps
*
* When PT_SEIZED, it's used for both group stop and explicit
* SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
* accompanying siginfo. If stopped, lower eight bits of exit_code contain
* the stop signal; otherwise, %SIGTRAP.
*
* When !PT_SEIZED, it's used only for group stop trap with stop signal
* number as exit_code and no siginfo.
*
* CONTEXT:
* Must be called with @current->sighand->siglock held, which may be
* released and re-acquired before returning with intervening sleep.
*/
static void do_jobctl_trap(void)
{
struct signal_struct *signal = current->signal;
int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
if (current->ptrace & PT_SEIZED) {
if (!signal->group_stop_count &&
!(signal->flags & SIGNAL_STOP_STOPPED))
signr = SIGTRAP;
WARN_ON_ONCE(!signr);
ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
CLD_STOPPED, 0);
} else {
WARN_ON_ONCE(!signr);
ptrace_stop(signr, CLD_STOPPED, 0, NULL);
}
}
/**
* do_freezer_trap - handle the freezer jobctl trap
*
* Puts the task into the frozen state, unless the task is about to quit.
* In that case it just drops JOBCTL_TRAP_FREEZE.
*
* CONTEXT:
* Must be called with @current->sighand->siglock held,
* which is always released before returning.
*/
static void do_freezer_trap(void)
__releases(&current->sighand->siglock)
{
/*
* If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
* let's make another loop to give it a chance to be handled.
* In any case, we'll return back.
*/
if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
JOBCTL_TRAP_FREEZE) {
spin_unlock_irq(&current->sighand->siglock);
return;
}
/*
* Now we're sure that there is no pending fatal signal and no
* pending traps. Clear TIF_SIGPENDING to not get out of schedule()
* immediately (if there is a non-fatal signal pending), and
* put the task into sleep.
*/
__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
clear_thread_flag(TIF_SIGPENDING);
spin_unlock_irq(&current->sighand->siglock);
cgroup_enter_frozen();
schedule();
/*
* We could've been woken by task_work, run it to clear
* TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
*/
clear_notify_signal();
if (unlikely(task_work_pending(current)))
task_work_run();
}
static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
{
/*
* We do not check sig_kernel_stop(signr) but set this marker
* unconditionally because we do not know whether debugger will
* change signr. This flag has no meaning unless we are going
* to stop after return from ptrace_stop(). In this case it will
* be checked in do_signal_stop(), we should only stop if it was
* not cleared by SIGCONT while we were sleeping. See also the
* comment in dequeue_signal().
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
/* We're back. Did the debugger cancel the sig? */
if (signr == 0)
return signr;
/*
* Update the siginfo structure if the signal has
* changed. If the debugger wanted something
* specific in the siginfo structure then it should
* have updated *info via PTRACE_SETSIGINFO.
*/
if (signr != info->si_signo) {
clear_siginfo(info);
info->si_signo = signr;
info->si_errno = 0;
info->si_code = SI_USER;
rcu_read_lock();
info->si_pid = task_pid_vnr(current->parent);
info->si_uid = from_kuid_munged(current_user_ns(),
task_uid(current->parent));
rcu_read_unlock();
}
/* If the (new) signal is now blocked, requeue it. */
if (sigismember(&current->blocked, signr) ||
fatal_signal_pending(current)) {
send_signal_locked(signr, info, current, type);
signr = 0;
}
return signr;
}
static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
case SIL_FAULT:
case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
case SIL_FAULT_PERF_EVENT:
ksig->info.si_addr = arch_untagged_si_addr(
ksig->info.si_addr, ksig->sig, ksig->info.si_code);
break;
case SIL_KILL:
case SIL_TIMER:
case SIL_POLL:
case SIL_CHLD:
case SIL_RT:
case SIL_SYS:
break;
}
}
bool get_signal(struct ksignal *ksig)
{
struct sighand_struct *sighand = current->sighand;
struct signal_struct *signal = current->signal;
int signr;
clear_notify_signal();
if (unlikely(task_work_pending(current)))
task_work_run();
if (!task_sigpending(current))
return false;
if (unlikely(uprobe_deny_signal()))
return false;
/*
* Do this once, we can't return to user-mode if freezing() == T.
* do_signal_stop() and ptrace_stop() do freezable_schedule() and
* thus do not need another check after return.
*/
try_to_freeze();
relock:
spin_lock_irq(&sighand->siglock);
/*
* Every stopped thread goes here after wakeup. Check to see if
* we should notify the parent, prepare_signal(SIGCONT) encodes
* the CLD_ si_code into SIGNAL_CLD_MASK bits.
*/
if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
int why;
if (signal->flags & SIGNAL_CLD_CONTINUED)
why = CLD_CONTINUED;
else
why = CLD_STOPPED;
signal->flags &= ~SIGNAL_CLD_MASK;
spin_unlock_irq(&sighand->siglock);
/*
* Notify the parent that we're continuing. This event is
* always per-process and doesn't make a whole lot of sense
* for ptracers, who shouldn't consume the state via
* wait(2) either, but, for backward compatibility, notify
* the ptracer of the group leader too unless it's gonna be
* a duplicate.
*/
read_lock(&tasklist_lock);
do_notify_parent_cldstop(current, false, why);
if (ptrace_reparented(current->group_leader))
do_notify_parent_cldstop(current->group_leader,
true, why);
read_unlock(&tasklist_lock);
goto relock;
}
for (;;) {
struct k_sigaction *ka;
enum pid_type type;
/* Has this task already been marked for death? */
if ((signal->flags & SIGNAL_GROUP_EXIT) ||
signal->group_exec_task) {
signr = SIGKILL;
sigdelset(&current->pending.signal, SIGKILL);
trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
&sighand->action[SIGKILL-1]);
recalc_sigpending();
/*
* implies do_group_exit() or return to PF_USER_WORKER,
* no need to initialize ksig->info/etc.
*/
goto fatal;
}
if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
do_signal_stop(0))
goto relock;
if (unlikely(current->jobctl &
(JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
if (current->jobctl & JOBCTL_TRAP_MASK) {
do_jobctl_trap();
spin_unlock_irq(&sighand->siglock);
} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
do_freezer_trap();
goto relock;
}
/*
* If the task is leaving the frozen state, let's update
* cgroup counters and reset the frozen bit.
*/
if (unlikely(cgroup_task_frozen(current))) {
spin_unlock_irq(&sighand->siglock);
cgroup_leave_frozen(false);
goto relock;
}
/*
* Signals generated by the execution of an instruction
* need to be delivered before any other pending signals
* so that the instruction pointer in the signal stack
* frame points to the faulting instruction.
*/
type = PIDTYPE_PID;
signr = dequeue_synchronous_signal(&ksig->info);
if (!signr)
signr = dequeue_signal(&current->blocked, &ksig->info, &type);
if (!signr)
break; /* will return 0 */
if (unlikely(current->ptrace) && (signr != SIGKILL) &&
!(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
signr = ptrace_signal(signr, &ksig->info, type);
if (!signr)
continue;
}
ka = &sighand->action[signr-1];
/* Trace actually delivered signals. */
trace_signal_deliver(signr, &ksig->info, ka);
if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
continue;
if (ka->sa.sa_handler != SIG_DFL) {
/* Run the handler. */
ksig->ka = *ka;
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
break; /* will return non-zero "signr" value */
}
/*
* Now we are doing the default action for this signal.
*/
if (sig_kernel_ignore(signr)) /* Default is nothing. */
continue;
/*
* Global init gets no signals it doesn't want.
* Container-init gets no signals it doesn't want from same
* container.
*
* Note that if global/container-init sees a sig_kernel_only()
* signal here, the signal must have been generated internally
* or must have come from an ancestor namespace. In either
* case, the signal cannot be dropped.
*/
if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
!sig_kernel_only(signr))
continue;
if (sig_kernel_stop(signr)) {
/*
* The default action is to stop all threads in
* the thread group. The job control signals
* do nothing in an orphaned pgrp, but SIGSTOP
* always works. Note that siglock needs to be
* dropped during the call to is_orphaned_pgrp()
* because of lock ordering with tasklist_lock.
* This allows an intervening SIGCONT to be posted.
* We need to check for that and bail out if necessary.
*/
if (signr != SIGSTOP) {
spin_unlock_irq(&sighand->siglock);
/* signals can be posted during this window */
if (is_current_pgrp_orphaned())
goto relock;
spin_lock_irq(&sighand->siglock);
}
if (likely(do_signal_stop(signr))) {
/* It released the siglock. */
goto relock;
}
/*
* We didn't actually stop, due to a race
* with SIGCONT or something like that.
*/
continue;
}
fatal:
spin_unlock_irq(&sighand->siglock);
if (unlikely(cgroup_task_frozen(current)))
cgroup_leave_frozen(true);
/*
* Anything else is fatal, maybe with a core dump.
*/
current->flags |= PF_SIGNALED;
if (sig_kernel_coredump(signr)) {
if (print_fatal_signals)
print_fatal_signal(signr);
proc_coredump_connector(current);
/*
* If it was able to dump core, this kills all
* other threads in the group and synchronizes with
* their demise. If we lost the race with another
* thread getting here, it set group_exit_code
* first and our do_group_exit call below will use
* that value and ignore the one we pass it.
*/
vfs_coredump(&ksig->info);
}
/*
* PF_USER_WORKER threads will catch and exit on fatal signals
* themselves. They have cleanup that must be performed, so we
* cannot call do_exit() on their behalf. Note that ksig won't
* be properly initialized, PF_USER_WORKER's shouldn't use it.
*/
if (current->flags & PF_USER_WORKER)
goto out;
/*
* Death signals, no core dump.
*/
do_group_exit(signr);
/* NOTREACHED */
}
spin_unlock_irq(&sighand->siglock);
ksig->sig = signr;
if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
hide_si_addr_tag_bits(ksig);
out:
return signr > 0;
}
/**
* signal_delivered - called after signal delivery to update blocked signals
* @ksig: kernel signal struct
* @stepping: nonzero if debugger single-step or block-step in use
*
* This function should be called when a signal has successfully been
* delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
* is always blocked), and the signal itself is blocked unless %SA_NODEFER
* is set in @ksig->ka.sa.sa_flags. Tracing is notified.
*/
static void signal_delivered(struct ksignal *ksig, int stepping)
{
sigset_t blocked;
/* A signal was successfully delivered, and the
saved sigmask was stored on the signal frame,
and will be restored by sigreturn. So we can
simply clear the restore sigmask flag. */
clear_restore_sigmask();
sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
sigaddset(&blocked, ksig->sig);
set_current_blocked(&blocked);
if (current->sas_ss_flags & SS_AUTODISARM)
sas_ss_reset(current);
if (stepping)
ptrace_notify(SIGTRAP, 0);
}
void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
if (failed)
force_sigsegv(ksig->sig);
else
signal_delivered(ksig, stepping);
}
/*
* It could be that complete_signal() picked us to notify about the
* group-wide signal. Other threads should be notified now to take
* the shared signals in @which since we will not.
*/
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
sigset_t retarget;
struct task_struct *t;
sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
if (sigisemptyset(&retarget))
return;
for_other_threads(tsk, t) {
if (t->flags & PF_EXITING)
continue;
if (!has_pending_signals(&retarget, &t->blocked))
continue;
/* Remove the signals this thread can handle. */
sigandsets(&retarget, &retarget, &t->blocked);
if (!task_sigpending(t))
signal_wake_up(t, 0);
if (sigisemptyset(&retarget))
break;
}
}
void exit_signals(struct task_struct *tsk)
{
int group_stop = 0;
sigset_t unblocked;
/*
* @tsk is about to have PF_EXITING set - lock out users which
* expect stable threadgroup.
*/
cgroup_threadgroup_change_begin(tsk);
if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
sched_mm_cid_exit_signals(tsk);
tsk->flags |= PF_EXITING;
cgroup_threadgroup_change_end(tsk);
return;
}
spin_lock_irq(&tsk->sighand->siglock);
/*
* From now this task is not visible for group-wide signals,
* see wants_signal(), do_signal_stop().
*/
sched_mm_cid_exit_signals(tsk);
tsk->flags |= PF_EXITING;
cgroup_threadgroup_change_end(tsk);
if (!task_sigpending(tsk))
goto out;
unblocked = tsk->blocked;
signotset(&unblocked);
retarget_shared_pending(tsk, &unblocked);
if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
task_participate_group_stop(tsk))
group_stop = CLD_STOPPED;
out:
spin_unlock_irq(&tsk->sighand->siglock);
/*
* If group stop has completed, deliver the notification. This
* should always go to the real parent of the group leader.
*/
if (unlikely(group_stop)) {
read_lock(&tasklist_lock);
do_notify_parent_cldstop(tsk, false, group_stop);
read_unlock(&tasklist_lock);
}
}
/*
* System call entry points.
*/
/**
* sys_restart_syscall - restart a system call
*/
SYSCALL_DEFINE0(restart_syscall)
{
struct restart_block *restart = &current->restart_block;
return restart->fn(restart);
}
long do_no_restart_syscall(struct restart_block *param)
{
return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
sigset_t newblocked;
/* A set of now blocked but previously unblocked signals. */
sigandnsets(&newblocked, newset, &current->blocked);
retarget_shared_pending(tsk, &newblocked);
}
tsk->blocked = *newset;
recalc_sigpending();
}
/**
* set_current_blocked - change current->blocked mask
* @newset: new mask
*
* It is wrong to change ->blocked directly, this helper should be used
* to ensure the process can't miss a shared signal we are going to block.
*/
void set_current_blocked(sigset_t *newset)
{
sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
__set_current_blocked(newset);
}
void __set_current_blocked(const sigset_t *newset)
{
struct task_struct *tsk = current;
/*
* In case the signal mask hasn't changed, there is nothing we need
* to do. The current->blocked shouldn't be modified by other task.
*/
if (sigequalsets(&tsk->blocked, newset))
return;
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, newset);
spin_unlock_irq(&tsk->sighand->siglock);
}
/*
* This is also useful for kernel threads that want to temporarily
* (or permanently) block certain signals.
*
* NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
* interface happily blocks "unblockable" signals like SIGKILL
* and friends.
*/
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
struct task_struct *tsk = current;
sigset_t newset;
/* Lockless, only current can change ->blocked, never from irq */
if (oldset)
*oldset = tsk->blocked;
switch (how) {
case SIG_BLOCK:
sigorsets(&newset, &tsk->blocked, set);
break;
case SIG_UNBLOCK:
sigandnsets(&newset, &tsk->blocked, set);
break;
case SIG_SETMASK:
newset = *set;
break;
default:
return -EINVAL;
}
__set_current_blocked(&newset);
return 0;
}
EXPORT_SYMBOL(sigprocmask);
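/*
 * Illustrative sketch (not part of the original file): as the comment
 * above notes, the in-kernel sigprocmask() will happily block even
 * SIGKILL. A kernel thread could temporarily mask all signals like
 * this; the helper names are hypothetical.
 */
static inline void example_block_all_signals(sigset_t *saved)
{
	sigset_t all;

	sigfillset(&all);
	sigprocmask(SIG_SETMASK, &all, saved);	/* save old mask in *saved */
}

static inline void example_restore_signal_mask(sigset_t *saved)
{
	sigprocmask(SIG_SETMASK, saved, NULL);	/* put the old mask back */
}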
/*
* The api helps set app-provided sigmasks.
*
* This is useful for syscalls such as ppoll, pselect, io_pgetevents and
* epoll_pwait where a new sigmask is passed from userland for the syscalls.
*
* Note that it does set_restore_sigmask() in advance, so it must be always
* paired with restore_saved_sigmask_unless() before return from syscall.
*/
int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
sigset_t kmask;
if (!umask)
return 0;
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
return -EFAULT;
set_restore_sigmask();
current->saved_sigmask = current->blocked;
set_current_blocked(&kmask);
return 0;
}
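/*
 * Illustrative sketch (not part of the original file): set_user_sigmask()
 * is meant to be paired with restore_saved_sigmask_unless() as the
 * comment above states. The wait step and helper name below are
 * hypothetical stand-ins for what ppoll/pselect/epoll_pwait do.
 */
static inline long example_wait_with_sigmask(const sigset_t __user *umask,
					     size_t sigsetsize)
{
	long ret;

	ret = set_user_sigmask(umask, sigsetsize);
	if (ret)
		return ret;

	ret = -EINTR;	/* stand-in for the actual interruptible wait */

	/* Keep the temporary mask only when a signal interrupted the wait. */
	restore_saved_sigmask_unless(ret == -EINTR);
	return ret;
}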
#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize)
{
sigset_t kmask;
if (!umask)
return 0;
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (get_compat_sigset(&kmask, umask))
return -EFAULT;
set_restore_sigmask();
current->saved_sigmask = current->blocked;
set_current_blocked(&kmask);
return 0;
}
#endif
/**
* sys_rt_sigprocmask - change the list of currently blocked signals
* @how: whether to add, remove, or set signals
* @nset: new signal mask, if non-null
* @oset: previous value of signal mask if non-null
* @sigsetsize: size of sigset_t type
*/
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
sigset_t __user *, oset, size_t, sigsetsize)
{
sigset_t old_set, new_set;
int error;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
old_set = current->blocked;
if (nset) {
if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
return -EFAULT;
sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
error = sigprocmask(how, &new_set, NULL);
if (error)
return error;
}
if (oset) {
if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
return -EFAULT;
}
return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
sigset_t old_set = current->blocked;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (nset) {
sigset_t new_set;
int error;
if (get_compat_sigset(&new_set, nset))
return -EFAULT;
sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
error = sigprocmask(how, &new_set, NULL);
if (error)
return error;
}
return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif
static void do_sigpending(sigset_t *set)
{
spin_lock_irq(&current->sighand->siglock);
sigorsets(set, &current->pending.signal,
&current->signal->shared_pending.signal);
spin_unlock_irq(&current->sighand->siglock);
/* Outside the lock because only this thread touches it. */
sigandsets(set, &current->blocked, set);
}
/**
* sys_rt_sigpending - examine a pending signal that has been raised
* while blocked
* @uset: stores pending signals
* @sigsetsize: size of sigset_t type or larger
*/
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
sigset_t set;
if (sigsetsize > sizeof(*uset))
return -EINVAL;
do_sigpending(&set);
if (copy_to_user(uset, &set, sigsetsize))
return -EFAULT;
return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
compat_size_t, sigsetsize)
{
sigset_t set;
if (sigsetsize > sizeof(*uset))
return -EINVAL;
do_sigpending(&set);
return put_compat_sigset(uset, &set, sigsetsize);
}
#endif
static const struct {
unsigned char limit, layout;
} sig_sicodes[] = {
[SIGILL] = { NSIGILL, SIL_FAULT },
[SIGFPE] = { NSIGFPE, SIL_FAULT },
[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
[SIGBUS] = { NSIGBUS, SIL_FAULT },
[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT)
[SIGEMT] = { NSIGEMT, SIL_FAULT },
#endif
[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
[SIGPOLL] = { NSIGPOLL, SIL_POLL },
[SIGSYS] = { NSIGSYS, SIL_SYS },
};
static bool known_siginfo_layout(unsigned sig, int si_code)
{
if (si_code == SI_KERNEL)
return true;
else if (si_code > SI_USER) {
if (sig_specific_sicodes(sig)) {
if (si_code <= sig_sicodes[sig].limit)
return true;
}
else if (si_code <= NSIGPOLL)
return true;
}
else if (si_code >= SI_DETHREAD)
return true;
else if (si_code == SI_ASYNCNL)
return true;
return false;
}
enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
enum siginfo_layout layout = SIL_KILL;
if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
if ((sig < ARRAY_SIZE(sig_sicodes)) &&
(si_code <= sig_sicodes[sig].limit)) {
layout = sig_sicodes[sig].layout;
/* Handle the exceptions */
if ((sig == SIGBUS) &&
(si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
layout = SIL_FAULT_MCEERR;
else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
layout = SIL_FAULT_PKUERR;
#endif
else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
layout = SIL_FAULT_PERF_EVENT;
else if (IS_ENABLED(CONFIG_SPARC) &&
(sig == SIGILL) && (si_code == ILL_ILLTRP))
layout = SIL_FAULT_TRAPNO;
else if (IS_ENABLED(CONFIG_ALPHA) &&
((sig == SIGFPE) ||
((sig == SIGTRAP) && (si_code == TRAP_UNK))))
layout = SIL_FAULT_TRAPNO;
}
else if (si_code <= NSIGPOLL)
layout = SIL_POLL;
} else {
if (si_code == SI_TIMER)
layout = SIL_TIMER;
else if (si_code == SI_SIGIO)
layout = SIL_POLL;
else if (si_code < 0)
layout = SIL_RT;
}
return layout;
}
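/*
 * Illustrative sketch (not part of the original file): siginfo_layout()
 * maps a (signal, si_code) pair to the union layout used by the copy
 * helpers below, e.g. (SIGSEGV, SEGV_MAPERR) -> SIL_FAULT,
 * (SIGSEGV, SEGV_BNDERR) -> SIL_FAULT_BNDERR, (SIGCHLD, CLD_EXITED)
 * -> SIL_CHLD, and a queued si_code < 0 (other than SI_TIMER and
 * SI_SIGIO) -> SIL_RT. The helper name is hypothetical.
 */
static inline bool example_is_fault_layout(int sig, int si_code)
{
	switch (siginfo_layout(sig, si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		return true;
	default:
		return false;
	}
}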
static inline char __user *si_expansion(const siginfo_t __user *info)
{
return ((char __user *)info) + sizeof(struct kernel_siginfo);
}
int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
char __user *expansion = si_expansion(to);
if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
return -EFAULT;
if (clear_user(expansion, SI_EXPANSION_SIZE))
return -EFAULT;
return 0;
}
static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
const siginfo_t __user *from)
{
if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
char __user *expansion = si_expansion(from);
char buf[SI_EXPANSION_SIZE];
int i;
/*
* An unknown si_code might need more than
* sizeof(struct kernel_siginfo) bytes. Verify all of the
* extra bytes are 0. This guarantees copy_siginfo_to_user
* will return this data to userspace exactly.
*/
if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
return -EFAULT;
for (i = 0; i < SI_EXPANSION_SIZE; i++) {
if (buf[i] != 0)
return -E2BIG;
}
}
return 0;
}
static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
const siginfo_t __user *from)
{
if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
return -EFAULT;
to->si_signo = signo;
return post_copy_siginfo_from_user(to, from);
}
int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
{
if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
return -EFAULT;
return post_copy_siginfo_from_user(to, from);
}
#ifdef CONFIG_COMPAT
/**
* copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
* @to: compat siginfo destination
* @from: kernel siginfo source
*
* Note: This function does not work properly for the SIGCHLD on x32, but
* fortunately it doesn't have to. The only valid callers for this function are
* copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
* The latter does not care because SIGCHLD will never cause a coredump.
*/
void copy_siginfo_to_external32(struct compat_siginfo *to,
const struct kernel_siginfo *from)
{
memset(to, 0, sizeof(*to));
to->si_signo = from->si_signo;
to->si_errno = from->si_errno;
to->si_code = from->si_code;
switch(siginfo_layout(from->si_signo, from->si_code)) {
case SIL_KILL:
to->si_pid = from->si_pid;
to->si_uid = from->si_uid;
break;
case SIL_TIMER:
to->si_tid = from->si_tid;
to->si_overrun = from->si_overrun;
to->si_int = from->si_int;
break;
case SIL_POLL:
to->si_band = from->si_band;
to->si_fd = from->si_fd;
break;
case SIL_FAULT:
to->si_addr = ptr_to_compat(from->si_addr);
break;
case SIL_FAULT_TRAPNO:
to->si_addr = ptr_to_compat(from->si_addr);
to->si_trapno = from->si_trapno;
break;
case SIL_FAULT_MCEERR:
to->si_addr = ptr_to_compat(from->si_addr);
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = ptr_to_compat(from->si_addr);
to->si_lower = ptr_to_compat(from->si_lower);
to->si_upper = ptr_to_compat(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = ptr_to_compat(from->si_addr);
to->si_pkey = from->si_pkey;
break;
case SIL_FAULT_PERF_EVENT:
to->si_addr = ptr_to_compat(from->si_addr);
to->si_perf_data = from->si_perf_data;
to->si_perf_type = from->si_perf_type;
to->si_perf_flags = from->si_perf_flags;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
to->si_uid = from->si_uid;
to->si_status = from->si_status;
to->si_utime = from->si_utime;
to->si_stime = from->si_stime;
break;
case SIL_RT:
to->si_pid = from->si_pid;
to->si_uid = from->si_uid;
to->si_int = from->si_int;
break;
case SIL_SYS:
to->si_call_addr = ptr_to_compat(from->si_call_addr);
to->si_syscall = from->si_syscall;
to->si_arch = from->si_arch;
break;
}
}
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
const struct kernel_siginfo *from)
{
struct compat_siginfo new;
copy_siginfo_to_external32(&new, from);
if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
return -EFAULT;
return 0;
}
static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
const struct compat_siginfo *from)
{
clear_siginfo(to);
to->si_signo = from->si_signo;
to->si_errno = from->si_errno;
to->si_code = from->si_code;
switch(siginfo_layout(from->si_signo, from->si_code)) {
case SIL_KILL:
to->si_pid = from->si_pid;
to->si_uid = from->si_uid;
break;
case SIL_TIMER:
to->si_tid = from->si_tid;
to->si_overrun = from->si_overrun;
to->si_int = from->si_int;
break;
case SIL_POLL:
to->si_band = from->si_band;
to->si_fd = from->si_fd;
break;
case SIL_FAULT:
to->si_addr = compat_ptr(from->si_addr);
break;
case SIL_FAULT_TRAPNO:
to->si_addr = compat_ptr(from->si_addr);
to->si_trapno = from->si_trapno;
break;
case SIL_FAULT_MCEERR:
to->si_addr = compat_ptr(from->si_addr);
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = compat_ptr(from->si_addr);
to->si_lower = compat_ptr(from->si_lower);
to->si_upper = compat_ptr(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = compat_ptr(from->si_addr);
to->si_pkey = from->si_pkey;
break;
case SIL_FAULT_PERF_EVENT:
to->si_addr = compat_ptr(from->si_addr);
to->si_perf_data = from->si_perf_data;
to->si_perf_type = from->si_perf_type;
to->si_perf_flags = from->si_perf_flags;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
to->si_uid = from->si_uid;
to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
if (in_x32_syscall()) {
to->si_utime = from->_sifields._sigchld_x32._utime;
to->si_stime = from->_sifields._sigchld_x32._stime;
} else
#endif
{
to->si_utime = from->si_utime;
to->si_stime = from->si_stime;
}
break;
case SIL_RT:
to->si_pid = from->si_pid;
to->si_uid = from->si_uid;
to->si_int = from->si_int;
break;
case SIL_SYS:
to->si_call_addr = compat_ptr(from->si_call_addr);
to->si_syscall = from->si_syscall;
to->si_arch = from->si_arch;
break;
}
return 0;
}
static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
const struct compat_siginfo __user *ufrom)
{
struct compat_siginfo from;
if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
return -EFAULT;
from.si_signo = signo;
return post_copy_siginfo_from_user32(to, &from);
}
int copy_siginfo_from_user32(struct kernel_siginfo *to,
const struct compat_siginfo __user *ufrom)
{
struct compat_siginfo from;
if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
return -EFAULT;
return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */
/**
* do_sigtimedwait - wait for queued signals specified in @which
* @which: queued signals to wait for
* @info: if non-null, the signal's siginfo is returned here
* @ts: upper bound on process time suspension
*/
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
const struct timespec64 *ts)
{
ktime_t *to = NULL, timeout = KTIME_MAX;
struct task_struct *tsk = current;
sigset_t mask = *which;
enum pid_type type;
int sig, ret = 0;
if (ts) {
if (!timespec64_valid(ts))
return -EINVAL;
timeout = timespec64_to_ktime(*ts);
to = &timeout;
}
/*
* Invert the set of allowed signals to get those we want to block.
*/
sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
signotset(&mask);
spin_lock_irq(&tsk->sighand->siglock);
sig = dequeue_signal(&mask, info, &type);
if (!sig && timeout) {
/*
* None ready, temporarily unblock the signals we're interested
* in while we are sleeping, so that we'll be awakened when
* they arrive. Unblocking is always fine, we can avoid
* set_current_blocked().
*/
tsk->real_blocked = tsk->blocked;
sigandsets(&tsk->blocked, &tsk->blocked, &mask);
recalc_sigpending();
spin_unlock_irq(&tsk->sighand->siglock);
__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
HRTIMER_MODE_REL);
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
sigemptyset(&tsk->real_blocked);
sig = dequeue_signal(&mask, info, &type);
}
spin_unlock_irq(&tsk->sighand->siglock);
if (sig)
return sig;
return ret ? -EINTR : -EAGAIN;
}
/**
* sys_rt_sigtimedwait - synchronously wait for queued signals specified
* in @uthese
* @uthese: queued signals to wait for
* @uinfo: if non-null, the signal's siginfo is returned here
* @uts: upper bound on process time suspension
* @sigsetsize: size of sigset_t type
*/
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
siginfo_t __user *, uinfo,
const struct __kernel_timespec __user *, uts,
size_t, sigsetsize)
{
sigset_t these;
struct timespec64 ts;
kernel_siginfo_t info;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&these, uthese, sizeof(these)))
return -EFAULT;
if (uts) {
if (get_timespec64(&ts, uts))
return -EFAULT;
}
ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
if (ret > 0 && uinfo) {
if (copy_siginfo_to_user(uinfo, &info))
ret = -EFAULT;
}
return ret;
}
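/*
 * Userspace usage sketch (illustrative, not part of this file): wait up to
 * five seconds for SIGUSR1 via the glibc sigtimedwait() wrapper around this
 * syscall. The signal is blocked first so that it stays queued for the wait
 * instead of invoking a handler.
 *
 *     sigset_t set;
 *     siginfo_t info;
 *     struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *     sigemptyset(&set);
 *     sigaddset(&set, SIGUSR1);
 *     sigprocmask(SIG_BLOCK, &set, NULL);
 *     if (sigtimedwait(&set, &info, &ts) < 0)
 *             perror("sigtimedwait");    // errno is EAGAIN on timeout
 */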
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
siginfo_t __user *, uinfo,
const struct old_timespec32 __user *, uts,
size_t, sigsetsize)
{
sigset_t these;
struct timespec64 ts;
kernel_siginfo_t info;
int ret;
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&these, uthese, sizeof(these)))
return -EFAULT;
if (uts) {
if (get_old_timespec32(&ts, uts))
return -EFAULT;
}
ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
if (ret > 0 && uinfo) {
if (copy_siginfo_to_user(uinfo, &info))
ret = -EFAULT;
}
return ret;
}
#endif
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
struct compat_siginfo __user *, uinfo,
struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
sigset_t s;
struct timespec64 t;
kernel_siginfo_t info;
long ret;
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (get_compat_sigset(&s, uthese))
return -EFAULT;
if (uts) {
if (get_timespec64(&t, uts))
return -EFAULT;
}
ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
if (ret > 0 && uinfo) {
if (copy_siginfo_to_user32(uinfo, &info))
ret = -EFAULT;
}
return ret;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
struct compat_siginfo __user *, uinfo,
struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
sigset_t s;
struct timespec64 t;
kernel_siginfo_t info;
long ret;
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (get_compat_sigset(&s, uthese))
return -EFAULT;
if (uts) {
if (get_old_timespec32(&t, uts))
return -EFAULT;
}
ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
if (ret > 0 && uinfo) {
if (copy_siginfo_to_user32(uinfo, &info))
ret = -EFAULT;
}
return ret;
}
#endif
#endif
static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
enum pid_type type)
{
clear_siginfo(info);
info->si_signo = sig;
info->si_errno = 0;
info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
info->si_pid = task_tgid_vnr(current);
info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}
/**
* sys_kill - send a signal to a process
* @pid: the PID of the process
* @sig: signal to be sent
*/
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
struct kernel_siginfo info;
prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
return kill_something_info(sig, &info, pid);
}
/*
* Verify that the signaler and signalee either are in the same pid namespace
* or that the signaler's pid namespace is an ancestor of the signalee's pid
* namespace.
*/
static bool access_pidfd_pidns(struct pid *pid)
{
struct pid_namespace *active = task_active_pid_ns(current);
struct pid_namespace *p = ns_of_pid(pid);
for (;;) {
if (!p)
return false;
if (p == active)
break;
p = p->parent;
}
return true;
}
static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
/*
* Avoid hooking up compat syscalls and instead handle necessary
* conversions here. Note, this is a stop-gap measure and should not be
* considered a generic solution.
*/
if (in_compat_syscall())
return copy_siginfo_from_user32(
kinfo, (struct compat_siginfo __user *)info);
#endif
return copy_siginfo_from_user(kinfo, info);
}
static struct pid *pidfd_to_pid(const struct file *file)
{
struct pid *pid;
pid = pidfd_pid(file);
if (!IS_ERR(pid))
return pid;
return tgid_pidfd_to_pid(file);
}
#define PIDFD_SEND_SIGNAL_FLAGS \
(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
PIDFD_SIGNAL_PROCESS_GROUP)
static int do_pidfd_send_signal(struct pid *pid, int sig, enum pid_type type,
siginfo_t __user *info, unsigned int flags)
{
kernel_siginfo_t kinfo;
switch (flags) {
case PIDFD_SIGNAL_THREAD:
type = PIDTYPE_PID;
break;
case PIDFD_SIGNAL_THREAD_GROUP:
type = PIDTYPE_TGID;
break;
case PIDFD_SIGNAL_PROCESS_GROUP:
type = PIDTYPE_PGID;
break;
}
if (info) {
int ret;
ret = copy_siginfo_from_user_any(&kinfo, info);
if (unlikely(ret))
return ret;
if (unlikely(sig != kinfo.si_signo))
return -EINVAL;
/* Only allow sending arbitrary signals to yourself. */
if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
(kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
return -EPERM;
} else {
prepare_kill_siginfo(sig, &kinfo, type);
}
if (type == PIDTYPE_PGID)
return kill_pgrp_info(sig, &kinfo, pid);
return kill_pid_info_type(sig, &kinfo, pid, type);
}
/**
* sys_pidfd_send_signal - Signal a process through a pidfd
* @pidfd: file descriptor of the process
* @sig: signal to send
* @info: signal info
* @flags: future flags
*
* Send the signal to the thread group or to the individual thread depending
* on PIDFD_THREAD.
* In the future, an extension to @flags may be used to override the default
* scope of @pidfd.
*
* Return: 0 on success, negative errno on failure
*/
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
siginfo_t __user *, info, unsigned int, flags)
{
struct pid *pid;
enum pid_type type;
int ret;
/* Reject any flags outside the supported signal scope flags. */
if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
return -EINVAL;
/* Ensure that only a single signal scope determining flag is set. */
if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
return -EINVAL;
switch (pidfd) {
case PIDFD_SELF_THREAD:
pid = get_task_pid(current, PIDTYPE_PID);
type = PIDTYPE_PID;
break;
case PIDFD_SELF_THREAD_GROUP:
pid = get_task_pid(current, PIDTYPE_TGID);
type = PIDTYPE_TGID;
break;
default: {
CLASS(fd, f)(pidfd);
if (fd_empty(f))
return -EBADF;
/* Is this a pidfd? */
pid = pidfd_to_pid(fd_file(f));
if (IS_ERR(pid))
return PTR_ERR(pid);
if (!access_pidfd_pidns(pid))
return -EINVAL;
/* Infer scope from the type of pidfd. */
if (fd_file(f)->f_flags & PIDFD_THREAD)
type = PIDTYPE_PID;
else
type = PIDTYPE_TGID;
return do_pidfd_send_signal(pid, sig, type, info, flags);
}
}
ret = do_pidfd_send_signal(pid, sig, type, info, flags);
put_pid(pid);
return ret;
}
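/*
 * Userspace usage sketch (illustrative; assumes a libc exposing the
 * SYS_pidfd_open and SYS_pidfd_send_signal syscall numbers, and "pid"
 * identifying the target process): obtain a pidfd and send it SIGTERM.
 * Because delivery goes through the pidfd, the signal cannot be misdirected
 * if the PID is recycled in between.
 *
 *     int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *     if (pidfd < 0)
 *             return -1;
 *     if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
 *             perror("pidfd_send_signal");
 *     close(pidfd);
 */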
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
struct task_struct *p;
int error = -ESRCH;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
error = check_kill_permission(sig, info, p);
/*
* The null signal is a permissions and process existence
* probe. No signal is actually delivered.
*/
if (!error && sig) {
error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
/*
* If lock_task_sighand() failed we pretend the task
* dies after receiving the signal. The window is tiny,
* and the signal is private anyway.
*/
if (unlikely(error == -ESRCH))
error = 0;
}
}
rcu_read_unlock();
return error;
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
struct kernel_siginfo info;
prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
return do_send_specific(tgid, pid, sig, &info);
}
/**
* sys_tgkill - send signal to one specific thread
* @tgid: the thread group ID of the thread
* @pid: the PID of the thread
* @sig: signal to be sent
*
* This syscall also checks the @tgid and returns -ESRCH even if the PID
* exists but no longer belongs to the target thread group. This
* method solves the problem of threads exiting and PIDs getting reused.
*/
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
/* This is only valid for single tasks */
if (pid <= 0 || tgid <= 0)
return -EINVAL;
return do_tkill(tgid, pid, sig);
}
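/*
 * Userspace usage sketch (illustrative): signal one specific thread of the
 * calling process. Passing the thread group ID along with the thread ID is
 * what protects against the TID having been recycled by an unrelated
 * process. gettid() needs glibc 2.30+; otherwise use syscall(SYS_gettid).
 *
 *     pid_t tgid = getpid();
 *     pid_t tid = gettid();
 *     if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) < 0)
 *             perror("tgkill");
 */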
/**
* sys_tkill - send signal to one specific task
* @pid: the PID of the task
* @sig: signal to be sent
*
* Send a signal to only one task, even if it's a CLONE_THREAD task.
*/
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
/* This is only valid for single tasks */
if (pid <= 0)
return -EINVAL;
return do_tkill(0, pid, sig);
}
static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
(task_pid_vnr(current) != pid))
return -EPERM;
/* POSIX.1b doesn't mention process groups. */
return kill_proc_info(sig, info, pid);
}
/**
* sys_rt_sigqueueinfo - queue a signal and accompanying info to a process
* @pid: the PID of the thread
* @sig: signal to be sent
* @uinfo: signal info to be sent
*/
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
kernel_siginfo_t info;
int ret = __copy_siginfo_from_user(sig, &info, uinfo);
if (unlikely(ret))
return ret;
return do_rt_sigqueueinfo(pid, sig, &info);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
compat_pid_t, pid,
int, sig,
struct compat_siginfo __user *, uinfo)
{
kernel_siginfo_t info;
int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
if (unlikely(ret))
return ret;
return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
/* This is only valid for single tasks */
if (pid <= 0 || tgid <= 0)
return -EINVAL;
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
(task_pid_vnr(current) != pid))
return -EPERM;
return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
kernel_siginfo_t info;
int ret = __copy_siginfo_from_user(sig, &info, uinfo);
if (unlikely(ret))
return ret;
return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
compat_pid_t, tgid,
compat_pid_t, pid,
int, sig,
struct compat_siginfo __user *, uinfo)
{
kernel_siginfo_t info;
int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
if (unlikely(ret))
return ret;
return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
* For kthreads only, must not be used if cloned with CLONE_SIGHAND
*/
void kernel_sigaction(int sig, __sighandler_t action)
{
spin_lock_irq(¤t->sighand->siglock);
current->sighand->action[sig - 1].sa.sa_handler = action;
if (action == SIG_IGN) {
sigset_t mask;
sigemptyset(&mask);
sigaddset(&mask, sig);
flush_sigqueue_mask(current, &mask, ¤t->signal->shared_pending);
flush_sigqueue_mask(current, &mask, ¤t->pending);
recalc_sigpending();
}
spin_unlock_irq(¤t->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
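/*
 * Usage sketch (illustrative): kernel threads normally have all signals
 * ignored. The allow_signal()/disallow_signal() helpers in <linux/signal.h>
 * are thin wrappers built on top of this function and are the usual way a
 * kthread opts in to receiving a signal.
 *
 *     allow_signal(SIGKILL);          // let SIGKILL be delivered to this kthread
 *     ...
 *     disallow_signal(SIGKILL);       // go back to ignoring it
 */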
void __weak sigaction_compat_abi(struct k_sigaction *act,
struct k_sigaction *oact)
{
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
struct task_struct *p = current, *t;
struct k_sigaction *k;
sigset_t mask;
if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
return -EINVAL;
k = &p->sighand->action[sig-1];
spin_lock_irq(&p->sighand->siglock);
if (k->sa.sa_flags & SA_IMMUTABLE) {
spin_unlock_irq(&p->sighand->siglock);
return -EINVAL;
}
if (oact)
*oact = *k;
/*
* Make sure that we never accidentally claim to support SA_UNSUPPORTED,
* e.g. by having an architecture use the bit in their uapi.
*/
BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
/*
* Clear unknown flag bits in order to allow userspace to detect missing
* support for flag bits and to allow the kernel to use non-uapi bits
* internally.
*/
if (act)
act->sa.sa_flags &= UAPI_SA_FLAGS;
if (oact)
oact->sa.sa_flags &= UAPI_SA_FLAGS;
sigaction_compat_abi(act, oact);
if (act) {
bool was_ignored = k->sa.sa_handler == SIG_IGN;
sigdelsetmask(&act->sa.sa_mask,
sigmask(SIGKILL) | sigmask(SIGSTOP));
*k = *act;
/*
* POSIX 3.3.1.3:
* "Setting a signal action to SIG_IGN for a signal that is
* pending shall cause the pending signal to be discarded,
* whether or not it is blocked."
*
* "Setting a signal action to SIG_DFL for a signal that is
* pending and whose default action is to ignore the signal
* (for example, SIGCHLD), shall cause the pending signal to
* be discarded, whether or not it is blocked"
*/
if (sig_handler_ignored(sig_handler(p, sig), sig)) {
sigemptyset(&mask);
sigaddset(&mask, sig);
flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
for_each_thread(p, t)
flush_sigqueue_mask(p, &mask, &t->pending);
} else if (was_ignored) {
posixtimer_sig_unignore(p, sig);
}
}
spin_unlock_irq(&p->sighand->siglock);
return 0;
}
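/*
 * Userspace sketch (illustrative) of the POSIX rule quoted above: a pending
 * signal is discarded the moment its action becomes SIG_IGN, even while it
 * is blocked.
 *
 *     sigset_t set, pend;
 *     sigemptyset(&set);
 *     sigaddset(&set, SIGUSR1);
 *     sigprocmask(SIG_BLOCK, &set, NULL);
 *     raise(SIGUSR1);                 // SIGUSR1 is now pending
 *     signal(SIGUSR1, SIG_IGN);       // the pending instance is discarded
 *     sigpending(&pend);              // SIGUSR1 is no longer a member
 */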
#ifdef CONFIG_DYNAMIC_SIGFRAME
static inline void sigaltstack_lock(void)
__acquires(&current->sighand->siglock)
{
spin_lock_irq(&current->sighand->siglock);
}
static inline void sigaltstack_unlock(void)
__releases(&current->sighand->siglock)
{
spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void sigaltstack_lock(void) { }
static inline void sigaltstack_unlock(void) { }
#endif
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
size_t min_ss_size)
{
struct task_struct *t = current;
int ret = 0;
if (oss) {
memset(oss, 0, sizeof(stack_t));
oss->ss_sp = (void __user *) t->sas_ss_sp;
oss->ss_size = t->sas_ss_size;
oss->ss_flags = sas_ss_flags(sp) |
(current->sas_ss_flags & SS_FLAG_BITS);
}
if (ss) {
void __user *ss_sp = ss->ss_sp;
size_t ss_size = ss->ss_size;
unsigned ss_flags = ss->ss_flags;
int ss_mode;
if (unlikely(on_sig_stack(sp)))
return -EPERM;
ss_mode = ss_flags & ~SS_FLAG_BITS;
if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
ss_mode != 0))
return -EINVAL;
/*
* Return before taking any locks if no actual
* sigaltstack changes were requested.
*/
if (t->sas_ss_sp == (unsigned long)ss_sp &&
t->sas_ss_size == ss_size &&
t->sas_ss_flags == ss_flags)
return 0;
sigaltstack_lock();
if (ss_mode == SS_DISABLE) {
ss_size = 0;
ss_sp = NULL;
} else {
if (unlikely(ss_size < min_ss_size))
ret = -ENOMEM;
if (!sigaltstack_size_valid(ss_size))
ret = -ENOMEM;
}
if (!ret) {
t->sas_ss_sp = (unsigned long) ss_sp;
t->sas_ss_size = ss_size;
t->sas_ss_flags = ss_flags;
}
sigaltstack_unlock();
}
return ret;
}
SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
stack_t new, old;
int err;
if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
return -EFAULT;
err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
current_user_stack_pointer(),
MINSIGSTKSZ);
if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
err = -EFAULT;
return err;
}
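/*
 * Userspace usage sketch (illustrative; segv_handler is a placeholder for
 * the caller's handler): install an alternate signal stack so a SIGSEGV
 * handler can still run after the normal stack has overflowed. SA_ONSTACK in
 * the subsequent sigaction() is what actually switches delivery onto this
 * stack.
 *
 *     stack_t ss = {
 *             .ss_sp = malloc(SIGSTKSZ),
 *             .ss_size = SIGSTKSZ,
 *             .ss_flags = 0,
 *     };
 *     struct sigaction sa = { .sa_handler = segv_handler, .sa_flags = SA_ONSTACK };
 *     if (sigaltstack(&ss, NULL) < 0)
 *             perror("sigaltstack");
 *     sigaction(SIGSEGV, &sa, NULL);
 */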
int restore_altstack(const stack_t __user *uss)
{
stack_t new;
if (copy_from_user(&new, uss, sizeof(stack_t)))
return -EFAULT;
(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
MINSIGSTKSZ);
/* squash all but EFAULT for now */
return 0;
}
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
struct task_struct *t = current;
int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
__put_user(t->sas_ss_flags, &uss->ss_flags) |
__put_user(t->sas_ss_size, &uss->ss_size);
return err;
}
#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
compat_stack_t __user *uoss_ptr)
{
stack_t uss, uoss;
int ret;
if (uss_ptr) {
compat_stack_t uss32;
if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
return -EFAULT;
uss.ss_sp = compat_ptr(uss32.ss_sp);
uss.ss_flags = uss32.ss_flags;
uss.ss_size = uss32.ss_size;
}
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
compat_user_stack_pointer(),
COMPAT_MINSIGSTKSZ);
if (ret >= 0 && uoss_ptr) {
compat_stack_t old;
memset(&old, 0, sizeof(old));
old.ss_sp = ptr_to_compat(uoss.ss_sp);
old.ss_flags = uoss.ss_flags;
old.ss_size = uoss.ss_size;
if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
ret = -EFAULT;
}
return ret;
}
COMPAT_SYSCALL_DEFINE2(sigaltstack,
const compat_stack_t __user *, uss_ptr,
compat_stack_t __user *, uoss_ptr)
{
return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}
int compat_restore_altstack(const compat_stack_t __user *uss)
{
int err = do_compat_sigaltstack(uss, NULL);
/* squash all but -EFAULT for now */
return err == -EFAULT ? err : 0;
}
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
int err;
struct task_struct *t = current;
err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
&uss->ss_sp) |
__put_user(t->sas_ss_flags, &uss->ss_flags) |
__put_user(t->sas_ss_size, &uss->ss_size);
return err;
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING
/**
* sys_sigpending - examine pending signals
* @uset: where the mask of pending signals is returned
*/
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
sigset_t set;
if (sizeof(old_sigset_t) > sizeof(*uset))
return -EINVAL;
do_sigpending(&set);
if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
sigset_t set;
do_sigpending(&set);
return put_user(set.sig[0], set32);
}
#endif
#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
* sys_sigprocmask - examine and change blocked signals
* @how: whether to add, remove, or set signals
* @nset: signals to add or remove (if non-null)
* @oset: previous value of signal mask if non-null
*
* Some platforms have their own version with special arguments;
* others support only sys_rt_sigprocmask.
*/
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
old_sigset_t __user *, oset)
{
old_sigset_t old_set, new_set;
sigset_t new_blocked;
old_set = current->blocked.sig[0];
if (nset) {
if (copy_from_user(&new_set, nset, sizeof(*nset)))
return -EFAULT;
new_blocked = current->blocked;
switch (how) {
case SIG_BLOCK:
sigaddsetmask(&new_blocked, new_set);
break;
case SIG_UNBLOCK:
sigdelsetmask(&new_blocked, new_set);
break;
case SIG_SETMASK:
new_blocked.sig[0] = new_set;
break;
default:
return -EINVAL;
}
set_current_blocked(&new_blocked);
}
if (oset) {
if (copy_to_user(oset, &old_set, sizeof(*oset)))
return -EFAULT;
}
return 0;
}
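/*
 * Userspace usage sketch (illustrative): the same @how semantics are exposed
 * through the POSIX sigprocmask() wrapper, which modern libcs implement via
 * rt_sigprocmask. Block SIGINT around a critical region, then restore the
 * previous mask; a SIGINT raised in between stays pending until then.
 *
 *     sigset_t block, old;
 *     sigemptyset(&block);
 *     sigaddset(&block, SIGINT);
 *     sigprocmask(SIG_BLOCK, &block, &old);
 *     ...critical region...
 *     sigprocmask(SIG_SETMASK, &old, NULL);
 */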
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
* sys_rt_sigaction - alter an action taken by a process
* @sig: signal to be sent
* @act: new sigaction
* @oact: used to save the previous sigaction
* @sigsetsize: size of sigset_t type
*/
SYSCALL_DEFINE4(rt_sigaction, int, sig,
const struct sigaction __user *, act,
struct sigaction __user *, oact,
size_t, sigsetsize)
{
struct k_sigaction new_sa, old_sa;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
return -EFAULT;
ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
if (ret)
return ret;
if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
const struct compat_sigaction __user *, act,
struct compat_sigaction __user *, oact,
compat_size_t, sigsetsize)
{
struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
compat_uptr_t restorer;
#endif
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (act) {
compat_uptr_t handler;
ret = get_user(handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
ret |= get_user(restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
if (ret)
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
&oact->sa_handler);
ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
sizeof(oact->sa_mask));
ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
&oact->sa_restorer);
#endif
}
return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
const struct old_sigaction __user *, act,
struct old_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
if (act) {
old_sigset_t mask;
if (!access_ok(act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
new_ka.ka_restorer = NULL;
#endif
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
}
return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
const struct compat_old_sigaction __user *, act,
struct compat_old_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
compat_old_sigset_t mask;
compat_uptr_t handler, restorer;
if (act) {
if (!access_ok(act, sizeof(*act)) ||
__get_user(handler, &act->sa_handler) ||
__get_user(restorer, &act->sa_restorer) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
new_ka.ka_restorer = NULL;
#endif
new_ka.sa.sa_handler = compat_ptr(handler);
new_ka.sa.sa_restorer = compat_ptr(restorer);
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(oact, sizeof(*oact)) ||
__put_user(ptr_to_compat(old_ka.sa.sa_handler),
&oact->sa_handler) ||
__put_user(ptr_to_compat(old_ka.sa.sa_restorer),
&oact->sa_restorer) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
}
return ret;
}
#endif
#ifdef CONFIG_SGETMASK_SYSCALL
/*
* For backwards compatibility. Functionality superseded by sigprocmask.
*/
SYSCALL_DEFINE0(sgetmask)
{
/* SMP safe */
return current->blocked.sig[0];
}
SYSCALL_DEFINE1(ssetmask, int, newmask)
{
int old = current->blocked.sig[0];
sigset_t newset;
siginitset(&newset, newmask);
set_current_blocked(&newset);
return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
* For backwards compatibility. Functionality superseded by sigaction.
*/
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
struct k_sigaction new_sa, old_sa;
int ret;
new_sa.sa.sa_handler = handler;
new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
sigemptyset(&new_sa.sa.sa_mask);
ret = do_sigaction(sig, &new_sa, &old_sa);
return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE
SYSCALL_DEFINE0(pause)
{
while (!signal_pending(current)) {
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
return -ERESTARTNOHAND;
}
#endif
static int sigsuspend(sigset_t *set)
{
current->saved_sigmask = current->blocked;
set_current_blocked(set);
while (!signal_pending(current)) {
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
set_restore_sigmask();
return -ERESTARTNOHAND;
}
/**
* sys_rt_sigsuspend - replace the signal mask with the @unewset value until
* a signal is received
* @unewset: new signal mask value
* @sigsetsize: size of sigset_t type
*/
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
sigset_t newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
return sigsuspend(&newset);
}
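/*
 * Userspace usage sketch (illustrative; flag is a placeholder for a volatile
 * sig_atomic_t set by the SIGUSR1 handler): the classic race-free wait. The
 * signal is blocked while the flag is tested; sigsuspend() atomically
 * installs the wait mask and sleeps, so the signal cannot arrive between the
 * test and the sleep.
 *
 *     sigset_t block, wait_mask;
 *     sigemptyset(&block);
 *     sigaddset(&block, SIGUSR1);
 *     sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *     sigdelset(&wait_mask, SIGUSR1);         // allow SIGUSR1 while suspended
 *     while (!flag)
 *             sigsuspend(&wait_mask);         // always returns -1 with EINTR
 */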
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
sigset_t newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (get_compat_sigset(&newset, unewset))
return -EFAULT;
return sigsuspend(&newset);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
sigset_t blocked;
siginitset(&blocked, mask);
return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
sigset_t blocked;
siginitset(&blocked, mask);
return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
return NULL;
}
static inline void siginfo_buildtime_checks(void)
{
BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
/* kill */
CHECK_OFFSET(si_pid);
CHECK_OFFSET(si_uid);
/* timer */
CHECK_OFFSET(si_tid);
CHECK_OFFSET(si_overrun);
CHECK_OFFSET(si_value);
/* rt */
CHECK_OFFSET(si_pid);
CHECK_OFFSET(si_uid);
CHECK_OFFSET(si_value);
/* sigchld */
CHECK_OFFSET(si_pid);
CHECK_OFFSET(si_uid);
CHECK_OFFSET(si_status);
CHECK_OFFSET(si_utime);
CHECK_OFFSET(si_stime);
/* sigfault */
CHECK_OFFSET(si_addr);
CHECK_OFFSET(si_trapno);
CHECK_OFFSET(si_addr_lsb);
CHECK_OFFSET(si_lower);
CHECK_OFFSET(si_upper);
CHECK_OFFSET(si_pkey);
CHECK_OFFSET(si_perf_data);
CHECK_OFFSET(si_perf_type);
CHECK_OFFSET(si_perf_flags);
/* sigpoll */
CHECK_OFFSET(si_band);
CHECK_OFFSET(si_fd);
/* sigsys */
CHECK_OFFSET(si_call_addr);
CHECK_OFFSET(si_syscall);
CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET
/* usb asyncio */
BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
offsetof(struct siginfo, si_addr));
if (sizeof(int) == sizeof(void __user *)) {
BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
sizeof(void __user *));
} else {
BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
sizeof_field(struct siginfo, si_uid)) !=
sizeof(void __user *));
BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
offsetof(struct siginfo, si_uid));
}
#ifdef CONFIG_COMPAT
BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
offsetof(struct compat_siginfo, si_addr));
BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
sizeof(compat_uptr_t));
BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
sizeof_field(struct siginfo, si_pid));
#endif
}
#if defined(CONFIG_SYSCTL)
static const struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
{
.procname = "exception-trace",
.data = &show_unhandled_signals,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
#endif
};
static const struct ctl_table signal_table[] = {
{
.procname = "print-fatal-signals",
.data = &print_fatal_signals,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
};
static int __init init_signal_sysctls(void)
{
register_sysctl_init("debug", signal_debug_table);
register_sysctl_init("kernel", signal_table);
return 0;
}
early_initcall(init_signal_sysctls);
#endif /* CONFIG_SYSCTL */
void __init signals_init(void)
{
siginfo_buildtime_checks();
sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
* kdb_send_sig - Allows kdb to send signals without exposing
* signal internals. This function checks if the required locks are
* available before calling the main signal code, to avoid kdb
* deadlocks.
*/
void kdb_send_sig(struct task_struct *t, int sig)
{
static struct task_struct *kdb_prev_t;
int new_t, ret;
if (!spin_trylock(&t->sighand->siglock)) {
kdb_printf("Can't do kill command now.\n"
"The sigmask lock is held somewhere else in "
"kernel, try again later\n");
return;
}
new_t = kdb_prev_t != t;
kdb_prev_t = t;
if (!task_is_running(t) && new_t) {
spin_unlock(&t->sighand->siglock);
kdb_printf("Process is not RUNNING, sending a signal from "
"kdb risks deadlock\n"
"on the run queue locks. "
"The signal has _not_ been sent.\n"
"Reissue the kill command if you want to risk "
"the deadlock.\n");
return;
}
ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
spin_unlock(&t->sighand->siglock);
if (ret)
kdb_printf("Fail to deliver Signal %d to process %d.\n",
sig, t->pid);
else
kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Authentication token and access key management
*
* Copyright (C) 2004, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* See Documentation/security/keys/core.rst for information on keys/keyrings.
*/
#ifndef _LINUX_KEY_H
#define _LINUX_KEY_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/sysctl.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/assoc_array.h>
#include <linux/refcount.h>
#include <linux/time64.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
/* key handle serial number */
typedef int32_t key_serial_t;
/* key handle permissions mask */
typedef uint32_t key_perm_t;
struct key;
struct net;
#ifdef CONFIG_KEYS
#undef KEY_DEBUGGING
#define KEY_POS_VIEW 0x01000000 /* possessor can view a key's attributes */
#define KEY_POS_READ 0x02000000 /* possessor can read key payload / view keyring */
#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */
#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */
#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */
#define KEY_POS_SETATTR 0x20000000 /* possessor can set key attributes */
#define KEY_POS_ALL 0x3f000000
#define KEY_USR_VIEW 0x00010000 /* user permissions... */
#define KEY_USR_READ 0x00020000
#define KEY_USR_WRITE 0x00040000
#define KEY_USR_SEARCH 0x00080000
#define KEY_USR_LINK 0x00100000
#define KEY_USR_SETATTR 0x00200000
#define KEY_USR_ALL 0x003f0000
#define KEY_GRP_VIEW 0x00000100 /* group permissions... */
#define KEY_GRP_READ 0x00000200
#define KEY_GRP_WRITE 0x00000400
#define KEY_GRP_SEARCH 0x00000800
#define KEY_GRP_LINK 0x00001000
#define KEY_GRP_SETATTR 0x00002000
#define KEY_GRP_ALL 0x00003f00
#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */
#define KEY_OTH_READ 0x00000002
#define KEY_OTH_WRITE 0x00000004
#define KEY_OTH_SEARCH 0x00000008
#define KEY_OTH_LINK 0x00000010
#define KEY_OTH_SETATTR 0x00000020
#define KEY_OTH_ALL 0x0000003f
#define KEY_PERM_UNDEF 0xffffffff
/*
* The permissions required on a key that we're looking up.
*/
enum key_need_perm {
KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */
KEY_NEED_VIEW, /* Require permission to view attributes */
KEY_NEED_READ, /* Require permission to read content */
KEY_NEED_WRITE, /* Require permission to update / modify */
KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */
KEY_NEED_LINK, /* Require permission to link */
KEY_NEED_SETATTR, /* Require permission to change attributes */
KEY_NEED_UNLINK, /* Require permission to unlink key */
KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */
KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */
KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
};
enum key_lookup_flag {
KEY_LOOKUP_CREATE = 0x01,
KEY_LOOKUP_PARTIAL = 0x02,
KEY_LOOKUP_ALL = (KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL),
};
struct seq_file;
struct user_struct;
struct signal_struct;
struct cred;
struct key_type;
struct key_owner;
struct key_tag;
struct keyring_list;
struct keyring_name;
struct key_tag {
struct rcu_head rcu;
refcount_t usage;
bool removed; /* T when subject removed */
};
struct keyring_index_key {
/* [!] If this structure is altered, the union in struct key must change too! */
unsigned long hash; /* Hash value */
union {
struct {
#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
u16 desc_len;
char desc[sizeof(long) - 2]; /* First few chars of description */
#else
char desc[sizeof(long) - 2]; /* First few chars of description */
u16 desc_len;
#endif
};
unsigned long x;
};
struct key_type *type;
struct key_tag *domain_tag; /* Domain of operation */
const char *description;
};
union key_payload {
void __rcu *rcu_data0;
void *data[4];
};
/*****************************************************************************/
/*
* key reference with possession attribute handling
*
* NOTE! key_ref_t is a typedef'd pointer to a type that is not actually
* defined. This is because we abuse the bottom bit of the reference to carry a
* flag to indicate whether the calling process possesses that key in one of
* its keyrings.
*
* the key_ref_t has been made a separate type so that the compiler can reject
* attempts to dereference it without proper conversion.
*
* the three functions are used to assemble and disassemble references
*/
typedef struct __key_reference_with_attributes *key_ref_t;
static inline key_ref_t make_key_ref(const struct key *key,
bool possession)
{
return (key_ref_t) ((unsigned long) key | possession);
}
static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
{
return (struct key *) ((unsigned long) key_ref & ~1UL);
}
static inline bool is_key_possessed(const key_ref_t key_ref)
{
return (unsigned long) key_ref & 1UL;
}
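/*
 * Illustrative sketch of the encoding described above: the possession flag
 * rides in bit 0 of the pointer value and is stripped again before the key
 * is dereferenced.
 *
 *     key_ref_t ref = make_key_ref(key, true);   // tagged pointer
 *     struct key *k = key_ref_to_ptr(ref);       // bit 0 cleared
 *     bool possessed = is_key_possessed(ref);    // true
 */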
typedef int (*key_restrict_link_func_t)(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
struct key_restriction {
key_restrict_link_func_t check;
struct key *key;
struct key_type *keytype;
};
enum key_state {
KEY_IS_UNINSTANTIATED,
KEY_IS_POSITIVE, /* Positively instantiated */
};
/*****************************************************************************/
/*
* authentication token / access credential / keyring
* - types of key include:
* - keyrings
* - disk encryption IDs
* - Kerberos TGTs and tickets
*/
struct key {
refcount_t usage; /* number of references */
key_serial_t serial; /* key serial number */
union {
struct list_head graveyard_link;
struct rb_node serial_node;
};
#ifdef CONFIG_KEY_NOTIFICATIONS
struct watch_list *watchers; /* Entities watching this key for changes */
#endif
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
union {
time64_t expiry; /* time at which key expires (or 0) */
time64_t revoked_at; /* time at which key was revoked */
};
time64_t last_used_at; /* last time used for LRU keyring discard */
kuid_t uid;
kgid_t gid;
key_perm_t perm; /* access permissions */
unsigned short quotalen; /* length added to quota */
unsigned short datalen; /* payload data length
* - may not match RCU dereferenced payload
* - payload should contain own length
*/
short state; /* Key state (+) or rejection error (-) */
#ifdef KEY_DEBUGGING
unsigned magic;
#define KEY_DEBUG_MAGIC 0x18273645u
#endif
unsigned long flags; /* status flags (change with bitops) */
#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
#define KEY_FLAG_USER_ALIVE 10 /* set if final put has not happened on key yet */
/* the key type and key description string
* - the desc is used to match a key against search criteria
* - it should be a printable string
* - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
*/
union {
struct keyring_index_key index_key;
struct {
unsigned long hash;
unsigned long len_desc;
struct key_type *type; /* type of key */
struct key_tag *domain_tag; /* Domain of operation */
char *description;
};
};
/* key data
* - this is used to hold the data actually used in cryptography or
* whatever
*/
union {
union key_payload payload;
struct {
/* Keyring bits */
struct list_head name_link;
struct assoc_array keys;
};
};
/* This is set on a keyring to restrict the addition of a link to a key
* to it. If this structure isn't provided then it is assumed that the
* keyring is open to any addition. It is ignored for non-keyring
* keys. Only set this value using keyring_restrict(), keyring_alloc(),
* or key_alloc().
*
* This is intended for use with rings of trusted keys whereby addition
* to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION
* overrides this, allowing the kernel to add extra keys without
* restriction.
*/
struct key_restriction *restrict_link;
};
extern struct key *key_alloc(struct key_type *type,
const char *desc,
kuid_t uid, kgid_t gid,
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
struct key_restriction *restrict_link);
#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
extern void key_put(struct key *key);
extern bool key_put_tag(struct key_tag *tag);
extern void key_remove_domain(struct key_tag *domain_tag);
static inline struct key *__key_get(struct key *key)
{
refcount_inc(&key->usage);
return key;
}
static inline struct key *key_get(struct key *key)
{
return key ? __key_get(key) : key;
}
static inline void key_ref_put(key_ref_t key_ref)
{
key_put(key_ref_to_ptr(key_ref));
}
extern struct key *request_key_tag(struct key_type *type,
const char *description,
struct key_tag *domain_tag,
const char *callout_info);
extern struct key *request_key_rcu(struct key_type *type,
const char *description,
struct key_tag *domain_tag);
extern struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux);
/**
* request_key - Request a key and wait for construction
* @type: Type of key.
* @description: The searchable description of the key.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
*
* As for request_key_tag(), but with the default global domain tag.
*/
static inline struct key *request_key(struct key_type *type,
const char *description,
const char *callout_info)
{
return request_key_tag(type, description, NULL, callout_info);
}
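/*
 * In-kernel usage sketch (illustrative; assumes the "user" key type,
 * key_type_user, declared in keys/user-type.h): look up a key by
 * description, waiting for userspace instantiation if needed, and drop the
 * reference when done.
 *
 *     struct key *key = request_key(&key_type_user, "myapp:token", NULL);
 *     if (IS_ERR(key))
 *             return PTR_ERR(key);
 *     ...read the payload under key->sem or RCU...
 *     key_put(key);
 */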
#ifdef CONFIG_NET
/**
* request_key_net - Request a key for a net namespace and wait for construction
* @type: Type of key.
* @description: The searchable description of the key.
* @net: The network namespace that is the key's domain of operation.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
*
* As for request_key() except that it does not add the returned key to a
* keyring if found, new keys are always allocated in the user's quota, the
* callout_info must be a NUL-terminated string and no auxiliary data can be
* passed. Only keys that operate the specified network namespace are used.
*
* Furthermore, it then works as wait_for_key_construction() to wait for the
* completion of keys undergoing construction with a non-interruptible wait.
*/
#define request_key_net(type, description, net, callout_info) \
request_key_tag(type, description, net->key_domain, callout_info)
/**
* request_key_net_rcu - Request a key for a net namespace under RCU conditions
* @type: Type of key.
* @description: The searchable description of the key.
* @net: The network namespace that is the key's domain of operation.
*
* As for request_key_rcu() except that only keys that operate the specified
* network namespace are used.
*/
#define request_key_net_rcu(type, description, net) \
request_key_rcu(type, description, net->key_domain)
#endif /* CONFIG_NET */
extern int wait_for_key_construction(struct key *key, bool intr);
extern int key_validate(const struct key *key);
extern key_ref_t key_create(key_ref_t keyring,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags);
extern key_ref_t key_create_or_update(key_ref_t keyring,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags);
extern int key_update(key_ref_t key,
const void *payload,
size_t plen);
extern int key_link(struct key *keyring,
struct key *key);
extern int key_move(struct key *key,
struct key *from_keyring,
struct key *to_keyring,
unsigned int flags);
extern int key_unlink(struct key *keyring,
struct key *key);
extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
struct key_restriction *restrict_link,
struct key *dest);
extern int restrict_link_reject(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
extern int keyring_clear(struct key *keyring);
extern key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description,
bool recurse);
extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);
extern struct key *key_lookup(key_serial_t id);
static inline key_serial_t key_serial(const struct key *key)
{
return key ? key->serial : 0;
}
extern void key_set_timeout(struct key *, unsigned);
extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
enum key_need_perm need_perm);
extern void key_free_user_ns(struct user_namespace *);
static inline short key_read_state(const struct key *key)
{
/* Barrier versus mark_key_instantiated(). */
return smp_load_acquire(&key->state);
}
/**
* key_is_positive - Determine if a key has been positively instantiated
* @key: The key to check.
*
* Return true if the specified key has been positively instantiated, false
* otherwise.
*/
static inline bool key_is_positive(const struct key *key)
{
return key_read_state(key) == KEY_IS_POSITIVE;
}
static inline bool key_is_negative(const struct key *key)
{
return key_read_state(key) < 0;
}
#define dereference_key_rcu(KEY) \
(rcu_dereference((KEY)->payload.rcu_data0))
#define dereference_key_locked(KEY) \
(rcu_dereference_protected((KEY)->payload.rcu_data0, \
rwsem_is_locked(&((struct key *)(KEY))->sem)))
#define rcu_assign_keypointer(KEY, PAYLOAD) \
do { \
rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \
} while (0)
/*
* the userspace interface
*/
extern int install_thread_keyring_to_cred(struct cred *cred);
extern void key_fsuid_changed(struct cred *new_cred);
extern void key_fsgid_changed(struct cred *new_cred);
extern void key_init(void);
#else /* CONFIG_KEYS */
#define key_validate(k) 0
#define key_serial(k) 0
#define key_get(k) ({ NULL; })
#define key_revoke(k) do { } while(0)
#define key_invalidate(k) do { } while(0)
#define key_put(k) do { } while(0)
#define key_ref_put(k) do { } while(0)
#define make_key_ref(k, p) NULL
#define key_ref_to_ptr(k) NULL
#define is_key_possessed(k) 0
#define key_fsuid_changed(c) do { } while(0)
#define key_fsgid_changed(c) do { } while(0)
#define key_init() do { } while(0)
#define key_free_user_ns(ns) do { } while(0)
#define key_remove_domain(d) do { } while(0)
#define key_lookup(k) NULL
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
#endif /* _LINUX_KEY_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KERNEL_PRINTK__
#define __KERNEL_PRINTK__
#include <linux/stdarg.h>
#include <linux/init.h>
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <linux/ratelimit_types.h>
#include <linux/once_lite.h>
struct console;
extern const char linux_banner[];
extern const char linux_proc_banner[];
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
#define PRINTK_MAX_SINGLE_HEADER_LEN 2
static inline int printk_get_level(const char *buffer)
{
if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
switch (buffer[1]) {
case '0' ... '7':
case 'c': /* KERN_CONT */
return buffer[1];
}
}
return 0;
}
static inline const char *printk_skip_level(const char *buffer)
{
if (printk_get_level(buffer))
return buffer + 2;
return buffer;
}
static inline const char *printk_skip_headers(const char *buffer)
{
while (printk_get_level(buffer))
buffer = printk_skip_level(buffer);
return buffer;
}
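/*
 * Illustrative sketch of the header format parsed above: each KERN_<LEVEL>
 * macro expands to the SOH byte followed by a single level character, so a
 * message built with one carries a two-byte header.
 *
 *     const char *msg = KERN_ERR "disk on fire\n";   // "\001" "3" "disk on fire\n"
 *     printk_get_level(msg);      // returns '3'
 *     printk_skip_level(msg);     // points at "disk on fire\n"
 */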
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
/* We show everything that is MORE important than this.. */
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
/*
* Default used to be hard-coded at 7, quiet used to be hardcoded at 4,
* we're now allowing both to be set from kernel config.
*/
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
int match_devname_and_update_preferred_console(const char *match,
const char *name,
const short idx);
extern int console_printk[];
#define console_loglevel (console_printk[0])
#define default_message_loglevel (console_printk[1])
#define minimum_console_loglevel (console_printk[2])
#define default_console_loglevel (console_printk[3])
extern void console_verbose(void);
/* strlen("ratelimit") + 1 */
#define DEVKMSG_STR_MAX_SIZE 10
extern char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE];
struct ctl_table;
extern int suppress_printk;
struct va_format {
const char *fmt;
va_list *va;
};
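/*
 * Usage sketch (illustrative; fmt and prefix stand for a wrapper's own
 * parameters): %pV lets a printk wrapper forward a caller-supplied format
 * and va_list without rendering into a temporary buffer first; dev_printk()
 * and similar helpers use this to prepend their prefix.
 *
 *     struct va_format vaf;
 *     va_list args;
 *
 *     va_start(args, fmt);
 *     vaf.fmt = fmt;
 *     vaf.va = &args;
 *     printk("%s: %pV", prefix, &vaf);
 *     va_end(args);
 */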
/*
* FW_BUG
* Add this to a message where you are sure the firmware is buggy or behaves
* really stupidly or out of spec. Be aware that the responsible BIOS developer
* should be able to fix this issue, or at least get a concrete idea of the
* problem, by reading your message without needing to look at the kernel
* code.
*
* Use it for definite and high priority BIOS bugs.
*
* FW_WARN
* Use it for less clear-cut cases (e.g. the kernel may already have messed things up)
* and medium priority BIOS bugs.
*
* FW_INFO
* Use this one if you want to tell the user or vendor about something
* suspicious, but generally harmless, related to the firmware.
*
* Use it for information or very low priority BIOS bugs.
*/
#define FW_BUG "[Firmware Bug]: "
#define FW_WARN "[Firmware Warn]: "
#define FW_INFO "[Firmware Info]: "
/*
* HW_ERR
* Add this to a message for hardware errors, so that the user can report
* it to the hardware vendor instead of to LKML or the software vendor.
*/
#define HW_ERR "[Hardware Error]: "
/*
* DEPRECATED
* Add this to a message whenever you want to warn user space about the use
* of a deprecated aspect of an API so they can stop using it.
*/
#define DEPRECATED "[Deprecated]: "
/*
* Dummy printk for disabled debugging statements to use whilst maintaining
* gcc's format checking.
*/
#define no_printk(fmt, ...) \
({ \
if (0) \
_printk(fmt, ##__VA_ARGS__); \
0; \
})
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
#else
static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
struct dev_printk_info;
#ifdef CONFIG_PRINTK
asmlinkage __printf(4, 0)
int vprintk_emit(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args);
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
__printf(1, 0)
int vprintk_deferred(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
int _printk(const char *fmt, ...);
/*
* Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
*/
__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
extern void __printk_deferred_enter(void);
extern void __printk_deferred_exit(void);
extern void printk_force_console_enter(void);
extern void printk_force_console_exit(void);
/*
* The printk_deferred_enter/exit macros are available only as a hack for
* some code paths that need to defer all printk console printing. Interrupts
* must be disabled for the deferred duration.
*/
#define printk_deferred_enter() __printk_deferred_enter()
#define printk_deferred_exit() __printk_deferred_exit()
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
* with all other unrelated printk_ratelimit() callsites. Instead use
* printk_ratelimited() or plain old __ratelimit().
*/
extern int __printk_ratelimit(const char *func);
#define printk_ratelimit() __printk_ratelimit(__func__)
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
extern int printk_delay_msec;
extern int dmesg_restrict;
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
void log_buf_vmcoreinfo_setup(void);
void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
void console_try_replay_all(void);
void printk_legacy_allow_panic_sync(void);
extern bool nbcon_device_try_acquire(struct console *con);
extern void nbcon_device_release(struct console *con);
void nbcon_atomic_flush_unsafe(void);
bool pr_flush(int timeout_ms, bool reset_on_progress);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
{
return 0;
}
static inline __printf(1, 0)
int vprintk_deferred(const char *fmt, va_list args)
{
return 0;
}
static inline __printf(1, 2) __cold
int _printk(const char *s, ...)
{
return 0;
}
static inline __printf(1, 2) __cold
int _printk_deferred(const char *s, ...)
{
return 0;
}
static inline void printk_deferred_enter(void)
{
}
static inline void printk_deferred_exit(void)
{
}
static inline void printk_force_console_enter(void)
{
}
static inline void printk_force_console_exit(void)
{
}
static inline int printk_ratelimit(void)
{
return 0;
}
static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec)
{
return false;
}
static inline void wake_up_klogd(void)
{
}
static inline char *log_buf_addr_get(void)
{
return NULL;
}
static inline u32 log_buf_len_get(void)
{
return 0;
}
static inline void log_buf_vmcoreinfo_setup(void)
{
}
static inline void setup_log_buf(int early)
{
}
static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
{
}
static inline void dump_stack_print_info(const char *log_lvl)
{
}
static inline void show_regs_print_info(const char *log_lvl)
{
}
static inline void dump_stack_lvl(const char *log_lvl)
{
}
static inline void dump_stack(void)
{
}
static inline void printk_trigger_flush(void)
{
}
static inline void console_try_replay_all(void)
{
}
static inline void printk_legacy_allow_panic_sync(void)
{
}
static inline bool nbcon_device_try_acquire(struct console *con)
{
return false;
}
static inline void nbcon_device_release(struct console *con)
{
}
static inline void nbcon_atomic_flush_unsafe(void)
{
}
static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
{
return true;
}
#endif
#ifdef CONFIG_SMP
extern int __printk_cpu_sync_try_get(void);
extern void __printk_cpu_sync_wait(void);
extern void __printk_cpu_sync_put(void);
#else
#define __printk_cpu_sync_try_get() true
#define __printk_cpu_sync_wait()
#define __printk_cpu_sync_put()
#endif /* CONFIG_SMP */
/**
* printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
* cpu-reentrant spinning lock.
* @flags: Stack-allocated storage for saving local interrupt state,
* to be passed to printk_cpu_sync_put_irqrestore().
*
* If the lock is owned by another CPU, spin until it becomes available.
* Interrupts are restored while spinning.
*
* CAUTION: This function must be used carefully. It does not behave like a
* typical lock. Here are important things to watch out for...
*
* * This function is reentrant on the same CPU. Therefore the calling
* code must not assume exclusive access to data if code accessing the
* data can run reentrant or within NMI context on the same CPU.
*
* * If there exists usage of this function from NMI context, it becomes
* unsafe to perform any type of locking or spinning to wait for other
* CPUs after calling this function from any context. This includes
* using spinlocks or any other busy-waiting synchronization methods.
*/
#define printk_cpu_sync_get_irqsave(flags) \
for (;;) { \
local_irq_save(flags); \
if (__printk_cpu_sync_try_get()) \
break; \
local_irq_restore(flags); \
__printk_cpu_sync_wait(); \
}
/**
* printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
* lock and restore interrupts.
* @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
*/
#define printk_cpu_sync_put_irqrestore(flags) \
do { \
__printk_cpu_sync_put(); \
local_irq_restore(flags); \
} while (0)
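/*
 * Usage sketch (illustrative): keep a multi-line dump from interleaving with
 * output from other CPUs. The lock is reentrant on the same CPU, so it may
 * also be taken from NMI context there.
 *
 *     unsigned long flags;
 *
 *     printk_cpu_sync_get_irqsave(flags);
 *     printk("start of coherent block\n");
 *     dump_stack();
 *     printk_cpu_sync_put_irqrestore(flags);
 */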
extern int kptr_restrict;
/**
* pr_fmt - used by the pr_*() macros to generate the printk format string
* @fmt: format string passed from a pr_*() macro
*
* This macro can be used to generate a unified format string for pr_*()
* macros. A common use is to prefix all pr_*() messages in a file with a common
* string. For example, defining this at the top of a source file:
*
* #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
*
* would prefix all pr_info, pr_emerg... messages in the file with the module
* name.
*/
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
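/*
 * Illustrative sketch: with the pr_fmt() definition shown above placed at
 * the top of a source file (before any header includes), every pr_*() call
 * in that file is prefixed automatically. Assuming a module named
 * "mymodule":
 *
 *     #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 *     ...
 *     pr_info("device probed\n");     // logs "mymodule: device probed"
 */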
struct module;
#ifdef CONFIG_PRINTK_INDEX
struct pi_entry {
const char *fmt;
const char *func;
const char *file;
unsigned int line;
/*
* While printk and pr_* have the level stored in the string at compile
* time, some subsystems dynamically add it at runtime through the
* format string. For these dynamic cases, we allow the subsystem to
* tell us the level at compile time.
*
* NULL indicates that the level, if any, is stored in fmt.
*/
const char *level;
/*
* The format string used by various subsystem specific printk()
* wrappers to prefix the message.
*
* Note that the static prefix defined by the pr_fmt() macro is stored
* directly in the message format (@fmt), not here.
*/
const char *subsys_fmt_prefix;
} __packed;
#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \
do { \
if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \
/*
* We check __builtin_constant_p multiple times here
* for the same input because GCC will produce an error
* if we try to assign a static variable to fmt if it
* is not a constant, even with the outer if statement.
*/ \
static const struct pi_entry _entry \
__used = { \
.fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \
.func = __func__, \
.file = __FILE__, \
.line = __LINE__, \
.level = __builtin_constant_p(_level) ? (_level) : NULL, \
.subsys_fmt_prefix = _subsys_fmt_prefix,\
}; \
static const struct pi_entry *_entry_ptr \
__used __section(".printk_index") = &_entry; \
} \
} while (0)
#else /* !CONFIG_PRINTK_INDEX */
#define __printk_index_emit(...) do {} while (0)
#endif /* CONFIG_PRINTK_INDEX */
/*
* Some subsystems have their own custom printk that applies a va_format to a
* generic format, for example, to include a device number or other metadata
* alongside the format supplied by the caller.
*
* In order to store these in the way they would be emitted by the printk
* infrastructure, the subsystem provides us with the start, fixed string, and
* any subsequent text in the format string.
*
* We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed
* as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the
* first one.
*
* subsys_fmt_prefix must be known at compile time, or compilation will fail
* (since this is a mistake). If fmt or level is not known at compile time, no
* index entry will be made (since this can legitimately happen).
*/
#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \
__printk_index_emit(fmt, level, subsys_fmt_prefix)
#define printk_index_wrap(_p_func, _fmt, ...) \
({ \
__printk_index_emit(_fmt, NULL, NULL); \
_p_func(_fmt, ##__VA_ARGS__); \
})
/**
* printk - print a kernel message
* @fmt: format string
*
* This is printk(). It can be called from any context. We want it to work.
*
* If printk indexing is enabled, _printk() is called from printk_index_wrap.
* Otherwise, printk is simply #defined to _printk.
*
* We try to grab the console_lock. If we succeed, it's easy - we log the
* output and call the console drivers. If we fail to get the semaphore, we
* place the output into the log buffer and return. The current holder of
* the console_sem will notice the new output in console_unlock(); and will
* send it to the consoles before releasing the lock.
*
* One effect of this deferred printing is that code which calls printk() and
* then changes console_loglevel may break. This is because console_loglevel
* is inspected when the actual printing occurs.
*
* See also:
* printf(3)
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
#define printk_deferred(fmt, ...) \
printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__)
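/*
 * Illustrative usage sketch (editorial addition): a bare printk() call with
 * an explicit loglevel prefix. The function name and message are
 * hypothetical.
 */
#if 0
static void example_report_failure(int err)
{
	printk(KERN_WARNING "example: operation failed with error %d\n", err);
}
#endif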
/**
* pr_emerg - Print an emergency-level message
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_EMERG loglevel. It uses pr_fmt() to
* generate the format string.
*/
#define pr_emerg(fmt, ...) \
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
/**
* pr_alert - Print an alert-level message
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_ALERT loglevel. It uses pr_fmt() to
* generate the format string.
*/
#define pr_alert(fmt, ...) \
printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
/**
* pr_crit - Print a critical-level message
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_CRIT loglevel. It uses pr_fmt() to
* generate the format string.
*/
#define pr_crit(fmt, ...) \
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
/**
* pr_err - Print an error-level message
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_ERR loglevel. It uses pr_fmt() to
* generate the format string.
*/
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
/**
* pr_warn - Print a warning-level message
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_WARNING loglevel. It uses pr_fmt()
* to generate the format string.
*/
#define pr_warn(fmt, ...) \
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
/**
* pr_notice - Print a notice-level message
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_NOTICE loglevel. It uses pr_fmt() to
* generate the format string.
*/
#define pr_notice(fmt, ...) \
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
/**
* pr_info - Print an info-level message
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_INFO loglevel. It uses pr_fmt() to
* generate the format string.
*/
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/**
* pr_cont - Continues a previous log message in the same line.
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_CONT loglevel. It should only be
* used when continuing a log message with no newline ('\n') enclosed. Otherwise
* it defaults back to KERN_DEFAULT loglevel.
*/
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
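/*
 * Illustrative usage sketch (editorial addition): building one log line from
 * several pr_cont() calls. The function name is hypothetical.
 */
#if 0
static void example_print_values(const int *vals, int n)
{
	int i;

	pr_info("example: values:");
	for (i = 0; i < n; i++)
		pr_cont(" %d", vals[i]);
	pr_cont("\n");
}
#endif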
/**
* pr_devel - Print a debug-level message conditionally
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to a printk with KERN_DEBUG loglevel if DEBUG is
* defined. Otherwise it does nothing.
*
* It uses pr_fmt() to generate the format string.
*/
#ifdef DEBUG
#define pr_devel(fmt, ...) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) \
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
/* If you are writing a driver, please use dev_dbg instead */
#if defined(CONFIG_DYNAMIC_DEBUG) || \
(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#include <linux/dynamic_debug.h>
/**
* pr_debug - Print a debug-level message conditionally
* @fmt: format string
* @...: arguments for the format string
*
* This macro expands to dynamic_pr_debug() if CONFIG_DYNAMIC_DEBUG is
* set. Otherwise, if DEBUG is defined, it's equivalent to a printk with
* KERN_DEBUG loglevel. If DEBUG is not defined it does nothing.
*
* It uses pr_fmt() to generate the format string (dynamic_pr_debug() uses
* pr_fmt() internally).
*/
#define pr_debug(fmt, ...) \
dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
/*
* Print a one-time message (analogous to WARN_ONCE() et al):
*/
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
#define printk_deferred_once(fmt, ...) \
DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
#else
#define printk_once(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
#define printk_deferred_once(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
#endif
#define pr_emerg_once(fmt, ...) \
printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
#define pr_alert_once(fmt, ...) \
printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_crit_once(fmt, ...) \
printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err_once(fmt, ...) \
printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn_once(fmt, ...) \
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_notice_once(fmt, ...) \
printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info_once(fmt, ...) \
printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/* no pr_cont_once, don't do that... */
#if defined(DEBUG)
#define pr_devel_once(fmt, ...) \
printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel_once(fmt, ...) \
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
/* If you are writing a driver, please use dev_dbg instead */
#if defined(DEBUG)
#define pr_debug_once(fmt, ...) \
printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug_once(fmt, ...) \
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
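/*
 * Illustrative usage sketch (editorial addition): a warning emitted at most
 * once per boot, however often the path is hit. The function name is
 * hypothetical.
 */
#if 0
static void example_legacy_interface(void)
{
	pr_warn_once("example: legacy interface used, please migrate\n");
}
#endif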
/*
* ratelimited messages with local ratelimit_state,
* no local ratelimit_state used in the !PRINTK case
*/
#ifdef CONFIG_PRINTK
#define printk_ratelimited(fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
\
if (__ratelimit(&_rs)) \
printk(fmt, ##__VA_ARGS__); \
})
#else
#define printk_ratelimited(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
#endif
#define pr_emerg_ratelimited(fmt, ...) \
printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
#define pr_alert_ratelimited(fmt, ...) \
printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_crit_ratelimited(fmt, ...) \
printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err_ratelimited(fmt, ...) \
printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn_ratelimited(fmt, ...) \
printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_notice_ratelimited(fmt, ...) \
printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info_ratelimited(fmt, ...) \
printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/* no pr_cont_ratelimited, don't do that... */
#if defined(DEBUG)
#define pr_devel_ratelimited(fmt, ...) \
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel_ratelimited(fmt, ...) \
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
/* If you are writing a driver, please use dev_dbg instead */
#if defined(CONFIG_DYNAMIC_DEBUG) || \
(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define pr_debug_ratelimited(fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
if (DYNAMIC_DEBUG_BRANCH(descriptor) && \
__ratelimit(&_rs)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug_ratelimited(fmt, ...) \
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
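/*
 * Illustrative usage sketch (editorial addition): rate-limiting a message
 * that may fire on every packet or request. The function name is
 * hypothetical.
 */
#if 0
static void example_drop_packet(int reason)
{
	pr_err_ratelimited("example: dropping packet, reason %d\n", reason);
}
#endif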
extern const struct file_operations kmsg_fops;
enum {
DUMP_PREFIX_NONE,
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii);
#ifdef CONFIG_PRINTK
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
#else
static inline void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
}
static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len)
{
}
#endif
#if defined(CONFIG_DYNAMIC_DEBUG) || \
(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
#elif defined(DEBUG)
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
#else
static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
}
#endif
/**
* print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
* @prefix_type: controls whether prefix of an offset, address, or none
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @buf: data blob to dump
* @len: number of bytes in the @buf
*
* Calls print_hex_dump(), with log level of KERN_DEBUG,
* rowsize of 16, groupsize of 1, and ASCII output included.
*/
#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
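/*
 * Illustrative usage sketch (editorial addition): dumping a small buffer at
 * KERN_DEBUG with offset prefixes. The function name is hypothetical.
 */
#if 0
static void example_dump_buffer(const void *buf, size_t len)
{
	print_hex_dump_bytes("example: ", DUMP_PREFIX_OFFSET, buf, len);
}
#endif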
#endif
// SPDX-License-Identifier: GPL-2.0+
/*
* XArray implementation
* Copyright (c) 2017-2018 Microsoft Corporation
* Copyright (c) 2018-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include "radix-tree.h"
/*
* Coding conventions in this file:
*
* @xa is used to refer to the entire xarray.
* @xas is the 'xarray operation state'. It may be either a pointer to
* an xa_state, or an xa_state stored on the stack. This is an unfortunate
* ambiguity.
* @index is the index of the entry being operated on
* @mark is an xa_mark_t; a small number indicating one of the mark bits.
* @node refers to an xa_node; usually the primary one being operated on by
* this function.
* @offset is the index into the slots array inside an xa_node.
* @parent refers to the @xa_node closer to the head than @node.
* @entry refers to something stored in a slot in the xarray
*/
static inline unsigned int xa_lock_type(const struct xarray *xa)
{
return (__force unsigned int)xa->xa_flags & 3;
}
static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
if (lock_type == XA_LOCK_IRQ)
xas_lock_irq(xas);
else if (lock_type == XA_LOCK_BH)
xas_lock_bh(xas);
else
xas_lock(xas);
}
static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
if (lock_type == XA_LOCK_IRQ)
xas_unlock_irq(xas);
else if (lock_type == XA_LOCK_BH)
xas_unlock_bh(xas);
else
xas_unlock(xas);
}
static inline bool xa_track_free(const struct xarray *xa)
{
return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}
static inline bool xa_zero_busy(const struct xarray *xa)
{
return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}
static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}
static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}
static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
return node->marks[(__force unsigned)mark];
}
static inline bool node_get_mark(struct xa_node *node,
unsigned int offset, xa_mark_t mark)
{
return test_bit(offset, node_marks(node, mark));
}
/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
xa_mark_t mark)
{
return __test_and_set_bit(offset, node_marks(node, mark));
}
/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
xa_mark_t mark)
{
return __test_and_clear_bit(offset, node_marks(node, mark));
}
static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}
static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}
#define mark_inc(mark) do { \
mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
/*
* xas_squash_marks() - Merge all marks to the first entry
* @xas: Array operation state.
*
* Set a mark on the first entry if any entry has it set. Clear marks on
* all sibling entries.
*/
static void xas_squash_marks(const struct xa_state *xas)
{
xa_mark_t mark = 0;
unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
	for (;;) {
		unsigned long *marks = node_marks(xas->xa_node, mark);

		if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) {
			__set_bit(xas->xa_offset, marks);
bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
}
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
}
/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
return (index >> node->shift) & XA_CHUNK_MASK;
}
static void xas_set_offset(struct xa_state *xas)
{
xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}
/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
unsigned int shift = xas->xa_node->shift;
xas->xa_index &= ~XA_CHUNK_MASK << shift;
xas->xa_index += offset << shift;
}
static void xas_next_offset(struct xa_state *xas)
{
xas->xa_offset++;
xas_move_index(xas, xas->xa_offset);
}
static void *set_bounds(struct xa_state *xas)
{
xas->xa_node = XAS_BOUNDS;
return NULL;
}
/*
* Starts a walk. If the @xas is already valid, we assume that it's on
* the right path and just return where we've got to. If we're in an
* error state, return NULL. If the index is outside the current scope
* of the xarray, return NULL without changing @xas->xa_node. Otherwise
* set @xas->xa_node to NULL and return the current head of the array.
*/
static void *xas_start(struct xa_state *xas)
{
void *entry;
	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
if (xas->xa_index)
return set_bounds(xas);
} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
}
	xas->xa_node = NULL;
	return entry;
}
static __always_inline void *xas_descend(struct xa_state *xas,
struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	while (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
		if (node->shift && xa_is_node(entry))
			entry = XA_RETRY_ENTRY;
}
xas->xa_offset = offset;
return entry;
}
/**
* xas_load() - Load an entry from the XArray (advanced).
* @xas: XArray operation state.
*
* Usually walks the @xas to the appropriate state to load the entry
* stored at xa_index. However, it will do nothing and return %NULL if
* @xas is in an error state. xas_load() will never expand the tree.
*
* If the xa_state is set up to operate on a multi-index entry, xas_load()
* may return %NULL or an internal entry, even if there are entries
* present within the range specified by @xas.
*
* Context: Any context. The caller should hold the xa_lock or the RCU lock.
* Return: Usually an entry in the XArray, but see description for exceptions.
*/
void *xas_load(struct xa_state *xas)
{
void *entry = xas_start(xas);
	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
break;
		entry = xas_descend(xas, node);
		if (node->shift == 0)
break;
}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
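/*
 * Illustrative usage sketch (editorial addition): an advanced-API lookup
 * roughly equivalent to xa_load(). The function name is hypothetical.
 */
#if 0
static void *example_lookup(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
#endif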
#define XA_RCU_FREE ((struct xarray *)1)
static void xa_node_free(struct xa_node *node)
{
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
node->array = XA_RCU_FREE;
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
/*
* xas_destroy() - Free any resources allocated during the XArray operation.
* @xas: XArray operation state.
*
* Most users will not need to call this function; it is called for you
* by xas_nomem().
*/
void xas_destroy(struct xa_state *xas)
{
struct xa_node *next, *node = xas->xa_alloc;
while (node) {
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
next = rcu_dereference_raw(node->parent);
radix_tree_node_rcu_free(&node->rcu_head);
xas->xa_alloc = node = next;
}
}
EXPORT_SYMBOL_GPL(xas_destroy);
/**
* xas_nomem() - Allocate memory if needed.
* @xas: XArray operation state.
* @gfp: Memory allocation flags.
*
* If we need to add new nodes to the XArray, we try to allocate memory
* with GFP_NOWAIT while holding the lock, which will usually succeed.
* If it fails, @xas is flagged as needing memory to continue. The caller
* should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
* the caller should retry the operation.
*
* Forward progress is guaranteed as one node is allocated here and
* stored in the xa_state where it will be found by xas_alloc(). More
* nodes will likely be found in the slab allocator, but we do not tie
* them up here.
*
* Return: true if memory was needed, and was successfully allocated.
*/
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
return false;
}
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!xas->xa_alloc)
return false;
xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
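/*
 * Illustrative usage sketch (editorial addition): the drop-lock-and-retry
 * pattern described above, storing one entry with xas_store() and letting
 * xas_nomem() allocate a node when GFP_NOWAIT fails under the lock. The
 * function name is hypothetical.
 */
#if 0
static int example_store(struct xarray *xa, unsigned long index, void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}
#endif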
/*
* __xas_nomem() - Drop locks and allocate memory if needed.
* @xas: XArray operation state.
* @gfp: Memory allocation flags.
*
* Internal variant of xas_nomem().
*
* Return: true if memory was needed, and was successfully allocated.
*/
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
__must_hold(xas->xa->xa_lock)
{
unsigned int lock_type = xa_lock_type(xas->xa);
if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
}
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
xas_lock_type(xas, lock_type);
} else {
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
}
if (!xas->xa_alloc)
return false;
xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
}
static void xas_update(struct xa_state *xas, struct xa_node *node)
{
if (xas->xa_update)
xas->xa_update(node);
else
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
struct xa_node *parent = xas->xa_node;
struct xa_node *node = xas->xa_alloc;
if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
} else {
gfp_t gfp = GFP_NOWAIT;
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
}
}
	if (parent) {
		node->offset = xas->xa_offset;
parent->count++;
XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
xas_update(xas, parent);
}
XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
node->shift = shift;
node->count = 0;
node->nr_values = 0;
RCU_INIT_POINTER(node->parent, xas->xa_node);
node->array = xas->xa;
	return node;
}
#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif
/*
* Use this to calculate the maximum index that will need to be created
* in order to add the entry described by @xas. Because we cannot store a
* multi-index entry at index 0, the calculation is a little more complex
* than you might expect.
*/
static unsigned long xas_max(struct xa_state *xas)
{
unsigned long max = xas->xa_index;
#ifdef CONFIG_XARRAY_MULTI
if (xas->xa_shift || xas->xa_sibs) {
unsigned long mask = xas_size(xas) - 1;
max |= mask;
if (mask == max)
max++;
}
#endif
return max;
}
/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}
static inline void *xa_zero_to_null(void *entry)
{
return xa_is_zero(entry) ? NULL : entry;
}
static void xas_shrink(struct xa_state *xas)
{
struct xarray *xa = xas->xa;
struct xa_node *node = xas->xa_node;
for (;;) {
void *entry;
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
if (node->count != 1)
break;
entry = xa_entry_locked(xa, node, 0);
if (!entry)
break;
if (!xa_is_node(entry) && node->shift)
break;
if (xa_zero_busy(xa))
entry = xa_zero_to_null(entry);
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);
		node->count = 0;
node->nr_values = 0;
if (!xa_is_node(entry))
RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
break;
node = xa_to_node(entry);
node->parent = NULL;
}
}
/*
* xas_delete_node() - Attempt to delete an xa_node
* @xas: Array operation state.
*
* Attempts to delete the @xas->xa_node. This will fail if xa->node has
* a non-zero reference count.
*/
static void xas_delete_node(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
for (;;) {
struct xa_node *parent;
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
if (node->count)
break;
parent = xa_parent_locked(xas->xa, node);
xas->xa_node = parent;
xas->xa_offset = node->offset;
xa_node_free(node);
if (!parent) {
xas->xa->xa_head = NULL;
xas->xa_node = XAS_BOUNDS;
return;
}
parent->slots[xas->xa_offset] = NULL;
parent->count--;
XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
node = parent;
xas_update(xas, node);
}
	if (!node->parent)
		xas_shrink(xas);
}
/**
* xas_free_nodes() - Free this node and all nodes that it references
* @xas: Array operation state.
* @top: Node to free
*
* This node has been removed from the tree. We must now free it and all
* of its subnodes. There may be RCU walkers with references into the tree,
* so we must replace all entries with retry markers.
*/
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
unsigned int offset = 0;
struct xa_node *node = top;
for (;;) {
void *entry = xa_entry_locked(xas->xa, node, offset);
if (node->shift && xa_is_node(entry)) {
node = xa_to_node(entry);
offset = 0;
continue;
}
if (entry)
RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
offset++;
while (offset == XA_CHUNK_SIZE) {
struct xa_node *parent;
parent = xa_parent_locked(xas->xa, node);
offset = node->offset + 1;
node->count = 0;
node->nr_values = 0;
xas_update(xas, node);
xa_node_free(node);
if (node == top)
return;
node = parent;
}
}
}
/*
* xas_expand adds nodes to the head of the tree until it has reached
* sufficient height to be able to contain @xas->xa_index
*/
static int xas_expand(struct xa_state *xas, void *head)
{
struct xarray *xa = xas->xa;
struct xa_node *node = NULL;
unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
node = xa_to_node(head);
shift = node->shift + XA_CHUNK_SHIFT;
}
xas->xa_node = NULL;
while (max > max_index(head)) {
xa_mark_t mark = 0;
XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
node = xas_alloc(xas, shift);
if (!node)
return -ENOMEM;
		node->count = 1;
		if (xa_is_value(head))
node->nr_values = 1;
RCU_INIT_POINTER(node->slots[0], head);
/* Propagate the aggregated mark info to the new child */
for (;;) {
if (xa_track_free(xa) && mark == XA_FREE_MARK) {
node_mark_all(node, XA_FREE_MARK);
if (!xa_marked(xa, XA_FREE_MARK)) {
node_clear_mark(node, 0, XA_FREE_MARK);
xa_mark_set(xa, XA_FREE_MARK);
}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
}
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
/*
* Now that the new node is fully initialised, we can add
* it to the tree
*/
if (xa_is_node(head)) {
xa_to_node(head)->offset = 0;
rcu_assign_pointer(xa_to_node(head)->parent, node);
}
head = xa_mk_node(node);
rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);
		shift += XA_CHUNK_SHIFT;
}
xas->xa_node = node;
return shift;
}
/*
* xas_create() - Create a slot to store an entry in.
* @xas: XArray operation state.
* @allow_root: %true if we can store the entry in the root directly
*
* Most users will not need to call this function directly, as it is called
* by xas_store(). It is useful for doing conditional store operations
* (see the xa_cmpxchg() implementation for an example).
*
* Return: If the slot already existed, returns the contents of this slot.
* If the slot was newly created, returns %NULL. If it failed to create the
* slot, returns %NULL and indicates the error in @xas.
*/
static void *xas_create(struct xa_state *xas, bool allow_root)
{
struct xarray *xa = xas->xa;
void *entry;
void __rcu **slot;
struct xa_node *node = xas->xa_node;
int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
return NULL;
} else if (node) {
unsigned int offset = xas->xa_offset;
shift = node->shift;
entry = xa_entry_locked(xa, node, offset);
slot = &node->slots[offset];
} else {
shift = 0;
entry = xa_head_locked(xa);
slot = &xa->xa_head;
}
	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
if (!entry) {
node = xas_alloc(xas, shift);
if (!node)
break;
if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
} else {
break;
}
entry = xas_descend(xas, node);
slot = &node->slots[xas->xa_offset];
}
return entry;
}
/**
* xas_create_range() - Ensure that stores to this range will succeed
* @xas: XArray operation state.
*
* Creates all of the slots in the range covered by @xas. Sets @xas to
* create single-index entries and positions it at the beginning of the
* range. This is for the benefit of users which have not yet been
* converted to use multi-index entries.
*/
void xas_create_range(struct xa_state *xas)
{
unsigned long index = xas->xa_index;
unsigned char shift = xas->xa_shift;
unsigned char sibs = xas->xa_sibs;
xas->xa_index |= ((sibs + 1UL) << shift) - 1;
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
xas->xa_offset |= sibs;
xas->xa_shift = 0;
xas->xa_sibs = 0;
for (;;) {
xas_create(xas, true);
if (xas_error(xas))
goto restore;
if (xas->xa_index <= (index | XA_CHUNK_MASK))
goto success;
xas->xa_index -= XA_CHUNK_SIZE;
for (;;) {
struct xa_node *node = xas->xa_node;
if (node->shift >= shift)
break;
xas->xa_node = xa_parent_locked(xas->xa, node);
xas->xa_offset = node->offset - 1;
if (node->offset != 0)
break;
}
}
restore:
xas->xa_shift = shift;
xas->xa_sibs = sibs;
xas->xa_index = index;
return;
success:
xas->xa_index = index;
if (xas->xa_node)
xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);
static void update_node(struct xa_state *xas, struct xa_node *node,
int count, int values)
{
if (!node || (!count && !values))
return;
node->count += count;
node->nr_values += values;
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}
/**
* xas_store() - Store this entry in the XArray.
* @xas: XArray operation state.
* @entry: New entry.
*
* If @xas is operating on a multi-index entry, the entry returned by this
* function is essentially meaningless (it may be an internal entry or it
* may be %NULL, even if there are non-NULL entries at some of the indices
* covered by the range). This is not a problem for any current users,
* and can be changed if needed.
*
* Return: The old entry at this index.
*/
void *xas_store(struct xa_state *xas, void *entry)
{
struct xa_node *node;
void __rcu **slot = &xas->xa->xa_head;
unsigned int offset, max;
int count = 0;
int values = 0;
void *first, *next;
bool value = xa_is_value(entry);
if (entry) {
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);

		first = xas_create(xas, allow_root);
} else {
first = xas_load(xas);
}
	if (xas_invalid(xas))
		return first;
node = xas->xa_node;
if (node && (xas->xa_shift < node->shift))
xas->xa_sibs = 0;
if ((first == entry) && !xas->xa_sibs)
return first;
next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
if (node) {
slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
}
	if (!entry)
		xas_init_marks(xas);
for (;;) {
/*
* Must clear the marks before setting the entry to NULL,
* otherwise xas_for_each_marked may find a NULL entry and
* stop early. rcu_assign_pointer contains a release barrier
* so the mark clearing will appear to happen before the
* entry is set to NULL.
*/
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next) && (!node || node->shift))
xas_free_nodes(xas, xa_to_node(next));
if (!node)
break;
count += !next - !entry;
values += !xa_is_value(first) - !value;
if (entry) {
if (offset == max)
break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
} else {
if (offset == XA_CHUNK_MASK)
break;
}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
break;
first = next;
}
slot++;
}
	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
/**
* xas_get_mark() - Returns the state of this mark.
* @xas: XArray operation state.
* @mark: Mark number.
*
* Return: true if the mark is set, false if the mark is clear or @xas
* is in an error state.
*/
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
if (xas_invalid(xas))
return false;
if (!xas->xa_node)
return xa_marked(xas->xa, mark);
return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);
/**
* xas_set_mark() - Sets the mark on this entry and its parents.
* @xas: XArray operation state.
* @mark: Mark number.
*
* Sets the specified mark on this entry, and walks up the tree setting it
* on all the ancestor entries. Does nothing if @xas has not been walked to
* an entry, or is in an error state.
*/
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
unsigned int offset = xas->xa_offset;
if (xas_invalid(xas))
return;
while (node) {
if (node_set_mark(node, offset, mark))
return;
offset = node->offset;
node = xa_parent_locked(xas->xa, node);
}
if (!xa_marked(xas->xa, mark))
xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);
/**
* xas_clear_mark() - Clears the mark on this entry and its parents.
* @xas: XArray operation state.
* @mark: Mark number.
*
* Clears the specified mark on this entry, and walks back to the head
* attempting to clear it on all the ancestor entries. Does nothing if
* @xas has not been walked to an entry, or is in an error state.
*/
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
unsigned int offset = xas->xa_offset;
if (xas_invalid(xas))
return;
while (node) {
if (!node_clear_mark(node, offset, mark))
return;
if (node_any_mark(node, mark))
return;
offset = node->offset;
node = xa_parent_locked(xas->xa, node);
}
	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);
/**
* xas_init_marks() - Initialise all marks for the entry
* @xas: Array operations state.
*
* Initialise all marks for the entry specified by @xas. If we're tracking
* free entries with a mark, we need to set it on all entries. All other
* marks are cleared.
*
* This implementation is not as efficient as it could be; we may walk
* up the tree multiple times.
*/
void xas_init_marks(const struct xa_state *xas)
{
xa_mark_t mark = 0;
for (;;) {
if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
xas_set_mark(xas, mark);
else
xas_clear_mark(xas, mark);
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
}
EXPORT_SYMBOL_GPL(xas_init_marks);
#ifdef CONFIG_XARRAY_MULTI
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
{
unsigned int marks = 0;
xa_mark_t mark = XA_MARK_0;
for (;;) {
if (node_get_mark(node, offset, mark))
marks |= 1 << (__force unsigned int)mark;
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
return marks;
}
static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
xa_mark_t mark)
{
int i;
if (sibs == 0)
node_mark_all(node, mark);
else {
for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
node_set_mark(node, i, mark);
}
}
static void node_set_marks(struct xa_node *node, unsigned int offset,
struct xa_node *child, unsigned int sibs,
unsigned int marks)
{
xa_mark_t mark = XA_MARK_0;
for (;;) {
if (marks & (1 << (__force unsigned int)mark)) {
node_set_mark(node, offset, mark);
if (child)
node_mark_slots(child, sibs, mark);
}
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
}
static void __xas_init_node_for_split(struct xa_state *xas,
struct xa_node *node, void *entry)
{
unsigned int i;
void *sibling = NULL;
unsigned int mask = xas->xa_sibs;
if (!node)
return;
node->array = xas->xa;
for (i = 0; i < XA_CHUNK_SIZE; i++) {
if ((i & mask) == 0) {
RCU_INIT_POINTER(node->slots[i], entry);
sibling = xa_mk_sibling(i);
} else {
RCU_INIT_POINTER(node->slots[i], sibling);
}
}
}
/**
* xas_split_alloc() - Allocate memory for splitting an entry.
* @xas: XArray operation state.
* @entry: New entry which will be stored in the array.
* @order: Current entry order.
* @gfp: Memory allocation flags.
*
* This function should be called before calling xas_split().
* If necessary, it will allocate new nodes (and fill them with @entry)
* to prepare for the upcoming split of an entry of @order size into
* entries of the order stored in the @xas.
*
* Context: May sleep if @gfp flags permit.
*/
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
gfp_t gfp)
{
unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
/* XXX: no support for splitting really large entries yet */
if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
goto nomem;
if (xas->xa_shift + XA_CHUNK_SHIFT > order)
return;
do {
struct xa_node *node;
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node)
goto nomem;
__xas_init_node_for_split(xas, node, entry);
RCU_INIT_POINTER(node->parent, xas->xa_alloc);
xas->xa_alloc = node;
} while (sibs-- > 0);
return;
nomem:
xas_destroy(xas);
xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);
/**
* xas_split() - Split a multi-index entry into smaller entries.
* @xas: XArray operation state.
* @entry: New entry to store in the array.
* @order: Current entry order.
*
* The size of the new entries is set in @xas. The value in @entry is
* copied to all the replacement entries.
*
* Context: Any context. The caller should hold the xa_lock.
*/
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
unsigned int offset, marks;
struct xa_node *node;
void *curr = xas_load(xas);
int values = 0;
node = xas->xa_node;
if (xas_top(node))
return;
marks = node_get_marks(node, xas->xa_offset);
offset = xas->xa_offset + sibs;
do {
if (xas->xa_shift < node->shift) {
struct xa_node *child = xas->xa_alloc;
xas->xa_alloc = rcu_dereference_raw(child->parent);
child->shift = node->shift - XA_CHUNK_SHIFT;
child->offset = offset;
child->count = XA_CHUNK_SIZE;
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
node_set_marks(node, offset, child, xas->xa_sibs,
marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
values--;
xas_update(xas, child);
} else {
unsigned int canon = offset - xas->xa_sibs;
node_set_marks(node, canon, NULL, 0, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],
xa_mk_sibling(canon));
values += (xa_is_value(entry) - xa_is_value(curr)) *
(xas->xa_sibs + 1);
}
} while (offset-- > xas->xa_offset);
node->nr_values += values;
xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
/**
* xas_try_split_min_order() - Minimal split order xas_try_split() can accept
* @order: Current entry order.
*
* xas_try_split() can split a multi-index entry into entries smaller than
* @order - 1 if no new xa_node is needed. This function provides the minimal
* order that xas_try_split() supports.
*
* Return: the minimal order xas_try_split() supports
*
* Context: Any context.
*
*/
unsigned int xas_try_split_min_order(unsigned int order)
{
if (order % XA_CHUNK_SHIFT == 0)
return order == 0 ? 0 : order - 1;
return order - (order % XA_CHUNK_SHIFT);
}
EXPORT_SYMBOL_GPL(xas_try_split_min_order);
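/*
 * Worked example (editorial addition): with the default XA_CHUNK_SHIFT of 6,
 * xas_try_split_min_order(9) is 6 (9 - 9 % 6), xas_try_split_min_order(6) is
 * 5, and xas_try_split_min_order(3) is 0, so stepping through these orders
 * keeps each xas_try_split() call to at most one new xa_node.
 */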
/**
* xas_try_split() - Try to split a multi-index entry.
* @xas: XArray operation state.
* @entry: New entry to store in the array.
* @order: Current entry order.
*
* The size of the new entries is set in @xas. The value in @entry is
* copied to all the replacement entries. If and only if one new xa_node is
* needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is
* NULL. If more than one new xa_node is needed, the function returns an
* -EINVAL error.
*
* NOTE: use xas_try_split_min_order() to get the next split order instead of
* @order - 1 if you want to minimize the number of xas_try_split() calls.
*
* Context: Any context. The caller should hold the xa_lock.
*/
void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
{
unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
unsigned int offset, marks;
struct xa_node *node;
void *curr = xas_load(xas);
int values = 0;
gfp_t gfp = GFP_NOWAIT;
node = xas->xa_node;
if (xas_top(node))
return;
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
marks = node_get_marks(node, xas->xa_offset);
offset = xas->xa_offset + sibs;
if (xas->xa_shift < node->shift) {
struct xa_node *child = xas->xa_alloc;
unsigned int expected_sibs =
(1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1;
/*
* No support for splitting sibling entries
* (horizontally) or cascade split (vertically), which
* requires two or more new xa_nodes.
* Since if one xa_node allocation fails,
* it is hard to free the prior allocations.
*/
if (sibs || xas->xa_sibs != expected_sibs) {
xas_destroy(xas);
xas_set_err(xas, -EINVAL);
return;
}
if (!child) {
child = kmem_cache_alloc_lru(radix_tree_node_cachep,
xas->xa_lru, gfp);
if (!child) {
xas_destroy(xas);
xas_set_err(xas, -ENOMEM);
return;
}
RCU_INIT_POINTER(child->parent, xas->xa_alloc);
}
__xas_init_node_for_split(xas, child, entry);
xas->xa_alloc = rcu_dereference_raw(child->parent);
child->shift = node->shift - XA_CHUNK_SHIFT;
child->offset = offset;
child->count = XA_CHUNK_SIZE;
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
node_set_marks(node, offset, child, xas->xa_sibs,
marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
values--;
xas_update(xas, child);
} else {
do {
unsigned int canon = offset - xas->xa_sibs;
node_set_marks(node, canon, NULL, 0, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],
xa_mk_sibling(canon));
values += (xa_is_value(entry) - xa_is_value(curr)) *
(xas->xa_sibs + 1);
} while (offset-- > xas->xa_offset);
}
node->nr_values += values;
xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_try_split);
#endif
/**
* xas_pause() - Pause a walk to drop a lock.
* @xas: XArray operation state.
*
* Some users need to pause a walk and drop the lock they're holding in
* order to yield to a higher priority thread or carry out an operation
* on an entry. Those users should call this function before they drop
* the lock. It resets the @xas to be suitable for the next iteration
* of the loop after the user has reacquired the lock. If most entries
* found during a walk require you to call xas_pause(), the xa_for_each()
* iterator may be more appropriate.
*
* Note that xas_pause() only works for forward iteration. If a user needs
* to pause a reverse iteration, we will need a xas_pause_rev().
*/
void xas_pause(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
if (xas_invalid(xas))
return;
xas->xa_node = XAS_RESTART;
if (node) {
unsigned long offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
xas->xa_index &= ~0UL << node->shift;
xas->xa_index += (offset - xas->xa_offset) << node->shift;
if (xas->xa_index == 0)
xas->xa_node = XAS_BOUNDS;
} else {
xas->xa_index++;
}
}
EXPORT_SYMBOL_GPL(xas_pause);
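/*
 * Illustrative usage sketch (editorial addition): dropping the lock
 * periodically during a long iteration, as described above. The function
 * name is hypothetical.
 */
#if 0
static void example_long_scan(struct xarray *xa, unsigned long max)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, max) {
		if (need_resched()) {
			xas_pause(&xas);
			xas_unlock(&xas);
			cond_resched();
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
}
#endif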
/*
* __xas_prev() - Find the previous entry in the XArray.
* @xas: XArray operation state.
*
* Helper function for xas_prev() which handles all the complex cases
* out of line.
*/
void *__xas_prev(struct xa_state *xas)
{
void *entry;
if (!xas_frozen(xas->xa_node))
xas->xa_index--;
if (!xas->xa_node)
return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
xas->xa_offset--;
while (xas->xa_offset == 255) {
xas->xa_offset = xas->xa_node->offset - 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
if (!xas->xa_node)
return set_bounds(xas);
}
for (;;) {
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
xas_set_offset(xas);
}
}
EXPORT_SYMBOL_GPL(__xas_prev);
/*
* __xas_next() - Find the next entry in the XArray.
* @xas: XArray operation state.
*
* Helper function for xas_next() which handles all the complex cases
* out of line.
*/
void *__xas_next(struct xa_state *xas)
{
void *entry;
if (!xas_frozen(xas->xa_node))
xas->xa_index++;
if (!xas->xa_node)
return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
xas->xa_offset++;
while (xas->xa_offset == XA_CHUNK_SIZE) {
xas->xa_offset = xas->xa_node->offset + 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
if (!xas->xa_node)
return set_bounds(xas);
}
for (;;) {
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
xas_set_offset(xas);
}
}
EXPORT_SYMBOL_GPL(__xas_next);
/**
* xas_find() - Find the next present entry in the XArray.
* @xas: XArray operation state.
* @max: Highest index to return.
*
* If the @xas has not yet been walked to an entry, return the entry
* which has an index >= xas.xa_index. If it has been walked, the entry
* currently being pointed at has been processed, and so we move to the
* next entry.
*
* If no entry is found and the array is smaller than @max, the iterator
* is set to the smallest index not yet in the array. This allows @xas
* to be immediately passed to xas_store().
*
* Return: The entry, if found, otherwise %NULL.
*/
void *xas_find(struct xa_state *xas, unsigned long max)
{
void *entry;
if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
return NULL;
if (xas->xa_index > max)
return set_bounds(xas);
if (!xas->xa_node) {
xas->xa_index = 1;
return set_bounds(xas);
} else if (xas->xa_node == XAS_RESTART) {
entry = xas_load(xas);
if (entry || xas_not_node(xas->xa_node))
return entry;
} else if (!xas->xa_node->shift &&
xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
}
xas_next_offset(xas);
while (xas->xa_node && (xas->xa_index <= max)) {
if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
xas->xa_offset = xas->xa_node->offset + 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
continue;
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (xa_is_node(entry)) {
xas->xa_node = xa_to_node(entry);
xas->xa_offset = 0;
continue;
}
if (entry && !xa_is_sibling(entry))
return entry;
xas_next_offset(xas);
}
if (!xas->xa_node)
xas->xa_node = XAS_BOUNDS;
return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
/**
* xas_find_marked() - Find the next marked entry in the XArray.
* @xas: XArray operation state.
* @max: Highest index to return.
* @mark: Mark number to search for.
*
* If the @xas has not yet been walked to an entry, return the marked entry
* which has an index >= xas.xa_index. If it has been walked, the entry
* currently being pointed at has been processed, and so we return the
* first marked entry with an index > xas.xa_index.
*
* If no marked entry is found and the array is smaller than @max, @xas is
* set to the bounds state and xas->xa_index is set to the smallest index
* not yet in the array. This allows @xas to be immediately passed to
* xas_store().
*
* If no entry is found before @max is reached, @xas is set to the restart
* state.
*
* Return: The entry, if found, otherwise %NULL.
*/
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
bool advance = true;
unsigned int offset;
void *entry;
	if (xas_error(xas))
		return NULL;
	if (xas->xa_index > max)
		goto max;

	if (!xas->xa_node) {
		xas->xa_index = 1;
goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
return entry;
xas->xa_index = 1;
goto out;
}
xas->xa_node = xa_to_node(entry);
xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
}
	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
xas->xa_offset = xas->xa_node->offset + 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
if (!xas->xa_node)
break;
advance = false;
continue;
}
		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
xas->xa_offset = xa_to_sibling(entry);
xas_move_index(xas, xas->xa_offset);
}
}
		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
advance = false;
xas_move_index(xas, offset);
/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
xas->xa_offset = offset;
if (offset == XA_CHUNK_SIZE)
continue;
}
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
			continue;
		if (xa_is_sibling(entry))
continue;
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
}
out:
	if (xas->xa_index > max)
goto max;
return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
/**
* xas_find_conflict() - Find the next present entry in a range.
* @xas: XArray operation state.
*
* The @xas describes both a range and a position within that range.
*
* Context: Any context. Expects xa_lock to be held.
* Return: The next entry in the range covered by @xas or %NULL.
*/
void *xas_find_conflict(struct xa_state *xas)
{
void *curr;
if (xas_error(xas))
return NULL;
if (!xas->xa_node)
return NULL;
if (xas_top(xas->xa_node)) {
curr = xas_start(xas);
if (!curr)
return NULL;
while (xa_is_node(curr)) {
struct xa_node *node = xa_to_node(curr);
curr = xas_descend(xas, node);
}
if (curr)
return curr;
}
if (xas->xa_node->shift > xas->xa_shift)
return NULL;
for (;;) {
if (xas->xa_node->shift == xas->xa_shift) {
if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
break;
} else if (xas->xa_offset == XA_CHUNK_MASK) {
xas->xa_offset = xas->xa_node->offset;
xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
if (!xas->xa_node)
break;
continue;
}
curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
if (xa_is_sibling(curr))
continue;
while (xa_is_node(curr)) {
xas->xa_node = xa_to_node(curr);
xas->xa_offset = 0;
curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
}
if (curr)
return curr;
}
xas->xa_offset -= xas->xa_sibs;
return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);
/**
* xa_load() - Load an entry from an XArray.
* @xa: XArray.
* @index: index into array.
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The entry at @index in @xa.
*/
void *xa_load(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
void *entry;
rcu_read_lock();
do {
		entry = xa_zero_to_null(xas_load(&xas));
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();
return entry;
}
EXPORT_SYMBOL(xa_load);
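/*
 * Illustrative usage sketch (editorial addition): a normal-API lookup where
 * %NULL means "no entry present". The function name is hypothetical.
 */
#if 0
static bool example_is_present(struct xarray *xa, unsigned long index)
{
	return xa_load(xa, index) != NULL;
}
#endif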
static void *xas_result(struct xa_state *xas, void *curr)
{
if (xas_error(xas))
curr = xas->xa_node;
return curr;
}
/**
* __xa_erase() - Erase this entry from the XArray while locked.
* @xa: XArray.
* @index: Index into array.
*
* After this function returns, loading from @index will return %NULL.
* If the index is part of a multi-index entry, all indices will be erased
* and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Expects xa_lock to be held on entry.
* Return: The entry which used to be at this index.
*/
void *__xa_erase(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL)));
}
EXPORT_SYMBOL(__xa_erase);
/**
* xa_erase() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
*
* After this function returns, loading from @index will return %NULL.
* If the index is part of a multi-index entry, all indices will be erased
* and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Takes and releases the xa_lock.
* Return: The entry which used to be at this index.
*/
void *xa_erase(struct xarray *xa, unsigned long index)
{
void *entry;
xa_lock(xa);
entry = __xa_erase(xa, index);
xa_unlock(xa);
return entry;
}
EXPORT_SYMBOL(xa_erase);
/**
* __xa_store() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* You must already be holding the xa_lock when calling this function.
* It will drop the lock if needed to allocate memory, and then reacquire
* it afterwards.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: The old entry at this index or xa_err() if an error happened.
*/
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;
do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, xa_zero_to_null(curr));
}
EXPORT_SYMBOL(__xa_store);
/**
* xa_store() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* After this function returns, loads from this index will return @entry.
* Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Any context. Takes and releases the xa_lock.
* May sleep if the @gfp flags permit.
* Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
* cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
* failed.
*/
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
void *curr;
xa_lock(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock(xa);
return curr;
}
EXPORT_SYMBOL(xa_store);
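/*
 * Illustrative usage sketch (editorial addition): storing an entry and
 * checking for failure with xa_err(). The function name is hypothetical.
 */
#if 0
static int example_insert_item(struct xarray *xa, unsigned long index,
			       void *item)
{
	void *old = xa_store(xa, index, item, GFP_KERNEL);

	/* xa_err() is 0 on success or a negative errno on failure. */
	return xa_err(old);
}
#endif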
static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp);
/**
* __xa_cmpxchg() - Conditionally replace an entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
* @entry: New value to place in array.
* @gfp: Memory allocation flags.
*
* You must already be holding the xa_lock when calling this function.
* It will drop the lock if needed to allocate memory, and then reacquire
* it afterwards.
*
* If the entry at @index is the same as @old, replace it with @entry.
* If the return value is equal to @old, then the exchange was successful.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: The old value at this index or xa_err() if an error happened.
*/
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp));
}
EXPORT_SYMBOL(__xa_cmpxchg);
static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
do {
curr = xas_load(&xas);
if (curr == old) {
xas_store(&xas, entry);
if (xa_track_free(xa) && entry && !curr)
xas_clear_mark(&xas, XA_FREE_MARK);
}
} while (__xas_nomem(&xas, gfp));
return xas_result(&xas, curr);
}
/**
* __xa_insert() - Store this entry in the XArray if no entry is present.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
* if no entry is present. Inserting will fail if a reserved entry is
* present, even though loading from this index will return NULL.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
void *curr;
int errno;
if (!entry)
entry = XA_ZERO_ENTRY;
curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp);
errno = xa_err(curr);
if (errno)
return errno;
return (curr != NULL) ? -EBUSY : 0;
}
EXPORT_SYMBOL(__xa_insert);
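/*
 * Illustrative sketch (editorial addition): insert-if-absent under the
 * xa_lock.  -EBUSY indicates an entry (or a reserved entry) already occupies
 * the index.  The function name is hypothetical.
 */
static int __maybe_unused xa_insert_example(struct xarray *xa,
					    unsigned long index, void *item)
{
	int err;

	xa_lock(xa);
	err = __xa_insert(xa, index, item, GFP_KERNEL);
	xa_unlock(xa);

	return err;	/* 0, -EBUSY or -ENOMEM */
}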
#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
unsigned long last)
{
unsigned int shift = 0;
unsigned long sibs = last - first;
unsigned int offset = XA_CHUNK_MASK;
xas_set(xas, first);
while ((first & XA_CHUNK_MASK) == 0) {
if (sibs < XA_CHUNK_MASK)
break;
if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
break;
shift += XA_CHUNK_SHIFT;
if (offset == XA_CHUNK_MASK)
offset = sibs & XA_CHUNK_MASK;
sibs >>= XA_CHUNK_SHIFT;
first >>= XA_CHUNK_SHIFT;
}
offset = first & XA_CHUNK_MASK;
if (offset + sibs > XA_CHUNK_MASK)
sibs = XA_CHUNK_MASK - offset;
if ((((first + sibs + 1) << shift) - 1) > last)
sibs -= 1;
xas->xa_shift = shift;
xas->xa_sibs = sibs;
}
/**
* xa_store_range() - Store this entry at a range of indices in the XArray.
* @xa: XArray.
* @first: First index to affect.
* @last: Last index to affect.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
 * After this function returns, loads from any index between @first and @last,
 * inclusive, will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with the affected indices are unaffected unless
 * @entry is %NULL.
*
* Context: Process context. Takes and releases the xa_lock. May sleep
* if the @gfp flags permit.
* Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
* an XArray, or xa_err(-ENOMEM) if memory allocation failed.
*/
void *xa_store_range(struct xarray *xa, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, 0);
if (WARN_ON_ONCE(xa_is_internal(entry)))
return XA_ERROR(-EINVAL);
if (last < first)
return XA_ERROR(-EINVAL);
do {
xas_lock(&xas);
if (entry) {
unsigned int order = BITS_PER_LONG;
if (last + 1)
order = __ffs(last + 1);
xas_set_order(&xas, last, order);
xas_create(&xas, true);
if (xas_error(&xas))
goto unlock;
}
do {
xas_set_range(&xas, first, last);
xas_store(&xas, entry);
if (xas_error(&xas))
goto unlock;
first += xas_size(&xas);
} while (first <= last);
unlock:
xas_unlock(&xas);
} while (xas_nomem(&xas, gfp));
return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
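/*
 * Illustrative sketch (editorial addition): storing one entry over a range of
 * indices.  Requires CONFIG_XARRAY_MULTI; afterwards a load from any index in
 * [16, 31] returns @item.  Names are hypothetical.
 */
static int __maybe_unused xa_store_range_example(struct xarray *xa, void *item)
{
	void *ret = xa_store_range(xa, 16, 31, item, GFP_KERNEL);

	return xa_err(ret);	/* %NULL (0) on success */
}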
/**
* xas_get_order() - Get the order of an entry.
* @xas: XArray operation state.
*
* Called after xas_load, the xas should not be in an error state.
* The xas should not be pointing to a sibling entry.
*
* Return: A number between 0 and 63 indicating the order of the entry.
*/
int xas_get_order(struct xa_state *xas)
{
int order = 0;
if (!xas->xa_node)
return 0;
XA_NODE_BUG_ON(xas->xa_node, xa_is_sibling(xa_entry(xas->xa,
xas->xa_node, xas->xa_offset)));
for (;;) {
unsigned int slot = xas->xa_offset + (1 << order);
if (slot >= XA_CHUNK_SIZE)
break;
if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
break;
order++;
}
order += xas->xa_node->shift;
return order;
}
EXPORT_SYMBOL_GPL(xas_get_order);
/**
* xa_get_order() - Get the order of an entry.
* @xa: XArray.
* @index: Index of the entry.
*
* Return: A number between 0 and 63 indicating the order of the entry.
*/
int xa_get_order(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
int order = 0;
void *entry;
rcu_read_lock();
entry = xas_load(&xas);
if (entry)
order = xas_get_order(&xas);
rcu_read_unlock();
return order;
}
EXPORT_SYMBOL(xa_get_order);
#endif /* CONFIG_XARRAY_MULTI */
/**
* __xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @limit: Range for allocated ID.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
 * Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
* -EBUSY if there are no free entries in @limit.
*/
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = limit.min;
		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		else
			*id = xas.xa_index;
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
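/*
 * Illustrative sketch (editorial addition): allocating an unused index for
 * @item in an XArray created with XA_FLAGS_ALLOC (e.g. via
 * DEFINE_XARRAY_ALLOC()).  xa_limit_32b allows any 32-bit index.  Names are
 * hypothetical.
 */
static int __maybe_unused xa_alloc_example(struct xarray *xa, void *item,
					   u32 *id)
{
	int err;

	xa_lock(xa);
	err = __xa_alloc(xa, id, item, xa_limit_32b, GFP_KERNEL);
	xa_unlock(xa);

	return err;	/* 0 on success, -EBUSY if no free index, -ENOMEM */
}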
/**
* __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of allocated ID.
* @next: Pointer to next ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
 * Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
* allocation succeeded after wrapping, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
u32 min = limit.min;
int ret;
limit.min = max(min, *next);
ret = __xa_alloc(xa, id, entry, limit, gfp);
if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
ret = 1;
}
if (ret < 0 && limit.min > min) {
limit.min = min;
ret = __xa_alloc(xa, id, entry, limit, gfp);
if (ret == 0)
ret = 1;
}
	if (ret >= 0) {
		*next = *id + 1;
		if (*next == 0)
			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
	}
return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
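/*
 * Illustrative sketch (editorial addition): cyclic allocation keeps a @next
 * cursor so that recently freed IDs are not immediately reused.  A return
 * value of 1 (wrapped) is still a success.  Names are hypothetical.
 */
static int __maybe_unused xa_alloc_cyclic_example(struct xarray *xa, void *item,
						  u32 *id, u32 *next)
{
	int err;

	xa_lock(xa);
	err = __xa_alloc_cyclic(xa, id, item, xa_limit_32b, next, GFP_KERNEL);
	xa_unlock(xa);

	return err < 0 ? err : 0;	/* treat "wrapped" (1) as success */
}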
/**
* __xa_set_mark() - Set this mark on this entry while locked.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Any context. Expects xa_lock to be held on entry.
*/
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
XA_STATE(xas, xa, index);
void *entry = xas_load(&xas);
if (entry)
xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_set_mark);
/**
* __xa_clear_mark() - Clear this mark on this entry while locked.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Context: Any context. Expects xa_lock to be held on entry.
*/
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
XA_STATE(xas, xa, index);
void *entry = xas_load(&xas);
if (entry)
xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_clear_mark);
/**
* xa_get_mark() - Inquire whether this mark is set on this entry.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* This function uses the RCU read lock, so the result may be out of date
* by the time it returns. If you need the result to be stable, use a lock.
*
* Context: Any context. Takes and releases the RCU lock.
* Return: True if the entry at @index has this mark set, false if it doesn't.
*/
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
XA_STATE(xas, xa, index);
void *entry;
rcu_read_lock();
entry = xas_start(&xas);
while (xas_get_mark(&xas, mark)) {
if (!xa_is_node(entry))
goto found;
entry = xas_descend(&xas, xa_to_node(entry));
}
rcu_read_unlock();
return false;
found:
rcu_read_unlock();
return true;
}
EXPORT_SYMBOL(xa_get_mark);
/**
* xa_set_mark() - Set this mark on this entry.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Process context. Takes and releases the xa_lock.
*/
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
xa_lock(xa);
__xa_set_mark(xa, index, mark);
xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);
/**
* xa_clear_mark() - Clear this mark on this entry.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Clearing a mark always succeeds.
*
* Context: Process context. Takes and releases the xa_lock.
*/
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
xa_lock(xa);
__xa_clear_mark(xa, index, mark);
xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
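/*
 * Illustrative sketch (editorial addition): tagging an entry with XA_MARK_0
 * and testing the mark.  Marks only stick to non-NULL entries.  The function
 * name is hypothetical.
 */
static bool __maybe_unused xa_mark_example(struct xarray *xa,
					   unsigned long index)
{
	xa_set_mark(xa, index, XA_MARK_0);
	return xa_get_mark(xa, index, XA_MARK_0);
}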
/**
* xa_find() - Search the XArray for an entry.
* @xa: XArray.
* @indexp: Pointer to an index.
* @max: Maximum index to search to.
* @filter: Selection criterion.
*
* Finds the entry in @xa which matches the @filter, and has the lowest
* index that is at least @indexp and no more than @max.
* If an entry is found, @indexp is updated to be the index of the entry.
* This function is protected by the RCU read lock, so it may not find
* entries which are being simultaneously added. It will not return an
* %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The entry, if found, otherwise %NULL.
*/
void *xa_find(struct xarray *xa, unsigned long *indexp,
unsigned long max, xa_mark_t filter)
{
XA_STATE(xas, xa, *indexp);
void *entry;
rcu_read_lock();
do {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);
static bool xas_sibling(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
unsigned long mask;
if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
return false;
mask = (XA_CHUNK_SIZE << node->shift) - 1;
return (xas->xa_index & mask) >
((unsigned long)xas->xa_offset << node->shift);
}
/**
* xa_find_after() - Search the XArray for a present entry.
* @xa: XArray.
* @indexp: Pointer to an index.
* @max: Maximum index to search to.
* @filter: Selection criterion.
*
* Finds the entry in @xa which matches the @filter and has the lowest
* index that is above @indexp and no more than @max.
* If an entry is found, @indexp is updated to be the index of the entry.
* This function is protected by the RCU read lock, so it may miss entries
* which are being simultaneously added. It will not return an
* %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The pointer, if found, otherwise %NULL.
*/
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
unsigned long max, xa_mark_t filter)
{
XA_STATE(xas, xa, *indexp + 1);
void *entry;
if (xas.xa_index == 0)
return NULL;
rcu_read_lock();
for (;;) {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
if (xas_invalid(&xas))
break;
if (xas_sibling(&xas))
continue;
if (!xas_retry(&xas, entry))
break;
}
rcu_read_unlock();
if (entry)
*indexp = xas.xa_index;
return entry;
}
EXPORT_SYMBOL(xa_find_after);
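/*
 * Illustrative sketch (editorial addition): walking all present entries with
 * xa_find()/xa_find_after(), which is what the xa_for_each() helper in
 * <linux/xarray.h> expands to.  The function name is hypothetical.
 */
static unsigned long __maybe_unused xa_walk_example(struct xarray *xa)
{
	unsigned long index = 0;
	unsigned long count = 0;
	void *entry;

	for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
		count++;	/* @index now holds the entry's index */

	return count;
}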
static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
unsigned long max, unsigned int n)
{
void *entry;
unsigned int i = 0;
rcu_read_lock();
xas_for_each(xas, entry, max) {
if (xas_retry(xas, entry))
continue;
dst[i++] = entry;
if (i == n)
break;
}
rcu_read_unlock();
return i;
}
static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
unsigned long max, unsigned int n, xa_mark_t mark)
{
void *entry;
unsigned int i = 0;
rcu_read_lock();
xas_for_each_marked(xas, entry, max, mark) {
if (xas_retry(xas, entry))
continue;
dst[i++] = entry;
if (i == n)
break;
}
rcu_read_unlock();
return i;
}
/**
* xa_extract() - Copy selected entries from the XArray into a normal array.
* @xa: The source XArray to copy from.
* @dst: The buffer to copy entries into.
* @start: The first index in the XArray eligible to be selected.
* @max: The last index in the XArray eligible to be selected.
* @n: The maximum number of entries to copy.
* @filter: Selection criterion.
*
* Copies up to @n entries that match @filter from the XArray. The
* copied entries will have indices between @start and @max, inclusive.
*
* The @filter may be an XArray mark value, in which case entries which are
* marked with that mark will be copied. It may also be %XA_PRESENT, in
* which case all entries which are not %NULL will be copied.
*
* The entries returned may not represent a snapshot of the XArray at a
* moment in time. For example, if another thread stores to index 5, then
* index 10, calling xa_extract() may return the old contents of index 5
* and the new contents of index 10. Indices not modified while this
* function is running will not be skipped.
*
* If you need stronger guarantees, holding the xa_lock across calls to this
* function will prevent concurrent modification.
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The number of entries copied.
*/
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
unsigned long max, unsigned int n, xa_mark_t filter)
{
XA_STATE(xas, xa, start);
if (!n)
return 0;
if ((__force unsigned int)filter < XA_MAX_MARKS)
return xas_extract_marked(&xas, dst, max, n, filter);
return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
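/*
 * Illustrative sketch (editorial addition): copying up to 16 marked entries
 * into a plain array.  Pass an XArray mark as the filter, or XA_PRESENT for
 * all non-NULL entries.  Names are hypothetical.
 */
static unsigned int __maybe_unused xa_extract_example(struct xarray *xa,
						      void **dst)
{
	/* Entries marked with XA_MARK_0 between indices 0 and 99 inclusive. */
	return xa_extract(xa, dst, 0, 99, 16, XA_MARK_0);
}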
/**
* xa_delete_node() - Private interface for workingset code.
* @node: Node to be removed from the tree.
* @update: Function to call to update ancestor nodes.
*
* Context: xa_lock must be held on entry and will not be released.
*/
void xa_delete_node(struct xa_node *node, xa_update_node_t update)
{
struct xa_state xas = {
.xa = node->array,
.xa_index = (unsigned long)node->offset <<
(node->shift + XA_CHUNK_SHIFT),
.xa_shift = node->shift + XA_CHUNK_SHIFT,
.xa_offset = node->offset,
.xa_node = xa_parent_locked(node->array, node),
.xa_update = update,
};
xas_store(&xas, NULL);
}
EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
/**
* xa_destroy() - Free all internal data structures.
* @xa: XArray.
*
* After calling this function, the XArray is empty and has freed all memory
* allocated for its internal data structures. You are responsible for
* freeing the objects referenced by the XArray.
*
* Context: Any context. Takes and releases the xa_lock, interrupt-safe.
*/
void xa_destroy(struct xarray *xa)
{
XA_STATE(xas, xa, 0);
unsigned long flags;
void *entry;
xas.xa_node = NULL;
xas_lock_irqsave(&xas, flags);
entry = xa_head_locked(xa);
RCU_INIT_POINTER(xa->xa_head, NULL);
xas_init_marks(&xas);
if (xa_zero_busy(xa))
xa_mark_clear(xa, XA_FREE_MARK);
/* lockdep checks we're still holding the lock in xas_free_nodes() */
if (xa_is_node(entry))
xas_free_nodes(&xas, xa_to_node(entry));
xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
unsigned i, j;
if (!node)
return;
if ((unsigned long)node & 3) {
pr_cont("node %px\n", node);
return;
}
pr_cont("node %px %s %d parent %px shift %d count %d values %d "
"array %px list %px %px marks",
node, node->parent ? "offset" : "max", node->offset,
node->parent, node->shift, node->count, node->nr_values,
node->array, node->private_list.prev, node->private_list.next);
for (i = 0; i < XA_MAX_MARKS; i++)
for (j = 0; j < XA_MARK_LONGS; j++)
pr_cont(" %lx", node->marks[i][j]);
pr_cont("\n");
}
void xa_dump_index(unsigned long index, unsigned int shift)
{
if (!shift)
pr_info("%lu: ", index);
else if (shift >= BITS_PER_LONG)
pr_info("0-%lu: ", ~0UL);
else
pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}
void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
if (!entry)
return;
xa_dump_index(index, shift);
if (xa_is_node(entry)) {
if (shift == 0) {
pr_cont("%px\n", entry);
} else {
unsigned long i;
struct xa_node *node = xa_to_node(entry);
xa_dump_node(node);
for (i = 0; i < XA_CHUNK_SIZE; i++)
xa_dump_entry(node->slots[i],
index + (i << node->shift), node->shift);
}
} else if (xa_is_value(entry))
pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
xa_to_value(entry), entry);
else if (!xa_is_internal(entry))
pr_cont("%px\n", entry);
else if (xa_is_retry(entry))
pr_cont("retry (%ld)\n", xa_to_internal(entry));
else if (xa_is_sibling(entry))
pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
else
pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}
void xa_dump(const struct xarray *xa)
{
void *entry = xa->xa_head;
unsigned int shift = 0;
pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
xa->xa_flags, xa_marked(xa, XA_MARK_0),
xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
if (xa_is_node(entry))
shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
xa_dump_entry(entry, 0, shift);
}
#endif
// SPDX-License-Identifier: GPL-2.0
#include <linux/memcontrol.h>
#include <linux/rwsem.h>
#include <linux/shrinker.h>
#include <linux/rculist.h>
#include <trace/events/vmscan.h>
#include "internal.h"
LIST_HEAD(shrinker_list);
DEFINE_MUTEX(shrinker_mutex);
#ifdef CONFIG_MEMCG
static int shrinker_nr_max;
static inline int shrinker_unit_size(int nr_items)
{
return (DIV_ROUND_UP(nr_items, SHRINKER_UNIT_BITS) * sizeof(struct shrinker_info_unit *));
}
static inline void shrinker_unit_free(struct shrinker_info *info, int start)
{
struct shrinker_info_unit **unit;
int nr, i;
if (!info)
return;
unit = info->unit;
nr = DIV_ROUND_UP(info->map_nr_max, SHRINKER_UNIT_BITS);
for (i = start; i < nr; i++) {
if (!unit[i])
break;
kfree(unit[i]);
unit[i] = NULL;
}
}
static inline int shrinker_unit_alloc(struct shrinker_info *new,
struct shrinker_info *old, int nid)
{
struct shrinker_info_unit *unit;
int nr = DIV_ROUND_UP(new->map_nr_max, SHRINKER_UNIT_BITS);
int start = old ? DIV_ROUND_UP(old->map_nr_max, SHRINKER_UNIT_BITS) : 0;
int i;
	for (i = start; i < nr; i++) {
		unit = kzalloc_node(sizeof(*unit), GFP_KERNEL, nid);
		if (!unit) {
			shrinker_unit_free(new, start);
			return -ENOMEM;
}
new->unit[i] = unit;
}
return 0;
}
void free_shrinker_info(struct mem_cgroup *memcg)
{
struct mem_cgroup_per_node *pn;
struct shrinker_info *info;
int nid;
for_each_node(nid) {
pn = memcg->nodeinfo[nid];
info = rcu_dereference_protected(pn->shrinker_info, true);
shrinker_unit_free(info, 0);
kvfree(info);
rcu_assign_pointer(pn->shrinker_info, NULL);
}
}
int alloc_shrinker_info(struct mem_cgroup *memcg)
{
int nid, ret = 0;
int array_size = 0;
mutex_lock(&shrinker_mutex);
array_size = shrinker_unit_size(shrinker_nr_max);
for_each_node(nid) {
struct shrinker_info *info = kvzalloc_node(sizeof(*info) + array_size,
GFP_KERNEL, nid);
if (!info)
goto err;
info->map_nr_max = shrinker_nr_max;
if (shrinker_unit_alloc(info, NULL, nid)) {
kvfree(info);
goto err;
}
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
}
mutex_unlock(&shrinker_mutex);
return ret;
err:
mutex_unlock(&shrinker_mutex);
free_shrinker_info(memcg);
return -ENOMEM;
}
static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
int nid)
{
return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
lockdep_is_held(&shrinker_mutex));
}
static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
int old_size, int new_nr_max)
{
struct shrinker_info *new, *old;
struct mem_cgroup_per_node *pn;
int nid;
	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
old = shrinker_info_protected(memcg, nid);
/* Not yet online memcg */
if (!old)
return 0;
/* Already expanded this shrinker_info */
if (new_nr_max <= old->map_nr_max)
continue;
new = kvzalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
if (!new)
return -ENOMEM;
		new->map_nr_max = new_nr_max;
		memcpy(new->unit, old->unit, old_size);
		if (shrinker_unit_alloc(new, old, nid)) {
kvfree(new);
return -ENOMEM;
}
		rcu_assign_pointer(pn->shrinker_info, new);
		kvfree_rcu(old, rcu);
}
return 0;
}
static int expand_shrinker_info(int new_id)
{
int ret = 0;
int new_nr_max = round_up(new_id + 1, SHRINKER_UNIT_BITS);
int new_size, old_size = 0;
struct mem_cgroup *memcg;
if (!root_mem_cgroup)
goto out;
	lockdep_assert_held(&shrinker_mutex);

	new_size = shrinker_unit_size(new_nr_max);
	old_size = shrinker_unit_size(shrinker_nr_max);
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
ret = expand_one_shrinker_info(memcg, new_size, old_size,
new_nr_max);
if (ret) {
mem_cgroup_iter_break(NULL, memcg);
goto out;
}
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
out:
if (!ret)
shrinker_nr_max = new_nr_max;
return ret;
}
static inline int shrinker_id_to_index(int shrinker_id)
{
return shrinker_id / SHRINKER_UNIT_BITS;
}
static inline int shrinker_id_to_offset(int shrinker_id)
{
return shrinker_id % SHRINKER_UNIT_BITS;
}
static inline int calc_shrinker_id(int index, int offset)
{
return index * SHRINKER_UNIT_BITS + offset;
}
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
struct shrinker_info *info;
struct shrinker_info_unit *unit;
rcu_read_lock();
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
		if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
			unit = info->unit[shrinker_id_to_index(shrinker_id)];
			/* Pairs with smp mb in shrink_slab() */
			smp_mb__before_atomic();
			set_bit(shrinker_id_to_offset(shrinker_id), unit->map);
}
rcu_read_unlock();
}
}
static DEFINE_IDR(shrinker_idr);
static int shrinker_memcg_alloc(struct shrinker *shrinker)
{
int id, ret = -ENOMEM;
if (mem_cgroup_disabled())
return -ENOSYS;
mutex_lock(&shrinker_mutex);
id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
if (id < 0)
		goto unlock;

	if (id >= shrinker_nr_max) {
		if (expand_shrinker_info(id)) {
idr_remove(&shrinker_idr, id);
goto unlock;
}
}
shrinker->id = id;
ret = 0;
unlock:
mutex_unlock(&shrinker_mutex);
return ret;
}
static void shrinker_memcg_remove(struct shrinker *shrinker)
{
int id = shrinker->id;
BUG_ON(id < 0);
lockdep_assert_held(&shrinker_mutex);
idr_remove(&shrinker_idr, id);
}
static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
struct mem_cgroup *memcg)
{
struct shrinker_info *info;
struct shrinker_info_unit *unit;
long nr_deferred;
rcu_read_lock();
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
unit = info->unit[shrinker_id_to_index(shrinker->id)];
nr_deferred = atomic_long_xchg(&unit->nr_deferred[shrinker_id_to_offset(shrinker->id)], 0);
rcu_read_unlock();
return nr_deferred;
}
static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
struct mem_cgroup *memcg)
{
struct shrinker_info *info;
struct shrinker_info_unit *unit;
long nr_deferred;
rcu_read_lock();
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
unit = info->unit[shrinker_id_to_index(shrinker->id)];
nr_deferred =
atomic_long_add_return(nr, &unit->nr_deferred[shrinker_id_to_offset(shrinker->id)]);
rcu_read_unlock();
return nr_deferred;
}
void reparent_shrinker_deferred(struct mem_cgroup *memcg)
{
int nid, index, offset;
long nr;
struct mem_cgroup *parent;
struct shrinker_info *child_info, *parent_info;
struct shrinker_info_unit *child_unit, *parent_unit;
parent = parent_mem_cgroup(memcg);
if (!parent)
parent = root_mem_cgroup;
/* Prevent from concurrent shrinker_info expand */
mutex_lock(&shrinker_mutex);
for_each_node(nid) {
child_info = shrinker_info_protected(memcg, nid);
parent_info = shrinker_info_protected(parent, nid);
for (index = 0; index < shrinker_id_to_index(child_info->map_nr_max); index++) {
child_unit = child_info->unit[index];
parent_unit = parent_info->unit[index];
for (offset = 0; offset < SHRINKER_UNIT_BITS; offset++) {
nr = atomic_long_read(&child_unit->nr_deferred[offset]);
atomic_long_add(nr, &parent_unit->nr_deferred[offset]);
}
}
}
mutex_unlock(&shrinker_mutex);
}
#else
static int shrinker_memcg_alloc(struct shrinker *shrinker)
{
return -ENOSYS;
}
static void shrinker_memcg_remove(struct shrinker *shrinker)
{
}
static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
struct mem_cgroup *memcg)
{
return 0;
}
static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
struct mem_cgroup *memcg)
{
return 0;
}
#endif /* CONFIG_MEMCG */
static long xchg_nr_deferred(struct shrinker *shrinker,
struct shrink_control *sc)
{
int nid = sc->nid;
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
nid = 0;
if (sc->memcg &&
(shrinker->flags & SHRINKER_MEMCG_AWARE))
return xchg_nr_deferred_memcg(nid, shrinker,
sc->memcg);
return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
}
static long add_nr_deferred(long nr, struct shrinker *shrinker,
struct shrink_control *sc)
{
int nid = sc->nid;
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
nid = 0;
if (sc->memcg &&
(shrinker->flags & SHRINKER_MEMCG_AWARE))
return add_nr_deferred_memcg(nr, nid, shrinker,
sc->memcg);
return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
}
#define SHRINK_BATCH 128
static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
struct shrinker *shrinker, int priority)
{
unsigned long freed = 0;
unsigned long long delta;
long total_scan;
long freeable;
long nr;
long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
long scanned = 0, next_deferred;
freeable = shrinker->count_objects(shrinker, shrinkctl);
if (freeable == 0 || freeable == SHRINK_EMPTY)
return freeable;
/*
* copy the current shrinker scan count into a local variable
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
nr = xchg_nr_deferred(shrinker, shrinkctl);
if (shrinker->seeks) {
delta = freeable >> priority;
delta *= 4;
do_div(delta, shrinker->seeks);
} else {
/*
* These objects don't require any IO to create. Trim
* them aggressively under memory pressure to keep
* them from causing refetches in the IO caches.
*/
delta = freeable / 2;
}
total_scan = nr >> priority;
total_scan += delta;
total_scan = min(total_scan, (2 * freeable));
trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
freeable, delta, total_scan, priority);
/*
* Normally, we should not scan less than batch_size objects in one
* pass to avoid too frequent shrinker calls, but if the slab has less
* than batch_size objects in total and we are really tight on memory,
* we will try to reclaim all available objects, otherwise we can end
* up failing allocations although there are plenty of reclaimable
* objects spread over several slabs with usage less than the
* batch_size.
*
* We detect the "tight on memory" situations by looking at the total
* number of objects we want to scan (total_scan). If it is greater
* than the total number of objects on slab (freeable), we must be
* scanning at high prio and therefore should try to reclaim as much as
* possible.
*/
while (total_scan >= batch_size ||
total_scan >= freeable) {
unsigned long ret;
unsigned long nr_to_scan = min(batch_size, total_scan);
shrinkctl->nr_to_scan = nr_to_scan;
shrinkctl->nr_scanned = nr_to_scan;
ret = shrinker->scan_objects(shrinker, shrinkctl);
if (ret == SHRINK_STOP)
break;
freed += ret;
count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
total_scan -= shrinkctl->nr_scanned;
scanned += shrinkctl->nr_scanned;
cond_resched();
}
/*
* The deferred work is increased by any new work (delta) that wasn't
* done, decreased by old deferred work that was done now.
*
* And it is capped to two times of the freeable items.
*/
next_deferred = max_t(long, (nr + delta - scanned), 0);
next_deferred = min(next_deferred, (2 * freeable));
/*
* move the unused scan count back into the shrinker in a
* manner that handles concurrent updates.
*/
new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
return freed;
}
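/*
 * Illustrative sketch (editorial addition): the scan-target arithmetic used
 * above, pulled out for clarity.  With the default seeks (DEFAULT_SEEKS == 2)
 * and priority 12 (DEF_PRIORITY), a cache with 8192 freeable objects yields
 * delta = (8192 >> 12) * 4 / 2 = 4 objects per shrink_slab() call.
 */
static unsigned long __maybe_unused shrink_delta_example(unsigned long freeable,
							 int priority, int seeks)
{
	unsigned long long delta = freeable >> priority;

	delta *= 4;
	do_div(delta, seeks);
	return delta;
}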
#ifdef CONFIG_MEMCG
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg, int priority)
{
struct shrinker_info *info;
unsigned long ret, freed = 0;
int offset, index = 0;
if (!mem_cgroup_online(memcg))
return 0;
/*
* lockless algorithm of memcg shrink.
*
* The shrinker_info may be freed asynchronously via RCU in the
* expand_one_shrinker_info(), so the rcu_read_lock() needs to be used
* to ensure the existence of the shrinker_info.
*
* The shrinker_info_unit is never freed unless its corresponding memcg
* is destroyed. Here we already hold the refcount of memcg, so the
* memcg will not be destroyed, and of course shrinker_info_unit will
* not be freed.
*
* So in the memcg shrink:
* step 1: use rcu_read_lock() to guarantee existence of the
* shrinker_info.
* step 2: after getting shrinker_info_unit we can safely release the
* RCU lock.
* step 3: traverse the bitmap and calculate shrinker_id
* step 4: use rcu_read_lock() to guarantee existence of the shrinker.
* step 5: use shrinker_id to find the shrinker, then use
* shrinker_try_get() to guarantee existence of the shrinker,
* then we can release the RCU lock to do do_shrink_slab() that
* may sleep.
* step 6: do shrinker_put() paired with step 5 to put the refcount,
* if the refcount reaches 0, then wake up the waiter in
* shrinker_free() by calling complete().
* Note: here is different from the global shrink, we don't
* need to acquire the RCU lock to guarantee existence of
* the shrinker, because we don't need to use this
* shrinker to traverse the next shrinker in the bitmap.
* step 7: we have already exited the read-side of rcu critical section
* before calling do_shrink_slab(), the shrinker_info may be
* released in expand_one_shrinker_info(), so go back to step 1
* to reacquire the shrinker_info.
*/
again:
rcu_read_lock();
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
if (unlikely(!info))
goto unlock;
if (index < shrinker_id_to_index(info->map_nr_max)) {
struct shrinker_info_unit *unit;
unit = info->unit[index];
rcu_read_unlock();
for_each_set_bit(offset, unit->map, SHRINKER_UNIT_BITS) {
struct shrink_control sc = {
.gfp_mask = gfp_mask,
.nid = nid,
.memcg = memcg,
};
struct shrinker *shrinker;
int shrinker_id = calc_shrinker_id(index, offset);
rcu_read_lock();
shrinker = idr_find(&shrinker_idr, shrinker_id);
if (unlikely(!shrinker || !shrinker_try_get(shrinker))) {
clear_bit(offset, unit->map);
rcu_read_unlock();
continue;
}
rcu_read_unlock();
/* Call non-slab shrinkers even though kmem is disabled */
if (!memcg_kmem_online() &&
!(shrinker->flags & SHRINKER_NONSLAB))
continue;
ret = do_shrink_slab(&sc, shrinker, priority);
if (ret == SHRINK_EMPTY) {
clear_bit(offset, unit->map);
/*
* After the shrinker reported that it had no objects to
* free, but before we cleared the corresponding bit in
* the memcg shrinker map, a new object might have been
* added. To make sure, we have the bit set in this
* case, we invoke the shrinker one more time and reset
* the bit if it reports that it is not empty anymore.
* The memory barrier here pairs with the barrier in
* set_shrinker_bit():
*
* list_lru_add() shrink_slab_memcg()
* list_add_tail() clear_bit()
* <MB> <MB>
* set_bit() do_shrink_slab()
*/
smp_mb__after_atomic();
ret = do_shrink_slab(&sc, shrinker, priority);
if (ret == SHRINK_EMPTY)
ret = 0;
else
set_shrinker_bit(memcg, nid, shrinker_id);
}
freed += ret;
shrinker_put(shrinker);
}
index++;
goto again;
}
unlock:
rcu_read_unlock();
return freed;
}
#else /* !CONFIG_MEMCG */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg, int priority)
{
return 0;
}
#endif /* CONFIG_MEMCG */
/**
* shrink_slab - shrink slab caches
* @gfp_mask: allocation context
* @nid: node whose slab caches to target
* @memcg: memory cgroup whose slab caches to target
* @priority: the reclaim priority
*
* Call the shrink functions to age shrinkable caches.
*
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set;
 * unaware shrinkers will receive a node id of 0 instead.
*
* @memcg specifies the memory cgroup to target. Unaware shrinkers
* are called only if it is the root cgroup.
*
 * @priority is sc->priority; the number of freeable objects is shifted right
 * by @priority to derive the scan target.
*
* Returns the number of reclaimed slab objects.
*/
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
int priority)
{
unsigned long ret, freed = 0;
struct shrinker *shrinker;
/*
* The root memcg might be allocated even though memcg is disabled
* via "cgroup_disable=memory" boot parameter. This could make
* mem_cgroup_is_root() return false, then just run memcg slab
* shrink, but skip global shrink. This may result in premature
* oom.
*/
if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
/*
* lockless algorithm of global shrink.
*
 * In the unregistration step, the shrinker will be freed asynchronously
* via RCU after its refcount reaches 0. So both rcu_read_lock() and
* shrinker_try_get() can be used to ensure the existence of the shrinker.
*
* So in the global shrink:
* step 1: use rcu_read_lock() to guarantee existence of the shrinker
* and the validity of the shrinker_list walk.
* step 2: use shrinker_try_get() to try get the refcount, if successful,
* then the existence of the shrinker can also be guaranteed,
* so we can release the RCU lock to do do_shrink_slab() that
* may sleep.
 * step 3: *MUST* reacquire the RCU lock before calling shrinker_put(),
* which ensures that neither this shrinker nor the next shrinker
* will be freed in the next traversal operation.
* step 4: do shrinker_put() paired with step 2 to put the refcount,
* if the refcount reaches 0, then wake up the waiter in
* shrinker_free() by calling complete().
*/
rcu_read_lock();
list_for_each_entry_rcu(shrinker, &shrinker_list, list) {
struct shrink_control sc = {
.gfp_mask = gfp_mask,
.nid = nid,
.memcg = memcg,
};
if (!shrinker_try_get(shrinker))
continue;
rcu_read_unlock();
ret = do_shrink_slab(&sc, shrinker, priority);
if (ret == SHRINK_EMPTY)
ret = 0;
freed += ret;
rcu_read_lock();
shrinker_put(shrinker);
}
rcu_read_unlock();
cond_resched();
return freed;
}
struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...)
{
struct shrinker *shrinker;
unsigned int size;
va_list ap;
int err;
shrinker = kzalloc(sizeof(struct shrinker), GFP_KERNEL);
if (!shrinker)
return NULL;
va_start(ap, fmt);
err = shrinker_debugfs_name_alloc(shrinker, fmt, ap);
va_end(ap);
if (err)
goto err_name;
shrinker->flags = flags | SHRINKER_ALLOCATED;
shrinker->seeks = DEFAULT_SEEKS;
	if (flags & SHRINKER_MEMCG_AWARE) {
		err = shrinker_memcg_alloc(shrinker);
if (err == -ENOSYS) {
/* Memcg is not supported, fallback to non-memcg-aware shrinker. */
shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
goto non_memcg;
}
if (err)
goto err_flags;
return shrinker;
}
non_memcg:
/*
* The nr_deferred is available on per memcg level for memcg aware
* shrinkers, so only allocate nr_deferred in the following cases:
* - non-memcg-aware shrinkers
* - !CONFIG_MEMCG
* - memcg is disabled by kernel command line
*/
size = sizeof(*shrinker->nr_deferred);
	if (flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		goto err_flags;
return shrinker;
err_flags:
shrinker_debugfs_name_free(shrinker);
err_name:
	kfree(shrinker);
	return NULL;
}
EXPORT_SYMBOL_GPL(shrinker_alloc);
void shrinker_register(struct shrinker *shrinker)
{
if (unlikely(!(shrinker->flags & SHRINKER_ALLOCATED))) {
pr_warn("Must use shrinker_alloc() to dynamically allocate the shrinker");
return;
}
mutex_lock(&shrinker_mutex);
list_add_tail_rcu(&shrinker->list, &shrinker_list);
shrinker->flags |= SHRINKER_REGISTERED;
shrinker_debugfs_add(shrinker);
mutex_unlock(&shrinker_mutex);
init_completion(&shrinker->done);
/*
* Now the shrinker is fully set up, take the first reference to it to
* indicate that lookup operations are now allowed to use it via
* shrinker_try_get().
*/
	refcount_set(&shrinker->refcount, 1);
}
EXPORT_SYMBOL_GPL(shrinker_register);
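/*
 * Illustrative sketch (editorial addition): the expected alloc/register
 * pairing for a cache owner.  The count/scan callbacks and the "demo" name
 * are hypothetical; a real user would walk its own LRU in scan_objects() and
 * return the number of objects freed, or SHRINK_STOP to abort.
 */
static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;	/* no reclaimable objects in this sketch */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static int __maybe_unused demo_shrinker_setup(void)
{
	struct shrinker *s = shrinker_alloc(0, "demo-shrinker");

	if (!s)
		return -ENOMEM;
	s->count_objects = demo_count;
	s->scan_objects = demo_scan;
	shrinker_register(s);	/* lookups may use it from this point on */
	return 0;
}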
static void shrinker_free_rcu_cb(struct rcu_head *head)
{
struct shrinker *shrinker = container_of(head, struct shrinker, rcu);
kfree(shrinker->nr_deferred);
kfree(shrinker);
}
void shrinker_free(struct shrinker *shrinker)
{
struct dentry *debugfs_entry = NULL;
int debugfs_id;
if (!shrinker)
return;
if (shrinker->flags & SHRINKER_REGISTERED) {
/* drop the initial refcount */
shrinker_put(shrinker);
/*
* Wait for all lookups of the shrinker to complete, after that,
* no shrinker is running or will run again, then we can safely
* free it asynchronously via RCU and safely free the structure
* where the shrinker is located, such as super_block etc.
*/
wait_for_completion(&shrinker->done);
}
mutex_lock(&shrinker_mutex);
if (shrinker->flags & SHRINKER_REGISTERED) {
/*
* Now we can safely remove it from the shrinker_list and then
* free it.
*/
list_del_rcu(&shrinker->list);
debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
shrinker->flags &= ~SHRINKER_REGISTERED;
}
shrinker_debugfs_name_free(shrinker);
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
shrinker_memcg_remove(shrinker);
mutex_unlock(&shrinker_mutex);
if (debugfs_entry)
shrinker_debugfs_remove(debugfs_entry, debugfs_id);
call_rcu(&shrinker->rcu, shrinker_free_rcu_cb);
}
EXPORT_SYMBOL_GPL(shrinker_free);
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Queue of folios definitions
*
* Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* See:
*
* Documentation/core-api/folio_queue.rst
*
* for a description of the API.
*/
#ifndef _LINUX_FOLIO_QUEUE_H
#define _LINUX_FOLIO_QUEUE_H
#include <linux/pagevec.h>
#include <linux/mm.h>
/*
* Segment in a queue of running buffers. Each segment can hold a number of
* folios and a portion of the queue can be referenced with the ITER_FOLIOQ
* iterator. The possibility exists of inserting non-folio elements into the
* queue (such as gaps).
*
* Explicit prev and next pointers are used instead of a list_head to make it
* easier to add segments to tail and remove them from the head without the
* need for a lock.
*/
struct folio_queue {
struct folio_batch vec; /* Folios in the queue segment */
u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
struct folio_queue *next; /* Next queue segment or NULL */
struct folio_queue *prev; /* Previous queue segment or NULL */
unsigned long marks; /* 1-bit mark per folio */
unsigned long marks2; /* Second 1-bit mark per folio */
#if PAGEVEC_SIZE > BITS_PER_LONG
#error marks is not big enough
#endif
unsigned int rreq_id;
unsigned int debug_id;
};
/**
* folioq_init - Initialise a folio queue segment
* @folioq: The segment to initialise
* @rreq_id: The request identifier to use in tracelines.
*
* Initialise a folio queue segment and set an identifier to be used in traces.
*
* Note that the folio pointers are left uninitialised.
*/
static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
{
folio_batch_init(&folioq->vec);
folioq->next = NULL;
folioq->prev = NULL;
folioq->marks = 0;
folioq->marks2 = 0;
folioq->rreq_id = rreq_id;
folioq->debug_id = 0;
}
/**
* folioq_nr_slots: Query the capacity of a folio queue segment
* @folioq: The segment to query
*
* Query the number of folios that a particular folio queue segment might hold.
* [!] NOTE: This must not be assumed to be the same for every segment!
*/
static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
{
return PAGEVEC_SIZE;
}
/**
* folioq_count: Query the occupancy of a folio queue segment
* @folioq: The segment to query
*
* Query the number of folios that have been added to a folio queue segment.
* Note that this is not decreased as folios are removed from a segment.
*/
static inline unsigned int folioq_count(struct folio_queue *folioq)
{
return folio_batch_count(&folioq->vec);
}
/**
* folioq_full: Query if a folio queue segment is full
* @folioq: The segment to query
*
* Query if a folio queue segment is fully occupied. Note that this does not
* change if folios are removed from a segment.
*/
static inline bool folioq_full(struct folio_queue *folioq)
{
//return !folio_batch_space(&folioq->vec);
return folioq_count(folioq) >= folioq_nr_slots(folioq);
}
/**
* folioq_is_marked: Check first folio mark in a folio queue segment
* @folioq: The segment to query
* @slot: The slot number of the folio to query
*
* Determine if the first mark is set for the folio in the specified slot in a
* folio queue segment.
*/
static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks);
}
/**
* folioq_mark: Set the first mark on a folio in a folio queue segment
* @folioq: The segment to modify
* @slot: The slot number of the folio to modify
*
* Set the first mark for the folio in the specified slot in a folio queue
* segment.
*/
static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks);
}
/**
* folioq_unmark: Clear the first mark on a folio in a folio queue segment
* @folioq: The segment to modify
* @slot: The slot number of the folio to modify
*
* Clear the first mark for the folio in the specified slot in a folio queue
* segment.
*/
static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks);
}
/**
* folioq_is_marked2: Check second folio mark in a folio queue segment
* @folioq: The segment to query
* @slot: The slot number of the folio to query
*
* Determine if the second mark is set for the folio in the specified slot in a
* folio queue segment.
*/
static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks2);
}
/**
* folioq_mark2: Set the second mark on a folio in a folio queue segment
* @folioq: The segment to modify
* @slot: The slot number of the folio to modify
*
* Set the second mark for the folio in the specified slot in a folio queue
* segment.
*/
static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks2);
}
/**
* folioq_unmark2: Clear the second mark on a folio in a folio queue segment
* @folioq: The segment to modify
* @slot: The slot number of the folio to modify
*
* Clear the second mark for the folio in the specified slot in a folio queue
* segment.
*/
static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks2);
}
/**
* folioq_append: Add a folio to a folio queue segment
* @folioq: The segment to add to
* @folio: The folio to add
*
* Add a folio to the tail of the sequence in a folio queue segment, increasing
* the occupancy count and returning the slot number for the folio just added.
* The folio size is extracted and stored in the queue and the marks are left
* unmodified.
*
* Note that it's left up to the caller to check that the segment capacity will
* not be exceeded and to extend the queue.
*/
static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
{
unsigned int slot = folioq->vec.nr++;
folioq->vec.folios[slot] = folio;
folioq->orders[slot] = folio_order(folio);
return slot;
}
/**
* folioq_append_mark: Add a folio to a folio queue segment
* @folioq: The segment to add to
* @folio: The folio to add
*
* Add a folio to the tail of the sequence in a folio queue segment, increasing
* the occupancy count and returning the slot number for the folio just added.
* The folio size is extracted and stored in the queue, the first mark is set
 * and the second mark is left unmodified.
*
* Note that it's left up to the caller to check that the segment capacity will
* not be exceeded and to extend the queue.
*/
static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
{
unsigned int slot = folioq->vec.nr++;
folioq->vec.folios[slot] = folio;
folioq->orders[slot] = folio_order(folio);
folioq_mark(folioq, slot);
return slot;
}
/**
* folioq_folio: Get a folio from a folio queue segment
* @folioq: The segment to access
* @slot: The folio slot to access
*
* Retrieve the folio in the specified slot from a folio queue segment. Note
* that no bounds check is made and if the slot hasn't been added into yet, the
* pointer will be undefined. If the slot has been cleared, NULL will be
* returned.
*/
static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
{
return folioq->vec.folios[slot];
}
/**
* folioq_folio_order: Get the order of a folio from a folio queue segment
* @folioq: The segment to access
* @slot: The folio slot to access
*
* Retrieve the order of the folio in the specified slot from a folio queue
* segment. Note that no bounds check is made and if the slot hasn't been
* added into yet, the order returned will be 0.
*/
static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
{
return folioq->orders[slot];
}
/**
* folioq_folio_size: Get the size of a folio from a folio queue segment
* @folioq: The segment to access
* @slot: The folio slot to access
*
* Retrieve the size of the folio in the specified slot from a folio queue
* segment. Note that no bounds check is made and if the slot hasn't been
* added into yet, the size returned will be PAGE_SIZE.
*/
static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
{
return PAGE_SIZE << folioq_folio_order(folioq, slot);
}
/**
* folioq_clear: Clear a folio from a folio queue segment
* @folioq: The segment to clear
* @slot: The folio slot to clear
*
* Clear a folio from a sequence in a folio queue segment and clear its marks.
* The occupancy count is left unchanged.
*/
static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
{
folioq->vec.folios[slot] = NULL;
folioq_unmark(folioq, slot);
folioq_unmark2(folioq, slot);
}
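/*
 * Illustrative sketch (editorial addition): filling a queue segment and
 * totalling the bytes it covers.  Callers are expected to check folioq_full()
 * themselves before appending; the helper name below is hypothetical.
 */
static inline size_t folioq_fill_example(struct folio_queue *folioq,
					 struct folio **folios,
					 unsigned int nr_folios)
{
	size_t bytes = 0;
	unsigned int i, slot;

	folioq_init(folioq, 0);
	for (i = 0; i < nr_folios && !folioq_full(folioq); i++) {
		slot = folioq_append(folioq, folios[i]);
		bytes += folioq_folio_size(folioq, slot);
	}
	return bytes;
}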
#endif /* _LINUX_FOLIO_QUEUE_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NetLabel Domain Hash Table
*
* This file manages the domain hash table that NetLabel uses to determine
* which network labeling protocol to use for a given domain. The NetLabel
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
* Author: Paul Moore <paul@paul-moore.com>
*/
/*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008
*/
#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/audit.h>
#include <linux/slab.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <net/calipso.h>
#include <asm/bug.h>
#include "netlabel_mgmt.h"
#include "netlabel_addrlist.h"
#include "netlabel_calipso.h"
#include "netlabel_domainhash.h"
#include "netlabel_user.h"
struct netlbl_domhsh_tbl {
struct list_head *tbl;
u32 size;
};
/* Domain hash table */
/* updates should be so rare that having one spinlock for the entire hash table
* should be okay */
static DEFINE_SPINLOCK(netlbl_domhsh_lock);
#define netlbl_domhsh_rcu_deref(p) \
rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock))
static struct netlbl_domhsh_tbl __rcu *netlbl_domhsh;
static struct netlbl_dom_map __rcu *netlbl_domhsh_def_ipv4;
static struct netlbl_dom_map __rcu *netlbl_domhsh_def_ipv6;
/*
* Domain Hash Table Helper Functions
*/
/**
* netlbl_domhsh_free_entry - Frees a domain hash table entry
* @entry: the entry's RCU field
*
* Description:
* This function is designed to be used as a callback to the call_rcu()
* function so that the memory allocated to a hash table entry can be released
* safely.
*
*/
static void netlbl_domhsh_free_entry(struct rcu_head *entry)
{
struct netlbl_dom_map *ptr;
struct netlbl_af4list *iter4;
struct netlbl_af4list *tmp4;
#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
ptr = container_of(entry, struct netlbl_dom_map, rcu);
if (ptr->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_safe(iter4, tmp4,
&ptr->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
kfree(netlbl_domhsh_addr4_entry(iter4));
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
&ptr->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
kfree(netlbl_domhsh_addr6_entry(iter6));
}
#endif /* IPv6 */
kfree(ptr->def.addrsel);
}
kfree(ptr->domain);
kfree(ptr);
}
/**
* netlbl_domhsh_hash - Hashing function for the domain hash table
* @key: the domain name to hash
*
* Description:
* This is the hashing function for the domain hash table, it returns the
* correct bucket number for the domain. The caller is responsible for
* ensuring that the hash table is protected with either a RCU read lock or the
* hash table lock.
*
*/
static u32 netlbl_domhsh_hash(const char *key)
{
u32 iter;
u32 val;
u32 len;
/* This is taken (with slight modification) from
* security/selinux/ss/symtab.c:symhash() */
	for (iter = 0, val = 0, len = strlen(key); iter < len; iter++)
		val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter];
	return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1);
}
static bool netlbl_family_match(u16 f1, u16 f2)
{
return (f1 == f2) || (f1 == AF_UNSPEC) || (f2 == AF_UNSPEC);
}
/**
* netlbl_domhsh_search - Search for a domain entry
* @domain: the domain
* @family: the address family
*
* Description:
* Searches the domain hash table and returns a pointer to the hash table
* entry if found, otherwise NULL is returned. @family may be %AF_UNSPEC
* which matches any address family entries. The caller is responsible for
* ensuring that the hash table is protected with either a RCU read lock or the
* hash table lock.
*
*/
static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain,
u16 family)
{
u32 bkt;
struct list_head *bkt_list;
struct netlbl_dom_map *iter;
if (domain != NULL) {
bkt = netlbl_domhsh_hash(domain);
		bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];
		list_for_each_entry_rcu(iter, bkt_list, list,
					lockdep_is_held(&netlbl_domhsh_lock))
			if (iter->valid &&
			    netlbl_family_match(iter->family, family) &&
			    strcmp(iter->domain, domain) == 0)
				return iter;
	}

	return NULL;
}
/**
* netlbl_domhsh_search_def - Search for a domain entry
* @domain: the domain
* @family: the address family
*
* Description:
* Searches the domain hash table and returns a pointer to the hash table
* entry if an exact match is found, if an exact match is not present in the
* hash table then the default entry is returned if valid otherwise NULL is
* returned. @family may be %AF_UNSPEC which matches any address family
 * entries. The caller is responsible for ensuring that the hash table is
* protected with either a RCU read lock or the hash table lock.
*
*/
static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain,
u16 family)
{
struct netlbl_dom_map *entry;
entry = netlbl_domhsh_search(domain, family);
if (entry != NULL)
return entry;
	if (family == AF_INET || family == AF_UNSPEC) {
		entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv4);
		if (entry != NULL && entry->valid)
			return entry;
	}
	if (family == AF_INET6 || family == AF_UNSPEC) {
		entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv6);
		if (entry != NULL && entry->valid)
			return entry;
	}
return NULL;
}
/**
* netlbl_domhsh_audit_add - Generate an audit entry for an add event
* @entry: the entry being added
* @addr4: the IPv4 address information
* @addr6: the IPv6 address information
* @result: the result code
* @audit_info: NetLabel audit information
*
* Description:
* Generate an audit record for adding a new NetLabel/LSM mapping entry with
* the given information. Caller is responsible for holding the necessary
* locks.
*
*/
static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
struct netlbl_af4list *addr4,
struct netlbl_af6list *addr6,
int result,
struct netlbl_audit *audit_info)
{
struct audit_buffer *audit_buf;
struct cipso_v4_doi *cipsov4 = NULL;
struct calipso_doi *calipso = NULL;
u32 type;
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf, " nlbl_domain=%s",
entry->domain ? entry->domain : "(default)");
if (addr4 != NULL) {
struct netlbl_domaddr4_map *map4;
map4 = netlbl_domhsh_addr4_entry(addr4);
type = map4->def.type;
cipsov4 = map4->def.cipso;
netlbl_af4list_audit_addr(audit_buf, 0, NULL,
addr4->addr, addr4->mask);
#if IS_ENABLED(CONFIG_IPV6)
} else if (addr6 != NULL) {
struct netlbl_domaddr6_map *map6;
map6 = netlbl_domhsh_addr6_entry(addr6);
type = map6->def.type;
calipso = map6->def.calipso;
netlbl_af6list_audit_addr(audit_buf, 0, NULL,
&addr6->addr, &addr6->mask);
#endif /* IPv6 */
} else {
type = entry->def.type;
cipsov4 = entry->def.cipso;
calipso = entry->def.calipso;
}
switch (type) {
case NETLBL_NLTYPE_UNLABELED:
audit_log_format(audit_buf, " nlbl_protocol=unlbl");
break;
case NETLBL_NLTYPE_CIPSOV4:
BUG_ON(cipsov4 == NULL);
audit_log_format(audit_buf,
" nlbl_protocol=cipsov4 cipso_doi=%u",
cipsov4->doi);
break;
case NETLBL_NLTYPE_CALIPSO:
BUG_ON(calipso == NULL);
audit_log_format(audit_buf,
" nlbl_protocol=calipso calipso_doi=%u",
calipso->doi);
break;
}
audit_log_format(audit_buf, " res=%u", result == 0 ? 1 : 0);
audit_log_end(audit_buf);
}
}
/**
* netlbl_domhsh_validate - Validate a new domain mapping entry
* @entry: the entry to validate
*
 * Description:
 * This function validates the new domain mapping entry to ensure that it is
* a valid entry. Returns zero on success, negative values on failure.
*
*/
static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
{
struct netlbl_af4list *iter4;
struct netlbl_domaddr4_map *map4;
#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_domaddr6_map *map6;
#endif /* IPv6 */
if (entry == NULL)
return -EINVAL;
if (entry->family != AF_INET && entry->family != AF_INET6 &&
(entry->family != AF_UNSPEC ||
entry->def.type != NETLBL_NLTYPE_UNLABELED))
return -EINVAL;
switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
if (entry->def.cipso != NULL || entry->def.calipso != NULL ||
entry->def.addrsel != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
if (entry->family != AF_INET ||
entry->def.cipso == NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CALIPSO:
if (entry->family != AF_INET6 ||
entry->def.calipso == NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_ADDRSELECT:
netlbl_af4list_foreach(iter4, &entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
switch (map4->def.type) {
case NETLBL_NLTYPE_UNLABELED:
if (map4->def.cipso != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
if (map4->def.cipso == NULL)
return -EINVAL;
break;
default:
return -EINVAL;
}
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach(iter6, &entry->def.addrsel->list6) {
map6 = netlbl_domhsh_addr6_entry(iter6);
switch (map6->def.type) {
case NETLBL_NLTYPE_UNLABELED:
if (map6->def.calipso != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CALIPSO:
if (map6->def.calipso == NULL)
return -EINVAL;
break;
default:
return -EINVAL;
}
}
#endif /* IPv6 */
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Domain Hash Table Functions
*/
/**
* netlbl_domhsh_init - Init for the domain hash
* @size: the number of bits to use for the hash buckets
*
* Description:
* Initializes the domain hash table, should be called only by
* netlbl_user_init() during initialization. Returns zero on success, non-zero
* values on error.
*
*/
int __init netlbl_domhsh_init(u32 size)
{
u32 iter;
struct netlbl_domhsh_tbl *hsh_tbl;
if (size == 0)
return -EINVAL;
hsh_tbl = kmalloc(sizeof(*hsh_tbl), GFP_KERNEL);
if (hsh_tbl == NULL)
return -ENOMEM;
hsh_tbl->size = 1 << size;
hsh_tbl->tbl = kcalloc(hsh_tbl->size,
sizeof(struct list_head),
GFP_KERNEL);
if (hsh_tbl->tbl == NULL) {
kfree(hsh_tbl);
return -ENOMEM;
}
for (iter = 0; iter < hsh_tbl->size; iter++)
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
spin_lock(&netlbl_domhsh_lock);
rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
spin_unlock(&netlbl_domhsh_lock);
return 0;
}
/**
 * netlbl_domhsh_add - Adds an entry to the domain hash table
* @entry: the entry to add
* @audit_info: NetLabel audit information
*
* Description:
* Adds a new entry to the domain hash table and handles any updates to the
* lower level protocol handler (i.e. CIPSO). @entry->family may be set to
* %AF_UNSPEC which will add an entry that matches all address families. This
* is only useful for the unlabelled type and will only succeed if there is no
* existing entry for any address family with the same domain. Returns zero
* on success, negative on failure.
*
*/
int netlbl_domhsh_add(struct netlbl_dom_map *entry,
struct netlbl_audit *audit_info)
{
int ret_val = 0;
struct netlbl_dom_map *entry_old, *entry_b;
struct netlbl_af4list *iter4;
struct netlbl_af4list *tmp4;
#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
ret_val = netlbl_domhsh_validate(entry);
if (ret_val != 0)
return ret_val;
/* XXX - we can remove this RCU read lock as the spinlock protects the
* entire function, but before we do we need to fixup the
* netlbl_af[4,6]list RCU functions to do "the right thing" with
* respect to rcu_dereference() when only a spinlock is held. */
rcu_read_lock();
spin_lock(&netlbl_domhsh_lock);
if (entry->domain != NULL)
entry_old = netlbl_domhsh_search(entry->domain, entry->family);
else
entry_old = netlbl_domhsh_search_def(entry->domain,
entry->family);
if (entry_old == NULL) {
entry->valid = 1;
if (entry->domain != NULL) {
u32 bkt = netlbl_domhsh_hash(entry->domain);
list_add_tail_rcu(&entry->list,
&rcu_dereference(netlbl_domhsh)->tbl[bkt]);
} else {
INIT_LIST_HEAD(&entry->list);
switch (entry->family) {
case AF_INET:
rcu_assign_pointer(netlbl_domhsh_def_ipv4,
entry);
break;
case AF_INET6:
rcu_assign_pointer(netlbl_domhsh_def_ipv6,
entry);
break;
case AF_UNSPEC:
if (entry->def.type !=
NETLBL_NLTYPE_UNLABELED) {
ret_val = -EINVAL;
goto add_return;
}
entry_b = kzalloc(sizeof(*entry_b), GFP_ATOMIC);
if (entry_b == NULL) {
ret_val = -ENOMEM;
goto add_return;
}
entry_b->family = AF_INET6;
entry_b->def.type = NETLBL_NLTYPE_UNLABELED;
entry_b->valid = 1;
entry->family = AF_INET;
rcu_assign_pointer(netlbl_domhsh_def_ipv4,
entry);
rcu_assign_pointer(netlbl_domhsh_def_ipv6,
entry_b);
break;
default:
/* Already checked in
* netlbl_domhsh_validate(). */
ret_val = -EINVAL;
goto add_return;
}
}
if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_rcu(iter4,
&entry->def.addrsel->list4)
netlbl_domhsh_audit_add(entry, iter4, NULL,
ret_val, audit_info);
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
&entry->def.addrsel->list6)
netlbl_domhsh_audit_add(entry, NULL, iter6,
ret_val, audit_info);
#endif /* IPv6 */
} else
netlbl_domhsh_audit_add(entry, NULL, NULL,
ret_val, audit_info);
} else if (entry_old->def.type == NETLBL_NLTYPE_ADDRSELECT &&
entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
struct list_head *old_list4;
struct list_head *old_list6;
old_list4 = &entry_old->def.addrsel->list4;
old_list6 = &entry_old->def.addrsel->list6;
/* we only allow the addition of address selectors if all of
* the selectors do not exist in the existing domain map */
netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4)
if (netlbl_af4list_search_exact(iter4->addr,
iter4->mask,
old_list4)) {
ret_val = -EEXIST;
goto add_return;
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6)
if (netlbl_af6list_search_exact(&iter6->addr,
&iter6->mask,
old_list6)) {
ret_val = -EEXIST;
goto add_return;
}
#endif /* IPv6 */
netlbl_af4list_foreach_safe(iter4, tmp4,
&entry->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
iter4->valid = 1;
ret_val = netlbl_af4list_add(iter4, old_list4);
netlbl_domhsh_audit_add(entry_old, iter4, NULL,
ret_val, audit_info);
if (ret_val != 0)
goto add_return;
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
&entry->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
iter6->valid = 1;
ret_val = netlbl_af6list_add(iter6, old_list6);
netlbl_domhsh_audit_add(entry_old, NULL, iter6,
ret_val, audit_info);
if (ret_val != 0)
goto add_return;
}
#endif /* IPv6 */
/* cleanup the new entry since we've moved everything over */
netlbl_domhsh_free_entry(&entry->rcu);
} else
ret_val = -EINVAL;
add_return:
spin_unlock(&netlbl_domhsh_lock);
rcu_read_unlock();
return ret_val;
}
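/*
 * Illustrative sketch (not part of the original file): installing a simple
 * per-domain unlabeled mapping.  The helper name is invented; on success the
 * hash table takes ownership of the entry and the domain string, on failure
 * the caller would still have to free them (not shown here).
 */
static int __maybe_unused netlbl_domhsh_add_example(const char *domain,
                                                    struct netlbl_audit *audit_info)
{
        struct netlbl_dom_map *entry;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (entry == NULL)
                return -ENOMEM;
        entry->domain = kstrdup(domain, GFP_KERNEL);
        if (entry->domain == NULL) {
                kfree(entry);
                return -ENOMEM;
        }
        entry->family = AF_INET;
        entry->def.type = NETLBL_NLTYPE_UNLABELED;

        return netlbl_domhsh_add(entry, audit_info);
}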
/**
* netlbl_domhsh_add_default - Adds the default entry to the domain hash table
* @entry: the entry to add
* @audit_info: NetLabel audit information
*
* Description:
* Adds a new default entry to the domain hash table and handles any updates
* to the lower level protocol handler (i.e. CIPSO). Returns zero on success,
* negative on failure.
*
*/
int netlbl_domhsh_add_default(struct netlbl_dom_map *entry,
struct netlbl_audit *audit_info)
{
return netlbl_domhsh_add(entry, audit_info);
}
/**
* netlbl_domhsh_remove_entry - Removes a given entry from the domain table
* @entry: the entry to remove
* @audit_info: NetLabel audit information
*
* Description:
* Removes an entry from the domain hash table and handles any updates to the
* lower level protocol handler (i.e. CIPSO). Caller is responsible for
* ensuring that the RCU read lock is held. Returns zero on success, negative
* on failure.
*
*/
int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
struct netlbl_audit *audit_info)
{
int ret_val = 0;
struct audit_buffer *audit_buf;
struct netlbl_af4list *iter4;
struct netlbl_domaddr4_map *map4;
#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_domaddr6_map *map6;
#endif /* IPv6 */
if (entry == NULL)
return -ENOENT;
spin_lock(&netlbl_domhsh_lock);
if (entry->valid) {
entry->valid = 0;
if (entry == rcu_dereference(netlbl_domhsh_def_ipv4))
RCU_INIT_POINTER(netlbl_domhsh_def_ipv4, NULL);
else if (entry == rcu_dereference(netlbl_domhsh_def_ipv6))
RCU_INIT_POINTER(netlbl_domhsh_def_ipv6, NULL);
else
list_del_rcu(&entry->list);
} else
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
if (ret_val)
return ret_val;
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
" nlbl_domain=%s res=1",
entry->domain ? entry->domain : "(default)");
audit_log_end(audit_buf);
}
switch (entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
cipso_v4_doi_putdef(map4->def.cipso);
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
map6 = netlbl_domhsh_addr6_entry(iter6);
calipso_doi_putdef(map6->def.calipso);
}
#endif /* IPv6 */
break;
case NETLBL_NLTYPE_CIPSOV4:
cipso_v4_doi_putdef(entry->def.cipso);
break;
#if IS_ENABLED(CONFIG_IPV6)
case NETLBL_NLTYPE_CALIPSO:
calipso_doi_putdef(entry->def.calipso);
break;
#endif /* IPv6 */
}
call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val;
}
/**
* netlbl_domhsh_remove_af4 - Removes an address selector entry
* @domain: the domain
* @addr: IPv4 address
* @mask: IPv4 address mask
* @audit_info: NetLabel audit information
*
* Description:
* Removes an individual address selector from a domain mapping and potentially
* the entire mapping if it is empty. Returns zero on success, negative values
* on failure.
*
*/
int netlbl_domhsh_remove_af4(const char *domain,
const struct in_addr *addr,
const struct in_addr *mask,
struct netlbl_audit *audit_info)
{
struct netlbl_dom_map *entry_map;
struct netlbl_af4list *entry_addr;
struct netlbl_af4list *iter4;
#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
#endif /* IPv6 */
struct netlbl_domaddr4_map *entry;
rcu_read_lock();
if (domain)
entry_map = netlbl_domhsh_search(domain, AF_INET);
else
entry_map = netlbl_domhsh_search_def(domain, AF_INET);
if (entry_map == NULL ||
entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT)
goto remove_af4_failure;
spin_lock(&netlbl_domhsh_lock);
entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
&entry_map->def.addrsel->list4);
spin_unlock(&netlbl_domhsh_lock);
if (entry_addr == NULL)
goto remove_af4_failure;
netlbl_af4list_foreach_rcu(iter4, &entry_map->def.addrsel->list4)
goto remove_af4_single_addr;
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6, &entry_map->def.addrsel->list6)
goto remove_af4_single_addr;
#endif /* IPv6 */
/* the domain mapping is empty so remove it from the mapping table */
netlbl_domhsh_remove_entry(entry_map, audit_info);
remove_af4_single_addr:
rcu_read_unlock();
/* yick, we can't use call_rcu here because we don't have a rcu head
* pointer but hopefully this should be a rare case so the pause
* shouldn't be a problem */
synchronize_rcu();
entry = netlbl_domhsh_addr4_entry(entry_addr);
cipso_v4_doi_putdef(entry->def.cipso);
kfree(entry);
return 0;
remove_af4_failure:
rcu_read_unlock();
return -ENOENT;
}
#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_domhsh_remove_af6 - Removes an address selector entry
* @domain: the domain
* @addr: IPv6 address
* @mask: IPv6 address mask
* @audit_info: NetLabel audit information
*
* Description:
* Removes an individual address selector from a domain mapping and potentially
* the entire mapping if it is empty. Returns zero on success, negative values
* on failure.
*
*/
int netlbl_domhsh_remove_af6(const char *domain,
const struct in6_addr *addr,
const struct in6_addr *mask,
struct netlbl_audit *audit_info)
{
struct netlbl_dom_map *entry_map;
struct netlbl_af6list *entry_addr;
struct netlbl_af4list *iter4;
struct netlbl_af6list *iter6;
struct netlbl_domaddr6_map *entry;
rcu_read_lock();
if (domain)
entry_map = netlbl_domhsh_search(domain, AF_INET6);
else
entry_map = netlbl_domhsh_search_def(domain, AF_INET6);
if (entry_map == NULL ||
entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT)
goto remove_af6_failure;
spin_lock(&netlbl_domhsh_lock);
entry_addr = netlbl_af6list_remove(addr, mask,
&entry_map->def.addrsel->list6);
spin_unlock(&netlbl_domhsh_lock);
if (entry_addr == NULL)
goto remove_af6_failure;
netlbl_af4list_foreach_rcu(iter4, &entry_map->def.addrsel->list4)
goto remove_af6_single_addr;
netlbl_af6list_foreach_rcu(iter6, &entry_map->def.addrsel->list6)
goto remove_af6_single_addr;
/* the domain mapping is empty so remove it from the mapping table */
netlbl_domhsh_remove_entry(entry_map, audit_info);
remove_af6_single_addr:
rcu_read_unlock();
/* yick, we can't use call_rcu here because we don't have a rcu head
* pointer but hopefully this should be a rare case so the pause
* shouldn't be a problem */
synchronize_rcu();
entry = netlbl_domhsh_addr6_entry(entry_addr);
calipso_doi_putdef(entry->def.calipso);
kfree(entry);
return 0;
remove_af6_failure:
rcu_read_unlock();
return -ENOENT;
}
#endif /* IPv6 */
/**
* netlbl_domhsh_remove - Removes an entry from the domain hash table
* @domain: the domain to remove
* @family: address family
* @audit_info: NetLabel audit information
*
* Description:
* Removes an entry from the domain hash table and handles any updates to the
* lower level protocol handler (i.e. CIPSO). @family may be %AF_UNSPEC which
* removes all address family entries. Returns zero on success, negative on
* failure.
*
*/
int netlbl_domhsh_remove(const char *domain, u16 family,
struct netlbl_audit *audit_info)
{
int ret_val = -EINVAL;
struct netlbl_dom_map *entry;
rcu_read_lock();
if (family == AF_INET || family == AF_UNSPEC) {
if (domain)
entry = netlbl_domhsh_search(domain, AF_INET);
else
entry = netlbl_domhsh_search_def(domain, AF_INET);
ret_val = netlbl_domhsh_remove_entry(entry, audit_info);
if (ret_val && ret_val != -ENOENT)
goto done;
}
if (family == AF_INET6 || family == AF_UNSPEC) {
int ret_val2;
if (domain)
entry = netlbl_domhsh_search(domain, AF_INET6);
else
entry = netlbl_domhsh_search_def(domain, AF_INET6);
ret_val2 = netlbl_domhsh_remove_entry(entry, audit_info);
if (ret_val2 != -ENOENT)
ret_val = ret_val2;
}
done:
rcu_read_unlock();
return ret_val;
}
/**
* netlbl_domhsh_remove_default - Removes the default entry from the table
* @family: address family
* @audit_info: NetLabel audit information
*
* Description:
* Removes/resets the default entry corresponding to @family from the domain
* hash table and handles any updates to the lower level protocol handler
* (i.e. CIPSO). @family may be %AF_UNSPEC which removes all address family
* entries. Returns zero on success, negative on failure.
*
*/
int netlbl_domhsh_remove_default(u16 family, struct netlbl_audit *audit_info)
{
return netlbl_domhsh_remove(NULL, family, audit_info);
}
/**
* netlbl_domhsh_getentry - Get an entry from the domain hash table
* @domain: the domain name to search for
* @family: address family
*
* Description:
* Look through the domain hash table searching for an entry to match @domain,
* with address family @family, return a pointer to a copy of the entry or
* NULL. The caller is responsible for ensuring that rcu_read_[un]lock() is
* called.
*
*/
struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain, u16 family)
{
if (family == AF_UNSPEC)
return NULL;
return netlbl_domhsh_search_def(domain, family);
}
/**
* netlbl_domhsh_getentry_af4 - Get an entry from the domain hash table
* @domain: the domain name to search for
* @addr: the IP address to search for
*
* Description:
* Look through the domain hash table searching for an entry to match @domain
* and @addr, return a pointer to a copy of the entry or NULL. The caller is
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
__be32 addr)
{
struct netlbl_dom_map *dom_iter;
struct netlbl_af4list *addr_iter;
dom_iter = netlbl_domhsh_search_def(domain, AF_INET);
if (dom_iter == NULL)
return NULL;
if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
return &dom_iter->def;
addr_iter = netlbl_af4list_search(addr, &dom_iter->def.addrsel->list4);
if (addr_iter == NULL)
return NULL;
return &(netlbl_domhsh_addr4_entry(addr_iter)->def);
}
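/*
 * Illustrative sketch (not part of the original file): resolving the CIPSOv4
 * DOI for a domain/address pair.  The helper name is invented; the caller
 * must hold the RCU read lock while the returned definition is used.
 */
static u32 __maybe_unused netlbl_domhsh_doi_example(const char *domain,
                                                    __be32 addr)
{
        struct netlbl_dommap_def *def;
        u32 doi = 0;

        rcu_read_lock();
        def = netlbl_domhsh_getentry_af4(domain, addr);
        if (def != NULL && def->type == NETLBL_NLTYPE_CIPSOV4)
                doi = def->cipso->doi;
        rcu_read_unlock();

        return doi;
}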
#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
* @domain: the domain name to search for
* @addr: the IP address to search for
*
* Description:
* Look through the domain hash table searching for an entry to match @domain
* and @addr, return a pointer to a copy of the entry or NULL. The caller is
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
const struct in6_addr *addr)
{
struct netlbl_dom_map *dom_iter;
struct netlbl_af6list *addr_iter;
dom_iter = netlbl_domhsh_search_def(domain, AF_INET6);
if (dom_iter == NULL)
return NULL;
if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
return &dom_iter->def;
addr_iter = netlbl_af6list_search(addr, &dom_iter->def.addrsel->list6);
if (addr_iter == NULL)
return NULL;
return &(netlbl_domhsh_addr6_entry(addr_iter)->def);
}
#endif /* IPv6 */
/**
* netlbl_domhsh_walk - Iterate through the domain mapping hash table
* @skip_bkt: the number of buckets to skip at the start
* @skip_chain: the number of entries to skip in the first iterated bucket
* @callback: callback for each entry
* @cb_arg: argument for the callback function
*
* Description:
* Iterate over the domain mapping hash table, skipping the first @skip_bkt
* buckets and @skip_chain entries. For each entry in the table call
* @callback, if @callback returns a negative value stop 'walking' through the
* table and return. Updates the values in @skip_bkt and @skip_chain on
* return. Returns zero on success, negative values on failure.
*
*/
int netlbl_domhsh_walk(u32 *skip_bkt,
u32 *skip_chain,
int (*callback) (struct netlbl_dom_map *entry, void *arg),
void *cb_arg)
{
int ret_val = -ENOENT;
u32 iter_bkt;
struct list_head *iter_list;
struct netlbl_dom_map *iter_entry;
u32 chain_cnt = 0;
rcu_read_lock();
for (iter_bkt = *skip_bkt;
iter_bkt < rcu_dereference(netlbl_domhsh)->size;
iter_bkt++, chain_cnt = 0) {
iter_list = &rcu_dereference(netlbl_domhsh)->tbl[iter_bkt];
list_for_each_entry_rcu(iter_entry, iter_list, list)
if (iter_entry->valid) {
if (chain_cnt++ < *skip_chain)
continue;
ret_val = callback(iter_entry, cb_arg);
if (ret_val < 0) {
chain_cnt--;
goto walk_return;
}
}
}
walk_return:
rcu_read_unlock();
*skip_bkt = iter_bkt;
*skip_chain = chain_cnt;
return ret_val;
}
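/*
 * Illustrative sketch (not part of the original file): counting the valid
 * mappings by walking the whole table.  The callback returns 0 so the walk
 * continues; skip_bkt/skip_chain start at zero to begin with the first
 * bucket.  The return value of netlbl_domhsh_walk() (-ENOENT when the table
 * is empty) is ignored here.
 */
static int __maybe_unused netlbl_domhsh_count_cb(struct netlbl_dom_map *entry,
                                                 void *arg)
{
        (*(u32 *)arg)++;
        return 0;
}

static u32 __maybe_unused netlbl_domhsh_count_example(void)
{
        u32 skip_bkt = 0, skip_chain = 0, count = 0;

        netlbl_domhsh_walk(&skip_bkt, &skip_chain,
                           netlbl_domhsh_count_cb, &count);
        return count;
}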
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
*
* High-resolution kernel timers
*
* In contrast to the low-resolution timeout API, aka timer wheel,
* hrtimers provide finer resolution and accuracy depending on system
* configuration and capabilities.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* Credits:
* Based on the original timer wheel code
*
* Help, testing, suggestions, bugfixes, improvements were
* provided by:
*
* George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
* et. al.
*/
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <trace/events/timer.h>
#include "tick-internal.h"
/*
* Masks for selecting the soft and hard context timers from
* cpu_base->active
*/
#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
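/*
 * Worked example: with the eight clock bases initialized below (four hard
 * bases followed by their four soft counterparts) MASK_SHIFT evaluates to 4,
 * so HRTIMER_ACTIVE_HARD is 0x0f, HRTIMER_ACTIVE_SOFT is 0xf0 and
 * HRTIMER_ACTIVE_ALL is 0xff.
 */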
static void retrigger_next_event(void *arg);
static ktime_t __hrtimer_cb_get_time(clockid_t clock_id);
/*
* The timer bases:
*
* There are more clockids than hrtimer bases. Thus, we index
* into the timer bases by the hrtimer_base_type enum. When trying
* to reach a base using a clockid, hrtimer_clockid_to_base()
* is used to convert from clockid to the proper hrtimer_base_type.
*/
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
.clock_base =
{
{
.index = HRTIMER_BASE_MONOTONIC,
.clockid = CLOCK_MONOTONIC,
},
{
.index = HRTIMER_BASE_REALTIME,
.clockid = CLOCK_REALTIME,
},
{
.index = HRTIMER_BASE_BOOTTIME,
.clockid = CLOCK_BOOTTIME,
},
{
.index = HRTIMER_BASE_TAI,
.clockid = CLOCK_TAI,
},
{
.index = HRTIMER_BASE_MONOTONIC_SOFT,
.clockid = CLOCK_MONOTONIC,
},
{
.index = HRTIMER_BASE_REALTIME_SOFT,
.clockid = CLOCK_REALTIME,
},
{
.index = HRTIMER_BASE_BOOTTIME_SOFT,
.clockid = CLOCK_BOOTTIME,
},
{
.index = HRTIMER_BASE_TAI_SOFT,
.clockid = CLOCK_TAI,
},
},
.csd = CSD_INIT(retrigger_next_event, NULL)
};
static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
{
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
return true;
else
return likely(base->online);
}
/*
* Functions and macros which are different for UP/SMP systems are kept in a
* single place
*/
#ifdef CONFIG_SMP
/*
* We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
* such that hrtimer_callback_running() can unconditionally dereference
* timer->base->cpu_base
*/
static struct hrtimer_cpu_base migration_cpu_base = {
.clock_base = { {
.cpu_base = &migration_cpu_base,
.seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
&migration_cpu_base.lock),
}, },
};
#define migration_base migration_cpu_base.clock_base[0]
/*
* We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
* means that all timers which are tied to this base via timer->base are
* locked, and the base itself is locked too.
*
* So __run_timers/migrate_timers can safely modify all timers which could
* be found on the lists/queues.
*
* When the timer's base is locked, and the timer removed from list, it is
* possible to set timer->base = &migration_base and drop the lock: the timer
* remains locked.
*/
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
unsigned long *flags)
__acquires(&timer->base->lock)
{
struct hrtimer_clock_base *base;
for (;;) {
base = READ_ONCE(timer->base);
if (likely(base != &migration_base)) {
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* The timer has migrated to another CPU: */
raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
}
cpu_relax();
}
}
/*
* Check if the elected target is suitable considering its next
* event and the hotplug state of the current CPU.
*
* If the elected target is remote and its next event is after the timer
* to queue, then a remote reprogram is necessary. However there is no
* guarantee the IPI handling the operation would arrive in time to meet
* the high resolution deadline. In this case the local CPU becomes a
* preferred target, unless it is offline.
*
* High and low resolution modes are handled the same way for simplicity.
*
* Called with cpu_base->lock of target cpu held.
*/
static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
struct hrtimer_cpu_base *new_cpu_base,
struct hrtimer_cpu_base *this_cpu_base)
{
ktime_t expires;
/*
* The local CPU clockevent can be reprogrammed. Also get_target_base()
* guarantees it is online.
*/
if (new_cpu_base == this_cpu_base)
return true;
/*
* The offline local CPU can't be the default target if the
* next remote target event is after this timer. Keep the
* elected new base. An IPI will be issued to reprogram
* it as a last resort.
*/
if (!hrtimer_base_is_online(this_cpu_base))
return true;
expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
return expires >= new_base->cpu_base->expires_next;
}
static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
{
if (!hrtimer_base_is_online(base)) {
int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));
return &per_cpu(hrtimer_bases, cpu);
}
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
if (static_branch_likely(&timers_migration_enabled) && !pinned)
return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
return base;
}
/*
* We switch the timer base to a power-optimized selected CPU target,
* if:
* - NO_HZ_COMMON is enabled
* - timer migration is enabled
* - the timer callback is not running
* - the timer is not the first expiring timer on the new target
*
* If one of the above requirements is not fulfilled we move the timer
* to the current CPU or leave it on the previously assigned CPU if
* the timer callback is currently running.
*/
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
int pinned)
{
struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
struct hrtimer_clock_base *new_base;
int basenum = base->index;
this_cpu_base = this_cpu_ptr(&hrtimer_bases);
new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
new_base = &new_cpu_base->clock_base[basenum];
if (base != new_base) {
/*
* We are trying to move timer to new_base.
* However we can't change timer's base while it is running,
* so we keep it on the same CPU. No hassle vs. reprogramming
* the event source in the high resolution case. The softirq
* code will take care of this when the timer function has
* completed. There is no conflict as we hold the lock until
* the timer is enqueued.
*/
if (unlikely(hrtimer_callback_running(timer)))
return base;
/* See the comment in lock_hrtimer_base() */
WRITE_ONCE(timer->base, &migration_base);
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
this_cpu_base)) {
raw_spin_unlock(&new_base->cpu_base->lock);
raw_spin_lock(&base->cpu_base->lock);
new_cpu_base = this_cpu_base;
WRITE_ONCE(timer->base, base);
goto again;
}
WRITE_ONCE(timer->base, new_base);
} else {
if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) {
new_cpu_base = this_cpu_base;
goto again;
}
}
return new_base;
}
#else /* CONFIG_SMP */
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
__acquires(&timer->base->cpu_base->lock)
{
struct hrtimer_clock_base *base = timer->base;
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
return base;
}
# define switch_hrtimer_base(t, b, p) (b)
#endif /* !CONFIG_SMP */
/*
* Functions for the union type storage format of ktime_t which are
* too large for inlining:
*/
#if BITS_PER_LONG < 64
/*
* Divide a ktime value by a nanosecond value
*/
s64 __ktime_divns(const ktime_t kt, s64 div)
{
int sft = 0;
s64 dclc;
u64 tmp;
dclc = ktime_to_ns(kt);
tmp = dclc < 0 ? -dclc : dclc;
/* Make sure the divisor is less than 2^32: */
while (div >> 32) {
sft++;
div >>= 1;
}
tmp >>= sft;
do_div(tmp, (u32) div);
return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
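/*
 * Worked example: dividing kt = 10 seconds by div = 6 * NSEC_PER_SEC (a
 * 33 bit value).  One right shift brings the divisor below 2^32, the
 * dividend is shifted by the same amount, and do_div() then yields 1,
 * matching 10s / 6s with the usual truncation.
 */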
#endif /* BITS_PER_LONG < 64 */
/*
* Add two ktime values and do a safety check for overflow:
*/
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
ktime_t res = ktime_add_unsafe(lhs, rhs);
/*
* We use KTIME_SEC_MAX here, the maximum timeout which we can
* return to user space in a timespec:
*/
if (res < 0 || res < lhs || res < rhs)
res = ktime_set(KTIME_SEC_MAX, 0);
return res;
}
EXPORT_SYMBOL_GPL(ktime_add_safe);
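/*
 * Worked example: adding 1ns to KTIME_MAX wraps the 64 bit nanosecond value
 * negative, so the overflow check above fires and the result is clamped to
 * KTIME_SEC_MAX seconds instead of a bogus time in the past.
 */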
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
static const struct debug_obj_descr hrtimer_debug_descr;
static void *hrtimer_debug_hint(void *addr)
{
return ACCESS_PRIVATE((struct hrtimer *)addr, function);
}
/*
* fixup_init is called when:
* - an active object is initialized
*/
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
struct hrtimer *timer = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
hrtimer_cancel(timer);
debug_object_init(timer, &hrtimer_debug_descr);
return true;
default:
return false;
}
}
/*
* fixup_activate is called when:
* - an active object is activated
* - an unknown non-static object is activated
*/
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
switch (state) {
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
fallthrough;
default:
return false;
}
}
/*
* fixup_free is called when:
* - an active object is freed
*/
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
struct hrtimer *timer = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
hrtimer_cancel(timer);
debug_object_free(timer, &hrtimer_debug_descr);
return true;
default:
return false;
}
}
static const struct debug_obj_descr hrtimer_debug_descr = {
.name = "hrtimer",
.debug_hint = hrtimer_debug_hint,
.fixup_init = hrtimer_fixup_init,
.fixup_activate = hrtimer_fixup_activate,
.fixup_free = hrtimer_fixup_free,
};
static inline void debug_hrtimer_init(struct hrtimer *timer)
{
debug_object_init(timer, &hrtimer_debug_descr);
}
static inline void debug_hrtimer_init_on_stack(struct hrtimer *timer)
{
debug_object_init_on_stack(timer, &hrtimer_debug_descr);
}
static inline void debug_hrtimer_activate(struct hrtimer *timer,
enum hrtimer_mode mode)
{
debug_object_activate(timer, &hrtimer_debug_descr);
}
static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
debug_object_deactivate(timer, &hrtimer_debug_descr);
}
void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_init_on_stack(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer,
enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
static inline void debug_setup(struct hrtimer *timer, clockid_t clockid, enum hrtimer_mode mode)
{
debug_hrtimer_init(timer);
trace_hrtimer_setup(timer, clockid, mode);
}
static inline void debug_setup_on_stack(struct hrtimer *timer, clockid_t clockid,
enum hrtimer_mode mode)
{
debug_hrtimer_init_on_stack(timer);
trace_hrtimer_setup(timer, clockid, mode);
}
static inline void debug_activate(struct hrtimer *timer,
enum hrtimer_mode mode)
{
debug_hrtimer_activate(timer, mode);
trace_hrtimer_start(timer, mode);
}
static inline void debug_deactivate(struct hrtimer *timer)
{
debug_hrtimer_deactivate(timer);
trace_hrtimer_cancel(timer);
}
static struct hrtimer_clock_base *
__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
unsigned int idx;
if (!*active)
return NULL;
idx = __ffs(*active);
*active &= ~(1U << idx);
return &cpu_base->clock_base[idx];
}
#define for_each_active_base(base, cpu_base, active) \
while ((base = __next_base((cpu_base), &(active))))
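/*
 * Worked example: with active == 0x05 (HRTIMER_BASE_MONOTONIC and
 * HRTIMER_BASE_BOOTTIME pending) __next_base() returns clock_base[0] first,
 * clears bit 0, then returns clock_base[2] and finally NULL, which
 * terminates the for_each_active_base() loop.
 */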
static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
const struct hrtimer *exclude,
unsigned int active,
ktime_t expires_next)
{
struct hrtimer_clock_base *base;
ktime_t expires;
for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *next;
struct hrtimer *timer;
next = timerqueue_getnext(&base->active);
timer = container_of(next, struct hrtimer, node);
if (timer == exclude) {
/* Get to the next timer in the queue. */
next = timerqueue_iterate_next(next);
if (!next)
continue;
timer = container_of(next, struct hrtimer, node);
}
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
if (expires < expires_next) {
expires_next = expires;
/* Skip cpu_base update if a timer is being excluded. */
if (exclude)
continue;
if (timer->is_soft)
cpu_base->softirq_next_timer = timer;
else
cpu_base->next_timer = timer;
}
}
/*
* clock_was_set() might have changed base->offset of any of
* the clock bases so the result might be negative. Fix it up
* to prevent a false positive in clockevents_program_event().
*/
if (expires_next < 0)
expires_next = 0;
return expires_next;
}
/*
* Recomputes cpu_base::*next_timer and returns the earliest expires_next
* but does not set cpu_base::*expires_next, that is done by
* hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
* cpu_base::*expires_next right away, reprogramming logic would no longer
* work.
*
* When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
* those timers will get run whenever the softirq gets handled, at the end of
* hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
*
* Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
* The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
* softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
*
* @active_mask must be one of:
* - HRTIMER_ACTIVE_ALL,
* - HRTIMER_ACTIVE_SOFT, or
* - HRTIMER_ACTIVE_HARD.
*/
static ktime_t
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
{
unsigned int active;
struct hrtimer *next_timer = NULL;
ktime_t expires_next = KTIME_MAX;
if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
cpu_base->softirq_next_timer = NULL;
expires_next = __hrtimer_next_event_base(cpu_base, NULL,
active, KTIME_MAX);
next_timer = cpu_base->softirq_next_timer;
}
if (active_mask & HRTIMER_ACTIVE_HARD) {
active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
cpu_base->next_timer = next_timer;
expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
expires_next);
}
return expires_next;
}
static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
{
ktime_t expires_next, soft = KTIME_MAX;
/*
* If the soft interrupt has already been activated, ignore the
* soft bases. They will be handled in the already raised soft
* interrupt.
*/
if (!cpu_base->softirq_activated) {
soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
/*
* Update the soft expiry time. clock_settime() might have
* affected it.
*/
cpu_base->softirq_expires_next = soft;
}
expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
/*
* If a softirq timer is expiring first, update cpu_base->next_timer
* and program the hardware with the soft expiry time.
*/
if (expires_next > soft) {
cpu_base->next_timer = cpu_base->softirq_next_timer;
expires_next = soft;
}
return expires_next;
}
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
offs_real, offs_boot, offs_tai);
base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
return now;
}
/*
* Is the high resolution mode active ?
*/
static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
cpu_base->hres_active : 0;
}
static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
struct hrtimer *next_timer,
ktime_t expires_next)
{
cpu_base->expires_next = expires_next;
/*
* If hres is not active, hardware does not have to be
* reprogrammed yet.
*
* If a hang was detected in the last timer interrupt then we
* leave the hang delay active in the hardware. We want the
* system to make progress. That also prevents the following
* scenario:
* T1 expires 50ms from now
* T2 expires 5s from now
*
* T1 is removed, so this code is called and would reprogram
* the hardware to 5s from now. Any hrtimer_start after that
* will not reprogram the hardware due to hang_detected being
* set. So we'd effectively block all timers until the T2 event
* fires.
*/
if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
return;
tick_program_event(expires_next, 1);
}
/*
* Reprogram the event source with checking both queues for the
* next event
* Called with interrupts disabled and base->lock held
*/
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
expires_next = hrtimer_update_next_event(cpu_base);
if (skip_equal && expires_next == cpu_base->expires_next)
return;
__hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next);
}
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* High resolution timer enabled ?
*/
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);
/*
* Enable / Disable high resolution mode
*/
static int __init setup_hrtimer_hres(char *str)
{
return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}
__setup("highres=", setup_hrtimer_hres);
/*
* hrtimer_is_hres_enabled - query whether the highres mode is enabled
*/
static inline int hrtimer_is_hres_enabled(void)
{
return hrtimer_hres_enabled;
}
/*
* Switch to high resolution mode
*/
static void hrtimer_switch_to_hres(void)
{
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (tick_init_highres()) {
pr_warn("Could not switch to high resolution mode on CPU %u\n",
base->cpu);
return;
}
base->hres_active = 1;
hrtimer_resolution = HIGH_RES_NSEC;
tick_setup_sched_timer(true);
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
}
#else
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
* Retrigger next event is called after clock was set with interrupts
* disabled through an SMP function call or directly from low level
* resume code.
*
* This is only invoked when:
* - CONFIG_HIGH_RES_TIMERS is enabled.
* - CONFIG_NO_HZ_COMMON is enabled
*
* For the other cases this function is empty and because the call sites
* are optimized out it vanishes as well, i.e. no need for lots of
* #ifdeffery.
*/
static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
/*
* When high resolution mode or nohz is active, then the offsets of
* CLOCK_REALTIME/TAI/BOOTTIME have to be updated. Otherwise the
* next tick will take care of that.
*
* If high resolution mode is active then the next expiring timer
* must be reevaluated and the clock event device reprogrammed if
* necessary.
*
* In the NOHZ case the update of the offset and the reevaluation
* of the next expiring timer is enough. The return from the SMP
* function call will take care of the reprogramming in case the
* CPU was in a NOHZ idle sleep.
*
* In periodic low resolution mode, the next softirq expiration
* must also be updated.
*/
raw_spin_lock(&base->lock);
hrtimer_update_base(base);
if (hrtimer_hres_active(base))
hrtimer_force_reprogram(base, 0);
else
hrtimer_update_next_event(base);
raw_spin_unlock(&base->lock);
}
/*
* When a timer is enqueued and expires earlier than the already enqueued
* timers, we have to check, whether it expires earlier than the timer for
* which the clock event device was armed.
*
* Called with interrupts disabled and base->cpu_base.lock held
*/
static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *base = timer->base;
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
/*
* CLOCK_REALTIME timer might be requested with an absolute
* expiry time which is less than base->offset. Set it to 0.
*/
if (expires < 0)
expires = 0;
if (timer->is_soft) {
/*
* soft hrtimer could be started on a remote CPU. In this
* case softirq_expires_next needs to be updated on the
* remote CPU. The soft hrtimer will not expire before the
* first hard hrtimer on the remote CPU -
* hrtimer_check_target() prevents this case.
*/
struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
if (timer_cpu_base->softirq_activated)
return;
if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
return;
timer_cpu_base->softirq_next_timer = timer;
timer_cpu_base->softirq_expires_next = expires;
if (!ktime_before(expires, timer_cpu_base->expires_next) ||
!reprogram)
return;
}
/*
* If the timer is not on the current cpu, we cannot reprogram
* the other cpus clock event device.
*/
if (base->cpu_base != cpu_base)
return;
if (expires >= cpu_base->expires_next)
return;
/*
* If the hrtimer interrupt is running, then it will reevaluate the
* clock bases and reprogram the clock event device.
*/
if (cpu_base->in_hrtirq)
return;
cpu_base->next_timer = timer;
__hrtimer_reprogram(cpu_base, timer, expires);
}
static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base,
unsigned int active)
{
struct hrtimer_clock_base *base;
unsigned int seq;
ktime_t expires;
/*
* Update the base offsets unconditionally so the following
* checks whether the SMP function call is required works.
*
* The update is safe even when the remote CPU is in the hrtimer
* interrupt or the hrtimer soft interrupt and expiring affected
* bases. Either it will see the update before handling a base or
* it will see it when it finishes the processing and reevaluates
* the next expiring timer.
*/
seq = cpu_base->clock_was_set_seq;
hrtimer_update_base(cpu_base);
/*
* If the sequence did not change over the update then the
* remote CPU already handled it.
*/
if (seq == cpu_base->clock_was_set_seq)
return false;
/*
* If the remote CPU is currently handling an hrtimer interrupt, it
* will reevaluate the first expiring timer of all clock bases
* before reprogramming. Nothing to do here.
*/
if (cpu_base->in_hrtirq)
return false;
/*
* Walk the affected clock bases and check whether the first expiring
* timer in a clock base is moving ahead of the first expiring timer of
* @cpu_base. If so, the IPI must be invoked because per CPU clock
* event devices cannot be remotely reprogrammed.
*/
active &= cpu_base->active_bases;
for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *next;
next = timerqueue_getnext(&base->active);
expires = ktime_sub(next->expires, base->offset);
if (expires < cpu_base->expires_next)
return true;
/* Extra check for softirq clock bases */
if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
continue;
if (cpu_base->softirq_activated)
continue;
if (expires < cpu_base->softirq_expires_next)
return true;
}
return false;
}
/*
* Clock was set. This might affect CLOCK_REALTIME, CLOCK_TAI and
* CLOCK_BOOTTIME (for late sleep time injection).
*
* This requires updating the offsets for these clocks
* vs. CLOCK_MONOTONIC. When high resolution timers are enabled, this
* also requires eventually reprogramming the per CPU clock event devices
* when the change moves an affected timer ahead of the first expiring
* timer on that CPU. Obviously remote per CPU clock event devices cannot
* be reprogrammed. The other reason why an IPI has to be sent is when the
* system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets
* in the tick, which obviously might be stopped, so this has to bring out
* the remote CPU which might sleep in idle to get this sorted.
*/
void clock_was_set(unsigned int bases)
{
struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);
cpumask_var_t mask;
int cpu;
if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
goto out_timerfd;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
on_each_cpu(retrigger_next_event, NULL, 1);
goto out_timerfd;
}
/* Avoid interrupting CPUs if possible */
cpus_read_lock();
for_each_online_cpu(cpu) {
unsigned long flags;
cpu_base = &per_cpu(hrtimer_bases, cpu);
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (update_needs_ipi(cpu_base, bases))
cpumask_set_cpu(cpu, mask);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
preempt_disable();
smp_call_function_many(mask, retrigger_next_event, NULL, 1);
preempt_enable();
cpus_read_unlock();
free_cpumask_var(mask);
out_timerfd:
timerfd_clock_was_set();
}
static void clock_was_set_work(struct work_struct *work)
{
clock_was_set(CLOCK_SET_WALL);
}
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
/*
* Called from timekeeping code to reprogram the hrtimer interrupt device
* on all cpus and to notify timerfd.
*/
void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
/*
* Called during resume either directly via timekeeping_resume()
* or in the case of s2idle from tick_unfreeze() to ensure that the
* hrtimers are up to date.
*/
void hrtimers_resume_local(void)
{
lockdep_assert_irqs_disabled();
/* Retrigger on the local CPU */
retrigger_next_event(NULL);
}
/*
* Counterpart to lock_hrtimer_base above:
*/
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
__releases(&timer->base->cpu_base->lock)
{
raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
* hrtimer_forward() - forward the timer expiry
* @timer: hrtimer to forward
* @now: forward past this time
* @interval: the interval to forward
*
* Forward the timer expiry so it will expire in the future.
*
* .. note::
* This only updates the timer expiry value and does not requeue the timer.
*
* There is also a variant of this function, hrtimer_forward_now(), which
* uses the current time of the timer's clock base as @now.
*
* Context: Can be safely called from the callback function of @timer. If called
* from other contexts @timer must neither be enqueued nor running the
* callback and the caller needs to take care of serialization.
*
* Return: The number of overruns.
*/
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
u64 orun = 1;
ktime_t delta;
delta = ktime_sub(now, hrtimer_get_expires(timer));
if (delta < 0)
return 0;
if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
return 0;
if (interval < hrtimer_resolution)
interval = hrtimer_resolution;
if (unlikely(delta >= interval)) {
s64 incr = ktime_to_ns(interval);
orun = ktime_divns(delta, incr);
hrtimer_add_expires_ns(timer, incr * orun);
if (hrtimer_get_expires_tv64(timer) > now)
return orun;
/*
* This (and the ktime_add() below) is the
* correction for exact:
*/
orun++;
}
hrtimer_add_expires(timer, interval);
return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
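/*
 * Illustrative sketch (not part of the original file): the canonical use of
 * hrtimer_forward_now() from a timer callback to build a periodic timer.
 * The callback name and the 100ms period are invented for the example.
 */
static enum hrtimer_restart __maybe_unused example_periodic_cb(struct hrtimer *timer)
{
        /* Push the expiry forward by whole 100ms periods past "now" ... */
        hrtimer_forward_now(timer, ms_to_ktime(100));
        /* ... and ask the core to requeue the timer. */
        return HRTIMER_RESTART;
}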
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
* The timer is inserted in expiry order. Insertion into the
* red black tree is O(log(n)). Must hold the base lock.
*
* Returns true when the new timer is the leftmost timer in the tree.
*/
static bool enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
enum hrtimer_mode mode)
{
debug_activate(timer, mode);
WARN_ON_ONCE(!base->cpu_base->online);
base->cpu_base->active_bases |= 1 << base->index;
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
return timerqueue_add(&base->active, &timer->node);
}
/*
* __remove_hrtimer - internal function to remove a timer
*
* Caller must hold the base lock.
*
* High resolution timer mode reprograms the clock event device when the
* timer is the one which expires next. The caller can disable this by setting
* reprogram to zero. This is useful, when the context does a reprogramming
* anyway (e.g. timer interrupt)
*/
static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
u8 newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
u8 state = timer->state;
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->state, newstate);
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
/*
* Note: If reprogram is false we do not update
* cpu_base->next_timer. This happens when we remove the first
* timer on a remote cpu. No harm as we never dereference
* cpu_base->next_timer. So the worst thing that can happen is
* a superfluous call to hrtimer_force_reprogram() on the
* remote cpu later on if the same timer gets enqueued again.
*/
if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
}
/*
* remove hrtimer, called with base lock held
*/
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
bool restart, bool keep_local)
{
u8 state = timer->state;
if (state & HRTIMER_STATE_ENQUEUED) {
bool reprogram;
/*
* Remove the timer and force reprogramming when high
* resolution mode is active and the timer is on the current
* CPU. If we remove a timer on another CPU, reprogramming is
* skipped. The interrupt event on this CPU is fired and
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
debug_deactivate(timer);
reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
/*
* If the timer is not restarted then reprogramming is
* required if the timer is local. If it is local and about
* to be restarted, avoid programming it twice (on removal
* and a moment later when it's requeued).
*/
if (!restart)
state = HRTIMER_STATE_INACTIVE;
else
reprogram &= !keep_local;
__remove_hrtimer(timer, base, state, reprogram);
return 1;
}
return 0;
}
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
/*
* CONFIG_TIME_LOW_RES indicates that the system has no way to return
* granular time values. For relative timers we add hrtimer_resolution
* (i.e. one jiffy) to prevent short timeouts.
*/
timer->is_rel = mode & HRTIMER_MODE_REL;
if (timer->is_rel)
tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
return tim;
}
static void
hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
ktime_t expires;
/*
* Find the next SOFT expiration.
*/
expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
/*
* Reprogramming needs to be triggered, even if the next soft
* hrtimer expires at the same time as the next hard
* hrtimer. cpu_base->softirq_expires_next needs to be updated!
*/
if (expires == KTIME_MAX)
return;
/*
* cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
* cpu_base->*expires_next is only set by hrtimer_reprogram()
*/
hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
}
static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 delta_ns, const enum hrtimer_mode mode,
struct hrtimer_clock_base *base)
{
struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *new_base;
bool force_local, first;
/*
* If the timer is on the local cpu base and is the first expiring
* timer then this might end up reprogramming the hardware twice
* (on removal and on enqueue). To avoid that, prevent the
* reprogram on removal: keep the timer local to the current CPU
* and enforce reprogramming after it is queued no matter whether
* it is the new first expiring timer again or not.
*/
force_local = base->cpu_base == this_cpu_base;
force_local &= base->cpu_base->next_timer == timer;
/*
* Don't force local queuing if this enqueue happens on an unplugged
* CPU after hrtimer_cpu_dying() has been invoked.
*/
force_local &= this_cpu_base->online;
/*
* Remove an active timer from the queue. In case it is not queued
* on the current CPU, make sure that remove_hrtimer() updates the
* remote data correctly.
*
* If it's on the current CPU and the first expiring timer, then
* skip reprogramming, keep the timer local and enforce
* reprogramming later if it was the first expiring timer. This
* avoids programming the underlying clock event twice (once at
* removal and once after enqueue).
*/
remove_hrtimer(timer, base, true, force_local);
if (mode & HRTIMER_MODE_REL)
tim = ktime_add_safe(tim, __hrtimer_cb_get_time(base->clockid));
tim = hrtimer_update_lowres(timer, tim, mode);
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
/* Switch the timer base, if necessary: */
if (!force_local) {
new_base = switch_hrtimer_base(timer, base,
mode & HRTIMER_MODE_PINNED);
} else {
new_base = base;
}
first = enqueue_hrtimer(timer, new_base, mode);
if (!force_local) {
/*
* If the current CPU base is online, then the timer is
* never queued on a remote CPU if it would be the first
* expiring timer there.
*/
if (hrtimer_base_is_online(this_cpu_base))
return first;
/*
* Timer was enqueued remote because the current base is
* already offline. If the timer is the first to expire,
* kick the remote CPU to reprogram the clock event.
*/
if (first) {
struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
}
return 0;
}
/*
* Timer was forced to stay on the current CPU to avoid
* reprogramming on removal and enqueue. Force reprogram the
* hardware by evaluating the new first expiring timer.
*/
hrtimer_force_reprogram(new_base->cpu_base, 1);
return 0;
}
/**
* hrtimer_start_range_ns - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
* @delta_ns: "slack" range for the timer
* @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
* relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
* softirq based mode is considered for debug purpose only!
*/
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 delta_ns, const enum hrtimer_mode mode)
{
struct hrtimer_clock_base *base;
unsigned long flags;
/*
* Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
* match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
* expiry mode because unmarked timers are moved to softirq expiry.
*/
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
else
WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
base = lock_hrtimer_base(timer, &flags);
if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
hrtimer_reprogram(timer, true);
unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
/**
* hrtimer_try_to_cancel - try to deactivate a timer
* @timer: hrtimer to stop
*
* Returns:
*
* * 0 when the timer was not active
* * 1 when the timer was active
* * -1 when the timer is currently executing the callback function and
* cannot be stopped
*/
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
struct hrtimer_clock_base *base;
unsigned long flags;
int ret = -1;
/*
* Check lockless first. If the timer is not active (neither
* enqueued nor running the callback), nothing to do here. The
* base lock does not serialize against a concurrent enqueue,
* so we can avoid taking it.
*/
if (!hrtimer_active(timer))
return 0;
base = lock_hrtimer_base(timer, &flags);
if (!hrtimer_callback_running(timer))
ret = remove_hrtimer(timer, base, false, false);
unlock_hrtimer_base(timer, &flags);
return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
#ifdef CONFIG_PREEMPT_RT
static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
{
spin_lock_init(&base->softirq_expiry_lock);
}
static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
__acquires(&base->softirq_expiry_lock)
{
spin_lock(&base->softirq_expiry_lock);
}
static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
__releases(&base->softirq_expiry_lock)
{
spin_unlock(&base->softirq_expiry_lock);
}
/*
* The counterpart to hrtimer_cancel_wait_running().
*
* If there is a waiter for cpu_base->expiry_lock, then it was waiting for
* the timer callback to finish. Drop expiry_lock and reacquire it. That
* allows the waiter to acquire the lock and make progress.
*/
static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
unsigned long flags)
{
if (atomic_read(&cpu_base->timer_waiters)) {
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
spin_unlock(&cpu_base->softirq_expiry_lock);
spin_lock(&cpu_base->softirq_expiry_lock);
raw_spin_lock_irq(&cpu_base->lock);
}
}
#ifdef CONFIG_SMP
static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
{
return base == &migration_base;
}
#else
static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
{
return false;
}
#endif
/*
* This function is called on PREEMPT_RT kernels when the fast path
* deletion of a timer failed because the timer callback function was
* running.
*
* This prevents priority inversion: if the soft irq thread is preempted
* in the middle of a timer callback, then calling hrtimer_cancel() can
* lead to two issues:
*
* - If the caller is on a remote CPU then it has to spin wait for the timer
* handler to complete. This can result in unbound priority inversion.
*
* - If the caller originates from the task which preempted the timer
* handler on the same CPU, then spin waiting for the timer handler to
* complete is never going to end.
*/
void hrtimer_cancel_wait_running(const struct hrtimer *timer)
{
/* Lockless read. Prevent the compiler from reloading it below */
struct hrtimer_clock_base *base = READ_ONCE(timer->base);
/*
* Just relax if the timer expires in hard interrupt context or if
* it is currently on the migration base.
*/
if (!timer->is_soft || is_migration_base(base)) {
cpu_relax();
return;
}
/*
* Mark the base as contended and grab the expiry lock, which is
* held by the softirq across the timer callback. Drop the lock
* immediately so the softirq can expire the next timer. In theory
* the timer could already be running again, but that's more than
* unlikely and just causes another wait loop.
*/
atomic_inc(&base->cpu_base->timer_waiters);
spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
atomic_dec(&base->cpu_base->timer_waiters);
spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
}
#else
static inline void
hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
unsigned long flags) { }
#endif
/**
* hrtimer_cancel - cancel a timer and wait for the handler to finish.
* @timer: the timer to be cancelled
*
* Returns:
* 0 when the timer was not active
* 1 when the timer was active
*/
int hrtimer_cancel(struct hrtimer *timer)
{
int ret;
do {
ret = hrtimer_try_to_cancel(timer);
if (ret < 0)
hrtimer_cancel_wait_running(timer);
} while (ret < 0);
return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
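/*
 * Illustrative sketch (not part of the original file): a one-shot timer
 * lifecycle built on hrtimer_setup()/hrtimer_start()/hrtimer_cancel().  The
 * function names and the 50ms timeout are invented for the example; the
 * hrtimer object is assumed to be owned by the caller.
 */
static enum hrtimer_restart example_oneshot_cb(struct hrtimer *timer)
{
        /* One-shot: do the work here and do not requeue the timer. */
        return HRTIMER_NORESTART;
}

static void __maybe_unused example_oneshot(struct hrtimer *timer)
{
        hrtimer_setup(timer, example_oneshot_cb, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL);
        hrtimer_start(timer, ms_to_ktime(50), HRTIMER_MODE_REL);

        /* ... later, wait for a possibly running callback and stop it: */
        hrtimer_cancel(timer);
}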
/**
* __hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
* @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
unsigned long flags;
ktime_t rem;
lock_hrtimer_base(timer, &flags);
if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
rem = hrtimer_expires_remaining_adjusted(timer);
else
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
#ifdef CONFIG_NO_HZ_COMMON
/**
* hrtimer_get_next_event - get the time until next expiry event
*
* Returns the next expiry time or KTIME_MAX if no timer is pending.
*/
u64 hrtimer_get_next_event(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
u64 expires = KTIME_MAX;
unsigned long flags;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!hrtimer_hres_active(cpu_base))
expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
return expires;
}
/**
* hrtimer_next_event_without - time until next expiry event w/o one timer
* @exclude: timer to exclude
*
* Returns the next expiry time over all timers except for the @exclude one or
* KTIME_MAX if none of them is pending.
*/
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
u64 expires = KTIME_MAX;
unsigned long flags;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (hrtimer_hres_active(cpu_base)) {
unsigned int active;
if (!cpu_base->softirq_activated) {
active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
expires = __hrtimer_next_event_base(cpu_base, exclude,
active, KTIME_MAX);
}
active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
expires = __hrtimer_next_event_base(cpu_base, exclude, active,
expires);
}
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
return expires;
}
#endif
static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
switch (clock_id) {
case CLOCK_MONOTONIC:
return HRTIMER_BASE_MONOTONIC;
case CLOCK_REALTIME:
return HRTIMER_BASE_REALTIME;
case CLOCK_BOOTTIME:
return HRTIMER_BASE_BOOTTIME;
case CLOCK_TAI:
return HRTIMER_BASE_TAI;
default:
WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
return HRTIMER_BASE_MONOTONIC;
}
}
static ktime_t __hrtimer_cb_get_time(clockid_t clock_id)
{
switch (clock_id) {
case CLOCK_MONOTONIC:
return ktime_get();
case CLOCK_REALTIME:
return ktime_get_real();
case CLOCK_BOOTTIME:
return ktime_get_boottime();
case CLOCK_TAI:
return ktime_get_clocktai();
default:
WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
return ktime_get();
}
}
ktime_t hrtimer_cb_get_time(const struct hrtimer *timer)
{
return __hrtimer_cb_get_time(timer->base->clockid);
}
EXPORT_SYMBOL_GPL(hrtimer_cb_get_time);
static void __hrtimer_setup(struct hrtimer *timer,
enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t clock_id, enum hrtimer_mode mode)
{
bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
struct hrtimer_cpu_base *cpu_base;
int base;
/*
* On PREEMPT_RT enabled kernels hrtimers which are not explicitly
* marked for hard interrupt expiry mode are moved into soft
* interrupt context for latency reasons and because the callbacks
* can invoke functions which might sleep on RT, e.g. spin_lock().
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
softtimer = true;
memset(timer, 0, sizeof(struct hrtimer));
cpu_base = raw_cpu_ptr(&hrtimer_bases);
/*
* POSIX magic: Relative CLOCK_REALTIME timers are not affected by
 * clock modifications, so they need to become CLOCK_MONOTONIC to
* ensure POSIX compliance.
*/
if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;
	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
	base += hrtimer_clockid_to_base(clock_id);
	timer->is_soft = softtimer;
timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
timer->base = &cpu_base->clock_base[base];
timerqueue_init(&timer->node);
if (WARN_ON_ONCE(!function))
ACCESS_PRIVATE(timer, function) = hrtimer_dummy_timeout;
else
		ACCESS_PRIVATE(timer, function) = function;
}
/**
* hrtimer_setup - initialize a timer to the given clock
* @timer: the timer to be initialized
* @function: the callback function
* @clock_id: the clock to be used
* @mode: The modes which are relevant for initialization:
* HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
* HRTIMER_MODE_REL_SOFT
*
* The PINNED variants of the above can be handed in,
* but the PINNED bit is ignored as pinning happens
* when the hrtimer is started
*/
void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t clock_id, enum hrtimer_mode mode)
{
debug_setup(timer, clock_id, mode);
__hrtimer_setup(timer, function, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_setup);
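/*
 * Illustrative sketch (not part of the original source): minimal setup and
 * start of a relative monotonic timer. The callback name and the 100ms
 * delay are made-up example values.
 *
 *	static enum hrtimer_restart my_timeout(struct hrtimer *t)
 *	{
 *		// e.g. wake up a waiter, then do not rearm the timer
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_setup(&timer, my_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_start(&timer, ms_to_ktime(100), HRTIMER_MODE_REL);
 */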
/**
* hrtimer_setup_on_stack - initialize a timer on stack memory
* @timer: The timer to be initialized
* @function: the callback function
* @clock_id: The clock to be used
* @mode: The timer mode
*
* Similar to hrtimer_setup(), except that this one must be used if struct hrtimer is in stack
* memory.
*/
void hrtimer_setup_on_stack(struct hrtimer *timer,
enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t clock_id, enum hrtimer_mode mode)
{
debug_setup_on_stack(timer, clock_id, mode);
__hrtimer_setup(timer, function, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_setup_on_stack);
/*
 * A timer is active when it is enqueued into the rbtree, when the
 * callback function is running, or when it is in the state of being
 * migrated to another cpu.
*
* It is important for this function to not return a false negative.
*/
bool hrtimer_active(const struct hrtimer *timer)
{
struct hrtimer_clock_base *base;
unsigned int seq;
do {
		base = READ_ONCE(timer->base);
		seq = raw_read_seqcount_begin(&base->seq);
		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;
	} while (read_seqcount_retry(&base->seq, seq) ||
base != READ_ONCE(timer->base));
return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);
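/*
 * Illustrative sketch (not part of the original source): because
 * hrtimer_active() never returns a false negative, it is suitable for
 * teardown sanity checks such as:
 *
 *	WARN_ON(hrtimer_active(&dev->poll_timer));
 *
 * where dev->poll_timer is a hypothetical timer that should already have
 * been cancelled.
 */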
/*
* The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
* distinct sections:
*
* - queued: the timer is queued
 * - callback: the timer is being run
* - post: the timer is inactive or (re)queued
*
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section; if anything changed while we looked at it, we retry.
* This includes timer->base changing because sequence numbers alone are
* insufficient for that.
*
* The sequence numbers are required because otherwise we could still observe
* a false negative if the read side got smeared over multiple consecutive
* __run_hrtimer() invocations.
*/
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer_clock_base *base,
struct hrtimer *timer, ktime_t *now,
unsigned long flags) __must_hold(&cpu_base->lock)
{
enum hrtimer_restart (*fn)(struct hrtimer *);
bool expires_in_hardirq;
int restart;
lockdep_assert_held(&cpu_base->lock);
debug_deactivate(timer);
base->running = timer;
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
* hrtimer_active() cannot observe base->running == NULL &&
* timer->state == INACTIVE.
*/
raw_write_seqcount_barrier(&base->seq);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
fn = ACCESS_PRIVATE(timer, function);
/*
* Clear the 'is relative' flag for the TIME_LOW_RES case. If the
* timer is restarted with a period then it becomes an absolute
 * timer. If it's not restarted it does not matter.
*/
if (IS_ENABLED(CONFIG_TIME_LOW_RES))
timer->is_rel = false;
/*
* The timer is marked as running in the CPU base, so it is
* protected against migration to a different CPU even if the lock
* is dropped.
*/
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
trace_hrtimer_expire_entry(timer, now);
expires_in_hardirq = lockdep_hrtimer_enter(timer);
restart = fn(timer);
lockdep_hrtimer_exit(expires_in_hardirq);
trace_hrtimer_expire_exit(timer);
raw_spin_lock_irq(&cpu_base->lock);
/*
* Note: We clear the running state after enqueue_hrtimer and
* we do not reprogram the event hardware. Happens either in
* hrtimer_start_range_ns() or in hrtimer_interrupt()
*
* Note: Because we dropped the cpu_base->lock above,
* hrtimer_start_range_ns() can have popped in and enqueued the timer
* for us already.
*/
if (restart != HRTIMER_NORESTART &&
!(timer->state & HRTIMER_STATE_ENQUEUED))
enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
 * hrtimer_active() cannot observe base->running == NULL &&
* timer->state == INACTIVE.
*/
raw_write_seqcount_barrier(&base->seq);
WARN_ON_ONCE(base->running != timer);
base->running = NULL;
}
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
unsigned long flags, unsigned int active_mask)
{
struct hrtimer_clock_base *base;
unsigned int active = cpu_base->active_bases & active_mask;
for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *node;
ktime_t basenow;
basenow = ktime_add(now, base->offset);
while ((node = timerqueue_getnext(&base->active))) {
struct hrtimer *timer;
timer = container_of(node, struct hrtimer, node);
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
* earliest interrupt after their soft expiration.
* This allows us to avoid using a Priority Search
* Tree, which can answer a stabbing query for
* overlapping intervals and instead use the simple
* BST we already have.
* We don't add extra wakeups by delaying timers that
* are right-of a not yet expired timer, because that
* timer will have to trigger a wakeup anyway.
*/
if (basenow < hrtimer_get_softexpires_tv64(timer))
break;
__run_hrtimer(cpu_base, base, timer, &basenow, flags);
if (active_mask == HRTIMER_ACTIVE_SOFT)
hrtimer_sync_wait_running(cpu_base, flags);
}
}
}
static __latent_entropy void hrtimer_run_softirq(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
unsigned long flags;
ktime_t now;
hrtimer_cpu_base_lock_expiry(cpu_base);
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
cpu_base->softirq_activated = 0;
hrtimer_update_softirq_timer(cpu_base, true);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
hrtimer_cpu_base_unlock_expiry(cpu_base);
}
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* High resolution timer interrupt
* Called with interrupts disabled
*/
void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
unsigned long flags;
int retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event = KTIME_MAX;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
cpu_base->in_hrtirq = 1;
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
* the migration code. This does not affect enqueueing of
* timers which run their callback and need to be requeued on
* this CPU.
*/
cpu_base->expires_next = KTIME_MAX;
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
raise_timer_softirq(HRTIMER_SOFTIRQ);
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
/* Reevaluate the clock bases for the [soft] next expiry */
expires_next = hrtimer_update_next_event(cpu_base);
/*
* Store the new expiry value so the migration code can verify
* against it.
*/
cpu_base->expires_next = expires_next;
cpu_base->in_hrtirq = 0;
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
/* Reprogramming necessary ? */
if (!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
return;
}
/*
* The next timer was already expired due to:
* - tracing
* - long lasting callbacks
* - being scheduled away when running in a VM
*
* We need to prevent that we loop forever in the hrtimer
* interrupt routine. We give it 3 attempts to avoid
* overreacting on some spurious event.
*
* Acquire base lock for updating the offsets and retrieving
* the current time.
*/
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
goto retry;
/*
* Give the system a chance to do something else than looping
* here. We stored the entry time, so we know exactly how long
* we spent here. We schedule the next event this amount of
* time away.
*/
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
delta = ktime_sub(now, entry_time);
if ((unsigned int)delta > cpu_base->max_hang_time)
cpu_base->max_hang_time = (unsigned int) delta;
/*
* Limit it to a sensible value as we enforce a longer
* delay. Give the CPU at least 100ms to catch up.
*/
if (delta > 100 * NSEC_PER_MSEC)
expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
else
expires_next = ktime_add(now, delta);
tick_program_event(expires_next, 1);
pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
* Called from run_local_timers in hardirq context every jiffy
*/
void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
unsigned long flags;
ktime_t now;
if (hrtimer_hres_active(cpu_base))
return;
/*
* This _is_ ugly: We have to check periodically, whether we
* can switch to highres and / or nohz mode. The clocksource
* switch happens with xtime_lock held. Notification from
* there only sets the check bit in the tick_oneshot code,
* otherwise we might deadlock vs. xtime_lock.
*/
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
hrtimer_switch_to_hres();
return;
}
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
raise_timer_softirq(HRTIMER_SOFTIRQ);
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
/*
* Sleep related functions:
*/
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
struct hrtimer_sleeper *t =
container_of(timer, struct hrtimer_sleeper, timer);
struct task_struct *task = t->task;
t->task = NULL;
if (task)
wake_up_process(task);
return HRTIMER_NORESTART;
}
/**
* hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
* @sl: sleeper to be started
* @mode: timer mode abs/rel
*
* Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
* to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
*/
void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
enum hrtimer_mode mode)
{
/*
* Make the enqueue delivery mode check work on RT. If the sleeper
* was initialized for hard interrupt delivery, force the mode bit.
* This is a special case for hrtimer_sleepers because
* __hrtimer_setup_sleeper() determines the delivery mode on RT so the
* fiddling with this decision is avoided at the call sites.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
mode |= HRTIMER_MODE_HARD;
hrtimer_start_expires(&sl->timer, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
static void __hrtimer_setup_sleeper(struct hrtimer_sleeper *sl,
clockid_t clock_id, enum hrtimer_mode mode)
{
/*
* On PREEMPT_RT enabled kernels hrtimers which are not explicitly
* marked for hard interrupt expiry mode are moved into soft
* interrupt context either for latency reasons or because the
* hrtimer callback takes regular spinlocks or invokes other
* functions which are not suitable for hard interrupt context on
* PREEMPT_RT.
*
* The hrtimer_sleeper callback is RT compatible in hard interrupt
* context, but there is a latency concern: Untrusted userspace can
* spawn many threads which arm timers for the same expiry time on
* the same CPU. That causes a latency spike due to the wakeup of
* a gazillion threads.
*
* OTOH, privileged real-time user space applications rely on the
* low latency of hard interrupt wakeups. If the current task is in
* a real-time scheduling class, mark the mode for hard interrupt
* expiry.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT))
mode |= HRTIMER_MODE_HARD;
}
__hrtimer_setup(&sl->timer, hrtimer_wakeup, clock_id, mode);
sl->task = current;
}
/**
* hrtimer_setup_sleeper_on_stack - initialize a sleeper in stack memory
* @sl: sleeper to be initialized
* @clock_id: the clock to be used
* @mode: timer mode abs/rel
*/
void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl,
clockid_t clock_id, enum hrtimer_mode mode)
{
debug_setup_on_stack(&sl->timer, clock_id, mode);
__hrtimer_setup_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_setup_sleeper_on_stack);
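/*
 * Illustrative sketch (not part of the original source): the canonical
 * sleeper pattern, essentially what do_nanosleep() below does for a
 * relative timeout:
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires(&t.timer, timeout);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
 *	if (t.task)
 *		schedule();
 *	hrtimer_cancel(&t.timer);
 *	__set_current_state(TASK_RUNNING);
 *	destroy_hrtimer_on_stack(&t.timer);
 */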
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
switch(restart->nanosleep.type) {
#ifdef CONFIG_COMPAT_32BIT_TIME
case TT_COMPAT:
if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
return -EFAULT;
break;
#endif
case TT_NATIVE:
if (put_timespec64(ts, restart->nanosleep.rmtp))
return -EFAULT;
break;
default:
BUG();
}
return -ERESTART_RESTARTBLOCK;
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
struct restart_block *restart;
do {
set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
hrtimer_sleeper_start_expires(t, mode);
if (likely(t->task))
schedule();
hrtimer_cancel(&t->timer);
mode = HRTIMER_MODE_ABS;
} while (t->task && !signal_pending(current));
__set_current_state(TASK_RUNNING);
if (!t->task)
return 0;
	restart = &current->restart_block;
if (restart->nanosleep.type != TT_NONE) {
ktime_t rem = hrtimer_expires_remaining(&t->timer);
struct timespec64 rmt;
if (rem <= 0)
return 0;
rmt = ktime_to_timespec64(rem);
return nanosleep_copyout(restart, &rmt);
}
return -ERESTART_RESTARTBLOCK;
}
static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
struct hrtimer_sleeper t;
int ret;
hrtimer_setup_sleeper_on_stack(&t, restart->nanosleep.clockid, HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
destroy_hrtimer_on_stack(&t.timer);
return ret;
}
long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
const clockid_t clockid)
{
struct restart_block *restart;
struct hrtimer_sleeper t;
int ret = 0;
hrtimer_setup_sleeper_on_stack(&t, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns);
ret = do_nanosleep(&t, mode);
if (ret != -ERESTART_RESTARTBLOCK)
goto out;
/* Absolute timers do not update the rmtp value and restart: */
if (mode == HRTIMER_MODE_ABS) {
ret = -ERESTARTNOHAND;
goto out;
}
	restart = &current->restart_block;
restart->nanosleep.clockid = t.timer.base->clockid;
restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
destroy_hrtimer_on_stack(&t.timer);
return ret;
}
#ifdef CONFIG_64BIT
SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
struct __kernel_timespec __user *, rmtp)
{
struct timespec64 tu;
if (get_timespec64(&tu, rqtp))
return -EFAULT;
if (!timespec64_valid(&tu))
return -EINVAL;
current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
current->restart_block.nanosleep.rmtp = rmtp;
return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
CLOCK_MONOTONIC);
}
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
struct old_timespec32 __user *, rmtp)
{
struct timespec64 tu;
if (get_old_timespec32(&tu, rqtp))
return -EFAULT;
if (!timespec64_valid(&tu))
return -EINVAL;
current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
current->restart_block.nanosleep.compat_rmtp = rmtp;
return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
CLOCK_MONOTONIC);
}
#endif
/*
* Functions related to boot-time initialization:
*/
int hrtimers_prepare_cpu(unsigned int cpu)
{
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
clock_b->cpu_base = cpu_base;
seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
timerqueue_init_head(&clock_b->active);
}
cpu_base->cpu = cpu;
hrtimer_cpu_base_init_expiry_lock(cpu_base);
return 0;
}
int hrtimers_cpu_starting(unsigned int cpu)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
/* Clear out any left over state from a CPU down operation */
cpu_base->active_bases = 0;
cpu_base->hres_active = 0;
cpu_base->hang_detected = 0;
cpu_base->next_timer = NULL;
cpu_base->softirq_next_timer = NULL;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->online = 1;
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
struct hrtimer_clock_base *new_base)
{
struct hrtimer *timer;
struct timerqueue_node *node;
while ((node = timerqueue_getnext(&old_base->active))) {
timer = container_of(node, struct hrtimer, node);
BUG_ON(hrtimer_callback_running(timer));
debug_deactivate(timer);
/*
* Mark it as ENQUEUED not INACTIVE otherwise the
* timer could be seen as !active and just vanish away
* under us on another CPU
*/
__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
timer->base = new_base;
/*
* Enqueue the timers on the new cpu. This does not
* reprogram the event device in case the timer
* expires before the earliest on this CPU, but we run
* hrtimer_interrupt after we migrated everything to
* sort out already expired timers and reprogram the
* event device.
*/
enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
}
}
int hrtimers_cpu_dying(unsigned int dying_cpu)
{
int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER));
struct hrtimer_cpu_base *old_base, *new_base;
old_base = this_cpu_ptr(&hrtimer_bases);
new_base = &per_cpu(hrtimer_bases, ncpu);
/*
* The caller is globally serialized and nobody else
 * takes two locks at once, so deadlock is not possible.
*/
raw_spin_lock(&old_base->lock);
raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
/* Tell the other CPU to retrigger the next event */
smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
raw_spin_unlock(&new_base->lock);
old_base->online = 0;
raw_spin_unlock(&old_base->lock);
return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
void __init hrtimers_init(void)
{
hrtimers_prepare_cpu(smp_processor_id());
hrtimers_cpu_starting(smp_processor_id());
open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
#include <linux/pid_types.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/wait.h>
/*
* What is struct pid?
*
* A struct pid is the kernel's internal notion of a process identifier.
* It refers to individual tasks, process groups, and sessions. While
* there are processes attached to it the struct pid lives in a hash
* table, so it and then the processes that it refers to can be found
* quickly from the numeric pid value. The attached processes may be
* quickly accessed by following pointers from struct pid.
*
* Storing pid_t values in the kernel and referring to them later has a
* problem. The process originally with that pid may have exited and the
* pid allocator wrapped, and another process could have come along
* and been assigned that pid.
*
* Referring to user space processes by holding a reference to struct
* task_struct has a problem. When the user space process exits
* the now useless task_struct is still kept. A task_struct plus a
* stack consumes around 10K of low kernel memory. More precisely
* this is THREAD_SIZE + sizeof(struct task_struct). By comparison
* a struct pid is about 64 bytes.
*
* Holding a reference to struct pid solves both of these problems.
* It is small so holding a reference does not consume a lot of
* resources, and since a new struct pid is allocated when the numeric pid
* value is reused (when pids wrap around) we don't mistakenly refer to new
* processes.
*/
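/*
 * Illustrative sketch (not part of the original header): holding on to a
 * task by pid reference instead of by task_struct. The helpers used here
 * are declared further down in this header; put_task_struct() comes from
 * <linux/sched/task.h>.
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	...
 *	struct task_struct *t = get_pid_task(pid, PIDTYPE_PID);
 *	if (t) {
 *		// the task still exists; use it, then drop the reference
 *		put_task_struct(t);
 *	}
 *	put_pid(pid);
 */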
/*
* struct upid is used to get the id of the struct pid, as it is
 * seen in a particular namespace. Later the struct pid is found with
* find_pid_ns() using the int nr and struct pid_namespace *ns.
*/
#define RESERVED_PIDS 300
struct pidfs_attr;
struct upid {
int nr;
struct pid_namespace *ns;
};
struct pid {
refcount_t count;
unsigned int level;
spinlock_t lock;
struct {
u64 ino;
struct rb_node pidfs_node;
struct dentry *stashed;
struct pidfs_attr *attr;
};
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
struct hlist_head inodes;
/* wait queue for pidfd notifications */
wait_queue_head_t wait_pidfd;
struct rcu_head rcu;
struct upid numbers[];
};
extern seqcount_spinlock_t pidmap_lock_seq;
extern struct pid init_struct_pid;
struct file;
struct pid *pidfd_pid(const struct file *file);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file);
void do_notify_pidfd(struct task_struct *task);
static inline struct pid *get_pid(struct pid *pid)
{
	if (pid)
		refcount_inc(&pid->count);
return pid;
}
extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
static inline bool pid_has_task(struct pid *pid, enum pid_type type)
{
return !hlist_empty(&pid->tasks[type]);
}
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
/*
* these helpers must be called with the tasklist_lock write-held.
*/
extern void attach_pid(struct task_struct *task, enum pid_type);
void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type);
void change_pid(struct pid **pids, struct task_struct *task, enum pid_type,
struct pid *pid);
extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
/*
* look up a PID in the hash table. Must be called with the tasklist_lock
* or rcu_read_lock() held.
*
* find_pid_ns() finds the pid in the namespace specified
* find_vpid() finds the pid by its virtual id, i.e. in the current namespace
*
 * see also find_task_by_vpid() in include/linux/sched.h
*/
extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
extern struct pid *find_vpid(int nr);
/*
 * Lookup a PID in the hash table, and return with its count elevated.
*/
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
size_t set_tid_size);
extern void free_pid(struct pid *pid);
void free_pids(struct pid **pids);
extern void disable_pid_allocation(struct pid_namespace *ns);
/*
* ns_of_pid() returns the pid namespace in which the specified pid was
* allocated.
*
* NOTE:
* ns_of_pid() is expected to be called for a process (task) that has
 * an attached 'struct pid' (see attach_pid(), detach_pid()), i.e. @pid
* is expected to be non-NULL. If @pid is NULL, caller should handle
* the resulting NULL pid-ns.
*/
static inline struct pid_namespace *ns_of_pid(struct pid *pid)
{
struct pid_namespace *ns = NULL;
	if (pid)
		ns = pid->numbers[pid->level].ns;
return ns;
}
/*
* is_child_reaper returns true if the pid is the init process
* of the current namespace. As this one could be checked before
* pid_ns->child_reaper is assigned in copy_process, we check
* with the pid number.
*/
static inline bool is_child_reaper(struct pid *pid)
{
return pid->numbers[pid->level].nr == 1;
}
/*
* the helpers to get the pid's id seen from different namespaces
*
* pid_nr() : global id, i.e. the id seen from the init namespace;
* pid_vnr() : virtual id, i.e. the id seen from the pid namespace of
* current.
* pid_nr_ns() : id seen from the ns specified.
*
* see also task_xid_nr() etc in include/linux/sched.h
*/
static inline pid_t pid_nr(struct pid *pid)
{
pid_t nr = 0;
if (pid)
nr = pid->numbers[0].nr;
return nr;
}
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
pid_t pid_vnr(struct pid *pid);
#define do_each_pid_task(pid, type, task) \
do { \
if ((pid) != NULL) \
hlist_for_each_entry_rcu((task), \
&(pid)->tasks[type], pid_links[type]) {
/*
* Both old and new leaders may be attached to
* the same pid in the middle of de_thread().
*/
#define while_each_pid_task(pid, type, task) \
if (type == PIDTYPE_PID) \
break; \
} \
} while (0)
#define do_each_pid_thread(pid, type, task) \
do_each_pid_task(pid, type, task) { \
struct task_struct *tg___ = task; \
for_each_thread(tg___, task) {
#define while_each_pid_thread(pid, type, task) \
} \
task = tg___; \
} while_each_pid_task(pid, type, task)
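/*
 * Illustrative sketch (not part of the original header): walking every task
 * attached to a pid for a given type, e.g. all members of a process group.
 * The caller must hold rcu_read_lock() or tasklist_lock, as noted for the
 * lookup helpers above; pgrp is a hypothetical struct pid pointer.
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
 *		// inspect or signal p
 *	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
 *	rcu_read_unlock();
 */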
static inline struct pid *task_pid(struct task_struct *task)
{
return task->thread_pid;
}
/*
* the helpers to get the task's different pids as they are seen
* from various namespaces
*
* task_xid_nr() : global id, i.e. the id seen from the init namespace;
* task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
* current.
* task_xid_nr_ns() : id seen from the ns specified;
*
* see also pid_nr() etc in include/linux/pid.h
*/
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}
static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
return tsk->tgid;
}
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
*
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
*
* Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(const struct task_struct *p)
{
return p->thread_pid != NULL;
}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}
static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
static inline pid_t task_session_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();
return pid;
}
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
/**
* is_global_init - check if a task structure is init. Since init
* is free to have sub-threads we need to check tgid.
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
*
* Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
return task_tgid_nr(tsk) == 1;
}
#endif /* _LINUX_PID_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
*
* Copyright (C) 2004 Paul Mackerras, IBM Corp.
*/
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define ex_to_insn(x) ((x)->insn)
#else
static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
{
return (unsigned long)&x->insn + x->insn;
}
#endif
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex NULL
#else
static void swap_ex(void *a, void *b, int size)
{
struct exception_table_entry *x = a, *y = b, tmp;
int delta = b - a;
tmp = *x;
x->insn = y->insn + delta;
y->insn = tmp.insn - delta;
#ifdef swap_ex_entry_fixup
swap_ex_entry_fixup(x, y, tmp, delta);
#else
x->fixup = y->fixup + delta;
y->fixup = tmp.fixup - delta;
#endif
}
#endif /* ARCH_HAS_RELATIVE_EXTABLE */
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*/
static int cmp_ex_sort(const void *a, const void *b)
{
const struct exception_table_entry *x = a, *y = b;
/* avoid overflow */
if (ex_to_insn(x) > ex_to_insn(y))
return 1;
if (ex_to_insn(x) < ex_to_insn(y))
return -1;
return 0;
}
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
cmp_ex_sort, swap_ex);
}
#ifdef CONFIG_MODULES
/*
 * If the exception table is sorted, any entries referring to the module
 * init will be at the beginning or the end.
*/
void trim_init_extable(struct module *m)
{
/*trim the beginning*/
while (m->num_exentries &&
within_module_init(ex_to_insn(&m->extable[0]), m)) {
m->extable++;
m->num_exentries--;
}
/*trim the end*/
while (m->num_exentries &&
within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]),
m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
static int cmp_ex_search(const void *key, const void *elt)
{
const struct exception_table_entry *_elt = elt;
unsigned long _key = *(unsigned long *)key;
/* avoid overflow */
	if (_key > ex_to_insn(_elt))
		return 1;
	if (_key < ex_to_insn(_elt))
		return -1;
return 0;
}
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
* or NULL if none is found.
* We use a binary search, and thus we assume that the table is
* already sorted.
*/
const struct exception_table_entry *
search_extable(const struct exception_table_entry *base,
const size_t num,
unsigned long value)
{
return bsearch(&value, base, num,
sizeof(struct exception_table_entry), cmp_ex_search);
}
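/*
 * Illustrative sketch (not part of the original source): a simplified
 * fault-fixup flow built on search_extable(). Real architectures go through
 * search_exception_tables(), which also consults module tables, and the
 * field accesses below assume the absolute (!ARCH_HAS_RELATIVE_EXTABLE)
 * layout; "regs->ip" is an x86-style example.
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_extable(__start___ex_table,
 *			       __stop___ex_table - __start___ex_table,
 *			       regs->ip);
 *	if (fixup)
 *		regs->ip = fixup->fixup;
 */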
// SPDX-License-Identifier: GPL-2.0
/*
* linux/kernel/capability.c
*
* Copyright (C) 1997 Andrew Main <zefram@fysh.org>
*
* Integrated into 2.1.97+, Andrew G. Morgan <morgan@kernel.org>
* 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
int file_caps_enabled = 1;
static int __init file_caps_disable(char *str)
{
file_caps_enabled = 0;
return 1;
}
__setup("no_file_caps", file_caps_disable);
#ifdef CONFIG_MULTIUSER
/*
* More recent versions of libcap are available from:
*
* http://www.kernel.org/pub/linux/libs/security/linux-privs/
*/
static void warn_legacy_capability_use(void)
{
pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n",
current->comm);
}
/*
* Version 2 capabilities worked fine, but the linux/capability.h file
* that accompanied their introduction encouraged their use without
* the necessary user-space source code changes. As such, we have
* created a version 3 with equivalent functionality to version 2, but
* with a header change to protect legacy source code from using
* version 2 when it wanted to use version 1. If your system has code
* that trips the following warning, it is using version 2 specific
* capabilities and may be doing so insecurely.
*
* The remedy is to either upgrade your version of libcap (to 2.10+,
* if the application is linked against it), or recompile your
* application with modern kernel headers and this warning will go
* away.
*/
static void warn_deprecated_v2(void)
{
pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n",
current->comm);
}
/*
* Version check. Return the number of u32s in each capability flag
* array, or a negative value on error.
*/
static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
{
__u32 version;
if (get_user(version, &header->version))
		return -EFAULT;
	switch (version) {
case _LINUX_CAPABILITY_VERSION_1:
		warn_legacy_capability_use();
		*tocopy = _LINUX_CAPABILITY_U32S_1;
		break;
case _LINUX_CAPABILITY_VERSION_2:
warn_deprecated_v2();
fallthrough; /* v3 is otherwise equivalent to v2 */
case _LINUX_CAPABILITY_VERSION_3:
		*tocopy = _LINUX_CAPABILITY_U32S_3;
		break;
default:
if (put_user((u32)_KERNEL_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
}
return 0;
}
/*
* The only thing that can change the capabilities of the current
* process is the current process. As such, we can't be in this code
* at the same time as we are in the process of setting capabilities
* in this process. The net result is that we can limit our use of
* locks to when we are reading the caps of another process.
*/
static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
kernel_cap_t *pIp, kernel_cap_t *pPp)
{
int ret;
if (pid && (pid != task_pid_vnr(current))) {
const struct task_struct *target;
		rcu_read_lock();
		target = find_task_by_vpid(pid);
		if (!target)
ret = -ESRCH;
else
ret = security_capget(target, pEp, pIp, pPp);
rcu_read_unlock();
} else
ret = security_capget(current, pEp, pIp, pPp);
return ret;
}
/**
* sys_capget - get the capabilities of a given process.
* @header: pointer to struct that contains capability version and
* target pid data
* @dataptr: pointer to struct that contains the effective, permitted,
* and inheritable capabilities that are returned
*
* Returns 0 on success and < 0 on error.
*/
SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
{
int ret = 0;
pid_t pid;
unsigned tocopy;
kernel_cap_t pE, pI, pP;
struct __user_cap_data_struct kdata[2];
ret = cap_validate_magic(header, &tocopy);
	if ((dataptr == NULL) || (ret != 0))
		return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret;
	if (get_user(pid, &header->pid))
		return -EFAULT;
	if (pid < 0)
		return -EINVAL;
	ret = cap_get_target_pid(pid, &pE, &pI, &pP);
	if (ret)
		return ret;
/*
* Annoying legacy format with 64-bit capabilities exposed
* as two sets of 32-bit fields, so we need to split the
* capability values up.
*/
kdata[0].effective = pE.val; kdata[1].effective = pE.val >> 32;
kdata[0].permitted = pP.val; kdata[1].permitted = pP.val >> 32;
kdata[0].inheritable = pI.val; kdata[1].inheritable = pI.val >> 32;
/*
 * Note that in the case tocopy < _KERNEL_CAPABILITY_U32S,
* we silently drop the upper capabilities here. This
* has the effect of making older libcap
* implementations implicitly drop upper capability
* bits when they perform a: capget/modify/capset
* sequence.
*
* This behavior is considered fail-safe
* behavior. Upgrading the application to a newer
* version of libcap will enable access to the newer
* capabilities.
*
* An alternative would be to return an error here
* (-ERANGE), but that causes legacy applications to
* unexpectedly fail; the capget/modify/capset aborts
* before modification is attempted and the application
* fails.
*/
if (copy_to_user(dataptr, kdata, tocopy * sizeof(kdata[0])))
return -EFAULT;
return 0;
}
static kernel_cap_t mk_kernel_cap(u32 low, u32 high)
{
return (kernel_cap_t) { (low | ((u64)high << 32)) & CAP_VALID_MASK };
}
/**
* sys_capset - set capabilities for a process or (*) a group of processes
* @header: pointer to struct that contains capability version and
* target pid data
* @data: pointer to struct that contains the effective, permitted,
* and inheritable capabilities
*
 * Set capabilities for the current process only. The ability to set the
 * capabilities of any other process(es) has been deprecated and removed.
*
* The restrictions on setting capabilities are specified as:
*
* I: any raised capabilities must be a subset of the old permitted
* P: any raised capabilities must be a subset of the old permitted
* E: must be set to a subset of new permitted
*
* Returns 0 on success and < 0 on error.
*/
SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
{
struct __user_cap_data_struct kdata[2] = { { 0, }, };
unsigned tocopy, copybytes;
kernel_cap_t inheritable, permitted, effective;
struct cred *new;
int ret;
pid_t pid;
ret = cap_validate_magic(header, &tocopy);
if (ret != 0)
return ret;
if (get_user(pid, &header->pid))
return -EFAULT;
/* may only affect current now */
	if (pid != 0 && pid != task_pid_vnr(current))
		return -EPERM;
	copybytes = tocopy * sizeof(struct __user_cap_data_struct);
	if (copybytes > sizeof(kdata))
		return -EFAULT;
if (copy_from_user(&kdata, data, copybytes))
return -EFAULT;
effective = mk_kernel_cap(kdata[0].effective, kdata[1].effective);
permitted = mk_kernel_cap(kdata[0].permitted, kdata[1].permitted);
inheritable = mk_kernel_cap(kdata[0].inheritable, kdata[1].inheritable);
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = security_capset(new, current_cred(),
&effective, &inheritable, &permitted);
if (ret < 0)
goto error;
	audit_log_capset(new, current_cred());
	return commit_creds(new);
error:
abort_creds(new);
	return ret;
}
/**
* has_ns_capability - Does a task have a capability in a specific user ns
* @t: The task in question
* @ns: target user namespace
* @cap: The capability to be tested for
*
* Return true if the specified task has the given superior capability
* currently in effect to the specified user namespace, false if not.
*
* Note that this does not set PF_SUPERPRIV on the task.
*/
bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap)
{
int ret;
rcu_read_lock();
ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NONE);
rcu_read_unlock();
return (ret == 0);
}
/**
* has_ns_capability_noaudit - Does a task have a capability (unaudited)
* in a specific user ns.
* @t: The task in question
* @ns: target user namespace
* @cap: The capability to be tested for
*
* Return true if the specified task has the given superior capability
* currently in effect to the specified user namespace, false if not.
* Do not write an audit message for the check.
*
* Note that this does not set PF_SUPERPRIV on the task.
*/
bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap)
{
int ret;
rcu_read_lock();
ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NOAUDIT);
rcu_read_unlock();
return (ret == 0);
}
/**
* has_capability_noaudit - Does a task have a capability (unaudited) in the
* initial user ns
* @t: The task in question
* @cap: The capability to be tested for
*
* Return true if the specified task has the given superior capability
* currently in effect to init_user_ns, false if not. Don't write an
* audit message for the check.
*
* Note that this does not set PF_SUPERPRIV on the task.
*/
bool has_capability_noaudit(struct task_struct *t, int cap)
{
return has_ns_capability_noaudit(t, &init_user_ns, cap);
}
EXPORT_SYMBOL(has_capability_noaudit);
static bool ns_capable_common(struct user_namespace *ns,
int cap,
unsigned int opts)
{
int capable;
if (unlikely(!cap_valid(cap))) {
pr_crit("capable() called with invalid cap=%u\n", cap);
BUG();
}
capable = security_capable(current_cred(), ns, cap, opts);
	if (capable == 0) {
		current->flags |= PF_SUPERPRIV;
		return true;
}
return false;
}
/**
* ns_capable - Determine if the current task has a superior capability in effect
* @ns: The usernamespace we want the capability in
* @cap: The capability to be tested for
*
* Return true if the current task has the given superior capability currently
* available for use, false if not.
*
* This sets PF_SUPERPRIV on the task if the capability is available on the
* assumption that it's about to be used.
*/
bool ns_capable(struct user_namespace *ns, int cap)
{
return ns_capable_common(ns, cap, CAP_OPT_NONE);
}
EXPORT_SYMBOL(ns_capable);
/**
* ns_capable_noaudit - Determine if the current task has a superior capability
* (unaudited) in effect
* @ns: The usernamespace we want the capability in
* @cap: The capability to be tested for
*
* Return true if the current task has the given superior capability currently
* available for use, false if not.
*
* This sets PF_SUPERPRIV on the task if the capability is available on the
* assumption that it's about to be used.
*/
bool ns_capable_noaudit(struct user_namespace *ns, int cap)
{
return ns_capable_common(ns, cap, CAP_OPT_NOAUDIT);
}
EXPORT_SYMBOL(ns_capable_noaudit);
/**
* ns_capable_setid - Determine if the current task has a superior capability
* in effect, while signalling that this check is being done from within a
* setid or setgroups syscall.
* @ns: The usernamespace we want the capability in
* @cap: The capability to be tested for
*
* Return true if the current task has the given superior capability currently
* available for use, false if not.
*
* This sets PF_SUPERPRIV on the task if the capability is available on the
* assumption that it's about to be used.
*/
bool ns_capable_setid(struct user_namespace *ns, int cap)
{
return ns_capable_common(ns, cap, CAP_OPT_INSETID);
}
EXPORT_SYMBOL(ns_capable_setid);
/**
* capable - Determine if the current task has a superior capability in effect
* @cap: The capability to be tested for
*
* Return true if the current task has the given superior capability currently
* available for use, false if not.
*
* This sets PF_SUPERPRIV on the task if the capability is available on the
* assumption that it's about to be used.
*/
bool capable(int cap)
{
return ns_capable(&init_user_ns, cap);
}
EXPORT_SYMBOL(capable);
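/*
 * Illustrative sketch (not part of the original source): the common
 * privilege-check pattern in drivers and syscalls:
 *
 *	if (!capable(CAP_SYS_ADMIN))
 *		return -EPERM;
 *
 * Code dealing with namespaced objects should prefer ns_capable() with the
 * relevant user namespace instead of the init_user_ns check above.
 */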
#endif /* CONFIG_MULTIUSER */
/**
* file_ns_capable - Determine if the file's opener had a capability in effect
* @file: The file we want to check
* @ns: The usernamespace we want the capability in
* @cap: The capability to be tested for
*
* Return true if task that opened the file had a capability in effect
* when the file was opened.
*
* This does not set PF_SUPERPRIV because the caller may not
* actually be privileged.
*/
bool file_ns_capable(const struct file *file, struct user_namespace *ns,
int cap)
{
	if (WARN_ON_ONCE(!cap_valid(cap)))
		return false;
	if (security_capable(file->f_cred, ns, cap, CAP_OPT_NONE) == 0)
		return true;
return false;
}
EXPORT_SYMBOL(file_ns_capable);
/**
* privileged_wrt_inode_uidgid - Do capabilities in the namespace work over the inode?
* @ns: The user namespace in question
* @idmap: idmap of the mount @inode was found from
* @inode: The inode in question
*
* Return true if the inode uid and gid are within the namespace.
*/
bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
struct mnt_idmap *idmap,
const struct inode *inode)
{
return vfsuid_has_mapping(ns, i_uid_into_vfsuid(idmap, inode)) &&
vfsgid_has_mapping(ns, i_gid_into_vfsgid(idmap, inode));
}
/**
* capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
* @idmap: idmap of the mount @inode was found from
* @inode: The inode in question
* @cap: The capability in question
*
* Return true if the current task has the given capability targeted at
* its own user namespace and that the given inode's uid and gid are
* mapped into the current user namespace.
*/
bool capable_wrt_inode_uidgid(struct mnt_idmap *idmap,
const struct inode *inode, int cap)
{
struct user_namespace *ns = current_user_ns();
return ns_capable(ns, cap) &&
privileged_wrt_inode_uidgid(ns, idmap, inode);
}
EXPORT_SYMBOL(capable_wrt_inode_uidgid);
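/*
 * Illustrative sketch (not part of the original source): a filesystem-style
 * override check that only honours CAP_FOWNER when the inode's ids map into
 * the caller's user namespace:
 *
 *	if (!capable_wrt_inode_uidgid(idmap, inode, CAP_FOWNER))
 *		return -EPERM;
 */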
/**
* ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
* @tsk: The task that may be ptraced
* @ns: The user namespace to search for CAP_SYS_PTRACE in
*
* Return true if the task that is ptracing the current task had CAP_SYS_PTRACE
* in the specified user namespace.
*/
bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
{
int ret = 0; /* An absent tracer adds no restrictions */
const struct cred *cred;
rcu_read_lock();
cred = rcu_dereference(tsk->ptracer_cred);
if (cred)
ret = security_capable(cred, ns, CAP_SYS_PTRACE,
CAP_OPT_NOAUDIT);
rcu_read_unlock();
return (ret == 0);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/* Basic authentication token and access key management
*
* Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"
struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);
unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */
unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200; /* general key count quota */
unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
printk("__key_check: key %p {%08x} should be {%08x}\n",
key, key->magic, KEY_DEBUG_MAGIC);
BUG();
}
#endif
/*
* Get the key quota record for a user, allocating a new record if one doesn't
* already exist.
*/
struct key_user *key_user_lookup(kuid_t uid)
{
struct key_user *candidate = NULL, *user;
struct rb_node *parent, **p;
try_again:
parent = NULL;
p = &key_user_tree.rb_node;
spin_lock(&key_user_lock);
/* search the tree for a user record with a matching UID */
while (*p) {
parent = *p;
user = rb_entry(parent, struct key_user, node);
if (uid_lt(uid, user->uid))
p = &(*p)->rb_left;
else if (uid_gt(uid, user->uid))
p = &(*p)->rb_right;
else
goto found;
}
/* if we get here, we failed to find a match in the tree */
if (!candidate) {
/* allocate a candidate user record if we don't already have
* one */
spin_unlock(&key_user_lock);
user = NULL;
candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
if (unlikely(!candidate))
goto out;
/* the allocation may have scheduled, so we need to repeat the
* search lest someone else added the record whilst we were
* asleep */
goto try_again;
}
/* if we get here, then the user record still hadn't appeared on the
* second pass - so we use the candidate record */
refcount_set(&candidate->usage, 1);
atomic_set(&candidate->nkeys, 0);
atomic_set(&candidate->nikeys, 0);
candidate->uid = uid;
candidate->qnkeys = 0;
candidate->qnbytes = 0;
spin_lock_init(&candidate->lock);
mutex_init(&candidate->cons_lock);
rb_link_node(&candidate->node, parent, p);
rb_insert_color(&candidate->node, &key_user_tree);
spin_unlock(&key_user_lock);
user = candidate;
goto out;
/* okay - we found a user record for this UID */
found:
refcount_inc(&user->usage);
spin_unlock(&key_user_lock);
kfree(candidate);
out:
return user;
}
/*
* Dispose of a user structure
*/
void key_user_put(struct key_user *user)
{
if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
rb_erase(&user->node, &key_user_tree);
spin_unlock(&key_user_lock);
kfree(user);
}
}
/*
* Allocate a serial number for a key. These are assigned randomly to avoid
* security issues through covert channel problems.
*/
static inline void key_alloc_serial(struct key *key)
{
struct rb_node *parent, **p;
struct key *xkey;
/* propose a random serial number and look for a hole for it in the
* serial number tree */
do {
get_random_bytes(&key->serial, sizeof(key->serial));
key->serial >>= 1; /* negative numbers are not permitted */
} while (key->serial < 3);
spin_lock(&key_serial_lock);
attempt_insertion:
parent = NULL;
p = &key_serial_tree.rb_node;
while (*p) {
parent = *p;
xkey = rb_entry(parent, struct key, serial_node);
if (key->serial < xkey->serial)
p = &(*p)->rb_left;
else if (key->serial > xkey->serial)
p = &(*p)->rb_right;
else
goto serial_exists;
}
/* we've found a suitable hole - arrange for this key to occupy it */
rb_link_node(&key->serial_node, parent, p);
rb_insert_color(&key->serial_node, &key_serial_tree);
spin_unlock(&key_serial_lock);
return;
/* we found a key with the proposed serial number - walk the tree from
* that point looking for the next unused serial number */
serial_exists:
for (;;) {
key->serial++;
if (key->serial < 3) {
key->serial = 3;
goto attempt_insertion;
}
parent = rb_next(parent);
if (!parent)
goto attempt_insertion;
xkey = rb_entry(parent, struct key, serial_node);
if (key->serial < xkey->serial)
goto attempt_insertion;
}
}
/**
* key_alloc - Allocate a key of the specified type.
* @type: The type of key to allocate.
* @desc: The key description to allow the key to be searched out.
* @uid: The owner of the new key.
* @gid: The group ID for the new key's group permissions.
* @cred: The credentials specifying UID namespace.
* @perm: The permissions mask of the new key.
* @flags: Flags specifying quota properties.
* @restrict_link: Optional link restriction for new keyrings.
*
* Allocate a key of the specified type with the attributes given. The key is
* returned in an uninstantiated state and the caller needs to instantiate the
* key before returning.
*
* The restrict_link structure (if not NULL) will be freed when the
* keyring is destroyed, so it must be dynamically allocated.
*
* The user's key count quota is updated to reflect the creation of the key and
* the user's key data quota has the default for the key type reserved. The
* instantiation function should amend this as necessary. If insufficient
* quota is available, -EDQUOT will be returned.
*
* The LSM security modules can prevent a key being created, in which case
* -EACCES will be returned.
*
* Returns a pointer to the new key if successful and an error code otherwise.
*
* Note that the caller needs to ensure the key type isn't uninstantiated.
* Internally this can be done by locking key_types_sem. Externally, this can
* be done by either never unregistering the key type, or making sure
* key_alloc() calls don't race with module unloading.
*/
struct key *key_alloc(struct key_type *type, const char *desc,
kuid_t uid, kgid_t gid, const struct cred *cred,
key_perm_t perm, unsigned long flags,
struct key_restriction *restrict_link)
{
struct key_user *user = NULL;
struct key *key;
size_t desclen, quotalen;
int ret;
unsigned long irqflags;
key = ERR_PTR(-EINVAL);
if (!desc || !*desc)
goto error;
if (type->vet_description) {
ret = type->vet_description(desc);
if (ret < 0) {
key = ERR_PTR(ret);
goto error;
}
}
desclen = strlen(desc);
quotalen = desclen + 1 + type->def_datalen;
/* get hold of the key tracking for this user */
user = key_user_lookup(uid);
if (!user)
goto no_memory_1;
/* check that the user's quota permits allocation of another key and
* its description */
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock_irqsave(&user->lock, irqflags);
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
if (user->qnkeys + 1 > maxkeys ||
user->qnbytes + quotalen > maxbytes ||
user->qnbytes + quotalen < user->qnbytes)
goto no_quota;
}
user->qnkeys++;
user->qnbytes += quotalen;
spin_unlock_irqrestore(&user->lock, irqflags);
}
/* allocate and initialise the key and its description */
key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
if (!key)
goto no_memory_2;
key->index_key.desc_len = desclen;
key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->index_key.description)
goto no_memory_3;
key->index_key.type = type;
key_set_index_key(&key->index_key);
refcount_set(&key->usage, 1);
init_rwsem(&key->sem);
lockdep_set_class(&key->sem, &type->lock_class);
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
key->uid = uid;
key->gid = gid;
key->perm = perm;
key->expiry = TIME64_MAX;
key->restrict_link = restrict_link;
key->last_used_at = ktime_get_real_seconds();
key->flags |= 1 << KEY_FLAG_USER_ALIVE;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
if (flags & KEY_ALLOC_BUILT_IN)
key->flags |= 1 << KEY_FLAG_BUILTIN;
if (flags & KEY_ALLOC_UID_KEYRING)
key->flags |= 1 << KEY_FLAG_UID_KEYRING;
if (flags & KEY_ALLOC_SET_KEEP)
key->flags |= 1 << KEY_FLAG_KEEP;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
#endif
/* let the security module know about the key */
ret = security_key_alloc(key, cred, flags);
if (ret < 0)
goto security_error;
/* publish the key by giving it a serial number */
refcount_inc(&key->domain_tag->usage);
atomic_inc(&user->nkeys);
key_alloc_serial(key);
error:
return key;
security_error:
kfree(key->description);
kmem_cache_free(key_jar, key);
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
spin_lock_irqsave(&user->lock, irqflags);
user->qnkeys--;
user->qnbytes -= quotalen;
spin_unlock_irqrestore(&user->lock, irqflags);
}
key_user_put(user);
key = ERR_PTR(ret);
goto error;
no_memory_3:
kmem_cache_free(key_jar, key);
no_memory_2:
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
spin_lock_irqsave(&user->lock, irqflags);
user->qnkeys--;
user->qnbytes -= quotalen;
spin_unlock_irqrestore(&user->lock, irqflags);
}
key_user_put(user);
no_memory_1:
key = ERR_PTR(-ENOMEM);
goto error;
no_quota:
spin_unlock_irqrestore(&user->lock, irqflags);
key_user_put(user);
key = ERR_PTR(-EDQUOT);
goto error;
}
EXPORT_SYMBOL(key_alloc);
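/*
 * Illustrative usage sketch (not part of the original source): allocate a
 * quota-tracked "user" key for the current task. The description string is
 * hypothetical, and the key still has to be instantiated (see
 * key_instantiate_and_link() below) before it is useful.
 *
 *	struct key *key;
 *
 *	key = key_alloc(&key_type_user, "example:desc",
 *			current_fsuid(), current_fsgid(), current_cred(),
 *			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
 *			KEY_ALLOC_IN_QUOTA, NULL);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 */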
/**
* key_payload_reserve - Adjust data quota reservation for the key's payload
* @key: The key to make the reservation for.
* @datalen: The amount of data payload the caller now wants.
*
* Adjust the amount of the owning user's key data quota that a key reserves.
* If the amount is increased, then -EDQUOT may be returned if there isn't
* enough free quota available.
*
* If successful, 0 is returned.
*/
int key_payload_reserve(struct key *key, size_t datalen)
{
int delta = (int)datalen - key->datalen;
int ret = 0;
key_check(key);
/* contemplate the quota adjustment */
if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
unsigned long flags;
spin_lock_irqsave(&key->user->lock, flags);
if (delta > 0 &&
(key->user->qnbytes + delta > maxbytes ||
key->user->qnbytes + delta < key->user->qnbytes)) {
ret = -EDQUOT;
}
else {
key->user->qnbytes += delta;
key->quotalen += delta;
}
spin_unlock_irqrestore(&key->user->lock, flags);
}
/* change the recorded data length if that didn't generate an error */
if (ret == 0)
key->datalen = datalen;
return ret;
}
EXPORT_SYMBOL(key_payload_reserve);
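/*
 * Illustrative sketch (not part of the original source): a key type's
 * ->instantiate() op would typically adjust the quota before attaching its
 * payload. The "datalen" variable and payload pointer are hypothetical.
 *
 *	ret = key_payload_reserve(key, datalen);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_keypointer(key, payload);
 */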
/*
* Change the key state to being instantiated.
*/
static void mark_key_instantiated(struct key *key, int reject_error)
{
/* Commit the payload before setting the state; barrier versus
* key_read_state().
*/
smp_store_release(&key->state,
(reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
}
/*
* Instantiate a key and link it into the target keyring atomically. Must be
* called with the target keyring's semaphore writelocked. The target key's
* semaphore need not be locked as instantiation is serialised by
* key_construction_mutex.
*/
static int __key_instantiate_and_link(struct key *key,
struct key_preparsed_payload *prep,
struct key *keyring,
struct key *authkey,
struct assoc_array_edit **_edit)
{
int ret, awaken;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (key->state == KEY_IS_UNINSTANTIATED) {
/* instantiate the key */
ret = key->type->instantiate(key, prep);
if (ret == 0) {
/* mark the key as being instantiated */
atomic_inc(&key->user->nikeys);
mark_key_instantiated(key, 0);
notify_key(key, NOTIFY_KEY_INSTANTIATED, 0);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
/* and link it into the destination keyring */
if (keyring) {
if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
set_bit(KEY_FLAG_KEEP, &key->flags);
__key_link(keyring, key, _edit);
}
/* disable the authorisation key */
if (authkey)
key_invalidate(authkey);
if (prep->expiry != TIME64_MAX)
key_set_expiry(key, prep->expiry);
}
}
mutex_unlock(&key_construction_mutex);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret;
}
/**
* key_instantiate_and_link - Instantiate a key and link it into the keyring.
* @key: The key to instantiate.
* @data: The data to use to instantiate the key.
* @datalen: The length of @data.
* @keyring: Keyring to create a link in on success (or NULL).
* @authkey: The authorisation token permitting instantiation.
*
* Instantiate a key that's in the uninstantiated state using the provided data
* and, if successful, link it in to the destination keyring if one is
* supplied.
*
* If successful, 0 is returned, the authorisation token is revoked and anyone
* waiting for the key is woken up. If the key was already instantiated,
* -EBUSY will be returned.
*/
int key_instantiate_and_link(struct key *key,
const void *data,
size_t datalen,
struct key *keyring,
struct key *authkey)
{
struct key_preparsed_payload prep;
struct assoc_array_edit *edit = NULL;
int ret;
memset(&prep, 0, sizeof(prep));
prep.orig_description = key->description;
prep.data = data;
prep.datalen = datalen;
prep.quotalen = key->type->def_datalen;
prep.expiry = TIME64_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
goto error;
}
if (keyring) {
ret = __key_link_lock(keyring, &key->index_key);
if (ret < 0)
goto error;
ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret < 0)
goto error_link_end;
if (keyring->restrict_link && keyring->restrict_link->check) {
struct key_restriction *keyres = keyring->restrict_link;
ret = keyres->check(keyring, key->type, &prep.payload,
keyres->key);
if (ret < 0)
goto error_link_end;
}
}
ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
error_link_end:
if (keyring)
__key_link_end(keyring, &key->index_key, edit);
error:
if (key->type->preparse)
key->type->free_preparse(&prep);
return ret;
}
EXPORT_SYMBOL(key_instantiate_and_link);
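/*
 * Illustrative usage sketch (not part of the original source): instantiate a
 * freshly allocated key with a blob of data and link it into a keyring in one
 * step. The payload and destination keyring are hypothetical.
 *
 *	ret = key_instantiate_and_link(key, "secret", 6, dest_keyring, NULL);
 *	if (ret < 0)
 *		key_put(key);
 */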
/**
* key_reject_and_link - Negatively instantiate a key and link it into the keyring.
* @key: The key to instantiate.
* @timeout: The timeout on the negative key.
* @error: The error to return when the key is hit.
* @keyring: Keyring to create a link in on success (or NULL).
* @authkey: The authorisation token permitting instantiation.
*
* Negatively instantiate a key that's in the uninstantiated state and, if
* successful, set its timeout and stored error and link it in to the
* destination keyring if one is supplied. The key and any links to the key
* will be automatically garbage collected after the timeout expires.
*
* Negative keys are used to rate limit repeated request_key() calls by causing
* them to return the stored error code (typically ENOKEY) until the negative
* key expires.
*
* If successful, 0 is returned, the authorisation token is revoked and anyone
* waiting for the key is woken up. If the key was already instantiated,
* -EBUSY will be returned.
*/
int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
struct key *authkey)
{
struct assoc_array_edit *edit = NULL;
int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
if (keyring) {
if (keyring->restrict_link)
return -EPERM;
link_ret = __key_link_lock(keyring, &key->index_key);
if (link_ret == 0) {
link_ret = __key_link_begin(keyring, &key->index_key, &edit);
if (link_ret < 0)
__key_link_end(keyring, &key->index_key, edit);
}
}
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (key->state == KEY_IS_UNINSTANTIATED) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
mark_key_instantiated(key, -error);
notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
key_set_expiry(key, ktime_get_real_seconds() + timeout);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
ret = 0;
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
__key_link(keyring, key, &edit);
/* disable the authorisation key */
if (authkey)
key_invalidate(authkey);
}
mutex_unlock(&key_construction_mutex);
if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);
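/*
 * Illustrative usage sketch (not part of the original source): negatively
 * instantiate a key so that request_key() callers see -ENOKEY for the next
 * 60 seconds. The key, keyring and authorisation token are hypothetical.
 *
 *	ret = key_reject_and_link(key, 60, ENOKEY, dest_keyring, authkey);
 */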
/**
* key_put - Discard a reference to a key.
* @key: The key to discard a reference from.
*
* Discard a reference to a key, and when all the references are gone, we
* schedule the cleanup task to come and pull it out of the tree in process
* context at some later time.
*/
void key_put(struct key *key)
{
if (key) {
key_check(key);
if (refcount_dec_and_test(&key->usage)) {
unsigned long flags;
/* deal with the user's key tracking and quota */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
spin_lock_irqsave(&key->user->lock, flags);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock_irqrestore(&key->user->lock, flags);
}
/* Mark key as safe for GC after key->user done. */
clear_bit_unlock(KEY_FLAG_USER_ALIVE, &key->flags);
schedule_work(&key_gc_work);
}
}
}
EXPORT_SYMBOL(key_put);
/*
* Find a key by its serial number.
*/
struct key *key_lookup(key_serial_t id)
{
struct rb_node *n;
struct key *key;
spin_lock(&key_serial_lock);
/* search the tree for the specified key */
n = key_serial_tree.rb_node;
while (n) {
key = rb_entry(n, struct key, serial_node);
if (id < key->serial)
n = n->rb_left;
else if (id > key->serial)
n = n->rb_right;
else
goto found;
}
not_found:
key = ERR_PTR(-ENOKEY);
goto error;
found:
/* A key is allowed to be looked up only if someone still owns a
* reference to it - otherwise it's awaiting the gc.
*/
if (!refcount_inc_not_zero(&key->usage))
goto not_found;
error:
spin_unlock(&key_serial_lock);
return key;
}
EXPORT_SYMBOL(key_lookup);
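/*
 * Illustrative usage sketch (not part of the original source): look a key up
 * by serial number and drop the reference again when done.
 *
 *	struct key *key = key_lookup(id);
 *
 *	if (!IS_ERR(key)) {
 *		... use the key ...
 *		key_put(key);
 *	}
 */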
/*
* Find and lock the specified key type against removal.
*
* We return with the sem read-locked if successful. If the type wasn't
* available -ENOKEY is returned instead.
*/
struct key_type *key_type_lookup(const char *type)
{
struct key_type *ktype;
down_read(&key_types_sem);
/* look up the key type to see if it's one of the registered kernel
* types */
list_for_each_entry(ktype, &key_types_list, link) {
if (strcmp(ktype->name, type) == 0)
goto found_kernel_type;
}
up_read(&key_types_sem);
ktype = ERR_PTR(-ENOKEY);
found_kernel_type:
return ktype;
}
void key_set_timeout(struct key *key, unsigned timeout)
{
time64_t expiry = TIME64_MAX;
/* make the changes with the locks held to prevent races */
down_write(&key->sem);
if (timeout > 0)
expiry = ktime_get_real_seconds() + timeout;
key_set_expiry(key, expiry);
up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);
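/*
 * Illustrative usage sketch (not part of the original source): expire a key
 * 300 seconds from now.
 *
 *	key_set_timeout(key, 300);
 *
 * Passing a timeout of 0 removes the expiry again.
 */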
/*
* Unlock a key type locked by key_type_lookup().
*/
void key_type_put(struct key_type *ktype)
{
up_read(&key_types_sem);
}
/*
* Attempt to update an existing key.
*
* The key is given to us with an incremented refcount that we need to discard
* if we get an error.
*/
static inline key_ref_t __key_update(key_ref_t key_ref,
struct key_preparsed_payload *prep)
{
struct key *key = key_ref_to_ptr(key_ref);
int ret;
/* need write permission on the key to update it */
ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
goto error;
ret = -EEXIST;
if (!key->type->update)
goto error;
down_write(&key->sem);
ret = key->type->update(key, prep);
if (ret == 0) {
/* Updating a negative key positively instantiates it */
mark_key_instantiated(key, 0);
notify_key(key, NOTIFY_KEY_UPDATED, 0);
}
up_write(&key->sem);
if (ret < 0)
goto error;
out:
return key_ref;
error:
key_put(key);
key_ref = ERR_PTR(ret);
goto out;
}
/*
* Create or potentially update a key. The combined logic behind
* key_create_or_update() and key_create()
*/
static key_ref_t __key_create_or_update(key_ref_t keyring_ref,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags,
bool allow_update)
{
struct keyring_index_key index_key = {
.description = description,
};
struct key_preparsed_payload prep;
struct assoc_array_edit *edit = NULL;
const struct cred *cred = current_cred();
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
struct key_restriction *restrict_link = NULL;
/* look up the key type to see if it's one of the registered kernel
* types */
index_key.type = key_type_lookup(type);
if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
if (!index_key.type->instantiate ||
(!index_key.description && !index_key.type->preparse))
goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
key_check(keyring);
if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
restrict_link = keyring->restrict_link;
key_ref = ERR_PTR(-ENOTDIR);
if (keyring->type != &key_type_keyring)
goto error_put_type;
memset(&prep, 0, sizeof(prep));
prep.orig_description = description;
prep.data = payload;
prep.datalen = plen;
prep.quotalen = index_key.type->def_datalen;
prep.expiry = TIME64_MAX;
if (index_key.type->preparse) {
ret = index_key.type->preparse(&prep);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
if (!index_key.description)
index_key.description = prep.description;
key_ref = ERR_PTR(-EINVAL);
if (!index_key.description)
goto error_free_prep;
}
index_key.desc_len = strlen(index_key.description);
key_set_index_key(&index_key);
ret = __key_link_lock(keyring, &index_key);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
ret = __key_link_begin(keyring, &index_key, &edit);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_link_end;
}
if (restrict_link && restrict_link->check) {
ret = restrict_link->check(keyring, index_key.type,
&prep.payload, restrict_link->key);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_link_end;
}
}
/* if we're going to allocate a new key, we're going to have
* to modify the keyring */
ret = key_permission(keyring_ref, KEY_NEED_WRITE);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_link_end;
}
/* if it's requested and possible to update this type of key, search
* for an existing key of the same type and description in the
* destination keyring and update that instead if possible
*/
if (allow_update) {
if (index_key.type->update) {
key_ref = find_key_to_update(keyring_ref, &index_key);
if (key_ref)
goto found_matching_key;
}
} else {
key_ref = find_key_to_update(keyring_ref, &index_key);
if (key_ref) {
key_ref_put(key_ref);
key_ref = ERR_PTR(-EEXIST);
goto error_link_end;
}
}
/* if the client didn't provide a permissions mask, decide on the one we want */
if (perm == KEY_PERM_UNDEF) {
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
if (index_key.type->read)
perm |= KEY_POS_READ;
if (index_key.type == &key_type_keyring ||
index_key.type->update)
perm |= KEY_POS_WRITE;
}
/* allocate a new key */
key = key_alloc(index_key.type, index_key.description,
cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error_link_end;
}
/* instantiate it and link it into the target keyring */
ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
goto error_link_end;
}
security_key_post_create_or_update(keyring, key, payload, plen, flags,
true);
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
__key_link_end(keyring, &index_key, edit);
error_free_prep:
if (index_key.type->preparse)
index_key.type->free_preparse(&prep);
error_put_type:
key_type_put(index_key.type);
error:
return key_ref;
found_matching_key:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
__key_link_end(keyring, &index_key, edit);
key = key_ref_to_ptr(key_ref);
if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
ret = wait_for_key_construction(key, true);
if (ret < 0) {
key_ref_put(key_ref);
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
}
key_ref = __key_update(key_ref, &prep);
if (!IS_ERR(key_ref))
security_key_post_create_or_update(keyring, key, payload, plen,
flags, false);
goto error_free_prep;
}
/**
* key_create_or_update - Update or create and instantiate a key.
* @keyring_ref: A pointer to the destination keyring with possession flag.
* @type: The type of key.
* @description: The searchable description for the key.
* @payload: The data to use to instantiate or update the key.
* @plen: The length of @payload.
* @perm: The permissions mask for a new key.
* @flags: The quota flags for a new key.
*
* Search the destination keyring for a key of the same description and if one
* is found, update it, otherwise create and instantiate a new one and create a
* link to it from that keyring.
*
* If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
* concocted.
*
* Returns a pointer to the new key if successful, -ENODEV if the key type
* wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
* caller isn't permitted to modify the keyring or the LSM did not permit
* creation of the key.
*
* On success, the possession flag from the keyring ref will be tacked on to
* the key ref before it is returned.
*/
key_ref_t key_create_or_update(key_ref_t keyring_ref,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags)
{
return __key_create_or_update(keyring_ref, type, description, payload,
plen, perm, flags, true);
}
EXPORT_SYMBOL(key_create_or_update);
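/*
 * Illustrative usage sketch (not part of the original source): add or refresh
 * a "user" key in a keyring the caller already holds a reference to. The
 * keyring pointer and payload are hypothetical.
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(make_key_ref(keyring, true), "user",
 *				    "example:desc", "payload", 7,
 *				    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(kref))
 *		return PTR_ERR(kref);
 *	key_ref_put(kref);
 */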
/**
* key_create - Create and instantiate a key.
* @keyring_ref: A pointer to the destination keyring with possession flag.
* @type: The type of key.
* @description: The searchable description for the key.
* @payload: The data to use to instantiate or update the key.
* @plen: The length of @payload.
* @perm: The permissions mask for a new key.
* @flags: The quota flags for a new key.
*
* Create and instantiate a new key and link to it from the destination keyring.
*
* If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
* concocted.
*
* Returns a pointer to the new key if successful, -EEXIST if a key with the
* same description already exists, -ENODEV if the key type wasn't available,
* -ENOTDIR if the keyring wasn't a keyring, -EACCES if the caller isn't
* permitted to modify the keyring or the LSM did not permit creation of the
* key.
*
* On success, the possession flag from the keyring ref will be tacked on to
* the key ref before it is returned.
*/
key_ref_t key_create(key_ref_t keyring_ref,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags)
{
return __key_create_or_update(keyring_ref, type, description, payload,
plen, perm, flags, false);
}
EXPORT_SYMBOL(key_create);
/**
* key_update - Update a key's contents.
* @key_ref: The pointer (plus possession flag) to the key.
* @payload: The data to be used to update the key.
* @plen: The length of @payload.
*
* Attempt to update the contents of a key with the given payload data. The
* caller must be granted Write permission on the key. Negative keys can be
* instantiated by this method.
*
* Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
* type does not support updating. The key type may return other errors.
*/
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
struct key_preparsed_payload prep;
struct key *key = key_ref_to_ptr(key_ref);
int ret;
key_check(key);
/* the key must be writable */
ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
return ret;
/* attempt to update it if supported */
if (!key->type->update)
return -EOPNOTSUPP;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
prep.datalen = plen;
prep.quotalen = key->type->def_datalen;
prep.expiry = TIME64_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
goto error;
}
down_write(&key->sem);
ret = key->type->update(key, &prep);
if (ret == 0) {
/* Updating a negative key positively instantiates it */
mark_key_instantiated(key, 0);
notify_key(key, NOTIFY_KEY_UPDATED, 0);
}
up_write(&key->sem);
error:
if (key->type->preparse)
key->type->free_preparse(&prep);
return ret;
}
EXPORT_SYMBOL(key_update);
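/*
 * Illustrative usage sketch (not part of the original source): replace the
 * payload of a key the caller has Write permission on. The new payload is
 * hypothetical.
 *
 *	ret = key_update(key_ref, "new payload", 11);
 *	if (ret < 0)
 *		pr_debug("key update failed: %d\n", ret);
 */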
/**
* key_revoke - Revoke a key.
* @key: The key to be revoked.
*
* Mark a key as being revoked and ask the type to free up its resources. The
* revocation timeout is set and the key and all its links will be
* automatically garbage collected after key_gc_delay amount of time if they
* are not manually dealt with first.
*/
void key_revoke(struct key *key)
{
time64_t time;
key_check(key);
/* make sure no one's trying to change or use the key when we mark it
* - we tell lockdep that we might nest because we might be revoking an
* authorisation key whilst holding the sem on a key we've just
* instantiated
*/
down_write_nested(&key->sem, 1);
if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags)) {
notify_key(key, NOTIFY_KEY_REVOKED, 0);
if (key->type->revoke)
key->type->revoke(key);
/* set the death time to no more than the expiry time */
time = ktime_get_real_seconds();
if (key->revoked_at == 0 || key->revoked_at > time) {
key->revoked_at = time;
key_schedule_gc(key->revoked_at + key_gc_delay);
}
}
up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);
/**
* key_invalidate - Invalidate a key.
* @key: The key to be invalidated.
*
* Mark a key as being invalidated and have it cleaned up immediately. The key
* is ignored by all searches and other operations from this point.
*/
void key_invalidate(struct key *key)
{
kenter("%d", key_serial(key));
key_check(key);
if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
down_write_nested(&key->sem, 1);
if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
notify_key(key, NOTIFY_KEY_INVALIDATED, 0);
key_schedule_gc_links();
}
up_write(&key->sem);
}
}
EXPORT_SYMBOL(key_invalidate);
/**
* generic_key_instantiate - Simple instantiation of a key from preparsed data
* @key: The key to be instantiated
* @prep: The preparsed data to load.
*
* Instantiate a key from preparsed data. We assume we can just copy the data
* in directly and clear the old pointers.
*
* This can be pointed to directly by the key type instantiate op pointer.
*/
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
int ret;
pr_devel("==>%s()\n", __func__);
ret = key_payload_reserve(key, prep->quotalen);
if (ret == 0) {
rcu_assign_keypointer(key, prep->payload.data[0]);
key->payload.data[1] = prep->payload.data[1];
key->payload.data[2] = prep->payload.data[2];
key->payload.data[3] = prep->payload.data[3];
prep->payload.data[0] = NULL;
prep->payload.data[1] = NULL;
prep->payload.data[2] = NULL;
prep->payload.data[3] = NULL;
}
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);
/**
* register_key_type - Register a type of key.
* @ktype: The new key type.
*
* Register a new key type.
*
* Returns 0 on success or -EEXIST if a type of this name already exists.
*/
int register_key_type(struct key_type *ktype)
{
struct key_type *p;
int ret;
memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
ret = -EEXIST;
down_write(&key_types_sem);
/* disallow key types with the same name */
list_for_each_entry(p, &key_types_list, link) {
if (strcmp(p->name, ktype->name) == 0)
goto out;
}
/* store the type */
list_add(&ktype->link, &key_types_list);
pr_notice("Key type %s registered\n", ktype->name);
ret = 0;
out:
up_write(&key_types_sem);
return ret;
}
EXPORT_SYMBOL(register_key_type);
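/*
 * Illustrative registration sketch (not part of the original source): a
 * minimal key type that stores its preparsed payload via
 * generic_key_instantiate() (above). The type name and the preparse,
 * free_preparse and destroy implementations are hypothetical.
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.preparse	= example_preparse,
 *		.free_preparse	= example_free_preparse,
 *		.instantiate	= generic_key_instantiate,
 *		.destroy	= example_destroy,
 *	};
 *
 *	ret = register_key_type(&key_type_example);
 */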
/**
* unregister_key_type - Unregister a type of key.
* @ktype: The key type.
*
* Unregister a key type and mark all the extant keys of this type as dead.
* Those keys of this type are then destroyed to get rid of their payloads and
* they and their links will be garbage collected as soon as possible.
*/
void unregister_key_type(struct key_type *ktype)
{
down_write(&key_types_sem);
list_del_init(&ktype->link);
downgrade_write(&key_types_sem);
key_gc_keytype(ktype);
pr_notice("Key type %s unregistered\n", ktype->name);
up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
/*
* Initialise the key management state.
*/
void __init key_init(void)
{
/* allocate a slab in which we can store keys */
key_jar = kmem_cache_create("key_jar", sizeof(struct key),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
/* add the special key types */
list_add_tail(&key_type_keyring.link, &key_types_list);
list_add_tail(&key_type_dead.link, &key_types_list);
list_add_tail(&key_type_user.link, &key_types_list);
list_add_tail(&key_type_logon.link, &key_types_list);
/* record the root user tracking */
rb_link_node(&root_key_user.node,
NULL,
&key_user_tree.rb_node);
rb_insert_color(&root_key_user.node,
&key_user_tree);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
*
* Written by David Howells (dhowells@redhat.com).
* Derived from asm-i386/semaphore.h
*/
#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
#ifndef CONFIG_PREEMPT_RT
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
/*
* For an uncontended rwsem, count and owner are the only fields a task
* needs to touch when acquiring the rwsem. So they are put next to each
* other to increase the chance that they will share the same cacheline.
*
* In a contended rwsem, the owner is likely the most frequently accessed
* field in the structure as the optimistic waiter that holds the osq lock
* will spin on owner. For an embedded rwsem, other hot fields in the
* containing structure should be moved further away from the rwsem to
* reduce the chance that they will share the same cacheline causing
* cacheline bouncing problem.
*/
struct rw_semaphore {
atomic_long_t count;
/*
* Write owner or one of the read owners, as well as flags regarding
* the current state of the rwsem. Can be used as a speculative
* check to see if the write owner is running on the cpu.
*/
atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#define RWSEM_UNLOCKED_VALUE 0UL
#define RWSEM_WRITER_LOCKED (1UL << 0)
#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}
static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}
static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}
/* Common initializer macros and functions */
#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name) \
{ __RWSEM_COUNT_INIT(name), \
.owner = ATOMIC_LONG_INIT(0), \
__RWSEM_OPT_INIT(name) \
.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
.wait_list = LIST_HEAD_INIT((name).wait_list), \
__RWSEM_DEBUG_INIT(name) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key);
#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
} while (0)
/*
* This is the same regardless of which rwsem implementation is being used.
* It is just a heuristic meant to be called by somebody already holding the
* rwsem to see if somebody from an incompatible type is wanting access to the
* lock.
*/
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
return !list_empty(&sem->wait_list);
}
#if defined(CONFIG_DEBUG_RWSEMS) || defined(CONFIG_DETECT_HUNG_TASK_BLOCKER)
/*
* Return just the real task structure pointer of the owner
*/
extern struct task_struct *rwsem_owner(struct rw_semaphore *sem);
/*
* Return true if the rwsem is owned by a reader.
*/
extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
#endif
#else /* !CONFIG_PREEMPT_RT */
#include <linux/rwbase_rt.h>
struct rw_semaphore {
struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#define __RWSEM_INITIALIZER(name) \
{ \
.rwbase = __RWBASE_INITIALIZER(name), \
__RWSEM_DEP_MAP_INIT(name) \
}
#define DECLARE_RWSEM(lockname) \
struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
struct lock_class_key *key);
#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
} while (0)
static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
return rw_base_is_locked(&sem->rwbase);
}
static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
WARN_ON(!rwsem_is_locked(sem));
}
static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
return rw_base_is_contended(&sem->rwbase);
}
#endif /* CONFIG_PREEMPT_RT */
/*
* The functions below are the same for all rwsem implementations including
* the RT specific variant.
*/
static inline void rwsem_assert_held(const struct rw_semaphore *sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held(sem);
else
rwsem_assert_held_nolockdep(sem);
}
static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held_write(sem);
else
rwsem_assert_held_write_nolockdep(sem);
}
/*
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
extern int down_read_trylock(struct rw_semaphore *sem);
/*
* lock for writing
*/
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
extern int down_write_trylock(struct rw_semaphore *sem);
/*
* release a read lock
*/
extern void up_read(struct rw_semaphore *sem);
/*
* release a write lock
*/
extern void up_write(struct rw_semaphore *sem);
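/*
 * Illustrative usage sketch (not part of the original source): a statically
 * declared rwsem guarding a hypothetical list.
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);
 *	... walk the protected list ...
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);
 *	... modify the protected list ...
 *	up_write(&example_sem);
 */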
DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
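/*
 * Illustrative guard sketch (not part of the original source): the guard
 * classes above allow scope-based locking via <linux/cleanup.h>, e.g.
 *
 *	scoped_guard(rwsem_read, &example_sem) {
 *		... read-side critical section, up_read() on scope exit ...
 *	}
 */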
/*
* downgrade write lock to read lock
*/
extern void downgrade_write(struct rw_semaphore *sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* nested locking. NOTE: rwsems are not allowed to recurse
* (which occurs if the same task tries to acquire the same
* lock instance multiple times), but multiple locks of the
* same lock class might be taken, if the order of the locks
* is always the same. This ordering rule can be expressed
* to lockdep via the _nested() APIs by enumerating the
* subclasses that are used. (If the nesting relationship is
* static then another method for expressing nested locking is
* the explicit definition of lock class keys and the use of
* lockdep_set_class() at lock initialization time.
* See Documentation/locking/lockdep-design.rst for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
# define down_write_nest_lock(sem, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
} while (0)
/*
* Take/release a lock when not the owner will release it.
*
* [ This API should be avoided as much as possible - the
* proper abstraction for this case is completions. ]
*/
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
# define down_read_non_owner(sem) down_read(sem)
# define up_read_non_owner(sem) up_read(sem)
#endif
#endif /* _LINUX_RWSEM_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* xsave/xrstor support.
*
* Author: Suresh Siddha <suresh.b.siddha@intel.com>
*/
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pkeys.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/coredump.h>
#include <linux/sort.h>
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xcr.h>
#include <asm/cpuid/api.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/prctl.h>
#include <asm/elf.h>
#include <uapi/asm/elf.h>
#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"
#define for_each_extended_xfeature(bit, mask) \
(bit) = FIRST_EXTENDED_XFEATURE; \
for_each_set_bit_from(bit, (unsigned long *)&(mask), 8 * sizeof(mask))
/*
* Although we spell it out in here, the Processor Trace
* xfeature is completely unused. We use other mechanisms
* to save/restore PT state in Linux.
*/
static const char *xfeature_names[] =
{
"x87 floating point registers",
"SSE registers",
"AVX registers",
"MPX bounds registers",
"MPX CSR",
"AVX-512 opmask",
"AVX-512 Hi256",
"AVX-512 ZMM_Hi256",
"Processor Trace (unused)",
"Protection Keys User registers",
"PASID state",
"Control-flow User registers",
"Control-flow Kernel registers (KVM only)",
"unknown xstate feature",
"unknown xstate feature",
"unknown xstate feature",
"unknown xstate feature",
"AMX Tile config",
"AMX Tile data",
"APX registers",
"unknown xstate feature",
};
static unsigned short xsave_cpuid_features[] __initdata = {
[XFEATURE_FP] = X86_FEATURE_FPU,
[XFEATURE_SSE] = X86_FEATURE_XMM,
[XFEATURE_YMM] = X86_FEATURE_AVX,
[XFEATURE_BNDREGS] = X86_FEATURE_MPX,
[XFEATURE_BNDCSR] = X86_FEATURE_MPX,
[XFEATURE_OPMASK] = X86_FEATURE_AVX512F,
[XFEATURE_ZMM_Hi256] = X86_FEATURE_AVX512F,
[XFEATURE_Hi16_ZMM] = X86_FEATURE_AVX512F,
[XFEATURE_PT_UNIMPLEMENTED_SO_FAR] = X86_FEATURE_INTEL_PT,
[XFEATURE_PKRU] = X86_FEATURE_OSPKE,
[XFEATURE_PASID] = X86_FEATURE_ENQCMD,
[XFEATURE_CET_USER] = X86_FEATURE_SHSTK,
[XFEATURE_CET_KERNEL] = X86_FEATURE_SHSTK,
[XFEATURE_XTILE_CFG] = X86_FEATURE_AMX_TILE,
[XFEATURE_XTILE_DATA] = X86_FEATURE_AMX_TILE,
[XFEATURE_APX] = X86_FEATURE_APX,
};
static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
{ [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init;
/*
* Ordering of xstate components in uncompacted format: The xfeature
* number does not necessarily indicate its position in the XSAVE buffer.
* This array defines the traversal order of xstate features.
*/
static unsigned int xfeature_uncompact_order[XFEATURE_MAX] __ro_after_init =
{ [ 0 ... XFEATURE_MAX - 1] = -1};
static inline unsigned int next_xfeature_order(unsigned int i, u64 mask)
{
for (; xfeature_uncompact_order[i] != -1; i++) {
if (mask & BIT_ULL(xfeature_uncompact_order[i]))
break;
}
return i;
}
/* Iterate xstate features in uncompacted order: */
#define for_each_extended_xfeature_in_order(i, mask) \
for (i = 0; \
i = next_xfeature_order(i, mask), \
xfeature_uncompact_order[i] != -1; \
i++)
#define XSTATE_FLAG_SUPERVISOR BIT(0)
#define XSTATE_FLAG_ALIGNED64 BIT(1)
/*
* Return whether the system supports a given xfeature.
*
* Also return the name of the (most advanced) feature that the caller requested:
*/
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
u64 xfeatures_missing = xfeatures_needed & ~fpu_kernel_cfg.max_features;
if (unlikely(feature_name)) {
long xfeature_idx, max_idx;
u64 xfeatures_print;
/*
* We use fls64() here so that we can print the most advanced
* feature that was requested but is missing. If a driver
* asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
* missing AVX feature - this is the most informative message
* to users:
*/
if (xfeatures_missing)
xfeatures_print = xfeatures_missing;
else
xfeatures_print = xfeatures_needed;
xfeature_idx = fls64(xfeatures_print)-1;
max_idx = ARRAY_SIZE(xfeature_names)-1;
xfeature_idx = min(xfeature_idx, max_idx);
*feature_name = xfeature_names[xfeature_idx];
}
if (xfeatures_missing)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
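/*
 * Illustrative usage sketch (not part of the original source): a driver
 * probing for AVX support before touching YMM state.
 *
 *	const char *name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &name)) {
 *		pr_info("example: CPU lacks xfeature '%s'\n", name);
 *		return -ENODEV;
 *	}
 */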
static bool xfeature_is_aligned64(int xfeature_nr)
{
return xstate_flags[xfeature_nr] & XSTATE_FLAG_ALIGNED64;
}
static bool xfeature_is_supervisor(int xfeature_nr)
{
return xstate_flags[xfeature_nr] & XSTATE_FLAG_SUPERVISOR;
}
static unsigned int xfeature_get_offset(u64 xcomp_bv, int xfeature)
{
unsigned int offs, i;
/*
* Non-compacted format and legacy features use the cached fixed
* offsets.
*/
if (!cpu_feature_enabled(X86_FEATURE_XCOMPACTED) ||
xfeature <= XFEATURE_SSE)
return xstate_offsets[xfeature];
/*
* Compacted format offsets depend on the actual content of the
* compacted xsave area which is determined by the xcomp_bv header
* field.
*/
offs = FXSAVE_SIZE + XSAVE_HDR_SIZE;
for_each_extended_xfeature(i, xcomp_bv) {
if (xfeature_is_aligned64(i))
offs = ALIGN(offs, 64);
if (i == xfeature)
break;
offs += xstate_sizes[i];
}
return offs;
}
/*
* Enable the extended processor state save/restore feature.
* Called once per CPU onlining.
*/
void fpu__init_cpu_xstate(void)
{
if (!boot_cpu_has(X86_FEATURE_XSAVE) || !fpu_kernel_cfg.max_features)
return;
cr4_set_bits(X86_CR4_OSXSAVE);
/*
* Must happen after CR4 setup and before xsetbv() to allow KVM
* lazy passthrough. Write independent of the dynamic state static
* key as that does not work on the boot CPU. This also ensures
* that any stale state is wiped out from XFD. Reset the per CPU
* xfd cache too.
*/
if (cpu_feature_enabled(X86_FEATURE_XFD))
xfd_set_state(init_fpstate.xfd);
/*
* XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
* managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
* states can be set here.
*/
xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features);
/*
* MSR_IA32_XSS sets supervisor states managed by XSAVES.
*/
if (boot_cpu_has(X86_FEATURE_XSAVES)) {
wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() |
xfeatures_mask_independent());
}
}
static bool xfeature_enabled(enum xfeature xfeature)
{
return fpu_kernel_cfg.max_features & BIT_ULL(xfeature);
}
static int compare_xstate_offsets(const void *xfeature1, const void *xfeature2)
{
return xstate_offsets[*(unsigned int *)xfeature1] -
xstate_offsets[*(unsigned int *)xfeature2];
}
/*
* Record the offsets and sizes of various xstates contained
* in the XSAVE state memory layout. Also, create an ordered
* list of xfeatures for handling out-of-order offsets.
*/
static void __init setup_xstate_cache(void)
{
u32 eax, ebx, ecx, edx, xfeature, i = 0;
/*
* The FP xstates and SSE xstates are legacy states. They are always
* in the fixed offsets in the xsave area in either compacted form
* or standard form.
*/
xstate_offsets[XFEATURE_FP] = 0;
xstate_sizes[XFEATURE_FP] = offsetof(struct fxregs_state,
xmm_space);
xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP];
xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state,
xmm_space);
for_each_extended_xfeature(xfeature, fpu_kernel_cfg.max_features) {
cpuid_count(CPUID_LEAF_XSTATE, xfeature, &eax, &ebx, &ecx, &edx);
xstate_sizes[xfeature] = eax;
xstate_flags[xfeature] = ecx;
/*
* If an xfeature is supervisor state, the offset in EBX is
* invalid, so leave it at -1.
*/
if (xfeature_is_supervisor(xfeature))
continue;
xstate_offsets[xfeature] = ebx;
/* Populate the list of xfeatures before sorting */
xfeature_uncompact_order[i++] = xfeature;
}
/*
* Sort xfeatures by their offsets to support out-of-order
* offsets in the uncompacted format.
*/
sort(xfeature_uncompact_order, i, sizeof(unsigned int), compare_xstate_offsets, NULL);
}
/*
* Print out all the supported xstate features:
*/
static void __init print_xstate_features(void)
{
int i;
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = BIT_ULL(i);
const char *name;
if (cpu_has_xfeatures(mask, &name))
pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", mask, name);
}
}
/*
* This check is important because it is easy to get XSTATE_*
* confused with XSTATE_BIT_*.
*/
#define CHECK_XFEATURE(nr) do { \
WARN_ON(nr < FIRST_EXTENDED_XFEATURE); \
WARN_ON(nr >= XFEATURE_MAX); \
} while (0)
/*
* Print out xstate component offsets and sizes
*/
static void __init print_xstate_offset_size(void)
{
int i;
for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
i, xfeature_get_offset(fpu_kernel_cfg.max_features, i),
i, xstate_sizes[i]);
}
}
/*
* This function is called only during boot time when x86 caps are not set
* up and alternatives cannot be used yet.
*/
static __init void os_xrstor_booting(struct xregs_state *xstate)
{
u64 mask = fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSTATE;
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
if (cpu_feature_enabled(X86_FEATURE_XSAVES))
XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
/*
* We should never fault when copying from a kernel buffer, and the FPU
* state we set at boot time should be valid.
*/
WARN_ON_FPU(err);
}
/*
* All supported features have either init state all zeros or are
* handled in setup_init_fpu_buf() individually. This is an explicit
* feature list and does not use XFEATURE_MASK*SUPPORTED to catch
* newly added supported features at build time and make people
* actually look at the init state for the new feature.
*/
#define XFEATURES_INIT_FPSTATE_HANDLED \
(XFEATURE_MASK_FP | \
XFEATURE_MASK_SSE | \
XFEATURE_MASK_YMM | \
XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
XFEATURE_MASK_Hi16_ZMM | \
XFEATURE_MASK_PKRU | \
XFEATURE_MASK_BNDREGS | \
XFEATURE_MASK_BNDCSR | \
XFEATURE_MASK_PASID | \
XFEATURE_MASK_CET_USER | \
XFEATURE_MASK_CET_KERNEL | \
XFEATURE_MASK_XTILE | \
XFEATURE_MASK_APX)
/*
* setup the xstate image representing the init state
*/
static void __init setup_init_fpu_buf(void)
{
BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
XFEATURES_INIT_FPSTATE_HANDLED);
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return;
print_xstate_features();
xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures);
/*
* Init all the features state with header.xfeatures being 0x0
*/
os_xrstor_booting(&init_fpstate.regs.xsave);
/*
* All components are now in init state. Read the state back so
* that init_fpstate contains all non-zero init state. This only
* works with XSAVE, but not with XSAVEOPT and XSAVEC/S because
* those use the init optimization which skips writing data for
* components in init state.
*
* XSAVE could be used, but that would require to reshuffle the
* data when XSAVEC/S is available because XSAVEC/S uses xstate
* compaction. But doing so is a pointless exercise because most
* components have an all zeros init state except for the legacy
* ones (FP and SSE). Those can be saved with FXSAVE into the
* legacy area. Adding new features requires to ensure that init
* state is all zeroes or if not to add the necessary handling
* here.
*/
fxsave(&init_fpstate.regs.fxsave);
}
int xfeature_size(int xfeature_nr)
{
u32 eax, ebx, ecx, edx;
CHECK_XFEATURE(xfeature_nr);
cpuid_count(CPUID_LEAF_XSTATE, xfeature_nr, &eax, &ebx, &ecx, &edx);
return eax;
}
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
static int validate_user_xstate_header(const struct xstate_header *hdr,
struct fpstate *fpstate)
{
/* No unknown or supervisor features may be set */
if (hdr->xfeatures & ~fpstate->user_xfeatures)
return -EINVAL;
/* Userspace must use the uncompacted format */
if (hdr->xcomp_bv)
return -EINVAL;
/*
* If 'reserved' is shrunken to add a new field, make sure to validate
* that new field here!
*/
BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
/* No reserved bits may be set */
if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
return -EINVAL;
return 0;
}
static void __init __xstate_dump_leaves(void)
{
int i;
u32 eax, ebx, ecx, edx;
static int should_dump = 1;
if (!should_dump)
return;
should_dump = 0;
/*
* Dump out a few leaves past the ones that we support
* just in case there are some goodies up there
*/
for (i = 0; i < XFEATURE_MAX + 10; i++) {
cpuid_count(CPUID_LEAF_XSTATE, i, &eax, &ebx, &ecx, &edx);
pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
CPUID_LEAF_XSTATE, i, eax, ebx, ecx, edx);
}
}
#define XSTATE_WARN_ON(x, fmt, ...) do { \
if (WARN_ONCE(x, "XSAVE consistency problem: " fmt, ##__VA_ARGS__)) { \
__xstate_dump_leaves(); \
} \
} while (0)
#define XCHECK_SZ(sz, nr, __struct) ({ \
if (WARN_ONCE(sz != sizeof(__struct), \
"[%s]: struct is %zu bytes, cpu state %d bytes\n", \
xfeature_names[nr], sizeof(__struct), sz)) { \
__xstate_dump_leaves(); \
} \
true; \
})
/**
* check_xtile_data_against_struct - Check tile data state size.
*
* Calculate the state size by multiplying the single tile size which is
* recorded in a C struct, and the number of tiles that the CPU informs.
* Compare the provided size with the calculation.
*
* @size: The tile data state size
*
* Returns: 0 on success, -EINVAL on mismatch.
*/
static int __init check_xtile_data_against_struct(int size)
{
u32 max_palid, palid, state_size;
u32 eax, ebx, ecx, edx;
u16 max_tile;
/*
* Check the maximum palette id:
* eax: the highest numbered palette subleaf.
*/
cpuid_count(CPUID_LEAF_TILE, 0, &max_palid, &ebx, &ecx, &edx);
/*
* Cross-check each tile size and find the maximum number of
* supported tiles.
*/
for (palid = 1, max_tile = 0; palid <= max_palid; palid++) {
u16 tile_size, max;
/*
* Check the tile size info:
* eax[31:16]: bytes per tile
* ebx[31:16]: the max names (or max number of tiles)
*/
cpuid_count(CPUID_LEAF_TILE, palid, &eax, &ebx, &ecx, &edx);
tile_size = eax >> 16;
max = ebx >> 16;
if (tile_size != sizeof(struct xtile_data)) {
pr_err("%s: struct is %zu bytes, cpu xtile %d bytes\n",
__stringify(XFEATURE_XTILE_DATA),
sizeof(struct xtile_data), tile_size);
__xstate_dump_leaves();
return -EINVAL;
}
if (max > max_tile)
max_tile = max;
}
state_size = sizeof(struct xtile_data) * max_tile;
if (size != state_size) {
pr_err("%s: calculated size is %u bytes, cpu state %d bytes\n",
__stringify(XFEATURE_XTILE_DATA), state_size, size);
__xstate_dump_leaves();
return -EINVAL;
}
return 0;
}
/*
* We have a C struct for each 'xstate'. We need to ensure
* that our software representation matches what the CPU
* tells us about the state's size.
*/
static bool __init check_xstate_against_struct(int nr)
{
/*
* Ask the CPU for the size of the state.
*/
int sz = xfeature_size(nr);
/*
* Match each CPU state with the corresponding software
* structure.
*/
switch (nr) {
case XFEATURE_YMM: return XCHECK_SZ(sz, nr, struct ymmh_struct);
case XFEATURE_BNDREGS: return XCHECK_SZ(sz, nr, struct mpx_bndreg_state);
case XFEATURE_BNDCSR: return XCHECK_SZ(sz, nr, struct mpx_bndcsr_state);
case XFEATURE_OPMASK: return XCHECK_SZ(sz, nr, struct avx_512_opmask_state);
case XFEATURE_ZMM_Hi256: return XCHECK_SZ(sz, nr, struct avx_512_zmm_uppers_state);
case XFEATURE_Hi16_ZMM: return XCHECK_SZ(sz, nr, struct avx_512_hi16_state);
case XFEATURE_PKRU: return XCHECK_SZ(sz, nr, struct pkru_state);
case XFEATURE_PASID: return XCHECK_SZ(sz, nr, struct ia32_pasid_state);
case XFEATURE_XTILE_CFG: return XCHECK_SZ(sz, nr, struct xtile_cfg);
case XFEATURE_CET_USER: return XCHECK_SZ(sz, nr, struct cet_user_state);
case XFEATURE_CET_KERNEL: return XCHECK_SZ(sz, nr, struct cet_supervisor_state);
case XFEATURE_APX: return XCHECK_SZ(sz, nr, struct apx_state);
case XFEATURE_XTILE_DATA: check_xtile_data_against_struct(sz); return true;
default:
XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr);
return false;
}
return true;
}
static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted)
{
unsigned int topmost = fls64(xfeatures) - 1;
unsigned int offset, i;
if (topmost <= XFEATURE_SSE)
return sizeof(struct xregs_state);
if (compacted) {
offset = xfeature_get_offset(xfeatures, topmost);
} else {
/* Walk through the xfeature order to pick the last */
for_each_extended_xfeature_in_order(i, xfeatures)
topmost = xfeature_uncompact_order[i];
offset = xstate_offsets[topmost];
}
return offset + xstate_sizes[topmost];
}
/*
* This essentially double-checks what the cpu told us about
* how large the XSAVE buffer needs to be. We are recalculating
* it to be safe.
*
* Independent XSAVE features allocate their own buffers and are not
* covered by these checks. Only the size of the buffer for task->fpu
* is checked here.
*/
static bool __init paranoid_xstate_size_valid(unsigned int kernel_size)
{
bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
bool xsaves = cpu_feature_enabled(X86_FEATURE_XSAVES);
unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
int i;
for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
if (!check_xstate_against_struct(i))
return false;
/*
* Supervisor state components can be managed only by
* XSAVES.
*/
if (!xsaves && xfeature_is_supervisor(i)) {
XSTATE_WARN_ON(1, "Got supervisor feature %d, but XSAVES not advertised\n", i);
return false;
}
}
size = xstate_calculate_size(fpu_kernel_cfg.max_features, compacted);
XSTATE_WARN_ON(size != kernel_size,
"size %u != kernel_size %u\n", size, kernel_size);
return size == kernel_size;
}
/*
* Get total size of enabled xstates in XCR0 | IA32_XSS.
*
* Note the SDM's wording here. "sub-function 0" only enumerates
* the size of the *user* states. If we use it to size a buffer
* that we use 'XSAVES' on, we could potentially overflow the
* buffer because 'XSAVES' saves system states too.
*
* This also takes compaction into account. So this works for
* XSAVEC as well.
*/
static unsigned int __init get_compacted_size(void)
{
unsigned int eax, ebx, ecx, edx;
/*
* - CPUID function 0DH, sub-function 1:
* EBX enumerates the size (in bytes) required by
* the XSAVES instruction for an XSAVE area
* containing all the state components
* corresponding to bits currently set in
* XCR0 | IA32_XSS.
*
* When XSAVES is not available but XSAVEC is (virt), then there
* are no supervisor states, but XSAVEC still uses compacted
* format.
*/
cpuid_count(CPUID_LEAF_XSTATE, 1, &eax, &ebx, &ecx, &edx);
return ebx;
}
/*
* Get the total size of the enabled xstates without the independent supervisor
* features.
*/
static unsigned int __init get_xsave_compacted_size(void)
{
u64 mask = xfeatures_mask_independent();
unsigned int size;
if (!mask)
return get_compacted_size();
/* Disable independent features. */
wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor());
/*
* Ask the hardware what size is required of the buffer.
* This is the size required for the task->fpu buffer.
*/
size = get_compacted_size();
/* Re-enable independent features so XSAVES will work on them again. */
wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
return size;
}
static unsigned int __init get_xsave_size_user(void)
{
unsigned int eax, ebx, ecx, edx;
/*
* - CPUID function 0DH, sub-function 0:
* EBX enumerates the size (in bytes) required by
* the XSAVE instruction for an XSAVE area
* containing all the *user* state components
* corresponding to bits currently set in XCR0.
*/
cpuid_count(CPUID_LEAF_XSTATE, 0, &eax, &ebx, &ecx, &edx);
return ebx;
}
static int __init init_xstate_size(void)
{
/* Recompute the context size for enabled features: */
unsigned int user_size, kernel_size, kernel_default_size;
bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
/* Uncompacted user space size */
user_size = get_xsave_size_user();
/*
* XSAVES kernel size includes supervisor states and uses compacted
* format. XSAVEC uses compacted format, but does not save
* supervisor states.
*
* XSAVE[OPT] do not support supervisor states so kernel and user
* size is identical.
*/
if (compacted)
kernel_size = get_xsave_compacted_size();
else
kernel_size = user_size;
kernel_default_size =
xstate_calculate_size(fpu_kernel_cfg.default_features, compacted);
if (!paranoid_xstate_size_valid(kernel_size))
return -EINVAL;
fpu_kernel_cfg.max_size = kernel_size;
fpu_user_cfg.max_size = user_size;
fpu_kernel_cfg.default_size = kernel_default_size;
fpu_user_cfg.default_size =
xstate_calculate_size(fpu_user_cfg.default_features, false);
guest_default_cfg.size =
xstate_calculate_size(guest_default_cfg.features, compacted);
return 0;
}
/*
* We enabled the XSAVE hardware, but something went wrong and
* we can not use it. Disable it.
*/
static void __init fpu__init_disable_system_xstate(unsigned int legacy_size)
{
pr_info("x86/fpu: XSAVE disabled\n");
fpu_kernel_cfg.max_features = 0;
cr4_clear_bits(X86_CR4_OSXSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
/* Restore the legacy size.*/
fpu_kernel_cfg.max_size = legacy_size;
fpu_kernel_cfg.default_size = legacy_size;
fpu_user_cfg.max_size = legacy_size;
fpu_user_cfg.default_size = legacy_size;
guest_default_cfg.size = legacy_size;
/*
* Prevent enabling the static branch which enables writes to the
* XFD MSR.
*/
init_fpstate.xfd = 0;
fpstate_reset(x86_task_fpu(current));
}
static u64 __init host_default_mask(void)
{
/*
* Exclude dynamic features (require userspace opt-in) and features
* that are supported only for KVM guests.
*/
return ~((u64)XFEATURE_MASK_USER_DYNAMIC | XFEATURE_MASK_GUEST_SUPERVISOR);
}
static u64 __init guest_default_mask(void)
{
/*
* Exclude dynamic features, which require userspace opt-in even
* for KVM guests.
*/
return ~(u64)XFEATURE_MASK_USER_DYNAMIC;
}
/*
* Enable and initialize the xsave feature.
* Called once per system bootup.
*/
void __init fpu__init_system_xstate(unsigned int legacy_size)
{
unsigned int eax, ebx, ecx, edx;
u64 xfeatures;
int err;
int i;
if (!boot_cpu_has(X86_FEATURE_FPU)) {
pr_info("x86/fpu: No FPU detected\n");
return;
}
if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
pr_info("x86/fpu: x87 FPU will use %s\n",
boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
return;
}
/*
* Find user xstates supported by the processor.
*/
cpuid_count(CPUID_LEAF_XSTATE, 0, &eax, &ebx, &ecx, &edx);
fpu_kernel_cfg.max_features = eax + ((u64)edx << 32);
/*
* Find supervisor xstates supported by the processor.
*/
cpuid_count(CPUID_LEAF_XSTATE, 1, &eax, &ebx, &ecx, &edx);
fpu_kernel_cfg.max_features |= ecx + ((u64)edx << 32);
if ((fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
/*
* This indicates that something really unexpected happened
* with the enumeration. Disable XSAVE and try to continue
* booting without it. This is too early to BUG().
*/
pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
fpu_kernel_cfg.max_features);
goto out_disable;
}
if (fpu_kernel_cfg.max_features & XFEATURE_MASK_APX &&
fpu_kernel_cfg.max_features & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) {
/*
* This is a problematic CPU configuration where two
* conflicting state components are both enumerated.
*/
pr_err("x86/fpu: Both APX/MPX present in the CPU's xstate features: 0x%llx.\n",
fpu_kernel_cfg.max_features);
goto out_disable;
}
fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features &
XFEATURE_MASK_INDEPENDENT;
/*
* Clear XSAVE features that are disabled in the normal CPUID.
*/
for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
unsigned short cid = xsave_cpuid_features[i];
/* Careful: X86_FEATURE_FPU is 0! */
if ((i != XFEATURE_FP && !cid) || !boot_cpu_has(cid))
fpu_kernel_cfg.max_features &= ~BIT_ULL(i);
}
if (!cpu_feature_enabled(X86_FEATURE_XFD))
fpu_kernel_cfg.max_features &= ~XFEATURE_MASK_USER_DYNAMIC;
if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED;
else
fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED |
XFEATURE_MASK_SUPERVISOR_SUPPORTED;
fpu_user_cfg.max_features = fpu_kernel_cfg.max_features;
fpu_user_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED;
/*
* Now, given maximum feature set, determine default values by
* applying default masks.
*/
fpu_kernel_cfg.default_features = fpu_kernel_cfg.max_features & host_default_mask();
fpu_user_cfg.default_features = fpu_user_cfg.max_features & host_default_mask();
guest_default_cfg.features = fpu_kernel_cfg.max_features & guest_default_mask();
/* Store it for paranoia check at the end */
xfeatures = fpu_kernel_cfg.max_features;
/*
* Initialize the default XFD state in init_fpstate and enable the
* dynamic sizing mechanism if dynamic states are available. The
* static key cannot be enabled here because this runs before
* jump_label_init(). This is delayed to an initcall.
*/
init_fpstate.xfd = fpu_user_cfg.max_features & XFEATURE_MASK_USER_DYNAMIC;
/* Set up compaction feature bit */
if (cpu_feature_enabled(X86_FEATURE_XSAVEC) ||
cpu_feature_enabled(X86_FEATURE_XSAVES))
setup_force_cpu_cap(X86_FEATURE_XCOMPACTED);
/* Enable xstate instructions to be able to continue with initialization: */
fpu__init_cpu_xstate();
/* Cache size, offset and flags for initialization */
setup_xstate_cache();
err = init_xstate_size();
if (err)
goto out_disable;
/*
* Update info used for ptrace frames; use standard-format size and no
* supervisor xstates:
*/
update_regset_xstate_info(fpu_user_cfg.max_size,
fpu_user_cfg.max_features);
/*
* init_fpstate excludes dynamic states as they are large but init
* state is zero.
*/
init_fpstate.size = fpu_kernel_cfg.default_size;
init_fpstate.xfeatures = fpu_kernel_cfg.default_features;
if (init_fpstate.size > sizeof(init_fpstate.regs)) {
pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d)\n",
sizeof(init_fpstate.regs), init_fpstate.size);
goto out_disable;
}
setup_init_fpu_buf();
/*
* Paranoia check whether something in the setup modified the
* xfeatures mask.
*/
if (xfeatures != fpu_kernel_cfg.max_features) {
pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init\n",
xfeatures, fpu_kernel_cfg.max_features);
goto out_disable;
}
/*
* CPU capabilities initialization runs before FPU init. So
* X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
* functional, set the feature bit so dependent code works.
*/
setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
fpu_kernel_cfg.max_features,
fpu_kernel_cfg.max_size,
boot_cpu_has(X86_FEATURE_XCOMPACTED) ? "compacted" : "standard");
return;
out_disable:
/* something went wrong, try to boot without any XSAVE support */
fpu__init_disable_system_xstate(legacy_size);
}
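/*
 * Illustrative, standalone sketch (kept under #if 0, not part of the build)
 * of the CPUID(0xD) enumeration performed above: sub-leaf 0 reports the
 * user xfeature mask in EDX:EAX and sub-leaf 1 reports the supervisor mask
 * in EDX:ECX. It assumes a userspace environment with the compiler's
 * <cpuid.h> and only prints the two masks.
 */
#if 0
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t user_mask, supervisor_mask;

	/* CPUID leaf 0xD, sub-leaf 0: user xstate features (EDX:EAX) */
	if (!__get_cpuid_count(0x0d, 0, &eax, &ebx, &ecx, &edx))
		return 1;
	user_mask = eax | ((uint64_t)edx << 32);

	/* CPUID leaf 0xD, sub-leaf 1: supervisor xstate features (EDX:ECX) */
	if (!__get_cpuid_count(0x0d, 1, &eax, &ebx, &ecx, &edx))
		return 1;
	supervisor_mask = ecx | ((uint64_t)edx << 32);

	printf("user xfeatures:       0x%016llx\n", (unsigned long long)user_mask);
	printf("supervisor xfeatures: 0x%016llx\n", (unsigned long long)supervisor_mask);
	return 0;
}
#endif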
/*
* Restore minimal FPU state after suspend:
*/
void fpu__resume_cpu(void)
{
/*
* Restore XCR0 on xsave capable CPUs:
*/
if (cpu_feature_enabled(X86_FEATURE_XSAVE))
xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features);
/*
* Restore IA32_XSS. The same CPUID bit enumerates support
* of XSAVES and MSR_IA32_XSS.
*/
if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() |
xfeatures_mask_independent());
}
if (fpu_state_size_dynamic())
wrmsrq(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd);
}
/*
* Given an xstate feature nr, calculate where in the xsave
* buffer the state is. Callers should ensure that the buffer
* is valid.
*/
static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
u64 xcomp_bv = xsave->header.xcomp_bv;
if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
return NULL;
if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED)) {
if (WARN_ON_ONCE(!(xcomp_bv & BIT_ULL(xfeature_nr))))
return NULL;
}
return (void *)xsave + xfeature_get_offset(xcomp_bv, xfeature_nr);
}
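/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * how a compacted-format offset is derived from xcomp_bv: the sizes of all
 * lower-numbered enabled extended features are summed on top of the fixed
 * 576-byte legacy+header area, honouring 64-byte alignment for features
 * that request it. The demo_* helper and its size/alignment tables are
 * hypothetical stand-ins for the data the kernel caches from CPUID.
 */
#if 0
#include <stdint.h>

#define DEMO_XSTATE_EXTENDED_BASE	576u	/* 512-byte legacy area + 64-byte header */

static unsigned int demo_compacted_offset(uint64_t xcomp_bv, int nr,
					   const unsigned int *size,
					   const unsigned int *aligned)
{
	unsigned int offset = DEMO_XSTATE_EXTENDED_BASE;
	int i;

	/* Sum the extended features below @nr that are present in @xcomp_bv */
	for (i = 2; i < nr; i++) {
		if (!(xcomp_bv & (1ULL << i)))
			continue;
		if (aligned[i])
			offset = (offset + 63) & ~63u;	/* 64-byte alignment */
		offset += size[i];
	}
	if (aligned[nr])
		offset = (offset + 63) & ~63u;
	return offset;
}
#endif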
/*
* Given the xsave area and a state inside, this function returns the
* address of the state.
*
* This is the API that is called to get xstate address in either
* standard format or compacted format of xsave area.
*
* Note that if there is no data for the field in the xsave buffer
* this will return NULL.
*
* Inputs:
* xstate: the thread's storage area for all FPU data
* xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
* XFEATURE_SSE, etc...)
* Output:
* address of the state in the xsave area, or NULL if the
* field is not present in the xsave buffer.
*/
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
/*
* Do we even *have* xsave state?
*/
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return NULL;
/*
* We should not ever be requesting features that we
* have not enabled.
*/
if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
return NULL;
/*
* This assumes the last 'xsave*' instruction to
* have requested that 'xfeature_nr' be saved.
* If it did not, we might be seeing an old value
* of the field in the buffer.
*
* This can happen because the last 'xsave' did not
* request that this feature be saved (unlikely)
* or because the "init optimization" caused it
* to not be saved.
*/
if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
return NULL;
return __raw_xsave_addr(xsave, xfeature_nr);
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
/*
* Given an xstate feature nr, calculate where in the xsave buffer the state is.
* The xsave buffer should be in standard format, not compacted (e.g. user mode
* signal frames).
*/
void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr)
{
if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
return NULL;
return (void __user *)xsave + xstate_offsets[xfeature_nr];
}
#ifdef CONFIG_ARCH_HAS_PKEYS
/*
* This will go out and modify PKRU register to set the access
* rights for @pkey to @init_val.
*/
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
unsigned long init_val)
{
u32 old_pkru, new_pkru_bits = 0;
int pkey_shift;
/*
* This check implies XSAVE support. OSPKE only gets
* set if we enable XSAVE and we enable PKU in XCR0.
*/
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return -EINVAL;
/*
* This code should only be called with valid 'pkey'
* values originating from in-kernel users. Complain
* if a bad value is observed.
*/
if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
return -EINVAL;
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
new_pkru_bits |= PKRU_AD_BIT;
if (init_val & PKEY_DISABLE_WRITE)
new_pkru_bits |= PKRU_WD_BIT;
/* Shift the bits in to the correct place in PKRU for pkey: */
pkey_shift = pkey * PKRU_BITS_PER_PKEY;
new_pkru_bits <<= pkey_shift;
/* Get old PKRU and mask off any old bits in place: */
old_pkru = read_pkru();
old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
/* Write old part along with new part: */
write_pkru(old_pkru | new_pkru_bits);
return 0;
}
#endif /* CONFIG_ARCH_HAS_PKEYS */
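/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * the PKRU bit layout manipulated above: each protection key owns a two-bit
 * field (access-disable, write-disable) at bit position pkey * 2. The
 * demo_* names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_PKRU_AD_BIT	0x1u	/* access disable */
#define DEMO_PKRU_WD_BIT	0x2u	/* write disable */
#define DEMO_PKRU_BITS_PER_PKEY	2

/* Recompute the two-bit PKRU field for @pkey, leaving the other keys alone */
static uint32_t demo_update_pkru(uint32_t pkru, int pkey,
				 int disable_access, int disable_write)
{
	uint32_t new_bits = 0;
	int shift = pkey * DEMO_PKRU_BITS_PER_PKEY;

	if (disable_access)
		new_bits |= DEMO_PKRU_AD_BIT;
	if (disable_write)
		new_bits |= DEMO_PKRU_WD_BIT;

	pkru &= ~((DEMO_PKRU_AD_BIT | DEMO_PKRU_WD_BIT) << shift);
	return pkru | (new_bits << shift);
}

int main(void)
{
	/* Deny writes (but not reads) through protection key 5 */
	printf("0x%08x\n", demo_update_pkru(0, 5, 0, 1));	/* prints 0x00000800 */
	return 0;
}
#endif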
static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
void *init_xstate, unsigned int size)
{
membuf_write(to, from_xstate ? xstate : init_xstate, size);
}
/**
* __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
* @to: membuf descriptor
* @fpstate: The fpstate buffer from which to copy
* @xfeatures: The mask of xfeatures to save (XSAVE mode only)
* @pkru_val: The PKRU value to store in the PKRU component
* @copy_mode: The requested copy mode
*
* Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
* format, i.e. from the kernel internal hardware dependent storage format
* to the requested @copy_mode. UABI XSTATE is always uncompacted!
*
* It supports partial copy but @to.pos always starts from zero.
*/
void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
u64 xfeatures, u32 pkru_val,
enum xstate_copy_mode copy_mode)
{
const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
struct xregs_state *xinit = &init_fpstate.regs.xsave;
struct xregs_state *xsave = &fpstate->regs.xsave;
unsigned int zerofrom, i, xfeature;
struct xstate_header header;
u64 mask;
memset(&header, 0, sizeof(header));
header.xfeatures = xsave->header.xfeatures;
/* Mask out the feature bits depending on copy mode */
switch (copy_mode) {
case XSTATE_COPY_FP:
header.xfeatures &= XFEATURE_MASK_FP;
break;
case XSTATE_COPY_FX:
header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
break;
case XSTATE_COPY_XSAVE:
header.xfeatures &= fpstate->user_xfeatures & xfeatures;
break;
}
/* Copy FP state up to MXCSR */
copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
&xinit->i387, off_mxcsr);
/* Copy MXCSR when SSE or YMM are set in the feature mask */
copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
&to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
MXCSR_AND_FLAGS_SIZE);
/* Copy the remaining FP state */
copy_feature(header.xfeatures & XFEATURE_MASK_FP,
&to, &xsave->i387.st_space, &xinit->i387.st_space,
sizeof(xsave->i387.st_space));
/* Copy the SSE state - shared with YMM, but independently managed */
copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
&to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
sizeof(xsave->i387.xmm_space));
if (copy_mode != XSTATE_COPY_XSAVE)
goto out;
/* Zero the padding area */
membuf_zero(&to, sizeof(xsave->i387.padding));
/* Copy xsave->i387.sw_reserved */
membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));
/* Copy the user space relevant state of @xsave->header */
membuf_write(&to, &header, sizeof(header));
zerofrom = offsetof(struct xregs_state, extended_state_area);
/*
* This 'mask' indicates which states to copy from fpstate.
* Those extended states that are not present in fpstate are
* either disabled or initialized:
*
* In non-compacted format, disabled features still occupy
* state space but there is no state to copy from in the
* compacted init_fpstate. The gap tracking will zero these
* states.
*
* The extended features have an all zeroes init state. Thus,
* remove them from 'mask' to zero those features in the user
* buffer instead of retrieving them from init_fpstate.
*/
mask = header.xfeatures;
for_each_extended_xfeature_in_order(i, mask) {
xfeature = xfeature_uncompact_order[i];
/*
* If there was a feature or alignment gap, zero the space
* in the destination buffer.
*/
if (zerofrom < xstate_offsets[xfeature])
membuf_zero(&to, xstate_offsets[xfeature] - zerofrom);
if (xfeature == XFEATURE_PKRU) {
struct pkru_state pkru = {0};
/*
* PKRU is not necessarily up to date in the
* XSAVE buffer. Use the provided value.
*/
pkru.pkru = pkru_val;
membuf_write(&to, &pkru, sizeof(pkru));
} else {
membuf_write(&to,
__raw_xsave_addr(xsave, xfeature),
xstate_sizes[xfeature]);
}
/*
* Keep track of the last copied state in the non-compacted
* target buffer for gap zeroing.
*/
zerofrom = xstate_offsets[xfeature] + xstate_sizes[xfeature];
}
out:
if (to.left)
membuf_zero(&to, to.left);
}
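/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * the gap-zeroing scheme used by __copy_xstate_to_uabi_buf(): the UABI
 * buffer has fixed, uncompacted offsets, so everything between the last
 * copied component and the next present one is filled with zeroes. Here
 * the destination is a plain array rather than a membuf, and the demo_*
 * names and parameter layout are hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void demo_emit_uncompacted(uint8_t *dst, uint64_t xfeatures,
				  int nr_features,
				  const unsigned int *offsets,
				  const unsigned int *sizes,
				  const void *const *src)
{
	unsigned int zerofrom = 0;	/* next destination byte not yet written */
	int i;

	for (i = 0; i < nr_features; i++) {
		if (!(xfeatures & (1ULL << i)))
			continue;
		/* Zero any feature or alignment gap since the last copied state */
		if (zerofrom < offsets[i])
			memset(dst + zerofrom, 0, offsets[i] - zerofrom);
		memcpy(dst + offsets[i], src[i], sizes[i]);
		zerofrom = offsets[i] + sizes[i];
	}
}
#endif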
/**
* copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
* @to: membuf descriptor
* @tsk: The task from which to copy the saved xstate
* @copy_mode: The requested copy mode
*
* Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
* format, i.e. from the kernel internal hardware dependent storage format
* to the requested @copy_mode. UABI XSTATE is always uncompacted!
*
* It supports partial copy but @to.pos always starts from zero.
*/
void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
enum xstate_copy_mode copy_mode)
{
__copy_xstate_to_uabi_buf(to, x86_task_fpu(tsk)->fpstate,
x86_task_fpu(tsk)->fpstate->user_xfeatures,
tsk->thread.pkru, copy_mode);
}
static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
const void *kbuf, const void __user *ubuf)
{
if (kbuf) {
memcpy(dst, kbuf + offset, size);
} else {
if (copy_from_user(dst, ubuf + offset, size))
return -EFAULT;
}
return 0;
}
/**
* copy_uabi_to_xstate - Copy a UABI format buffer to the kernel xstate
* @fpstate: The fpstate buffer to copy to
* @kbuf: The UABI format buffer, if it comes from the kernel
* @ubuf: The UABI format buffer, if it comes from userspace
* @pkru: The location to write the PKRU value to
*
* Converts from the UABI format into the kernel internal hardware
* dependent format.
*
* This function ultimately has three different callers with distinct PKRU
* behavior.
* 1. When called from sigreturn the PKRU register will be restored from
* @fpstate via an XRSTOR. Correctly copying the UABI format buffer to
* @fpstate is sufficient to cover this case, but the caller will also
* pass a pointer to the thread_struct's pkru field in @pkru and updating
* it is harmless.
* 2. When called from ptrace the PKRU register will be restored from the
* thread_struct's pkru field. A pointer to that is passed in @pkru.
* The kernel will restore it manually, so the XRSTOR behavior that resets
* the PKRU register to the hardware init value (0) if the corresponding
* xfeatures bit is not set is emulated here.
* 3. When called from KVM the PKRU register will be restored from the vcpu's
* pkru field. A pointer to that is passed in @pkru. KVM hasn't used
* XRSTOR and hasn't had the PKRU resetting behavior described above. To
* preserve that KVM behavior, it passes NULL for @pkru if the xfeatures
* bit is not set.
*/
static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
const void __user *ubuf, u32 *pkru)
{
struct xregs_state *xsave = &fpstate->regs.xsave;
unsigned int offset, size;
struct xstate_header hdr;
u64 mask;
int i;
offset = offsetof(struct xregs_state, header);
if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf))
return -EFAULT;
if (validate_user_xstate_header(&hdr, fpstate))
return -EINVAL;
/* Validate MXCSR when any of the related features is in use */
mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
if (hdr.xfeatures & mask) {
u32 mxcsr[2];
offset = offsetof(struct fxregs_state, mxcsr);
if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
return -EFAULT;
/* Reserved bits in MXCSR must be zero. */
if (mxcsr[0] & ~mxcsr_feature_mask)
return -EINVAL;
/* SSE and YMM require MXCSR even when FP is not in use. */
if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
xsave->i387.mxcsr = mxcsr[0];
xsave->i387.mxcsr_mask = mxcsr[1];
}
}
for (i = 0; i < XFEATURE_MAX; i++) {
mask = BIT_ULL(i);
if (hdr.xfeatures & mask) {
void *dst = __raw_xsave_addr(xsave, i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
return -EFAULT;
}
}
if (hdr.xfeatures & XFEATURE_MASK_PKRU) {
struct pkru_state *xpkru;
xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU);
*pkru = xpkru->pkru;
} else {
/*
* KVM may pass NULL here to indicate that it does not need
* PKRU updated.
*/
if (pkru)
*pkru = 0;
}
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
*/
xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;
/*
* Add back in the features that came in from userspace:
*/
xsave->header.xfeatures |= hdr.xfeatures;
return 0;
}
/*
* Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
* format and copy to the target thread. Used by ptrace and KVM.
*/
int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru)
{
return copy_uabi_to_xstate(fpstate, kbuf, NULL, pkru);
}
/*
* Convert from a sigreturn standard-format user-space buffer to kernel
* XSAVE[S] format and copy to the target thread. This is called from the
* sigreturn() and rt_sigreturn() system calls.
*/
int copy_sigframe_from_user_to_xstate(struct task_struct *tsk,
const void __user *ubuf)
{
return copy_uabi_to_xstate(x86_task_fpu(tsk)->fpstate, NULL, ubuf, &tsk->thread.pkru);
}
static bool validate_independent_components(u64 mask)
{
u64 xchk;
if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES)))
return false;
xchk = ~xfeatures_mask_independent();
if (WARN_ON_ONCE(!mask || mask & xchk))
return false;
return true;
}
/**
* xsaves - Save selected components to a kernel xstate buffer
* @xstate: Pointer to the buffer
* @mask: Feature mask to select the components to save
*
* The @xstate buffer must be 64 byte aligned and correctly initialized as
* XSAVES does not write the full xstate header. Before first use the
* buffer should be zeroed, otherwise a subsequent XRSTORS from that buffer
* can #GP.
*
* The feature mask must be a subset of the independent features.
*/
void xsaves(struct xregs_state *xstate, u64 mask)
{
int err;
if (!validate_independent_components(mask))
return;
XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err);
WARN_ON_ONCE(err);
}
/**
* xrstors - Restore selected components from a kernel xstate buffer
* @xstate: Pointer to the buffer
* @mask: Feature mask to select the components to restore
*
* The @xstate buffer must be 64 byte aligned and correctly initialized
* otherwise XRSTORS from that buffer can #GP.
*
* Proper usage is to restore the state which was saved with
* xsaves() into @xstate.
*
* The feature mask must be a subset of the independent features.
*/
void xrstors(struct xregs_state *xstate, u64 mask)
{
int err;
if (!validate_independent_components(mask))
return;
XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err);
WARN_ON_ONCE(err);
}
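/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * how the 64-bit requested-feature bitmap is handed to XSAVES/XRSTORS: the
 * instructions take the mask in EDX:EAX, so the XSTATE_OP() invocations
 * above simply pass the low and high 32-bit halves. The demo_* name is
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void demo_split_rfbm(uint64_t mask, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)mask;		/* goes into EAX */
	*hi = (uint32_t)(mask >> 32);	/* goes into EDX */
}

int main(void)
{
	uint32_t lo, hi;

	demo_split_rfbm(0x0000000200000007ULL, &lo, &hi);
	assert(lo == 0x00000007 && hi == 0x00000002);
	return 0;
}
#endif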
#if IS_ENABLED(CONFIG_KVM)
void fpstate_clear_xstate_component(struct fpstate *fpstate, unsigned int xfeature)
{
void *addr = get_xsave_addr(&fpstate->regs.xsave, xfeature);
if (addr)
memset(addr, 0, xstate_sizes[xfeature]);
}
EXPORT_SYMBOL_GPL(fpstate_clear_xstate_component);
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_DEBUG_FPU
/*
* Ensure that a subsequent XSAVE* or XRSTOR* instruction with RFBM=@mask
* can safely operate on the @fpstate buffer.
*/
static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor)
{
u64 xfd = __this_cpu_read(xfd_state);
if (fpstate->xfd == xfd)
return true;
/*
* The XFD MSR does not match fpstate->xfd. That's invalid when
* the passed in fpstate is current's fpstate.
*/
if (fpstate->xfd == x86_task_fpu(current)->fpstate->xfd)
return false;
/*
* XRSTOR(S) from init_fpstate are always correct as it will just
* bring all components into init state and not read from the
* buffer. XSAVE(S) raises #PF after init.
*/
if (fpstate == &init_fpstate)
return rstor;
/*
* XSAVE(S): clone(), fpu_swap_kvm_fpstate()
* XRSTOR(S): fpu_swap_kvm_fpstate()
*/
/*
* No XSAVE/XRSTOR instructions (except XSAVE itself) touch
* the buffer area for XFD-disabled state components.
*/
mask &= ~xfd;
/*
* Remove features which are valid in fpstate. They
* have space allocated in fpstate.
*/
mask &= ~fpstate->xfeatures;
/*
* Any remaining state components in 'mask' might be written
* by XSAVE/XRSTOR. Fail validation if found.
*/
return !mask;
}
void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor)
{ WARN_ON_ONCE(!xstate_op_valid(fpstate, mask, rstor));}
#endif /* CONFIG_X86_DEBUG_FPU */
static int __init xfd_update_static_branch(void)
{
/*
* If init_fpstate.xfd has bits set then dynamic features are
* available and the dynamic sizing must be enabled.
*/
if (init_fpstate.xfd)
static_branch_enable(&__fpu_state_size_dynamic);
return 0;
}
arch_initcall(xfd_update_static_branch)
void fpstate_free(struct fpu *fpu)
{
if (fpu->fpstate && fpu->fpstate != &fpu->__fpstate)
vfree(fpu->fpstate);
}
/**
* fpstate_realloc - Reallocate struct fpstate for the requested new features
*
* @xfeatures: A bitmap of xstate features which extend the enabled features
* of that task
* @ksize: The required size for the kernel buffer
* @usize: The required size for user space buffers
* @guest_fpu: Pointer to a guest FPU container. NULL for host allocations
*
* Note vs. vmalloc(): If the task with a vzalloc()-allocated buffer
* terminates quickly, vfree()-induced IPIs may be a concern, but tasks
* with large states are likely to live longer.
*
* Returns: 0 on success, -ENOMEM on allocation error.
*/
static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
unsigned int usize, struct fpu_guest *guest_fpu)
{
struct fpu *fpu = x86_task_fpu(current);
struct fpstate *curfps, *newfps = NULL;
unsigned int fpsize;
bool in_use;
fpsize = ksize + ALIGN(offsetof(struct fpstate, regs), 64);
newfps = vzalloc(fpsize);
if (!newfps)
return -ENOMEM;
newfps->size = ksize;
newfps->user_size = usize;
newfps->is_valloc = true;
/*
* When a guest FPU is supplied, use @guest_fpu->fpstate
* as reference, independent of whether it is in use or not.
*/
curfps = guest_fpu ? guest_fpu->fpstate : fpu->fpstate;
/* Determine whether @curfps is the active fpstate */
in_use = fpu->fpstate == curfps;
if (guest_fpu) {
newfps->is_guest = true;
newfps->is_confidential = curfps->is_confidential;
newfps->in_use = curfps->in_use;
guest_fpu->xfeatures |= xfeatures;
guest_fpu->uabi_size = usize;
}
fpregs_lock();
/*
* If @curfps is in use, ensure that the current state is in the
* registers before swapping fpstate as that might invalidate it
* due to layout changes.
*/
if (in_use && test_thread_flag(TIF_NEED_FPU_LOAD))
fpregs_restore_userregs();
newfps->xfeatures = curfps->xfeatures | xfeatures;
newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
newfps->xfd = curfps->xfd & ~xfeatures;
/* Do the final updates within the locked region */
xstate_init_xcomp_bv(&newfps->regs.xsave, newfps->xfeatures);
if (guest_fpu) {
guest_fpu->fpstate = newfps;
/* If curfps is active, update the FPU fpstate pointer */
if (in_use)
fpu->fpstate = newfps;
} else {
fpu->fpstate = newfps;
}
if (in_use)
xfd_update_state(fpu->fpstate);
fpregs_unlock();
/* Only free valloc'ed state */
if (curfps && curfps->is_valloc)
vfree(curfps);
return 0;
}
static int validate_sigaltstack(unsigned int usize)
{
struct task_struct *thread, *leader = current->group_leader;
unsigned long framesize = get_sigframe_size();
lockdep_assert_held(&current->sighand->siglock);
/* get_sigframe_size() is based on fpu_user_cfg.max_size */
framesize -= fpu_user_cfg.max_size;
framesize += usize;
for_each_thread(leader, thread) {
if (thread->sas_ss_size && thread->sas_ss_size < framesize)
return -ENOSPC;
}
return 0;
}
static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
{
/*
* This deliberately does not exclude !XSAVES as we still might
* decide to optionally context switch XCR0 or talk the silicon
* vendors into extending XFD for the pre AMX states, especially
* AVX512.
*/
bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
struct fpu *fpu = x86_task_fpu(current->group_leader);
struct fpu_state_perm *perm;
unsigned int ksize, usize;
u64 mask;
int ret = 0;
/* Check whether fully enabled */
if ((permitted & requested) == requested)
return 0;
/*
* Calculate the resulting kernel state size. Note, @permitted also
* contains supervisor xfeatures even though supervisor xfeatures are always
* permitted for kernel and guest FPUs, and never permitted for user
* FPUs.
*/
mask = permitted | requested;
ksize = xstate_calculate_size(mask, compacted);
/*
* Calculate the resulting user state size. Take care not to clobber
* the supervisor xfeatures in the new mask!
*/
usize = xstate_calculate_size(mask & XFEATURE_MASK_USER_SUPPORTED, false);
if (!guest) {
ret = validate_sigaltstack(usize);
if (ret)
return ret;
}
perm = guest ? &fpu->guest_perm : &fpu->perm;
/* Pairs with the READ_ONCE() in xstate_get_group_perm() */
WRITE_ONCE(perm->__state_perm, mask);
/* Protected by sighand lock */
perm->__state_size = ksize;
perm->__user_state_size = usize;
return ret;
}
/*
* Permissions array to map facilities with more than one component
*/
static const u64 xstate_prctl_req[XFEATURE_MAX] = {
[XFEATURE_XTILE_DATA] = XFEATURE_MASK_XTILE_DATA,
};
static int xstate_request_perm(unsigned long idx, bool guest)
{
u64 permitted, requested;
int ret;
if (idx >= XFEATURE_MAX)
return -EINVAL;
/*
* Look up the facility mask which can require more than
* one xstate component.
*/
idx = array_index_nospec(idx, ARRAY_SIZE(xstate_prctl_req));
requested = xstate_prctl_req[idx];
if (!requested)
return -EOPNOTSUPP;
if ((fpu_user_cfg.max_features & requested) != requested)
return -EOPNOTSUPP;
/* Lockless quick check */
permitted = xstate_get_group_perm(guest);
if ((permitted & requested) == requested)
return 0;
/* Protect against concurrent modifications */
spin_lock_irq(&current->sighand->siglock);
permitted = xstate_get_group_perm(guest);
/* First vCPU allocation locks the permissions. */
if (guest && (permitted & FPU_GUEST_PERM_LOCKED))
ret = -EBUSY;
else
ret = __xstate_request_perm(permitted, requested, guest);
spin_unlock_irq(&current->sighand->siglock);
return ret;
}
int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu)
{
u64 xfd_event = xfd_err & XFEATURE_MASK_USER_DYNAMIC;
struct fpu_state_perm *perm;
unsigned int ksize, usize;
struct fpu *fpu;
if (!xfd_event) {
if (!guest_fpu)
pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err);
return 0;
}
/* Protect against concurrent modifications */
spin_lock_irq(&current->sighand->siglock);
/* If not permitted let it die */
if ((xstate_get_group_perm(!!guest_fpu) & xfd_event) != xfd_event) {
spin_unlock_irq(&current->sighand->siglock);
return -EPERM;
}
fpu = x86_task_fpu(current->group_leader);
perm = guest_fpu ? &fpu->guest_perm : &fpu->perm;
ksize = perm->__state_size;
usize = perm->__user_state_size;
/*
* The feature is permitted. State size is sufficient. Dropping
* the lock is safe here; even if more features are added from
* another task, the retrieved buffer sizes remain valid for the
* currently requested feature(s).
*/
spin_unlock_irq(&current->sighand->siglock);
/*
* Try to allocate a new fpstate. If that fails there is no way
* out.
*/
if (fpstate_realloc(xfd_event, ksize, usize, guest_fpu))
return -EFAULT;
return 0;
}
int xfd_enable_feature(u64 xfd_err)
{
return __xfd_enable_feature(xfd_err, NULL);
}
#else /* CONFIG_X86_64 */
static inline int xstate_request_perm(unsigned long idx, bool guest)
{
return -EPERM;
}
#endif /* !CONFIG_X86_64 */
u64 xstate_get_guest_group_perm(void)
{
return xstate_get_group_perm(true);
}
EXPORT_SYMBOL_GPL(xstate_get_guest_group_perm);
/**
* fpu_xstate_prctl - xstate permission operations
* @option: A subfunction of arch_prctl()
* @arg2: option argument
* Return: 0 if successful; otherwise, an error code
*
* Option arguments:
*
* ARCH_GET_XCOMP_SUPP: Pointer to user space u64 to store the info
* ARCH_GET_XCOMP_PERM: Pointer to user space u64 to store the info
* ARCH_REQ_XCOMP_PERM: Facility number requested
*
* For facilities which require more than one XSTATE component, the request
* must be the highest state component number related to that facility,
* e.g. for AMX which requires XFEATURE_XTILE_CFG(17) and
* XFEATURE_XTILE_DATA(18) this would be XFEATURE_XTILE_DATA(18).
*/
long fpu_xstate_prctl(int option, unsigned long arg2)
{
u64 __user *uptr = (u64 __user *)arg2;
u64 permitted, supported;
unsigned long idx = arg2;
bool guest = false;
switch (option) {
case ARCH_GET_XCOMP_SUPP:
supported = fpu_user_cfg.max_features | fpu_user_cfg.legacy_features;
return put_user(supported, uptr);
case ARCH_GET_XCOMP_PERM:
/*
* Lockless snapshot as it can also change right after
* dropping the lock.
*/
permitted = xstate_get_host_group_perm();
permitted &= XFEATURE_MASK_USER_SUPPORTED;
return put_user(permitted, uptr);
case ARCH_GET_XCOMP_GUEST_PERM:
permitted = xstate_get_guest_group_perm();
permitted &= XFEATURE_MASK_USER_SUPPORTED;
return put_user(permitted, uptr);
case ARCH_REQ_XCOMP_GUEST_PERM:
guest = true;
fallthrough;
case ARCH_REQ_XCOMP_PERM:
if (!IS_ENABLED(CONFIG_X86_64))
return -EOPNOTSUPP;
return xstate_request_perm(idx, guest);
default:
return -EINVAL;
}
}
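/*
 * Illustrative, standalone userspace sketch (under #if 0, not part of the
 * build) of the permission interface above: a task requests the dynamic
 * AMX tile-data component via ARCH_REQ_XCOMP_PERM and reads back the
 * permitted set via ARCH_GET_XCOMP_PERM. It assumes an x86-64 glibc
 * environment; the prctl values and the XFEATURE_XTILE_DATA number (18)
 * come from the uapi headers and are redefined here only to keep the
 * sketch self-contained.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef ARCH_GET_XCOMP_PERM
#define ARCH_GET_XCOMP_PERM	0x1022
#define ARCH_REQ_XCOMP_PERM	0x1023
#endif
#define DEMO_XFEATURE_XTILE_DATA	18	/* AMX tile data component */

int main(void)
{
	uint64_t permitted = 0;

	/* Ask for permission to use the AMX (XTILE_DATA) state component */
	if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, DEMO_XFEATURE_XTILE_DATA)) {
		perror("ARCH_REQ_XCOMP_PERM");
		return 1;
	}
	if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &permitted)) {
		perror("ARCH_GET_XCOMP_PERM");
		return 1;
	}
	printf("permitted xfeatures: 0x%016llx\n", (unsigned long long)permitted);
	return 0;
}
#endif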
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
* Report the amount of time elapsed in milliseconds since last AVX-512
* use in the task. Report -1 if no AVX-512 usage.
*/
static void avx512_status(struct seq_file *m, struct task_struct *task)
{
unsigned long timestamp;
long delta = -1;
/* AVX-512 usage is not tracked for kernel threads. Don't report anything. */
if (task->flags & (PF_KTHREAD | PF_USER_WORKER))
return;
timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
if (timestamp) {
delta = (long)(jiffies - timestamp);
/*
* Cap to LONG_MAX if time difference > LONG_MAX
*/
if (delta < 0)
delta = LONG_MAX;
delta = jiffies_to_msecs(delta);
}
seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
seq_putc(m, '\n');
}
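/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * the timestamp arithmetic above: the unsigned subtraction is well defined
 * across a counter wrap, and the cast to signed long only goes negative
 * when the difference exceeds LONG_MAX, in which case it is clamped. The
 * demo_* name is hypothetical.
 */
#if 0
#include <limits.h>
#include <stdio.h>

static long demo_elapsed(unsigned long now, unsigned long then)
{
	long delta = (long)(now - then);

	return delta < 0 ? LONG_MAX : delta;
}

int main(void)
{
	/* 'now' has wrapped past zero; the difference is still the expected 10 */
	printf("%ld\n", demo_elapsed(5, ULONG_MAX - 4));
	return 0;
}
#endif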
/*
* Report architecture specific information
*/
int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
/*
* Report AVX-512 usage if the processor and build option support it.
*/
if (cpu_feature_enabled(X86_FEATURE_AVX512F))
avx512_status(m, task);
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
#ifdef CONFIG_COREDUMP
static const char owner_name[] = "LINUX";
/*
* Dump type, size, offset and flag values for every xfeature that is present.
*/
static int dump_xsave_layout_desc(struct coredump_params *cprm)
{
int num_records = 0;
int i;
for_each_extended_xfeature(i, fpu_user_cfg.max_features) {
struct x86_xfeat_component xc = {
.type = i,
.size = xstate_sizes[i],
.offset = xstate_offsets[i],
/* reserved for future use */
.flags = 0,
};
if (!dump_emit(cprm, &xc, sizeof(xc)))
return 0;
num_records++;
}
return num_records;
}
static u32 get_xsave_desc_size(void)
{
u32 cnt = 0;
u32 i;
for_each_extended_xfeature(i, fpu_user_cfg.max_features)
cnt++;
return cnt * (sizeof(struct x86_xfeat_component));
}
int elf_coredump_extra_notes_write(struct coredump_params *cprm)
{
int num_records = 0;
struct elf_note en;
if (!fpu_user_cfg.max_features)
return 0;
en.n_namesz = sizeof(owner_name);
en.n_descsz = get_xsave_desc_size();
en.n_type = NT_X86_XSAVE_LAYOUT;
if (!dump_emit(cprm, &en, sizeof(en)))
return 1;
if (!dump_emit(cprm, owner_name, en.n_namesz))
return 1;
if (!dump_align(cprm, 4))
return 1;
num_records = dump_xsave_layout_desc(cprm);
if (!num_records)
return 1;
/* The emitted descriptor size must match what the note header declared */
if ((sizeof(struct x86_xfeat_component) * num_records) != en.n_descsz)
return 1;
return 0;
}
int elf_coredump_extra_notes_size(void)
{
int size;
if (!fpu_user_cfg.max_features)
return 0;
/* .note header */
size = sizeof(struct elf_note);
/* Name plus alignment to 4 bytes */
size += roundup(sizeof(owner_name), 4);
size += get_xsave_desc_size();
return size;
}
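/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * the note-size bookkeeping above: an ELF note is three 32-bit words of
 * header, the owner name padded to 4 bytes, then the descriptor payload,
 * one record per extended xfeature. The demo_* helper and the record size
 * used in main() are hypothetical stand-ins.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

static size_t demo_note_size(size_t name_len, size_t rec_size,
			     unsigned int nr_records)
{
	size_t size = 12;				/* n_namesz, n_descsz, n_type */

	size += (name_len + 3) & ~(size_t)3;		/* name padded to 4 bytes */
	size += rec_size * nr_records;			/* descriptor records */
	return size;
}

int main(void)
{
	/* "LINUX" including its NUL is 6 bytes, padded to 8 */
	printf("%zu\n", demo_note_size(sizeof("LINUX"), 16, 3));	/* 12 + 8 + 48 = 68 */
	return 0;
}
#endif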
#endif /* CONFIG_COREDUMP */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NetLabel Kernel API
*
* This file defines the kernel API for the NetLabel system. The NetLabel
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
* Author: Paul Moore <paul@paul-moore.com>
*/
/*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <net/calipso.h>
#include <asm/bug.h>
#include <linux/atomic.h>
#include "netlabel_domainhash.h"
#include "netlabel_unlabeled.h"
#include "netlabel_cipso_v4.h"
#include "netlabel_calipso.h"
#include "netlabel_user.h"
#include "netlabel_mgmt.h"
#include "netlabel_addrlist.h"
/*
* Configuration Functions
*/
/**
* netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping
* @domain: the domain mapping to remove
* @family: address family
* @addr: IP address
* @mask: IP address mask
* @audit_info: NetLabel audit information
*
* Description:
* Removes a NetLabel/LSM domain mapping. A @domain value of NULL causes the
* default domain mapping to be removed. Returns zero on success, negative
* values on failure.
*
*/
int netlbl_cfg_map_del(const char *domain,
u16 family,
const void *addr,
const void *mask,
struct netlbl_audit *audit_info)
{
if (addr == NULL && mask == NULL) {
return netlbl_domhsh_remove(domain, family, audit_info);
} else if (addr != NULL && mask != NULL) {
switch (family) {
case AF_INET:
return netlbl_domhsh_remove_af4(domain, addr, mask,
audit_info);
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return netlbl_domhsh_remove_af6(domain, addr, mask,
audit_info);
#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
} else
return -EINVAL;
}
/**
* netlbl_cfg_unlbl_map_add - Add a new unlabeled mapping
* @domain: the domain mapping to add
* @family: address family
* @addr: IP address
* @mask: IP address mask
* @audit_info: NetLabel audit information
*
* Description:
* Adds a new unlabeled NetLabel/LSM domain mapping. A @domain value of NULL
* causes a new default domain mapping to be added. Returns zero on success,
* negative values on failure.
*
*/
int netlbl_cfg_unlbl_map_add(const char *domain,
u16 family,
const void *addr,
const void *mask,
struct netlbl_audit *audit_info)
{
int ret_val = -ENOMEM;
struct netlbl_dom_map *entry;
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *map4 = NULL;
struct netlbl_domaddr6_map *map6 = NULL;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
return -ENOMEM;
if (domain != NULL) {
entry->domain = kstrdup(domain, GFP_ATOMIC);
if (entry->domain == NULL)
goto cfg_unlbl_map_add_failure;
}
entry->family = family;
if (addr == NULL && mask == NULL)
entry->def.type = NETLBL_NLTYPE_UNLABELED;
else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
goto cfg_unlbl_map_add_failure;
INIT_LIST_HEAD(&addrmap->list4);
INIT_LIST_HEAD(&addrmap->list6);
switch (family) {
case AF_INET: {
const struct in_addr *addr4 = addr;
const struct in_addr *mask4 = mask;
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
map4->def.type = NETLBL_NLTYPE_UNLABELED;
map4->list.addr = addr4->s_addr & mask4->s_addr;
map4->list.mask = mask4->s_addr;
map4->list.valid = 1;
ret_val = netlbl_af4list_add(&map4->list,
&addrmap->list4);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
const struct in6_addr *addr6 = addr;
const struct in6_addr *mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
map6->def.type = NETLBL_NLTYPE_UNLABELED;
map6->list.addr = *addr6;
map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
map6->list.mask = *mask6;
map6->list.valid = 1;
ret_val = netlbl_af6list_add(&map6->list,
&addrmap->list6);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
}
#endif /* IPv6 */
default:
goto cfg_unlbl_map_add_failure;
}
entry->def.addrsel = addrmap;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto cfg_unlbl_map_add_failure;
}
ret_val = netlbl_domhsh_add(entry, audit_info);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
return 0;
cfg_unlbl_map_add_failure:
kfree(entry->domain);
kfree(entry);
kfree(addrmap);
kfree(map4);
kfree(map6);
return ret_val;
}
/**
* netlbl_cfg_unlbl_static_add - Adds a new static label
* @net: network namespace
* @dev_name: interface name
* @addr: IP address in network byte order (struct in[6]_addr)
* @mask: address mask in network byte order (struct in[6]_addr)
* @family: address family
* @secid: LSM secid value for the entry
* @audit_info: NetLabel audit information
*
* Description:
* Adds a new NetLabel static label to be used when protocol provided labels
* are not present on incoming traffic. If @dev_name is NULL then the default
* interface will be used. Returns zero on success, negative values on failure.
*
*/
int netlbl_cfg_unlbl_static_add(struct net *net,
const char *dev_name,
const void *addr,
const void *mask,
u16 family,
u32 secid,
struct netlbl_audit *audit_info)
{
u32 addr_len;
switch (family) {
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
return netlbl_unlhsh_add(net,
dev_name, addr, mask, addr_len,
secid, audit_info);
}
/**
* netlbl_cfg_unlbl_static_del - Removes an existing static label
* @net: network namespace
* @dev_name: interface name
* @addr: IP address in network byte order (struct in[6]_addr)
* @mask: address mask in network byte order (struct in[6]_addr)
* @family: address family
* @audit_info: NetLabel audit information
*
* Description:
* Removes an existing NetLabel static label used when protocol provided labels
* are not present on incoming traffic. If @dev_name is NULL then the default
* interface will be used. Returns zero on success, negative values on failure.
*
*/
int netlbl_cfg_unlbl_static_del(struct net *net,
const char *dev_name,
const void *addr,
const void *mask,
u16 family,
struct netlbl_audit *audit_info)
{
u32 addr_len;
switch (family) {
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
return netlbl_unlhsh_remove(net,
dev_name, addr, mask, addr_len,
audit_info);
}
/**
* netlbl_cfg_cipsov4_add - Add a new CIPSOv4 DOI definition
* @doi_def: CIPSO DOI definition
* @audit_info: NetLabel audit information
*
* Description:
* Add a new CIPSO DOI definition as defined by @doi_def. Returns zero on
* success and negative values on failure.
*
*/
int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
struct netlbl_audit *audit_info)
{
return cipso_v4_doi_add(doi_def, audit_info);
}
/**
* netlbl_cfg_cipsov4_del - Remove an existing CIPSOv4 DOI definition
* @doi: CIPSO DOI
* @audit_info: NetLabel audit information
*
* Description:
* Remove an existing CIPSO DOI definition matching @doi.
*
*/
void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info)
{
cipso_v4_doi_remove(doi, audit_info);
}
/**
* netlbl_cfg_cipsov4_map_add - Add a new CIPSOv4 DOI mapping
* @doi: the CIPSO DOI
* @domain: the domain mapping to add
* @addr: IP address
* @mask: IP address mask
* @audit_info: NetLabel audit information
*
* Description:
* Add a new NetLabel/LSM domain mapping for the given CIPSO DOI to the NetLabel
* subsystem. A @domain value of NULL adds a new default domain mapping.
* Returns zero on success, negative values on failure.
*
*/
int netlbl_cfg_cipsov4_map_add(u32 doi,
const char *domain,
const struct in_addr *addr,
const struct in_addr *mask,
struct netlbl_audit *audit_info)
{
int ret_val = -ENOMEM;
struct cipso_v4_doi *doi_def;
struct netlbl_dom_map *entry;
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *addrinfo = NULL;
doi_def = cipso_v4_doi_getdef(doi);
if (doi_def == NULL)
return -ENOENT;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
goto out_entry;
entry->family = AF_INET;
if (domain != NULL) {
entry->domain = kstrdup(domain, GFP_ATOMIC);
if (entry->domain == NULL)
goto out_domain;
}
if (addr == NULL && mask == NULL) {
entry->def.cipso = doi_def;
entry->def.type = NETLBL_NLTYPE_CIPSOV4;
} else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
goto out_addrmap;
INIT_LIST_HEAD(&addrmap->list4);
INIT_LIST_HEAD(&addrmap->list6);
addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
if (addrinfo == NULL)
goto out_addrinfo;
addrinfo->def.cipso = doi_def;
addrinfo->def.type = NETLBL_NLTYPE_CIPSOV4;
addrinfo->list.addr = addr->s_addr & mask->s_addr;
addrinfo->list.mask = mask->s_addr;
addrinfo->list.valid = 1;
ret_val = netlbl_af4list_add(&addrinfo->list, &addrmap->list4);
if (ret_val != 0)
goto cfg_cipsov4_map_add_failure;
entry->def.addrsel = addrmap;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto out_addrmap;
}
ret_val = netlbl_domhsh_add(entry, audit_info);
if (ret_val != 0)
goto cfg_cipsov4_map_add_failure;
return 0;
cfg_cipsov4_map_add_failure:
kfree(addrinfo);
out_addrinfo:
kfree(addrmap);
out_addrmap:
kfree(entry->domain);
out_domain:
kfree(entry);
out_entry:
cipso_v4_doi_putdef(doi_def);
return ret_val;
}
/**
* netlbl_cfg_calipso_add - Add a new CALIPSO DOI definition
* @doi_def: CALIPSO DOI definition
* @audit_info: NetLabel audit information
*
* Description:
* Add a new CALIPSO DOI definition as defined by @doi_def. Returns zero on
* success and negative values on failure.
*
*/
int netlbl_cfg_calipso_add(struct calipso_doi *doi_def,
struct netlbl_audit *audit_info)
{
#if IS_ENABLED(CONFIG_IPV6)
return calipso_doi_add(doi_def, audit_info);
#else /* IPv6 */
return -ENOSYS;
#endif /* IPv6 */
}
/**
* netlbl_cfg_calipso_del - Remove an existing CALIPSO DOI definition
* @doi: CALIPSO DOI
* @audit_info: NetLabel audit information
*
* Description:
* Remove an existing CALIPSO DOI definition matching @doi.
*
*/
void netlbl_cfg_calipso_del(u32 doi, struct netlbl_audit *audit_info)
{
#if IS_ENABLED(CONFIG_IPV6)
calipso_doi_remove(doi, audit_info);
#endif /* IPv6 */
}
/**
* netlbl_cfg_calipso_map_add - Add a new CALIPSO DOI mapping
* @doi: the CALIPSO DOI
* @domain: the domain mapping to add
* @addr: IP address
* @mask: IP address mask
* @audit_info: NetLabel audit information
*
* Description:
* Add a new NetLabel/LSM domain mapping for the given CALIPSO DOI to the
* NetLabel subsystem. A @domain value of NULL adds a new default domain
* mapping. Returns zero on success, negative values on failure.
*
*/
int netlbl_cfg_calipso_map_add(u32 doi,
const char *domain,
const struct in6_addr *addr,
const struct in6_addr *mask,
struct netlbl_audit *audit_info)
{
#if IS_ENABLED(CONFIG_IPV6)
int ret_val = -ENOMEM;
struct calipso_doi *doi_def;
struct netlbl_dom_map *entry;
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr6_map *addrinfo = NULL;
doi_def = calipso_doi_getdef(doi);
if (doi_def == NULL)
return -ENOENT;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
goto out_entry;
entry->family = AF_INET6;
if (domain != NULL) {
entry->domain = kstrdup(domain, GFP_ATOMIC);
if (entry->domain == NULL)
goto out_domain;
}
if (addr == NULL && mask == NULL) {
entry->def.calipso = doi_def;
entry->def.type = NETLBL_NLTYPE_CALIPSO;
} else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
goto out_addrmap;
INIT_LIST_HEAD(&addrmap->list4);
INIT_LIST_HEAD(&addrmap->list6);
addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
if (addrinfo == NULL)
goto out_addrinfo;
addrinfo->def.calipso = doi_def;
addrinfo->def.type = NETLBL_NLTYPE_CALIPSO;
addrinfo->list.addr = *addr;
addrinfo->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
addrinfo->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
addrinfo->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
addrinfo->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
addrinfo->list.mask = *mask;
addrinfo->list.valid = 1;
ret_val = netlbl_af6list_add(&addrinfo->list, &addrmap->list6);
if (ret_val != 0)
goto cfg_calipso_map_add_failure;
entry->def.addrsel = addrmap;
entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto out_addrmap;
}
ret_val = netlbl_domhsh_add(entry, audit_info);
if (ret_val != 0)
goto cfg_calipso_map_add_failure;
return 0;
cfg_calipso_map_add_failure:
kfree(addrinfo);
out_addrinfo:
kfree(addrmap);
out_addrmap:
kfree(entry->domain);
out_domain:
kfree(entry);
out_entry:
calipso_doi_putdef(doi_def);
return ret_val;
#else /* IPv6 */
return -ENOSYS;
#endif /* IPv6 */
}
/*
* Security Attribute Functions
*/
#define _CM_F_NONE 0x00000000
#define _CM_F_ALLOC 0x00000001
#define _CM_F_WALK 0x00000002
/**
* _netlbl_catmap_getnode - Get an individual node from a catmap
* @catmap: pointer to the category bitmap
* @offset: the requested offset
* @cm_flags: catmap flags, see _CM_F_*
* @gfp_flags: memory allocation flags
*
* Description:
* Iterate through the catmap looking for the node associated with @offset.
* If the _CM_F_ALLOC flag is set in @cm_flags and there is no associated node,
* one will be created and inserted into the catmap. If the _CM_F_WALK flag is
* set in @cm_flags and there is no associated node, the next highest node will
* be returned. Returns a pointer to the node on success, NULL on failure.
*
*/
static struct netlbl_lsm_catmap *_netlbl_catmap_getnode(
struct netlbl_lsm_catmap **catmap,
u32 offset,
unsigned int cm_flags,
gfp_t gfp_flags)
{
struct netlbl_lsm_catmap *iter = *catmap;
struct netlbl_lsm_catmap *prev = NULL;
if (iter == NULL)
goto catmap_getnode_alloc;
if (offset < iter->startbit)
goto catmap_getnode_walk;
while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
prev = iter;
iter = iter->next;
}
if (iter == NULL || offset < iter->startbit)
goto catmap_getnode_walk;
return iter;
catmap_getnode_walk:
if (cm_flags & _CM_F_WALK)
return iter;
catmap_getnode_alloc:
if (!(cm_flags & _CM_F_ALLOC))
return NULL;
iter = netlbl_catmap_alloc(gfp_flags);
if (iter == NULL)
return NULL;
iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1);
if (prev == NULL) {
iter->next = *catmap;
*catmap = iter;
} else {
iter->next = prev->next;
prev->next = iter;
}
return iter;
}
/**
* netlbl_catmap_walk - Walk a LSM secattr catmap looking for a bit
* @catmap: the category bitmap
* @offset: the offset to start searching at, in bits
*
* Description:
* This function walks a LSM secattr category bitmap starting at @offset and
* returns the spot of the first set bit or -ENOENT if no bits are set.
*
*/
int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)
{
struct netlbl_lsm_catmap *iter;
u32 idx;
u32 bit;
u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
return -ENOENT;
if (offset > iter->startbit) {
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
bit = offset % NETLBL_CATMAP_MAPSIZE;
} else {
idx = 0;
bit = 0;
}
bitmap = iter->bitmap[idx] >> bit;
for (;;) {
if (bitmap != 0) {
while ((bitmap & NETLBL_CATMAP_BIT) == 0) {
bitmap >>= 1;
bit++;
}
return iter->startbit +
(NETLBL_CATMAP_MAPSIZE * idx) + bit;
}
if (++idx >= NETLBL_CATMAP_MAPCNT) {
if (iter->next != NULL) {
iter = iter->next;
idx = 0;
} else
return -ENOENT;
}
bitmap = iter->bitmap[idx];
bit = 0;
}
return -ENOENT;
}
EXPORT_SYMBOL(netlbl_catmap_walk);
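/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * the catmap addressing used by the walk above, assuming the netlabel.h
 * defaults of four u64 words (256 category bits) per node: a category
 * number maps to an owning node startbit, a word index and a bit within
 * that word. The demo_* names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_CATMAP_MAPSIZE	64u	/* bits per word */
#define DEMO_CATMAP_MAPCNT	4u	/* words per node */
#define DEMO_CATMAP_SIZE	(DEMO_CATMAP_MAPSIZE * DEMO_CATMAP_MAPCNT)

static void demo_catmap_locate(uint32_t category, uint32_t *startbit,
			       uint32_t *idx, uint32_t *bit)
{
	*startbit = category & ~(DEMO_CATMAP_SIZE - 1);		/* owning node */
	*idx = (category - *startbit) / DEMO_CATMAP_MAPSIZE;	/* word in node */
	*bit = (category - *startbit) % DEMO_CATMAP_MAPSIZE;	/* bit in word */
}

int main(void)
{
	uint32_t startbit, idx, bit;

	demo_catmap_locate(300, &startbit, &idx, &bit);
	printf("node %u, word %u, bit %u\n", startbit, idx, bit);	/* 256, 0, 44 */
	return 0;
}
#endif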
/**
* netlbl_catmap_walkrng - Find the end of a string of set bits
* @catmap: the category bitmap
* @offset: the offset to start searching at, in bits
*
* Description:
* This function walks a LSM secattr category bitmap starting at @offset and
* returns the spot of the first cleared bit or -ENOENT if the offset is past
* the end of the bitmap.
*
*/
int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset)
{
struct netlbl_lsm_catmap *iter;
struct netlbl_lsm_catmap *prev = NULL;
u32 idx;
u32 bit;
u64 bitmask;
u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
return -ENOENT;
if (offset > iter->startbit) {
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
bit = offset % NETLBL_CATMAP_MAPSIZE;
} else {
idx = 0;
bit = 0;
}
bitmask = NETLBL_CATMAP_BIT << bit;
for (;;) {
bitmap = iter->bitmap[idx];
while (bitmask != 0 && (bitmap & bitmask) != 0) {
bitmask <<= 1;
bit++;
}
if (prev && idx == 0 && bit == 0)
return prev->startbit + NETLBL_CATMAP_SIZE - 1;
else if (bitmask != 0)
return iter->startbit +
(NETLBL_CATMAP_MAPSIZE * idx) + bit - 1;
else if (++idx >= NETLBL_CATMAP_MAPCNT) {
if (iter->next == NULL)
return iter->startbit + NETLBL_CATMAP_SIZE - 1;
prev = iter;
iter = iter->next;
idx = 0;
}
bitmask = NETLBL_CATMAP_BIT;
bit = 0;
}
return -ENOENT;
}
/**
* netlbl_catmap_getlong - Export an unsigned long bitmap
* @catmap: pointer to the category bitmap
* @offset: pointer to the requested offset
* @bitmap: the exported bitmap
*
* Description:
* Export a bitmap with an offset greater than or equal to @offset and return
* it in @bitmap. The @offset must be aligned to an unsigned long and will be
* updated on return if different from what was requested; if the catmap is
* empty at the requested offset and beyond, the @offset is set to (u32)-1.
* Returns zero on success, negative values on failure.
*
*/
int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
u32 *offset,
unsigned long *bitmap)
{
struct netlbl_lsm_catmap *iter;
u32 off = *offset;
u32 idx;
/* only allow aligned offsets */
if ((off & (BITS_PER_LONG - 1)) != 0)
return -EINVAL;
/* a null catmap is equivalent to an empty one */
if (!catmap) {
*offset = (u32)-1;
return 0;
}
if (off < catmap->startbit) {
off = catmap->startbit;
*offset = off;
}
iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_WALK, 0);
if (iter == NULL) {
*offset = (u32)-1;
return 0;
}
if (off < iter->startbit) {
*offset = iter->startbit;
off = 0;
} else
off -= iter->startbit;
idx = off / NETLBL_CATMAP_MAPSIZE;
*bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_MAPSIZE);
return 0;
}
/**
* netlbl_catmap_setbit - Set a bit in a LSM secattr catmap
* @catmap: pointer to the category bitmap
* @bit: the bit to set
* @flags: memory allocation flags
*
* Description:
* Set the bit specified by @bit in @catmap. Returns zero on success,
* negative values on failure.
*
*/
int netlbl_catmap_setbit(struct netlbl_lsm_catmap **catmap,
u32 bit,
gfp_t flags)
{
struct netlbl_lsm_catmap *iter;
u32 idx;
iter = _netlbl_catmap_getnode(catmap, bit, _CM_F_ALLOC, flags);
if (iter == NULL)
return -ENOMEM;
bit -= iter->startbit;
idx = bit / NETLBL_CATMAP_MAPSIZE;
iter->bitmap[idx] |= NETLBL_CATMAP_BIT << (bit % NETLBL_CATMAP_MAPSIZE);
return 0;
}
EXPORT_SYMBOL(netlbl_catmap_setbit);
/**
* netlbl_catmap_setrng - Set a range of bits in a LSM secattr catmap
* @catmap: pointer to the category bitmap
* @start: the starting bit
* @end: the last bit in the string
* @flags: memory allocation flags
*
* Description:
* Set a range of bits, starting at @start and ending with @end. Returns zero
* on success, negative values on failure.
*
*/
int netlbl_catmap_setrng(struct netlbl_lsm_catmap **catmap,
u32 start,
u32 end,
gfp_t flags)
{
int rc = 0;
u32 spot = start;
while (rc == 0 && spot <= end) {
if (((spot & (BITS_PER_LONG - 1)) == 0) &&
((end - spot) > BITS_PER_LONG)) {
rc = netlbl_catmap_setlong(catmap,
spot,
(unsigned long)-1,
flags);
spot += BITS_PER_LONG;
} else
rc = netlbl_catmap_setbit(catmap, spot++, flags);
}
return rc;
}
/**
* netlbl_catmap_setlong - Import an unsigned long bitmap
* @catmap: pointer to the category bitmap
* @offset: offset to the start of the imported bitmap
* @bitmap: the bitmap to import
* @flags: memory allocation flags
*
* Description:
* Import the bitmap specified in @bitmap into @catmap, using the offset
* in @offset. The offset must be aligned to an unsigned long. Returns zero
* on success, negative values on failure.
*
*/
int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
u32 offset,
unsigned long bitmap,
gfp_t flags)
{
struct netlbl_lsm_catmap *iter;
u32 idx;
/* only allow aligned offsets */
if ((offset & (BITS_PER_LONG - 1)) != 0)
return -EINVAL;
iter = _netlbl_catmap_getnode(catmap, offset, _CM_F_ALLOC, flags);
if (iter == NULL)
return -ENOMEM;
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
iter->bitmap[idx] |= (u64)bitmap
<< (offset % NETLBL_CATMAP_MAPSIZE);
return 0;
}
/* Bitmap functions
*/
/**
* netlbl_bitmap_walk - Walk a bitmap looking for a bit
* @bitmap: the bitmap
* @bitmap_len: length in bits
* @offset: starting offset
* @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit
*
* Description:
* Starting at @offset, walk the bitmap from left to right until either the
* desired bit is found or we reach the end. Return the bit offset, -1 if
* not found.
*/
int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
u32 offset, u8 state)
{
u32 bit_spot;
u32 byte_offset;
unsigned char bitmask;
unsigned char byte;
if (offset >= bitmap_len)
return -1;
byte_offset = offset / 8;
byte = bitmap[byte_offset];
bit_spot = offset;
bitmask = 0x80 >> (offset % 8);
while (bit_spot < bitmap_len) {
if ((state && (byte & bitmask) == bitmask) ||
(state == 0 && (byte & bitmask) == 0))
return bit_spot;
if (++bit_spot >= bitmap_len)
return -1;
bitmask >>= 1;
if (bitmask == 0) {
byte = bitmap[++byte_offset];
bitmask = 0x80;
}
}
return -1;
}
EXPORT_SYMBOL(netlbl_bitmap_walk);
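/*
 * Illustrative, standalone sketch (under #if 0, not part of the build) of
 * the bit ordering walked above: these bitmaps are MSB-first, i.e. bit 0
 * of the stream is the 0x80 bit of byte 0. The demo_* name is hypothetical.
 */
#if 0
#include <stdio.h>

static int demo_test_bit(const unsigned char *bitmap, unsigned int bit)
{
	return (bitmap[bit / 8] >> (7 - (bit % 8))) & 1;
}

int main(void)
{
	const unsigned char bitmap[] = { 0x80, 0x01 };	/* bits 0 and 15 set */

	printf("%d %d %d\n", demo_test_bit(bitmap, 0),
	       demo_test_bit(bitmap, 1), demo_test_bit(bitmap, 15));	/* 1 0 1 */
	return 0;
}
#endif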
/**
* netlbl_bitmap_setbit - Sets a single bit in a bitmap
* @bitmap: the bitmap
* @bit: the bit
* @state: if non-zero, set the bit (1) else clear the bit (0)
*
* Description:
* Set a single bit in the bitmap.
*/
void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state)
{
u32 byte_spot;
u8 bitmask;
/* gcc always rounds to zero when doing integer division */
byte_spot = bit / 8;
bitmask = 0x80 >> (bit % 8);
if (state)
bitmap[byte_spot] |= bitmask;
else
bitmap[byte_spot] &= ~bitmask;
}
EXPORT_SYMBOL(netlbl_bitmap_setbit);
/*
* LSM Functions
*/
/**
* netlbl_enabled - Determine if the NetLabel subsystem is enabled
*
* Description:
* The LSM can use this function to determine if it should use NetLabel
* security attributes in its enforcement mechanism. Currently, NetLabel is
* considered to be enabled when its configuration contains a valid setup for
* at least one labeled protocol (i.e. NetLabel can understand incoming
* labeled packets of at least one type); otherwise NetLabel is considered to
* be disabled.
*
*/
int netlbl_enabled(void)
{
/* At some point we probably want to expose this mechanism to the user
* as well so that admins can toggle NetLabel regardless of the
* configuration */
return (atomic_read(&netlabel_mgmt_protocount) > 0);
}
/**
* netlbl_sock_setattr - Label a socket using the correct protocol
* @sk: the socket to label
* @family: protocol family
* @secattr: the security attributes
* @sk_locked: true if caller holds the socket lock
*
* Description:
* Attach the correct label to the given socket using the security attributes
* specified in @secattr. This function requires exclusive access to @sk,
* which means it either needs to be in the process of being created or locked.
* Returns zero on success, -EDESTADDRREQ if the domain is configured to use
* network address selectors (can't blindly label the socket), and negative
* values on all other failures.
*
*/
int netlbl_sock_setattr(struct sock *sk,
u16 family,
const struct netlbl_lsm_secattr *secattr,
bool sk_locked)
{
int ret_val;
struct netlbl_dom_map *dom_entry;
rcu_read_lock();
dom_entry = netlbl_domhsh_getentry(secattr->domain, family);
if (dom_entry == NULL) {
ret_val = -ENOENT;
goto socket_setattr_return;
}
switch (family) {
case AF_INET:
switch (dom_entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
ret_val = -EDESTADDRREQ;
break;
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
dom_entry->def.cipso,
secattr, sk_locked);
break;
case NETLBL_NLTYPE_UNLABELED:
ret_val = 0;
break;
default:
ret_val = -ENOENT;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
switch (dom_entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
ret_val = -EDESTADDRREQ;
break;
case NETLBL_NLTYPE_CALIPSO:
ret_val = calipso_sock_setattr(sk,
dom_entry->def.calipso,
secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
ret_val = 0;
break;
default:
ret_val = -ENOENT;
}
break;
#endif /* IPv6 */
default:
ret_val = -EPROTONOSUPPORT;
}
socket_setattr_return:
rcu_read_unlock();
return ret_val;
}
/**
* netlbl_sock_delattr - Delete all the NetLabel labels on a socket
* @sk: the socket
*
* Description:
* Remove all the NetLabel labeling from @sk. The caller is responsible for
* ensuring that @sk is locked.
*
*/
void netlbl_sock_delattr(struct sock *sk)
{
switch (sk->sk_family) {
case AF_INET:
cipso_v4_sock_delattr(sk);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
calipso_sock_delattr(sk);
break;
#endif /* IPv6 */
}
}
/**
* netlbl_sock_getattr - Determine the security attributes of a sock
* @sk: the sock
* @secattr: the security attributes
*
* Description:
* Examines the given sock to see if any NetLabel style labeling has been
* applied to the sock, if so it parses the socket label and returns the
* security attributes in @secattr. Returns zero on success, negative values
* on failure.
*
*/
int netlbl_sock_getattr(struct sock *sk,
struct netlbl_lsm_secattr *secattr)
{
int ret_val;
switch (sk->sk_family) {
case AF_INET:
ret_val = cipso_v4_sock_getattr(sk, secattr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
ret_val = calipso_sock_getattr(sk, secattr);
break;
#endif /* IPv6 */
default:
ret_val = -EPROTONOSUPPORT;
}
return ret_val;
}
/**
* netlbl_sk_lock_check - Check if the socket lock has been acquired.
* @sk: the socket to be checked
*
* Return: true if socket @sk is locked or if lock debugging is disabled at
* runtime or compile-time; false otherwise
*
*/
#ifdef CONFIG_LOCKDEP
bool netlbl_sk_lock_check(struct sock *sk)
{
if (debug_locks)
return lockdep_sock_is_held(sk);
return true;
}
#else
bool netlbl_sk_lock_check(struct sock *sk)
{
return true;
}
#endif
/**
* netlbl_conn_setattr - Label a connected socket using the correct protocol
* @sk: the socket to label
* @addr: the destination address
* @secattr: the security attributes
*
* Description:
* Attach the correct label to the given connected socket using the security
* attributes specified in @secattr. The caller is responsible for ensuring
* that @sk is locked. Returns zero on success, negative values on failure.
*
*/
int netlbl_conn_setattr(struct sock *sk,
struct sockaddr *addr,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
struct sockaddr_in *addr4;
#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 *addr6;
#endif
struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (addr->sa_family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
entry = netlbl_domhsh_getentry_af4(secattr->domain,
addr4->sin_addr.s_addr);
if (entry == NULL) {
ret_val = -ENOENT;
goto conn_setattr_return;
}
switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
entry->cipso, secattr,
netlbl_sk_lock_check(sk));
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
* but we could remove other protocols if needed */
netlbl_sock_delattr(sk);
ret_val = 0;
break;
default:
ret_val = -ENOENT;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (sk->sk_family != AF_INET6) {
ret_val = -EAFNOSUPPORT;
goto conn_setattr_return;
}
addr6 = (struct sockaddr_in6 *)addr;
entry = netlbl_domhsh_getentry_af6(secattr->domain,
&addr6->sin6_addr);
if (entry == NULL) {
ret_val = -ENOENT;
goto conn_setattr_return;
}
switch (entry->type) {
case NETLBL_NLTYPE_CALIPSO:
ret_val = calipso_sock_setattr(sk,
entry->calipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
* but we could remove other protocols if needed */
netlbl_sock_delattr(sk);
ret_val = 0;
break;
default:
ret_val = -ENOENT;
}
break;
#endif /* IPv6 */
default:
ret_val = -EPROTONOSUPPORT;
}
conn_setattr_return:
rcu_read_unlock();
return ret_val;
}
/**
* netlbl_req_setattr - Label a request socket using the correct protocol
* @req: the request socket to label
* @secattr: the security attributes
*
* Description:
* Attach the correct label to the given socket using the security attributes
* specified in @secattr. Returns zero on success, negative values on failure.
*
*/
int netlbl_req_setattr(struct request_sock *req,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
struct netlbl_dommap_def *entry;
struct inet_request_sock *ireq = inet_rsk(req);
rcu_read_lock();
switch (req->rsk_ops->family) {
case AF_INET:
entry = netlbl_domhsh_getentry_af4(secattr->domain,
ireq->ir_rmt_addr);
if (entry == NULL) {
ret_val = -ENOENT;
goto req_setattr_return;
}
switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_req_setattr(req,
entry->cipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
netlbl_req_delattr(req);
ret_val = 0;
break;
default:
ret_val = -ENOENT;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
entry = netlbl_domhsh_getentry_af6(secattr->domain,
&ireq->ir_v6_rmt_addr);
if (entry == NULL) {
ret_val = -ENOENT;
goto req_setattr_return;
}
switch (entry->type) {
case NETLBL_NLTYPE_CALIPSO:
ret_val = calipso_req_setattr(req,
entry->calipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
netlbl_req_delattr(req);
ret_val = 0;
break;
default:
ret_val = -ENOENT;
}
break;
#endif /* IPv6 */
default:
ret_val = -EPROTONOSUPPORT;
}
req_setattr_return:
rcu_read_unlock();
return ret_val;
}
/**
* netlbl_req_delattr - Delete all the NetLabel labels on a socket
* @req: the socket
*
* Description:
* Remove all the NetLabel labeling from @req.
*
*/
void netlbl_req_delattr(struct request_sock *req)
{
switch (req->rsk_ops->family) {
case AF_INET:
cipso_v4_req_delattr(req);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
calipso_req_delattr(req);
break;
#endif /* IPv6 */
}
}
/**
* netlbl_skbuff_setattr - Label a packet using the correct protocol
* @skb: the packet
* @family: protocol family
* @secattr: the security attributes
*
* Description:
* Attach the correct label to the given packet using the security attributes
* specified in @secattr. Returns zero on success, negative values on failure.
*
*/
int netlbl_skbuff_setattr(struct sk_buff *skb,
u16 family,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
struct iphdr *hdr4;
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *hdr6;
#endif
struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (family) {
case AF_INET:
hdr4 = ip_hdr(skb);
entry = netlbl_domhsh_getentry_af4(secattr->domain,
hdr4->daddr);
if (entry == NULL) {
ret_val = -ENOENT;
goto skbuff_setattr_return;
}
switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso,
secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
* but we could remove other protocols if needed */
ret_val = cipso_v4_skbuff_delattr(skb);
break;
default:
ret_val = -ENOENT;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
hdr6 = ipv6_hdr(skb);
entry = netlbl_domhsh_getentry_af6(secattr->domain,
&hdr6->daddr);
if (entry == NULL) {
ret_val = -ENOENT;
goto skbuff_setattr_return;
}
switch (entry->type) {
case NETLBL_NLTYPE_CALIPSO:
ret_val = calipso_skbuff_setattr(skb, entry->calipso,
secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
* but we could remove other protocols if needed */
ret_val = calipso_skbuff_delattr(skb);
break;
default:
ret_val = -ENOENT;
}
break;
#endif /* IPv6 */
default:
ret_val = -EPROTONOSUPPORT;
}
skbuff_setattr_return:
rcu_read_unlock();
return ret_val;
}
/**
* netlbl_skbuff_getattr - Determine the security attributes of a packet
* @skb: the packet
* @family: protocol family
* @secattr: the security attributes
*
* Description:
* Examines the given packet to see if a recognized form of packet labeling
* is present, if so it parses the packet label and returns the security
* attributes in @secattr. Returns zero on success, negative values on
* failure.
*
*/
int netlbl_skbuff_getattr(const struct sk_buff *skb,
u16 family,
struct netlbl_lsm_secattr *secattr)
{
unsigned char *ptr;
switch (family) {
case AF_INET:
ptr = cipso_v4_optptr(skb);
if (ptr && cipso_v4_getattr(ptr, secattr) == 0)
return 0;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
ptr = calipso_optptr(skb);
if (ptr && calipso_getattr(ptr, secattr) == 0)
return 0;
break;
#endif /* IPv6 */
}
return netlbl_unlabel_getattr(skb, family, secattr);
}
/**
* netlbl_skbuff_err - Handle a LSM error on a sk_buff
* @skb: the packet
* @family: the family
* @error: the error code
* @gateway: true if host is acting as a gateway, false otherwise
*
* Description:
* Deal with an LSM problem when handling the packet in @skb; typically this is
* a permission denied problem (-EACCES). The correct action is determined
* according to the packet's labeling protocol.
*
*/
void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway)
{
switch (family) {
case AF_INET:
if (cipso_v4_optptr(skb))
cipso_v4_error(skb, error, gateway);
break;
}
}
/**
* netlbl_cache_invalidate - Invalidate all of the NetLabel protocol caches
*
* Description:
* For all of the NetLabel protocols that support some form of label mapping
* cache, invalidate the cache. Returns zero on success, negative values on
* error.
*
*/
void netlbl_cache_invalidate(void)
{
cipso_v4_cache_invalidate();
#if IS_ENABLED(CONFIG_IPV6)
calipso_cache_invalidate();
#endif /* IPv6 */
}
/**
* netlbl_cache_add - Add an entry to a NetLabel protocol cache
* @skb: the packet
* @family: the family
* @secattr: the packet's security attributes
*
* Description:
* Add the LSM security attributes for the given packet to the underlying
* NetLabel protocol's label mapping cache. Returns zero on success, negative
* values on error.
*
*/
int netlbl_cache_add(const struct sk_buff *skb, u16 family,
const struct netlbl_lsm_secattr *secattr)
{
unsigned char *ptr;
if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0)
return -ENOMSG;
switch (family) {
case AF_INET:
ptr = cipso_v4_optptr(skb);
if (ptr)
return cipso_v4_cache_add(ptr, secattr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
ptr = calipso_optptr(skb);
if (ptr)
return calipso_cache_add(ptr, secattr);
break;
#endif /* IPv6 */
}
return -ENOMSG;
}
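/*
 * Illustrative sketch (kept under #if 0, not compiled): a typical receive-side
 * pattern where an LSM reads a packet's label via netlbl_skbuff_getattr() and,
 * when the attributes are cacheable, primes the protocol cache with
 * netlbl_cache_add(). The helper name is hypothetical;
 * netlbl_secattr_init()/netlbl_secattr_destroy() are the usual secattr
 * lifetime helpers.
 */
#if 0
static void example_getattr_and_cache(const struct sk_buff *skb, u16 family)
{
	struct netlbl_lsm_secattr secattr;

	netlbl_secattr_init(&secattr);
	if (netlbl_skbuff_getattr(skb, family, &secattr) == 0 &&
	    (secattr.flags & NETLBL_SECATTR_CACHEABLE))
		netlbl_cache_add(skb, family, &secattr);
	netlbl_secattr_destroy(&secattr);
}
#endif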
/*
* Protocol Engine Functions
*/
/**
* netlbl_audit_start - Start an audit message
* @type: audit message type
* @audit_info: NetLabel audit information
*
* Description:
* Start an audit message using the type specified in @type and fill the audit
* message with some fields common to all NetLabel audit messages. This
* function should only be used by protocol engines, not LSMs. Returns a
* pointer to the audit buffer on success, NULL on failure.
*
*/
struct audit_buffer *netlbl_audit_start(int type,
struct netlbl_audit *audit_info)
{
return netlbl_audit_start_common(type, audit_info);
}
EXPORT_SYMBOL(netlbl_audit_start);
/*
* Setup Functions
*/
/**
* netlbl_init - Initialize NetLabel
*
* Description:
* Perform the required NetLabel initialization before first use.
*
*/
static int __init netlbl_init(void)
{
int ret_val;
printk(KERN_INFO "NetLabel: Initializing\n");
printk(KERN_INFO "NetLabel: domain hash size = %u\n",
(1 << NETLBL_DOMHSH_BITSIZE));
printk(KERN_INFO "NetLabel: protocols = UNLABELED CIPSOv4 CALIPSO\n");
ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE);
if (ret_val != 0)
goto init_failure;
ret_val = netlbl_unlabel_init(NETLBL_UNLHSH_BITSIZE);
if (ret_val != 0)
goto init_failure;
ret_val = netlbl_netlink_init();
if (ret_val != 0)
goto init_failure;
ret_val = netlbl_unlabel_defconf();
if (ret_val != 0)
goto init_failure;
printk(KERN_INFO "NetLabel: unlabeled traffic allowed by default\n");
return 0;
init_failure:
panic("NetLabel: failed to initialize properly (%d)\n", ret_val);
}
subsys_initcall(netlbl_init);
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#include <linux/iov_iter.h>
static __always_inline
size_t copy_to_user_iter(void __user *iter_to, size_t progress,
size_t len, void *from, void *priv2)
{
if (should_fail_usercopy())
return len;
if (access_ok(iter_to, len)) {
from += progress;
instrument_copy_to_user(iter_to, from, len);
len = raw_copy_to_user(iter_to, from, len);
}
return len;
}
static __always_inline
size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
size_t len, void *from, void *priv2)
{
ssize_t res;
if (should_fail_usercopy())
return len;
from += progress;
res = copy_to_user_nofault(iter_to, from, len);
return res < 0 ? len : res;
}
static __always_inline
size_t copy_from_user_iter(void __user *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
size_t res = len;
if (should_fail_usercopy())
return len;
if (access_ok(iter_from, len)) {
to += progress;
instrument_copy_from_user_before(to, iter_from, len);
res = raw_copy_from_user(to, iter_from, len);
instrument_copy_from_user_after(to, iter_from, len, res);
}
return res;
}
static __always_inline
size_t memcpy_to_iter(void *iter_to, size_t progress,
size_t len, void *from, void *priv2)
{
memcpy(iter_to, from + progress, len);
return 0;
}
static __always_inline
size_t memcpy_from_iter(void *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
memcpy(to + progress, iter_from, len);
return 0;
}
/*
* fault_in_iov_iter_readable - fault in iov iterator for reading
* @i: iterator
* @size: maximum length
*
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
* @size. For each iovec, fault in each page that constitutes the iovec.
*
* Returns the number of bytes not faulted in (like copy_to_user() and
* copy_from_user()).
*
* Always returns 0 for non-userspace iterators.
*/
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
if (iter_is_ubuf(i)) {
size_t n = min(size, iov_iter_count(i));
n -= fault_in_readable(i->ubuf + i->iov_offset, n);
return size - n;
} else if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
size -= count;
for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
if (unlikely(!len))
continue;
ret = fault_in_readable(p->iov_base + skip, len);
count -= len - ret;
if (ret)
break;
}
return count + size;
}
return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
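/*
 * Illustrative sketch (kept under #if 0, not compiled): the classic
 * buffered-write pattern that pairs fault_in_iov_iter_readable() with an
 * atomic copy. The copy runs with page faults disabled, so a short copy is
 * handled by faulting the data in and retrying. The helper name and the
 * simplified loop are assumptions.
 */
#if 0
static ssize_t example_write_chunk(struct folio *folio, size_t offset,
				   size_t chunk, struct iov_iter *i)
{
	size_t copied;

	do {
		/* returns the number of bytes NOT faulted in */
		if (fault_in_iov_iter_readable(i, chunk) == chunk)
			return -EFAULT;
		copied = copy_folio_from_iter_atomic(folio, offset, chunk, i);
	} while (copied == 0);	/* nothing copied: fault in again and retry */

	return copied;
}
#endif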
/*
* fault_in_iov_iter_writeable - fault in iov iterator for writing
* @i: iterator
* @size: maximum length
*
* Faults in the iterator using get_user_pages(), i.e., without triggering
* hardware page faults. This is primarily useful when we already know that
* some or all of the pages in @i aren't in memory.
*
* Returns the number of bytes not faulted in, like copy_to_user() and
* copy_from_user().
*
* Always returns 0 for non-user-space iterators.
*/
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
if (iter_is_ubuf(i)) {
size_t n = min(size, iov_iter_count(i));
n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
return size - n;
} else if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
size -= count;
for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
if (unlikely(!len))
continue;
ret = fault_in_safe_writeable(p->iov_base + skip, len);
count -= len - ret;
if (ret)
break;
}
return count + size;
}
return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
void iov_iter_init(struct iov_iter *i, unsigned int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_IOVEC,
.nofault = false,
.data_source = direction,
.__iov = iov,
.nr_segs = nr_segs,
.iov_offset = 0,
.count = count
};
}
EXPORT_SYMBOL(iov_iter_init);
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
return iterate_and_advance(i, bytes, (void *)addr,
copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
static __always_inline
size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
size_t len, void *from, void *priv2)
{
if (access_ok(iter_to, len)) {
from += progress;
instrument_copy_to_user(iter_to, from, len);
len = copy_mc_to_user(iter_to, from, len);
}
return len;
}
static __always_inline
size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
size_t len, void *from, void *priv2)
{
return copy_mc_to_kernel(iter_to, from + progress, len);
}
/**
* _copy_mc_to_iter - copy to iter with source memory error exception handling
* @addr: source kernel address
* @bytes: total transfer length
* @i: destination iterator
*
* The pmem driver deploys this for the dax operation
* (dax_copy_to_iter()) for dax reads (bypass page-cache and the
* block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
* successfully copied.
*
* The main differences between this and a typical _copy_to_iter() are:
*
* * Typical tail/residue handling after a fault retries the copy
* byte-by-byte until the fault happens again. Re-triggering machine
* checks is potentially fatal so the implementation uses source
* alignment and poison alignment assumptions to avoid re-triggering
* hardware exceptions.
*
* * ITER_KVEC and ITER_BVEC can return short copies. Compare to
* copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
*
* Return: number of bytes copied (may be %0)
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
return iterate_and_advance(i, bytes, (void *)addr,
copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
return iterate_and_advance(i, bytes, addr,
copy_from_user_iter, memcpy_from_iter);
}
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
static __always_inline
size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
}
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
return iterate_and_advance(i, bytes, addr,
copy_from_user_iter_nocache,
memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
static __always_inline
size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
return __copy_from_user_flushcache(to + progress, iter_from, len);
}
static __always_inline
size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
size_t len, void *to, void *priv2)
{
memcpy_flushcache(to + progress, iter_from, len);
return 0;
}
/**
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
* @bytes: total transfer length
* @i: source iterator
*
* The pmem driver arranges for filesystem-dax to use this facility via
* dax_copy_from_iter() for ensuring that writes to persistent memory
* are flushed through the CPU cache. It is differentiated from
* _copy_from_iter_nocache() in that it guarantees all data is flushed for
* all iterator types. _copy_from_iter_nocache() only attempts to
* bypass the cache for the ITER_IOVEC case, and on some archs may use
* instructions that strand dirty-data in the cache.
*
* Return: number of bytes copied (may be %0)
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
return iterate_and_advance(i, bytes, addr,
copy_from_user_iter_flushcache,
memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
struct page *head;
size_t v = n + offset;
/*
* The general case needs to access the page order in order
* to compute the page size.
* However, we mostly deal with order-0 pages and thus can
* avoid a possible cache line miss for requests that fit all
* page orders.
*/
if (n <= v && v <= PAGE_SIZE)
return true;
head = compound_head(page);
v += (page - head) << PAGE_SHIFT;
if (WARN_ON(n > v || v > page_size(head)))
return false;
return true;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
if (WARN_ON_ONCE(i->data_source))
return 0;
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
n = _copy_to_iter(kaddr + offset, n, i);
kunmap_local(kaddr);
res += n;
bytes -= n;
if (!bytes || !n)
break;
offset += n;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
}
return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
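/*
 * Illustrative sketch (kept under #if 0, not compiled): a minimal read-side
 * loop that feeds already-uptodate pages into the caller's iterator with
 * copy_page_to_iter(), stopping on a short copy. The helper name and
 * arguments are assumptions.
 */
#if 0
static ssize_t example_copy_pages_out(struct page **pages, size_t nr_pages,
				      size_t len, struct iov_iter *to)
{
	ssize_t done = 0;
	size_t k;

	for (k = 0; k < nr_pages && len && iov_iter_count(to); k++) {
		size_t n = min_t(size_t, len, PAGE_SIZE);
		size_t copied = copy_page_to_iter(pages[k], 0, n, to);

		done += copied;
		len -= copied;
		if (copied < n)
			break;	/* faulted or iterator exhausted */
	}
	return done;
}
#endif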
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
if (WARN_ON_ONCE(i->data_source))
return 0;
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
n = iterate_and_advance(i, n, kaddr + offset,
copy_to_user_iter_nofault,
memcpy_to_iter);
kunmap_local(kaddr);
res += n;
bytes -= n;
if (!bytes || !n)
break;
offset += n;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
}
return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
n = _copy_from_iter(kaddr + offset, n, i);
kunmap_local(kaddr);
res += n;
bytes -= n;
if (!bytes || !n)
break;
offset += n;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
}
return res;
}
EXPORT_SYMBOL(copy_page_from_iter);
static __always_inline
size_t zero_to_user_iter(void __user *iter_to, size_t progress,
size_t len, void *priv, void *priv2)
{
return clear_user(iter_to, len);
}
static __always_inline
size_t zero_to_iter(void *iter_to, size_t progress,
size_t len, void *priv, void *priv2)
{
memset(iter_to, 0, len);
return 0;
}
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
return iterate_and_advance(i, bytes, NULL,
zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset,
size_t bytes, struct iov_iter *i)
{
size_t n, copied = 0;
if (!page_copy_sane(&folio->page, offset, bytes))
return 0;
if (WARN_ON_ONCE(!i->data_source))
return 0;
do {
char *to = kmap_local_folio(folio, offset);
n = bytes - copied;
if (folio_test_partial_kmap(folio) &&
n > PAGE_SIZE - offset_in_page(offset))
n = PAGE_SIZE - offset_in_page(offset);
pagefault_disable();
n = __copy_from_iter(to, n, i);
pagefault_enable();
kunmap_local(to);
copied += n;
offset += n;
} while (copied != bytes && n > 0);
return copied;
}
EXPORT_SYMBOL(copy_folio_from_iter_atomic);
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
const struct bio_vec *bvec, *end;
if (!i->count)
return;
i->count -= size;
size += i->iov_offset;
for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
if (likely(size < bvec->bv_len))
break;
size -= bvec->bv_len;
}
i->iov_offset = size;
i->nr_segs -= bvec - i->bvec;
i->bvec = bvec;
}
static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
const struct iovec *iov, *end;
if (!i->count)
return;
i->count -= size;
size += i->iov_offset; // from beginning of current segment
for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
if (likely(size < iov->iov_len))
break;
size -= iov->iov_len;
}
i->iov_offset = size;
i->nr_segs -= iov - iter_iov(i);
i->__iov = iov;
}
static void iov_iter_folioq_advance(struct iov_iter *i, size_t size)
{
const struct folio_queue *folioq = i->folioq;
unsigned int slot = i->folioq_slot;
if (!i->count)
return;
i->count -= size;
if (slot >= folioq_nr_slots(folioq)) {
folioq = folioq->next;
slot = 0;
}
size += i->iov_offset; /* From beginning of current segment. */
do {
size_t fsize = folioq_folio_size(folioq, slot);
if (likely(size < fsize))
break;
size -= fsize;
slot++;
if (slot >= folioq_nr_slots(folioq) && folioq->next) {
folioq = folioq->next;
slot = 0;
}
} while (size);
i->iov_offset = size;
i->folioq_slot = slot;
i->folioq = folioq;
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(i->count < size))
size = i->count;
if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
i->iov_offset += size;
i->count -= size;
} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
/* iovec and kvec have identical layouts */
iov_iter_iovec_advance(i, size);
} else if (iov_iter_is_bvec(i)) {
iov_iter_bvec_advance(i, size);
} else if (iov_iter_is_folioq(i)) {
iov_iter_folioq_advance(i, size);
} else if (iov_iter_is_discard(i)) {
i->count -= size;
}
}
EXPORT_SYMBOL(iov_iter_advance);
static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll)
{
const struct folio_queue *folioq = i->folioq;
unsigned int slot = i->folioq_slot;
for (;;) {
size_t fsize;
if (slot == 0) {
folioq = folioq->prev;
slot = folioq_nr_slots(folioq);
}
slot--;
fsize = folioq_folio_size(folioq, slot);
if (unroll <= fsize) {
i->iov_offset = fsize - unroll;
break;
}
unroll -= fsize;
}
i->folioq_slot = slot;
i->folioq = folioq;
}
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
if (!unroll)
return;
if (WARN_ON(unroll > MAX_RW_COUNT))
return;
i->count += unroll;
if (unlikely(iov_iter_is_discard(i)))
return;
if (unroll <= i->iov_offset) {
i->iov_offset -= unroll;
return;
}
unroll -= i->iov_offset;
if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
BUG(); /* We should never go beyond the start of the specified
* range since we might then be straying into pages that
* aren't pinned.
*/
} else if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
i->nr_segs++;
if (unroll <= n) {
i->bvec = bvec;
i->iov_offset = n - unroll;
return;
}
unroll -= n;
}
} else if (iov_iter_is_folioq(i)) {
i->iov_offset = 0;
iov_iter_folioq_revert(i, unroll);
} else { /* same logics for iovec and kvec */
const struct iovec *iov = iter_iov(i);
while (1) {
size_t n = (--iov)->iov_len;
i->nr_segs++;
if (unroll <= n) {
i->__iov = iov;
i->iov_offset = n - unroll;
return;
}
unroll -= n;
}
}
}
EXPORT_SYMBOL(iov_iter_revert);
/*
* Return the count of just the current iov_iter segment.
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
if (i->nr_segs > 1) {
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
}
if (unlikely(iov_iter_is_folioq(i)))
return !i->count ? 0 :
umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
const struct kvec *kvec, unsigned long nr_segs,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_KVEC,
.data_source = direction,
.kvec = kvec,
.nr_segs = nr_segs,
.iov_offset = 0,
.count = count
};
}
EXPORT_SYMBOL(iov_iter_kvec);
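/*
 * Illustrative sketch (kept under #if 0, not compiled): wrapping a kernel
 * buffer in an ITER_KVEC iterator so it can be passed to code that only
 * consumes an iov_iter. The helper name is hypothetical; ITER_DEST marks the
 * iterator as the destination of the copy.
 */
#if 0
static size_t example_copy_into_kernel_buf(void *dst, size_t len,
					   const void *src)
{
	struct kvec kv = { .iov_base = dst, .iov_len = len };
	struct iov_iter to;

	iov_iter_kvec(&to, ITER_DEST, &kv, 1, len);
	return copy_to_iter(src, len, &to);	/* bytes actually copied */
}
#endif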
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
const struct bio_vec *bvec, unsigned long nr_segs,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_BVEC,
.data_source = direction,
.bvec = bvec,
.nr_segs = nr_segs,
.iov_offset = 0,
.count = count
};
}
EXPORT_SYMBOL(iov_iter_bvec);
/**
* iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
* @folioq: The starting point in the folio queue.
* @first_slot: The first slot in the folio queue to use
* @offset: The offset into the folio in the first slot to start at
* @count: The size of the I/O buffer in bytes.
*
* Set up an I/O iterator to either draw data out of the pages attached to an
* inode or to inject data into those pages. The pages *must* be prevented
* from evaporation, either by taking a ref on them or locking them by the
* caller.
*/
void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
const struct folio_queue *folioq, unsigned int first_slot,
unsigned int offset, size_t count)
{
BUG_ON(direction & ~1);
*i = (struct iov_iter) {
.iter_type = ITER_FOLIOQ,
.data_source = direction,
.folioq = folioq,
.folioq_slot = first_slot,
.count = count,
.iov_offset = offset,
};
}
EXPORT_SYMBOL(iov_iter_folio_queue);
/**
* iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
* @xarray: The xarray to access.
* @start: The start file position.
* @count: The size of the I/O buffer in bytes.
*
* Set up an I/O iterator to either draw data out of the pages attached to an
* inode or to inject data into those pages. The pages *must* be prevented
* from evaporation, either by taking a ref on them or locking them by the
* caller.
*/
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
struct xarray *xarray, loff_t start, size_t count)
{
BUG_ON(direction & ~1);
*i = (struct iov_iter) {
.iter_type = ITER_XARRAY,
.data_source = direction,
.xarray = xarray,
.xarray_start = start,
.count = count,
.iov_offset = 0
};
}
EXPORT_SYMBOL(iov_iter_xarray);
/**
* iov_iter_discard - Initialise an I/O iterator that discards data
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
* @count: The size of the I/O buffer in bytes.
*
* Set up an I/O iterator that just discards everything that's written to it.
* It's only available as a READ iterator.
*/
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
BUG_ON(direction != READ);
*i = (struct iov_iter){
.iter_type = ITER_DISCARD,
.data_source = false,
.count = count,
.iov_offset = 0
};
}
EXPORT_SYMBOL(iov_iter_discard);
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
const struct iovec *iov = iter_iov(i);
unsigned long res = 0;
size_t size = i->count;
size_t skip = i->iov_offset;
do {
size_t len = iov->iov_len - skip;
if (len) {
res |= (unsigned long)iov->iov_base + skip;
if (len > size)
len = size;
res |= len;
size -= len;
}
iov++;
skip = 0;
} while (size);
return res;
}
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
const struct bio_vec *bvec = i->bvec;
unsigned res = 0;
size_t size = i->count;
unsigned skip = i->iov_offset;
do {
size_t len = bvec->bv_len - skip;
res |= (unsigned long)bvec->bv_offset + skip;
if (len > size)
len = size;
res |= len;
bvec++;
size -= len;
skip = 0;
} while (size);
return res;
}
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
if (likely(iter_is_ubuf(i))) {
size_t size = i->count;
if (size)
return ((unsigned long)i->ubuf + i->iov_offset) | size;
return 0;
}
/* iovec and kvec have identical layouts */
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_iter_alignment_iovec(i);
if (iov_iter_is_bvec(i))
return iov_iter_alignment_bvec(i);
/* With both xarray and folioq types, we're dealing with whole folios. */
if (iov_iter_is_folioq(i))
return i->iov_offset | i->count;
if (iov_iter_is_xarray(i))
return (i->xarray_start + i->iov_offset) | i->count;
return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
unsigned long v = 0;
size_t size = i->count;
unsigned k;
if (iter_is_ubuf(i))
return 0;
if (WARN_ON(!iter_is_iovec(i)))
return ~0U;
for (k = 0; k < i->nr_segs; k++) {
const struct iovec *iov = iter_iov(i) + k;
if (iov->iov_len) {
unsigned long base = (unsigned long)iov->iov_base;
if (v) // if not the first one
res |= base | v; // this start | previous end
v = base + iov->iov_len;
if (size <= iov->iov_len)
break;
size -= iov->iov_len;
}
}
return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static int want_pages_array(struct page ***res, size_t size,
size_t start, unsigned int maxpages)
{
unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
if (count > maxpages)
count = maxpages;
WARN_ON(!count); // caller should've prevented that
if (!*res) {
*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
if (!*res)
return 0;
}
return count;
}
static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
struct page ***ppages, size_t maxsize,
unsigned maxpages, size_t *_start_offset)
{
const struct folio_queue *folioq = iter->folioq;
struct page **pages;
unsigned int slot = iter->folioq_slot;
size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;
if (slot >= folioq_nr_slots(folioq)) {
folioq = folioq->next;
slot = 0;
if (WARN_ON(iov_offset != 0))
return -EIO;
}
maxpages = want_pages_array(ppages, maxsize, iov_offset & ~PAGE_MASK, maxpages);
if (!maxpages)
return -ENOMEM;
*_start_offset = iov_offset & ~PAGE_MASK;
pages = *ppages;
for (;;) {
struct folio *folio = folioq_folio(folioq, slot);
size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
size_t part = PAGE_SIZE - offset % PAGE_SIZE;
if (offset < fsize) {
part = umin(part, umin(maxsize - extracted, fsize - offset));
count -= part;
iov_offset += part;
extracted += part;
*pages = folio_page(folio, offset / PAGE_SIZE);
get_page(*pages);
pages++;
maxpages--;
}
if (maxpages == 0 || extracted >= maxsize)
break;
if (iov_offset >= fsize) {
iov_offset = 0;
slot++;
if (slot == folioq_nr_slots(folioq) && folioq->next) {
folioq = folioq->next;
slot = 0;
}
}
}
iter->count = count;
iter->iov_offset = iov_offset;
iter->folioq = folioq;
iter->folioq_slot = slot;
return extracted;
}
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
pgoff_t index, unsigned int nr_pages)
{
XA_STATE(xas, xa, index);
struct folio *folio;
unsigned int ret = 0;
rcu_read_lock();
for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
if (xas_retry(&xas, folio))
continue;
/* Has the folio moved or been split? */
if (unlikely(folio != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
pages[ret] = folio_file_page(folio, xas.xa_index);
folio_get(folio);
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
static ssize_t iter_xarray_get_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned maxpages, size_t *_start_offset)
{
unsigned nr, offset, count;
pgoff_t index;
loff_t pos;
pos = i->xarray_start + i->iov_offset;
index = pos >> PAGE_SHIFT;
offset = pos & ~PAGE_MASK;
*_start_offset = offset;
count = want_pages_array(pages, maxsize, offset, maxpages);
if (!count)
return -ENOMEM;
nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
if (nr == 0)
return 0;
maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
i->iov_offset += maxsize;
i->count -= maxsize;
return maxsize;
}
/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
size_t skip;
long k;
if (iter_is_ubuf(i))
return (unsigned long)i->ubuf + i->iov_offset;
for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
const struct iovec *iov = iter_iov(i) + k;
size_t len = iov->iov_len - skip;
if (unlikely(!len))
continue;
if (*size > len)
*size = len;
return (unsigned long)iov->iov_base + skip;
}
BUG(); // if it had been empty, we wouldn't get called
}
/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
size_t *size, size_t *start)
{
struct page *page;
size_t skip = i->iov_offset, len;
len = i->bvec->bv_len - skip;
if (*size > len)
*size = len;
skip += i->bvec->bv_offset;
page = i->bvec->bv_page + skip / PAGE_SIZE;
*start = skip % PAGE_SIZE;
return page;
}
static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages, size_t *start)
{
unsigned int n, gup_flags = 0;
if (maxsize > i->count)
maxsize = i->count;
if (!maxsize)
return 0;
if (maxsize > MAX_RW_COUNT)
maxsize = MAX_RW_COUNT;
if (likely(user_backed_iter(i))) {
unsigned long addr;
int res;
if (iov_iter_rw(i) != WRITE)
gup_flags |= FOLL_WRITE;
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
addr = first_iovec_segment(i, &maxsize);
*start = addr % PAGE_SIZE;
addr &= PAGE_MASK;
n = want_pages_array(pages, maxsize, *start, maxpages);
if (!n)
return -ENOMEM;
res = get_user_pages_fast(addr, n, gup_flags, *pages);
if (unlikely(res <= 0))
return res;
maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
iov_iter_advance(i, maxsize);
return maxsize;
}
if (iov_iter_is_bvec(i)) {
struct page **p;
struct page *page;
page = first_bvec_segment(i, &maxsize, start);
n = want_pages_array(pages, maxsize, *start, maxpages);
if (!n)
return -ENOMEM;
p = *pages;
for (int k = 0; k < n; k++) {
struct folio *folio = page_folio(page + k);
p[k] = page + k;
if (!folio_test_slab(folio))
folio_get(folio);
}
maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
i->count -= maxsize;
i->iov_offset += maxsize;
if (i->iov_offset == i->bvec->bv_len) {
i->iov_offset = 0;
i->bvec++;
i->nr_segs--;
}
return maxsize;
}
if (iov_iter_is_folioq(i))
return iter_folioq_get_pages(i, pages, maxsize, maxpages, start);
if (iov_iter_is_xarray(i))
return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
return -EFAULT;
}
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start)
{
if (!maxpages)
return 0;
BUG_ON(!pages);
return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
struct page ***pages, size_t maxsize, size_t *start)
{
ssize_t len;
*pages = NULL;
len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
if (len <= 0) {
kvfree(*pages);
*pages = NULL;
}
return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
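/*
 * Illustrative sketch (kept under #if 0, not compiled): taking references on
 * the pages behind a user-backed iterator with iov_iter_get_pages_alloc2()
 * and releasing them afterwards. On failure the function has already freed
 * the array, so only the success path needs cleanup. The helper name is
 * hypothetical.
 */
#if 0
static ssize_t example_with_pages(struct iov_iter *i, size_t maxsize)
{
	struct page **pages = NULL;
	size_t offset;
	ssize_t bytes;
	unsigned int k, npages;

	bytes = iov_iter_get_pages_alloc2(i, &pages, maxsize, &offset);
	if (bytes <= 0)
		return bytes;	/* array already freed and NULLed */

	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
	/* ... use pages[0..npages-1]; data starts at @offset in pages[0] ... */

	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	kvfree(pages);
	return bytes;
}
#endif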
static int iov_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;
const struct iovec *p;
int npages = 0;
for (p = iter_iov(i); size; skip = 0, p++) {
unsigned offs = offset_in_page(p->iov_base + skip);
size_t len = min(p->iov_len - skip, size);
if (len) {
size -= len;
npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
if (unlikely(npages > maxpages))
return maxpages;
}
}
return npages;
}
static int bvec_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;
const struct bio_vec *p;
int npages = 0;
for (p = i->bvec; size; skip = 0, p++) {
unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
size_t len = min(p->bv_len - skip, size);
size -= len;
npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
if (unlikely(npages > maxpages))
return maxpages;
}
return npages;
}
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
if (unlikely(!i->count))
return 0;
if (likely(iter_is_ubuf(i))) {
unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
return min(npages, maxpages);
}
/* iovec and kvec have identical layouts */
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_npages(i, maxpages);
if (iov_iter_is_bvec(i))
return bvec_npages(i, maxpages);
if (iov_iter_is_folioq(i)) {
unsigned offset = i->iov_offset % PAGE_SIZE;
int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
return min(npages, maxpages);
}
if (iov_iter_is_xarray(i)) {
unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
return min(npages, maxpages);
}
return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
/* iovec and kvec have identical layout */
return new->__iov = kmemdup(new->__iov,
new->nr_segs * sizeof(struct iovec),
flags);
return NULL;
}
EXPORT_SYMBOL(dup_iter);
static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
const struct iovec __user *uvec, u32 nr_segs)
{
const struct compat_iovec __user *uiov =
(const struct compat_iovec __user *)uvec;
int ret = -EFAULT;
u32 i;
if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
for (i = 0; i < nr_segs; i++) {
compat_uptr_t buf;
compat_ssize_t len;
unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
/* check for compat_size_t not fitting in compat_ssize_t .. */
if (len < 0) {
ret = -EINVAL;
goto uaccess_end;
}
iov[i].iov_base = compat_ptr(buf);
iov[i].iov_len = len;
}
ret = 0;
uaccess_end:
user_access_end();
return ret;
}
static __noclone int copy_iovec_from_user(struct iovec *iov,
const struct iovec __user *uiov, unsigned long nr_segs)
{
int ret = -EFAULT;
if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
do {
void __user *buf;
ssize_t len;
unsafe_get_user(len, &uiov->iov_len, uaccess_end);
unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
/* check for size_t not fitting in ssize_t .. */
if (unlikely(len < 0)) {
ret = -EINVAL;
goto uaccess_end;
}
iov->iov_base = buf;
iov->iov_len = len;
uiov++;
iov++;
} while (--nr_segs);
ret = 0;
uaccess_end:
user_access_end();
return ret;
}
struct iovec *iovec_from_user(const struct iovec __user *uvec,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_iov, bool compat)
{
struct iovec *iov = fast_iov;
int ret;
/*
* SuS says "The readv() function *may* fail if the iovcnt argument was
* less than or equal to 0, or greater than {IOV_MAX}. Linux has
* traditionally returned zero for zero segments, so...
*/
if (nr_segs == 0)
return iov;
if (nr_segs > UIO_MAXIOV)
return ERR_PTR(-EINVAL);
if (nr_segs > fast_segs) {
iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
if (!iov)
return ERR_PTR(-ENOMEM);
}
if (unlikely(compat))
ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
else
ret = copy_iovec_from_user(iov, uvec, nr_segs);
if (ret) {
if (iov != fast_iov)
kfree(iov);
return ERR_PTR(ret);
}
return iov;
}
/*
* Single segment iovec supplied by the user, import it as ITER_UBUF.
*/
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
struct iovec **iovp, struct iov_iter *i,
bool compat)
{
struct iovec *iov = *iovp;
ssize_t ret;
*iovp = NULL;
if (compat)
ret = copy_compat_iovec_from_user(iov, uvec, 1);
else
ret = copy_iovec_from_user(iov, uvec, 1);
if (unlikely(ret))
return ret;
ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
if (unlikely(ret))
return ret;
return i->count;
}
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat)
{
ssize_t total_len = 0;
unsigned long seg;
struct iovec *iov;
if (nr_segs == 1)
return __import_iovec_ubuf(type, uvec, iovp, i, compat);
iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
if (IS_ERR(iov)) {
*iovp = NULL;
return PTR_ERR(iov);
}
/*
* According to the Single Unix Specification we should return EINVAL if
* an element length is < 0 when cast to ssize_t or if the total length
* would overflow the ssize_t return value of the system call.
*
* Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
* overflow case.
*/
for (seg = 0; seg < nr_segs; seg++) {
ssize_t len = (ssize_t)iov[seg].iov_len;
if (!access_ok(iov[seg].iov_base, len)) {
if (iov != *iovp)
kfree(iov);
*iovp = NULL;
return -EFAULT;
}
if (len > MAX_RW_COUNT - total_len) {
len = MAX_RW_COUNT - total_len;
iov[seg].iov_len = len;
}
total_len += len;
}
iov_iter_init(i, type, iov, nr_segs, total_len);
if (iov == *iovp)
*iovp = NULL;
else
*iovp = iov;
return total_len;
}
/**
* import_iovec() - Copy an array of &struct iovec from userspace
* into the kernel, check that it is valid, and initialize a new
* &struct iov_iter iterator to access it.
*
* @type: One of %READ or %WRITE.
* @uvec: Pointer to the userspace array.
* @nr_segs: Number of elements in userspace array.
* @fast_segs: Number of elements in @iov.
* @iovp: (input and output parameter) Pointer to pointer to (usually small
* on-stack) kernel array.
* @i: Pointer to iterator that will be initialized on success.
*
* If the array pointed to by *@iovp is large enough to hold all @nr_segs,
* then this function places %NULL in *@iovp on return. Otherwise, a new
* array will be allocated and the result placed in *@iovp. This means that
* the caller may call kfree() on *@iovp regardless of whether the small
* on-stack array was used or not (and regardless of whether this function
* returns an error or not).
*
* Return: Negative error code on error, bytes imported on success
*/
ssize_t import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iovp, struct iov_iter *i)
{
return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
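/*
 * Illustrative sketch (kept under #if 0, not compiled): a writev-style path
 * using import_iovec(). Because *iov ends up either NULL or pointing at a
 * kmalloc'ed array, the unconditional kfree() is always safe, exactly as the
 * comment above describes. The helper name is hypothetical.
 */
#if 0
static ssize_t example_import_and_write(const struct iovec __user *uvec,
					unsigned int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(ITER_SOURCE, uvec, nr_segs, UIO_FASTIOV,
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... hand &iter to the actual I/O, e.g. vfs_iter_write() ... */

	kfree(iov);
	return ret;
}
#endif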
int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
len = MAX_RW_COUNT;
if (unlikely(!access_ok(buf, len)))
return -EFAULT;
iov_iter_ubuf(i, rw, buf, len);
return 0;
}
EXPORT_SYMBOL_GPL(import_ubuf);
/**
* iov_iter_restore() - Restore a &struct iov_iter to the same state as when
* iov_iter_save_state() was called.
*
* @i: &struct iov_iter to restore
* @state: state to restore from
*
* Used after iov_iter_save_state() to restore @i, if operations may
* have advanced it.
*
* Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
*/
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
!iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
return;
i->iov_offset = state->iov_offset;
i->count = state->count;
if (iter_is_ubuf(i))
return;
/*
* For the *vec iters, nr_segs + iov is constant - if we increment
* the vec, then we also decrement the nr_segs count. Hence we don't
* need to track both of these, just one is enough and we can deduct
* the other from that. ITER_KVEC and ITER_IOVEC are the same struct
* size, so we can just increment the iov pointer as they are unionized.
* ITER_BVEC _may_ be the same size on some archs, but on others it is
* not. Be safe and handle it separately.
*/
BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
if (iov_iter_is_bvec(i))
i->bvec -= state->nr_segs - i->nr_segs;
else
i->__iov -= state->nr_segs - i->nr_segs;
i->nr_segs = state->nr_segs;
}
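/*
 * Illustrative sketch (kept under #if 0, not compiled): the save/restore
 * pattern this function exists for. Snapshot the iterator, attempt an
 * operation that may partially advance it, and roll back before retrying.
 * example_do_io() is a hypothetical helper.
 */
#if 0
static ssize_t example_retryable_io(struct iov_iter *i)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(i, &state);
	ret = example_do_io(i);
	if (ret == -EAGAIN)
		iov_iter_restore(i, &state);	/* back to the snapshot */
	return ret;
}
#endif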
/*
* Extract a list of contiguous pages from an ITER_FOLIOQ iterator. This does
* not get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
const struct folio_queue *folioq = i->folioq;
struct page **p;
unsigned int nr = 0;
size_t extracted = 0, offset, slot = i->folioq_slot;
if (slot >= folioq_nr_slots(folioq)) {
folioq = folioq->next;
slot = 0;
if (WARN_ON(i->iov_offset != 0))
return -EIO;
}
offset = i->iov_offset & ~PAGE_MASK;
*offset0 = offset;
maxpages = want_pages_array(pages, maxsize, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
for (;;) {
struct folio *folio = folioq_folio(folioq, slot);
size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
size_t part = PAGE_SIZE - offset % PAGE_SIZE;
if (offset < fsize) {
part = umin(part, umin(maxsize - extracted, fsize - offset));
i->count -= part;
i->iov_offset += part;
extracted += part;
p[nr++] = folio_page(folio, offset / PAGE_SIZE);
}
if (nr >= maxpages || extracted >= maxsize)
break;
if (i->iov_offset >= fsize) {
i->iov_offset = 0;
slot++;
if (slot == folioq_nr_slots(folioq) && folioq->next) {
folioq = folioq->next;
slot = 0;
}
}
}
i->folioq = folioq;
i->folioq_slot = slot;
return extracted;
}
/*
* Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not
* get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
struct page **p;
struct folio *folio;
unsigned int nr = 0, offset;
loff_t pos = i->xarray_start + i->iov_offset;
XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT);
offset = pos & ~PAGE_MASK;
*offset0 = offset;
maxpages = want_pages_array(pages, maxsize, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
rcu_read_lock();
for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
if (xas_retry(&xas, folio))
continue;
/* Has the folio moved or been split? */
if (unlikely(folio != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
p[nr++] = folio_file_page(folio, xas.xa_index);
if (nr == maxpages)
break;
}
rcu_read_unlock();
maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
iov_iter_advance(i, maxsize);
return maxsize;
}
/*
* Extract a list of virtually contiguous pages from an ITER_BVEC iterator.
* This does not get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
size_t skip = i->iov_offset, size = 0;
struct bvec_iter bi;
int k = 0;
if (i->nr_segs == 0)
return 0;
if (i->iov_offset == i->bvec->bv_len) {
i->iov_offset = 0;
i->nr_segs--;
i->bvec++;
skip = 0;
}
bi.bi_idx = 0;
bi.bi_size = maxsize;
bi.bi_bvec_done = skip;
maxpages = want_pages_array(pages, maxsize, skip, maxpages);
while (bi.bi_size && bi.bi_idx < i->nr_segs) {
struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);
/*
* The iov_iter_extract_pages interface only allows an offset
* into the first page. Break out of the loop if we see an
* offset into subsequent pages, the caller will have to call
* iov_iter_extract_pages again for the remainder.
*/
if (k) {
if (bv.bv_offset)
break;
} else {
*offset0 = bv.bv_offset;
}
(*pages)[k++] = bv.bv_page;
size += bv.bv_len;
if (k >= maxpages)
break;
/*
* We are done when the end of the bvec doesn't align to a page
* boundary as that would create a hole in the returned space.
* The caller will handle this with another call to
* iov_iter_extract_pages.
*/
if (bv.bv_offset + bv.bv_len != PAGE_SIZE)
break;
bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);
}
iov_iter_advance(i, size);
return size;
}
/*
* Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
* This does not get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
struct page **p, *page;
const void *kaddr;
size_t skip = i->iov_offset, offset, len, size;
int k;
for (;;) {
if (i->nr_segs == 0)
return 0;
size = min(maxsize, i->kvec->iov_len - skip);
if (size)
break;
i->iov_offset = 0;
i->nr_segs--;
i->kvec++;
skip = 0;
}
kaddr = i->kvec->iov_base + skip;
offset = (unsigned long)kaddr & ~PAGE_MASK;
*offset0 = offset;
maxpages = want_pages_array(pages, size, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
kaddr -= offset;
len = offset + size;
for (k = 0; k < maxpages; k++) {
size_t seg = min_t(size_t, len, PAGE_SIZE);
if (is_vmalloc_or_module_addr(kaddr))
page = vmalloc_to_page(kaddr);
else
page = virt_to_page(kaddr);
p[k] = page;
len -= seg;
kaddr += PAGE_SIZE;
}
size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
iov_iter_advance(i, size);
return size;
}
/*
* Extract a list of contiguous pages from a user iterator and get a pin on
* each of them. This should only be used if the iterator is user-backed
* (IOBUF/UBUF).
*
* It does not get refs on the pages, but the pages must be unpinned by the
* caller once the transfer is complete.
*
* This is safe to be used where background IO/DMA *is* going to be modifying
* the buffer; using a pin rather than a ref forces fork() to give the
* child a copy of the page.
*/
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
struct page ***pages,
size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
unsigned long addr;
unsigned int gup_flags = 0;
size_t offset;
int res;
if (i->data_source == ITER_DEST)
gup_flags |= FOLL_WRITE;
if (extraction_flags & ITER_ALLOW_P2PDMA)
gup_flags |= FOLL_PCI_P2PDMA;
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
addr = first_iovec_segment(i, &maxsize);
*offset0 = offset = addr % PAGE_SIZE;
addr &= PAGE_MASK;
maxpages = want_pages_array(pages, maxsize, offset, maxpages);
if (!maxpages)
return -ENOMEM;
res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
if (unlikely(res <= 0))
return res;
maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
iov_iter_advance(i, maxsize);
return maxsize;
}
/**
* iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
* @i: The iterator to extract from
* @pages: Where to return the list of pages
* @maxsize: The maximum amount of iterator to extract
* @maxpages: The maximum size of the list of pages
* @extraction_flags: Flags to qualify request
* @offset0: Where to return the starting offset into (*@pages)[0]
*
* Extract a list of contiguous pages from the current point of the iterator,
* advancing the iterator. The maximum number of pages and the maximum amount
* of page contents can be set.
*
* If *@pages is NULL, a page list will be allocated to the required size and
* *@pages will be set to its base. If *@pages is not NULL, it will be assumed
* that the caller allocated a page list at least @maxpages in size and this
* will be filled in.
*
* @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
* be allowed on the pages extracted.
*
* The iov_iter_extract_will_pin() function can be used to query how cleanup
* should be performed.
*
* Extra refs or pins on the pages may be obtained as follows:
*
* (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
* added to the pages, but refs will not be taken.
* iov_iter_extract_will_pin() will return true.
*
* (*) If the iterator is ITER_KVEC, ITER_BVEC, ITER_FOLIOQ or ITER_XARRAY, the
* pages are merely listed; no extra refs or pins are obtained.
* iov_iter_extract_will_pin() will return false.
*
* Note also:
*
* (*) Use with ITER_DISCARD is not supported as that has no content.
*
* On success, the function sets *@pages to the new pagelist, if allocated, and
* sets *offset0 to the offset into the first page.
*
* It may also return -ENOMEM and -EFAULT.
*/
ssize_t iov_iter_extract_pages(struct iov_iter *i,
struct page ***pages,
size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
if (!maxsize)
return 0;
if (likely(user_backed_iter(i)))
return iov_iter_extract_user_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
if (iov_iter_is_kvec(i))
return iov_iter_extract_kvec_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
if (iov_iter_is_bvec(i))
return iov_iter_extract_bvec_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
if (iov_iter_is_folioq(i))
return iov_iter_extract_folioq_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
if (iov_iter_is_xarray(i))
return iov_iter_extract_xarray_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
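/*
 * Illustrative sketch (kept under #if 0, not compiled): cleaning up after
 * iov_iter_extract_pages() according to iov_iter_extract_will_pin(), as the
 * comment above describes. Only user-backed iterators take pins; the other
 * iterator types take no references at all. The helper name is hypothetical.
 */
#if 0
static void example_extract_cleanup(struct iov_iter *i, struct page **pages,
				    unsigned int nr_pages)
{
	unsigned int k;

	if (!iov_iter_extract_will_pin(i))
		return;		/* kvec/bvec/folioq/xarray: nothing to undo */
	for (k = 0; k < nr_pages; k++)
		unpin_user_page(pages[k]);
}
#endif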
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIMEKEEPING_H
#define _LINUX_TIMEKEEPING_H
#include <linux/errno.h>
#include <linux/clocksource_ids.h>
#include <linux/ktime.h>
/* Included from linux/ktime.h */
void timekeeping_init(void);
extern int timekeeping_suspended;
/* Architecture timer tick functions: */
extern void legacy_timer_tick(unsigned long ticks);
/*
* Get and set timeofday
*/
extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
/*
* ktime_get() family - read the current time in a multitude of ways.
*
* The default time reference is CLOCK_MONOTONIC, starting at
* boot time but not counting the time spent in suspend.
* For other references, use the functions with "real", "clocktai",
* "boottime" and "raw" suffixes.
*
* To get the time in a different format, use the ones with
* "ns", "ts64" and "seconds" suffix.
*
* See Documentation/core-api/timekeeping.rst for more details.
*/
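/*
 * Illustrative sketch (kept under #if 0, not compiled): measuring an interval
 * with the monotonic clock. ktime_get() is not subject to settimeofday()/NTP
 * steps, which makes it the usual choice for durations. The helper name is
 * hypothetical.
 */
#if 0
static u64 example_measure_ns(void (*fn)(void))
{
	ktime_t start = ktime_get();

	fn();
	return ktime_to_ns(ktime_sub(ktime_get(), start));
}
#endif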
/*
* timespec64 based interfaces
*/
extern void ktime_get_raw_ts64(struct timespec64 *ts);
extern void ktime_get_ts64(struct timespec64 *ts);
extern void ktime_get_real_ts64(struct timespec64 *tv);
extern void ktime_get_coarse_ts64(struct timespec64 *ts);
extern void ktime_get_coarse_real_ts64(struct timespec64 *ts);
extern void ktime_get_clock_ts64(clockid_t id, struct timespec64 *ts);
/* Multigrain timestamp interfaces */
extern void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts);
extern void ktime_get_real_ts64_mg(struct timespec64 *ts);
extern unsigned long timekeeping_get_mg_floor_swaps(void);
void getboottime64(struct timespec64 *ts);
/*
* time64_t base interfaces
*/
extern time64_t ktime_get_seconds(void);
extern time64_t __ktime_get_real_seconds(void);
extern time64_t ktime_get_real_seconds(void);
/*
* ktime_t based interfaces
*/
enum tk_offsets {
TK_OFFS_REAL,
TK_OFFS_BOOT,
TK_OFFS_TAI,
TK_OFFS_MAX,
};
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
extern ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
extern u32 ktime_get_resolution_ns(void);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
*
* Returns: real (wall) time in ktime_t format
*/
static inline ktime_t ktime_get_real(void)
{
return ktime_get_with_offset(TK_OFFS_REAL);
}
static inline ktime_t ktime_get_coarse_real(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_REAL);
}
/**
* ktime_get_boottime - Get monotonic time since boot in ktime_t format
*
* This is similar to CLOCK_MONOTONIC/ktime_get(), but also includes the
* time spent in suspend.
*
* Returns: monotonic time since boot in ktime_t format
*/
static inline ktime_t ktime_get_boottime(void)
{
return ktime_get_with_offset(TK_OFFS_BOOT);
}
static inline ktime_t ktime_get_coarse_boottime(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_BOOT);
}
/**
* ktime_get_clocktai - Get the TAI time of day in ktime_t format
*
* Returns: the TAI time of day in ktime_t format
*/
static inline ktime_t ktime_get_clocktai(void)
{
return ktime_get_with_offset(TK_OFFS_TAI);
}
static inline ktime_t ktime_get_coarse_clocktai(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_TAI);
}
static inline ktime_t ktime_get_coarse(void)
{
struct timespec64 ts;
ktime_get_coarse_ts64(&ts);
return timespec64_to_ktime(ts);
}
static inline u64 ktime_get_coarse_ns(void)
{
return ktime_to_ns(ktime_get_coarse());
}
static inline u64 ktime_get_coarse_real_ns(void)
{
return ktime_to_ns(ktime_get_coarse_real());
}
static inline u64 ktime_get_coarse_boottime_ns(void)
{
return ktime_to_ns(ktime_get_coarse_boottime());
}
static inline u64 ktime_get_coarse_clocktai_ns(void)
{
return ktime_to_ns(ktime_get_coarse_clocktai());
}
/**
* ktime_mono_to_real - Convert monotonic time to clock realtime
* @mono: monotonic time to convert
*
* Returns: time converted to realtime clock
*/
static inline ktime_t ktime_mono_to_real(ktime_t mono)
{
return ktime_mono_to_any(mono, TK_OFFS_REAL);
}
/**
* ktime_get_ns - Get the current time in nanoseconds
*
* Returns: current time converted to nanoseconds
*/
static inline u64 ktime_get_ns(void)
{
return ktime_to_ns(ktime_get());
}
/**
* ktime_get_real_ns - Get the current real/wall time in nanoseconds
*
* Returns: current real time converted to nanoseconds
*/
static inline u64 ktime_get_real_ns(void)
{
return ktime_to_ns(ktime_get_real());
}
/**
* ktime_get_boottime_ns - Get the monotonic time since boot in nanoseconds
*
* Returns: current boottime converted to nanoseconds
*/
static inline u64 ktime_get_boottime_ns(void)
{
return ktime_to_ns(ktime_get_boottime());
}
/**
* ktime_get_clocktai_ns - Get the current TAI time of day in nanoseconds
*
* Returns: current TAI time converted to nanoseconds
*/
static inline u64 ktime_get_clocktai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
}
/**
* ktime_get_raw_ns - Get the raw monotonic time in nanoseconds
*
* Returns: current raw monotonic time converted to nanoseconds
*/
static inline u64 ktime_get_raw_ns(void)
{
return ktime_to_ns(ktime_get_raw());
}
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
extern u64 ktime_get_boot_fast_ns(void);
extern u64 ktime_get_tai_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);
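/*
 * Illustrative sketch, not part of this header: measuring an interval with
 * the monotonic clock and sampling a few of the other time bases described
 * above. demo_time_something() is a hypothetical helper; every accessor it
 * uses is declared earlier in this file or in <linux/ktime.h>.
 */
static inline u64 demo_time_something(void)
{
	struct timespec64 wall;
	ktime_t start = ktime_get();		/* CLOCK_MONOTONIC, excludes suspend */
	u64 boot_ns = ktime_get_boottime_ns();	/* monotonic time including suspend */

	ktime_get_real_ts64(&wall);		/* CLOCK_REALTIME as a timespec64 */
	(void)boot_ns;
	(void)wall;

	/* ... the work being timed would go here ... */

	return ktime_to_ns(ktime_sub(ktime_get(), start));
}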
/*
* timespec64/time64_t interfaces built on the ktime based ones,
* provided for API completeness; these could be implemented more
* efficiently if needed.
*/
static inline void ktime_get_boottime_ts64(struct timespec64 *ts)
{
*ts = ktime_to_timespec64(ktime_get_boottime());
}
static inline void ktime_get_coarse_boottime_ts64(struct timespec64 *ts)
{
*ts = ktime_to_timespec64(ktime_get_coarse_boottime());
}
static inline time64_t ktime_get_boottime_seconds(void)
{
return ktime_divns(ktime_get_coarse_boottime(), NSEC_PER_SEC);
}
static inline void ktime_get_clocktai_ts64(struct timespec64 *ts)
{
*ts = ktime_to_timespec64(ktime_get_clocktai());
}
static inline void ktime_get_coarse_clocktai_ts64(struct timespec64 *ts)
{
*ts = ktime_to_timespec64(ktime_get_coarse_clocktai());
}
static inline time64_t ktime_get_clocktai_seconds(void)
{
return ktime_divns(ktime_get_coarse_clocktai(), NSEC_PER_SEC);
}
/*
* RTC specific
*/
extern bool timekeeping_rtc_skipsuspend(void);
extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
/*
* Auxiliary clock interfaces
*/
#ifdef CONFIG_POSIX_AUX_CLOCKS
extern bool ktime_get_aux(clockid_t id, ktime_t *kt);
extern bool ktime_get_aux_ts64(clockid_t id, struct timespec64 *kt);
#else
static inline bool ktime_get_aux(clockid_t id, ktime_t *kt) { return false; }
static inline bool ktime_get_aux_ts64(clockid_t id, struct timespec64 *kt) { return false; }
#endif
/**
* struct system_time_snapshot - simultaneous raw/real time capture with
* counter value
* @cycles: Clocksource counter value to produce the system times
* @real: Realtime system time
* @boot: Boot time
* @raw: Monotonic raw system time
* @cs_id: Clocksource ID
* @clock_was_set_seq: The sequence number of clock-was-set events
* @cs_was_changed_seq: The sequence number of clocksource change events
*/
struct system_time_snapshot {
u64 cycles;
ktime_t real;
ktime_t boot;
ktime_t raw;
enum clocksource_ids cs_id;
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
};
/**
* struct system_device_crosststamp - system/device cross-timestamp
* (synchronized capture)
* @device: Device time
* @sys_realtime: Realtime simultaneous with device time
* @sys_monoraw: Monotonic raw simultaneous with device time
*/
struct system_device_crosststamp {
ktime_t device;
ktime_t sys_realtime;
ktime_t sys_monoraw;
};
/**
* struct system_counterval_t - system counter value with the ID of the
* corresponding clocksource
* @cycles: System counter value
* @cs_id: Clocksource ID corresponding to system counter value. Used by
* timekeeping code to verify comparability of two cycle values.
* The default ID, CSID_GENERIC, does not identify a specific
* clocksource.
* @use_nsecs: @cycles is in nanoseconds.
*/
struct system_counterval_t {
u64 cycles;
enum clocksource_ids cs_id;
bool use_nsecs;
};
extern bool ktime_real_to_base_clock(ktime_t treal,
enum clocksource_ids base_id, u64 *cycles);
extern bool timekeeping_clocksource_has_base(enum clocksource_ids id);
/*
* Get cross timestamp between system clock and device clock
*/
extern int get_device_system_crosststamp(
int (*get_time_fn)(ktime_t *device_time,
struct system_counterval_t *system_counterval,
void *ctx),
void *ctx,
struct system_time_snapshot *history,
struct system_device_crosststamp *xtstamp);
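/*
 * Illustrative sketch, not part of this header: how a PTP-capable driver
 * might use get_device_system_crosststamp(). demo_get_time() and
 * demo_crosststamp() are hypothetical; a real driver would latch its
 * hardware clock together with the correlated system counter and report
 * the counter's clocksource via cs_id instead of the placeholder values.
 */
static inline int demo_get_time(ktime_t *device_time,
				struct system_counterval_t *system_counterval,
				void *ctx)
{
	*device_time = ns_to_ktime(0);		/* device clock readout */
	system_counterval->cycles = 0;		/* correlated counter readout */
	system_counterval->cs_id = CSID_GENERIC;
	system_counterval->use_nsecs = false;
	return 0;
}

static inline int demo_crosststamp(struct system_device_crosststamp *xt)
{
	/* No history snapshot is passed, so no interpolation is attempted. */
	return get_device_system_crosststamp(demo_get_time, NULL, NULL, xt);
}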
/*
* Simultaneously snapshot realtime and monotonic raw clocks
*/
extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
/*
* Persistent clock related interfaces
*/
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
#ifdef CONFIG_GENERIC_CMOS_UPDATE
extern int update_persistent_clock64(struct timespec64 now);
#endif
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common code for control of lockd and nfsv4 grace periods.
*
* Transplanted from lockd code
*/
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/fs.h>
#include <linux/filelock.h>
static unsigned int grace_net_id;
static DEFINE_SPINLOCK(grace_lock);
/**
* locks_start_grace - start a grace period for a lock manager
* @net: net namespace that this lock manager belongs to
* @lm: who this grace period is for
*
* A grace period is a period during which locks should not be given
* out. Currently grace periods are only enforced by the two lock
* managers (lockd and nfsd), using the locks_in_grace() function to
* check when they are in a grace period.
*
* This function is called to start a grace period.
*/
void
locks_start_grace(struct net *net, struct lock_manager *lm)
{
struct list_head *grace_list = net_generic(net, grace_net_id);
spin_lock(&grace_lock);
if (list_empty(&lm->list))
list_add(&lm->list, grace_list);
else
WARN(1, "double list_add attempt detected in net %x %s\n",
net->ns.inum, (net == &init_net) ? "(init_net)" : "");
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
/**
* locks_end_grace - end a grace period for a lock manager
* @lm: who this grace period is for
*
* Call this function to state that the given lock manager is ready to
* resume regular locking. The grace period will not end until all lock
* managers that called locks_start_grace() also call locks_end_grace().
* Note that callers count on it being safe to call this more than once,
* and the second call should be a no-op.
*/
void
locks_end_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_del_init(&lm->list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_end_grace);
static bool
__state_in_grace(struct net *net, bool open)
{
struct list_head *grace_list = net_generic(net, grace_net_id);
struct lock_manager *lm;
if (!open)
return !list_empty(grace_list);
spin_lock(&grace_lock);
list_for_each_entry(lm, grace_list, list) {
if (lm->block_opens) {
spin_unlock(&grace_lock);
return true;
}
}
spin_unlock(&grace_lock);
return false;
}
/**
* locks_in_grace - check whether a grace period is in effect
* @net: network namespace
*
* Lock managers call this function to determine when it is OK for them
* to answer ordinary lock requests, and when they should accept only
* lock reclaims.
*/
bool locks_in_grace(struct net *net)
{
return __state_in_grace(net, false);
}
EXPORT_SYMBOL_GPL(locks_in_grace);
bool opens_in_grace(struct net *net)
{
return __state_in_grace(net, true);
}
EXPORT_SYMBOL_GPL(opens_in_grace);
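/*
 * Illustrative sketch, not part of this file: how a lock manager such as
 * lockd or nfsd drives a grace period. demo_lm and the demo_* helpers are
 * hypothetical; locks_start_grace(), locks_in_grace() and locks_end_grace()
 * are the exported interfaces above.
 */
static struct lock_manager demo_lm = {
	.list = LIST_HEAD_INIT(demo_lm.list),
	.block_opens = false,	/* opens stay allowed; only non-reclaim locks wait */
};

static void __maybe_unused demo_enter_grace(struct net *net)
{
	locks_start_grace(net, &demo_lm);
}

static bool __maybe_unused demo_may_grant_new_lock(struct net *net)
{
	/* While any manager is in its grace period, only reclaims may be granted. */
	return !locks_in_grace(net);
}

static void __maybe_unused demo_leave_grace(void)
{
	/* Safe to call more than once; the second call is a no-op. */
	locks_end_grace(&demo_lm);
}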
static int __net_init
grace_init_net(struct net *net)
{
struct list_head *grace_list = net_generic(net, grace_net_id);
INIT_LIST_HEAD(grace_list);
return 0;
}
static void __net_exit
grace_exit_net(struct net *net)
{
struct list_head *grace_list = net_generic(net, grace_net_id);
WARN_ONCE(!list_empty(grace_list),
"net %x %s: grace_list is not empty\n",
net->ns.inum, __func__);
}
static struct pernet_operations grace_net_ops = {
.init = grace_init_net,
.exit = grace_exit_net,
.id = &grace_net_id,
.size = sizeof(struct list_head),
};
static int __init
init_grace(void)
{
return register_pernet_subsys(&grace_net_ops);
}
static void __exit
exit_grace(void)
{
unregister_pernet_subsys(&grace_net_ops);
}
MODULE_AUTHOR("Jeff Layton <jlayton@primarydata.com>");
MODULE_DESCRIPTION("NFS client and server infrastructure");
MODULE_LICENSE("GPL");
module_init(init_grace)
module_exit(exit_grace)
/* SPDX-License-Identifier: GPL-2.0
*
* Network memory
*
* Author: Mina Almasry <almasrymina@google.com>
*/
#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <net/net_debug.h>
/* These fields in struct page are used by the page_pool and net stack:
*
* struct {
* unsigned long pp_magic;
* struct page_pool *pp;
* unsigned long _pp_mapping_pad;
* unsigned long dma_addr;
* atomic_long_t pp_ref_count;
* };
*
* We mirror the page_pool fields here so the page_pool can access these
* fields without worrying whether the underlying fields belong to a
* page or netmem_desc.
*
* CAUTION: Do not update the fields in netmem_desc without also
* updating the anonymous aliasing union in struct net_iov.
*/
struct netmem_desc {
unsigned long _flags;
unsigned long pp_magic;
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
atomic_long_t pp_ref_count;
};
#define NETMEM_DESC_ASSERT_OFFSET(pg, desc) \
static_assert(offsetof(struct page, pg) == \
offsetof(struct netmem_desc, desc))
NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
NETMEM_DESC_ASSERT_OFFSET(pp, pp);
NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NETMEM_DESC_ASSERT_OFFSET
/*
* Since struct netmem_desc uses space within struct page, its size must
* be checked to avoid conflicting with other members of struct page,
* until struct netmem_desc gets its own slab-allocated instance.
*/
static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
/* net_iov */
DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
/* We overload the LSB of the struct page pointer to indicate whether it's
* a page or net_iov.
*/
#define NET_IOV 0x01UL
enum net_iov_type {
NET_IOV_DMABUF,
NET_IOV_IOURING,
/* Force size to unsigned long to make the NET_IOV_ASSERTS below pass.
*/
NET_IOV_MAX = ULONG_MAX
};
/* A memory descriptor representing abstract networking I/O vectors,
* generally for non-page memory that has no corresponding struct page
* and therefore needs to be allocated explicitly through slab.
*
* net_iovs are allocated and used by networking code, and the size of
* the chunk is PAGE_SIZE.
*
* This memory can be any form of memory not backed by struct page. Examples
* include imported dmabuf memory and imported io_uring memory. See
* net_iov_type for all the supported types.
*
* @pp_magic: pp field, similar to the one in struct page/struct
* netmem_desc.
* @pp: the pp this net_iov belongs to, if any.
* @dma_addr: the dma addrs of the net_iov. Needed for the network
* card to send/receive this net_iov.
* @pp_ref_count: the pp ref count of this net_iov, exactly the same
* usage as struct page/struct netmem_desc.
* @owner: the net_iov_area this net_iov belongs to, if any.
* @type: the type of the memory. Different types of net_iovs are
* supported.
*/
struct net_iov {
union {
struct netmem_desc desc;
/* XXX: The following part should be removed once all
* the references to them are converted so as to be
* accessed via netmem_desc e.g. niov->desc.pp instead
* of niov->pp.
*/
struct {
unsigned long _flags;
unsigned long pp_magic;
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
atomic_long_t pp_ref_count;
};
};
struct net_iov_area *owner;
enum net_iov_type type;
};
struct net_iov_area {
/* Array of net_iovs for this area. */
struct net_iov *niovs;
size_t num_niovs;
/* Offset into the dma-buf where this chunk starts. */
unsigned long base_virtual;
};
/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
* the page_pool can access these fields without worrying whether the
* underlying fields are accessed via netmem_desc or directly via
* net_iov, until all the references to them are converted so as to be
* accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
*
* The non-net stack fields of struct page are private to the mm stack
* and must never be mirrored to net_iov.
*/
#define NET_IOV_ASSERT_OFFSET(desc, iov) \
static_assert(offsetof(struct netmem_desc, desc) == \
offsetof(struct net_iov, iov))
NET_IOV_ASSERT_OFFSET(_flags, _flags);
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
{
return niov->owner;
}
static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
return niov - net_iov_owner(niov)->niovs;
}
/* netmem */
/**
* typedef netmem_ref - a nonexistent type marking a reference to generic
* network memory.
*
* A netmem_ref can be a struct page* or a struct net_iov* underneath.
*
* Use the supplied helpers to obtain the underlying memory pointer and fields.
*/
typedef unsigned long __bitwise netmem_ref;
static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
return (__force unsigned long)netmem & NET_IOV;
}
/**
* __netmem_to_page - unsafely get pointer to the &page backing @netmem
* @netmem: netmem reference to convert
*
* Unsafe version of netmem_to_page(). When @netmem is always page-backed,
* e.g. when it's a header buffer, performs faster and generates smaller
* object code (no check for the LSB, no WARN). When @netmem points to IOV,
* provokes undefined behaviour.
*
* Return: pointer to the &page (garbage if @netmem is not page-backed).
*/
static inline struct page *__netmem_to_page(netmem_ref netmem)
{
return (__force struct page *)netmem;
}
static inline struct page *netmem_to_page(netmem_ref netmem)
{
if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
return NULL;
return __netmem_to_page(netmem);
}
static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
if (netmem_is_net_iov(netmem))
return (struct net_iov *)((__force unsigned long)netmem &
~NET_IOV);
DEBUG_NET_WARN_ON_ONCE(true);
return NULL;
}
static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}
#define page_to_netmem(p) (_Generic((p), \
const struct page * : (__force const netmem_ref)(p), \
struct page * : (__force netmem_ref)(p)))
/**
* virt_to_netmem - convert virtual memory pointer to a netmem reference
* @data: host memory pointer to convert
*
* Return: netmem reference to the &page backing this virtual address.
*/
static inline netmem_ref virt_to_netmem(const void *data)
{
return page_to_netmem(virt_to_page(data));
}
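/*
 * Illustrative sketch, not part of this header: the LSB tagging described
 * above makes conversions cheap and reversible. demo_netmem_roundtrip() is
 * hypothetical and only uses the helpers defined earlier in this file.
 */
static inline bool demo_netmem_roundtrip(struct page *page, struct net_iov *niov)
{
	netmem_ref pmem = page_to_netmem(page);		/* LSB clear: page-backed */
	netmem_ref nmem = net_iov_to_netmem(niov);	/* LSB set: net_iov-backed */

	return !netmem_is_net_iov(pmem) &&
	       netmem_to_page(pmem) == page &&
	       netmem_is_net_iov(nmem) &&
	       netmem_to_net_iov(nmem) == niov;
}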
static inline int netmem_ref_count(netmem_ref netmem)
{
/* The non-pp refcount of net_iov is always 1. On net_iov, we only
* support pp refcounting which uses the pp_ref_count field.
*/
if (netmem_is_net_iov(netmem))
return 1;
return page_ref_count(netmem_to_page(netmem));
}
static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
if (netmem_is_net_iov(netmem))
return 0;
return page_to_pfn(netmem_to_page(netmem));
}
/**
* __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
* @netmem
* @netmem: netmem reference to convert
*
* Unsafe version that can be used only when @netmem is always backed by
* system memory, performs faster and generates smaller object code (no
* check for the LSB, no WARN). When @netmem points to IOV, provokes
* undefined behaviour.
*
* Return: pointer to the &netmem_desc (garbage if @netmem is not backed
* by system memory).
*/
static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
{
return (__force struct netmem_desc *)netmem;
}
/* __netmem_clear_lsb - convert netmem_ref to struct net_iov * for access to
* common fields.
* @netmem: netmem reference to extract as net_iov.
*
* All the sub types of netmem_ref (page, net_iov) have the same pp, pp_magic,
* dma_addr, and pp_ref_count fields at the same offsets. Thus, we can access
* these fields without a type check to make sure that the underlying mem is
* net_iov or page.
*
* The resulting value of this function can only be used to access the fields
* that are NET_IOV_ASSERT_OFFSET'd. Accessing any other fields will result in
* undefined behavior.
*
* Return: the netmem_ref cast to net_iov* regardless of its underlying type.
*/
static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
{
return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}
/* XXX: How to extract netmem_desc from page must be changed, once
* netmem_desc no longer overlays on struct page and is allocated through
* slab.
*/
#define __pp_page_to_nmdesc(p) (_Generic((p), \
const struct page * : (const struct netmem_desc *)(p), \
struct page * : (struct netmem_desc *)(p)))
/* CAUTION: Check if the page is a pp page before calling this helper or
* know it's a pp page.
*/
#define pp_page_to_nmdesc(p) \
({ \
DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p)); \
__pp_page_to_nmdesc(p); \
})
/**
* __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
* @netmem: netmem reference to get the pointer from
*
* Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
* e.g. when it's a header buffer, performs faster and generates smaller
* object code (avoids clearing the LSB). When @netmem points to IOV,
* provokes invalid memory access.
*
* Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
*/
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
return __netmem_to_nmdesc(netmem)->pp;
}
static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
return __netmem_clear_lsb(netmem)->pp;
}
static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
return &__netmem_clear_lsb(netmem)->pp_ref_count;
}
static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
/* NUMA node preference only makes sense if we're allocating
* system memory. Memory providers (which give us net_iovs)
* choose for us.
*/
if (netmem_is_net_iov(netmem))
return true;
return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}
static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
/* niov are never compounded */
if (netmem_is_net_iov(netmem))
return netmem;
return page_to_netmem(compound_head(netmem_to_page(netmem)));
}
/**
* __netmem_address - unsafely get pointer to the memory backing @netmem
* @netmem: netmem reference to get the pointer for
*
* Unsafe version of netmem_address(). When @netmem is always page-backed,
* e.g. when it's a header buffer, performs faster and generates smaller
* object code (no check for the LSB). When @netmem points to IOV, provokes
* undefined behaviour.
*
* Return: pointer to the memory (garbage if @netmem is not page-backed).
*/
static inline void *__netmem_address(netmem_ref netmem)
{
return page_address(__netmem_to_page(netmem));
}
static inline void *netmem_address(netmem_ref netmem)
{
if (netmem_is_net_iov(netmem))
return NULL;
return __netmem_address(netmem);
}
/**
* netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
* @netmem: netmem reference to check
*
* Return: true if @netmem is page-backed and the page was allocated under
* memory pressure, false otherwise.
*/
static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
{
if (netmem_is_net_iov(netmem))
return false;
return page_is_pfmemalloc(netmem_to_page(netmem));
}
static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
return __netmem_clear_lsb(netmem)->dma_addr;
}
void get_netmem(netmem_ref netmem);
void put_netmem(netmem_ref netmem);
#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL) \
do { \
if (!netmem_is_net_iov(NETMEM)) \
dma_unmap_addr_set(PTR, ADDR_NAME, VAL); \
else \
dma_unmap_addr_set(PTR, ADDR_NAME, 0); \
} while (0)
static inline void netmem_dma_unmap_page_attrs(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
if (!addr)
return;
dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
#endif /* _NET_NETMEM_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1993 Linus Torvalds
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
* SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
* Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
* Numa awareness, Christoph Lameter, SGI, June 2005
* Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
*/
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
#include <linux/page_owner.h>
#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>
#include "internal.h"
#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
static int __init set_nohugeiomap(char *str)
{
ioremap_max_page_shift = PAGE_SHIFT;
return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;
static int __init set_nohugevmalloc(char *str)
{
vmap_allow_huge = false;
return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
bool is_vmalloc_addr(const void *x)
{
unsigned long addr = (unsigned long)kasan_reset_tag(x);
return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
struct vfree_deferred {
struct llist_head list;
struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
pte_t *pte;
u64 pfn;
struct page *page;
unsigned long size = PAGE_SIZE;
pfn = phys_addr >> PAGE_SHIFT;
pte = pte_alloc_kernel_track(pmd, addr, mask);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
if (unlikely(!pte_none(ptep_get(pte)))) {
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
dump_page(page, "remapping already mapped page");
}
BUG();
}
#ifdef CONFIG_HUGETLB_PAGE
size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
if (size != PAGE_SIZE) {
pte_t entry = pfn_pte(pfn, prot);
entry = arch_make_huge_pte(entry, ilog2(size), 0);
set_huge_pte_at(&init_mm, addr, pte, entry, size);
pfn += PFN_DOWN(size);
continue;
}
#endif
set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
pfn++;
} while (pte += PFN_DOWN(size), addr += size, addr != end);
arch_leave_lazy_mmu_mode();
*mask |= PGTBL_PTE_MODIFIED;
return 0;
}
static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift)
{
if (max_page_shift < PMD_SHIFT)
return 0;
if (!arch_vmap_pmd_supported(prot))
return 0;
if ((end - addr) != PMD_SIZE)
return 0;
if (!IS_ALIGNED(addr, PMD_SIZE))
return 0;
if (!IS_ALIGNED(phys_addr, PMD_SIZE))
return 0;
if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
return 0;
return pmd_set_huge(pmd, phys_addr, prot);
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
max_page_shift)) {
*mask |= PGTBL_PMD_MODIFIED;
continue;
}
if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
return -ENOMEM;
} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
}
static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift)
{
if (max_page_shift < PUD_SHIFT)
return 0;
if (!arch_vmap_pud_supported(prot))
return 0;
if ((end - addr) != PUD_SIZE)
return 0;
if (!IS_ALIGNED(addr, PUD_SIZE))
return 0;
if (!IS_ALIGNED(phys_addr, PUD_SIZE))
return 0;
if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
return 0;
return pud_set_huge(pud, phys_addr, prot);
}
static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
pud = pud_alloc_track(&init_mm, p4d, addr, mask);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
max_page_shift)) {
*mask |= PGTBL_PUD_MODIFIED;
continue;
}
if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
max_page_shift, mask))
return -ENOMEM;
} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
}
static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift)
{
if (max_page_shift < P4D_SHIFT)
return 0;
if (!arch_vmap_p4d_supported(prot))
return 0;
if ((end - addr) != P4D_SIZE)
return 0;
if (!IS_ALIGNED(addr, P4D_SIZE))
return 0;
if (!IS_ALIGNED(phys_addr, P4D_SIZE))
return 0;
if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
return 0;
return p4d_set_huge(p4d, phys_addr, prot);
}
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
if (!p4d)
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
max_page_shift)) {
*mask |= PGTBL_P4D_MODIFIED;
continue;
}
if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
max_page_shift, mask))
return -ENOMEM;
} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
}
static int vmap_range_noflush(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int max_page_shift)
{
pgd_t *pgd;
unsigned long start;
unsigned long next;
int err;
pgtbl_mod_mask mask = 0;
might_sleep();
BUG_ON(addr >= end);
start = addr;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
max_page_shift, &mask);
if (err)
break;
} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
arch_sync_kernel_mappings(start, end);
return err;
}
int vmap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
int err;
err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
ioremap_max_page_shift);
flush_cache_vmap(addr, end);
if (!err)
err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
ioremap_max_page_shift);
return err;
}
int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
struct vm_struct *area;
area = find_vm_area((void *)addr);
if (!area || !(area->flags & VM_IOREMAP)) {
WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
return -EINVAL;
}
if (addr != (unsigned long)area->addr ||
(void *)end != area->addr + get_vm_area_size(area)) {
WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
addr, end, (long)area->addr,
(long)area->addr + get_vm_area_size(area));
return -ERANGE;
}
return vmap_page_range(addr, end, phys_addr, prot);
}
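/*
 * Illustrative sketch, not part of this file: roughly how the generic
 * ioremap path (mm/ioremap.c) drives ioremap_page_range(). demo_ioremap()
 * is a hypothetical, simplified variant; it ignores sub-page offsets and
 * the bookkeeping a real implementation needs.
 */
static __maybe_unused void __iomem *demo_ioremap(phys_addr_t phys_addr, size_t size)
{
	struct vm_struct *area;
	unsigned long vaddr;

	size = PAGE_ALIGN(size);

	/* Reserve a guarded chunk of vmalloc space marked for I/O mappings. */
	area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
	if (!area)
		return NULL;

	vaddr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	/* Install the (uncached) mapping; undo the reservation on failure. */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       pgprot_noncached(PAGE_KERNEL))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)vaddr;
}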
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pgtbl_mod_mask *mask)
{
pte_t *pte;
pte_t ptent;
unsigned long size = PAGE_SIZE;
pte = pte_offset_kernel(pmd, addr);
arch_enter_lazy_mmu_mode();
do {
#ifdef CONFIG_HUGETLB_PAGE
size = arch_vmap_pte_range_unmap_size(addr, pte);
if (size != PAGE_SIZE) {
if (WARN_ON(!IS_ALIGNED(addr, size))) {
addr = ALIGN_DOWN(addr, size);
pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT));
}
ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size);
if (WARN_ON(end - addr < size))
size = end - addr;
} else
#endif
ptent = ptep_get_and_clear(&init_mm, addr, pte);
WARN_ON(!pte_none(ptent) && !pte_present(ptent));
} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
arch_leave_lazy_mmu_mode();
*mask |= PGTBL_PTE_MODIFIED;
}
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
int cleared;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
cleared = pmd_clear_huge(pmd);
if (cleared || pmd_bad(*pmd))
*mask |= PGTBL_PMD_MODIFIED;
if (cleared) {
WARN_ON(next - addr < PMD_SIZE);
continue;
}
if (pmd_none_or_clear_bad(pmd))
continue;
vunmap_pte_range(pmd, addr, next, mask);
cond_resched();
} while (pmd++, addr = next, addr != end);
}
static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
int cleared;
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
cleared = pud_clear_huge(pud);
if (cleared || pud_bad(*pud))
*mask |= PGTBL_PUD_MODIFIED;
if (cleared) {
WARN_ON(next - addr < PUD_SIZE);
continue;
}
if (pud_none_or_clear_bad(pud))
continue;
vunmap_pmd_range(pud, addr, next, mask);
} while (pud++, addr = next, addr != end);
}
static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
p4d_clear_huge(p4d);
if (p4d_bad(*p4d))
*mask |= PGTBL_P4D_MODIFIED;
if (p4d_none_or_clear_bad(p4d))
continue;
vunmap_pud_range(p4d, addr, next, mask);
} while (p4d++, addr = next, addr != end);
}
/*
* vunmap_range_noflush is similar to vunmap_range, but does not
* flush caches or TLBs.
*
* The caller is responsible for calling flush_cache_vunmap() before calling
* this function, and flush_tlb_kernel_range after it has returned
* successfully (and before the addresses are expected to cause a page fault
* or be re-mapped for something else, if TLB flushes are being delayed or
* coalesced).
*
* This is an internal function only. Do not use outside mm/.
*/
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
unsigned long next;
pgd_t *pgd;
unsigned long addr = start;
pgtbl_mod_mask mask = 0;
BUG_ON(addr >= end);
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_bad(*pgd))
mask |= PGTBL_PGD_MODIFIED;
if (pgd_none_or_clear_bad(pgd))
continue;
vunmap_p4d_range(pgd, addr, next, &mask);
} while (pgd++, addr = next, addr != end);
if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
arch_sync_kernel_mappings(start, end);
}
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
kmsan_vunmap_range_noflush(start, end);
__vunmap_range_noflush(start, end);
}
/**
* vunmap_range - unmap kernel virtual addresses
* @addr: start of the VM area to unmap
* @end: end of the VM area to unmap (non-inclusive)
*
* Clears any present PTEs in the virtual address range, flushes TLBs and
* caches. Any subsequent access to the address before it has been re-mapped
* is a kernel bug.
*/
void vunmap_range(unsigned long addr, unsigned long end)
{
flush_cache_vunmap(addr, end);
vunmap_range_noflush(addr, end);
flush_tlb_kernel_range(addr, end);
}
static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
pgtbl_mod_mask *mask)
{
int err = 0;
pte_t *pte;
/*
* nr is a running index into the array which helps higher level
* callers keep track of where we're up to.
*/
pte = pte_alloc_kernel_track(pmd, addr, mask);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
struct page *page = pages[*nr];
if (WARN_ON(!pte_none(ptep_get(pte)))) {
err = -EBUSY;
break;
}
if (WARN_ON(!page)) {
err = -ENOMEM;
break;
}
if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
err = -EINVAL;
break;
}
set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
(*nr)++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
*mask |= PGTBL_PTE_MODIFIED;
return err;
}
static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
pud = pud_alloc_track(&init_mm, p4d, addr, mask);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
if (!p4d)
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
return -ENOMEM;
} while (p4d++, addr = next, addr != end);
return 0;
}
static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages)
{
unsigned long start = addr;
pgd_t *pgd;
unsigned long next;
int err = 0;
int nr = 0;
pgtbl_mod_mask mask = 0;
BUG_ON(addr >= end);
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_bad(*pgd))
mask |= PGTBL_PGD_MODIFIED;
err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
if (err)
break;
} while (pgd++, addr = next, addr != end);
if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
arch_sync_kernel_mappings(start, end);
return err;
}
/*
* vmap_pages_range_noflush is similar to vmap_pages_range, but does not
* flush caches.
*
* The caller is responsible for calling flush_cache_vmap() after this
* function returns successfully and before the addresses are accessed.
*
* This is an internal function only. Do not use outside mm/.
*/
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
WARN_ON(page_shift < PAGE_SHIFT);
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
page_shift == PAGE_SHIFT)
return vmap_small_pages_range_noflush(addr, end, prot, pages);
for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
int err;
err = vmap_range_noflush(addr, addr + (1UL << page_shift),
page_to_phys(pages[i]), prot,
page_shift);
if (err)
return err;
addr += 1UL << page_shift;
}
return 0;
}
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
page_shift);
if (ret)
return ret;
return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}
/**
* vmap_pages_range - map pages to a kernel virtual address
* @addr: start of the VM area to map
* @end: end of the VM area to map (non-inclusive)
* @prot: page protection flags to use
* @pages: pages to map (always PAGE_SIZE pages)
* @page_shift: maximum shift that the pages may be mapped with, @pages must
* be aligned and contiguous up to at least this shift.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int vmap_pages_range(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
int err;
err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
flush_cache_vmap(addr, end);
return err;
}
static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
unsigned long end)
{
might_sleep();
if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
return -EINVAL;
if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
return -EINVAL;
if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
return -EINVAL;
if ((end - start) >> PAGE_SHIFT > totalram_pages())
return -E2BIG;
if (start < (unsigned long)area->addr ||
(void *)end > area->addr + get_vm_area_size(area))
return -ERANGE;
return 0;
}
/**
* vm_area_map_pages - map pages inside given sparse vm_area
* @area: vm_area
* @start: start address inside vm_area
* @end: end address inside vm_area
* @pages: pages to map (always PAGE_SIZE pages)
*/
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
unsigned long end, struct page **pages)
{
int err;
err = check_sparse_vm_area(area, start, end);
if (err)
return err;
return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}
/**
* vm_area_unmap_pages - unmap pages inside given sparse vm_area
* @area: vm_area
* @start: start address inside vm_area
* @end: end address inside vm_area
*/
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
unsigned long end)
{
if (check_sparse_vm_area(area, start, end))
return;
vunmap_range(start, end);
}
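/*
 * Illustrative sketch, not part of this file: how a user of a VM_SPARSE
 * area (the BPF arena is the in-tree example) might populate and later
 * depopulate a window of it. demo_populate_window() is hypothetical and
 * assumes @area was created with VM_SPARSE set and that @pages holds
 * (end - start) >> PAGE_SHIFT pages.
 */
static __maybe_unused int demo_populate_window(struct vm_struct *area,
					       unsigned long start,
					       unsigned long end,
					       struct page **pages)
{
	int err;

	err = vm_area_map_pages(area, start, end, pages);
	if (err)
		return err;

	/* ... the mapping is live here and may be read/written ... */

	vm_area_unmap_pages(area, start, end);
	return 0;
}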
int is_vmalloc_or_module_addr(const void *x)
{
/*
* ARM, x86-64 and sparc64 put modules in a special place,
* and fall back on vmalloc() if that fails. Others
* just put them in the vmalloc space.
*/
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
unsigned long addr = (unsigned long)kasan_reset_tag(x);
if (addr >= MODULES_VADDR && addr < MODULES_END)
return 1;
#endif
return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
/*
* Walk a vmap address to the struct page it maps. Huge vmap mappings will
* return the tail page that corresponds to the base page address, which
* matches small vmap mappings.
*/
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
unsigned long addr = (unsigned long) vmalloc_addr;
struct page *page = NULL;
pgd_t *pgd = pgd_offset_k(addr);
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
/*
* XXX we might need to change this if we add VIRTUAL_BUG_ON for
* architectures that do not vmalloc module space
*/
VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
if (pgd_none(*pgd))
return NULL;
if (WARN_ON_ONCE(pgd_leaf(*pgd)))
return NULL; /* XXX: no allowance for huge pgd */
if (WARN_ON_ONCE(pgd_bad(*pgd)))
return NULL;
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
return NULL;
if (p4d_leaf(*p4d))
return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
if (WARN_ON_ONCE(p4d_bad(*p4d)))
return NULL;
pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return NULL;
if (pud_leaf(*pud))
return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
if (WARN_ON_ONCE(pud_bad(*pud)))
return NULL;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return NULL;
if (pmd_leaf(*pmd))
return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
if (WARN_ON_ONCE(pmd_bad(*pmd)))
return NULL;
ptep = pte_offset_kernel(pmd, addr);
pte = ptep_get(ptep);
if (pte_present(pte))
page = pte_page(pte);
return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
* Map a vmalloc()-space virtual address to the physical page frame number.
*/
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
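/*
 * Illustrative sketch, not part of this file: walking a vmalloc'ed buffer
 * page by page with vmalloc_to_page(), e.g. to feed a scatterlist or bio.
 * demo_for_each_page() and its callback signature are hypothetical.
 */
static __maybe_unused int demo_for_each_page(void *buf, size_t len,
		int (*fn)(struct page *page, size_t off, size_t bytes, void *priv),
		void *priv)
{
	size_t off = offset_in_page(buf);
	void *p = buf - off;
	int ret = 0;

	while (len) {
		size_t bytes = min_t(size_t, PAGE_SIZE - off, len);

		ret = fn(vmalloc_to_page(p), off, bytes, priv);
		if (ret)
			break;

		p += PAGE_SIZE;
		len -= bytes;
		off = 0;
	}

	return ret;
}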
/*** Global kva allocator ***/
#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;
/*
* This kmem_cache is used for vmap_area objects. Reusing an object
* from this dedicated cache, instead of allocating from the generic
* slab caches, makes things faster, especially in the "no edge"
* splitting of a free block.
*/
static struct kmem_cache *vmap_area_cachep;
/*
* This linked list is used together with free_vmap_area_root.
* It gives O(1) access to prev/next to perform fast coalescing.
*/
static LIST_HEAD(free_vmap_area_list);
/*
* This augmented red-black tree represents the free vmap space.
* All vmap_area objects in this tree are sorted by va->va_start
* address. It is used for allocation and for merging when a vmap
* object is released.
*
* Each vmap_area node stores the maximum free-block size available
* in its sub-tree, right or left. This makes it possible to find the
* lowest match for a free area.
*/
static struct rb_root free_vmap_area_root = RB_ROOT;
/*
* Preload a CPU with one object for the "no edge" split case. The
* aim is to avoid allocating from atomic context, so that more
* permissive allocation masks can be used.
*/
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
/*
* This structure defines a single, unified model in which a list and
* an rb-tree are part of one entity protected by the lock. Nodes are
* sorted in ascending order, and the list provides O(1) access to
* left/right neighbors as well as sequential traversal.
*/
struct rb_list {
struct rb_root root;
struct list_head head;
spinlock_t lock;
};
/*
* A fast size storage contains VAs of up to 1M in size. A pool consists
* of ready-to-go VAs of a certain size, linked to one another. An index
* into the pool array corresponds to the number of pages + 1.
*/
#define MAX_VA_SIZE_PAGES 256
struct vmap_pool {
struct list_head head;
unsigned long len;
};
/*
* Effective vmap-node logic. Users make use of nodes instead of a
* global heap, which balances access across them and mitigates
* contention.
*/
static struct vmap_node {
/* Simple size segregated storage. */
struct vmap_pool pool[MAX_VA_SIZE_PAGES];
spinlock_t pool_lock;
bool skip_populate;
/* Bookkeeping data of this node. */
struct rb_list busy;
struct rb_list lazy;
/*
* Ready-to-free areas.
*/
struct list_head purge_list;
struct work_struct purge_work;
unsigned long nr_purged;
} single;
/*
* The initial setup consists of one single node, i.e. balancing is
* fully disabled. Later on, after vmap is initialized, these parameters
* are updated based on the system's capacity.
*/
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;
/* A simple iterator over all vmap-nodes. */
#define for_each_vmap_node(vn) \
for ((vn) = &vmap_nodes[0]; \
(vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)
static inline unsigned int
addr_to_node_id(unsigned long addr)
{
return (addr / vmap_zone_size) % nr_vmap_nodes;
}
static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
return &vmap_nodes[addr_to_node_id(addr)];
}
static inline struct vmap_node *
id_to_node(unsigned int id)
{
return &vmap_nodes[id % nr_vmap_nodes];
}
static inline unsigned int
node_to_id(struct vmap_node *node)
{
/* Pointer arithmetic. */
unsigned int id = node - vmap_nodes;
if (likely(id < nr_vmap_nodes))
return id;
WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
return 0;
}
/*
* We use the value 0 to represent "no node"; that is why an encoded
* value is the node-id incremented by 1, and thus always greater
* than 0. A valid node_id that can be encoded lies in
* [0:nr_vmap_nodes - 1]. If the passed node_id is not valid,
* 0 is returned.
*/
static unsigned int
encode_vn_id(unsigned int node_id)
{
/* Can store U8_MAX [0:254] nodes. */
if (node_id < nr_vmap_nodes)
return (node_id + 1) << BITS_PER_BYTE;
/* Warn and no node encoded. */
WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
return 0;
}
/*
* Returns the node-id decoded from the passed value; the valid range is
* [0:nr_vmap_nodes - 1]. Otherwise nr_vmap_nodes is returned if the
* extracted data is wrong.
*/
static unsigned int
decode_vn_id(unsigned int val)
{
unsigned int node_id = (val >> BITS_PER_BYTE) - 1;
/* Can store U8_MAX [0:254] nodes. */
if (node_id < nr_vmap_nodes)
return node_id;
/* If it was _not_ zero, warn. */
WARN_ONCE(node_id != UINT_MAX,
"Decode wrong node id (%d)\n", node_id);
return nr_vmap_nodes;
}
static bool
is_vn_id_valid(unsigned int node_id)
{
if (node_id < nr_vmap_nodes)
return true;
return false;
}
static __always_inline unsigned long
va_size(struct vmap_area *va)
{
return (va->va_end - va->va_start);
}
static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
struct vmap_area *va;
va = rb_entry_safe(node, struct vmap_area, rb_node);
return va ? va->subtree_max_size : 0;
}
RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;
unsigned long vmalloc_nr_pages(void)
{
return atomic_long_read(&nr_vmalloc_pages);
}
static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
struct rb_node *n = root->rb_node;
addr = (unsigned long)kasan_reset_tag((void *)addr);
while (n) {
struct vmap_area *va;
va = rb_entry(n, struct vmap_area, rb_node);
if (addr < va->va_start)
n = n->rb_left;
else if (addr >= va->va_end)
n = n->rb_right;
else
return va;
}
return NULL;
}
/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
struct vmap_area *va = NULL;
struct rb_node *n = root->rb_node;
addr = (unsigned long)kasan_reset_tag((void *)addr);
while (n) {
struct vmap_area *tmp;
tmp = rb_entry(n, struct vmap_area, rb_node);
if (tmp->va_end > addr) {
va = tmp;
if (tmp->va_start <= addr)
break;
n = n->rb_left;
} else
n = n->rb_right;
}
return va;
}
/*
* Returns the node where the first VA that satisfies addr < va_end resides.
* On success, the node is locked; the caller is responsible for unlocking it
* once the VA no longer needs to be accessed.
*
* Returns NULL if nothing found.
*/
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
unsigned long va_start_lowest;
struct vmap_node *vn;
repeat:
va_start_lowest = 0;
for_each_vmap_node(vn) {
spin_lock(&vn->busy.lock);
*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
if (*va)
if (!va_start_lowest || (*va)->va_start < va_start_lowest)
va_start_lowest = (*va)->va_start;
spin_unlock(&vn->busy.lock);
}
/*
* Check that the found VA still exists; it might have gone away. In that
* case we repeat the search, because the VA has been removed concurrently
* and we need to proceed to the next one, which is a rare case.
*/
if (va_start_lowest) {
vn = addr_to_node(va_start_lowest);
spin_lock(&vn->busy.lock);
*va = __find_vmap_area(va_start_lowest, &vn->busy.root);
if (*va)
return vn;
spin_unlock(&vn->busy.lock);
goto repeat;
}
return NULL;
}
/*
* This function returns the address of the parent node and of its
* left or right link for further processing.
*
* Otherwise NULL is returned. In that case all further steps regarding
* insertion of the conflicting, overlapping range have to be declined
* and are in fact considered a bug.
*/
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
struct rb_root *root, struct rb_node *from,
struct rb_node **parent)
{
struct vmap_area *tmp_va;
struct rb_node **link;
if (root) {
link = &root->rb_node;
if (unlikely(!*link)) {
*parent = NULL;
return link;
}
} else {
link = &from;
}
/*
* Go to the bottom of the tree. When we hit the last point, we end up
* with the parent rb_node and the correct direction (named "link" here)
* to which the new va->rb_node will be attached.
*/
do {
tmp_va = rb_entry(*link, struct vmap_area, rb_node);
/*
* During the traversal we also do some sanity checking.
* Trigger a WARN() if there are partial (left/right)
* or full overlaps.
*/
if (va->va_end <= tmp_va->va_start)
link = &(*link)->rb_left;
else if (va->va_start >= tmp_va->va_end)
link = &(*link)->rb_right;
else {
WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
return NULL;
}
} while (*link);
*parent = &tmp_va->rb_node;
return link;
}
static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
struct list_head *list;
if (unlikely(!parent))
/*
* The red-black tree where we try to find VA neighbors
* before merging or inserting is empty, i.e. there is no
* free vmap space. Normally this does not happen, but we
* handle the case anyway.
*/
return NULL;
list = &rb_entry(parent, struct vmap_area, rb_node)->list;
return (&parent->rb_right == link ? list->next : list);
}
static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
struct rb_node *parent, struct rb_node **link,
struct list_head *head, bool augment)
{
/*
* VA is still not in the list, but we can
* identify its future previous list_head node.
*/
if (likely(parent)) {
head = &rb_entry(parent, struct vmap_area, rb_node)->list;
if (&parent->rb_right != link)
head = head->prev;
}
/* Insert to the rb-tree */
rb_link_node(&va->rb_node, parent, link);
if (augment) {
/*
* Some explanation here: just perform a simple insertion into
* the tree. We do not set va->subtree_max_size to its current
* size before calling rb_insert_augmented(), because the tree is
* populated from the bottom up to the parent levels once the node
* _is_ in the tree.
*
* Therefore we set subtree_max_size to zero after insertion, and
* let __augment_tree_propagate_from() put everything into the
* correct order later on.
*/
rb_insert_augmented(&va->rb_node,
root, &free_vmap_area_rb_augment_cb);
va->subtree_max_size = 0;
} else {
rb_insert_color(&va->rb_node, root);
}
/* Address-sort this list */
list_add(&va->list, head);
}
static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
struct rb_node *parent, struct rb_node **link,
struct list_head *head)
{
__link_va(va, root, parent, link, head, false);
}
static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
struct rb_node *parent, struct rb_node **link,
struct list_head *head)
{
__link_va(va, root, parent, link, head, true);
}
static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
return;
if (augment)
rb_erase_augmented(&va->rb_node,
root, &free_vmap_area_rb_augment_cb);
else
rb_erase(&va->rb_node, root);
list_del_init(&va->list);
RB_CLEAR_NODE(&va->rb_node);
}
static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
__unlink_va(va, root, false);
}
static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
__unlink_va(va, root, true);
}
#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
* Gets called when removing a node and rotating.
*/
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
return max3(va_size(va),
get_subtree_max_size(va->rb_node.rb_left),
get_subtree_max_size(va->rb_node.rb_right));
}
static void
augment_tree_propagate_check(void)
{
struct vmap_area *va;
unsigned long computed_size;
list_for_each_entry(va, &free_vmap_area_list, list) {
computed_size = compute_subtree_max_size(va);
if (computed_size != va->subtree_max_size)
pr_emerg("tree is corrupted: %lu, %lu\n",
va_size(va), va->subtree_max_size);
}
}
#endif
/*
* This function populates subtree_max_size from the bottom towards the
* upper levels, starting from the given VA. The propagation must be done
* whenever a VA's size is modified by changing its va_start/va_end, or
* when a new VA is inserted into the tree.
*
* It means that __augment_tree_propagate_from() must be called:
* - after a VA has been inserted into the tree (free path);
* - after a VA has been shrunk (allocation path);
* - after a VA has been increased (merging path).
*
* Please note that this does not mean that the upper parent nodes
* and their subtree_max_size are recalculated all the time up
* to the root node.
*
* 4--8
* /\
* / \
* / \
* 2--2 8--8
*
* For example, if we modify node 4, shrinking it to 2, no modification
* is required at all. If we shrink node 2 to 1, only its subtree_max_size
* is updated and set to 1. If we shrink node 8 to 6, then its
* subtree_max_size is set to 6 and the parent
* node becomes 4--6.
*/
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
/*
* Populate the tree from bottom towards the root until
* the calculated maximum available size of checked node
* is equal to its current one.
*/
free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
#if DEBUG_AUGMENT_PROPAGATE_CHECK
augment_tree_propagate_check();
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
struct rb_root *root, struct list_head *head)
{
struct rb_node **link;
struct rb_node *parent;
link = find_va_links(va, root, NULL, &parent);
if (link)
link_va(va, root, parent, link, head);
}
static void
insert_vmap_area_augment(struct vmap_area *va,
struct rb_node *from, struct rb_root *root,
struct list_head *head)
{
struct rb_node **link;
struct rb_node *parent;
if (from)
link = find_va_links(va, NULL, from, &parent);
else
link = find_va_links(va, root, NULL, &parent);
if (link) {
link_va_augment(va, root, parent, link, head);
augment_tree_propagate_from(va);
}
}
/*
* Merge a de-allocated chunk of VA memory with the previous and next
* free blocks. If no coalescing is done, a new free area is inserted.
* If the VA has been merged, it is freed.
*
* Please note, it can return NULL in case of overlapping ranges,
* followed by a WARN() report. Although this is buggy behaviour, the
* system can stay alive and keep going.
*/
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
struct rb_root *root, struct list_head *head, bool augment)
{
struct vmap_area *sibling;
struct list_head *next;
struct rb_node **link;
struct rb_node *parent;
bool merged = false;
/*
* Find a place in the tree where VA potentially will be
* inserted, unless it is merged with its sibling/siblings.
*/
link = find_va_links(va, root, NULL, &parent);
if (!link)
return NULL;
/*
* Get next node of VA to check if merging can be done.
*/
next = get_va_next_sibling(parent, link);
if (unlikely(next == NULL))
goto insert;
/*
* start            end
* |                |
* |<------VA------>|<-----Next----->|
*                  |                |
*                  start            end
*/
if (next != head) {
sibling = list_entry(next, struct vmap_area, list);
if (sibling->va_start == va->va_end) {
sibling->va_start = va->va_start;
/* Free vmap_area object. */
kmem_cache_free(vmap_area_cachep, va);
/* Point to the new merged area. */
va = sibling;
merged = true;
}
}
/*
*                  start            end
*                  |                |
* |<-----Prev----->|<------VA------>|
* |                |
* start            end
*/
if (next->prev != head) {
sibling = list_entry(next->prev, struct vmap_area, list);
if (sibling->va_end == va->va_start) {
/*
* If both neighbors are coalesced, it is important
* to unlink the "next" node first, followed by merging
* with "previous" one. Otherwise the tree might not be
* fully populated if a sibling's augmented value is
* "normalized" because of rotation operations.
*/
if (merged)
__unlink_va(va, root, augment);
sibling->va_end = va->va_end;
/* Free vmap_area object. */
kmem_cache_free(vmap_area_cachep, va);
/* Point to the new merged area. */
va = sibling;
merged = true;
}
}
insert:
if (!merged)
__link_va(va, root, parent, link, head, augment);
return va;
}
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
struct rb_root *root, struct list_head *head)
{
return __merge_or_add_vmap_area(va, root, head, false);
}
static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
struct rb_root *root, struct list_head *head)
{
va = __merge_or_add_vmap_area(va, root, head, true);
if (va)
augment_tree_propagate_from(va);
return va;
}
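/*
* Check whether a free VA can hold an allocation of "size" bytes,
* aligned to "align" and starting at or above "vstart", taking a
* possible overflow of the aligned start address into account.
*/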
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
unsigned long align, unsigned long vstart)
{
unsigned long nva_start_addr;
if (va->va_start > vstart)
nva_start_addr = ALIGN(va->va_start, align);
else
nva_start_addr = ALIGN(vstart, align);
/* Can be overflowed due to big size or alignment. */
if (nva_start_addr + size < nva_start_addr ||
nva_start_addr < vstart)
return false;
return (nva_start_addr + size <= va->va_end);
}
/*
* Find the first free block (lowest start address) in the tree
* that satisfies the request described by the passed parameters.
* Please note, with an alignment bigger than PAGE_SIZE, the
* search length is adjusted to account for the worst case
* alignment overhead.
*/
*/
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
unsigned long align, unsigned long vstart, bool adjust_search_size)
{
struct vmap_area *va;
struct rb_node *node;
unsigned long length;
/* Start from the root. */
node = root->rb_node;
/* Adjust the search size for alignment overhead. */
length = adjust_search_size ? size + align - 1 : size;
while (node) {
va = rb_entry(node, struct vmap_area, rb_node);
if (get_subtree_max_size(node->rb_left) >= length &&
vstart < va->va_start) {
node = node->rb_left;
} else {
if (is_within_this_va(va, size, align, vstart))
return va;
/*
* It does not make sense to go deeper into the right
* sub-tree if it does not have a free block that is
* equal to or bigger than the requested search length.
*/
if (get_subtree_max_size(node->rb_right) >= length) {
node = node->rb_right;
continue;
}
/*
* OK. We roll back and find the first right sub-tree
* that satisfies the search criteria. This can happen
* due to a "vstart" restriction or an alignment overhead
* that is bigger than PAGE_SIZE.
*/
while ((node = rb_parent(node))) {
va = rb_entry(node, struct vmap_area, rb_node);
if (is_within_this_va(va, size, align, vstart))
return va;
if (get_subtree_max_size(node->rb_right) >= length &&
vstart <= va->va_start) {
/*
* Shift vstart forward. Please note, we update it with
* the parent's start address plus "1" because we do not want
* to re-enter a sub-tree that has already been checked
* and where no suitable free block was found.
*/
vstart = va->va_start + 1;
node = node->rb_right;
break;
}
}
}
}
return NULL;
}
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>
static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
unsigned long align, unsigned long vstart)
{
struct vmap_area *va;
list_for_each_entry(va, head, list) {
if (!is_within_this_va(va, size, align, vstart))
continue;
return va;
}
return NULL;
}
static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
unsigned long size, unsigned long align)
{
struct vmap_area *va_1, *va_2;
unsigned long vstart;
unsigned int rnd;
get_random_bytes(&rnd, sizeof(rnd));
vstart = VMALLOC_START + rnd;
va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
if (va_1 != va_2)
pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
va_1, va_2, vstart);
}
#endif
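/*
* Describes how a requested range fits into a free VA: it may consume
* the VA completely, leave a remainder on the right or on the left,
* or split the VA into two parts (no edge is touched).
*/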
enum fit_type {
NOTHING_FIT = 0,
FL_FIT_TYPE = 1, /* full fit */
LE_FIT_TYPE = 2, /* left edge fit */
RE_FIT_TYPE = 3, /* right edge fit */
NE_FIT_TYPE = 4 /* no edge fit */
};
static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
unsigned long nva_start_addr, unsigned long size)
{
enum fit_type type;
/* Check if it is within VA. */
if (nva_start_addr < va->va_start ||
nva_start_addr + size > va->va_end)
return NOTHING_FIT;
/* Now classify. */
if (va->va_start == nva_start_addr) {
if (va->va_end == nva_start_addr + size)
type = FL_FIT_TYPE;
else
type = LE_FIT_TYPE;
} else if (va->va_end == nva_start_addr + size) {
type = RE_FIT_TYPE;
} else {
type = NE_FIT_TYPE;
}
return type;
}
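/*
* Carve the range [nva_start_addr, nva_start_addr + size) out of a
* free VA according to its fit type: the VA is removed completely,
* shrunk from one edge, or split in two, in which case a preloaded
* (or GFP_NOWAIT allocated) vmap_area is used for the lower part.
*/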
static __always_inline int
va_clip(struct rb_root *root, struct list_head *head,
struct vmap_area *va, unsigned long nva_start_addr,
unsigned long size)
{
struct vmap_area *lva = NULL;
enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
if (type == FL_FIT_TYPE) {
/*
* No need to split VA, it fully fits.
*
* |               |
* V      NVA      V
* |---------------|
*/
unlink_va_augment(va, root);
kmem_cache_free(vmap_area_cachep, va);
} else if (type == LE_FIT_TYPE) {
/*
* Split left edge of fit VA.
*
* |       |
* V  NVA  V   R
* |-------|-------|
*/
va->va_start += size;
} else if (type == RE_FIT_TYPE) {
/*
* Split right edge of fit VA.
*
*         |       |
*     L   V  NVA  V
* |-------|-------|
*/
va->va_end = nva_start_addr;
} else if (type == NE_FIT_TYPE) {
/*
* Split no edge of fit VA.
*
*     |       |
*   L V  NVA  V R
* |---|-------|---|
*/
lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
if (unlikely(!lva)) {
/*
* For the percpu allocator we do not do any pre-allocation
* and leave it as it is. The reason is that it most likely
* never ends up with NE_FIT_TYPE splitting. In case of
* percpu allocations, offsets and sizes are aligned to a
* fixed alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
* are its main fitting cases.
*
* There are a few exceptions though; for example the very
* first allocation (early boot) when we have "one" big free
* space that has to be split.
*
* We can also hit this path for regular "vmap" allocations,
* if the current CPU was not preloaded. See the comment in
* alloc_vmap_area() for why. If so, GFP_NOWAIT is used
* instead to get an extra object for split purposes. That
* is rare and usually does not occur.
*
* What happens if the allocation fails? Basically, the
* "overflow" path is triggered to purge lazily freed
* areas to free some memory, then the "retry" path is
* taken to repeat the attempt one more time. See more
* details in alloc_vmap_area().
*/
lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
if (!lva)
return -ENOMEM;
}
/*
* Build the remainder.
*/
lva->va_start = va->va_start;
lva->va_end = nva_start_addr;
/*
* Shrink this VA to remaining size.
*/
va->va_start = nva_start_addr + size;
} else {
return -EINVAL;
}
if (type != FL_FIT_TYPE) {
augment_tree_propagate_from(va);
if (lva) /* type == NE_FIT_TYPE */
insert_vmap_area_augment(lva, &va->rb_node, root, head);
}
return 0;
}
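/*
* Compute an aligned start address inside the given free VA, check it
* against the "vend" restriction and clip the free VA accordingly.
* Returns the new start address on success or an error value.
*/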
static unsigned long
va_alloc(struct vmap_area *va,
struct rb_root *root, struct list_head *head,
unsigned long size, unsigned long align,
unsigned long vstart, unsigned long vend)
{
unsigned long nva_start_addr;
int ret;
if (va->va_start > vstart)
nva_start_addr = ALIGN(va->va_start, align);
else
nva_start_addr = ALIGN(vstart, align);
/* Check the "vend" restriction. */
if (nva_start_addr + size > vend)
return -ERANGE;
/* Update the free vmap_area. */
ret = va_clip(root, head, va, nva_start_addr, size);
if (WARN_ON_ONCE(ret))
return ret;
return nva_start_addr;
}
/*
* Returns the start address of the newly allocated area on success.
* Otherwise an error value indicating the failure is returned.
*/
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
unsigned long size, unsigned long align,
unsigned long vstart, unsigned long vend)
{
bool adjust_search_size = true;
unsigned long nva_start_addr;
struct vmap_area *va;
/*
* Do not adjust when:
* a) align <= PAGE_SIZE, because it does not make any sense.
* All blocks(their start addresses) are at least PAGE_SIZE
* aligned anyway;
* b) a short range where a requested size corresponds to exactly
* specified [vstart:vend] interval and an alignment > PAGE_SIZE.
* With adjusted search length an allocation would not succeed.
*/
if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
adjust_search_size = false;
va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
if (unlikely(!va))
return -ENOENT;
nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
if (!IS_ERR_VALUE(nva_start_addr))
find_vmap_lowest_match_check(root, head, size, align);
#endif
return nva_start_addr;
}
/*
* Free a region of KVA allocated by alloc_vmap_area
*/
static void free_vmap_area(struct vmap_area *va)
{
struct vmap_node *vn = addr_to_node(va->va_start);
/*
* Remove from the busy tree/list.
*/
spin_lock(&vn->busy.lock);
unlink_va(va, &vn->busy.root);
spin_unlock(&vn->busy.lock);
/*
* Insert/Merge it back to the free tree/list.
*/
spin_lock(&free_vmap_area_lock);
merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
spin_unlock(&free_vmap_area_lock);
}
static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
struct vmap_area *va = NULL, *tmp;
/*
* Preload this CPU with one extra vmap_area object. It is used
* when the fit type of a free area is NE_FIT_TYPE. It guarantees
* that the CPU doing the allocation is preloaded.
*
* We do it in a non-atomic context, which allows us to use more
* permissive allocation masks and be more robust under low memory
* conditions and high memory pressure.
*/
if (!this_cpu_read(ne_fit_preload_node))
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
spin_lock(lock);
tmp = NULL;
if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
kmem_cache_free(vmap_area_cachep, va);
}
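/*
* Map an allocation size to one of the node's size-indexed pools.
* Only sizes up to MAX_VA_SIZE_PAGES pages are cached; anything
* bigger returns NULL and is served from the global free tree.
* For example, assuming 4K pages, a 3-page (12K) request maps to
* pool[2].
*/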
static struct vmap_pool *
size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
unsigned int idx = (size - 1) / PAGE_SIZE;
if (idx < MAX_VA_SIZE_PAGES)
return &vn->pool[idx];
return NULL;
}
static bool
node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
{
struct vmap_pool *vp;
vp = size_to_va_pool(n, va_size(va));
if (!vp)
return false;
spin_lock(&n->pool_lock);
list_add(&va->list, &vp->head);
WRITE_ONCE(vp->len, vp->len + 1);
spin_unlock(&n->pool_lock);
return true;
}
static struct vmap_area *
node_pool_del_va(struct vmap_node *vn, unsigned long size,
unsigned long align, unsigned long vstart,
unsigned long vend)
{
struct vmap_area *va = NULL;
struct vmap_pool *vp;
int err = 0;
vp = size_to_va_pool(vn, size);
if (!vp || list_empty(&vp->head))
return NULL;
spin_lock(&vn->pool_lock);
if (!list_empty(&vp->head)) {
va = list_first_entry(&vp->head, struct vmap_area, list);
if (IS_ALIGNED(va->va_start, align)) {
/*
* Do some sanity checks and emit a warning
* if one of the checks below detects an error.
*/
err |= (va_size(va) != size);
err |= (va->va_start < vstart);
err |= (va->va_end > vend);
if (!WARN_ON_ONCE(err)) {
list_del_init(&va->list);
WRITE_ONCE(vp->len, vp->len - 1);
} else {
va = NULL;
}
} else {
list_move_tail(&va->list, &vp->head);
va = NULL;
}
}
spin_unlock(&vn->pool_lock);
return va;
}
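/*
* Try to serve a request from this CPU's node pool. Only the default
* [VMALLOC_START:VMALLOC_END] range with more than one node is
* eligible; on success the returned VA is already unlinked from the
* pool and *addr is set to its start address.
*/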
static struct vmap_area *
node_alloc(unsigned long size, unsigned long align,
unsigned long vstart, unsigned long vend,
unsigned long *addr, unsigned int *vn_id)
{
struct vmap_area *va;
*vn_id = 0;
*addr = -EINVAL;
/*
* Fall back to the global heap if the request is not for the
* default vmalloc range or there is only one node.
*/
if (vstart != VMALLOC_START || vend != VMALLOC_END ||
nr_vmap_nodes == 1)
return NULL;
*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
*vn_id = encode_vn_id(*vn_id);
if (va)
*addr = va->va_start;
return va;
}
static inline void setup_vmalloc_vm(struct vm_struct *vm,
struct vmap_area *va, unsigned long flags, const void *caller)
{
vm->flags = flags;
vm->addr = (void *)va->va_start;
vm->size = vm->requested_size = va_size(va);
vm->caller = caller;
va->vm = vm;
}
/*
* Allocate a region of KVA of the specified size and alignment, within the
* vstart and vend. If vm is passed in, the two will also be bound.
*/
static struct vmap_area *alloc_vmap_area(unsigned long size,
unsigned long align,
unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask,
unsigned long va_flags, struct vm_struct *vm)
{
struct vmap_node *vn;
struct vmap_area *va;
unsigned long freed;
unsigned long addr;
unsigned int vn_id;
int purged = 0;
int ret;
if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
return ERR_PTR(-EINVAL);
if (unlikely(!vmap_initialized))
return ERR_PTR(-EBUSY);
/* Only reclaim behaviour flags are relevant. */
gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
might_sleep();
/*
* If node_alloc() fails here and a VA is later obtained from
* the global heap, it is still marked with this "vn_id" so it
* is returned to this node's pool later. This makes it possible
* to populate the pools based on user demand.
*
* On success a ready-to-go VA is returned.
*/
va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
if (!va) {
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
if (unlikely(!va))
return ERR_PTR(-ENOMEM);
/*
* Only scan the relevant parts containing pointers to other objects
* to avoid false negatives.
*/
kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
}
retry:
if (IS_ERR_VALUE(addr)) {
preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
size, align, vstart, vend);
spin_unlock(&free_vmap_area_lock);
/*
* This is not a fast path. Check if yielding is needed. This
* is the only reschedule point in the vmalloc() path.
*/
cond_resched();
}
trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
/*
* If the allocation failed, an error value was
* returned. In that case trigger the overflow path.
*/
if (IS_ERR_VALUE(addr))
goto overflow;
va->va_start = addr;
va->va_end = addr + size;
va->vm = NULL;
va->flags = (va_flags | vn_id);
if (vm) {
vm->addr = (void *)va->va_start;
vm->size = va_size(va);
va->vm = vm;
}
vn = addr_to_node(va->va_start);
spin_lock(&vn->busy.lock);
insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
spin_unlock(&vn->busy.lock);
BUG_ON(!IS_ALIGNED(va->va_start, align));
BUG_ON(va->va_start < vstart);
BUG_ON(va->va_end > vend);
ret = kasan_populate_vmalloc(addr, size, gfp_mask);
if (ret) {
free_vmap_area(va);
return ERR_PTR(ret);
}
return va;
overflow:
if (!purged) {
reclaim_and_purge_vmap_areas();
purged = 1;
goto retry;
}
freed = 0;
blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
if (freed > 0) {
purged = 0;
goto retry;
}
if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n",
size, vstart, vend);
kmem_cache_free(vmap_area_cachep, va);
return ERR_PTR(-EBUSY);
}
int register_vmap_purge_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
/*
* lazy_max_pages is the maximum amount of virtual address space we gather up
* before attempting to purge with a TLB flush.
*
* There is a tradeoff here: a larger number will cover more kernel page tables
* and take slightly longer to purge, but it will linearly reduce the number of
* global TLB flushes that must be performed. It would seem natural to scale
* this number up linearly with the number of CPUs (because vmapping activity
* could also scale linearly with the number of CPUs), however it is likely
* that in practice, workloads might be constrained in other ways that mean
* vmap activity will not scale linearly with CPUs. Also, I want to be
* conservative and not introduce a big latency on huge systems, so go with
* a less aggressive log scale. It will still be an improvement over the old
* code, and it will be simple to change the scale factor if we find that it
* becomes a problem on bigger systems.
*/
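/*
* For example, with 16 online CPUs and 4K pages: fls(16) = 5, so up
* to 5 * 32MB (i.e. 40960 pages) worth of lazily freed area may be
* gathered before a purge is triggered.
*/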
static unsigned long lazy_max_pages(void)
{
unsigned int log;
log = fls(num_online_cpus());
return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
/*
* Serialize vmap purging. There is no actual critical section protected
* by this lock, but we want to avoid concurrent calls for performance
* reasons and to make the pcpu_get_vm_areas more deterministic.
*/
static DEFINE_MUTEX(vmap_purge_lock);
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
static void
reclaim_list_global(struct list_head *head)
{
struct vmap_area *va, *n;
if (list_empty(head))
return;
spin_lock(&free_vmap_area_lock);
list_for_each_entry_safe(va, n, head, list)
merge_or_add_vmap_area_augment(va,
&free_vmap_area_root, &free_vmap_area_list);
spin_unlock(&free_vmap_area_lock);
}
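/*
* Shrink a node's pools: roughly a quarter of each pool's cached VAs
* (or all of them if full_decay) are detached and merged back into
* the global free tree/list.
*/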
static void
decay_va_pool_node(struct vmap_node *vn, bool full_decay)
{
LIST_HEAD(decay_list);
struct rb_root decay_root = RB_ROOT;
struct vmap_area *va, *nva;
unsigned long n_decay, pool_len;
int i;
for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
LIST_HEAD(tmp_list);
if (list_empty(&vn->pool[i].head))
continue;
/* Detach the pool, so no-one can access it. */
spin_lock(&vn->pool_lock);
list_replace_init(&vn->pool[i].head, &tmp_list);
spin_unlock(&vn->pool_lock);
pool_len = n_decay = vn->pool[i].len;
WRITE_ONCE(vn->pool[i].len, 0);
/* Decay the pool by ~25% of the remaining objects. */
if (!full_decay)
n_decay >>= 2;
pool_len -= n_decay;
list_for_each_entry_safe(va, nva, &tmp_list, list) {
if (!n_decay--)
break;
list_del_init(&va->list);
merge_or_add_vmap_area(va, &decay_root, &decay_list);
}
/*
* Attach the pool back if it has been partly decayed.
* Please note, it is assumed that nobody (no other context)
* can populate the pool, therefore a simple list replace
* operation takes place here.
*/
if (!list_empty(&tmp_list)) {
spin_lock(&vn->pool_lock);
list_replace_init(&tmp_list, &vn->pool[i].head);
WRITE_ONCE(vn->pool[i].len, pool_len);
spin_unlock(&vn->pool_lock);
}
}
reclaim_list_global(&decay_list);
}
static void
kasan_release_vmalloc_node(struct vmap_node *vn)
{
struct vmap_area *va;
unsigned long start, end;
start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
list_for_each_entry(va, &vn->purge_list, list) {
if (is_vmalloc_or_module_addr((void *) va->va_start))
kasan_release_vmalloc(va->va_start, va->va_end,
va->va_start, va->va_end,
KASAN_VMALLOC_PAGE_RANGE);
}
kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
}
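/*
* Worker that drains one node's purge_list. Each lazily freed VA is
* either put back into the node's pool for reuse or, failing that,
* returned to the global free tree.
*/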
static void purge_vmap_node(struct work_struct *work)
{
struct vmap_node *vn = container_of(work,
struct vmap_node, purge_work);
unsigned long nr_purged_pages = 0;
struct vmap_area *va, *n_va;
LIST_HEAD(local_list);
if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
kasan_release_vmalloc_node(vn);
vn->nr_purged = 0;
list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
unsigned long nr = va_size(va) >> PAGE_SHIFT;
unsigned int vn_id = decode_vn_id(va->flags);
list_del_init(&va->list);
nr_purged_pages += nr;
vn->nr_purged++;
if (is_vn_id_valid(vn_id) && !vn->skip_populate)
if (node_pool_add_va(vn, va))
continue;
/* Go back to global. */
list_add(&va->list, &local_list);
}
atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);
reclaim_list_global(&local_list);
}
/*
* Purges all lazily-freed vmap areas.
*/
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
bool full_pool_decay)
{
unsigned long nr_purged_areas = 0;
unsigned int nr_purge_helpers;
static cpumask_t purge_nodes;
unsigned int nr_purge_nodes;
struct vmap_node *vn;
int i;
lockdep_assert_held(&vmap_purge_lock);
/*
* Use cpumask to mark which node has to be processed.
*/
purge_nodes = CPU_MASK_NONE;
for_each_vmap_node(vn) {
INIT_LIST_HEAD(&vn->purge_list);
vn->skip_populate = full_pool_decay;
decay_va_pool_node(vn, full_pool_decay);
if (RB_EMPTY_ROOT(&vn->lazy.root))
continue;
spin_lock(&vn->lazy.lock);
WRITE_ONCE(vn->lazy.root.rb_node, NULL);
list_replace_init(&vn->lazy.head, &vn->purge_list);
spin_unlock(&vn->lazy.lock);
start = min(start, list_first_entry(&vn->purge_list,
struct vmap_area, list)->va_start);
end = max(end, list_last_entry(&vn->purge_list,
struct vmap_area, list)->va_end);
cpumask_set_cpu(node_to_id(vn), &purge_nodes);
}
nr_purge_nodes = cpumask_weight(&purge_nodes);
if (nr_purge_nodes > 0) {
flush_tlb_kernel_range(start, end);
/* One extra helper is spawned per full lazy_max_pages() set, minus one. */
nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
for_each_cpu(i, &purge_nodes) {
vn = &vmap_nodes[i];
if (nr_purge_helpers > 0) {
INIT_WORK(&vn->purge_work, purge_vmap_node);
if (cpumask_test_cpu(i, cpu_online_mask))
schedule_work_on(i, &vn->purge_work);
else
schedule_work(&vn->purge_work);
nr_purge_helpers--;
} else {
vn->purge_work.func = NULL;
purge_vmap_node(&vn->purge_work);
nr_purged_areas += vn->nr_purged;
}
}
for_each_cpu(i, &purge_nodes) {
vn = &vmap_nodes[i];
if (vn->purge_work.func) {
flush_work(&vn->purge_work);
nr_purged_areas += vn->nr_purged;
}
}
}
trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
return nr_purged_areas > 0;
}
/*
* Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
*/
static void reclaim_and_purge_vmap_areas(void)
{
mutex_lock(&vmap_purge_lock);
purge_fragmented_blocks_allcpus();
__purge_vmap_area_lazy(ULONG_MAX, 0, true);
mutex_unlock(&vmap_purge_lock);
}
static void drain_vmap_area_work(struct work_struct *work)
{
mutex_lock(&vmap_purge_lock);
__purge_vmap_area_lazy(ULONG_MAX, 0, false);
mutex_unlock(&vmap_purge_lock);
}
/*
* Free a vmap area; the caller ensures that the area has been
* unmapped and unlinked, and that flush_cache_vunmap() has been
* called for the correct range previously.
*/
static void free_vmap_area_noflush(struct vmap_area *va)
{
unsigned long nr_lazy_max = lazy_max_pages();
unsigned long va_start = va->va_start;
unsigned int vn_id = decode_vn_id(va->flags);
struct vmap_node *vn;
unsigned long nr_lazy;
if (WARN_ON_ONCE(!list_empty(&va->list)))
return;
nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT,
&vmap_lazy_nr);
/*
* If it was requested by a certain node, we would like to
* return it to that node, i.e. to its pool for later reuse.
*/
vn = is_vn_id_valid(vn_id) ?
id_to_node(vn_id):addr_to_node(va->va_start);
spin_lock(&vn->lazy.lock);
insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
spin_unlock(&vn->lazy.lock);
trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
/* After this point, we may free va at any time */
if (unlikely(nr_lazy > nr_lazy_max))
schedule_work(&drain_vmap_work);
}
/*
* Free and unmap a vmap area
*/
static void free_unmap_vmap_area(struct vmap_area *va)
{
flush_cache_vunmap(va->va_start, va->va_end);
vunmap_range_noflush(va->va_start, va->va_end);
if (debug_pagealloc_enabled_static())
flush_tlb_kernel_range(va->va_start, va->va_end);
free_vmap_area_noflush(va);
}
struct vmap_area *find_vmap_area(unsigned long addr)
{
struct vmap_node *vn;
struct vmap_area *va;
int i, j;
if (unlikely(!vmap_initialized))
return NULL;
/*
* addr_to_node_id(addr) converts an address to the index of the
* node where a VA is located. If a VA spans several zones and the
* passed addr is not the same as va->va_start, which is not common,
* we may need to scan extra nodes. See an example:
*
*      <----va---->
* -|-----|-----|-----|-----|-
*     1     2     0     1
*
* The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If
* the passed addr lies within node 2 or 0, we must do extra work.
*/
i = j = addr_to_node_id(addr);
do {
vn = &vmap_nodes[i];
spin_lock(&vn->busy.lock);
va = __find_vmap_area(addr, &vn->busy.root);
spin_unlock(&vn->busy.lock);
if (va)
return va;
} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);
return NULL;
}
static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
{
struct vmap_node *vn;
struct vmap_area *va;
int i, j;
/*
* See the comment in find_vmap_area() about the loop.
*/
i = j = addr_to_node_id(addr);
do {
vn = &vmap_nodes[i];
spin_lock(&vn->busy.lock);
va = __find_vmap_area(addr, &vn->busy.root);
if (va)
unlink_va(va, &vn->busy.root);
spin_unlock(&vn->busy.lock);
if (va)
return va;
} while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j);
return NULL;
}
/*** Per cpu kva allocator ***/
/*
* vmap space is limited especially on 32 bit architectures. Ensure there is
* room for at least 16 percpu vmap blocks per CPU.
*/
/*
* If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
* to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
* instead (we just need a rough idea)
*/
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE (128UL*1024*1024)
#else
#define VMALLOC_SPACE (128UL*1024*1024*1024)
#endif
#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS \
VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
/*
* Purge threshold to prevent overeager purging of fragmented blocks for
* regular operations: Purge if vb->free is less than 1/4 of the capacity.
*/
#define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
#define VMAP_RAM 0x1 /* indicates vm_map_ram area*/
#define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/
#define VMAP_FLAGS_MASK 0x3
struct vmap_block_queue {
spinlock_t lock;
struct list_head free;
/*
* An xarray requires extra memory to be allocated
* dynamically. If that is an issue, we can use an
* rb-tree instead.
*/
struct xarray vmap_blocks;
};
struct vmap_block {
spinlock_t lock;
struct vmap_area *va;
unsigned long free, dirty;
DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
unsigned long dirty_min, dirty_max; /*< dirty range */
struct list_head free_list;
struct rcu_head rcu_head;
struct list_head purge;
unsigned int cpu;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
/*
* In order to have fast access to any vmap_block associated with a
* specific address, we use a hash.
*
* A per-cpu vmap_block_queue is used in both ways: to serialize
* access to the free block chains among CPUs (alloc path) and to
* act as a vmap_block hash (alloc/free paths). That means we
* overload it, since we already have the per-cpu array that is
* used as a hash table. When used as a hash, the 'cpu' passed to
* per_cpu() is not actually a CPU but rather a hash index.
*
* The hash function is addr_to_vb_xa(), which hashes any address
* to the specific index (in the hash) it belongs to. It then uses
* the per_cpu() macro to access the array with the generated index.
*
* An example:
*
*  CPU_1  CPU_2  CPU_0
*    |      |      |
*    V      V      V
* 0     10     20     30     40     50     60
* |------|------|------|------|------|------|...<vmap address space>
*   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
*
* - CPU_1 invokes vm_unmap_ram(6); 6 belongs to the CPU0 zone, thus
*   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
*
* - CPU_2 invokes vm_unmap_ram(11); 11 belongs to the CPU1 zone, thus
*   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
*
* - CPU_0 invokes vm_unmap_ram(20); 20 belongs to the CPU2 zone, thus
*   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
*
* This technique almost always avoids lock contention on insert/remove,
* however the xarray spinlocks protect against any contention that
* remains.
*/
static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
/*
* Please note, nr_cpu_ids corresponds to the highest set bit in
* cpu_possible_mask plus one, i.e. index nr_cpu_ids - 1 always
* refers to a possible CPU, so cpumask_next() is never invoked
* with that index.
*/
if (!cpu_possible(index))
index = cpumask_next(index, cpu_possible_mask);
return &per_cpu(vmap_block_queue, index).vmap_blocks;
}
/*
* We should probably have a fallback mechanism to allocate virtual memory
* out of partially filled vmap blocks. However vmap block sizing should be
* fairly reasonable according to the vmalloc size, so it shouldn't be a
* big problem.
*/
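/*
* Convert an address into the index of the VMAP_BLOCK_SIZE-aligned
* block it belongs to, counted from the start of the vmalloc area.
*/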
static unsigned long addr_to_vb_idx(unsigned long addr)
{
addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
addr /= VMAP_BLOCK_SIZE;
return addr;
}
static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
unsigned long addr;
addr = va_start + (pages_off << PAGE_SHIFT);
BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
return (void *)addr;
}
/**
* new_vmap_block - allocates a new vmap_block and occupies 2^order pages in
* this block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
* @order: how many 2^order pages should be occupied in the newly allocated block
* @gfp_mask: flags for the page level allocator
*
* Return: virtual address in a newly allocated block or ERR_PTR(-errno)
*/
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
struct vmap_block_queue *vbq;
struct vmap_block *vb;
struct vmap_area *va;
struct xarray *xa;
unsigned long vb_idx;
int node, err;
void *vaddr;
node = numa_node_id();
vb = kmalloc_node(sizeof(struct vmap_block),
gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!vb))
return ERR_PTR(-ENOMEM);
va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
VMALLOC_START, VMALLOC_END,
node, gfp_mask,
VMAP_RAM|VMAP_BLOCK, NULL);
if (IS_ERR(va)) {
kfree(vb);
return ERR_CAST(va);
}
vaddr = vmap_block_vaddr(va->va_start, 0);
spin_lock_init(&vb->lock);
vb->va = va;
/* At least something should be left free */
BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
vb->free = VMAP_BBMAP_BITS - (1UL << order);
vb->dirty = 0;
vb->dirty_min = VMAP_BBMAP_BITS;
vb->dirty_max = 0;
bitmap_set(vb->used_map, 0, (1UL << order));
INIT_LIST_HEAD(&vb->free_list);
vb->cpu = raw_smp_processor_id();
xa = addr_to_vb_xa(va->va_start);
vb_idx = addr_to_vb_idx(va->va_start);
err = xa_insert(xa, vb_idx, vb, gfp_mask);
if (err) {
kfree(vb);
free_vmap_area(va);
return ERR_PTR(err);
}
/*
* list_add_tail_rcu() could happen on another CPU
* rather than vb->cpu due to task migration, which
* is safe as list_add_tail_rcu() will ensure the list's
* integrity together with list_for_each_entry_rcu() on
* the read side.
*/
vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
return vaddr;
}
static void free_vmap_block(struct vmap_block *vb)
{
struct vmap_node *vn;
struct vmap_block *tmp;
struct xarray *xa;
xa = addr_to_vb_xa(vb->va->va_start);
tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
BUG_ON(tmp != vb);
vn = addr_to_node(vb->va->va_start);
spin_lock(&vn->busy.lock);
unlink_va(vb->va, &vn->busy.root);
spin_unlock(&vn->busy.lock);
free_vmap_area_noflush(vb->va);
kfree_rcu(vb, rcu_head);
}
static bool purge_fragmented_block(struct vmap_block *vb,
struct list_head *purge_list, bool force_purge)
{
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
vb->dirty == VMAP_BBMAP_BITS)
return false;
/* Don't overeagerly purge usable blocks unless requested */
if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
return false;
/* prevent further allocs after releasing lock */
WRITE_ONCE(vb->free, 0);
/* prevent purging it again */
WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
vb->dirty_min = 0;
vb->dirty_max = VMAP_BBMAP_BITS;
spin_lock(&vbq->lock);
list_del_rcu(&vb->free_list);
spin_unlock(&vbq->lock);
list_add_tail(&vb->purge, purge_list);
return true;
}
static void free_purged_blocks(struct list_head *purge_list)
{
struct vmap_block *vb, *n_vb;
list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
list_del(&vb->purge);
free_vmap_block(vb);
}
}
static void purge_fragmented_blocks(int cpu)
{
LIST_HEAD(purge);
struct vmap_block *vb;
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
rcu_read_lock();
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long free = READ_ONCE(vb->free);
unsigned long dirty = READ_ONCE(vb->dirty);
if (free + dirty != VMAP_BBMAP_BITS ||
dirty == VMAP_BBMAP_BITS)
continue;
spin_lock(&vb->lock);
purge_fragmented_block(vb, &purge, true);
spin_unlock(&vb->lock);
}
rcu_read_unlock();
free_purged_blocks(&purge);
}
static void purge_fragmented_blocks_allcpus(void)
{
int cpu;
for_each_possible_cpu(cpu)
purge_fragmented_blocks(cpu);
}
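/*
* Allocate "size" bytes (at most VMAP_MAX_ALLOC pages) from one of
* this CPU's vmap blocks. If no block on the free list has enough
* room, a new block is allocated.
*/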
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
struct vmap_block_queue *vbq;
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
if (WARN_ON(size == 0)) {
/*
* Allocating 0 bytes isn't what the caller wants since
* get_order(0) returns a funny result. Just warn and
* terminate early.
*/
return ERR_PTR(-EINVAL);
}
order = get_order(size);
rcu_read_lock();
vbq = raw_cpu_ptr(&vmap_block_queue);
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
if (READ_ONCE(vb->free) < (1UL << order))
continue;
spin_lock(&vb->lock);
if (vb->free < (1UL << order)) {
spin_unlock(&vb->lock);
continue;
}
pages_off = VMAP_BBMAP_BITS - vb->free;
vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
WRITE_ONCE(vb->free, vb->free - (1UL << order));
bitmap_set(vb->used_map, pages_off, (1UL << order));
if (vb->free == 0) {
spin_lock(&vbq->lock);
list_del_rcu(&vb->free_list);
spin_unlock(&vbq->lock);
}
spin_unlock(&vb->lock);
break;
}
rcu_read_unlock();
/* Allocate new block if nothing was found */
if (!vaddr)
vaddr = new_vmap_block(order, gfp_mask);
return vaddr;
}
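/*
* Undo a vb_alloc(): clear the corresponding bits in the used map,
* unmap the range and account it as dirty. A block that becomes
* fully dirty is freed entirely.
*/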
static void vb_free(unsigned long addr, unsigned long size)
{
unsigned long offset;
unsigned int order;
struct vmap_block *vb;
struct xarray *xa;
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
flush_cache_vunmap(addr, addr + size);
order = get_order(size);
offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
xa = addr_to_vb_xa(addr);
vb = xa_load(xa, addr_to_vb_idx(addr));
spin_lock(&vb->lock);
bitmap_clear(vb->used_map, offset, (1UL << order));
spin_unlock(&vb->lock);
vunmap_range_noflush(addr, addr + size);
if (debug_pagealloc_enabled_static())
flush_tlb_kernel_range(addr, addr + size);
spin_lock(&vb->lock);
/* Expand the not yet TLB flushed dirty range */
vb->dirty_min = min(vb->dirty_min, offset);
vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
if (vb->dirty == VMAP_BBMAP_BITS) {
BUG_ON(vb->free);
spin_unlock(&vb->lock);
free_vmap_block(vb);
} else
spin_unlock(&vb->lock);
}
static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
LIST_HEAD(purge_list);
int cpu;
if (unlikely(!vmap_initialized))
return;
mutex_lock(&vmap_purge_lock);
for_each_possible_cpu(cpu) {
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
struct vmap_block *vb;
unsigned long idx;
rcu_read_lock();
xa_for_each(&vbq->vmap_blocks, idx, vb) {
spin_lock(&vb->lock);
/*
* Try to purge a fragmented block first. If it's
* not purgeable, check whether there is dirty
* space to be flushed.
*/
if (!purge_fragmented_block(vb, &purge_list, false) &&
vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
unsigned long va_start = vb->va->va_start;
unsigned long s, e;
s = va_start + (vb->dirty_min << PAGE_SHIFT);
e = va_start + (vb->dirty_max << PAGE_SHIFT);
start = min(s, start);
end = max(e, end);
/* Prevent this range from being flushed again */
vb->dirty_min = VMAP_BBMAP_BITS;
vb->dirty_max = 0;
flush = 1;
}
spin_unlock(&vb->lock);
}
rcu_read_unlock();
}
free_purged_blocks(&purge_list);
if (!__purge_vmap_area_lazy(start, end, false) && flush)
flush_tlb_kernel_range(start, end);
mutex_unlock(&vmap_purge_lock);
}
/**
* vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
*
* The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
* to amortize TLB flushing overheads. What this means is that any page you
* have now, may, in a former life, have been mapped into kernel virtual
* address by the vmap layer and so there might be some CPUs with TLB entries
* still referencing that page (additional to the regular 1:1 kernel mapping).
*
* vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
* be sure that none of the pages we have control over will have any aliases
* from the vmap layer.
*/
void vm_unmap_aliases(void)
{
_vm_unmap_aliases(ULONG_MAX, 0, 0);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/**
* vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
* @mem: the pointer returned by vm_map_ram
* @count: the count passed to that vm_map_ram call (cannot unmap partial)
*/
void vm_unmap_ram(const void *mem, unsigned int count)
{
unsigned long size = (unsigned long)count << PAGE_SHIFT;
unsigned long addr = (unsigned long)kasan_reset_tag(mem);
struct vmap_area *va;
might_sleep();
BUG_ON(!addr);
BUG_ON(addr < VMALLOC_START);
BUG_ON(addr > VMALLOC_END);
BUG_ON(!PAGE_ALIGNED(addr));
kasan_poison_vmalloc(mem, size);
if (likely(count <= VMAP_MAX_ALLOC)) {
debug_check_no_locks_freed(mem, size);
vb_free(addr, size);
return;
}
va = find_unlink_vmap_area(addr);
if (WARN_ON_ONCE(!va))
return;
debug_check_no_locks_freed((void *)va->va_start, va_size(va));
free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
* vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
* @pages: an array of pointers to the pages to be mapped
* @count: number of pages
* @node: prefer to allocate data structures on this node
*
* If you use this function for less than VMAP_MAX_ALLOC pages, it could be
* faster than vmap so it's good. But if you mix long-life and short-life
* objects with vm_map_ram(), it could consume lots of address space through
* fragmentation (especially on a 32bit machine). You could see failures in
* the end. Please use this function for short-lived objects.
*
* Returns: a pointer to the address that has been mapped, or %NULL on failure
*/
void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
unsigned long size = (unsigned long)count << PAGE_SHIFT;
unsigned long addr;
void *mem;
if (likely(count <= VMAP_MAX_ALLOC)) {
mem = vb_alloc(size, GFP_KERNEL);
if (IS_ERR(mem))
return NULL;
addr = (unsigned long)mem;
} else {
struct vmap_area *va;
va = alloc_vmap_area(size, PAGE_SIZE,
VMALLOC_START, VMALLOC_END,
node, GFP_KERNEL, VMAP_RAM,
NULL);
if (IS_ERR(va))
return NULL;
addr = va->va_start;
mem = (void *)addr;
}
if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
pages, PAGE_SHIFT) < 0) {
vm_unmap_ram(mem, count);
return NULL;
}
/*
* Mark the pages as accessible, now that they are mapped.
* With hardware tag-based KASAN, marking is skipped for
* non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
return mem;
}
EXPORT_SYMBOL(vm_map_ram);
static struct vm_struct *vmlist __initdata;
static inline unsigned int vm_area_page_order(struct vm_struct *vm)
{
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
return vm->page_order;
#else
return 0;
#endif
}
unsigned int get_vm_area_page_order(struct vm_struct *vm)
{
return vm_area_page_order(vm);
}
static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
{
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
vm->page_order = order;
#else
BUG_ON(order != 0);
#endif
}
/**
* vm_area_add_early - add vmap area early during boot
* @vm: vm_struct to add
*
* This function is used to add fixed kernel vm area to vmlist before
* vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
* should contain proper values and the other fields should be zero.
*
* DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
*/
void __init vm_area_add_early(struct vm_struct *vm)
{
struct vm_struct *tmp, **p;
BUG_ON(vmap_initialized);
for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
if (tmp->addr >= vm->addr) {
BUG_ON(tmp->addr < vm->addr + vm->size);
break;
} else
BUG_ON(tmp->addr + tmp->size > vm->addr);
}
vm->next = *p;
*p = vm;
}
/**
* vm_area_register_early - register vmap area early during boot
* @vm: vm_struct to register
* @align: requested alignment
*
* This function is used to register kernel vm area before
* vmalloc_init() is called. @vm->size and @vm->flags should contain
* proper values on entry and other fields should be zero. On return,
* vm->addr contains the allocated address.
*
* DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
*/
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
unsigned long addr = ALIGN(VMALLOC_START, align);
struct vm_struct *cur, **p;
BUG_ON(vmap_initialized);
for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
if ((unsigned long)cur->addr - addr >= vm->size)
break;
addr = ALIGN((unsigned long)cur->addr + cur->size, align);
}
BUG_ON(addr > VMALLOC_END - vm->size);
vm->addr = (void *)addr;
vm->next = *p;
*p = vm;
kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
}
static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
/*
* Before removing VM_UNINITIALIZED,
* we should make sure that vm has proper values.
* Pair with smp_rmb() in vread_iter() and vmalloc_info_show().
*/
smp_wmb();
vm->flags &= ~VM_UNINITIALIZED;
}
struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long shift, unsigned long flags,
unsigned long start, unsigned long end, int node,
gfp_t gfp_mask, const void *caller)
{
struct vmap_area *va;
struct vm_struct *area;
unsigned long requested_size = size;
BUG_ON(in_interrupt());
size = ALIGN(size, 1ul << shift);
if (unlikely(!size))
return NULL;
if (flags & VM_IOREMAP)
align = 1ul << clamp_t(int, get_count_order_long(size),
PAGE_SHIFT, IOREMAP_MAX_ORDER);
area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!area))
return NULL;
if (!(flags & VM_NO_GUARD))
size += PAGE_SIZE;
area->flags = flags;
area->caller = caller;
area->requested_size = requested_size;
va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
if (IS_ERR(va)) {
kfree(area);
return NULL;
}
/*
* Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
* best-effort approach, as they can be mapped outside of vmalloc code.
* For VM_ALLOC mappings, the pages are marked as accessible after
* getting mapped in __vmalloc_node_range().
* With hardware tag-based KASAN, marking is skipped for
* non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
if (!(flags & VM_ALLOC))
area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
KASAN_VMALLOC_PROT_NORMAL);
return area;
}
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
const void *caller)
{
return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
NUMA_NO_NODE, GFP_KERNEL, caller);
}
/**
* get_vm_area - reserve a contiguous kernel virtual area
* @size: size of the area
* @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
*
* Search for an area of @size in the kernel virtual mapping area,
* and reserve it for our purposes.
*
* Return: the area descriptor on success or %NULL on failure.
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL, caller);
}
/**
* find_vm_area - find a contiguous kernel virtual area
* @addr: base address
*
* Search for the kernel VM area starting at @addr, and return it.
* It is up to the caller to do all required locking to keep the returned
* pointer valid.
*
* Return: the area descriptor on success or %NULL on failure.
*/
struct vm_struct *find_vm_area(const void *addr)
{
struct vmap_area *va;
va = find_vmap_area((unsigned long)addr);
if (!va)
return NULL;
return va->vm;
}
/**
* remove_vm_area - find and remove a contiguous kernel virtual area
* @addr: base address
*
* Search for the kernel VM area starting at @addr, and remove it.
* This function returns the found VM area, but using it is NOT safe
* on SMP machines, except for its size or flags.
*
* Return: the area descriptor on success or %NULL on failure.
*/
struct vm_struct *remove_vm_area(const void *addr)
{
struct vmap_area *va;
struct vm_struct *vm;
might_sleep();
if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
addr))
return NULL;
va = find_unlink_vmap_area((unsigned long)addr);
if (!va || !va->vm)
return NULL;
vm = va->vm;
debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
kasan_free_module_shadow(vm);
kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
free_unmap_vmap_area(va);
return vm;
}
static inline void set_area_direct_map(const struct vm_struct *area,
int (*set_direct_map)(struct page *page))
{
int i;
/* HUGE_VMALLOC passes small pages to set_direct_map */
for (i = 0; i < area->nr_pages; i++)
if (page_address(area->pages[i]))
set_direct_map(area->pages[i]);
}
/*
* Flush the vm mapping and reset the direct map.
*/
static void vm_reset_perms(struct vm_struct *area)
{
unsigned long start = ULONG_MAX, end = 0;
unsigned int page_order = vm_area_page_order(area);
int flush_dmap = 0;
int i;
/*
* Find the start and end range of the direct mappings to make sure that
* the vm_unmap_aliases() flush includes the direct map.
*/
for (i = 0; i < area->nr_pages; i += 1U << page_order) {
unsigned long addr = (unsigned long)page_address(area->pages[i]);
if (addr) {
unsigned long page_size;
page_size = PAGE_SIZE << page_order;
start = min(addr, start);
end = max(addr + page_size, end);
flush_dmap = 1;
}
}
/*
* Set direct map to something invalid so that it won't be cached if
* there are any accesses after the TLB flush, then flush the TLB and
* reset the direct map permissions to the default.
*/
set_area_direct_map(area, set_direct_map_invalid_noflush);
_vm_unmap_aliases(start, end, flush_dmap);
set_area_direct_map(area, set_direct_map_default_noflush);
}
static void delayed_vfree_work(struct work_struct *w)
{
struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
struct llist_node *t, *llnode;
llist_for_each_safe(llnode, t, llist_del_all(&p->list))
vfree(llnode);
}
/**
* vfree_atomic - release memory allocated by vmalloc()
* @addr: memory base address
*
* This one is just like vfree() but can be called in any atomic context
* except NMIs.
*/
void vfree_atomic(const void *addr)
{
struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
BUG_ON(in_nmi());
kmemleak_free(addr);
/*
* Use raw_cpu_ptr() because this can be called from preemptible
* context. Preemption is absolutely fine here, because the llist_add()
* implementation is lockless, so it works even if we are adding to
* another cpu's list. schedule_work() should be fine with this too.
*/
if (addr && llist_add((struct llist_node *)addr, &p->list))
schedule_work(&p->wq);
}
/**
* vfree - Release memory allocated by vmalloc()
* @addr: Memory base address
*
* Free the virtually contiguous memory area starting at @addr, as obtained
* from one of the vmalloc() family of APIs. This will usually also free the
* physical memory underlying the virtual allocation, but that memory is
* reference counted, so it will not be freed until the last user goes away.
*
* If @addr is NULL, no operation is performed.
*
* Context:
* May sleep if called *not* from interrupt context.
* Must not be called in NMI context (strictly speaking, it could be
* if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
* conventions for vfree() arch-dependent would be a really bad idea).
*/
void vfree(const void *addr)
{
struct vm_struct *vm;
int i;
if (unlikely(in_interrupt())) {
vfree_atomic(addr);
return;
}
BUG_ON(in_nmi());
kmemleak_free(addr);
might_sleep();
if (!addr)
return;
vm = remove_vm_area(addr);
if (unlikely(!vm)) {
WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
addr);
return;
}
if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
vm_reset_perms(vm);
/* All pages of vm should be charged to same memcg, so use first one. */
if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES))
mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages);
for (i = 0; i < vm->nr_pages; i++) {
struct page *page = vm->pages[i];
BUG_ON(!page);
/*
* High-order allocs for huge vmallocs are split, so
* can be freed as an array of order-0 allocations
*/
__free_page(page);
cond_resched();
}
if (!(vm->flags & VM_MAP_PUT_PAGES))
atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
kvfree(vm->pages);
kfree(vm);
}
EXPORT_SYMBOL(vfree);
/**
* vunmap - release virtual mapping obtained by vmap()
* @addr: memory base address
*
* Free the virtually contiguous memory area starting at @addr,
* which was created from the page array passed to vmap().
*
* Must not be called in interrupt context.
*/
void vunmap(const void *addr)
{
struct vm_struct *vm;
BUG_ON(in_interrupt());
might_sleep();
if (!addr)
return;
vm = remove_vm_area(addr);
if (unlikely(!vm)) {
WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
addr);
return;
}
kfree(vm);
}
EXPORT_SYMBOL(vunmap);
/**
* vmap - map an array of pages into virtually contiguous space
* @pages: array of page pointers
* @count: number of pages to map
* @flags: vm_area->flags
* @prot: page protection for the mapping
*
* Maps @count pages from @pages into contiguous kernel virtual space.
* If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
* (which must be kmalloc or vmalloc memory) and one reference per page in it
* are transferred from the caller to vmap(), and will be freed / dropped when
* vfree() is called on the return value.
*
* Return: the address of the area or %NULL on failure
*/
void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot)
{
struct vm_struct *area;
unsigned long addr;
unsigned long size; /* In bytes */
might_sleep();
if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
return NULL;
/*
* Your top guard is someone else's bottom guard. Not having a top
* guard compromises someone else's mappings too.
*/
if (WARN_ON_ONCE(flags & VM_NO_GUARD))
flags &= ~VM_NO_GUARD;
if (count > totalram_pages())
return NULL;
size = (unsigned long)count << PAGE_SHIFT;
area = get_vm_area_caller(size, flags, __builtin_return_address(0));
if (!area)
return NULL;
addr = (unsigned long)area->addr;
if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
pages, PAGE_SHIFT) < 0) {
vunmap(area->addr);
return NULL;
}
if (flags & VM_MAP_PUT_PAGES) {
area->pages = pages;
area->nr_pages = count;
}
return area->addr;
}
EXPORT_SYMBOL(vmap);
#ifdef CONFIG_VMAP_PFN
struct vmap_pfn_data {
unsigned long *pfns;
pgprot_t prot;
unsigned int idx;
};
static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
{
struct vmap_pfn_data *data = private;
unsigned long pfn = data->pfns[data->idx];
pte_t ptent;
if (WARN_ON_ONCE(pfn_valid(pfn)))
return -EINVAL;
ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
set_pte_at(&init_mm, addr, pte, ptent);
data->idx++;
return 0;
}
/**
* vmap_pfn - map an array of PFNs into virtually contiguous space
* @pfns: array of PFNs
* @count: number of pages to map
* @prot: page protection for the mapping
*
* Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
* the start address of the mapping.
*/
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
{
struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
struct vm_struct *area;
area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
__builtin_return_address(0));
if (!area)
return NULL;
if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
count * PAGE_SIZE, vmap_pfn_apply, &data)) {
free_vm_area(area);
return NULL;
}
flush_cache_vmap((unsigned long)area->addr,
(unsigned long)area->addr + count * PAGE_SIZE);
return area->addr;
}
EXPORT_SYMBOL_GPL(vmap_pfn);
#endif /* CONFIG_VMAP_PFN */
static inline unsigned int
vm_area_alloc_pages(gfp_t gfp, int nid,
unsigned int order, unsigned int nr_pages, struct page **pages)
{
unsigned int nr_allocated = 0;
struct page *page;
int i;
/*
* For order-0 pages we make use of the bulk allocator. If
* the page array ends up only partly populated, or not
* populated at all, due to failures, fall back to the single
* page allocator, which is more permissive.
*/
if (!order) {
while (nr_allocated < nr_pages) {
unsigned int nr, nr_pages_request;
/*
* The maximum allowed request is hard-coded to 100
* pages per call. That is done in order to prevent a
* long preemption-off scenario in the bulk allocator,
* so the range is [1:100].
*/
nr_pages_request = min(100U, nr_pages - nr_allocated);
/*
* Memory allocation should consider mempolicy: we can't
* blindly use the nearest node when nid == NUMA_NO_NODE,
* otherwise memory may be allocated on only one node,
* even though the mempolicy wants to allocate memory by
* interleaving.
*/
if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
nr = alloc_pages_bulk_mempolicy_noprof(gfp,
nr_pages_request,
pages + nr_allocated);
else
nr = alloc_pages_bulk_node_noprof(gfp, nid,
nr_pages_request,
pages + nr_allocated);
nr_allocated += nr;
/*
* If zero pages, or only some of them, were obtained,
* fall back to the single page allocator.
*/
if (nr != nr_pages_request)
break;
}
}
/* High-order pages or fallback path if "bulk" fails. */
while (nr_allocated < nr_pages) {
if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
break;
if (nid == NUMA_NO_NODE)
page = alloc_pages_noprof(gfp, order);
else
page = alloc_pages_node_noprof(nid, gfp, order);
if (unlikely(!page))
break;
/*
* High-order allocations must be able to be treated as
* independent small pages by callers (as they can with
* small-page vmallocs). Some drivers do their own refcounting
* on vmalloc_to_page() pages, some use page->mapping,
* page->lru, etc.
*/
if (order)
split_page(page, order);
/*
* Careful, we allocate and map page-order pages, but
* tracking is done per PAGE_SIZE page so as to keep the
* vm_struct APIs independent of the physical/mapped size.
*/
for (i = 0; i < (1U << order); i++)
pages[nr_allocated + i] = page + i;
nr_allocated += 1U << order;
}
return nr_allocated;
}
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, unsigned int page_shift,
int node)
{
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
bool nofail = gfp_mask & __GFP_NOFAIL;
unsigned long addr = (unsigned long)area->addr;
unsigned long size = get_vm_area_size(area);
unsigned long array_size;
unsigned int nr_small_pages = size >> PAGE_SHIFT;
unsigned int page_order;
unsigned int flags;
int ret;
array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
gfp_mask |= __GFP_HIGHMEM;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
area->caller);
} else {
area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
}
if (!area->pages) {
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, failed to allocated page array size %lu",
nr_small_pages * PAGE_SIZE, array_size);
free_vm_area(area);
return NULL;
}
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
page_order = vm_area_page_order(area);
/*
* High-order nofail allocations are really expensive and
* potentially dangerous (premature OOM, disruptive reclaim,
* compaction etc.).
*
* Please note, __vmalloc_node_range_noprof() falls back
* to order-0 pages if the high-order attempt is unsuccessful.
*/
area->nr_pages = vm_area_alloc_pages((page_order ?
gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
node, page_order, nr_small_pages, area->pages);
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
/* All pages of the vm should be charged to the same memcg, so use the first one. */
if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
area->nr_pages);
/*
* If not enough pages were obtained to accomplish an
* allocation request, free them via vfree() if any.
*/
if (area->nr_pages != nr_small_pages) {
/*
* vm_area_alloc_pages() can fail due to insufficient memory, but
* also due to:
*
* - a pending fatal signal
* - insufficient huge page-order pages
*
* Since we always retry allocations at order-0 in the huge page
* case, a warning for either is spurious.
*/
if (!fatal_signal_pending(current) && page_order == 0)
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, failed to allocate pages",
area->nr_pages * PAGE_SIZE);
goto fail;
}
/*
* Page table allocations ignore the external gfp mask, so
* enforce it via the memalloc scope API.
*/
if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
flags = memalloc_nofs_save();
else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
flags = memalloc_noio_save();
do {
ret = vmap_pages_range(addr, addr + size, prot, area->pages,
page_shift);
if (nofail && (ret < 0))
schedule_timeout_uninterruptible(1);
} while (nofail && (ret < 0));
if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
memalloc_nofs_restore(flags);
else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
memalloc_noio_restore(flags);
if (ret < 0) {
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, failed to map pages",
area->nr_pages * PAGE_SIZE);
goto fail;
}
return area->addr;
fail:
vfree(area->addr);
return NULL;
}
/**
* __vmalloc_node_range - allocate virtually contiguous memory
* @size: allocation size
* @align: desired alignment
* @start: vm area range start
* @end: vm area range end
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
* @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
* @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address
*
* Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Please note that the full set of gfp
* flags is not supported; GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
* supported.
* Zone modifiers are not supported. Of the reclaim modifiers,
* __GFP_DIRECT_RECLAIM is required (i.e. GFP_NOWAIT is not supported)
* and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
* __GFP_RETRY_MAYFAIL are not supported).
*
* __GFP_NOWARN can be used to suppress failure messages.
*
* Map them into contiguous kernel virtual space, using a pagetable
* protection of @prot.
*
* Return: the address of the area or %NULL on failure
*/
void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller)
{
struct vm_struct *area;
void *ret;
kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
unsigned long original_align = align;
unsigned int shift = PAGE_SHIFT;
if (WARN_ON_ONCE(!size))
return NULL;
if ((size >> PAGE_SHIFT) > totalram_pages()) {
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, exceeds total pages",
size);
return NULL;
}
if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
/*
* Try huge pages. Only try for PAGE_KERNEL allocations;
* others, like modules, don't yet expect huge pages in
* their allocations because apply_to_page_range() does
* not support them.
*/
if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
shift = PMD_SHIFT;
else
shift = arch_vmap_pte_supported_shift(size);
align = max(original_align, 1UL << shift);
}
again:
area = __get_vm_area_node(size, align, shift, VM_ALLOC |
VM_UNINITIALIZED | vm_flags, start, end, node,
gfp_mask, caller);
if (!area) {
bool nofail = gfp_mask & __GFP_NOFAIL;
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, vm_struct allocation failed%s",
size, (nofail) ? ". Retrying." : "");
if (nofail) {
schedule_timeout_uninterruptible(1);
goto again;
}
goto fail;
}
/*
* Prepare arguments for __vmalloc_area_node() and
* kasan_unpoison_vmalloc().
*/
if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
if (kasan_hw_tags_enabled()) {
/*
* Modify protection bits to allow tagging.
* This must be done before mapping.
*/
prot = arch_vmap_pgprot_tagged(prot);
/*
* Skip page_alloc poisoning and zeroing for physical
* pages backing VM_ALLOC mapping. Memory is instead
* poisoned and zeroed by kasan_unpoison_vmalloc().
*/
gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
}
/* Take note that the mapping is PAGE_KERNEL. */
kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
}
/* Allocate physical pages and map them into vmalloc space. */
ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
if (!ret)
goto fail;
/*
* Mark the pages as accessible, now that they are mapped.
* The condition for setting KASAN_VMALLOC_INIT should complement the
* one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
* to make sure that memory is initialized under the same conditions.
* Tag-based KASAN modes only assign tags to normal non-executable
* allocations, see __kasan_unpoison_vmalloc().
*/
kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
(gfp_mask & __GFP_SKIP_ZERO))
kasan_flags |= KASAN_VMALLOC_INIT;
/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
* flag. It means that vm_struct is not fully initialized.
* Now, it is fully initialized, so remove this flag here.
*/
clear_vm_uninitialized_flag(area);
if (!(vm_flags & VM_DEFER_KMEMLEAK))
kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);
return area->addr;
fail:
if (shift > PAGE_SHIFT) {
shift = PAGE_SHIFT;
align = original_align;
goto again;
}
return NULL;
}
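/*
* Illustrative sketch (not part of the original file): the kernel-doc above
* says GFP_KERNEL, GFP_NOFS and GFP_NOIO are the supported bases. A caller
* needing NOFS semantics can either pass GFP_NOFS here or, preferably, wrap
* a plain GFP_KERNEL allocation in the memalloc scope API. The function name
* is made up and the __vmalloc_node_range() wrapper is assumed to come from
* the alloc_hooks machinery in <linux/vmalloc.h>.
*/
static void *example_vmalloc_nofs(unsigned long size)
{
unsigned int flags = memalloc_nofs_save();
void *p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
memalloc_nofs_restore(flags);
return p;
}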
/**
* __vmalloc_node - allocate virtually contiguous memory
* @size: allocation size
* @align: desired alignment
* @gfp_mask: flags for the page level allocator
* @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address
*
* Allocate enough pages to cover @size from the page level allocator with
* @gfp_mask flags. Map them into contiguous kernel virtual space.
*
* The reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
* and __GFP_NOFAIL - are not supported.
*
* Any use of gfp flags outside of GFP_KERNEL should be discussed
* with the mm people first.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
gfp_t gfp_mask, int node, const void *caller)
{
return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
gfp_mask, PAGE_KERNEL, 0, node, caller);
}
/*
* This export exists only for performance analysis and stress testing
* of vmalloc. It is required by the vmalloc test module and must not
* be used for anything else.
*/
#ifdef CONFIG_TEST_VMALLOC_MODULE
EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
#endif
void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
{
return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc_noprof);
/**
* vmalloc - allocate virtually contiguous memory
* @size: allocation size
*
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
*
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_noprof(unsigned long size)
{
return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_noprof);
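/*
* Illustrative sketch (not part of the original file): a typical caller
* pairs vmalloc() with vfree(). The vmalloc() wrapper around
* vmalloc_noprof() is assumed to come from the alloc_hooks machinery in
* <linux/vmalloc.h>; the example function name is made up.
*/
static int example_vmalloc_usage(unsigned long nbytes)
{
void *buf;
/* May sleep; only GFP_KERNEL-style contexts are valid here. */
buf = vmalloc(nbytes);
if (!buf)
return -ENOMEM;
memset(buf, 0xa5, nbytes);
/* Always release with vfree(), never kfree(). */
vfree(buf);
return 0;
}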
/**
* vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages
* @size: allocation size
* @gfp_mask: flags for the page level allocator
* @node: node to use for allocation or NUMA_NO_NODE
*
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
* If @size is greater than or equal to PMD_SIZE, huge pages may be
* used to back the memory.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
{
return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
node, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof);
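/*
* Illustrative sketch (not part of the original file): a large, long-lived
* table that benefits from huge-page mappings, in the spirit of the big
* hash table users of this API. vmalloc_huge_node() is assumed to be the
* alloc_hooks wrapper of the function above; the function name is made up.
*/
static void *example_alloc_big_table(unsigned long nbytes)
{
/* Sizes >= PMD_SIZE may be backed by huge pages; smaller ones fall back. */
return vmalloc_huge_node(nbytes, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE);
}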
/**
* vzalloc - allocate virtually contiguous memory with zero fill
* @size: allocation size
*
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
* The memory allocated is set to zero.
*
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vzalloc_noprof(unsigned long size)
{
return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc_noprof);
/**
* vmalloc_user - allocate zeroed virtually contiguous memory for userspace
* @size: allocation size
*
* The resulting memory area is zeroed so it can be mapped to userspace
* without leaking data.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_user_noprof(unsigned long size)
{
return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
VM_USERMAP, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_user_noprof);
/**
* vmalloc_node - allocate memory on a specific node
* @size: allocation size
* @node: numa node
*
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
*
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_node_noprof(unsigned long size, int node)
{
return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node_noprof);
/**
* vzalloc_node - allocate memory on a specific node with zero fill
* @size: allocation size
* @node: numa node
*
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
* The memory allocated is set to zero.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vzalloc_node_noprof(unsigned long size, int node)
{
return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc_node_noprof);
/**
* vrealloc_node_align_noprof - reallocate virtually contiguous memory; contents
* remain unchanged
* @p: object to reallocate memory for
* @size: the size to reallocate
* @align: requested alignment
* @flags: the flags for the page level allocator
* @nid: node number of the target node
*
* If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc_XXX(). If @size
* is 0 and @p is not a %NULL pointer, the object pointed to is freed.
*
* If the caller wants the new memory to be on a specific node *only*,
* the __GFP_THISNODE flag should be set; otherwise the function will try to
* avoid reallocation and may disregard the specified @nid.
*
* If __GFP_ZERO logic is requested, callers must ensure that, starting with the
* initial memory allocation, every subsequent call to this API for the same
* memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
* __GFP_ZERO is not fully honored by this API.
*
* Requesting an alignment that is bigger than the alignment of the existing
* allocation will fail.
*
* In any case, the contents of the object pointed to are preserved up to the
* lesser of the new and old sizes.
*
* This function must not be called concurrently with itself or vfree() for the
* same memory allocation.
*
* Return: pointer to the allocated memory; %NULL if @size is zero or in case of
* failure
*/
void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
gfp_t flags, int nid)
{
struct vm_struct *vm = NULL;
size_t alloced_size = 0;
size_t old_size = 0;
void *n;
if (!size) {
vfree(p);
return NULL;
}
if (p) {
vm = find_vm_area(p);
if (unlikely(!vm)) {
WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
return NULL;
}
alloced_size = get_vm_area_size(vm);
old_size = vm->requested_size;
if (WARN(alloced_size < old_size,
"vrealloc() has mismatched area vs requested sizes (%p)\n", p))
return NULL;
if (WARN(!IS_ALIGNED((unsigned long)p, align),
"will not reallocate with a bigger alignment (0x%lx)\n", align))
return NULL;
if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
nid != page_to_nid(vmalloc_to_page(p)))
goto need_realloc;
}
/*
* TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
* would be a good heuristic for when to shrink the vm_area?
*/
if (size <= old_size) {
/* Zero out "freed" memory, potentially for future realloc. */
if (want_init_on_free() || want_init_on_alloc(flags))
memset((void *)p + size, 0, old_size - size);
vm->requested_size = size;
kasan_poison_vmalloc(p + size, old_size - size);
return (void *)p;
}
/*
* We already have the bytes available in the allocation; use them.
*/
if (size <= alloced_size) {
kasan_unpoison_vmalloc(p + old_size, size - old_size,
KASAN_VMALLOC_PROT_NORMAL);
/*
* No need to zero memory here, as unused memory will have
* already been zeroed at initial allocation time or during
* realloc shrink time.
*/
vm->requested_size = size;
return (void *)p;
}
need_realloc:
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0));
if (!n)
return NULL;
if (p) {
memcpy(n, p, old_size);
vfree(p);
}
return n;
}
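/*
* Illustrative sketch (not part of the original file): growing a vmalloc'ed
* buffer with vrealloc(). A vrealloc() wrapper taking (p, size, flags) is
* assumed to be provided by <linux/vmalloc.h> on top of the function above;
* the example function name is made up.
*/
static void *example_grow_buffer(void *old, size_t new_size)
{
void *new;
/*
* Keep __GFP_ZERO on every call if the initial allocation used it,
* as required by the kernel-doc above.
*/
new = vrealloc(old, new_size, GFP_KERNEL | __GFP_ZERO);
if (!new)
return NULL; /* @old is left untouched on failure */
return new;
}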
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
* 64b systems should always have either DMA or DMA32 zones. For others
* GFP_DMA32 should do the right thing and use the normal zone.
*/
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif
/**
* vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
* @size: allocation size
*
* Allocate enough 32bit PA addressable pages to cover @size from the
* page level allocator and map them into contiguous kernel virtual space.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_32_noprof(unsigned long size)
{
return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_noprof);
/**
* vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
* @size: allocation size
*
* The resulting memory area is 32bit addressable and zeroed so it can be
* mapped to userspace without leaking data.
*
* Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_32_user_noprof(unsigned long size)
{
return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
VM_USERMAP, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user_noprof);
/*
* Atomically zero bytes in the iterator.
*
* Returns the number of zeroed bytes.
*/
static size_t zero_iter(struct iov_iter *iter, size_t count)
{
size_t remains = count;
while (remains > 0) {
size_t num, copied;
num = min_t(size_t, remains, PAGE_SIZE);
copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
remains -= copied;
if (copied < num)
break;
}
return count - remains;
}
/*
* Small helper routine that copies contents from addr to the iterator.
* If a page is not present, the corresponding bytes are zero-filled.
*
* Returns the number of copied bytes.
*/
static size_t aligned_vread_iter(struct iov_iter *iter,
const char *addr, size_t count)
{
size_t remains = count;
struct page *page;
while (remains > 0) {
unsigned long offset, length;
size_t copied = 0;
offset = offset_in_page(addr);
length = PAGE_SIZE - offset;
if (length > remains)
length = remains;
page = vmalloc_to_page(addr);
/*
* Safe access to this _mapped_ area would require a lock. But
* taking a lock here would add overhead to vmalloc()/vfree()
* calls just for this rarely used _debug_ interface. Instead,
* we use a local mapping via copy_page_to_iter_nofault() and
* accept a small overhead in this access function.
*/
if (page)
copied = copy_page_to_iter_nofault(page, offset,
length, iter);
else
copied = zero_iter(iter, length);
addr += copied;
remains -= copied;
if (copied != length)
break;
}
return count - remains;
}
/*
* Read from a vm_map_ram region of memory.
*
* Returns the number of copied bytes.
*/
static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
size_t count, unsigned long flags)
{
char *start;
struct vmap_block *vb;
struct xarray *xa;
unsigned long offset;
unsigned int rs, re;
size_t remains, n;
/*
* If the area was created directly by the vm_map_ram() interface,
* without being subdivided and delegated to a vmap_block for
* management, handle it here.
*/
if (!(flags & VMAP_BLOCK))
return aligned_vread_iter(iter, addr, count);
remains = count;
/*
* The area is split into regions and tracked with a vmap_block. Read
* out each region and zero-fill the holes between regions.
*/
xa = addr_to_vb_xa((unsigned long) addr);
vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
if (!vb)
goto finished_zero;
spin_lock(&vb->lock);
if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
spin_unlock(&vb->lock);
goto finished_zero;
}
for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
size_t copied;
if (remains == 0)
goto finished;
start = vmap_block_vaddr(vb->va->va_start, rs);
if (addr < start) {
size_t to_zero = min_t(size_t, start - addr, remains);
size_t zeroed = zero_iter(iter, to_zero);
addr += zeroed;
remains -= zeroed;
if (remains == 0 || zeroed != to_zero)
goto finished;
}
/* Reading may start from the middle of a used region. */
offset = offset_in_page(addr);
n = ((re - rs + 1) << PAGE_SHIFT) - offset;
if (n > remains)
n = remains;
copied = aligned_vread_iter(iter, start + offset, n);
addr += copied;
remains -= copied;
if (copied != n)
goto finished;
}
spin_unlock(&vb->lock);
finished_zero:
/* zero-fill the remaining dirty or free regions */
return count - remains + zero_iter(iter, remains);
finished:
/* We couldn't copy/zero everything */
spin_unlock(&vb->lock);
return count - remains;
}
/**
* vread_iter() - read vmalloc area in a safe way to an iterator.
* @iter: the iterator to which data should be written.
* @addr: vm address.
* @count: number of bytes to be read.
*
* This function checks that @addr is a valid vmalloc'ed area and
* copies data from that area to the given iterator. If the memory range
* [addr...addr+count) includes some valid address, data is copied to
* the proper place in @iter. Memory holes are zero-filled.
* IOREMAP areas are treated as memory holes and no copy is done.
*
* If [addr...addr+count) has no intersection with any live
* vm_struct area, 0 is returned. @iter must target kernel memory.
*
* Note: In the usual case vread_iter() is never necessary because the
* caller knows the vmalloc() area is valid and can use memcpy().
* This is for routines which have to access the vmalloc area without
* any information, such as /proc/kcore.
*
* Return: number of bytes for which @addr and @iter should be advanced
* (same number as @count) or %0 if [addr...addr+count) doesn't
* include any intersection with a valid vmalloc area
*/
long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
{
struct vmap_node *vn;
struct vmap_area *va;
struct vm_struct *vm;
char *vaddr;
size_t n, size, flags, remains;
unsigned long next;
addr = kasan_reset_tag(addr);
/* Don't allow overflow */
if ((unsigned long) addr + count < count)
count = -(unsigned long) addr;
remains = count;
vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
if (!vn)
goto finished_zero;
/* no intersection with any live vmap_area */
if ((unsigned long)addr + remains <= va->va_start)
goto finished_zero;
do {
size_t copied;
if (remains == 0)
goto finished;
vm = va->vm;
flags = va->flags & VMAP_FLAGS_MASK;
/*
* VMAP_BLOCK indicates a sub-type of vm_map_ram area and must
* be set together with VMAP_RAM.
*/
WARN_ON(flags == VMAP_BLOCK);
if (!vm && !flags)
goto next_va;
if (vm && (vm->flags & VM_UNINITIALIZED))
goto next_va;
/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
smp_rmb();
vaddr = (char *) va->va_start;
size = vm ? get_vm_area_size(vm) : va_size(va);
if (addr >= vaddr + size)
goto next_va;
if (addr < vaddr) {
size_t to_zero = min_t(size_t, vaddr - addr, remains);
size_t zeroed = zero_iter(iter, to_zero);
addr += zeroed;
remains -= zeroed;
if (remains == 0 || zeroed != to_zero)
goto finished;
}
n = vaddr + size - addr;
if (n > remains)
n = remains;
if (flags & VMAP_RAM)
copied = vmap_ram_vread_iter(iter, addr, n, flags);
else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
copied = aligned_vread_iter(iter, addr, n);
else /* IOREMAP | SPARSE area is treated as memory hole */
copied = zero_iter(iter, n);
addr += copied;
remains -= copied;
if (copied != n)
goto finished;
next_va:
next = va->va_end;
spin_unlock(&vn->busy.lock);
} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
finished_zero:
if (vn)
spin_unlock(&vn->busy.lock);
/* zero-fill memory holes */
return count - remains + zero_iter(iter, remains);
finished:
/* Nothing remains, or we couldn't copy/zero everything. */
if (vn)
spin_unlock(&vn->busy.lock);
return count - remains;
}
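/*
* Illustrative sketch (not part of the original file): reading a vmalloc
* range into a kernel buffer through an iov_iter, similar to what
* /proc/kcore does. iov_iter_kvec() and ITER_DEST are assumed from
* <linux/uio.h>; the function name is made up.
*/
static long example_read_vmalloc(void *dst, const char *vm_addr, size_t len)
{
struct iov_iter iter;
struct kvec kv = { .iov_base = dst, .iov_len = len };
iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
/* Holes and IOREMAP/SPARSE areas come back zero-filled. */
return vread_iter(&iter, vm_addr, len);
}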
/**
* remap_vmalloc_range_partial - map vmalloc pages to userspace
* @vma: vma to cover
* @uaddr: target user address to start at
* @kaddr: virtual address of vmalloc kernel memory
* @pgoff: offset from @kaddr to start at
* @size: size of map area
*
* Returns: 0 for success, -Exxx on failure
*
* This function checks that @kaddr is a valid vmalloc'ed area,
* and that it is big enough to cover the range starting at
* @uaddr in @vma. It returns failure if those criteria aren't
* met.
*
* Similar to remap_pfn_range() (see mm/memory.c)
*/
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
void *kaddr, unsigned long pgoff,
unsigned long size)
{
struct vm_struct *area;
unsigned long off;
unsigned long end_index;
if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
return -EINVAL;
size = PAGE_ALIGN(size);
if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
return -EINVAL;
area = find_vm_area(kaddr);
if (!area)
return -EINVAL;
if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
return -EINVAL;
if (check_add_overflow(size, off, &end_index) ||
end_index > get_vm_area_size(area))
return -EINVAL;
kaddr += off;
do {
struct page *page = vmalloc_to_page(kaddr);
int ret;
ret = vm_insert_page(vma, uaddr, page);
if (ret)
return ret;
uaddr += PAGE_SIZE;
kaddr += PAGE_SIZE;
size -= PAGE_SIZE;
} while (size > 0);
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
/**
* remap_vmalloc_range - map vmalloc pages to userspace
* @vma: vma to cover (map full range of vma)
* @addr: vmalloc memory
* @pgoff: number of pages into addr before first page to map
*
* Returns: 0 for success, -Exxx on failure
*
* This function checks that @addr is a valid vmalloc'ed area, and
* that it is big enough to cover the vma. It returns failure if
* those criteria aren't met.
*
* Similar to remap_pfn_range() (see mm/memory.c)
*/
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff)
{
return remap_vmalloc_range_partial(vma, vma->vm_start,
addr, pgoff,
vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
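/*
* Illustrative sketch (not part of the original file): the classic pattern
* for exposing a vmalloc'ed buffer to userspace - allocate it with
* vmalloc_user() (which sets VM_USERMAP and zeroes the memory) and map it
* from a driver's ->mmap() handler with remap_vmalloc_range(). The handler
* name and the use of file->private_data are made up; a real driver would
* include <linux/fs.h> for struct file.
*/
static int example_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
void *kbuf = file->private_data; /* allocated earlier with vmalloc_user() */
return remap_vmalloc_range(vma, kbuf, vma->vm_pgoff);
}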
void free_vm_area(struct vm_struct *area)
{
struct vm_struct *ret;
ret = remove_vm_area(area->addr);
BUG_ON(ret != area);
kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
return rb_entry_safe(n, struct vmap_area, rb_node);
}
/**
* pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
* @addr: target address
*
* Returns: the vmap_area if it is found. If there is no such area,
* the closest preceding (in reverse order) vmap_area is returned,
* i.e. one with va->va_start < addr && va->va_end < addr, or NULL
* if there is no area at all before @addr.
*/
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
struct vmap_area *va, *tmp;
struct rb_node *n;
n = free_vmap_area_root.rb_node;
va = NULL;
while (n) {
tmp = rb_entry(n, struct vmap_area, rb_node);
if (tmp->va_start <= addr) {
va = tmp;
if (tmp->va_end >= addr)
break;
n = n->rb_right;
} else {
n = n->rb_left;
}
}
return va;
}
/**
* pvm_determine_end_from_reverse - find the highest aligned address
* of free block below VMALLOC_END
* @va:
* in - the VA from which we start the search (in reverse order);
* out - the VA with the highest aligned end address.
* @align: alignment for required highest address
*
* Returns: determined end address within vmap_area
*/
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
unsigned long addr;
if (likely(*va)) {
list_for_each_entry_from_reverse((*va),
&free_vmap_area_list, list) {
addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
if ((*va)->va_start < addr)
return addr;
}
}
return 0;
}
/**
* pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
* @offsets: array containing offset of each area
* @sizes: array containing size of each area
* @nr_vms: the number of areas to allocate
* @align: alignment, all entries in @offsets and @sizes must be aligned to this
*
* Returns: kmalloc'd vm_struct pointer array pointing to allocated
* vm_structs on success, %NULL on failure
*
* Percpu allocator wants to use congruent vm areas so that it can
* maintain the offsets among percpu areas. This function allocates
* congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
* be scattered pretty far, distance between two areas easily going up
* to gigabytes. To avoid interacting with regular vmallocs, these
* areas are allocated from top.
*
* Despite its complicated look, this allocator is rather simple. It
* does everything top-down and scans free blocks from the end looking
* for a matching base. While scanning, if any of the areas do not fit,
* the base address is pulled down to make that area fit. Scanning is
* repeated until all the areas fit, then all necessary data structures
* are inserted and the result is returned.
*/
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align)
{
const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
struct vmap_area **vas, *va;
struct vm_struct **vms;
int area, area2, last_area, term_area;
unsigned long base, start, size, end, last_end, orig_start, orig_end;
bool purged = false;
/* verify parameters and allocate data structures */
BUG_ON(offset_in_page(align) || !is_power_of_2(align));
for (last_area = 0, area = 0; area < nr_vms; area++) {
start = offsets[area];
end = start + sizes[area];
/* is everything aligned properly? */
BUG_ON(!IS_ALIGNED(offsets[area], align));
BUG_ON(!IS_ALIGNED(sizes[area], align));
/* detect the area with the highest address */
if (start > offsets[last_area])
last_area = area;
for (area2 = area + 1; area2 < nr_vms; area2++) {
unsigned long start2 = offsets[area2];
unsigned long end2 = start2 + sizes[area2];
BUG_ON(start2 < end && start < end2);
}
}
last_end = offsets[last_area] + sizes[last_area];
if (vmalloc_end - vmalloc_start < last_end) {
WARN_ON(true);
return NULL;
}
vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
if (!vas || !vms)
goto err_free2;
for (area = 0; area < nr_vms; area++) {
vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
if (!vas[area] || !vms[area])
goto err_free;
}
retry:
spin_lock(&free_vmap_area_lock);
/* start scanning - we scan from the top, begin with the last area */
area = term_area = last_area;
start = offsets[area];
end = start + sizes[area];
va = pvm_find_va_enclose_addr(vmalloc_end);
base = pvm_determine_end_from_reverse(&va, align) - end;
while (true) {
/*
* base might have underflowed, add last_end before
* comparing.
*/
if (base + last_end < vmalloc_start + last_end)
goto overflow;
/*
* Fitting base has not been found.
*/
if (va == NULL)
goto overflow;
/*
* If required width exceeds current VA block, move
* base downwards and then recheck.
*/
if (base + end > va->va_end) {
base = pvm_determine_end_from_reverse(&va, align) - end;
term_area = area;
continue;
}
/*
* If this VA does not fit, move base downwards and recheck.
*/
if (base + start < va->va_start) {
va = node_to_va(rb_prev(&va->rb_node));
base = pvm_determine_end_from_reverse(&va, align) - end;
term_area = area;
continue;
}
/*
* This area fits, move on to the previous one. If
* the previous one is the terminal one, we're done.
*/
area = (area + nr_vms - 1) % nr_vms;
if (area == term_area)
break;
start = offsets[area];
end = start + sizes[area];
va = pvm_find_va_enclose_addr(base + end);
}
/* we've found a fitting base, insert all va's */
for (area = 0; area < nr_vms; area++) {
int ret;
start = base + offsets[area];
size = sizes[area];
va = pvm_find_va_enclose_addr(start);
if (WARN_ON_ONCE(va == NULL))
/* It is a BUG(), but trigger recovery instead. */
goto recovery;
ret = va_clip(&free_vmap_area_root,
&free_vmap_area_list, va, start, size);
if (WARN_ON_ONCE(unlikely(ret)))
/* It is a BUG(), but trigger recovery instead. */
goto recovery;
/* Allocated area. */
va = vas[area];
va->va_start = start;
va->va_end = start + size;
}
spin_unlock(&free_vmap_area_lock);
/* populate the kasan shadow space */
for (area = 0; area < nr_vms; area++) {
if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
goto err_free_shadow;
}
/* insert all vm's */
for (area = 0; area < nr_vms; area++) {
struct vmap_node *vn = addr_to_node(vas[area]->va_start);
spin_lock(&vn->busy.lock);
insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
pcpu_get_vm_areas);
spin_unlock(&vn->busy.lock);
}
/*
* Mark allocated areas as accessible. Do it now as a best-effort
* approach, as they can be mapped outside of vmalloc code.
* With hardware tag-based KASAN, marking is skipped for
* non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
for (area = 0; area < nr_vms; area++)
vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
kfree(vas);
return vms;
recovery:
/*
* Remove the previously allocated areas. There is no
* need to remove them from the busy tree, because they
* are only inserted there in the final step, once
* pcpu_get_vm_areas() has succeeded.
*/
while (area--) {
orig_start = vas[area]->va_start;
orig_end = vas[area]->va_end;
va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
&free_vmap_area_list);
if (va)
kasan_release_vmalloc(orig_start, orig_end,
va->va_start, va->va_end,
KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
vas[area] = NULL;
}
overflow:
spin_unlock(&free_vmap_area_lock);
if (!purged) {
reclaim_and_purge_vmap_areas();
purged = true;
/* Before "retry", check if we recover. */
for (area = 0; area < nr_vms; area++) {
if (vas[area])
continue;
vas[area] = kmem_cache_zalloc(
vmap_area_cachep, GFP_KERNEL);
if (!vas[area])
goto err_free;
}
goto retry;
}
err_free:
for (area = 0; area < nr_vms; area++) {
if (vas[area])
kmem_cache_free(vmap_area_cachep, vas[area]);
kfree(vms[area]);
}
err_free2:
kfree(vas);
kfree(vms);
return NULL;
err_free_shadow:
spin_lock(&free_vmap_area_lock);
/*
* We release all the vmalloc shadows, even the ones for regions that
* hadn't been successfully added. This relies on kasan_release_vmalloc
* being able to tolerate this case.
*/
for (area = 0; area < nr_vms; area++) {
orig_start = vas[area]->va_start;
orig_end = vas[area]->va_end;
va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
&free_vmap_area_list);
if (va)
kasan_release_vmalloc(orig_start, orig_end,
va->va_start, va->va_end,
KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
vas[area] = NULL;
kfree(vms[area]);
}
spin_unlock(&free_vmap_area_lock);
kfree(vas);
kfree(vms);
return NULL;
}
/**
* pcpu_free_vm_areas - free vmalloc areas for percpu allocator
* @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
* @nr_vms: the number of allocated areas
*
* Free vm_structs and the array allocated by pcpu_get_vm_areas().
*/
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
int i;
for (i = 0; i < nr_vms; i++)
free_vm_area(vms[i]);
kfree(vms);
}
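/*
* Illustrative sketch (not part of the original file): reserving two
* congruent areas the way a percpu first-chunk setup might, then releasing
* them again. The offsets, sizes and alignment are made up but respect the
* constraints in the kernel-doc above (everything aligned to @align, no
* overlapping areas).
*/
static void example_pcpu_congruent_areas(void)
{
unsigned long offsets[2] = { 0, PMD_SIZE };
size_t sizes[2] = { PMD_SIZE, PMD_SIZE };
struct vm_struct **vms;
vms = pcpu_get_vm_areas(offsets, sizes, 2, PMD_SIZE);
if (!vms)
return;
/* vms[0]->addr and vms[1]->addr now share a common base layout. */
pcpu_free_vm_areas(vms, 2);
}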
#endif /* CONFIG_SMP */
#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
const void *caller;
struct vm_struct *vm;
struct vmap_area *va;
struct vmap_node *vn;
unsigned long addr;
unsigned int nr_pages;
addr = PAGE_ALIGN((unsigned long) object);
vn = addr_to_node(addr);
if (!spin_trylock(&vn->busy.lock))
return false;
va = __find_vmap_area(addr, &vn->busy.root);
if (!va || !va->vm) {
spin_unlock(&vn->busy.lock);
return false;
}
vm = va->vm;
addr = (unsigned long) vm->addr;
caller = vm->caller;
nr_pages = vm->nr_pages;
spin_unlock(&vn->busy.lock);
pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
nr_pages, addr, caller);
return true;
}
#endif
#ifdef CONFIG_PROC_FS
/*
* Print the number of pages allocated on each memory node.
*
* This function may only be called if CONFIG_NUMA is enabled
* and the VM_UNINITIALIZED bit in v->flags is not set.
*/
static void show_numa_info(struct seq_file *m, struct vm_struct *v,
unsigned int *counters)
{
unsigned int nr;
unsigned int step = 1U << vm_area_page_order(v);
if (!counters)
return;
memset(counters, 0, nr_node_ids * sizeof(unsigned int));
for (nr = 0; nr < v->nr_pages; nr += step)
counters[page_to_nid(v->pages[nr])] += step;
for_each_node_state(nr, N_HIGH_MEMORY)
if (counters[nr])
seq_printf(m, " N%u=%u", nr, counters[nr]);
}
static void show_purge_info(struct seq_file *m)
{
struct vmap_node *vn;
struct vmap_area *va;
for_each_vmap_node(vn) {
spin_lock(&vn->lazy.lock);
list_for_each_entry(va, &vn->lazy.head, list) {
seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
(void *)va->va_start, (void *)va->va_end,
va_size(va));
}
spin_unlock(&vn->lazy.lock);
}
}
static int vmalloc_info_show(struct seq_file *m, void *p)
{
struct vmap_node *vn;
struct vmap_area *va;
struct vm_struct *v;
unsigned int *counters;
if (IS_ENABLED(CONFIG_NUMA))
counters = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
for_each_vmap_node(vn) {
spin_lock(&vn->busy.lock);
list_for_each_entry(va, &vn->busy.head, list) {
if (!va->vm) {
if (va->flags & VMAP_RAM)
seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
(void *)va->va_start, (void *)va->va_end,
va_size(va));
continue;
}
v = va->vm;
if (v->flags & VM_UNINITIALIZED)
continue;
/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
smp_rmb();
seq_printf(m, "0x%pK-0x%pK %7ld",
v->addr, v->addr + v->size, v->size);
if (v->caller)
seq_printf(m, " %pS", v->caller);
if (v->nr_pages)
seq_printf(m, " pages=%d", v->nr_pages);
if (v->phys_addr)
seq_printf(m, " phys=%pa", &v->phys_addr);
if (v->flags & VM_IOREMAP)
seq_puts(m, " ioremap");
if (v->flags & VM_SPARSE)
seq_puts(m, " sparse");
if (v->flags & VM_ALLOC)
seq_puts(m, " vmalloc");
if (v->flags & VM_MAP)
seq_puts(m, " vmap");
if (v->flags & VM_USERMAP)
seq_puts(m, " user");
if (v->flags & VM_DMA_COHERENT)
seq_puts(m, " dma-coherent");
if (is_vmalloc_addr(v->pages))
seq_puts(m, " vpages");
if (IS_ENABLED(CONFIG_NUMA))
show_numa_info(m, v, counters);
seq_putc(m, '\n');
}
spin_unlock(&vn->busy.lock);
}
/*
* As a final step, dump "unpurged" areas.
*/
show_purge_info(m);
if (IS_ENABLED(CONFIG_NUMA))
kfree(counters);
return 0;
}
static int __init proc_vmalloc_init(void)
{
proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
return 0;
}
module_init(proc_vmalloc_init);
#endif
static void __init vmap_init_free_space(void)
{
unsigned long vmap_start = 1;
const unsigned long vmap_end = ULONG_MAX;
struct vmap_area *free;
struct vm_struct *busy;
/*
* B F B B B F
* -|-----|.....|-----|-----|-----|.....|-
* | The KVA space |
* |<--------------------------------->|
*/
for (busy = vmlist; busy; busy = busy->next) {
if ((unsigned long) busy->addr - vmap_start > 0) {
free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
if (!WARN_ON_ONCE(!free)) {
free->va_start = vmap_start;
free->va_end = (unsigned long) busy->addr;
insert_vmap_area_augment(free, NULL,
&free_vmap_area_root,
&free_vmap_area_list);
}
}
vmap_start = (unsigned long) busy->addr + busy->size;
}
if (vmap_end - vmap_start > 0) {
free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
if (!WARN_ON_ONCE(!free)) {
free->va_start = vmap_start;
free->va_end = vmap_end;
insert_vmap_area_augment(free, NULL,
&free_vmap_area_root,
&free_vmap_area_list);
}
}
}
static void vmap_init_nodes(void)
{
struct vmap_node *vn;
int i;
#if BITS_PER_LONG == 64
/*
* The maximum number of nodes is fixed and bound to 128, so the
* scale factor is 1 for systems where the number of cores is less
* than or equal to that threshold.
*
* As for NUMA awareness: for bigger systems, for example multi-socket
* NUMA machines that can end up with thousands of cores in total,
* "sub-numa-clustering" should be added.
*
* In that case a NUMA domain is considered a single entity with
* dedicated sub-nodes in it, each describing one group or set of
* cores. Per-domain purging and per-domain balancing would then
* need to be added as well.
*/
int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
if (n > 1) {
vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT);
if (vn) {
/* Node partition is 16 pages. */
vmap_zone_size = (1 << 4) * PAGE_SIZE;
nr_vmap_nodes = n;
vmap_nodes = vn;
} else {
pr_err("Failed to allocate an array. Disable a node layer\n");
}
}
#endif
for_each_vmap_node(vn) {
vn->busy.root = RB_ROOT;
INIT_LIST_HEAD(&vn->busy.head);
spin_lock_init(&vn->busy.lock);
vn->lazy.root = RB_ROOT;
INIT_LIST_HEAD(&vn->lazy.head);
spin_lock_init(&vn->lazy.lock);
for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
INIT_LIST_HEAD(&vn->pool[i].head);
WRITE_ONCE(vn->pool[i].len, 0);
}
spin_lock_init(&vn->pool_lock);
}
}
static unsigned long
vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned long count = 0;
struct vmap_node *vn;
int i;
for_each_vmap_node(vn) {
for (i = 0; i < MAX_VA_SIZE_PAGES; i++)
count += READ_ONCE(vn->pool[i].len);
}
return count ? count : SHRINK_EMPTY;
}
static unsigned long
vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct vmap_node *vn;
for_each_vmap_node(vn)
decay_va_pool_node(vn, true);
return SHRINK_STOP;
}
void __init vmalloc_init(void)
{
struct shrinker *vmap_node_shrinker;
struct vmap_area *va;
struct vmap_node *vn;
struct vm_struct *tmp;
int i;
/*
* Create the cache for vmap_area objects.
*/
vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
for_each_possible_cpu(i) {
struct vmap_block_queue *vbq;
struct vfree_deferred *p;
vbq = &per_cpu(vmap_block_queue, i);
spin_lock_init(&vbq->lock);
INIT_LIST_HEAD(&vbq->free);
p = &per_cpu(vfree_deferred, i);
init_llist_head(&p->list);
INIT_WORK(&p->wq, delayed_vfree_work);
xa_init(&vbq->vmap_blocks);
}
/*
* Set up the nodes before importing the vmlist.
*/
vmap_init_nodes();
/* Import existing vmlist entries. */
for (tmp = vmlist; tmp; tmp = tmp->next) {
va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
if (WARN_ON_ONCE(!va))
continue;
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
va->vm = tmp;
vn = addr_to_node(va->va_start);
insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
}
/*
* Now we can initialize a free vmap space.
*/
vmap_init_free_space();
vmap_initialized = true;
vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
if (!vmap_node_shrinker) {
pr_err("Failed to allocate vmap-node shrinker!\n");
return;
}
vmap_node_shrinker->count_objects = vmap_node_shrink_count;
vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
shrinker_register(vmap_node_shrinker);
}
/*
* include/net/tipc.h: Include file for TIPC message header routines
*
* Copyright (c) 2017 Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _TIPC_HDR_H
#define _TIPC_HDR_H
#include <linux/random.h>
#define KEEPALIVE_MSG_MASK 0x0e080000 /* LINK_PROTOCOL + MSG_IS_KEEPALIVE */
struct tipc_basic_hdr {
__be32 w[4];
};
static inline __be32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr)
{
u32 w0 = ntohl(hdr->w[0]);
bool keepalive_msg = (w0 & KEEPALIVE_MSG_MASK) == KEEPALIVE_MSG_MASK;
__be32 key;
/* Return source node identity as key */
if (likely(!keepalive_msg))
return hdr->w[3];
/* Spread PROBE/PROBE_REPLY messages across the cores */
get_random_bytes(&key, sizeof(key));
return key;
}
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* internal.h - printk internal definitions
*/
#include <linux/console.h>
#include <linux/percpu.h>
#include <linux/types.h>
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
struct ctl_table;
void __init printk_sysctl_init(void);
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#else
#define printk_sysctl_init() do { } while (0)
#endif
#define con_printk(lvl, con, fmt, ...) \
printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt), \
(con->flags & CON_NBCON) ? "" : "legacy ", \
(con->flags & CON_BOOT) ? "boot" : "", \
con->name, con->index, ##__VA_ARGS__)
/*
* Identify if legacy printing is forced in a dedicated kthread. If
* true, all printing via console lock occurs within a dedicated
* legacy printer thread. The only exception is on panic, after the
* nbcon consoles have had their chance to print the panic messages
* first.
*/
#ifdef CONFIG_PREEMPT_RT
# define force_legacy_kthread() (true)
#else
# define force_legacy_kthread() (false)
#endif
#ifdef CONFIG_PRINTK
#ifdef CONFIG_PRINTK_CALLER
#define PRINTK_PREFIX_MAX 48
#else
#define PRINTK_PREFIX_MAX 32
#endif
/*
* The maximum size of a formatted record, i.e. with the prefix added
* per line, including dropped-message notices or the extended message
* format.
*/
#define PRINTK_MESSAGE_MAX 2048
/* the maximum size allowed to be reserved for a record */
#define PRINTKRB_RECORD_MAX 1024
/* Flags for a single printk record. */
enum printk_info_flags {
/* always show on console, ignore console_loglevel */
LOG_FORCE_CON = 1,
LOG_NEWLINE = 2, /* text ended with a newline */
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
struct printk_ringbuffer;
struct dev_printk_info;
extern struct printk_ringbuffer *prb;
extern bool printk_kthreads_running;
extern bool printk_kthreads_ready;
extern bool debug_non_panic_cpus;
__printf(4, 0)
int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args);
__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
void __printk_safe_enter(void);
void __printk_safe_exit(void);
bool printk_percpu_data_ready(void);
#define printk_safe_enter_irqsave(flags) \
do { \
local_irq_save(flags); \
__printk_safe_enter(); \
} while (0)
#define printk_safe_exit_irqrestore(flags) \
do { \
__printk_safe_exit(); \
local_irq_restore(flags); \
} while (0)
void defer_console_output(void);
bool is_printk_legacy_deferred(void);
bool is_printk_force_console(void);
u16 printk_parse_prefix(const char *text, int *level,
enum printk_info_flags *flags);
void console_lock_spinning_enable(void);
int console_lock_spinning_disable_and_check(int cookie);
u64 nbcon_seq_read(struct console *con);
void nbcon_seq_force(struct console *con, u64 seq);
bool nbcon_alloc(struct console *con);
void nbcon_free(struct console *con);
enum nbcon_prio nbcon_get_default_prio(void);
void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie, bool use_atomic);
bool nbcon_kthread_create(struct console *con);
void nbcon_kthread_stop(struct console *con);
void nbcon_kthreads_wake(void);
/*
* Check if the given console is currently capable and allowed to print
* records. Note that this function does not consider the current context,
* which can also play a role in deciding if @con can be used to print
* records.
*/
static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
{
if (!(flags & CON_ENABLED))
return false;
if ((flags & CON_SUSPENDED))
return false;
if (flags & CON_NBCON) {
/* The write_atomic() callback is optional. */
if (use_atomic && !con->write_atomic)
return false;
/*
* For the !use_atomic case, @printk_kthreads_running is not
* checked because the write_thread() callback is also used
* via the legacy loop when the printer threads are not
* available.
*/
} else {
if (!con->write)
return false;
}
/*
* Console drivers may assume that per-cpu resources have been
* allocated. So unless they're explicitly marked as being able to
* cope (CON_ANYTIME) don't call them until this CPU is officially up.
*/
if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
return false;
return true;
}
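/*
* Illustrative sketch (not part of the original file): how a flush loop
* might filter consoles, loosely following the legacy console_flush_all()
* pattern. The console_srcu_* helpers and for_each_console_srcu() are
* assumed from <linux/console.h>; the function name is made up.
*/
static inline bool example_any_usable_console(bool use_atomic)
{
struct console *con;
bool usable = false;
int cookie;
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
if (console_is_usable(con, flags, use_atomic)) {
usable = true;
break;
}
}
console_srcu_read_unlock(cookie);
return usable;
}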
/**
* nbcon_kthread_wake - Wake up a console printing thread
* @con: Console to operate on
*/
static inline void nbcon_kthread_wake(struct console *con)
{
/*
* Guarantee any new records can be seen by tasks preparing to wait
* before this context checks if the rcuwait is empty.
*
* The full memory barrier in rcuwait_wake_up() pairs with the full
* memory barrier within set_current_state() of
* ___rcuwait_wait_event(), which is called after prepare_to_rcuwait()
* adds the waiter but before it has checked the wait condition.
*
* This pairs with nbcon_kthread_func:A.
*/
rcuwait_wake_up(&con->rcuwait); /* LMM(nbcon_kthread_wake:A) */
}
#else
#define PRINTK_PREFIX_MAX 0
#define PRINTK_MESSAGE_MAX 0
#define PRINTKRB_RECORD_MAX 0
#define printk_kthreads_running (false)
#define printk_kthreads_ready (false)
/*
* In !PRINTK builds we still export console_sem
* semaphore and some of the console functions (console_unlock() etc.), so
* printk-safe must preserve the existing local IRQ guarantees.
*/
#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
static inline bool printk_percpu_data_ready(void) { return false; }
static inline void defer_console_output(void) { }
static inline bool is_printk_legacy_deferred(void) { return false; }
static inline u64 nbcon_seq_read(struct console *con) { return 0; }
static inline void nbcon_seq_force(struct console *con, u64 seq) { }
static inline bool nbcon_alloc(struct console *con) { return false; }
static inline void nbcon_free(struct console *con) { }
static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
static inline void nbcon_atomic_flush_pending(void) { }
static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie, bool use_atomic) { return false; }
static inline void nbcon_kthread_wake(struct console *con) { }
static inline void nbcon_kthreads_wake(void) { }
static inline bool console_is_usable(struct console *con, short flags,
bool use_atomic) { return false; }
#endif /* CONFIG_PRINTK */
extern bool have_boot_console;
extern bool have_nbcon_console;
extern bool have_legacy_console;
extern bool legacy_allow_panic_sync;
/**
* struct console_flush_type - Define available console flush methods
* @nbcon_atomic: Flush directly using nbcon_atomic() callback
* @nbcon_offload: Offload flush to printer thread
* @legacy_direct: Call the legacy loop in this context
* @legacy_offload: Offload the legacy loop into IRQ or legacy thread
*
* Note that the legacy loop also flushes the nbcon consoles.
*/
struct console_flush_type {
bool nbcon_atomic;
bool nbcon_offload;
bool legacy_direct;
bool legacy_offload;
};
/*
* Identify which console flushing methods should be used in the context of
* the caller.
*/
static inline void printk_get_console_flush_type(struct console_flush_type *ft)
{
memset(ft, 0, sizeof(*ft));
switch (nbcon_get_default_prio()) {
case NBCON_PRIO_NORMAL:
if (have_nbcon_console && !have_boot_console) {
if (printk_kthreads_running)
ft->nbcon_offload = true;
else
ft->nbcon_atomic = true;
}
/* Legacy consoles are flushed directly when possible. */
if (have_legacy_console || have_boot_console) {
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
else
ft->legacy_offload = true;
}
break;
case NBCON_PRIO_EMERGENCY:
if (have_nbcon_console && !have_boot_console)
ft->nbcon_atomic = true;
/* Legacy consoles are flushed directly when possible. */
if (have_legacy_console || have_boot_console) {
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
else
ft->legacy_offload = true;
}
break;
case NBCON_PRIO_PANIC:
/*
* In panic, the nbcon consoles will print directly. But
* this is only allowed if there are no boot consoles.
*/
if (have_nbcon_console && !have_boot_console)
ft->nbcon_atomic = true;
if (have_legacy_console || have_boot_console) {
/*
* This is the same decision as NBCON_PRIO_NORMAL
* except that offloading never occurs in panic.
*
* Note that console_flush_on_panic() will flush
* legacy consoles anyway, even if unsafe.
*/
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
/*
* In panic, if nbcon atomic printing occurs,
* the legacy consoles must remain silent until
* explicitly allowed.
*/
if (ft->nbcon_atomic && !legacy_allow_panic_sync)
ft->legacy_direct = false;
}
break;
default:
WARN_ON_ONCE(1);
break;
}
}
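/*
* Illustrative sketch (not part of the original file): the typical pattern
* after storing a printk record - query the flush type for this context and
* kick the matching flush mechanisms. The function name is made up;
* nbcon_atomic_flush_pending(), nbcon_kthreads_wake() and
* defer_console_output() are the helpers declared above.
*/
static inline void example_flush_after_store(void)
{
struct console_flush_type ft;
printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_offload)
defer_console_output();
/* ft.legacy_direct would be handled by calling into the legacy loop. */
}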
extern struct printk_buffers printk_shared_pbufs;
/**
* struct printk_buffers - Buffers to read/format/output printk messages.
* @outbuf: After formatting, contains text to output.
* @scratchbuf: Used as temporary ringbuffer reading and string-print space.
*/
struct printk_buffers {
char outbuf[PRINTK_MESSAGE_MAX];
char scratchbuf[PRINTKRB_RECORD_MAX];
};
/**
* struct printk_message - Container for a prepared printk message.
* @pbufs: printk buffers used to prepare the message.
* @outbuf_len: The length of prepared text in @pbufs->outbuf to output. This
* does not count the terminator. A value of 0 means there is
* nothing to output and this record should be skipped.
* @seq: The sequence number of the record used for @pbufs->outbuf.
* @dropped: The number of dropped records from reading @seq.
*/
struct printk_message {
struct printk_buffers *pbufs;
unsigned int outbuf_len;
u64 seq;
unsigned long dropped;
};
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
bool is_extended, bool may_suppress);
#ifdef CONFIG_PRINTK
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
void console_prepend_replay(struct printk_message *pmsg);
#endif
#ifdef CONFIG_SMP
bool is_printk_cpu_sync_owner(void);
#else
static inline bool is_printk_cpu_sync_owner(void) { return false; }
#endif
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/errseq.h>
#include <linux/log2.h>
/*
* An errseq_t is a way of recording errors in one place, and allowing any
* number of "subscribers" to tell whether it has changed since a previous
* point where it was sampled.
*
* It's implemented as an unsigned 32-bit value. The low order bits are
* designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits
* are used as a counter. This is done with atomics instead of locking so that
* these functions can be called from any context.
*
* The general idea is for consumers to sample an errseq_t value. That value
* can later be used to tell whether any new errors have occurred since that
* sampling was done.
*
* Note that there is a risk of collisions if new errors are being recorded
* frequently, since we have so few bits to use as a counter.
*
* To mitigate this, one bit is used as a flag to tell whether the value has
* been sampled since a new value was recorded. That allows us to avoid bumping
* the counter if no one has sampled it since the last time an error was
* recorded.
*
* A new errseq_t should always be zeroed out. An errseq_t value of all zeroes
* is the special (but common) case where there has never been an error. An all
* zero value thus serves as the "epoch" if one wishes to know whether there
* has ever been an error set since it was first initialized.
*/
/* The low bits are designated for error code (max of MAX_ERRNO) */
#define ERRSEQ_SHIFT (ilog2(MAX_ERRNO) + 1)
/* This bit is used as a flag to indicate whether the value has been seen */
#define ERRSEQ_SEEN (1 << ERRSEQ_SHIFT)
/* Leverage macro ERRSEQ_SEEN to define errno mask macro here */
#define ERRNO_MASK (ERRSEQ_SEEN - 1)
/* The lowest bit of the counter */
#define ERRSEQ_CTR_INC (1 << (ERRSEQ_SHIFT + 1))
/**
* errseq_set - set an errseq_t for later reporting
* @eseq: errseq_t field that should be set
* @err: error to set (must be between -1 and -MAX_ERRNO)
*
* This function sets the error in @eseq, and increments the sequence counter
* if the last sequence was sampled at some point in the past.
*
* Any error set will always overwrite an existing error.
*
* Return: The previous value, primarily for debugging purposes. The
* return value should not be used as a previously sampled value in later
* calls as it will not have the SEEN flag set.
*/
errseq_t errseq_set(errseq_t *eseq, int err)
{
errseq_t cur, old;
/*
* Ensure the error code actually fits where we want it to go. If it
* doesn't then just throw a warning and don't record anything. We
* also don't accept zero here as that would effectively clear a
* previous error.
*/
old = READ_ONCE(*eseq);
if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO),
"err = %d\n", err))
return old;
for (;;) {
errseq_t new;
/* Clear out error bits and set new error */
new = (old & ~(ERRNO_MASK | ERRSEQ_SEEN)) | -err;
/* Only increment if someone has looked at it */
if (old & ERRSEQ_SEEN)
new += ERRSEQ_CTR_INC;
/* If there would be no change, then call it done */
if (new == old) {
cur = new;
break;
}
/* Try to swap the new value into place */
cur = cmpxchg(eseq, old, new);
/*
* Call it success if we did the swap or someone else beat us
* to it for the same value.
*/
if (likely(cur == old || cur == new))
break;
/* Raced with an update, try again */
old = cur;
}
return cur;
}
EXPORT_SYMBOL(errseq_set);
/**
* errseq_sample() - Grab current errseq_t value.
* @eseq: Pointer to errseq_t to be sampled.
*
* This function allows callers to initialise their errseq_t variable.
* If the error has been "seen", new callers will not see an old error.
* If there is an unseen error in @eseq, the caller of this function will
* see it the next time it checks for an error.
*
* Context: Any context.
* Return: The current errseq value.
*/
errseq_t errseq_sample(errseq_t *eseq)
{
errseq_t old = READ_ONCE(*eseq);
/* If nobody has seen this error yet, then we can be the first. */
if (!(old & ERRSEQ_SEEN))
old = 0;
return old;
}
EXPORT_SYMBOL(errseq_sample);
/**
* errseq_check() - Has an error occurred since a particular sample point?
* @eseq: Pointer to errseq_t value to be checked.
* @since: Previously-sampled errseq_t from which to check.
*
* Grab the value that eseq points to, and see if it has changed @since
* the given value was sampled. The @since value is not advanced, so there
* is no need to mark the value as seen.
*
* Return: The latest error set in the errseq_t or 0 if it hasn't changed.
*/
int errseq_check(errseq_t *eseq, errseq_t since)
{
errseq_t cur = READ_ONCE(*eseq);
if (likely(cur == since))
return 0;
return -(cur & ERRNO_MASK);
}
EXPORT_SYMBOL(errseq_check);
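/*
 * Usage sketch (reader side; editorial illustration, hypothetical names):
 * a consumer samples the errseq_t when it opens a context and later asks
 * whether any error has been recorded since that point:
 *
 *     errseq_t since = errseq_sample(&m->wb_err);
 *     ...
 *     if (errseq_check(&m->wb_err, since))
 *         pr_warn("writeback failed since we last looked\n");
 */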
/**
* errseq_check_and_advance() - Check an errseq_t and advance to current value.
* @eseq: Pointer to value being checked and reported.
* @since: Pointer to previously-sampled errseq_t to check against and advance.
*
* Grab the eseq value, and see whether it matches the value that @since
* points to. If it does, then just return 0.
*
* If it doesn't, then the value has changed. Set the "seen" flag, and try to
* swap it into place as the new eseq value. Then, set that value as the new
* "since" value, and return whatever the error portion is set to.
*
* Note that no locking is provided here for concurrent updates to the "since"
* value. The caller must provide that if necessary. Because of this, callers
* may want to do a lockless errseq_check before taking the lock and calling
* this.
*
* Return: Negative errno if one has been stored, or 0 if no new error has
* occurred.
*/
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
{
int err = 0;
errseq_t old, new;
/*
* Most callers will want to use the inline wrapper to check this,
* so that the common case of no error is handled without needing
* to take the lock that protects the "since" value.
*/
old = READ_ONCE(*eseq);
if (old != *since) {
/*
* Set the flag and try to swap it into place if it has
* changed.
*
* We don't care about the outcome of the swap here. If the
* swap doesn't occur, then it has either been updated by a
* writer who is altering the value in some way (updating
* counter or resetting the error), or another reader who is
* just setting the "seen" flag. Either outcome is OK, and we
* can advance "since" and return an error based on what we
* have.
*/
new = old | ERRSEQ_SEEN;
if (new != old)
cmpxchg(eseq, old, new);
*since = new;
err = -(new & ERRNO_MASK);
}
return err;
}
EXPORT_SYMBOL(errseq_check_and_advance);
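/*
 * Usage sketch (editorial illustration, hypothetical names): the "since"
 * cursor lives in a per-consumer object and is only advanced under that
 * object's lock, typically after the cheap lockless check suggested above:
 *
 *     static int my_report_error(struct my_file *f, struct my_mapping *m)
 *     {
 *         int err = 0;
 *
 *         if (errseq_check(&m->wb_err, READ_ONCE(f->since))) {
 *             spin_lock(&f->lock);
 *             err = errseq_check_and_advance(&m->wb_err, &f->since);
 *             spin_unlock(&f->lock);
 *         }
 *         return err;
 *     }
 */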
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/main.c - Where the driver meets power management.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
* The driver model core calls device_pm_add() when a device is registered.
* This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
* controlling device power management will also be added.
*
* A separate list is used for keeping track of power info, because the power
* domain dependencies may differ from the ancestral dependencies that the
* subsystem list maintains.
*/
#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
/*
* The entries in dpm_list are in depth-first order, simply because
* children are guaranteed to be discovered after parents, and are
* inserted at the back of the list on discovery.
*
* Since device_pm_add() may be called with a device lock held, we must
* never try to acquire a device lock while holding dpm_list_mtx.
*/
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
/**
* pm_hibernate_is_recovering - Check whether the system is recovering from a hibernation error.
*
* Used to tell whether dev_pm_ops.thaw() is being invoked for the normal
* hibernation case or while recovering from an error.
*
* Return: true in the error-recovery case, false otherwise.
*/
bool pm_hibernate_is_recovering(void)
{
return pm_transition.event == PM_EVENT_RECOVER;
}
EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
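/*
 * Usage sketch (hypothetical driver code, not part of this file): a ->thaw()
 * callback may want to do less work when it only runs because hibernation
 * failed and the system is merely being brought back up:
 *
 *     static int my_thaw(struct device *dev)
 *     {
 *         if (pm_hibernate_is_recovering())
 *             return my_minimal_reinit(dev);   // hypothetical helper
 *         return my_full_thaw(dev);            // hypothetical helper
 *     }
 */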
static const char *pm_verb(int event)
{
switch (event) {
case PM_EVENT_SUSPEND:
return "suspend";
case PM_EVENT_RESUME:
return "resume";
case PM_EVENT_FREEZE:
return "freeze";
case PM_EVENT_QUIESCE:
return "quiesce";
case PM_EVENT_HIBERNATE:
return "hibernate";
case PM_EVENT_THAW:
return "thaw";
case PM_EVENT_RESTORE:
return "restore";
case PM_EVENT_RECOVER:
return "recover";
default:
return "(unknown PM event)";
}
}
/**
* device_pm_sleep_init - Initialize system suspend-related device fields.
* @dev: Device object being initialized.
*/
void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
dev->power.is_noirq_suspended = false;
dev->power.is_late_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
INIT_LIST_HEAD(&dev->power.entry);
}
/**
* device_pm_lock - Lock the list of active devices used by the PM core.
*/
void device_pm_lock(void)
{
mutex_lock(&dpm_list_mtx);
}
/**
* device_pm_unlock - Unlock the list of active devices used by the PM core.
*/
void device_pm_unlock(void)
{
mutex_unlock(&dpm_list_mtx);
}
/**
* device_pm_add - Add a device to the PM core's list of active devices.
* @dev: Device to add to the list.
*/
void device_pm_add(struct device *dev)
{
/* Skip PM setup/initialization. */
if (device_pm_not_required(dev))
return;
pr_debug("Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
/**
* device_pm_remove - Remove a device from the PM core's list of active devices.
* @dev: Device to be removed from the list.
*/
void device_pm_remove(struct device *dev)
{
if (device_pm_not_required(dev))
return;
pr_debug("Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
device_pm_check_callbacks(dev);
}
/**
* device_pm_move_before - Move device in the PM core's list of active devices.
* @deva: Device to move in dpm_list.
* @devb: Device @deva should come before.
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
pr_debug("Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
* device_pm_move_after - Move device in the PM core's list of active devices.
* @deva: Device to move in dpm_list.
* @devb: Device @deva should come after.
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
pr_debug("Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
list_move(&deva->power.entry, &devb->power.entry);
}
/**
* device_pm_move_last - Move device to end of the PM core's list of devices.
* @dev: Device to move in dpm_list.
*/
void device_pm_move_last(struct device *dev)
{
pr_debug("Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
if (!pm_print_times_enabled)
return 0;
dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
void *cb, int error)
{
ktime_t rettime;
if (!pm_print_times_enabled)
return;
rettime = ktime_get();
dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
(unsigned long long)ktime_us_delta(rettime, calltime));
}
/**
* dpm_wait - Wait for a PM operation to complete.
* @dev: Device to wait for.
* @async: If unset, wait only if the device's power.async_suspend flag is set.
*/
static void dpm_wait(struct device *dev, bool async)
{
if (!dev)
return;
if (async || (pm_async_enabled && dev->power.async_suspend))
wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
dpm_wait(dev, *((bool *)async_ptr));
return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* If the supplier goes away right after we've checked the link to it,
* we'll wait for its completion to change the state, but that's fine,
* because the only things that will block as a result are the SRCU
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
!device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
}
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
struct device *parent;
/*
* If the device is resumed asynchronously and the parent's callback
* deletes both the device and the parent itself, the parent object may
* be freed while this function is running, so avoid that by reference
* counting the parent once more unless the device has been deleted
* already (in which case return right away).
*/
mutex_lock(&dpm_list_mtx);
if (!device_pm_initialized(dev)) {
mutex_unlock(&dpm_list_mtx);
return false;
}
parent = get_device(dev->parent);
mutex_unlock(&dpm_list_mtx);
dpm_wait(parent, async);
put_device(parent);
dpm_wait_for_suppliers(dev, async);
/*
* If the parent's callback has deleted the device, attempting to resume
* it would be invalid, so avoid doing that then.
*/
return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* The status of a device link can only be changed from "dormant" by a
* probe, but that cannot happen during system suspend/resume. In
* theory it can change to "dormant" at that time, but then it is
* reasonable to wait for the target device anyway (eg. if it goes
* away, it's better to wait for it to go away completely and then
* continue instead of trying to continue in parallel with its
* unregistration).
*/
dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
!device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
dpm_wait_for_children(dev, async);
dpm_wait_for_consumers(dev, async);
}
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
return ops->suspend;
case PM_EVENT_RESUME:
return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw;
case PM_EVENT_RESTORE:
return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
}
return NULL;
}
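/*
 * Selection sketch (hypothetical driver, editorial illustration): given a
 * dev_pm_ops like the one below, pm_op() returns my_suspend for
 * PM_EVENT_SUSPEND, my_freeze for PM_EVENT_FREEZE/QUIESCE, my_thaw for
 * PM_EVENT_THAW/RECOVER, and so on:
 *
 *     static const struct dev_pm_ops my_pm_ops = {
 *         .suspend  = my_suspend,
 *         .resume   = my_resume,
 *         .freeze   = my_freeze,
 *         .thaw     = my_thaw,
 *         .poweroff = my_poweroff,
 *         .restore  = my_restore,
 *     };
 */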
/**
* pm_late_early_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* Runtime PM is disabled for the target device while the callback returned
* here runs.
*/
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
pm_message_t state)
{
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
return ops->suspend_late;
case PM_EVENT_RESUME:
return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw_early;
case PM_EVENT_RESTORE:
return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
}
return NULL;
}
/**
* pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The driver of the target device will not receive interrupts while the
* callback returned here runs.
*/
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
return ops->suspend_noirq;
case PM_EVENT_RESUME:
return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw_noirq;
case PM_EVENT_RESTORE:
return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
}
return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
", may wakeup" : "", dev->power.driver_flags);
}
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
const char *info)
{
ktime_t calltime;
u64 usecs64;
int usecs;
calltime = ktime_get();
usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
do_div(usecs64, NSEC_PER_USEC);
usecs = usecs64;
if (usecs == 0)
usecs = 1;
pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
info ?: "", info ? " " : "", pm_verb(state.event),
error ? "aborted" : "complete",
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
pm_message_t state, const char *info)
{
ktime_t calltime;
int error;
if (!cb)
return 0;
calltime = initcall_debug_start(dev, cb);
pm_dev_dbg(dev, state, info);
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
* @t: The timer that PM watchdog depends on.
*
* Called when a driver has timed out suspending or resuming.
* There's not much we can do here to recover so panic() to
* capture a crash-dump in pstore.
*/
static void dpm_watchdog_handler(struct timer_list *t)
{
struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
struct timer_list *timer = &wd->timer;
unsigned int time_left;
if (wd->fatal) {
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
show_stack(wd->tsk, NULL, KERN_EMERG);
panic("%s %s: unrecoverable failure\n",
dev_driver_string(wd->dev), dev_name(wd->dev));
}
time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
show_stack(wd->tsk, NULL, KERN_WARNING);
wd->fatal = true;
mod_timer(timer, jiffies + HZ * time_left);
}
/**
* dpm_watchdog_set - Enable pm watchdog for given device.
* @wd: Watchdog. Must be allocated on the stack.
* @dev: Device to handle.
*/
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
struct timer_list *timer = &wd->timer;
wd->dev = dev;
wd->tsk = current;
wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
/**
* dpm_watchdog_clear - Disable suspend/resume watchdog.
* @wd: Watchdog to disable.
*/
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
struct timer_list *timer = &wd->timer;
timer_delete_sync(timer);
timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
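/*
 * Usage sketch: the watchdog is meant to bracket a potentially slow callback,
 * which is how device_suspend() and device_resume() below use it:
 *
 *     DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *     dpm_watchdog_set(&wd, dev);
 *     error = dpm_run_callback(callback, dev, state, info);
 *     dpm_watchdog_clear(&wd);
 */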
/*------------------------- Resume routines -------------------------*/
/**
* dev_pm_skip_resume - System-wide device resume optimization check.
* @dev: Target device.
*
* Return:
* - %false if the transition under way is RESTORE.
* - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
* - The logical negation of %power.must_resume otherwise (that is, when the
* transition under way is RESUME).
*/
bool dev_pm_skip_resume(struct device *dev)
{
if (pm_transition.event == PM_EVENT_RESTORE)
return false;
if (pm_transition.event == PM_EVENT_THAW)
return dev_pm_skip_suspend(dev);
return !dev->power.must_resume;
}
static bool is_async(struct device *dev)
{
return dev->power.async_suspend && pm_async_enabled
&& !pm_trace_is_enabled();
}
static bool __dpm_async(struct device *dev, async_func_t func)
{
if (dev->power.work_in_progress)
return true;
if (!is_async(dev))
return false;
dev->power.work_in_progress = true;
get_device(dev);
if (async_schedule_dev_nocall(func, dev))
return true;
put_device(dev);
return false;
}
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
guard(mutex)(&async_wip_mtx);
return __dpm_async(dev, func);
}
static int dpm_async_with_cleanup(struct device *dev, void *fn)
{
guard(mutex)(&async_wip_mtx);
if (!__dpm_async(dev, fn))
dev->power.work_in_progress = false;
return 0;
}
static void dpm_async_resume_children(struct device *dev, async_func_t func)
{
/*
* Prevent racing with dpm_clear_async_state() during initial list
* walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
* dpm_resume().
*/
guard(mutex)(&dpm_list_mtx);
/*
* Start processing "async" children of the device unless it's been
* started already for them.
*/
device_for_each_child(dev, func, dpm_async_with_cleanup);
}
static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
{
struct device_link *link;
int idx;
dpm_async_resume_children(dev, func);
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
device_links_read_unlock(idx);
}
static void dpm_clear_async_state(struct device *dev)
{
reinit_completion(&dev->power.completion);
dev->power.work_in_progress = false;
}
static bool dpm_root_device(struct device *dev)
{
lockdep_assert_held(&dpm_list_mtx);
/*
* Since this function is required to run under dpm_list_mtx, the
* list_empty() below will only return true if the device's list of
* suppliers is actually empty before calling it.
*/
return !dev->parent && list_empty(&dev->links.suppliers);
}
static void async_resume_noirq(void *data, async_cookie_t cookie);
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
bool skip_resume;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
if (!dev->power.is_noirq_suspended) {
/*
* This means that system suspend has been aborted in the noirq
* phase before invoking the noirq suspend callback for the
* device, so if device_suspend_late() has left it in suspend,
* device_resume_early() should leave it in suspend too, in case
* its early resume depends on the noirq resume that has not run.
*/
if (dev_pm_skip_suspend(dev))
dev->power.must_resume = false;
goto Out;
}
if (!dpm_wait_for_superior(dev, async))
goto Out;
skip_resume = dev_pm_skip_resume(dev);
/*
* If the driver callback is skipped below or by the middle layer
* callback and device_resume_early() also skips the driver callback for
* this device later, it needs to appear as "suspended" to PM-runtime,
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
* status to "active" unless its power.smart_suspend flag is clear, in
* which case it is not necessary to update its PM-runtime status.
*/
if (skip_resume)
pm_runtime_set_suspended(dev);
else if (dev_pm_smart_suspend(dev))
pm_runtime_set_active(dev);
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (skip_resume)
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
Skip:
dev->power.is_noirq_suspended = false;
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
if (error) {
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
dpm_async_resume_subordinate(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
async_error = 0;
pm_transition = state;
mutex_lock(&dpm_list_mtx);
/*
* Start processing "async" root devices upfront so they don't wait for
* the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
dpm_clear_async_state(dev);
if (dpm_root_device(dev))
dpm_async_with_cleanup(dev, async_resume_noirq);
}
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
if (!dpm_async_fn(dev, async_resume_noirq)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
device_resume_noirq(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
* allow device drivers' interrupt handlers to be called.
*/
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
}
static void async_resume_early(void *data, async_cookie_t cookie);
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
if (dev->power.syscore)
goto Skip;
if (!dpm_wait_for_superior(dev, async))
goto Out;
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "early type ";
callback = pm_late_early_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "early class ";
callback = pm_late_early_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "early bus ";
callback = pm_late_early_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (dev_pm_skip_resume(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "early driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
Skip:
dev->power.is_late_suspended = false;
pm_runtime_enable(dev);
Out:
TRACE_RESUME(error);
complete_all(&dev->power.completion);
if (error) {
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
dpm_async_resume_subordinate(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
struct device *dev = data;
device_resume_early(dev, pm_transition, true);
put_device(dev);
}
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
void dpm_resume_early(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
async_error = 0;
pm_transition = state;
mutex_lock(&dpm_list_mtx);
/*
* Start processing "async" root devices upfront so they don't wait for
* the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
dpm_clear_async_state(dev);
if (dpm_root_device(dev))
dpm_async_with_cleanup(dev, async_resume_early);
}
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
if (!dpm_async_fn(dev, async_resume_early)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
device_resume_early(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
* dpm_resume_start - Execute "noirq" and "early" device callbacks.
* @state: PM transition of the system being carried out.
*/
void dpm_resume_start(pm_message_t state)
{
dpm_resume_noirq(state);
dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
static void async_resume(void *data, async_cookie_t cookie);
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->power.syscore)
goto Complete;
if (!dev->power.is_suspended)
goto Complete;
dev->power.is_suspended = false;
if (dev->power.direct_complete) {
/*
* Allow new children to be added under the device after this
* point if it has no PM callbacks.
*/
if (dev->power.no_pm_callbacks)
dev->power.is_prepared = false;
/* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
if (!dpm_wait_for_superior(dev, async))
goto Complete;
dpm_watchdog_set(&wd, dev);
device_lock(dev);
/*
* This is a fib. But we'll allow new children to be added below
* a resumed device, even if the device hasn't been completed yet.
*/
dev->power.is_prepared = false;
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
goto Driver;
}
if (dev->type && dev->type->pm) {
info = "type ";
callback = pm_op(dev->type->pm, state);
goto Driver;
}
if (dev->class && dev->class->pm) {
info = "class ";
callback = pm_op(dev->class->pm, state);
goto Driver;
}
if (dev->bus) {
if (dev->bus->pm) {
info = "bus ";
callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->resume) {
info = "legacy bus ";
callback = dev->bus->resume;
goto End;
}
}
Driver:
if (!callback && dev->driver && dev->driver->pm) {
info = "driver ";
callback = pm_op(dev->driver->pm, state);
}
End:
error = dpm_run_callback(callback, dev, state, info);
device_unlock(dev);
dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
if (error) {
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
dpm_async_resume_subordinate(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
{
struct device *dev = data;
device_resume(dev, pm_transition, true);
put_device(dev);
}
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Execute the appropriate "resume" callback for all devices whose status
* indicates that they are suspended.
*/
void dpm_resume(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
pm_transition = state;
async_error = 0;
mutex_lock(&dpm_list_mtx);
/*
* Start processing "async" root devices upfront so they don't wait for
* the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
dpm_clear_async_state(dev);
if (dpm_root_device(dev))
dpm_async_with_cleanup(dev, async_resume);
}
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
if (!dpm_async_fn(dev, async_resume)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
device_resume(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
* device_complete - Complete a PM transition for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*/
static void device_complete(struct device *dev, pm_message_t state)
{
void (*callback)(struct device *) = NULL;
const char *info = NULL;
if (dev->power.syscore)
goto out;
device_lock(dev);
if (dev->pm_domain) {
info = "completing power domain ";
callback = dev->pm_domain->ops.complete;
} else if (dev->type && dev->type->pm) {
info = "completing type ";
callback = dev->type->pm->complete;
} else if (dev->class && dev->class->pm) {
info = "completing class ";
callback = dev->class->pm->complete;
} else if (dev->bus && dev->bus->pm) {
info = "completing bus ";
callback = dev->bus->pm->complete;
}
if (!callback && dev->driver && dev->driver->pm) {
info = "completing driver ";
callback = dev->driver->pm->complete;
}
if (callback) {
pm_dev_dbg(dev, state, info);
callback(dev);
}
device_unlock(dev);
out:
/* If enabling runtime PM for the device is blocked, unblock it. */
pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
/**
* dpm_complete - Complete a PM transition for all non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Execute the ->complete() callbacks for all devices whose PM status is not
* DPM_ON (this allows new devices to be registered).
*/
void dpm_complete(pm_message_t state)
{
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_prepared_list)) {
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
device_complete(dev, state);
trace_device_pm_callback_end(dev, 0);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
/* Allow device probing and trigger re-probing of deferred devices */
device_unblock_probing();
trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
* dpm_resume_end - Execute "resume" callbacks and complete system transition.
* @state: PM transition of the system being carried out.
*
* Execute "resume" callbacks for all devices and complete the PM transition of
* the system.
*/
void dpm_resume_end(pm_message_t state)
{
dpm_resume(state);
pm_restore_gfp_mask();
dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
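/*
 * Ordering sketch (editorial illustration, simplified view of a sleep-state
 * exit path): every device sees its "noirq" and "early" callbacks via
 * dpm_resume_start() before its full ->resume() and ->complete() via
 * dpm_resume_end():
 *
 *     dpm_resume_start(PMSG_RESUME);   // noirq + early phases
 *     ...                              // platform wakeup, syscore resume, etc.
 *     dpm_resume_end(PMSG_RESUME);     // full resume + complete phases
 */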
/*------------------------- Suspend routines -------------------------*/
static bool dpm_leaf_device(struct device *dev)
{
struct device *child;
lockdep_assert_held(&dpm_list_mtx);
child = device_find_any_child(dev);
if (child) {
put_device(child);
return false;
}
/*
* Since this function is required to run under dpm_list_mtx, the
* list_empty() below will only return true if the device's list of
* consumers is actually empty before calling it.
*/
return list_empty(&dev->links.consumers);
}
static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
{
guard(mutex)(&dpm_list_mtx);
/*
* If the device is suspended asynchronously and the parent's callback
* deletes both the device and the parent itself, the parent object may
* be freed while this function is running, so avoid that by checking
* if the device has been deleted already as the parent cannot be
* deleted before it.
*/
if (!device_pm_initialized(dev))
return false;
/* Start processing the device's parent if it is "async". */
if (dev->parent)
dpm_async_with_cleanup(dev->parent, func);
return true;
}
static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
{
struct device_link *link;
int idx;
if (!dpm_async_suspend_parent(dev, func))
return;
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
device_links_read_unlock(idx);
}
static void dpm_async_suspend_complete_all(struct list_head *device_list)
{
struct device *dev;
guard(mutex)(&async_wip_mtx);
list_for_each_entry_reverse(dev, device_list, power.entry) {
/*
* In case the device is being waited for and async processing
* has not started for it yet, let the waiters make progress.
*/
if (!dev->power.work_in_progress)
complete_all(&dev->power.completion);
}
}
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
*
* Return a PM message representing the resume event corresponding to given
* sleep state.
*/
static pm_message_t resume_event(pm_message_t sleep_state)
{
switch (sleep_state.event) {
case PM_EVENT_SUSPEND:
return PMSG_RESUME;
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return PMSG_RECOVER;
case PM_EVENT_HIBERNATE:
return PMSG_RESTORE;
}
return PMSG_ON;
}
static void dpm_superior_set_must_resume(struct device *dev)
{
struct device_link *link;
int idx;
if (dev->parent)
dev->parent->power.must_resume = true;
idx = device_links_read_lock();
dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
}
static void async_suspend_noirq(void *data, async_cookie_t cookie);
/**
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
dpm_wait_for_subordinate(dev, async);
if (READ_ONCE(async_error))
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
Skip:
dev->power.is_noirq_suspended = true;
/*
* Devices must be resumed unless they are explicitly allowed to be left
* in suspend, but even in that case skipping the resume of devices that
* were in use right before the system suspend (as indicated by their
* runtime PM usage counters and child counters) would be suboptimal.
*/
if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
if (error || READ_ONCE(async_error))
return;
dpm_async_suspend_superior(dev, async_suspend_noirq);
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
device_suspend_noirq(dev, pm_transition, true);
put_device(dev);
}
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
int error;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
pm_transition = state;
async_error = 0;
mutex_lock(&dpm_list_mtx);
/*
* Start processing "async" leaf devices upfront so they don't need to
* wait for the "sync" devices they don't depend on.
*/
list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
dpm_clear_async_state(dev);
if (dpm_leaf_device(dev))
dpm_async_with_cleanup(dev, async_suspend_noirq);
}
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
get_device(dev);
mutex_unlock(&dpm_list_mtx);
device_suspend_noirq(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
if (READ_ONCE(async_error)) {
dpm_async_suspend_complete_all(&dpm_late_early_list);
/*
* Move all devices to the target list to resume them
* properly.
*/
list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
break;
}
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
/**
* dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers' interrupt handlers from being called and invoke
* "noirq" suspend callbacks for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
{
int ret;
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
return ret;
}
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
struct device *parent = dev->parent;
if (!parent)
return;
spin_lock_irq(&parent->power.lock);
if (device_wakeup_path(dev) && !parent->power.ignore_children)
parent->power.wakeup_path = true;
spin_unlock_irq(&parent->power.lock);
}
static void async_suspend_late(void *data, async_cookie_t cookie);
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
dpm_wait_for_subordinate(dev, async);
if (READ_ONCE(async_error))
goto Complete;
if (pm_wakeup_pending()) {
WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
if (dev->power.direct_complete)
goto Complete;
/*
* Disable runtime PM for the device without checking if there is a
* pending resume request for it.
*/
__pm_runtime_disable(dev, false);
if (dev->power.syscore)
goto Skip;
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "late type ";
callback = pm_late_early_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "late class ";
callback = pm_late_early_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "late bus ";
callback = pm_late_early_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
pm_runtime_enable(dev);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
Skip:
dev->power.is_late_suspended = true;
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
if (error || READ_ONCE(async_error))
return;
dpm_async_suspend_superior(dev, async_suspend_late);
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
struct device *dev = data;
device_suspend_late(dev, pm_transition, true);
put_device(dev);
}
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
int error;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
pm_transition = state;
async_error = 0;
wake_up_all_idle_cpus();
mutex_lock(&dpm_list_mtx);
/*
* Start processing "async" leaf devices upfront so they don't need to
* wait for the "sync" devices they don't depend on.
*/
list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
dpm_clear_async_state(dev);
if (dpm_leaf_device(dev))
dpm_async_with_cleanup(dev, async_suspend_late);
}
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
if (dpm_async_fn(dev, async_suspend_late))
continue;
get_device(dev);
mutex_unlock(&dpm_list_mtx);
device_suspend_late(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
if (READ_ONCE(async_error)) {
dpm_async_suspend_complete_all(&dpm_suspended_list);
/*
* Move all devices to the target list to resume them
* properly.
*/
list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
break;
}
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
error = READ_ONCE(async_error);
if (error) {
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
dpm_show_time(starttime, state, error, "late");
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
/**
* dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
* @state: PM transition of the system being carried out.
*/
int dpm_suspend_end(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error;
error = dpm_suspend_late(state);
if (error)
goto out;
error = dpm_suspend_noirq(state);
if (error)
dpm_resume_early(resume_event(state));
out:
dpm_show_time(starttime, state, error, "end");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
* legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
* @dev: Device to suspend.
* @state: PM transition of the system being carried out.
* @cb: Suspend callback to execute.
* @info: string description of caller.
*/
static int legacy_suspend(struct device *dev, pm_message_t state,
int (*cb)(struct device *dev, pm_message_t state),
const char *info)
{
int error;
ktime_t calltime;
calltime = initcall_debug_start(dev, cb);
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
return error;
}
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
if (dev->parent) {
spin_lock_irq(&dev->parent->power.lock);
dev->parent->power.direct_complete = false;
spin_unlock_irq(&dev->parent->power.lock);
}
idx = device_links_read_lock();
dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
}
device_links_read_unlock(idx);
}
static void async_suspend(void *data, async_cookie_t cookie);
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
dpm_wait_for_subordinate(dev, async);
if (READ_ONCE(async_error)) {
dev->power.direct_complete = false;
goto Complete;
}
/*
* Wait for possible runtime PM transitions of the device in progress
* to complete and if there's a runtime resume request pending for it,
* resume it before proceeding with invoking the system-wide suspend
* callbacks for it.
*
* If the system-wide suspend callbacks below change the configuration
* of the device, they must disable runtime PM for it or otherwise
* ensure that its runtime-resume callbacks will not be confused by that
* change in case they are invoked going forward.
*/
pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
if (dev->power.syscore)
goto Complete;
/* Avoid direct_complete to let wakeup_path propagate. */
if (device_may_wakeup(dev) || device_wakeup_path(dev))
dev->power.direct_complete = false;
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
dev->power.is_suspended = true;
goto Complete;
}
pm_runtime_enable(dev);
}
dev->power.direct_complete = false;
}
dev->power.may_skip_resume = true;
dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
goto Run;
}
if (dev->type && dev->type->pm) {
info = "type ";
callback = pm_op(dev->type->pm, state);
goto Run;
}
if (dev->class && dev->class->pm) {
info = "class ";
callback = pm_op(dev->class->pm, state);
goto Run;
}
if (dev->bus) {
if (dev->bus->pm) {
info = "bus ";
callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy bus ");
error = legacy_suspend(dev, state, dev->bus->suspend,
"legacy bus ");
goto End;
}
}
Run:
if (!callback && dev->driver && dev->driver->pm) {
info = "driver ";
callback = pm_op(dev->driver->pm, state);
}
error = dpm_run_callback(callback, dev, state, info);
End:
if (!error) {
dev->power.is_suspended = true;
if (device_may_wakeup(dev))
dev->power.wakeup_path = true;
dpm_propagate_wakeup_to_parent(dev);
dpm_clear_superiors_direct_complete(dev);
}
device_unlock(dev);
dpm_watchdog_clear(&wd);
Complete:
if (error) {
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
if (error || READ_ONCE(async_error))
return;
dpm_async_suspend_superior(dev, async_suspend);
}
static void async_suspend(void *data, async_cookie_t cookie)
{
struct device *dev = data;
device_suspend(dev, pm_transition, true);
put_device(dev);
}
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
*/
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
int error;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
devfreq_suspend();
cpufreq_suspend();
pm_transition = state;
async_error = 0;
mutex_lock(&dpm_list_mtx);
/*
* Start processing "async" leaf devices upfront so they don't need to
* wait for the "sync" devices they don't depend on.
*/
list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
dpm_clear_async_state(dev);
if (dpm_leaf_device(dev))
dpm_async_with_cleanup(dev, async_suspend);
}
while (!list_empty(&dpm_prepared_list)) {
dev = to_device(dpm_prepared_list.prev);
list_move(&dev->power.entry, &dpm_suspended_list);
if (dpm_async_fn(dev, async_suspend))
continue;
get_device(dev);
mutex_unlock(&dpm_list_mtx);
device_suspend(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
if (READ_ONCE(async_error)) {
dpm_async_suspend_complete_all(&dpm_prepared_list);
/*
* Move all devices to the target list to resume them
* properly.
*/
list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
break;
}
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
static bool device_prepare_smart_suspend(struct device *dev)
{
struct device_link *link;
bool ret = true;
int idx;
/*
* The "smart suspend" feature is enabled for devices whose drivers ask
* for it and for devices without PM callbacks.
*
* However, if "smart suspend" is not enabled for the device's parent
* or any of its suppliers that take runtime PM into account, it cannot
* be enabled for the device either.
*/
if (!dev->power.no_pm_callbacks &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
return false;
if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
!dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
return false;
idx = device_links_read_lock();
dev_for_each_link_to_supplier(link, dev) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
if (!dev_pm_smart_suspend(link->supplier) &&
!pm_runtime_blocked(link->supplier)) {
ret = false;
break;
}
}
device_links_read_unlock(idx);
return ret;
}
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*
* Execute the ->prepare() callback(s) for given device. No new children of the
* device may be registered after this function has returned.
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
bool smart_suspend;
int ret = 0;
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
* block runtime suspend here, during the prepare phase, and allow
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
/*
* If runtime PM is disabled for the device at this point and it has
* never been enabled so far, it should not be enabled until this system
* suspend-resume cycle is complete, so prepare to trigger a warning on
* subsequent attempts to enable it.
*/
smart_suspend = !pm_runtime_block_if_disabled(dev);
if (dev->power.syscore)
return 0;
device_lock(dev);
dev->power.wakeup_path = false;
if (dev->power.no_pm_callbacks)
goto unlock;
if (dev->pm_domain)
callback = dev->pm_domain->ops.prepare;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->prepare;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->prepare;
else if (dev->bus && dev->bus->pm)
callback = dev->bus->pm->prepare;
if (!callback && dev->driver && dev->driver->pm)
callback = dev->driver->pm->prepare;
if (callback)
ret = callback(dev);
unlock:
device_unlock(dev);
if (ret < 0) {
suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
/* Do not enable "smart suspend" for devices with disabled runtime PM. */
if (smart_suspend)
smart_suspend = device_prepare_smart_suspend(dev);
spin_lock_irq(&dev->power.lock);
dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
* runtime-suspended, you can leave it in that state provided that you
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
spin_unlock_irq(&dev->power.lock);
return 0;
}
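/*
 * Sketch (hypothetical driver, editorial illustration): a ->prepare() callback
 * that opts into the direct-complete optimization described above when the
 * device is already runtime-suspended and its state needs no rework:
 *
 *     static int my_prepare(struct device *dev)
 *     {
 *         if (pm_runtime_suspended(dev) && my_state_is_clean(dev))
 *             return 1;    // "leave it runtime-suspended", see above
 *         return 0;        // run the normal suspend callbacks
 *     }
 */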
/**
* dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
* @state: PM transition of the system being carried out.
*
* Execute the ->prepare() callback(s) for all devices.
*/
int dpm_prepare(pm_message_t state)
{
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
/*
* Give the known devices a chance to complete their probes before
* disabling further probing. This sync point is important at least
* at boot time and during hibernation restore.
*/
wait_for_device_probe();
/*
* Probing devices during suspend or hibernation is unsafe and would make
* system behavior unpredictable, so prohibit device probing here and
* defer probes instead. The normal behavior is restored in dpm_complete().
*/
device_block_probing();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
error = device_prepare(dev, state);
trace_device_pm_callback_end(dev, error);
mutex_lock(&dpm_list_mtx);
if (!error) {
dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
} else if (error == -EAGAIN) {
error = 0;
} else {
dev_info(dev, "not prepared for power transition: code %d\n",
error);
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
return error;
}
/**
* dpm_suspend_start - Prepare devices for PM transition and suspend them.
* @state: PM transition of the system being carried out.
*
* Prepare all non-sysdev devices for system PM transition and execute "suspend"
* callbacks for them.
*/
int dpm_suspend_start(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error;
error = dpm_prepare(state);
if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
else {
pm_restrict_gfp_mask();
error = dpm_suspend(state);
}
dpm_show_time(starttime, state, error, "start");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
* @subordinate: Device that needs to wait for @dev.
* @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
dpm_wait(dev, subordinate->power.async_suspend);
return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
/**
* dpm_for_each_dev - device iterator.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over devices in dpm_list, and call @fn for each device,
* passing it @data.
*/
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
struct device *dev;
if (!fn)
return;
device_pm_lock();
list_for_each_entry(dev, &dpm_list, power.entry)
fn(dev, data);
device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
if (!ops)
return true;
return !ops->prepare && !ops->suspend && !ops->suspend_late &&
!ops->suspend_noirq && !ops->resume_noirq && !ops->resume_early &&
!ops->resume && !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
{
return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* This implements the various checks for CONFIG_HARDENED_USERCOPY*,
* which are designed to protect kernel memory from needless exposure
* and overwrite under many unintended conditions. This code is based
* on PAX_USERCOPY, which is:
*
* Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
* Security Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/ucopysize.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"
/*
* Checks if a given pointer and length is contained by the current
* stack frame (if possible).
*
* Returns:
* NOT_STACK: not at all on the stack
* GOOD_FRAME: fully within a valid stack frame
* GOOD_STACK: within the current stack (when can't frame-check exactly)
* BAD_STACK: error condition (invalid stack position or bad stack frame)
*/
static noinline int check_stack_object(const void *obj, unsigned long len)
{
const void * const stack = task_stack_page(current);
const void * const stackend = stack + THREAD_SIZE;
int ret;
/* Object is not on the stack at all. */
if (obj + len <= stack || stackend <= obj)
return NOT_STACK;
/*
* Reject: object partially overlaps the stack (passing the
* check above means at least one end is within the stack,
* so if this check fails, the other end is outside the stack).
*/
if (obj < stack || stackend < obj + len)
return BAD_STACK;
/* Check if object is safely within a valid frame. */
ret = arch_within_stack_frames(stack, stackend, obj, len);
if (ret)
return ret;
/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
if ((void *)current_stack_pointer < obj + len)
return BAD_STACK;
} else {
if (obj < (void *)current_stack_pointer)
return BAD_STACK;
}
#endif
return GOOD_STACK;
}
/*
* If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
* an unexpected state during a copy_from_user() or copy_to_user() call.
* There are several checks being performed on the buffer by the
* __check_object_size() function. Normal stack buffer usage should never
* trip the checks, and kernel text addressing will always trip the check.
* For cache objects, it is checking that only the whitelisted range of
* bytes for a given cache is being accessed (via the cache's usersize and
* useroffset fields). To adjust a cache whitelist, use the usercopy-aware
* kmem_cache_create_usercopy() function to create the cache (and
* carefully audit the whitelist range).
*/
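/*
 * A minimal sketch of such a whitelist, assuming a cache whose objects only
 * ever expose their payload member to copy_to_user()/copy_from_user(); the
 * example_* names are hypothetical:
 *
 *	struct example_obj {
 *		spinlock_t lock;
 *		u8 payload[64];
 *	};
 *
 *	cache = kmem_cache_create_usercopy("example_obj",
 *			sizeof(struct example_obj), 0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct example_obj, payload),
 *			sizeof_field(struct example_obj, payload),
 *			NULL);
 *
 * Copies that stay within the payload range pass the heap check below;
 * a copy that reaches the lock triggers usercopy_abort().
 */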
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
unsigned long len)
{
pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
to_user ? "exposure" : "overwrite",
to_user ? "from" : "to",
name ? : "unknown?!",
detail ? " '" : "", detail ? : "", detail ? "'" : "",
offset, len);
/*
* For greater effect, it would be nice to do do_group_exit(),
* but BUG() actually hooks all the lock-breaking and per-arch
* Oops code, so that is used here instead.
*/
BUG();
}
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
unsigned long low, unsigned long high)
{
const unsigned long check_low = ptr;
unsigned long check_high = check_low + n;
/* Does not overlap if entirely above or entirely below. */
if (check_low >= high || check_high <= low)
return false;
return true;
}
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
unsigned long n, bool to_user)
{
unsigned long textlow = (unsigned long)_stext;
unsigned long texthigh = (unsigned long)_etext;
unsigned long textlow_linear, texthigh_linear;
if (overlaps(ptr, n, textlow, texthigh))
usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);
/*
* Some architectures have virtual memory mappings with a secondary
* mapping of the kernel text, i.e. there is more than one virtual
* kernel address that points to the kernel image. It is usually
* when there is a separate linear physical memory mapping, in which
* case __pa() is not just the reverse of __va(). This can be detected
* and checked:
*/
textlow_linear = (unsigned long)lm_alias(textlow);
/* No different mapping: we're done. */
if (textlow_linear == textlow)
return;
/* Check the secondary mapping... */
texthigh_linear = (unsigned long)lm_alias(texthigh);
if (overlaps(ptr, n, textlow_linear, texthigh_linear))
usercopy_abort("linear kernel text", NULL, to_user,
ptr - textlow_linear, n);
}
static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
bool to_user)
{
/* Reject if object wraps past end of memory. */
if (ptr + (n - 1) < ptr)
usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
/* Reject if NULL or ZERO-allocation. */
if (ZERO_OR_NULL_PTR(ptr))
usercopy_abort("null address", NULL, to_user, ptr, n);
}
static inline void check_heap_object(const void *ptr, unsigned long n,
bool to_user)
{
unsigned long addr = (unsigned long)ptr;
unsigned long offset;
struct folio *folio;
if (is_kmap_addr(ptr)) {
offset = offset_in_page(ptr);
if (n > PAGE_SIZE - offset)
usercopy_abort("kmap", NULL, to_user, offset, n);
return;
}
if (is_vmalloc_addr(ptr) && !pagefault_disabled()) {
struct vmap_area *area = find_vmap_area(addr);
if (!area)
usercopy_abort("vmalloc", "no area", to_user, 0, n);
if (n > area->va_end - addr) {
offset = addr - area->va_start;
usercopy_abort("vmalloc", NULL, to_user, offset, n);
}
return;
}
if (!virt_addr_valid(ptr))
return;
folio = virt_to_folio(ptr);
if (folio_test_slab(folio)) {
/* Check slab allocator for flags and size. */
__check_heap_object(ptr, n, folio_slab(folio), to_user);
} else if (folio_test_large(folio)) {
offset = ptr - folio_address(folio);
if (n > folio_size(folio) - offset)
usercopy_abort("page alloc", NULL, to_user, offset, n);
}
}
DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_HARDENED_USERCOPY_DEFAULT_ON,
validate_usercopy_range);
EXPORT_SYMBOL(validate_usercopy_range);
/*
* Validates that the given object is:
* - not bogus address
* - fully contained by stack (or stack frame, when available)
* - fully within SLAB object (or object whitelist area, when available)
* - not in kernel text
*/
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
/* Skip all tests if size is zero. */
if (!n)
return;
/* Check for invalid addresses. */
check_bogus_address((const unsigned long)ptr, n, to_user);
/* Check for bad stack object. */
switch (check_stack_object(ptr, n)) {
case NOT_STACK:
/* Object is not touching the current process stack. */
break;
case GOOD_FRAME:
case GOOD_STACK:
/*
* Object is either in the correct frame (when it
* is possible to check) or just generally on the
* process stack (when frame checking not available).
*/
return;
default:
usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
IS_ENABLED(CONFIG_STACK_GROWSUP) ?
ptr - (void *)current_stack_pointer :
(void *)current_stack_pointer - ptr,
#else
0,
#endif
n);
}
/* Check for bad heap object. */
check_heap_object(ptr, n, to_user);
/* Check for object in kernel to avoid text exposure. */
check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
static bool enable_checks __initdata =
IS_ENABLED(CONFIG_HARDENED_USERCOPY_DEFAULT_ON);
static int __init parse_hardened_usercopy(char *str)
{
if (kstrtobool(str, &enable_checks))
pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
str);
return 1;
}
__setup("hardened_usercopy=", parse_hardened_usercopy);
static int __init set_hardened_usercopy(void)
{
if (enable_checks)
static_branch_enable(&validate_usercopy_range);
else
static_branch_disable(&validate_usercopy_range);
return 1;
}
late_initcall(set_hardened_usercopy);
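/*
 * For example, a kernel built with CONFIG_HARDENED_USERCOPY_DEFAULT_ON=y
 * can still be booted with "hardened_usercopy=off" on the command line to
 * keep the static branch above disabled (and "hardened_usercopy=on" enables
 * it on a default-off build).
 */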
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1994 Linus Torvalds
*
* Pentium III FXSR, SSE support
* General FPU state handling cleanups
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/msr.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>
#include <uapi/asm/kvm.h>
#include <linux/hardirq.h>
#include <linux/pkeys.h>
#include <linux/vmalloc.h>
#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"
#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>
#ifdef CONFIG_X86_64
DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
DEFINE_PER_CPU(u64, xfd_state);
#endif
/* The FPU state configuration data for kernel and user space */
struct fpu_state_config fpu_kernel_cfg __ro_after_init;
struct fpu_state_config fpu_user_cfg __ro_after_init;
struct vcpu_fpu_config guest_default_cfg __ro_after_init;
/*
* Represents the initial FPU state. It's mostly (but not completely) zeroes,
* depending on the FPU hardware format:
*/
struct fpstate init_fpstate __ro_after_init;
/*
* Track FPU initialization and kernel-mode usage. 'true' means the FPU is
* initialized and is not currently being used by the kernel:
*/
DEFINE_PER_CPU(bool, kernel_fpu_allowed);
/*
* Track which context is using the FPU on the CPU:
*/
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
#ifdef CONFIG_X86_DEBUG_FPU
struct fpu *x86_task_fpu(struct task_struct *task)
{
if (WARN_ON_ONCE(task->flags & PF_KTHREAD))
return NULL;
return (void *)task + sizeof(*task);
}
#endif
/*
* Can we use the FPU in kernel mode with the
* whole "kernel_fpu_begin/end()" sequence?
*/
bool irq_fpu_usable(void)
{
if (WARN_ON_ONCE(in_nmi()))
return false;
/*
* Return false in the following cases:
*
* - FPU is not yet initialized. This can happen only when the call is
* coming from CPU onlining, for example for microcode checksumming.
* - The kernel is already using the FPU, either because of explicit
* nesting (which should never be done), or because of implicit
* nesting when a hardirq interrupted a kernel-mode FPU section.
*
* The single boolean check below handles both cases:
*/
if (!this_cpu_read(kernel_fpu_allowed))
return false;
/*
* When not in NMI or hard interrupt context, FPU can be used in:
*
* - Task context except from within fpregs_lock()'ed critical
* regions.
*
* - Soft interrupt processing context which cannot happen
* while in a fpregs_lock()'ed critical region.
*/
if (!in_hardirq())
return true;
/*
* In hard interrupt context it's safe when soft interrupts
* are enabled, which means the interrupt did not hit in
* a fpregs_lock()'ed critical region.
*/
return !softirq_count();
}
EXPORT_SYMBOL(irq_fpu_usable);
/*
* Track AVX512 state use because it is known to slow the max clock
* speed of the core.
*/
static void update_avx_timestamp(struct fpu *fpu)
{
#define AVX512_TRACKING_MASK (XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM)
if (fpu->fpstate->regs.xsave.header.xfeatures & AVX512_TRACKING_MASK)
fpu->avx512_timestamp = jiffies;
}
/*
* Save the FPU register state in fpu->fpstate->regs. The register state is
* preserved.
*
* Must be called with fpregs_lock() held.
*
* The legacy FNSAVE instruction clears all FPU state unconditionally, so
* register state has to be reloaded. That might be a pointless exercise
* when the FPU is going to be used by another task right after that. But
* this only affects 20+ years old 32bit systems and avoids conditionals all
* over the place.
*
* FXSAVE and all XSAVE variants preserve the FPU register state.
*/
void save_fpregs_to_fpstate(struct fpu *fpu)
{
if (likely(use_xsave())) {
os_xsave(fpu->fpstate);
update_avx_timestamp(fpu);
return;
}
if (likely(use_fxsr())) {
fxsave(&fpu->fpstate->regs.fxsave);
return;
}
/*
* Legacy FPU register saving, FNSAVE always clears FPU registers,
* so we have to reload them from the memory state.
*/
asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
frstor(&fpu->fpstate->regs.fsave);
}
void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
{
/*
* AMD K7/K8 and later CPUs up to Zen don't save/restore
* FDP/FIP/FOP unless an exception is pending. Clear the x87 state
* here by setting it to fixed values. "m" is a random variable
* that should be in L1.
*/
if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
asm volatile(
"fnclex\n\t"
"emms\n\t"
"fildl %[addr]" /* set F?P to defined value */
: : [addr] "m" (*fpstate));
}
if (use_xsave()) {
/*
* Dynamically enabled features are enabled in XCR0, but
* usage also requires that the corresponding bits in XFD are
* cleared. If the bits are set, using a related instruction will
* raise #NM. This allows the allocation of the larger FPU buffer
* to be done lazily from the #NM handler, or the task to be killed
* if it has no permission, which would otherwise happen via #UD if
* the feature were disabled in XCR0.
*
* XFD state follows the same lifetime rules as XSTATE, and to
* restore state correctly XFD has to be updated before XRSTORS,
* otherwise the component would stay in or go into the init state
* even if the bits are set in fpstate::regs::xsave::xfeatures.
*/
xfd_update_state(fpstate);
/*
* Restoring state always needs to modify all features
* which are in @mask even if the current task cannot use
* extended features.
*
* So fpstate->xfeatures cannot be used here, because then
* a feature for which the task has no permission but was
* used by the previous task would not go into init state.
*/
mask = fpu_kernel_cfg.max_features & mask;
os_xrstor(fpstate, mask);
} else {
if (use_fxsr())
fxrstor(&fpstate->regs.fxsave);
else
frstor(&fpstate->regs.fsave);
}
}
void fpu_reset_from_exception_fixup(void)
{
restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
}
#if IS_ENABLED(CONFIG_KVM)
static void __fpstate_reset(struct fpstate *fpstate);
static void fpu_lock_guest_permissions(void)
{
struct fpu_state_perm *fpuperm;
u64 perm;
if (!IS_ENABLED(CONFIG_X86_64))
return;
spin_lock_irq(&current->sighand->siglock);
fpuperm = &x86_task_fpu(current->group_leader)->guest_perm;
perm = fpuperm->__state_perm;
/* First fpstate allocation locks down permissions. */
WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED);
spin_unlock_irq(&current->sighand->siglock);
}
bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
{
struct fpstate *fpstate;
unsigned int size;
size = guest_default_cfg.size + ALIGN(offsetof(struct fpstate, regs), 64);
fpstate = vzalloc(size);
if (!fpstate)
return false;
/* Initialize indicators to reflect properties of the fpstate */
fpstate->is_valloc = true;
fpstate->is_guest = true;
__fpstate_reset(fpstate);
fpstate_init_user(fpstate);
gfpu->fpstate = fpstate;
gfpu->xfeatures = guest_default_cfg.features;
/*
* KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
* to userspace, even when XSAVE is unsupported, so that restoring FPU
* state on a different CPU that does support XSAVE can cleanly load
* the incoming state using its natural XSAVE. In other words, KVM's
* uABI size may be larger than this host's default size. Conversely,
* the default size should never be larger than KVM's base uABI size;
* all features that can expand the uABI size must be opt-in.
*/
gfpu->uabi_size = sizeof(struct kvm_xsave);
if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size))
gfpu->uabi_size = fpu_user_cfg.default_size;
fpu_lock_guest_permissions();
return true;
}
EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);
void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
{
struct fpstate *fpstate = gfpu->fpstate;
if (!fpstate)
return;
if (WARN_ON_ONCE(!fpstate->is_valloc || !fpstate->is_guest || fpstate->in_use))
return;
gfpu->fpstate = NULL;
vfree(fpstate);
}
EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
/*
* fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable
* @guest_fpu: Pointer to the guest FPU container
* @xfeatures: Features requested by guest CPUID
*
* Enable all dynamic xfeatures according to guest perm and requested CPUID.
*
* Return: 0 on success, error code otherwise
*/
int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures)
{
lockdep_assert_preemption_enabled();
/* Nothing to do if all requested features are already enabled. */
xfeatures &= ~guest_fpu->xfeatures;
if (!xfeatures)
return 0;
return __xfd_enable_feature(xfeatures, guest_fpu);
}
EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);
#ifdef CONFIG_X86_64
void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
{
fpregs_lock();
guest_fpu->fpstate->xfd = xfd;
if (guest_fpu->fpstate->in_use)
xfd_update_state(guest_fpu->fpstate);
fpregs_unlock();
}
EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
/**
* fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state
*
* Must be invoked from KVM after a VMEXIT before enabling interrupts when
* XFD write emulation is disabled. This is required because the guest can
* freely modify XFD and the state at VMEXIT is not guaranteed to be the
* same as the state on VMENTER. So software state has to be updated before
* any operation which depends on it can take place.
*
* Note: It can be invoked unconditionally even when write emulation is
* enabled for the price of a then pointless MSR read.
*/
void fpu_sync_guest_vmexit_xfd_state(void)
{
struct fpstate *fpstate = x86_task_fpu(current)->fpstate;
lockdep_assert_irqs_disabled();
if (fpu_state_size_dynamic()) {
rdmsrq(MSR_IA32_XFD, fpstate->xfd);
__this_cpu_write(xfd_state, fpstate->xfd);
}
}
EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
#endif /* CONFIG_X86_64 */
int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
{
struct fpstate *guest_fps = guest_fpu->fpstate;
struct fpu *fpu = x86_task_fpu(current);
struct fpstate *cur_fps = fpu->fpstate;
fpregs_lock();
if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
save_fpregs_to_fpstate(fpu);
/* Swap fpstate */
if (enter_guest) {
fpu->__task_fpstate = cur_fps;
fpu->fpstate = guest_fps;
guest_fps->in_use = true;
} else {
guest_fps->in_use = false;
fpu->fpstate = fpu->__task_fpstate;
fpu->__task_fpstate = NULL;
}
cur_fps = fpu->fpstate;
if (!cur_fps->is_confidential) {
/* Includes XFD update */
restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
} else {
/*
* XSTATE is restored by firmware from encrypted
* memory. Make sure XFD state is correct while
* running with guest fpstate
*/
xfd_update_state(cur_fps);
}
fpregs_mark_activate();
fpregs_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
unsigned int size, u64 xfeatures, u32 pkru)
{
struct fpstate *kstate = gfpu->fpstate;
union fpregs_state *ustate = buf;
struct membuf mb = { .p = buf, .left = size };
if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
__copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
XSTATE_COPY_XSAVE);
} else {
memcpy(&ustate->fxsave, &kstate->regs.fxsave,
sizeof(ustate->fxsave));
/* Make it restorable on a XSAVE enabled host */
ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
}
}
EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi);
int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
u64 xcr0, u32 *vpkru)
{
struct fpstate *kstate = gfpu->fpstate;
const union fpregs_state *ustate = buf;
if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
return -EINVAL;
if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
return 0;
}
if (ustate->xsave.header.xfeatures & ~xcr0)
return -EINVAL;
/*
* Nullify @vpkru to preserve its current value if PKRU's bit isn't set
* in the header. KVM's odd ABI is to leave PKRU untouched in this
* case (all other components are eventually re-initialized).
*/
if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU))
vpkru = NULL;
return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
}
EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
#endif /* CONFIG_KVM */
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
if (!irqs_disabled())
fpregs_lock();
WARN_ON_FPU(!irq_fpu_usable());
/* Toggle kernel_fpu_allowed to false: */
WARN_ON_FPU(!this_cpu_read(kernel_fpu_allowed));
this_cpu_write(kernel_fpu_allowed, false);
if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
save_fpregs_to_fpstate(x86_task_fpu(current));
}
__cpu_invalidate_fpregs_state();
/* Put sane initial values into the control registers. */
if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
ldmxcsr(MXCSR_DEFAULT);
if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
void kernel_fpu_end(void)
{
/* Toggle kernel_fpu_allowed back to true: */
WARN_ON_FPU(this_cpu_read(kernel_fpu_allowed));
this_cpu_write(kernel_fpu_allowed, true);
if (!irqs_disabled())
fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
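/*
 * A minimal sketch of the intended calling pattern for kernel-mode FPU use,
 * e.g. from a driver that wants to run SIMD code; the body is hypothetical:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX code operating on kernel buffers ...
 *		kernel_fpu_end();
 *	}
 *
 * The task must not sleep between kernel_fpu_begin() and kernel_fpu_end(),
 * and no FPU register contents may be relied upon across the end() call.
 */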
/*
* Sync the FPU register state to current's memory register state when the
* current task owns the FPU. The hardware register state is preserved.
*/
void fpu_sync_fpstate(struct fpu *fpu)
{
WARN_ON_FPU(fpu != x86_task_fpu(current));
fpregs_lock();
trace_x86_fpu_before_save(fpu);
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
save_fpregs_to_fpstate(fpu);
trace_x86_fpu_after_save(fpu);
fpregs_unlock();
}
static inline unsigned int init_fpstate_copy_size(void)
{
if (!use_xsave())
return fpu_kernel_cfg.default_size;
/* XSAVE(S) just needs the legacy and the xstate header part */
return sizeof(init_fpstate.regs.xsave);
}
static inline void fpstate_init_fxstate(struct fpstate *fpstate)
{
fpstate->regs.fxsave.cwd = 0x37f;
fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;
}
/*
* Legacy x87 fpstate state init:
*/
static inline void fpstate_init_fstate(struct fpstate *fpstate)
{
fpstate->regs.fsave.cwd = 0xffff037fu;
fpstate->regs.fsave.swd = 0xffff0000u;
fpstate->regs.fsave.twd = 0xffffffffu;
fpstate->regs.fsave.fos = 0xffff0000u;
}
/*
* Used in two places:
* 1) Early boot to setup init_fpstate for non XSAVE systems
* 2) fpu_alloc_guest_fpstate() which is invoked from KVM
*/
void fpstate_init_user(struct fpstate *fpstate)
{
if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
fpstate_init_soft(&fpstate->regs.soft);
return;
}
xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
if (cpu_feature_enabled(X86_FEATURE_FXSR))
fpstate_init_fxstate(fpstate);
else
fpstate_init_fstate(fpstate);
}
static void __fpstate_reset(struct fpstate *fpstate)
{
/*
* Supervisor features (and thus sizes) may diverge between guest
* FPUs and host FPUs, as some supervisor features are supported
* for guests despite not being utilized by the host. User
* features and sizes are always identical, which allows for
* common guest and userspace ABI.
*
* For the host, set XFD to the kernel's desired initialization
* value. For guests, set XFD to its architectural RESET value.
*/
if (fpstate->is_guest) {
fpstate->size = guest_default_cfg.size;
fpstate->xfeatures = guest_default_cfg.features;
fpstate->xfd = 0;
} else {
fpstate->size = fpu_kernel_cfg.default_size;
fpstate->xfeatures = fpu_kernel_cfg.default_features;
fpstate->xfd = init_fpstate.xfd;
}
fpstate->user_size = fpu_user_cfg.default_size;
fpstate->user_xfeatures = fpu_user_cfg.default_features;
}
void fpstate_reset(struct fpu *fpu)
{
/* Set the fpstate pointer to the default fpstate */
fpu->fpstate = &fpu->__fpstate;
__fpstate_reset(fpu->fpstate);
/* Initialize the permission related info in fpu */
fpu->perm.__state_perm = fpu_kernel_cfg.default_features;
fpu->perm.__state_size = fpu_kernel_cfg.default_size;
fpu->perm.__user_state_size = fpu_user_cfg.default_size;
fpu->guest_perm.__state_perm = guest_default_cfg.features;
fpu->guest_perm.__state_size = guest_default_cfg.size;
/*
* User features and sizes are always identical between host and
* guest FPUs, which allows for common guest and userspace ABI.
*/
fpu->guest_perm.__user_state_size = fpu_user_cfg.default_size;
}
static inline void fpu_inherit_perms(struct fpu *dst_fpu)
{
if (fpu_state_size_dynamic()) {
struct fpu *src_fpu = x86_task_fpu(current->group_leader);
spin_lock_irq(&current->sighand->siglock);
/* Fork also inherits the permissions of the parent */
dst_fpu->perm = src_fpu->perm;
dst_fpu->guest_perm = src_fpu->guest_perm;
spin_unlock_irq(&current->sighand->siglock);
}
}
/* A passed ssp of zero will not cause any update */
static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
{
#ifdef CONFIG_X86_USER_SHADOW_STACK
struct cet_user_state *xstate;
/* If ssp update is not needed. */
if (!ssp)
return 0;
xstate = get_xsave_addr(&x86_task_fpu(dst)->fpstate->regs.xsave,
XFEATURE_CET_USER);
/*
* If there is a non-zero ssp, then 'dst' must be configured with a shadow
* stack and the fpu state should be up to date since it was just copied
* from the parent in fpu_clone(). So there must be a valid non-init CET
* state location in the buffer.
*/
if (WARN_ON_ONCE(!xstate))
return 1;
xstate->user_ssp = (u64)ssp;
#endif
return 0;
}
/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst, u64 clone_flags, bool minimal,
unsigned long ssp)
{
/*
* We allocate the new FPU structure right after the end of the task struct.
* task allocation size already took this into account.
*
* This is safe because task_struct size is a multiple of cacheline size,
* thus x86_task_fpu() will always be cacheline aligned as well.
*/
struct fpu *dst_fpu = (void *)dst + sizeof(*dst);
BUILD_BUG_ON(sizeof(*dst) % SMP_CACHE_BYTES != 0);
/* The new task's FPU state cannot be valid in the hardware. */
dst_fpu->last_cpu = -1;
fpstate_reset(dst_fpu);
if (!cpu_feature_enabled(X86_FEATURE_FPU))
return 0;
/*
* Enforce reload for user space tasks and prevent kernel threads
* from trying to save the FPU registers on context switch.
*/
set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
/*
* No FPU state inheritance for kernel threads and IO
* worker threads.
*/
if (minimal) {
/* Clear out the minimal state */
memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
init_fpstate_copy_size());
return 0;
}
/*
* If a new feature is added, ensure all dynamic features are
* caller-saved from here!
*/
BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);
/*
* Save the default portion of the current FPU state into the
* clone. Assume all dynamic features to be defined as caller-
* saved, which enables skipping both the expansion of fpstate
* and the copying of any dynamic state.
*
* Do not use memcpy() when TIF_NEED_FPU_LOAD is set because
* copying is not valid when current uses non-default states.
*/
fpregs_lock();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
fpregs_restore_userregs();
save_fpregs_to_fpstate(dst_fpu);
fpregs_unlock();
if (!(clone_flags & CLONE_THREAD))
fpu_inherit_perms(dst_fpu);
/*
* Children never inherit PASID state.
* Force it to have its init value:
*/
if (use_xsave())
dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID;
/*
* Update shadow stack pointer, in case it changed during clone.
*/
if (update_fpu_shstk(dst, ssp))
return 1;
trace_x86_fpu_copy_dst(dst_fpu);
return 0;
}
/*
* While struct fpu is no longer part of struct thread_struct, it is still
* allocated after struct task_struct in the "task_struct" kmem cache. But
* since FPU is expected to be part of struct thread_struct, we have to
* adjust for it here.
*/
void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
{
/* The allocation follows struct task_struct. */
*offset = sizeof(struct task_struct) - offsetof(struct task_struct, thread);
*offset += offsetof(struct fpu, __fpstate.regs);
*size = fpu_kernel_cfg.default_size;
}
/*
* Drops current FPU state: deactivates the fpregs and
* the fpstate. NOTE: it still leaves previous contents
* in the fpregs in the eager-FPU case.
*
* This function can be used in cases where we know that
* a state-restore is coming: either an explicit one,
* or a reschedule.
*/
void fpu__drop(struct task_struct *tsk)
{
struct fpu *fpu;
if (test_tsk_thread_flag(tsk, TIF_NEED_FPU_LOAD))
return;
fpu = x86_task_fpu(tsk);
preempt_disable();
if (fpu == x86_task_fpu(current)) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
fpregs_deactivate(fpu);
}
trace_x86_fpu_dropped(fpu);
preempt_enable();
}
/*
* Clear FPU registers by setting them up from the init fpstate.
* Caller must do fpregs_[un]lock() around it.
*/
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
if (use_xsave())
os_xrstor(&init_fpstate, features_mask);
else if (use_fxsr())
fxrstor(&init_fpstate.regs.fxsave);
else
frstor(&init_fpstate.regs.fsave);
pkru_write_default();
}
/*
* Reset current->fpu memory state to the init values.
*/
static void fpu_reset_fpstate_regs(void)
{
struct fpu *fpu = x86_task_fpu(current);
fpregs_lock();
__fpu_invalidate_fpregs_state(fpu);
/*
* This does not change the actual hardware registers. It just
* resets the memory image and sets TIF_NEED_FPU_LOAD so a
* subsequent return to usermode will reload the registers from the
* task's memory image.
*
* Do not use fpstate_init() here. Just copy init_fpstate which has
* the correct content already except for PKRU.
*
* PKRU handling does not rely on the xstate when restoring for
* user space as PKRU is eagerly written in switch_to() and
* flush_thread().
*/
memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
set_thread_flag(TIF_NEED_FPU_LOAD);
fpregs_unlock();
}
/*
* Reset current's user FPU states to the init states. current's
* supervisor states, if any, are not modified by this function. The
* caller guarantees that the XSTATE header in memory is intact.
*/
void fpu__clear_user_states(struct fpu *fpu)
{
WARN_ON_FPU(fpu != x86_task_fpu(current));
fpregs_lock();
if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
fpu_reset_fpstate_regs();
fpregs_unlock();
return;
}
/*
* Ensure that current's supervisor states are loaded into their
* corresponding registers.
*/
if (xfeatures_mask_supervisor() &&
!fpregs_state_valid(fpu, smp_processor_id()))
os_xrstor_supervisor(fpu->fpstate);
/* Ensure XFD state is in sync before reloading XSTATE */
xfd_update_state(fpu->fpstate);
/* Reset user states in registers. */
restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);
/*
* Now all FPU registers have their desired values. Inform the FPU
* state machine that current's FPU registers are in the hardware
* registers. The memory image does not need to be updated because
* any operation relying on it has to save the registers first when
* current's FPU is marked active.
*/
fpregs_mark_activate();
fpregs_unlock();
}
void fpu_flush_thread(void)
{
fpstate_reset(x86_task_fpu(current));
fpu_reset_fpstate_regs();
}
/*
* Load FPU context before returning to userspace.
*/
void switch_fpu_return(void)
{
if (!static_cpu_has(X86_FEATURE_FPU))
return;
fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);
void fpregs_lock_and_load(void)
{
/*
* fpregs_lock() only disables preemption (mostly). So modifying state
* in an interrupt could screw up some in progress fpregs operation.
* Warn about it.
*/
WARN_ON_ONCE(!irq_fpu_usable());
WARN_ON_ONCE(current->flags & PF_KTHREAD);
fpregs_lock();
fpregs_assert_state_consistent();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
fpregs_restore_userregs();
}
#ifdef CONFIG_X86_DEBUG_FPU
/*
* If current FPU state according to its tracking (loaded FPU context on this
* CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
* loaded on return to userland.
*/
void fpregs_assert_state_consistent(void)
{
struct fpu *fpu = x86_task_fpu(current);
if (test_thread_flag(TIF_NEED_FPU_LOAD))
return;
WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif
void fpregs_mark_activate(void)
{
struct fpu *fpu = x86_task_fpu(current);
fpregs_activate(fpu);
fpu->last_cpu = smp_processor_id();
clear_thread_flag(TIF_NEED_FPU_LOAD);
}
/*
* x87 math exception handling:
*/
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
int err;
if (trap_nr == X86_TRAP_MF) {
unsigned short cwd, swd;
/*
* (~cwd & swd) will mask out exceptions that are not set to unmasked
* status. 0x3f is the exception bits in these regs, 0x200 is the
* C1 reg you need in case of a stack fault, 0x040 is the stack
* fault bit. We should only be taking one exception at a time,
* so if this combination doesn't produce any single exception,
* then we have a bad program that isn't synchronizing its FPU usage
* and it will suffer the consequences since we won't be able to
* fully reproduce the context of the exception.
*/
if (boot_cpu_has(X86_FEATURE_FXSR)) {
cwd = fpu->fpstate->regs.fxsave.cwd;
swd = fpu->fpstate->regs.fxsave.swd;
} else {
cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
}
err = swd & ~cwd;
} else {
/*
* The SIMD FPU exceptions are handled a little differently, as there
* is only a single status/control register. Thus, to determine which
* unmasked exception was caught we must mask the exception mask bits
* at 0x1f80, and then use these to mask the exception bits at 0x3f.
*/
unsigned short mxcsr = MXCSR_DEFAULT;
if (boot_cpu_has(X86_FEATURE_XMM))
mxcsr = fpu->fpstate->regs.fxsave.mxcsr;
err = ~(mxcsr >> 7) & mxcsr;
}
if (err & 0x001) { /* Invalid op */
/*
* swd & 0x240 == 0x040: Stack Underflow
* swd & 0x240 == 0x240: Stack Overflow
* User must clear the SF bit (0x40) if set
*/
return FPE_FLTINV;
} else if (err & 0x004) { /* Divide by Zero */
return FPE_FLTDIV;
} else if (err & 0x008) { /* Overflow */
return FPE_FLTOVF;
} else if (err & 0x012) { /* Denormal, Underflow */
return FPE_FLTUND;
} else if (err & 0x020) { /* Precision */
return FPE_FLTRES;
}
/*
* If we're using IRQ 13, or supposedly even some trap
* X86_TRAP_MF implementations, it's possible
* we get a spurious trap, which is not an error.
*/
return 0;
}
/*
* Initialize register state that may prevent entering low-power idle.
* This function will be invoked from the cpuidle driver only when needed.
*/
noinstr void fpu_idle_fpregs(void)
{
/* Note: AMX_TILE being enabled implies XGETBV1 support */
if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
(xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
tile_release();
__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <asm/asm.h>
typedef struct {
atomic_long_t a;
} local_t;
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local_read(l) atomic_long_read(&(l)->a)
#define local_set(l, i) atomic_long_set(&(l)->a, (i))
static inline void local_inc(local_t *l)
{
asm volatile(_ASM_INC "%0"
: "+m" (l->a.counter));
}
static inline void local_dec(local_t *l)
{
asm volatile(_ASM_DEC "%0"
: "+m" (l->a.counter));
}
static inline void local_add(long i, local_t *l)
{
asm volatile(_ASM_ADD "%1,%0"
: "+m" (l->a.counter)
: "ir" (i));
}
static inline void local_sub(long i, local_t *l)
{
asm volatile(_ASM_SUB "%1,%0"
: "+m" (l->a.counter)
: "ir" (i));
}
/**
* local_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @l: pointer to type local_t
*
* Atomically subtracts @i from @l and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline bool local_sub_and_test(long i, local_t *l)
{
return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
}
/**
* local_dec_and_test - decrement and test
* @l: pointer to type local_t
*
* Atomically decrements @l by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline bool local_dec_and_test(local_t *l)
{
return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
}
/**
* local_inc_and_test - increment and test
* @l: pointer to type local_t
*
* Atomically increments @l by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline bool local_inc_and_test(local_t *l)
{
return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
}
/**
* local_add_negative - add and test if negative
* @i: integer value to add
* @l: pointer to type local_t
*
* Atomically adds @i to @l and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline bool local_add_negative(long i, local_t *l)
{
return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i);
}
/**
* local_add_return - add and return
* @i: integer value to add
* @l: pointer to type local_t
*
* Atomically adds @i to @l and returns @i + @l
*/
static inline long local_add_return(long i, local_t *l)
{
long __i = i;
asm volatile(_ASM_XADD "%0, %1;"
: "+r" (i), "+m" (l->a.counter)
: : "memory");
return i + __i;
}
static inline long local_sub_return(long i, local_t *l)
{
return local_add_return(-i, l);
}
#define local_inc_return(l) (local_add_return(1, l))
#define local_dec_return(l) (local_sub_return(1, l))
static inline long local_cmpxchg(local_t *l, long old, long new)
{
return cmpxchg_local(&l->a.counter, old, new);
}
static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
{
return try_cmpxchg_local(&l->a.counter,
(typeof(l->a.counter) *) old, new);
}
/*
* Implement local_xchg using CMPXCHG instruction without the LOCK prefix.
* XCHG is expensive due to the implied LOCK prefix. The processor
* cannot prefetch cachelines if XCHG is used.
*/
static __always_inline long
local_xchg(local_t *l, long n)
{
long c = local_read(l);
do { } while (!local_try_cmpxchg(l, &c, n));
return c;
}
/**
* local_add_unless - add unless the number is already a given value
* @l: pointer of type local_t
* @a: the amount to add to l...
* @u: ...unless l is equal to u.
*
* Atomically adds @a to @l, if @l was not already @u.
* Returns true if the addition was done.
*/
static __always_inline bool
local_add_unless(local_t *l, long a, long u)
{
long c = local_read(l);
do {
if (unlikely(c == u))
return false;
} while (!local_try_cmpxchg(l, &c, c + a));
return true;
}
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
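/*
 * A minimal sketch of the intended usage pattern for local_t: per-CPU
 * counters that are only ever modified from the owning CPU and summed up
 * for reporting. The example_hits name is hypothetical:
 *
 *	static DEFINE_PER_CPU(local_t, example_hits);
 *
 *	update path (any context, local CPU only):
 *		local_inc(this_cpu_ptr(&example_hits));
 *
 *	read path (may run on any CPU):
 *		long total = 0;
 *		for_each_online_cpu(cpu)
 *			total += local_read(per_cpu_ptr(&example_hits, cpu));
 */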
/* On x86_32, these are no better than the atomic variants.
* On x86-64 these are better than the atomic variants on SMP kernels
* because they don't use a lock prefix.
*/
#define __local_inc(l) local_inc(l)
#define __local_dec(l) local_dec(l)
#define __local_add(i, l) local_add((i), (l))
#define __local_sub(i, l) local_sub((i), (l))
#endif /* _ASM_X86_LOCAL_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_PRINTK_RINGBUFFER_H
#define _KERNEL_PRINTK_RINGBUFFER_H
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/dev_printk.h>
#include <linux/stddef.h>
#include <linux/types.h>
/*
* Meta information about each stored message.
*
* All fields are set by the printk code except for @seq, which is
* set by the ringbuffer code.
*/
struct printk_info {
u64 seq; /* sequence number */
u64 ts_nsec; /* timestamp in nanoseconds */
u16 text_len; /* length of text message */
u8 facility; /* syslog facility */
u8 flags:5; /* internal record flags */
u8 level:3; /* syslog level */
u32 caller_id; /* thread id or processor id */
struct dev_printk_info dev_info;
};
/*
* A structure providing the buffers, used by writers and readers.
*
* Writers:
* Using prb_rec_init_wr(), a writer sets @text_buf_size before calling
* prb_reserve(). On success, prb_reserve() sets @info and @text_buf to
* buffers reserved for that writer.
*
* Readers:
* Using prb_rec_init_rd(), a reader sets all fields before calling
* prb_read_valid(). Note that the reader provides the @info and @text_buf,
* buffers. On success, the struct pointed to by @info will be filled and
* the char array pointed to by @text_buf will be filled with text data.
*/
struct printk_record {
struct printk_info *info;
char *text_buf;
unsigned int text_buf_size;
};
/* Specifies the logical position and span of a data block. */
struct prb_data_blk_lpos {
unsigned long begin;
unsigned long next;
};
/*
* A descriptor: the complete meta-data for a record.
*
* @state_var: A bitwise combination of descriptor ID and descriptor state.
*/
struct prb_desc {
atomic_long_t state_var;
struct prb_data_blk_lpos text_blk_lpos;
};
/* A ringbuffer of "ID + data" elements. */
struct prb_data_ring {
unsigned int size_bits;
char *data;
atomic_long_t head_lpos;
atomic_long_t tail_lpos;
};
/* A ringbuffer of "struct prb_desc" elements. */
struct prb_desc_ring {
unsigned int count_bits;
struct prb_desc *descs;
struct printk_info *infos;
atomic_long_t head_id;
atomic_long_t tail_id;
atomic_long_t last_finalized_seq;
};
/*
* The high level structure representing the printk ringbuffer.
*
* @fail: Count of failed prb_reserve() calls where not even a data-less
* record was created.
*/
struct printk_ringbuffer {
struct prb_desc_ring desc_ring;
struct prb_data_ring text_data_ring;
atomic_long_t fail;
};
/*
* Used by writers as a reserve/commit handle.
*
* @rb: Ringbuffer where the entry is reserved.
* @irqflags: Saved irq flags to restore on entry commit.
* @id: ID of the reserved descriptor.
* @text_space: Total occupied buffer space in the text data ring, including
* ID, alignment padding, and wrapping data blocks.
*
* This structure is an opaque handle for writers. Its contents are only
* to be used by the ringbuffer implementation.
*/
struct prb_reserved_entry {
struct printk_ringbuffer *rb;
unsigned long irqflags;
unsigned long id;
unsigned int text_space;
};
/* The possible responses of a descriptor state-query. */
enum desc_state {
desc_miss = -1, /* ID mismatch (pseudo state) */
desc_reserved = 0x0, /* reserved, in use by writer */
desc_committed = 0x1, /* committed by writer, could get reopened */
desc_finalized = 0x2, /* committed, no further modification allowed */
desc_reusable = 0x3, /* free, not yet used by any writer */
};
#define _DATA_SIZE(sz_bits) (1UL << (sz_bits))
#define _DESCS_COUNT(ct_bits) (1U << (ct_bits))
#define DESC_SV_BITS BITS_PER_LONG
#define DESC_FLAGS_SHIFT (DESC_SV_BITS - 2)
#define DESC_FLAGS_MASK (3UL << DESC_FLAGS_SHIFT)
#define DESC_STATE(sv) (3UL & (sv >> DESC_FLAGS_SHIFT))
#define DESC_SV(id, state) (((unsigned long)state << DESC_FLAGS_SHIFT) | id)
#define DESC_ID_MASK (~DESC_FLAGS_MASK)
#define DESC_ID(sv) ((sv) & DESC_ID_MASK)
/*
* Special data block logical position values (for fields of
* @prb_desc.text_blk_lpos).
*
* - Bit0 is used to identify if the record has no data block. (Implemented in
* the LPOS_DATALESS() macro.)
*
* - Bit1 specifies the reason for not having a data block.
*
* These special values could never be real lpos values because of the
* meta data and alignment padding of data blocks. (See to_blk_size() for
* details.)
*/
#define FAILED_LPOS 0x1
#define EMPTY_LINE_LPOS 0x3
#define FAILED_BLK_LPOS \
{ \
.begin = FAILED_LPOS, \
.next = FAILED_LPOS, \
}
/*
* Descriptor Bootstrap
*
* The descriptor array is minimally initialized to allow immediate usage
* by readers and writers. The requirements that the descriptor array
* initialization must satisfy:
*
* Req1
* The tail must point to an existing (committed or reusable) descriptor.
* This is required by the implementation of prb_first_seq().
*
* Req2
* Readers must see that the ringbuffer is initially empty.
*
* Req3
* The first record reserved by a writer is assigned sequence number 0.
*
* To satisfy Req1, the tail initially points to a descriptor that is
* minimally initialized (having no data block, i.e. data-less with the
* data block's lpos @begin and @next values set to FAILED_LPOS).
*
* To satisfy Req2, the initial tail descriptor is initialized to the
* reusable state. Readers recognize reusable descriptors as existing
* records, but skip over them.
*
* To satisfy Req3, the last descriptor in the array is used as the initial
* head (and tail) descriptor. This allows the first record reserved by a
* writer (head + 1) to be the first descriptor in the array. (Only the first
* descriptor in the array could have a valid sequence number of 0.)
*
* The first time a descriptor is reserved, it is assigned a sequence number
* with the value of the array index. A "first time reserved" descriptor can
* be recognized because it has a sequence number of 0 but does not have an
* index of 0. (Only the first descriptor in the array could have a valid
* sequence number of 0.) After the first reservation, all future reservations
* (recycling) simply involve incrementing the sequence number by the array
* count.
*
* Hack #1
* Only the first descriptor in the array is allowed to have the sequence
* number 0. In this case it is not possible to recognize if it is being
* reserved the first time (set to index value) or has been reserved
* previously (increment by the array count). This is handled by _always_
* incrementing the sequence number by the array count when reserving the
* first descriptor in the array. In order to satisfy Req3, the sequence
* number of the first descriptor in the array is initialized to minus
* the array count. Then, upon the first reservation, it is incremented
* to 0, thus satisfying Req3.
*
* Hack #2
* prb_first_seq() can be called at any time by readers to retrieve the
* sequence number of the tail descriptor. However, due to Req2 and Req3,
* initially there are no records to report the sequence number of
* (sequence numbers are u64 and there is nothing less than 0). To handle
* this, the sequence number of the initial tail descriptor is initialized
* to 0. Technically this is incorrect, because there is no record with
* sequence number 0 (yet) and the tail descriptor is not the first
* descriptor in the array. But it allows prb_read_valid() to correctly
* report the existence of a record for _any_ given sequence number at all
* times. Bootstrapping is complete when the tail is pushed the first
* time, thus finally pointing to the first descriptor reserved by a
* writer, which has the assigned sequence number 0.
*/
/*
* Initiating Logical Value Overflows
*
* Both logical position (lpos) and ID values can be mapped to array indexes
* but may experience overflows during the lifetime of the system. To ensure
* that printk_ringbuffer can handle the overflows for these types, initial
* values are chosen that map to the correct initial array indexes, but will
* result in overflows soon.
*
* BLK0_LPOS
* The initial @head_lpos and @tail_lpos for data rings. It is at index
* 0 and the lpos value is such that it will overflow on the first wrap.
*
* DESC0_ID
* The initial @head_id and @tail_id for the desc ring. It is at the last
* index of the descriptor array (see Req3 above) and the ID value is such
* that it will overflow on the second wrap.
*/
#define BLK0_LPOS(sz_bits) (-(_DATA_SIZE(sz_bits)))
#define DESC0_ID(ct_bits) DESC_ID(-(_DESCS_COUNT(ct_bits) + 1))
#define DESC0_SV(ct_bits) DESC_SV(DESC0_ID(ct_bits), desc_reusable)
/*
* Define a ringbuffer with an external text data buffer. The same as
* DEFINE_PRINTKRB() but requires specifying an external buffer for the
* text data.
*
* Note: The specified external buffer must be of the size:
* 2 ^ (descbits + avgtextbits)
*/
#define _DEFINE_PRINTKRB(name, descbits, avgtextbits, text_buf) \
static struct prb_desc _##name##_descs[_DESCS_COUNT(descbits)] = { \
/* the initial head and tail */ \
[_DESCS_COUNT(descbits) - 1] = { \
/* reusable */ \
.state_var = ATOMIC_INIT(DESC0_SV(descbits)), \
/* no associated data block */ \
.text_blk_lpos = FAILED_BLK_LPOS, \
}, \
}; \
static struct printk_info _##name##_infos[_DESCS_COUNT(descbits)] = { \
/* this will be the first record reserved by a writer */ \
[0] = { \
/* will be incremented to 0 on the first reservation */ \
.seq = -(u64)_DESCS_COUNT(descbits), \
}, \
/* the initial head and tail */ \
[_DESCS_COUNT(descbits) - 1] = { \
/* reports the first seq value during the bootstrap phase */ \
.seq = 0, \
}, \
}; \
static struct printk_ringbuffer name = { \
.desc_ring = { \
.count_bits = descbits, \
.descs = &_##name##_descs[0], \
.infos = &_##name##_infos[0], \
.head_id = ATOMIC_INIT(DESC0_ID(descbits)), \
.tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \
.last_finalized_seq = ATOMIC_INIT(0), \
}, \
.text_data_ring = { \
.size_bits = (avgtextbits) + (descbits), \
.data = text_buf, \
.head_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
.tail_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
}, \
.fail = ATOMIC_LONG_INIT(0), \
}
/**
* DEFINE_PRINTKRB() - Define a ringbuffer.
*
* @name: The name of the ringbuffer variable.
* @descbits: The number of descriptors as a power-of-2 value.
* @avgtextbits: The average text data size per record as a power-of-2 value.
*
* This is a macro for defining a ringbuffer and all internal structures
* such that it is ready for immediate use. See _DEFINE_PRINTKRB() for a
* variant where the text data buffer can be specified externally.
*/
#define DEFINE_PRINTKRB(name, descbits, avgtextbits) \
static char _##name##_text[1U << ((avgtextbits) + (descbits))] \
__aligned(__alignof__(unsigned long)); \
_DEFINE_PRINTKRB(name, descbits, avgtextbits, &_##name##_text[0])
/* Writer Interface */
/**
* prb_rec_init_wr() - Initialize a buffer for writing records.
*
* @r: The record to initialize.
* @text_buf_size: The needed text buffer size.
*/
static inline void prb_rec_init_wr(struct printk_record *r,
unsigned int text_buf_size)
{
r->info = NULL;
r->text_buf = NULL;
r->text_buf_size = text_buf_size;
}
bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
struct printk_record *r);
bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
struct printk_record *r, u32 caller_id, unsigned int max_size);
void prb_commit(struct prb_reserved_entry *e);
void prb_final_commit(struct prb_reserved_entry *e);
void prb_init(struct printk_ringbuffer *rb,
char *text_buf, unsigned int text_buf_size,
struct prb_desc *descs, unsigned int descs_count_bits,
struct printk_info *infos);
unsigned int prb_record_text_space(struct prb_reserved_entry *e);
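/*
 * A minimal writer sketch for a ringbuffer named test_rb (a hypothetical
 * instance defined with DEFINE_PRINTKRB()), reserving up to 32 bytes of
 * text and committing the record:
 *
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	prb_rec_init_wr(&r, 32);
 *
 *	if (prb_reserve(&e, &test_rb, &r)) {
 *		snprintf(r.text_buf, r.text_buf_size, "hello, world!");
 *		r.info->text_len = strlen(r.text_buf);
 *		r.info->ts_nsec = local_clock();
 *		r.info->caller_id = printk_caller_id();
 *		prb_commit(&e);
 *	}
 */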
/* Reader Interface */
/**
* prb_rec_init_rd() - Initialize a buffer for reading records.
*
* @r: The record to initialize.
* @info: A buffer to store record meta-data.
* @text_buf: A buffer to store text data.
* @text_buf_size: The size of @text_buf.
*
* Initialize all the fields that a reader is interested in. All arguments
* (except @r) are optional. Only record data for arguments that are
* non-NULL or non-zero will be read.
*/
static inline void prb_rec_init_rd(struct printk_record *r,
struct printk_info *info,
char *text_buf, unsigned int text_buf_size)
{
r->info = info;
r->text_buf = text_buf;
r->text_buf_size = text_buf_size;
}
/**
* prb_for_each_record() - Iterate over the records of a ringbuffer.
*
* @from: The sequence number to begin with.
* @rb: The ringbuffer to iterate over.
* @s: A u64 to store the sequence number on each iteration.
* @r: A printk_record to store the record on each iteration.
*
* This is a macro for conveniently iterating over a ringbuffer.
* Note that @s may not be the sequence number of the record on each
* iteration. For the sequence number, @r->info->seq should be checked.
*
* Context: Any context.
*/
#define prb_for_each_record(from, rb, s, r) \
for ((s) = from; prb_read_valid(rb, s, r); (s) = (r)->info->seq + 1)
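/*
 * A minimal reader sketch for the same hypothetical test_rb ringbuffer,
 * walking every available record from the beginning:
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[32];
 *	u64 seq;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
 *
 *	prb_for_each_record(0, &test_rb, seq, &r) {
 *		if (info.seq != seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *
 *		if (info.text_len > r.text_buf_size) {
 *			pr_warn("record %llu text truncated\n", info.seq);
 *			text_buf[r.text_buf_size - 1] = 0;
 *		}
 *
 *		pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec,
 *			&text_buf[0]);
 *	}
 */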
/**
* prb_for_each_info() - Iterate over the meta data of a ringbuffer.
*
* @from: The sequence number to begin with.
* @rb: The ringbuffer to iterate over.
* @s: A u64 to store the sequence number on each iteration.
* @i: A printk_info to store the record meta data on each iteration.
* @lc: An unsigned int to store the text line count of each record.
*
* This is a macro for conveniently iterating over a ringbuffer.
* Note that @s may not be the sequence number of the record on each
* iteration. For the sequence number, @i->seq should be checked.
*
* Context: Any context.
*/
#define prb_for_each_info(from, rb, s, i, lc) \
for ((s) = from; prb_read_valid_info(rb, s, i, lc); (s) = (i)->seq + 1)
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
struct printk_record *r);
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
struct printk_info *info, unsigned int *line_count);
u64 prb_first_seq(struct printk_ringbuffer *rb);
u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
u64 prb_next_seq(struct printk_ringbuffer *rb);
u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
#ifdef CONFIG_64BIT
#define __u64seq_to_ulseq(u64seq) (u64seq)
#define __ulseq_to_u64seq(rb, ulseq) (ulseq)
#define ULSEQ_MAX(rb) (-1)
#else /* CONFIG_64BIT */
#define __u64seq_to_ulseq(u64seq) ((u32)u64seq)
#define ULSEQ_MAX(rb) __u64seq_to_ulseq(prb_first_seq(rb) + 0x80000000UL)
static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
{
u64 rb_first_seq = prb_first_seq(rb);
u64 seq;
/*
* The provided sequence is only the lower 32 bits of the ringbuffer
* sequence. It needs to be expanded to 64bit. Get the first sequence
* number from the ringbuffer and fold it.
*
* Having a 32bit representation in the console is sufficient.
* If a console ever gets more than 2^31 records behind
* the ringbuffer then this is the least of the problems.
*
* Also the access to the ring buffer is always safe.
*/
seq = rb_first_seq - (s32)((u32)rb_first_seq - ulseq);
return seq;
}
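/*
 * For example, with rb_first_seq == 0x100000010, a console-provided ulseq
 * of 0x0000000c unfolds to 0x10000000c, while an older ulseq of 0xfffffff0
 * (from before the most recent 32-bit wrap) unfolds to 0xfffffff0: the
 * signed 32-bit difference walks backwards across the wrap correctly.
 */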
#endif /* CONFIG_64BIT */
#endif /* _KERNEL_PRINTK_RINGBUFFER_H */
// SPDX-License-Identifier: GPL-2.0
/*
* Kernel internal schedule timeout and sleeping functions
*/
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include "tick-internal.h"
/*
* Since schedule_timeout()'s timer is defined on the stack, it must store
* the target task on the stack as well.
*/
struct process_timer {
struct timer_list timer;
struct task_struct *task;
};
static void process_timeout(struct timer_list *t)
{
struct process_timer *timeout = timer_container_of(timeout, t, timer);
wake_up_process(timeout->task);
}
/**
* schedule_timeout - sleep until timeout
* @timeout: timeout value in jiffies
*
* Make the current task sleep until @timeout jiffies have elapsed.
* The function behavior depends on the current task state
* (see also set_current_state() description):
*
* %TASK_RUNNING - the scheduler is called, but the task does not sleep
* at all. That happens because sched_submit_work() does nothing for
* tasks in %TASK_RUNNING state.
*
* %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
* pass before the routine returns unless the current task is explicitly
* woken up, (e.g. by wake_up_process()).
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
* delivered to the current task or the current task is explicitly woken
* up.
*
* The current task state is guaranteed to be %TASK_RUNNING when this
* routine returns.
*
* Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
* the CPU away without a bound on the timeout. In this case the return
* value will be %MAX_SCHEDULE_TIMEOUT.
*
* Returns: 0 when the timer has expired, otherwise the remaining time in
* jiffies will be returned. In all cases the return value is guaranteed
* to be non-negative.
*/
signed long __sched schedule_timeout(signed long timeout)
{
struct process_timer timer;
unsigned long expire;
switch (timeout) {
case MAX_SCHEDULE_TIMEOUT:
/*
* This special case exists purely for the caller's convenience.
* Nothing more. We could have picked MAX_SCHEDULE_TIMEOUT from
* among the negative values, but I'd like to return a valid
* offset (>= 0) to allow the caller to do whatever it wants
* with the retval.
*/
schedule();
goto out;
default:
/*
* Another bit of paranoia. Note that the retval will be
* 0 since no piece of the kernel is supposed to check
* for a negative retval of schedule_timeout() (it should
* never happen anyway). You just get the pr_err() and
* dump_stack() below, which tell you that something went
* wrong and where.
*/
if (timeout < 0) {
pr_err("%s: wrong timeout value %lx\n", __func__, timeout);
dump_stack();
__set_current_state(TASK_RUNNING);
goto out;
}
}
expire = timeout + jiffies;
timer.task = current;
timer_setup_on_stack(&timer.timer, process_timeout, 0);
timer.timer.expires = expire;
add_timer(&timer.timer);
schedule();
timer_delete_sync(&timer.timer);
/* Remove the timer from the object tracker */
timer_destroy_on_stack(&timer.timer);
timeout = expire - jiffies;
out:
return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
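/*
 * Illustrative usage sketch (not part of the original file): sleep for up to
 * one second, returning early on an explicit wakeup or a signal.
 *
 *   signed long remaining;
 *
 *   set_current_state(TASK_INTERRUPTIBLE);
 *   remaining = schedule_timeout(HZ);
 *   if (remaining)
 *           pr_debug("woken up %ld jiffies early\n", remaining);
 */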
/*
* __set_current_state() can be used in schedule_timeout_*() functions, because
* schedule_timeout() calls schedule() unconditionally.
*/
/**
* schedule_timeout_interruptible - sleep until timeout (interruptible)
* @timeout: timeout value in jiffies
*
* See schedule_timeout() for details.
*
* Task state is set to TASK_INTERRUPTIBLE before starting the timeout.
*/
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
__set_current_state(TASK_INTERRUPTIBLE);
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);
/**
* schedule_timeout_killable - sleep until timeout (killable)
* @timeout: timeout value in jiffies
*
* See schedule_timeout() for details.
*
* Task state is set to TASK_KILLABLE before starting the timeout.
*/
signed long __sched schedule_timeout_killable(signed long timeout)
{
__set_current_state(TASK_KILLABLE);
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);
/**
* schedule_timeout_uninterruptible - sleep until timeout (uninterruptible)
* @timeout: timeout value in jiffies
*
* See schedule_timeout() for details.
*
* Task state is set to TASK_UNINTERRUPTIBLE before starting the timeout.
*/
signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
__set_current_state(TASK_UNINTERRUPTIBLE);
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/**
* schedule_timeout_idle - sleep until timeout (idle)
* @timeout: timeout value in jiffies
*
* See schedule_timeout() for details.
*
* Task state is set to TASK_IDLE before starting the timeout. It is similar to
* schedule_timeout_uninterruptible(), except this task will not contribute to
* load average.
*/
signed long __sched schedule_timeout_idle(signed long timeout)
{
__set_current_state(TASK_IDLE);
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);
/**
* schedule_hrtimeout_range_clock - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
* @mode: timer mode
* @clock_id: timer clock to be used
*
* Details are explained in schedule_hrtimeout_range() function description as
* this function is commonly used.
*/
int __sched schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode, clockid_t clock_id)
{
struct hrtimer_sleeper t;
/*
* Optimize when a zero timeout value is given. It does not
* matter whether this is an absolute or a relative time.
*/
if (expires && *expires == 0) {
__set_current_state(TASK_RUNNING);
return 0;
}
/*
* A NULL parameter means "infinite"
*/
if (!expires) {
schedule();
return -EINTR;
}
hrtimer_setup_sleeper_on_stack(&t, clock_id, mode);
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_sleeper_start_expires(&t, mode);
if (likely(t.task))
schedule();
hrtimer_cancel(&t.timer);
destroy_hrtimer_on_stack(&t.timer);
__set_current_state(TASK_RUNNING);
return !t.task ? 0 : -EINTR;
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
/**
* schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
* @mode: timer mode
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
* the current task state has been set (see set_current_state()).
*
* The @delta argument gives the kernel the freedom to schedule the
* actual wakeup to a time that is both power and performance friendly
* for regular (non RT/DL) tasks.
* The kernel gives the normal best effort behavior for "@expires+@delta",
* but may decide to fire the timer earlier, though never earlier than @expires.
*
* You can set the task state as follows -
*
* %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
* pass before the routine returns unless the current task is explicitly
* woken up, (e.g. by wake_up_process()).
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
* delivered to the current task or the current task is explicitly woken
* up.
*
* The current task state is guaranteed to be TASK_RUNNING when this
* routine returns.
*
* Returns: 0 when the timer has expired. If the task was woken before the
* timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
* by an explicit wakeup, it returns -EINTR.
*/
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode)
{
return schedule_hrtimeout_range_clock(expires, delta, mode,
CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
/**
* schedule_hrtimeout - sleep until timeout
* @expires: timeout value (ktime_t)
* @mode: timer mode
*
* See schedule_hrtimeout_range() for details. @delta argument of
* schedule_hrtimeout_range() is set to 0 and has therefore no impact.
*/
int __sched schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode)
{
return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
/**
* msleep - sleep safely even with waitqueue interruptions
* @msecs: Requested sleep duration in milliseconds
*
* msleep() uses jiffy based timeouts for the sleep duration. Because of the
* design of the timer wheel, the maximum additional percentage delay (slack) is
* 12.5%. This is only valid for timers which will end up in level 1 or a higher
* level of the timer wheel. For explanation of those 12.5% please check the
* detailed description about the basics of the timer wheel.
*
* The slack of timers which will end up in level 0 depends on sleep duration
* (msecs) and HZ configuration and can be calculated in the following way (with
* the timer wheel design restriction that the slack is not less than 12.5%):
*
* ``slack = MSECS_PER_TICK / msecs``
*
* When the allowed slack of the callsite is known, the calculation could be
* turned around to find the minimal allowed sleep duration to meet the
* constraints. For example:
*
* * ``HZ=1000`` with ``slack=25%``: ``MSECS_PER_TICK / slack = 1 / (1/4) = 4``:
* all sleep durations greater or equal 4ms will meet the constraints.
* * ``HZ=1000`` with ``slack=12.5%``: ``MSECS_PER_TICK / slack = 1 / (1/8) = 8``:
* all sleep durations greater or equal 8ms will meet the constraints.
* * ``HZ=250`` with ``slack=25%``: ``MSECS_PER_TICK / slack = 4 / (1/4) = 16``:
* all sleep durations greater or equal 16ms will meet the constraints.
* * ``HZ=250`` with ``slack=12.5%``: ``MSECS_PER_TICK / slack = 4 / (1/8) = 32``:
* all sleep durations greater or equal 32ms will meet the constraints.
*
* See also the signal aware variant msleep_interruptible().
*/
void msleep(unsigned int msecs)
{
unsigned long timeout = msecs_to_jiffies(msecs);
while (timeout)
timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
/**
* msleep_interruptible - sleep waiting for signals
* @msecs: Requested sleep duration in milliseconds
*
* See msleep() for some basic information.
*
* The difference between msleep() and msleep_interruptible() is that the sleep
* could be interrupted by a signal delivery and then returns early.
*
* Returns: The remaining time of the sleep duration transformed to msecs (see
* schedule_timeout() for details).
*/
unsigned long msleep_interruptible(unsigned int msecs)
{
unsigned long timeout = msecs_to_jiffies(msecs);
while (timeout && !signal_pending(current))
timeout = schedule_timeout_interruptible(timeout);
return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
/**
* usleep_range_state - Sleep for an approximate time in a given state
* @min: Minimum time in usecs to sleep
* @max: Maximum time in usecs to sleep
* @state: State the current task will be in while sleeping
*
* usleep_range_state() sleeps at least for the minimum specified time but not
* longer than the maximum specified amount of time. The range might reduce
* power usage by allowing hrtimers to coalesce an already scheduled interrupt
* with this hrtimer. In the worst case, an interrupt is scheduled for the upper
* bound.
*
* The sleeping task is set to the specified state before starting the sleep.
*
* In non-atomic context where the exact wakeup time is flexible, use
* usleep_range() or its variants instead of udelay(). The sleep improves
* responsiveness by avoiding the CPU-hogging busy-wait of udelay().
*/
void __sched usleep_range_state(unsigned long min, unsigned long max, unsigned int state)
{
ktime_t exp = ktime_add_us(ktime_get(), min);
u64 delta = (u64)(max - min) * NSEC_PER_USEC;
if (WARN_ON_ONCE(max < min))
delta = 0;
for (;;) {
__set_current_state(state);
/* Do not return before the requested sleep time has elapsed */
if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
break;
}
}
EXPORT_SYMBOL(usleep_range_state);
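/*
 * Illustrative sketch (assumption: usleep_range() is the common wrapper that
 * calls usleep_range_state() with TASK_UNINTERRUPTIBLE): sleep roughly half a
 * millisecond while giving the hrtimer a 100us window to coalesce with other
 * pending wakeups.
 *
 *   usleep_range(500, 600);
 */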
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H
#include <linux/sched/task_stack.h>
struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to_asm(struct task_struct *prev,
struct task_struct *next);
__visible struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next);
asmlinkage void ret_from_fork_asm(void);
__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
int (*fn)(void *), void *fn_arg);
/*
* This is the structure pointed to by thread.sp for an inactive task. The
* order of the fields must match the code in __switch_to_asm().
*/
struct inactive_task_frame {
#ifdef CONFIG_X86_64
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
#else
unsigned long flags;
unsigned long si;
unsigned long di;
#endif
unsigned long bx;
/*
* These two fields must be together. They form a stack frame header,
* needed by get_frame_pointer().
*/
unsigned long bp;
unsigned long ret_addr;
};
struct fork_frame {
struct inactive_task_frame frame;
struct pt_regs regs;
};
#define switch_to(prev, next, last) \
do { \
((last) = __switch_to_asm((prev), (next))); \
} while (0)
#ifdef CONFIG_X86_32
#include <asm/msr.h>
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
/* Only happens when SEP is enabled, no need to test "SEP"arately: */
if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
return;
this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
wrmsrq(MSR_IA32_SYSENTER_CS, thread->sysenter_cs);
}
#endif
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_task_stack(struct task_struct *task)
{
/* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
if (!cpu_feature_enabled(X86_FEATURE_FRED) && cpu_feature_enabled(X86_FEATURE_XENPV))
/* Xen PV enters the kernel on the thread stack. */
load_sp0(task_top_of_stack(task));
#endif
}
static inline void kthread_frame_init(struct inactive_task_frame *frame,
int (*fun)(void *), void *arg)
{
frame->bx = (unsigned long)fun;
#ifdef CONFIG_X86_32
frame->di = (unsigned long)arg;
#else
frame->r12 = (unsigned long)arg;
#endif
}
#endif /* _ASM_X86_SWITCH_TO_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Macros for manipulating and testing page->flags
*/
#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */
/*
* Various page->flags bits:
*
* PG_reserved is set for special pages. The "struct page" of such a page
* should in general not be touched (e.g. set dirty) except by its owner.
* Pages marked as PG_reserved include:
* - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
* initrd, HW tables)
* - Pages reserved or allocated early during boot (before the page allocator
* was initialized). This includes (depending on the architecture) the
* initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
* much more. Once (if ever) freed, PG_reserved is cleared and they will
* be given to the page allocator.
* - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
* to read/write these pages might end badly. Don't touch!
* - The zero page(s)
* - Pages allocated in the context of kexec/kdump (loaded kernel image,
* control pages, vmcoreinfo)
* - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
* not marked PG_reserved (as they might be in use by somebody else who does
* not respect the caching strategy).
* - MCA pages on ia64
* - Pages holding CPU notes for POWER Firmware Assisted Dump
* - Device memory (e.g. PMEM, DAX, HMM)
* Some PG_reserved pages will be excluded from the hibernation image.
* PG_reserved does in general not hinder anybody from dumping or swapping
* and is no longer required for remap_pfn_range(). ioremap might require it.
* Consequently, PG_reserved for a page mapped into user space can indicate
* the zero page, the vDSO, MMIO pages or device memory.
*
* The PG_private bitflag is set on pagecache pages if they contain filesystem
* specific data (which is normally at page->private). It can be used by
* private allocations for its own usage.
*
* During initiation of disk I/O, PG_locked is set. This bit is set before I/O
* and cleared when writeback _starts_ or when read _completes_. PG_writeback
* is set before writeback starts and cleared when it finishes.
*
* PG_locked also pins a page in pagecache, and blocks truncation of the file
* while it is held.
*
* page_waitqueue(page) is a wait queue of all tasks waiting for the page
* to become unlocked.
*
* PG_swapbacked is set when a page uses swap as backing storage. These are
* usually PageAnon or shmem pages but please note that even anonymous pages
* might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
* a result of MADV_FREE).
*
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
* PG_arch_1 is an architecture specific page state bit. The generic code
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
*
* PG_hwpoison indicates that a page got corrupted in hardware and contains
* data with incorrect ECC bits that triggered a machine check. Accessing is
* not safe since it may cause another machine check. Don't touch!
*/
/*
* Don't use the pageflags directly. Use the PageFoo macros.
*
* The page flags field is split into two parts, the main flags area
* which extends from the low bits upwards, and the fields area which
* extends from the high bits downwards.
*
* | FIELD | ... | FLAGS |
* N-1 ^ 0
* (NR_PAGEFLAGS)
*
* The fields area is reserved for fields mapping zone, node (for NUMA) and
* SPARSEMEM section (for variants of SPARSEMEM that require section ids like
* SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
PG_writeback, /* Page is under writeback */
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_head, /* Must be in bit 6 */
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
PG_owner_2, /* Owner use. If pagecache, fs may use */
PG_arch_1,
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
PG_dropbehind, /* drop pages on IO completion */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
PG_young,
PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
PG_arch_2,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
PG_arch_3,
#endif
__NR_PAGEFLAGS,
PG_readahead = PG_reclaim,
/* Anonymous memory (and shmem) */
PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
/* Some filesystems */
PG_checked = PG_owner_priv_1,
/*
* Depending on the way an anonymous folio can be mapped into a page
* table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
* THP), PG_anon_exclusive may be set only for the head page or for
* tail pages of an anonymous folio. For now, we only expect it to be
* set on tail pages for PTE-mapped THP.
*/
PG_anon_exclusive = PG_owner_2,
/*
* Set if all buffer heads in the folio are mapped.
* Filesystems which do not use BHs can use it for their own purpose.
*/
PG_mappedtodisk = PG_owner_2,
/* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes
* when those inodes are being locally cached.
*/
PG_fscache = PG_private_2, /* page backed by cache */
/* XEN */
/* Pinned in Xen as a read-only pagetable page. */
PG_pinned = PG_owner_priv_1,
/* Pinned as part of domain save (see xen_mm_pin_all()). */
PG_savepinned = PG_dirty,
/* Has a grant mapping of another (foreign) domain's page. */
PG_foreign = PG_owner_priv_1,
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
#ifdef CONFIG_MIGRATION
/* movable_ops page that is isolated for migration */
PG_movable_ops_isolated = PG_reclaim,
/* this is a movable_ops page (for selected typed pages only) */
PG_movable_ops = PG_uptodate,
#endif
/* Only valid for buddy pages. Used to track pages that are reported */
PG_reported = PG_uptodate,
#ifdef CONFIG_MEMORY_HOTPLUG
/* For self-hosted memmap pages */
PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif
/*
* Flags only valid for compound pages. Stored in first tail page's
* flags word. Cannot use the first 8 flags or any flag marked as
* PF_ANY.
*/
/* At least one page in this folio has the hwpoison flag set */
PG_has_hwpoisoned = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
#ifndef __GENERATING_BOUNDS_H
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
/*
* Return the real head page struct iff the @page is a fake head page, otherwise
* return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
*/
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
return page;
/*
* Only addresses aligned with PAGE_SIZE of struct page may be fake head
* struct page. The alignment check aims to avoid accessing the fields
* (e.g. compound_head) of @page[1], which avoids touching a (possibly)
* cold cacheline in some cases.
*/
if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) && test_bit(PG_head, &page->flags.f)) {
/*
* We can safely access the field of the @page[1] with PG_head
* because the @page is a compound page composed with at least
* two contiguous pages.
*/
unsigned long head = READ_ONCE(page[1].compound_head);
if (likely(head & 1))
return (const struct page *)(head - 1);
}
return page;
}
static __always_inline bool page_count_writable(const struct page *page, int u)
{
if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
return true;
/*
* The refcount check is ordered before the fake-head check to prevent
* the following race:
* CPU 1 (HVO) CPU 2 (speculative PFN walker)
*
* page_ref_freeze()
* synchronize_rcu()
* rcu_read_lock()
* page_is_fake_head() is false
* vmemmap_remap_pte()
* XXX: struct page[] becomes r/o
*
* page_ref_unfreeze()
* page_ref_count() is not zero
*
* atomic_add_unless(&page->_refcount)
* XXX: try to modify r/o struct page[]
*
* The refcount check also prevents modification attempts to other (r/o)
* tail pages that are not fake heads.
*/
if (atomic_read_acquire(&page->_refcount) == u)
return false;
return page_fixed_fake_head(page) == page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
return page;
}
static inline bool page_count_writable(const struct page *page, int u)
{
return true;
}
#endif
static __always_inline int page_is_fake_head(const struct page *page)
{
return page_fixed_fake_head(page) != page;
}
static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
if (unlikely(head & 1))
return head - 1;
return (unsigned long)page_fixed_fake_head(page);
}
#define compound_head(page) ((typeof(page))_compound_head(page))
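/*
 * Illustrative sketch (not part of the original header): compound_head()
 * maps a tail page back to its head page (and handles HVO fake heads); for
 * an ordinary non-tail page it returns the page itself, so per-compound
 * state can always be reached from any subpage.
 *
 *   struct page *head = compound_head(page);
 *
 *   if (head != page)
 *           ...; // 'page' was a tail page of a larger allocation
 */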
/**
* page_folio - Converts from page to folio.
* @p: The page.
*
* Every page is part of a folio. This function cannot be called on a
* NULL pointer.
*
* Context: No reference, nor lock is required on @p. If the caller
* does not hold a reference, this call may race with a folio split, so
* it should re-check the folio still contains this page after gaining
* a reference on the folio.
* Return: The folio which contains this page.
*/
#define page_folio(p) (_Generic((p), \
const struct page *: (const struct folio *)_compound_head(p), \
struct page *: (struct folio *)_compound_head(p)))
/**
* folio_page - Return a page from a folio.
* @folio: The folio.
* @n: The page number to return.
*
* @n is relative to the start of the folio. This function does not
* check that the page number lies within @folio; the caller is presumed
* to have a reference to the page.
*/
#define folio_page(folio, n) (&(folio)->page + (n))
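/*
 * Illustrative sketch (not part of the original header): page_folio() and
 * folio_page() convert between the two views; for an order-0 page both are
 * effectively the identity.
 *
 *   struct folio *folio = page_folio(page);
 *   struct page *first = folio_page(folio, 0); // == compound_head(page)
 */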
static __always_inline int PageTail(const struct page *page)
{
return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}
static __always_inline int PageCompound(const struct page *page)
{
return test_bit(PG_head, &page->flags.f) ||
READ_ONCE(page->compound_head) & 1;
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif
static const unsigned long *const_folio_flags(const struct folio *folio,
unsigned n)
{
const struct page *page = &folio->page;
VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
return &page[n].flags.f;
}
static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
struct page *page = &folio->page;
VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
return &page[n].flags.f;
}
/*
* Page flags policies wrt compound pages
*
* PF_POISONED_CHECK
* check if this struct page poisoned/uninitialized
*
* PF_ANY:
* the page flag is relevant for small, head and tail pages.
*
* PF_HEAD:
* for compound page all operations related to the page flag applied to
* head page.
*
* PF_NO_TAIL:
* modifications of the page flag must be done on small or head pages,
* checks can be done on tail pages too.
*
* PF_NO_COMPOUND:
* the page flag is not relevant for compound pages.
*
* PF_SECOND:
* the page flag is stored in the first tail page.
*/
#define PF_POISONED_CHECK(page) ({ \
VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
page; })
#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
PF_POISONED_CHECK(&page[1]); })
/* Which page is the flag stored in */
#define FOLIO_PF_ANY 0
#define FOLIO_PF_HEAD 0
#define FOLIO_PF_NO_TAIL 0
#define FOLIO_PF_NO_COMPOUND 0
#define FOLIO_PF_SECOND 1
#define FOLIO_HEAD_PAGE 0
#define FOLIO_SECOND_PAGE 1
/*
* Macros to create function definitions for page flags
*/
#define FOLIO_TEST_FLAG(name, page) \
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }
#define FOLIO_SET_FLAG(name, page) \
static __always_inline void folio_set_##name(struct folio *folio) \
{ set_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_CLEAR_FLAG(name, page) \
static __always_inline void folio_clear_##name(struct folio *folio) \
{ clear_bit(PG_##name, folio_flags(folio, page)); }
#define __FOLIO_SET_FLAG(name, page) \
static __always_inline void __folio_set_##name(struct folio *folio) \
{ __set_bit(PG_##name, folio_flags(folio, page)); }
#define __FOLIO_CLEAR_FLAG(name, page) \
static __always_inline void __folio_clear_##name(struct folio *folio) \
{ __clear_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_TEST_SET_FLAG(name, page) \
static __always_inline bool folio_test_set_##name(struct folio *folio) \
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_TEST_CLEAR_FLAG(name, page) \
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }
#define FOLIO_FLAG(name, page) \
FOLIO_TEST_FLAG(name, page) \
FOLIO_SET_FLAG(name, page) \
FOLIO_CLEAR_FLAG(name, page)
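/*
 * Expansion sketch (illustrative, not generated code): FOLIO_FLAG(active,
 * FOLIO_HEAD_PAGE), used further below, produces the three accessors
 *
 *   bool folio_test_active(const struct folio *folio);
 *   void folio_set_active(struct folio *folio);
 *   void folio_clear_active(struct folio *folio);
 *
 * all operating on the PG_active bit of the head page's flags word.
 */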
#define TESTPAGEFLAG(uname, lname, policy) \
FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
static __always_inline int Page##uname(const struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags.f); }
#define SETPAGEFLAG(uname, lname, policy) \
FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
{ set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define CLEARPAGEFLAG(uname, lname, policy) \
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
{ clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __SETPAGEFLAG(uname, lname, policy) \
__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
{ __set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTSETFLAG(uname, lname, policy) \
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTCLEARFLAG(uname, lname, policy) \
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
SETPAGEFLAG(uname, lname, policy) \
CLEARPAGEFLAG(uname, lname, policy)
#define __PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
__SETPAGEFLAG(uname, lname, policy) \
__CLEARPAGEFLAG(uname, lname, policy)
#define TESTSCFLAG(uname, lname, policy) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
#define FOLIO_TEST_FLAG_FALSE(name) \
static inline bool folio_test_##name(const struct folio *folio) \
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name) \
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name) \
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name) \
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name) \
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name) \
static inline bool folio_test_set_##name(struct folio *folio) \
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
static inline bool folio_test_clear_##name(struct folio *folio) \
{ return false; }
#define FOLIO_FLAG_FALSE(name) \
FOLIO_TEST_FLAG_FALSE(name) \
FOLIO_SET_FLAG_NOOP(name) \
FOLIO_CLEAR_FLAG_NOOP(name)
#define TESTPAGEFLAG_FALSE(uname, lname) \
FOLIO_TEST_FLAG_FALSE(lname) \
static inline int Page##uname(const struct page *page) { return 0; }
#define SETPAGEFLAG_NOOP(uname, lname) \
FOLIO_SET_FLAG_NOOP(lname) \
static inline void SetPage##uname(struct page *page) { }
#define CLEARPAGEFLAG_NOOP(uname, lname) \
FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void ClearPage##uname(struct page *page) { }
#define __CLEARPAGEFLAG_NOOP(uname, lname) \
__FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void __ClearPage##uname(struct page *page) { }
#define TESTSETFLAG_FALSE(uname, lname) \
FOLIO_TEST_SET_FLAG_FALSE(lname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }
#define TESTCLEARFLAG_FALSE(uname, lname) \
FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }
#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
#define TESTSCFLAG_FALSE(uname, lname) \
TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
TESTCLEARFLAG(LRU, lru, PF_HEAD)
FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
__FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
__FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)
/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
* - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
PAGEFLAG(Private, private, PF_ANY)
FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)
/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
/*
* Only test-and-set exist for PG_writeback. The unconditional operators are
* risky: they bypass page accounting.
*/
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)
#ifdef CONFIG_HIGHMEM
/*
* Must use a macro here due to header dependency issues. page_zone() is not
* available at this point.
*/
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif
#define PhysHighMem(__p) (PageHighMem(phys_to_page(__p)))
/* Does kmap_local_folio() only allow access to one page of the folio? */
#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
#define folio_test_partial_kmap(f) true
#else
#define folio_test_partial_kmap(f) folio_test_highmem(f)
#endif
#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
return folio_test_swapbacked(folio) &&
test_bit(PG_swapcache, const_folio_flags(folio, 0));
}
FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(swapcache)
#endif
FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
__FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
#ifdef CONFIG_MMU
FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
__FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(mlocked)
__FOLIO_CLEAR_FLAG_NOOP(mlocked)
FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif
#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif
/*
* PageReported() is used to track reported free pages within the Buddy
* allocator. We can use the non-atomic version of the test and set
* operations as both should be shielded with the zone lock to prevent
* any possible races on the setting or clearing of the bit.
*/
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif
/*
* On an anonymous folio mapped into a user virtual memory area,
* folio->mapping points to its anon_vma, not to a struct address_space;
* with the FOLIO_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
* On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
* the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON
* bit; and then folio->mapping points, not to an anon_vma, but to a private
* structure which KSM associates with that merged folio. See ksm.h.
*
* Please note that, confusingly, "folio_mapping" refers to the inode
* address_space which maps the folio from disk; whereas "folio_mapped"
* refers to user virtual address space into which the folio is mapped.
*
* For slab pages, since slab reuses the bits in struct page to store its
* internal states, the folio->mapping does not exist as such, nor do
* these flags below. So in order to avoid testing non-existent bits,
* please make sure that folio_test_slab(folio) actually evaluates to
* false before calling the following functions (e.g., folio_test_anon).
* See mm/slab.h.
*/
#define FOLIO_MAPPING_ANON 0x1
#define FOLIO_MAPPING_ANON_KSM 0x2
#define FOLIO_MAPPING_KSM (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
#define FOLIO_MAPPING_FLAGS (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
static __always_inline bool folio_test_anon(const struct folio *folio)
{
return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}
static __always_inline bool PageAnonNotKsm(const struct page *page)
{
unsigned long flags = (unsigned long)page_folio(page)->mapping;
return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON;
}
static __always_inline bool PageAnon(const struct page *page)
{
return folio_test_anon(page_folio(page));
}
#ifdef CONFIG_KSM
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
* which KSM maps into multiple mms, wherever identical anonymous page content
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
FOLIO_MAPPING_KSM;
}
#else
FOLIO_TEST_FLAG_FALSE(ksm)
#endif
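/*
 * Illustrative sketch (not part of the original header; folio_mapping() is
 * declared elsewhere): how the low mapping bits are interpreted.
 *
 *   if (folio_test_anon(folio)) {
 *           if (folio_test_ksm(folio))
 *                   ...; // merged KSM folio, mapping points to KSM metadata
 *           else
 *                   ...; // regular anon folio, mapping points to an anon_vma
 *   } else {
 *           struct address_space *mapping = folio_mapping(folio);
 *           ...; // pagecache folio (mapping may still be NULL)
 *   }
 */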
u64 stable_page_flags(const struct page *page);
/**
* folio_xor_flags_has_waiters - Change some folio flags.
* @folio: The folio.
* @mask: Bits set in this word will be changed.
*
* This must only be used for flags which are changed with the folio
* lock held. For example, it is unsafe to use for PG_dirty as that
* can be set without the folio lock held. It can also only be used
* on flags which are in the range 0-6 as some of the implementations
* only affect those bits.
*
* Return: Whether there are tasks waiting on the folio.
*/
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
unsigned long mask)
{
return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
/**
* folio_test_uptodate - Is this folio up to date?
* @folio: The folio.
*
* The uptodate flag is set on a folio when every byte in the folio is
* at least as new as the corresponding bytes on storage. Anonymous
* and CoW folios are always uptodate. If the folio is not uptodate,
* some of the bytes in it may be; see the is_partially_uptodate()
* address_space operation.
*/
static inline bool folio_test_uptodate(const struct folio *folio)
{
bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
/*
* Must ensure that the data we read out of the folio is loaded
* _after_ we've loaded folio->flags to check the uptodate bit.
* We can skip the barrier if the folio is not uptodate, because
* we wouldn't be reading anything from it.
*
* See folio_mark_uptodate() for the other side of the story.
*/
if (ret)
smp_rmb();
return ret;
}
static inline bool PageUptodate(const struct page *page)
{
return folio_test_uptodate(page_folio(page));
}
static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
smp_wmb();
__set_bit(PG_uptodate, folio_flags(folio, 0));
}
static __always_inline void folio_mark_uptodate(struct folio *folio)
{
/*
* Memory barrier must be issued before setting the PG_uptodate bit,
* so that all previous stores issued in order to bring the folio
* uptodate are actually visible before folio_test_uptodate becomes true.
*/
smp_wmb();
set_bit(PG_uptodate, folio_flags(folio, 0));
}
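/*
 * Illustrative sketch (simplified read-completion path; use() is a stand-in
 * chosen for the example): the smp_wmb() in folio_mark_uptodate() pairs with
 * the smp_rmb() in folio_test_uptodate(), so a reader that observes the
 * uptodate bit also observes the data written before the bit was set.
 *
 *   // writer (I/O completion)
 *   memcpy(folio_address(folio), src, len);
 *   folio_mark_uptodate(folio);
 *
 *   // reader
 *   if (folio_test_uptodate(folio))
 *           use(folio_address(folio)); // data guaranteed visible
 */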
static __always_inline void __SetPageUptodate(struct page *page)
{
__folio_mark_uptodate((struct folio *)page);
}
static __always_inline void SetPageUptodate(struct page *page)
{
folio_mark_uptodate((struct folio *)page);
}
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);
#define folio_start_writeback(folio) \
__folio_start_writeback(folio, false)
static __always_inline bool folio_test_head(const struct folio *folio)
{
return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
}
__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)
/**
* folio_test_large() - Does this folio contain more than one page?
* @folio: The folio to test.
*
* Return: True if the folio is larger than one page.
*/
static inline bool folio_test_large(const struct folio *folio)
{
return folio_test_head(folio);
}
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}
static __always_inline void clear_compound_head(struct page *page)
{
WRITE_ONCE(page->compound_head, 0);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
BUG_ON(!PageHead(page));
ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
FOLIO_FLAG_FALSE(partially_mapped)
#endif
#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
*/
static inline int PageTransCompound(const struct page *page)
{
return PageCompound(page);
}
#else
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
#endif
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
* PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
* compound page.
*
* This flag is set by hwpoison handler. Cleared by THP split or free page.
*/
FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(has_hwpoisoned)
#endif
/*
* For pages that do not use mapcount, page_type may be used.
* The low 24 bits of pagetype may be used for your own purposes, as long
* as you are careful to not affect the top 8 bits. The low bits of
* pagetype will be overwritten when you clear the page_type from the page.
*/
enum pagetype {
/* 0x00-0x7f are positive numbers, ie mapcount */
/* Reserve 0x80-0xef for mapcount overflow. */
PGTY_buddy = 0xf0,
PGTY_offline = 0xf1,
PGTY_table = 0xf2,
PGTY_guard = 0xf3,
PGTY_hugetlb = 0xf4,
PGTY_slab = 0xf5,
PGTY_zsmalloc = 0xf6,
PGTY_unaccepted = 0xf7,
PGTY_large_kmalloc = 0xf8,
PGTY_mapcount_underflow = 0xff
};
static inline bool page_type_has_type(int page_type)
{
return page_type < (PGTY_mapcount_underflow << 24);
}
/* This takes a mapcount which is one more than page->_mapcount */
static inline bool page_mapcount_is_type(unsigned int mapcount)
{
return page_type_has_type(mapcount - 1);
}
static inline bool page_has_type(const struct page *page)
{
return page_type_has_type(data_race(page->page_type));
}
#define FOLIO_TYPE_OPS(lname, fname) \
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{ \
return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
if (folio_test_##fname(folio)) \
return; \
VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
folio); \
folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
if (folio->page.page_type == UINT_MAX) \
return; \
VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
folio->page.page_type = UINT_MAX; \
}
#define PAGE_TYPE_OPS(uname, lname, fname) \
FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
return data_race(page->page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
if (Page##uname(page)) \
return; \
VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
if (page->page_type == UINT_MAX) \
return; \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type = UINT_MAX; \
}
/*
* PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
PAGE_TYPE_OPS(Buddy, buddy, buddy)
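/*
 * Expansion sketch (illustrative, not generated code): PAGE_TYPE_OPS(Buddy,
 * buddy, buddy) above generates, among others,
 *
 *   bool folio_test_buddy(const struct folio *folio);
 *   int PageBuddy(const struct page *page);
 *   void __SetPageBuddy(struct page *page);
 *   void __ClearPageBuddy(struct page *page);
 *
 * which test or switch page->page_type between UINT_MAX (no type) and
 * PGTY_buddy << 24.
 */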
/*
* PageOffline() indicates that the page is logically offline although the
* containing section is online. (e.g. inflated in a balloon driver or
* not onlined when onlining the section).
* The content of these pages is effectively stale. Such pages should not
* be touched (read/write/dump/save) except by their owner.
*
* When a memory block gets onlined, all pages are initialized with a
* refcount of 1 and PageOffline(). generic_online_page() will
* take care of clearing PageOffline().
*
* If a driver wants to allow offlining of unmovable PageOffline() pages without
* putting them back to the buddy, it can do so via the memory notifier by
* decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
* reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
* pages (now with a reference count of zero) are treated like free (unmanaged)
* pages, allowing the containing memory block to get offlined. A driver that
* relies on this feature is aware that re-onlining the memory block will
* require not giving them to the buddy via generic_online_page().
*
* Memory offlining code will not adjust the managed page count for any
* PageOffline() pages, treating them like they were never exposed to the
* buddy using generic_online_page().
*
* There are drivers that mark a page PageOffline() and expect there won't be
* any further access to page content. PFN walkers that read content of random
* pages should check PageOffline() and synchronize with such drivers using
* page_offline_freeze()/page_offline_thaw().
*/
PAGE_TYPE_OPS(Offline, offline, offline)
extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);
/*
* Marks pages in use as page tables.
*/
PAGE_TYPE_OPS(Table, table, pgtable)
/*
* Marks guardpages used with debug_pagealloc.
*/
PAGE_TYPE_OPS(Guard, guard, guard)
FOLIO_TYPE_OPS(slab, slab)
/**
* PageSlab - Determine if the page belongs to the slab allocator
* @page: The page to test.
*
* Context: Any context.
* Return: True for slab pages, false for any other kind of page.
*/
static inline bool PageSlab(const struct page *page)
{
return folio_test_slab(page_folio(page));
}
#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif
PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
/*
* Mark pages that have to be accepted before being touched for the first time.
*
* Serialized with zone lock.
*/
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
*
* Context: Any context.
* Return: True for hugetlbfs pages, false for anon pages or pages
* belonging to other filesystems.
*/
static inline bool PageHuge(const struct page *page)
{
return folio_test_hugetlb(page_folio(page));
}
/*
* Check if a page is currently marked HWPoisoned. Note that this check is
* best effort only and inherently racy: there is no way to synchronize with
* failing hardware.
*/
static inline bool is_page_hwpoison(const struct page *page)
{
const struct folio *folio;
if (PageHWPoison(page))
return true;
folio = page_folio(page);
return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}
static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
{
return folio_test_hwpoison(folio) ||
(folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
}
bool is_free_buddy_page(const struct page *page);
#ifdef CONFIG_MIGRATION
/*
* This page is migratable through movable_ops (for selected typed pages
* only).
*
* Page migration of such pages might fail, for example, if the page is
* already isolated by somebody else, or if the page is about to get freed.
*
* While a subsystem might set selected typed pages that support page migration
* as being movable through movable_ops, it must never clear this flag.
*
* This flag is only cleared when the page is freed back to the buddy.
*
* Only selected page types support this flag (see page_movable_ops()) and
* the flag might be used in other context for other pages. Always use
* page_has_movable_ops() instead.
*/
TESTPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
SETPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
/*
* A movable_ops page has this flag set while it is isolated for migration.
* This flag primarily protects against concurrent migration attempts.
*
* Once migration ended (success or failure), the flag is cleared. The
* flag is managed by the migration core.
*/
PAGEFLAG(MovableOpsIsolated, movable_ops_isolated, PF_NO_TAIL);
#else /* !CONFIG_MIGRATION */
TESTPAGEFLAG_FALSE(MovableOps, movable_ops);
SETPAGEFLAG_NOOP(MovableOps, movable_ops);
PAGEFLAG_FALSE(MovableOpsIsolated, movable_ops_isolated);
#endif /* CONFIG_MIGRATION */
/**
* page_has_movable_ops - test for a movable_ops page
* @page: The page to test.
*
* Test whether this is a movable_ops page. Such pages will stay that
* way until freed.
*
* Returns true if this is a movable_ops page, otherwise false.
*/
static inline bool page_has_movable_ops(const struct page *page)
{
return PageMovableOps(page) &&
(PageOffline(page) || PageZsmalloc(page));
}
static __always_inline int PageAnonExclusive(const struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
/*
* HugeTLB stores this information on the head page; THP keeps it per
* page
*/
if (PageHuge(page))
page = compound_head(page);
return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void SetPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void ClearPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
#ifdef CONFIG_MMU
#define __PG_MLOCKED (1UL << PG_mlocked)
#else
#define __PG_MLOCKED 0
#endif
/*
* Flags checked when a page is freed. Pages being freed should not have
* these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_active | \
1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)
/*
* Flags checked when a page is prepped for return by the page allocator.
* Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
*
* __PG_HWPOISON is exceptional because it needs to be kept beyond page's
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
/*
* Flags stored in the second page of a compound page. They may overlap
* the CHECK_AT_FREE flags above, so need to be cleared.
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
1UL << PG_large_rmappable | 1UL << PG_partially_mapped)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
* folio_has_private - Determine if folio has private stuff
* @folio: The folio to be checked
*
* Determine if a folio has private stuff, indicating that release routines
* should be invoked upon it.
*/
static inline int folio_has_private(const struct folio *folio)
{
return !!(folio->flags.f & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
*/
#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H
#include <linux/list.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rculist.h>
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] __read_mostly = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
}
/**
* hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*
* Calculates the size of the hashtable from the given parameter, otherwise
* same as __hash_init().
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
/**
* hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_add_rcu - add an object to a rcu enabled hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add_rcu(hashtable, node, key) \
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_hashed - check whether an object is in any hashtable
* @node: the &struct hlist_node of the object to be checked
*/
static inline bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;
return true;
}
/**
* hash_empty - check whether a hashtable is empty
* @hashtable: hashtable to check
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
/**
* hash_del - remove an object from a hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}
/**
* hash_del_rcu - remove an object from a rcu enabled hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del_rcu(struct hlist_node *node)
{
hlist_del_init_rcu(node);
}
/**
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry(obj, &name[bkt], member)
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_rcu(obj, &name[bkt], member)
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @tmp: a &struct hlist_node used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
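/*
 * Illustrative example (not part of the original header): a minimal sketch
 * of the typical usage pattern. The struct, variable and function names are
 * hypothetical; only the hashtable macros above are assumed.
 *
 *	struct session {
 *		int id;
 *		struct hlist_node node;
 *	};
 *
 *	static DEFINE_HASHTABLE(sessions, 4);	// 2^4 = 16 buckets
 *
 *	static void session_insert(struct session *s)
 *	{
 *		hash_add(sessions, &s->node, s->id);
 *	}
 *
 *	static struct session *session_lookup(int id)
 *	{
 *		struct session *s;
 *
 *		hash_for_each_possible(sessions, s, node, id)
 *			if (s->id == id)
 *				return s;
 *		return NULL;
 *	}
 */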
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
member, ## cond)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
* to the same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*
* This is the same as hash_for_each_possible_rcu() except that it does
* not do any RCU debugging or tracing.
*/
#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
hlist_for_each_entry_rcu_notrace(obj, \
&name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @tmp: a &struct hlist_node used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
hlist_for_each_entry_safe(obj, tmp,\
&name[hash_min(key, HASH_BITS(name))], member)
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
#include <linux/bug.h>
/*
* bit-based spin_lock()
*
* Don't use this unless you really need to: spin_lock() and spin_unlock()
* are significantly faster.
*/
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
/*
* Assuming the lock is uncontended, this never enters
* the body of the outer loop. If it is contended, then
* within the inner loop a non-atomic test is used to
* busywait with less bus contention for a good time to
* attempt to acquire the lock bit.
*/
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
preempt_enable();
do {
cpu_relax();
} while (test_bit(bitnum, addr));
preempt_disable();
}
#endif
__acquire(bitlock);
}
/*
* Return true if it was acquired
*/
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
preempt_enable();
return 0;
}
#endif
__acquire(bitlock);
return 1;
}
/*
* bit-based spin_unlock()
*/
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
__release(bitlock);
}
/*
* bit-based spin_unlock()
* non-atomic version, which can be used eg. if the bit lock itself is
* protecting the rest of the flags in the word.
*/
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
__clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
__release(bitlock);
}
/*
* Return true if the lock is held.
*/
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
return preempt_count();
#else
return 1;
#endif
}
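/*
 * Illustrative example (not part of the original header): a minimal sketch
 * of using bit 0 of a caller-owned flags word as a lock, e.g. to serialize
 * updates to a small structure without paying for a full spinlock_t. The
 * struct and names below are hypothetical.
 *
 *	#define MY_OBJ_LOCK_BIT	0
 *
 *	struct my_obj {
 *		unsigned long flags;
 *		int counter;
 *	};
 *
 *	static void my_obj_inc(struct my_obj *obj)
 *	{
 *		bit_spin_lock(MY_OBJ_LOCK_BIT, &obj->flags);
 *		obj->counter++;
 *		bit_spin_unlock(MY_OBJ_LOCK_BIT, &obj->flags);
 *	}
 */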
#endif /* __LINUX_BIT_SPINLOCK_H */
#ifndef _LINUX_JHASH_H
#define _LINUX_JHASH_H
/* jhash.h: Jenkins hash support.
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
* https://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
* lookup3.c, by Bob Jenkins, May 2006, Public Domain.
*
* These are functions for producing 32-bit hashes for hash table lookup.
* hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
* are externally useful functions. Routines to test the hash are included
* if SELF_TEST is defined. You can use this free for any purpose. It's in
* the public domain. It has no warranty.
*
* Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org)
*
* I've modified Bob's hash to be useful in the Linux kernel, and
* any bugs present are my fault.
* Jozsef
*/
#include <linux/bitops.h>
#include <linux/unaligned.h>
/* Best hash sizes are powers of two */
#define jhash_size(n) ((u32)1<<(n))
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
#define jhash_mask(n) (jhash_size(n)-1)
/* __jhash_mix - mix 3 32-bit values reversibly. */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
/* An arbitrary initial parameter */
#define JHASH_INITVAL 0xdeadbeef
/* jhash - hash an arbitrary key
* @k: sequence of bytes as key
* @length: the length of the key
* @initval: the previous hash, or an arbitrary value
*
* The generic version, hashes an arbitrary sequence of bytes.
* No alignment or length assumptions are made about the input key.
*
* Returns the hash value of the key. The result depends on endianness.
*/
static inline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const u8 *k = key;
/* Set up the internal state */
a = b = c = JHASH_INITVAL + length + initval;
/* All but the last block: affect some 32 bits of (a,b,c) */
while (length > 12) {
a += get_unaligned((u32 *)k);
b += get_unaligned((u32 *)(k + 4));
c += get_unaligned((u32 *)(k + 8));
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
/* Last block: affect all 32 bits of (c) */
switch (length) {
case 12: c += (u32)k[11]<<24; fallthrough;
case 11: c += (u32)k[10]<<16; fallthrough;
case 10: c += (u32)k[9]<<8; fallthrough;
case 9: c += k[8]; fallthrough;
case 8: b += (u32)k[7]<<24; fallthrough;
case 7: b += (u32)k[6]<<16; fallthrough;
case 6: b += (u32)k[5]<<8; fallthrough;
case 5: b += k[4]; fallthrough;
case 4: a += (u32)k[3]<<24; fallthrough;
case 3: a += (u32)k[2]<<16; fallthrough;
case 2: a += (u32)k[1]<<8; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
break;
case 0: /* Nothing left to add */
break;
}
return c;
}
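/*
 * Illustrative example (not part of the original header): hashing a
 * variable-length key into one of 2^n buckets. The names below are
 * hypothetical; jhash_mask() turns the 32-bit hash into a bucket index
 * when the table size is a power of two.
 *
 *	#define MY_TABLE_BITS	8
 *
 *	static u32 my_bucket(const void *key, u32 len)
 *	{
 *		return jhash(key, len, 0) & jhash_mask(MY_TABLE_BITS);
 *	}
 */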
/* jhash2 - hash an array of u32's
* @k: the key which must be an array of u32's
* @length: the number of u32's in the key
* @initval: the previous hash, or an arbitrary value
*
* Returns the hash value of the key.
*/
static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
{
u32 a, b, c;
/* Set up the internal state */
a = b = c = JHASH_INITVAL + (length<<2) + initval;
/* Handle most of the key */
while (length > 3) {
a += k[0];
b += k[1];
c += k[2];
__jhash_mix(a, b, c);
length -= 3;
k += 3;
}
/* Handle the last 3 u32's */
switch (length) {
case 3: c += k[2]; fallthrough;
case 2: b += k[1]; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
break;
case 0: /* Nothing left to add */
break;
}
return c;
}
/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
}
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Dynamic DMA mapping support.
*
* This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
* Copyright (C) 2000, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing.
* 04/07/.. ak Better overflow handling. Assorted fixes.
* 05/09/10 linville Add support for syncing ranges, support syncing for
* DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
* 08/12/11 beckyb Add highmem support
*/
#define pr_fmt(fmt) "software IO TLB: " fmt
#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/rculist.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif
#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
* Minimum IO TLB size to bother booting with. Systems with mainly
* 64bit capable cards will only lightly use the swiotlb. If we can't
* allocate a contiguous 1MB, we're probably in trouble anyway.
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
/**
* struct io_tlb_slot - IO TLB slot descriptor
* @orig_addr: The original address corresponding to a mapped entry.
* @alloc_size: Size of the allocated buffer.
* @list: The free list describing the number of free entries available
* from each index.
* @pad_slots: Number of preceding padding slots. Valid only in the first
* allocated non-padding slot.
*/
struct io_tlb_slot {
phys_addr_t orig_addr;
size_t alloc_size;
unsigned short list;
unsigned short pad_slots;
};
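/*
 * Illustrative note (not from the original sources): with the segment size
 * IO_TLB_SEGSIZE (128 in current kernels), a fully free segment carries
 * descending @list values 128, 127, ..., 1. Reading slots[i].list therefore
 * gives the number of contiguous free slots available starting at index i
 * within that segment, which is what the allocator checks when it searches
 * for a run of nslots free entries.
 */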
static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;
#ifdef CONFIG_SWIOTLB_DYNAMIC
static void swiotlb_dyn_alloc(struct work_struct *work);
static struct io_tlb_mem io_tlb_default_mem = {
.lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
.dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
swiotlb_dyn_alloc),
};
#else /* !CONFIG_SWIOTLB_DYNAMIC */
static struct io_tlb_mem io_tlb_default_mem;
#endif /* CONFIG_SWIOTLB_DYNAMIC */
static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;
/**
* struct io_tlb_area - IO TLB memory area descriptor
*
* This is a single area with a single lock.
*
* @used: The number of used IO TLB blocks.
* @index: The slot index to start searching in this area for next round.
* @lock: The lock to protect the above data structures in the map and
* unmap calls.
*/
struct io_tlb_area {
unsigned long used;
unsigned int index;
spinlock_t lock;
};
/*
* Round up the number of slabs to the next power of 2. The last area is going
* to be smaller than the rest if default_nslabs is not a power of two.
* The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
* otherwise a segment may span two or more areas. It conflicts with free
* contiguous slots tracking: free slots are treated as contiguous no matter
* whether they cross an area boundary.
*
* Return true if default_nslabs is rounded up.
*/
static bool round_up_default_nslabs(void)
{
if (!default_nareas)
return false;
if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
default_nslabs = IO_TLB_SEGSIZE * default_nareas;
else if (is_power_of_2(default_nslabs))
return false;
default_nslabs = roundup_pow_of_two(default_nslabs);
return true;
}
/**
* swiotlb_adjust_nareas() - adjust the number of areas and slots
* @nareas: Desired number of areas. Zero is treated as 1.
*
* Adjust the default number of areas in a memory pool.
* The default size of the memory pool may also change to meet minimum area
* size requirements.
*/
static void swiotlb_adjust_nareas(unsigned int nareas)
{
if (!nareas)
nareas = 1;
else if (!is_power_of_2(nareas))
nareas = roundup_pow_of_two(nareas);
default_nareas = nareas;
pr_info("area num %d.\n", nareas);
if (round_up_default_nslabs())
pr_info("SWIOTLB bounce buffer size roundup to %luMB",
(default_nslabs << IO_TLB_SHIFT) >> 20);
}
/**
* limit_nareas() - get the maximum number of areas for a given memory pool size
* @nareas: Desired number of areas.
* @nslots: Total number of slots in the memory pool.
*
* Limit the number of areas to the maximum possible number of areas in
* a memory pool of the given size.
*
* Return: Maximum possible number of areas.
*/
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
if (nslots < nareas * IO_TLB_SEGSIZE)
return nslots / IO_TLB_SEGSIZE;
return nareas;
}
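/*
 * Illustrative note (not part of the original file): with IO_TLB_SEGSIZE
 * equal to 128 (as in current kernels), a pool of 2048 slots can hold at
 * most 2048 / 128 = 16 areas, so limit_nareas(32, 2048) returns 16 while
 * limit_nareas(8, 2048) returns 8 unchanged.
 */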
static int __init
setup_io_tlb_npages(char *str)
{
if (isdigit(*str)) {
/* avoid tail segment of size < IO_TLB_SEGSIZE */
default_nslabs =
ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
}
if (*str == ',')
++str;
if (isdigit(*str))
swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
if (*str == ',')
++str;
if (!strcmp(str, "force"))
swiotlb_force_bounce = true;
else if (!strcmp(str, "noforce"))
swiotlb_force_disable = true;
return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
unsigned long swiotlb_size_or_default(void)
{
return default_nslabs << IO_TLB_SHIFT;
}
void __init swiotlb_adjust_size(unsigned long size)
{
/*
* If swiotlb parameter has not been specified, give a chance to
* architectures such as those supporting memory encryption to
* adjust/expand SWIOTLB size for their use.
*/
if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
return;
size = ALIGN(size, IO_TLB_SIZE);
default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
if (round_up_default_nslabs())
size = default_nslabs << IO_TLB_SHIFT;
pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}
void swiotlb_print_info(void)
{
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
if (!mem->nslabs) {
pr_warn("No low mem\n");
return;
}
pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
(mem->nslabs << IO_TLB_SHIFT) >> 20);
}
static inline unsigned long io_tlb_offset(unsigned long val)
{
return val & (IO_TLB_SEGSIZE - 1);
}
static inline unsigned long nr_slots(u64 val)
{
return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
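/*
 * Illustrative note (assumes IO_TLB_SHIFT == 11, i.e. 2 KiB slots, as in
 * current kernels): nr_slots(1) == 1, nr_slots(2048) == 1 and
 * nr_slots(2049) == 2, while io_tlb_offset() reports a slot's position
 * within its IO_TLB_SEGSIZE-slot segment, e.g. io_tlb_offset(130) == 2
 * when IO_TLB_SEGSIZE == 128.
 */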
/*
* Early SWIOTLB allocation may be too early to allow an architecture to
* perform the desired operations. This function allows the architecture to
* call SWIOTLB when the operations are possible. It needs to be called
* before the SWIOTLB memory is used.
*/
void __init swiotlb_update_mem_attributes(void)
{
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
unsigned long bytes;
if (!mem->nslabs || mem->late_alloc)
return;
bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
}
static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
unsigned long nslabs, bool late_alloc, unsigned int nareas)
{
void *vaddr = phys_to_virt(start);
unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
mem->nslabs = nslabs;
mem->start = start;
mem->end = mem->start + bytes;
mem->late_alloc = late_alloc;
mem->nareas = nareas;
mem->area_nslabs = nslabs / mem->nareas;
for (i = 0; i < mem->nareas; i++) {
spin_lock_init(&mem->areas[i].lock);
mem->areas[i].index = 0;
mem->areas[i].used = 0;
}
for (i = 0; i < mem->nslabs; i++) {
mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
mem->nslabs - i);
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
mem->slots[i].alloc_size = 0;
mem->slots[i].pad_slots = 0;
}
memset(vaddr, 0, bytes);
mem->vaddr = vaddr;
return;
}
/**
* add_mem_pool() - add a memory pool to the allocator
* @mem: Software IO TLB allocator.
* @pool: Memory pool to be added.
*/
static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
spin_lock(&mem->lock);
list_add_rcu(&pool->node, &mem->pools);
mem->nslabs += pool->nslabs;
spin_unlock(&mem->lock);
#else
mem->nslabs = pool->nslabs;
#endif
}
static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs))
{
size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
void *tlb;
/*
* By default allocate the bounce buffer memory from low memory, but
* allow the location to be picked anywhere for hypervisors with guest
* memory encryption.
*/
if (flags & SWIOTLB_ANY)
tlb = memblock_alloc(bytes, PAGE_SIZE);
else
tlb = memblock_alloc_low(bytes, PAGE_SIZE);
if (!tlb) {
pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
__func__, bytes);
return NULL;
}
if (remap && remap(tlb, nslabs) < 0) {
memblock_free(tlb, PAGE_ALIGN(bytes));
pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
return NULL;
}
return tlb;
}
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs))
{
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
unsigned long nslabs;
unsigned int nareas;
size_t alloc_size;
void *tlb;
if (!addressing_limit && !swiotlb_force_bounce)
return;
if (swiotlb_force_disable)
return;
io_tlb_default_mem.force_bounce =
swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
#ifdef CONFIG_SWIOTLB_DYNAMIC
if (!remap)
io_tlb_default_mem.can_grow = true;
if (flags & SWIOTLB_ANY)
io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
else
io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
#endif
if (!default_nareas)
swiotlb_adjust_nareas(num_possible_cpus());
nslabs = default_nslabs;
nareas = limit_nareas(default_nareas, nslabs);
while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
if (nslabs <= IO_TLB_MIN_SLABS)
return;
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
nareas = limit_nareas(nareas, nslabs);
}
if (default_nslabs != nslabs) {
pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
default_nslabs, nslabs);
default_nslabs = nslabs;
}
alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
if (!mem->slots) {
pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
__func__, alloc_size, PAGE_SIZE);
return;
}
mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
nareas), SMP_CACHE_BYTES);
if (!mem->areas) {
pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
return;
}
swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
add_mem_pool(&io_tlb_default_mem, mem);
if (flags & SWIOTLB_VERBOSE)
swiotlb_print_info();
}
void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
swiotlb_init_remap(addressing_limit, flags, NULL);
}
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
int (*remap)(void *tlb, unsigned long nslabs))
{
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
unsigned int nareas;
unsigned char *vstart = NULL;
unsigned int order, area_order;
bool retried = false;
int rc = 0;
if (io_tlb_default_mem.nslabs)
return 0;
if (swiotlb_force_disable)
return 0;
io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
#ifdef CONFIG_SWIOTLB_DYNAMIC
if (!remap)
io_tlb_default_mem.can_grow = true;
if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
io_tlb_default_mem.phys_limit = zone_dma_limit;
else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
io_tlb_default_mem.phys_limit = max(DMA_BIT_MASK(32), zone_dma_limit);
else
io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
#endif
if (!default_nareas)
swiotlb_adjust_nareas(num_possible_cpus());
retry:
order = get_order(nslabs << IO_TLB_SHIFT);
nslabs = SLABS_PER_PAGE << order;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
order);
if (vstart)
break;
order--;
nslabs = SLABS_PER_PAGE << order;
retried = true;
}
if (!vstart)
return -ENOMEM;
if (remap)
rc = remap(vstart, nslabs);
if (rc) {
free_pages((unsigned long)vstart, order);
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
if (nslabs < IO_TLB_MIN_SLABS)
return rc;
retried = true;
goto retry;
}
if (retried) {
pr_warn("only able to allocate %ld MB\n",
(PAGE_SIZE << order) >> 20);
}
nareas = limit_nareas(default_nareas, nslabs);
area_order = get_order(array_size(sizeof(*mem->areas), nareas));
mem->areas = (struct io_tlb_area *)
__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
if (!mem->areas)
goto error_area;
mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(array_size(sizeof(*mem->slots), nslabs)));
if (!mem->slots)
goto error_slots;
set_memory_decrypted((unsigned long)vstart,
(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
nareas);
add_mem_pool(&io_tlb_default_mem, mem);
swiotlb_print_info();
return 0;
error_slots:
free_pages((unsigned long)mem->areas, area_order);
error_area:
free_pages((unsigned long)vstart, order);
return -ENOMEM;
}
void __init swiotlb_exit(void)
{
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
unsigned long tbl_vaddr;
size_t tbl_size, slots_size;
unsigned int area_order;
if (swiotlb_force_bounce)
return;
if (!mem->nslabs)
return;
pr_info("tearing down default memory pool\n");
tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
tbl_size = PAGE_ALIGN(mem->end - mem->start);
slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
if (mem->late_alloc) {
area_order = get_order(array_size(sizeof(*mem->areas),
mem->nareas));
free_pages((unsigned long)mem->areas, area_order);
free_pages(tbl_vaddr, get_order(tbl_size));
free_pages((unsigned long)mem->slots, get_order(slots_size));
} else {
memblock_free_late(__pa(mem->areas),
array_size(sizeof(*mem->areas), mem->nareas));
memblock_free_late(mem->start, tbl_size);
memblock_free_late(__pa(mem->slots), slots_size);
}
memset(mem, 0, sizeof(*mem));
}
#ifdef CONFIG_SWIOTLB_DYNAMIC
/**
* alloc_dma_pages() - allocate pages to be used for DMA
* @gfp: GFP flags for the allocation.
* @bytes: Size of the buffer.
* @phys_limit: Maximum allowed physical address of the buffer.
*
* Allocate pages from the buddy allocator. If successful, make the allocated
* pages decrypted so that they can be used for DMA.
*
* Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
* if the allocated physical address was above @phys_limit.
*/
static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
{
unsigned int order = get_order(bytes);
struct page *page;
phys_addr_t paddr;
void *vaddr;
page = alloc_pages(gfp, order);
if (!page)
return NULL;
paddr = page_to_phys(page);
if (paddr + bytes - 1 > phys_limit) {
__free_pages(page, order);
return ERR_PTR(-EAGAIN);
}
vaddr = phys_to_virt(paddr);
if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
goto error;
return page;
error:
/* Intentional leak if pages cannot be encrypted again. */
if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
__free_pages(page, order);
return NULL;
}
/**
* swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
* @dev: Device for which a memory pool is allocated.
* @bytes: Size of the buffer.
* @phys_limit: Maximum allowed physical address of the buffer.
* @gfp: GFP flags for the allocation.
*
* Return: Allocated pages, or %NULL on allocation failure.
*/
static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
u64 phys_limit, gfp_t gfp)
{
struct page *page;
/*
* Allocate from the atomic pools if memory is encrypted and
* the allocation is atomic, because decrypting may block.
*/
if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
void *vaddr;
if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
return NULL;
return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
dma_coherent_ok);
}
gfp &= ~GFP_ZONEMASK;
if (phys_limit <= zone_dma_limit)
gfp |= __GFP_DMA;
else if (phys_limit <= DMA_BIT_MASK(32))
gfp |= __GFP_DMA32;
while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
phys_limit < DMA_BIT_MASK(64) &&
!(gfp & (__GFP_DMA32 | __GFP_DMA)))
gfp |= __GFP_DMA32;
else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
!(gfp & __GFP_DMA))
gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
else
return NULL;
}
return page;
}
/**
* swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
* @vaddr: Virtual address of the buffer.
* @bytes: Size of the buffer.
*/
static void swiotlb_free_tlb(void *vaddr, size_t bytes)
{
if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
dma_free_from_pool(NULL, vaddr, bytes))
return;
/* Intentional leak if pages cannot be encrypted again. */
if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
__free_pages(virt_to_page(vaddr), get_order(bytes));
}
/**
* swiotlb_alloc_pool() - allocate a new IO TLB memory pool
* @dev: Device for which a memory pool is allocated.
* @minslabs: Minimum number of slabs.
* @nslabs: Desired (maximum) number of slabs.
* @nareas: Number of areas.
* @phys_limit: Maximum DMA buffer physical address.
* @gfp: GFP flags for the allocations.
*
* Allocate and initialize a new IO TLB memory pool. The actual number of
* slabs may be reduced if allocation of @nslabs fails. If even
* @minslabs cannot be allocated, this function fails.
*
* Return: New memory pool, or %NULL on allocation failure.
*/
static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
unsigned long minslabs, unsigned long nslabs,
unsigned int nareas, u64 phys_limit, gfp_t gfp)
{
struct io_tlb_pool *pool;
unsigned int slot_order;
struct page *tlb;
size_t pool_size;
size_t tlb_size;
if (nslabs > SLABS_PER_PAGE << MAX_PAGE_ORDER) {
nslabs = SLABS_PER_PAGE << MAX_PAGE_ORDER;
nareas = limit_nareas(nareas, nslabs);
}
pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
pool = kzalloc(pool_size, gfp);
if (!pool)
goto error;
pool->areas = (void *)pool + sizeof(*pool);
tlb_size = nslabs << IO_TLB_SHIFT;
while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
if (nslabs <= minslabs)
goto error_tlb;
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
nareas = limit_nareas(nareas, nslabs);
tlb_size = nslabs << IO_TLB_SHIFT;
}
slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
pool->slots = (struct io_tlb_slot *)
__get_free_pages(gfp, slot_order);
if (!pool->slots)
goto error_slots;
swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
return pool;
error_slots:
swiotlb_free_tlb(page_address(tlb), tlb_size);
error_tlb:
kfree(pool);
error:
return NULL;
}
/**
* swiotlb_dyn_alloc() - dynamic memory pool allocation worker
* @work: Pointer to dyn_alloc in struct io_tlb_mem.
*/
static void swiotlb_dyn_alloc(struct work_struct *work)
{
struct io_tlb_mem *mem =
container_of(work, struct io_tlb_mem, dyn_alloc);
struct io_tlb_pool *pool;
pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
default_nareas, mem->phys_limit, GFP_KERNEL);
if (!pool) {
pr_warn_ratelimited("Failed to allocate new pool");
return;
}
add_mem_pool(mem, pool);
}
/**
* swiotlb_dyn_free() - RCU callback to free a memory pool
* @rcu: RCU head in the corresponding struct io_tlb_pool.
*/
static void swiotlb_dyn_free(struct rcu_head *rcu)
{
struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
size_t tlb_size = pool->end - pool->start;
free_pages((unsigned long)pool->slots, get_order(slots_size));
swiotlb_free_tlb(pool->vaddr, tlb_size);
kfree(pool);
}
/**
* __swiotlb_find_pool() - find the IO TLB pool for a physical address
* @dev: Device which has mapped the DMA buffer.
* @paddr: Physical address within the DMA buffer.
*
* Find the IO TLB memory pool descriptor which contains the given physical
* address, if any. This function is for use only when the dev is known to
* be using swiotlb. Use swiotlb_find_pool() for the more general case
* when this condition is not met.
*
* Return: Memory pool which contains @paddr, or %NULL if none.
*/
struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
rcu_read_lock();
list_for_each_entry_rcu(pool, &mem->pools, node) {
if (paddr >= pool->start && paddr < pool->end)
goto out;
}
list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
if (paddr >= pool->start && paddr < pool->end)
goto out;
}
pool = NULL;
out:
rcu_read_unlock();
return pool;
}
/**
* swiotlb_del_pool() - remove an IO TLB pool from a device
* @dev: Owning device.
* @pool: Memory pool to be removed.
*/
static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
{
unsigned long flags;
spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
list_del_rcu(&pool->node);
spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
call_rcu(&pool->rcu, swiotlb_dyn_free);
}
#endif /* CONFIG_SWIOTLB_DYNAMIC */
/**
* swiotlb_dev_init() - initialize swiotlb fields in &struct device
* @dev: Device to be initialized.
*/
void swiotlb_dev_init(struct device *dev)
{
dev->dma_io_tlb_mem = &io_tlb_default_mem;
#ifdef CONFIG_SWIOTLB_DYNAMIC
INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
spin_lock_init(&dev->dma_io_tlb_lock);
dev->dma_uses_io_tlb = false;
#endif
}
/**
* swiotlb_align_offset() - Get required offset into an IO TLB allocation.
* @dev: Owning device.
* @align_mask: Allocation alignment mask.
* @addr: DMA address.
*
* Return the minimum offset from the start of an IO TLB allocation which is
* required for a given buffer address and allocation alignment to keep the
* device happy.
*
* First, the address bits covered by min_align_mask must be identical in the
* original address and the bounce buffer address. High bits are preserved by
* choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra
* padding bytes before the bounce buffer.
*
* Second, @align_mask specifies which bits of the first allocated slot must
* be zero. This may require allocating additional padding slots, and then the
* offset (in bytes) from the first such padding slot is returned.
*/
static unsigned int swiotlb_align_offset(struct device *dev,
unsigned int align_mask, u64 addr)
{
return addr & dma_get_min_align_mask(dev) &
(align_mask | (IO_TLB_SIZE - 1));
}
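/*
 * Illustrative example (not from the original sources): for a device with
 * dma_get_min_align_mask(dev) == 0xfff (a 4 KiB min_align_mask), an
 * @align_mask of 0 and an address of 0x12345, the function returns
 * 0x12345 & 0xfff & (0 | (IO_TLB_SIZE - 1)) == 0x345, assuming 2 KiB
 * IO_TLB_SIZE. The bounce buffer then starts 0x345 bytes into the first
 * allocated slot so that the low address bits match the original buffer.
 */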
/*
* Bounce: copy the swiotlb buffer from or back to the original dma location
*/
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
enum dma_data_direction dir, struct io_tlb_pool *mem)
{
int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = mem->slots[index].orig_addr;
size_t alloc_size = mem->slots[index].alloc_size;
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
int tlb_offset;
if (orig_addr == INVALID_PHYS_ADDR)
return;
/*
* It's valid for tlb_offset to be negative. This can happen when the
* "offset" returned by swiotlb_align_offset() is non-zero, and the
* tlb_addr is pointing within the first "offset" bytes of the second
* or subsequent slots of the allocated swiotlb area. While it's not
* valid for tlb_addr to be pointing within the first "offset" bytes
* of the first slot, there's no way to check for such an error since
* this function can't distinguish the first slot from the second and
* subsequent slots.
*/
tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
swiotlb_align_offset(dev, 0, orig_addr);
orig_addr += tlb_offset;
alloc_size -= tlb_offset;
if (size > alloc_size) {
dev_WARN_ONCE(dev, 1,
"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
alloc_size, size);
size = alloc_size;
}
if (PageHighMem(pfn_to_page(pfn))) {
unsigned int offset = orig_addr & ~PAGE_MASK;
struct page *page;
unsigned int sz = 0;
unsigned long flags;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
page = pfn_to_page(pfn);
if (dir == DMA_TO_DEVICE)
memcpy_from_page(vaddr, page, offset, sz);
else
memcpy_to_page(page, offset, vaddr, sz);
local_irq_restore(flags);
size -= sz;
pfn++;
vaddr += sz;
offset = 0;
}
} else if (dir == DMA_TO_DEVICE) {
memcpy(vaddr, phys_to_virt(orig_addr), size);
} else {
memcpy(phys_to_virt(orig_addr), vaddr, size);
}
}
static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
return start + (idx << IO_TLB_SHIFT);
}
/*
* Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
*/
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
return (boundary_mask >> IO_TLB_SHIFT) + 1;
}
static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
{
if (index >= mem->area_nslabs)
return 0;
return index;
}
/*
* Track the total used slots with a global atomic value in order to have
* correct information to determine the high water mark. The mem_used()
* function gives imprecise results because there's no locking across
* multiple areas.
*/
#ifdef CONFIG_DEBUG_FS
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
unsigned long old_hiwater, new_used;
new_used = atomic_long_add_return(nslots, &mem->total_used);
old_hiwater = atomic_long_read(&mem->used_hiwater);
do {
if (new_used <= old_hiwater)
break;
} while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
&old_hiwater, new_used));
}
static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
atomic_long_sub(nslots, &mem->total_used);
}
#else /* !CONFIG_DEBUG_FS */
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_SWIOTLB_DYNAMIC
#ifdef CONFIG_DEBUG_FS
static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
atomic_long_add(nslots, &mem->transient_nslabs);
}
static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
atomic_long_sub(nslots, &mem->transient_nslabs);
}
#else /* !CONFIG_DEBUG_FS */
static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SWIOTLB_DYNAMIC */
/**
* swiotlb_search_pool_area() - search one memory area in one pool
* @dev: Device which maps the buffer.
* @pool: Memory pool to be searched.
* @area_index: Index of the IO TLB memory area to be searched.
* @orig_addr: Original (non-bounced) IO buffer address.
* @alloc_size: Total requested size of the bounce buffer,
* including initial alignment padding.
* @alloc_align_mask: Required alignment of the allocated buffer.
*
* Find a suitable sequence of IO TLB entries for the request and allocate
* a buffer from the given IO TLB memory area.
* This function takes care of locking.
*
* Return: Index of the first allocated slot, or -1 on error.
*/
static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
int area_index, phys_addr_t orig_addr, size_t alloc_size,
unsigned int alloc_align_mask)
{
struct io_tlb_area *area = pool->areas + area_index;
unsigned long boundary_mask = dma_get_seg_boundary(dev);
dma_addr_t tbl_dma_addr =
phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
unsigned long max_slots = get_max_slots(boundary_mask);
unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
unsigned int nslots = nr_slots(alloc_size), stride;
unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr);
unsigned int index, slots_checked, count = 0, i;
unsigned long flags;
unsigned int slot_base;
unsigned int slot_index;
BUG_ON(!nslots);
BUG_ON(area_index >= pool->nareas);
/*
* Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
* page-aligned in the absence of any other alignment requirements.
* 'alloc_align_mask' was later introduced to specify the alignment
* explicitly, however this is passed as zero for streaming mappings
* and so we preserve the old behaviour there in case any drivers are
* relying on it.
*/
if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
alloc_align_mask = PAGE_SIZE - 1;
/*
* Ensure that the allocation is at least slot-aligned and update
* 'iotlb_align_mask' to ignore bits that will be preserved when
* offsetting into the allocation.
*/
alloc_align_mask |= (IO_TLB_SIZE - 1);
iotlb_align_mask &= ~alloc_align_mask;
/*
* For mappings with an alignment requirement don't bother looping to
* unaligned slots once we found an aligned one.
*/
stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
spin_lock_irqsave(&area->lock, flags);
if (unlikely(nslots > pool->area_nslabs - area->used))
goto not_found;
slot_base = area_index * pool->area_nslabs;
index = area->index;
for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
phys_addr_t tlb_addr;
slot_index = slot_base + index;
tlb_addr = slot_addr(tbl_dma_addr, slot_index);
if ((tlb_addr & alloc_align_mask) ||
(orig_addr && (tlb_addr & iotlb_align_mask) !=
(orig_addr & iotlb_align_mask))) {
index = wrap_area_index(pool, index + 1);
slots_checked++;
continue;
}
if (!iommu_is_span_boundary(slot_index, nslots,
nr_slots(tbl_dma_addr),
max_slots)) {
if (pool->slots[slot_index].list >= nslots)
goto found;
}
index = wrap_area_index(pool, index + stride);
slots_checked += stride;
}
not_found:
spin_unlock_irqrestore(&area->lock, flags);
return -1;
found:
/*
* If we find a slot that indicates we have 'nslots' number of
* contiguous buffers, we allocate the buffers from that slot onwards
* and set the list of free entries to '0' indicating unavailable.
*/
for (i = slot_index; i < slot_index + nslots; i++) {
pool->slots[i].list = 0;
pool->slots[i].alloc_size = alloc_size - (offset +
((i - slot_index) << IO_TLB_SHIFT));
}
for (i = slot_index - 1;
io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
pool->slots[i].list; i--)
pool->slots[i].list = ++count;
/*
* Update the indices to avoid searching in the next round.
*/
area->index = wrap_area_index(pool, index + nslots);
area->used += nslots;
spin_unlock_irqrestore(&area->lock, flags);
inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
return slot_index;
}
#ifdef CONFIG_SWIOTLB_DYNAMIC
/**
* swiotlb_search_area() - search one memory area in all pools
* @dev: Device which maps the buffer.
* @start_cpu: Start CPU number.
* @cpu_offset: Offset from @start_cpu.
* @orig_addr: Original (non-bounced) IO buffer address.
* @alloc_size: Total requested size of the bounce buffer,
* including initial alignment padding.
* @alloc_align_mask: Required alignment of the allocated buffer.
* @retpool: Used memory pool, updated on return.
*
* Search one memory area in all pools for a sequence of slots that match the
* allocation constraints.
*
* Return: Index of the first allocated slot, or -1 on error.
*/
static int swiotlb_search_area(struct device *dev, int start_cpu,
int cpu_offset, phys_addr_t orig_addr, size_t alloc_size,
unsigned int alloc_align_mask, struct io_tlb_pool **retpool)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
int area_index;
int index = -1;
rcu_read_lock();
list_for_each_entry_rcu(pool, &mem->pools, node) {
if (cpu_offset >= pool->nareas)
continue;
area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
index = swiotlb_search_pool_area(dev, pool, area_index,
orig_addr, alloc_size,
alloc_align_mask);
if (index >= 0) {
*retpool = pool;
break;
}
}
rcu_read_unlock();
return index;
}
/**
* swiotlb_find_slots() - search for slots in the whole swiotlb
* @dev: Device which maps the buffer.
* @orig_addr: Original (non-bounced) IO buffer address.
* @alloc_size: Total requested size of the bounce buffer,
* including initial alignment padding.
* @alloc_align_mask: Required alignment of the allocated buffer.
* @retpool: Used memory pool, updated on return.
*
* Search through the whole software IO TLB to find a sequence of slots that
* match the allocation constraints.
*
* Return: Index of the first allocated slot, or -1 on error.
*/
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
size_t alloc_size, unsigned int alloc_align_mask,
struct io_tlb_pool **retpool)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
unsigned long nslabs;
unsigned long flags;
u64 phys_limit;
int cpu, i;
int index;
if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE)
return -1;
cpu = raw_smp_processor_id();
for (i = 0; i < default_nareas; ++i) {
index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
alloc_align_mask, &pool);
if (index >= 0)
goto found;
}
if (!mem->can_grow)
return -1;
schedule_work(&mem->dyn_alloc);
nslabs = nr_slots(alloc_size);
phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
GFP_NOWAIT);
if (!pool)
return -1;
index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
alloc_size, alloc_align_mask);
if (index < 0) {
swiotlb_dyn_free(&pool->rcu);
return -1;
}
pool->transient = true;
spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
inc_transient_used(mem, pool->nslabs);
found:
WRITE_ONCE(dev->dma_uses_io_tlb, true);
/*
* The general barrier orders reads and writes against a presumed store
* of the SWIOTLB buffer address by a device driver (to a driver private
* data structure). It serves two purposes.
*
* First, the store to dev->dma_uses_io_tlb must be ordered before the
* presumed store. This guarantees that the returned buffer address
* cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
*
* Second, the load from mem->pools must be ordered before the same
* presumed store. This guarantees that the returned buffer address
* cannot be observed by another CPU before an update of the RCU list
* that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
* atomicity).
*
* See also the comment in swiotlb_find_pool().
*/
smp_mb();
*retpool = pool;
return index;
}
#else /* !CONFIG_SWIOTLB_DYNAMIC */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
size_t alloc_size, unsigned int alloc_align_mask,
struct io_tlb_pool **retpool)
{
struct io_tlb_pool *pool;
int start, i;
int index;
*retpool = pool = &dev->dma_io_tlb_mem->defpool;
i = start = raw_smp_processor_id() & (pool->nareas - 1);
do {
index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
alloc_size, alloc_align_mask);
if (index >= 0)
return index;
if (++i >= pool->nareas)
i = 0;
} while (i != start);
return -1;
}
#endif /* CONFIG_SWIOTLB_DYNAMIC */
#ifdef CONFIG_DEBUG_FS
/**
* mem_used() - get number of used slots in an allocator
* @mem: Software IO TLB allocator.
*
* The result is accurate in this version of the function, because an atomic
* counter is available if CONFIG_DEBUG_FS is set.
*
* Return: Number of used slots.
*/
static unsigned long mem_used(struct io_tlb_mem *mem)
{
return atomic_long_read(&mem->total_used);
}
#else /* !CONFIG_DEBUG_FS */
/**
* mem_pool_used() - get number of used slots in a memory pool
* @pool: Software IO TLB memory pool.
*
* The result is not accurate, see mem_used().
*
* Return: Approximate number of used slots.
*/
static unsigned long mem_pool_used(struct io_tlb_pool *pool)
{
int i;
unsigned long used = 0;
for (i = 0; i < pool->nareas; i++)
used += pool->areas[i].used;
return used;
}
/**
* mem_used() - get number of used slots in an allocator
* @mem: Software IO TLB allocator.
*
* The result is not accurate, because there is no locking of individual
* areas.
*
* Return: Approximate number of used slots.
*/
static unsigned long mem_used(struct io_tlb_mem *mem)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
struct io_tlb_pool *pool;
unsigned long used = 0;
rcu_read_lock();
list_for_each_entry_rcu(pool, &mem->pools, node)
used += mem_pool_used(pool);
rcu_read_unlock();
return used;
#else
return mem_pool_used(&mem->defpool);
#endif
}
#endif /* CONFIG_DEBUG_FS */
/**
* swiotlb_tbl_map_single() - bounce buffer map a single contiguous physical area
* @dev: Device which maps the buffer.
* @orig_addr: Original (non-bounced) physical IO buffer address
* @mapping_size: Requested size of the actual bounce buffer, excluding
* any pre- or post-padding for alignment
* @alloc_align_mask: Required start and end alignment of the allocated buffer
* @dir: DMA direction
* @attrs: Optional DMA attributes for the map operation
*
* Find and allocate a suitable sequence of IO TLB slots for the request.
* The allocated space starts at an alignment specified by alloc_align_mask,
* and the size of the allocated space is rounded up so that the total amount
* of allocated space is a multiple of (alloc_align_mask + 1). If
* alloc_align_mask is zero, the allocated space may be at any alignment and
* the size is not rounded up.
*
* The returned address is within the allocated space and matches the bits
* of orig_addr that are specified in the DMA min_align_mask for the device. As
* such, this returned address may be offset from the beginning of the allocated
* space. The bounce buffer space starting at the returned address for
* mapping_size bytes is initialized to the contents of the original IO buffer
* area. Any pre-padding (due to an offset) and any post-padding (due to
* rounding-up the size) is not initialized.
*/
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
size_t mapping_size, unsigned int alloc_align_mask,
enum dma_data_direction dir, unsigned long attrs)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
unsigned int offset;
struct io_tlb_pool *pool;
unsigned int i;
size_t size;
int index;
phys_addr_t tlb_addr;
unsigned short pad_slots;
if (!mem || !mem->nslabs) {
dev_warn_ratelimited(dev,
"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
return (phys_addr_t)DMA_MAPPING_ERROR;
}
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
/*
* The default swiotlb memory pool is allocated with PAGE_SIZE
* alignment. If a mapping is requested with larger alignment,
* the mapping may be unable to use the initial slot(s) in all
* sets of IO_TLB_SEGSIZE slots. In such case, a mapping request
* of or near the maximum mapping size would always fail.
*/
dev_WARN_ONCE(dev, alloc_align_mask > ~PAGE_MASK,
"Alloc alignment may prevent fulfilling requests with max mapping_size\n");
offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr);
size = ALIGN(mapping_size + offset, alloc_align_mask + 1);
index = swiotlb_find_slots(dev, orig_addr, size, alloc_align_mask, &pool);
if (index == -1) {
if (!(attrs & DMA_ATTR_NO_WARN))
dev_warn_ratelimited(dev,
"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
size, mem->nslabs, mem_used(mem));
return (phys_addr_t)DMA_MAPPING_ERROR;
}
/*
* If dma_skip_sync was set, reset it on first SWIOTLB buffer
* mapping to always sync SWIOTLB buffers.
*/
dma_reset_need_sync(dev);
/*
* Save away the mapping from the original address to the DMA address.
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
pad_slots = offset >> IO_TLB_SHIFT;
offset &= (IO_TLB_SIZE - 1);
index += pad_slots;
pool->slots[index].pad_slots = pad_slots;
for (i = 0; i < (nr_slots(size) - pad_slots); i++)
pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
tlb_addr = slot_addr(pool->start, index) + offset;
/*
* When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy
* the original buffer to the TLB buffer before initiating DMA in order
* to preserve the original's data if the device does a partial write,
* i.e. if the device doesn't overwrite the entire buffer. Preserving
* the original data, even if it's garbage, is necessary to match
* hardware behavior. Use of swiotlb is supposed to be transparent,
* i.e. swiotlb must not corrupt memory by clobbering unwritten bytes.
*/
swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE, pool);
return tlb_addr;
}
static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr,
struct io_tlb_pool *mem)
{
unsigned long flags;
unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr);
int index, nslots, aindex;
struct io_tlb_area *area;
int count, i;
index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
index -= mem->slots[index].pad_slots;
nslots = nr_slots(mem->slots[index].alloc_size + offset);
aindex = index / mem->area_nslabs;
area = &mem->areas[aindex];
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the range being returned.
*/
BUG_ON(aindex >= mem->nareas);
spin_lock_irqsave(&area->lock, flags);
if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
count = mem->slots[index + nslots].list;
else
count = 0;
/*
* Step 1: return the slots to the free list, merging the slots with
* succeeding slots
*/
for (i = index + nslots - 1; i >= index; i--) {
mem->slots[i].list = ++count;
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
mem->slots[i].alloc_size = 0;
mem->slots[i].pad_slots = 0;
}
/*
* Step 2: merge the returned slots with the preceding slots, if
* available (non zero)
*/
for (i = index - 1;
io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
i--)
mem->slots[i].list = ++count;
area->used -= nslots;
spin_unlock_irqrestore(&area->lock, flags);
dec_used(dev->dma_io_tlb_mem, nslots);
}
#ifdef CONFIG_SWIOTLB_DYNAMIC
/**
* swiotlb_del_transient() - delete a transient memory pool
* @dev: Device which mapped the buffer.
* @tlb_addr: Physical address within a bounce buffer.
* @pool: Pointer to the transient memory pool to be checked and deleted.
*
* Check whether the address belongs to a transient SWIOTLB memory pool.
* If yes, then delete the pool.
*
* Return: %true if @tlb_addr belonged to a transient pool that was released.
*/
static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr,
struct io_tlb_pool *pool)
{
if (!pool->transient)
return false;
dec_used(dev->dma_io_tlb_mem, pool->nslabs);
swiotlb_del_pool(dev, pool);
dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs);
return true;
}
#else /* !CONFIG_SWIOTLB_DYNAMIC */
static inline bool swiotlb_del_transient(struct device *dev,
phys_addr_t tlb_addr, struct io_tlb_pool *pool)
{
return false;
}
#endif /* CONFIG_SWIOTLB_DYNAMIC */
/*
* tlb_addr is the physical address of the bounce buffer to unmap.
*/
void __swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
size_t mapping_size, enum dma_data_direction dir,
unsigned long attrs, struct io_tlb_pool *pool)
{
/*
* First, sync the memory before unmapping the entry
*/
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(dev, tlb_addr, mapping_size,
DMA_FROM_DEVICE, pool);
if (swiotlb_del_transient(dev, tlb_addr, pool))
return;
swiotlb_release_slots(dev, tlb_addr, pool);
}
void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
struct io_tlb_pool *pool)
{
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE, pool);
else
BUG_ON(dir != DMA_FROM_DEVICE);
}
void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
struct io_tlb_pool *pool)
{
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE, pool);
else
BUG_ON(dir != DMA_TO_DEVICE);
}
/*
* Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
* to the device copy the data into it as well.
*/
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t swiotlb_addr;
dma_addr_t dma_addr;
trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, 0, dir, attrs);
if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
/* Ensure that the address returned is DMA'ble */
dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
__swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC,
swiotlb_find_pool(dev, swiotlb_addr));
dev_WARN_ONCE(dev, 1,
"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
return DMA_MAPPING_ERROR;
}
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(swiotlb_addr, size, dir);
return dma_addr;
}
size_t swiotlb_max_mapping_size(struct device *dev)
{
int min_align_mask = dma_get_min_align_mask(dev);
int min_align = 0;
/*
* swiotlb_find_slots() skips slots according to
* min align mask. This affects max mapping size.
* Take it into account here.
*/
if (min_align_mask)
min_align = roundup(min_align_mask, IO_TLB_SIZE);
return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}
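/*
 * Illustrative example (assumes 2 KiB slots and IO_TLB_SEGSIZE == 128, as
 * in current kernels): with no min_align_mask the maximum mapping size is
 * 2048 * 128 = 256 KiB; with min_align_mask == 0xfff the first slots of a
 * segment may be unusable, so the maximum drops by roundup(0xfff, 2048) =
 * 4 KiB to 252 KiB.
 */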
/**
* is_swiotlb_allocated() - check if the default software IO TLB is initialized
*/
bool is_swiotlb_allocated(void)
{
return io_tlb_default_mem.nslabs;
}
bool is_swiotlb_active(struct device *dev)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
return mem && mem->nslabs;
}
/**
* default_swiotlb_base() - get the base address of the default SWIOTLB
*
* Get the lowest physical address used by the default software IO TLB pool.
*/
phys_addr_t default_swiotlb_base(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
io_tlb_default_mem.can_grow = false;
#endif
return io_tlb_default_mem.defpool.start;
}
/**
* default_swiotlb_limit() - get the address limit of the default SWIOTLB
*
* Get the highest physical address used by the default software IO TLB pool.
*/
phys_addr_t default_swiotlb_limit(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
return io_tlb_default_mem.phys_limit;
#else
return io_tlb_default_mem.defpool.end - 1;
#endif
}
#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_SWIOTLB_DYNAMIC
static unsigned long mem_transient_used(struct io_tlb_mem *mem)
{
return atomic_long_read(&mem->transient_nslabs);
}
static int io_tlb_transient_used_get(void *data, u64 *val)
{
struct io_tlb_mem *mem = data;
*val = mem_transient_used(mem);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_transient_used, io_tlb_transient_used_get,
NULL, "%llu\n");
#endif /* CONFIG_SWIOTLB_DYNAMIC */
static int io_tlb_used_get(void *data, u64 *val)
{
struct io_tlb_mem *mem = data;
*val = mem_used(mem);
return 0;
}
static int io_tlb_hiwater_get(void *data, u64 *val)
{
struct io_tlb_mem *mem = data;
*val = atomic_long_read(&mem->used_hiwater);
return 0;
}
static int io_tlb_hiwater_set(void *data, u64 val)
{
struct io_tlb_mem *mem = data;
/* Only allow setting to zero */
if (val != 0)
return -EINVAL;
atomic_long_set(&mem->used_hiwater, val);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
io_tlb_hiwater_set, "%llu\n");
static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
const char *dirname)
{
mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
if (!mem->nslabs)
return;
debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
&fops_io_tlb_used);
debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
&fops_io_tlb_hiwater);
#ifdef CONFIG_SWIOTLB_DYNAMIC
debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
mem, &fops_io_tlb_transient_used);
#endif
}
static int __init swiotlb_create_default_debugfs(void)
{
swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
return 0;
}
late_initcall(swiotlb_create_default_debugfs);
#else /* !CONFIG_DEBUG_FS */
static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
const char *dirname)
{
}
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
phys_addr_t tlb_addr;
unsigned int align;
int index;
if (!mem)
return NULL;
align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
index = swiotlb_find_slots(dev, 0, size, align, &pool);
if (index == -1)
return NULL;
tlb_addr = slot_addr(pool->start, index);
if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
&tlb_addr);
swiotlb_release_slots(dev, tlb_addr, pool);
return NULL;
}
return pfn_to_page(PFN_DOWN(tlb_addr));
}
bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
phys_addr_t tlb_addr = page_to_phys(page);
struct io_tlb_pool *pool;
pool = swiotlb_find_pool(dev, tlb_addr);
if (!pool)
return false;
swiotlb_release_slots(dev, tlb_addr, pool);
return true;
}
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
struct device *dev)
{
struct io_tlb_mem *mem = rmem->priv;
unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
/* Set the per-device IO TLB area count to one */
unsigned int nareas = 1;
if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
return -EINVAL;
}
/*
* Since multiple devices can share the same pool, the private data,
* io_tlb_mem struct, will be initialized by the first device attached
* to it.
*/
if (!mem) {
struct io_tlb_pool *pool;
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
pool = &mem->defpool;
pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
if (!pool->slots) {
kfree(mem);
return -ENOMEM;
}
pool->areas = kcalloc(nareas, sizeof(*pool->areas),
GFP_KERNEL);
if (!pool->areas) {
kfree(pool->slots);
kfree(mem);
return -ENOMEM;
}
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
rmem->size >> PAGE_SHIFT);
swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
false, nareas);
mem->force_bounce = true;
mem->for_alloc = true;
#ifdef CONFIG_SWIOTLB_DYNAMIC
spin_lock_init(&mem->lock);
INIT_LIST_HEAD_RCU(&mem->pools);
#endif
add_mem_pool(mem, pool);
rmem->priv = mem;
swiotlb_create_debugfs_files(mem, rmem->name);
}
dev->dma_io_tlb_mem = mem;
return 0;
}
static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
struct device *dev)
{
dev->dma_io_tlb_mem = &io_tlb_default_mem;
}
static const struct reserved_mem_ops rmem_swiotlb_ops = {
.device_init = rmem_swiotlb_device_init,
.device_release = rmem_swiotlb_device_release,
};
static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
unsigned long node = rmem->fdt_node;
if (of_get_flat_dt_prop(node, "reusable", NULL) ||
of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;
rmem->ops = &rmem_swiotlb_ops;
pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <linux/errno.h>
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/pgalloc_tag.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/compiler.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/memremap.h>
#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/rcuwait.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct user_struct;
struct pt_regs;
struct folio_batch;
void arch_mm_preinit(void);
void mm_core_init(void);
void init_mm_internals(void);
extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
return (unsigned long)atomic_long_read(&_totalram_pages);
}
static inline void totalram_pages_inc(void)
{
atomic_long_inc(&_totalram_pages);
}
static inline void totalram_pages_dec(void)
{
atomic_long_dec(&_totalram_pages);
}
static inline void totalram_pages_add(long count)
{
atomic_long_add(count, &_totalram_pages);
}
extern void * high_memory;
/*
* Convert between pages and MB
* 20 is the shift for 1MB (2^20 = 1MB)
* PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
* So (20 - PAGE_SHIFT) converts between pages and MB
*/
#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
#define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
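/*
* Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): the shift is
* 20 - 12 = 8, so MB_TO_PAGES(1) == 256 pages and PAGES_TO_MB(512) == 2 (MB).
*/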
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern int mmap_rnd_bits_max __ro_after_init;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif
#ifndef DIRECT_MAP_PHYSMEM_END
# ifdef MAX_PHYSMEM_BITS
# define DIRECT_MAP_PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
# else
# define DIRECT_MAP_PHYSMEM_END (((phys_addr_t)-1)&~(1ULL<<63))
# endif
#endif
#include <asm/page.h>
#include <asm/processor.h>
#ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
#ifndef page_to_virt
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif
#ifndef lm_alias
#define lm_alias(x) __va(__pa_symbol(x))
#endif
/*
* To prevent common memory management code from establishing
* a zero page mapping on a read fault.
* This macro should be defined within <asm/pgtable.h>.
* s390 does this to prevent multiplexing of hardware bits
* related to the physical page in case of virtualization.
*/
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X) (0)
#endif
/*
* On some architectures it is expensive to call memset() for small sizes.
* If an architecture decides to implement its own version of
* mm_zero_struct_page it should wrap the defines below in a #ifndef and
* define its own version of this macro in <asm/pgtable.h>
*/
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 96
* or shrinks below 56. The idea is that the compiler optimizes out the switch()
* statement and leaves only move/store instructions. The compiler can also
* combine write statements if they are both assignments and can be reordered;
* this can result in several of the writes here being dropped.
*/
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
unsigned long *_pp = (void *)page;
/* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
BUILD_BUG_ON(sizeof(struct page) & 7);
BUILD_BUG_ON(sizeof(struct page) < 56);
BUILD_BUG_ON(sizeof(struct page) > 96);
switch (sizeof(struct page)) {
case 96:
_pp[11] = 0;
fallthrough;
case 88:
_pp[10] = 0;
fallthrough;
case 80:
_pp[9] = 0;
fallthrough;
case 72:
_pp[8] = 0;
fallthrough;
case 64:
_pp[7] = 0;
fallthrough;
case 56:
_pp[6] = 0;
_pp[5] = 0;
_pp[4] = 0;
_pp[3] = 0;
_pp[2] = 0;
_pp[1] = 0;
_pp[0] = 0;
}
}
#else
#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
#endif
/*
* Default maximum number of active map areas, this limits the number of vmas
* per mm struct. Users can overwrite this number by sysctl but there is a
* problem.
*
* When a program's coredump is generated in ELF format, one section is created
* per vma. In ELF, the number of sections is represented as an unsigned short.
* This means the number of sections must stay below 65535 at coredump time.
* Because the kernel adds some informative sections to the program image when
* generating the coredump, we need some margin. The number of extra sections
* is currently 1-3 and depends on the arch; we use 5 as a safe margin here.
*
* ELF extended numbering allows more than 65535 sections, so the 16-bit bound
* is not a hard limit any more, although some userspace tools can be surprised
* by that.
*/
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
extern int sysctl_max_map_count;
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
#else
static inline bool page_range_contiguous(const struct page *page,
unsigned long nr_pages)
{
return true;
}
#endif
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
/* to align the pointer to the (prev) page boundary */
#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
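/*
* Example (assuming 4 KiB pages): PAGE_ALIGN(0x1234) == 0x2000,
* PAGE_ALIGN_DOWN(0x1234) == 0x1000, PAGE_ALIGNED(0x2000) is true and
* PAGE_ALIGNED(0x1234) is false.
*/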
/**
* folio_page_idx - Return the number of a page in a folio.
* @folio: The folio.
* @page: The folio page.
*
* This function expects that the page is actually part of the folio.
* The returned number is relative to the start of the folio.
*/
static inline unsigned long folio_page_idx(const struct folio *folio,
const struct page *page)
{
return page - &folio->page;
}
static inline struct folio *lru_to_folio(struct list_head *head)
{
return list_entry((head)->prev, struct folio, lru);
}
void setup_initial_init_mm(void *start_code, void *end_code,
void *end_data, void *brk);
/*
* Linux kernel virtual memory manager primitives.
* The idea being to have a "virtual" mm in the same way
* we have a virtual fs - giving a cleaner interface to the
* mm details, and allowing different kinds of memory mappings
* (from shared memory to executable loading to arbitrary
* mmap() functions).
*/
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;
extern unsigned int kobjsize(const void *objp);
#endif
/*
* vm_flags in vm_area_struct, see mm_types.h.
* When changing, update also include/trace/events/mmflags.h
*/
#define VM_NONE 0x00000000
#define VM_READ 0x00000001 /* currently active flags */
#define VM_WRITE 0x00000002
#define VM_EXEC 0x00000004
#define VM_SHARED 0x00000008
/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
#define VM_MAYWRITE 0x00000020
#define VM_MAYEXEC 0x00000040
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
#else /* CONFIG_MMU */
#define VM_MAYOVERLAY 0x00000200 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
#define VM_UFFD_MISSING 0
#endif /* CONFIG_MMU */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
/* Used by sys_madvise() */
#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_SYNC 0x00800000 /* Synchronous page faults */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY 0
#endif
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE BIT(31) /* KSM may merge identical pages */
#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_6 38 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
#define VM_HIGH_ARCH_6 BIT(VM_HIGH_ARCH_BIT_6)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0 VM_HIGH_ARCH_0
# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
#if CONFIG_ARCH_PKEY_BITS > 3
# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
#else
# define VM_PKEY_BIT3 0
#endif
#if CONFIG_ARCH_PKEY_BITS > 4
# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4 0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
#ifdef CONFIG_X86_USER_SHADOW_STACK
/*
* VM_SHADOW_STACK should not be set with VM_SHARED because of the lack of
* support in core mm.
*
* These VMAs will get a single end guard page. This helps userspace protect
* itself from attacks. A single page is enough for current shadow stack archs
* (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
* for more details on the guard size.
*/
# define VM_SHADOW_STACK VM_HIGH_ARCH_5
#endif
#if defined(CONFIG_ARM64_GCS)
/*
* arm64's Guarded Control Stack implements similar functionality and
* has similar constraints to shadow stacks.
*/
# define VM_SHADOW_STACK VM_HIGH_ARCH_6
#endif
#ifndef VM_SHADOW_STACK
# define VM_SHADOW_STACK VM_NONE
#endif
#if defined(CONFIG_PPC64)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
# define VM_ARCH_CLEAR VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */
# define VM_ARCH_CLEAR VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
#if defined(CONFIG_ARM64_MTE)
# define VM_MTE VM_HIGH_ARCH_4 /* Use Tagged memory for access control */
# define VM_MTE_ALLOWED VM_HIGH_ARCH_5 /* Tagged memory permitted */
#else
# define VM_MTE VM_NONE
# define VM_MTE_ALLOWED VM_NONE
#endif
#ifndef VM_GROWSUP
# define VM_GROWSUP VM_NONE
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define VM_UFFD_MINOR_BIT 41
# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR VM_NONE
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
/*
* This flag is used to connect VFIO to arch specific KVM code. It
* indicates that the memory under this VMA is safe for use with any
* non-cacheable memory type inside KVM. Some VFIO devices, on some
* platforms, are thought to be unsafe and can cause machine crashes
* if KVM does not lock down the memory type.
*/
#ifdef CONFIG_64BIT
#define VM_ALLOW_ANY_UNCACHED_BIT 39
#define VM_ALLOW_ANY_UNCACHED BIT(VM_ALLOW_ANY_UNCACHED_BIT)
#else
#define VM_ALLOW_ANY_UNCACHED VM_NONE
#endif
#ifdef CONFIG_64BIT
#define VM_DROPPABLE_BIT 40
#define VM_DROPPABLE BIT(VM_DROPPABLE_BIT)
#elif defined(CONFIG_PPC32)
#define VM_DROPPABLE VM_ARCH_1
#else
#define VM_DROPPABLE VM_NONE
#endif
#ifdef CONFIG_64BIT
#define VM_SEALED_BIT 42
#define VM_SEALED BIT(VM_SEALED_BIT)
#else
#define VM_SEALED VM_NONE
#endif
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
#endif
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
#define VM_STACK_EARLY VM_GROWSDOWN
#else
#define VM_STACK VM_GROWSDOWN
#define VM_STACK_EARLY 0
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
/*
* Special vmas that are non-mergable, non-mlock()able.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
/* This mask prevents VMA from being scanned with khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR VM_NONE
#endif
#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask.
*/
/*
* The default fault flags that should be used by most of the
* arch-specific page fault handlers.
*/
#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \
FAULT_FLAG_KILLABLE | \
FAULT_FLAG_INTERRUPTIBLE)
/**
* fault_flag_allow_retry_first - check ALLOW_RETRY the first time
* @flags: Fault flags.
*
* This is mostly used in places where we want to avoid holding
* the mmap_lock for too long while waiting for another condition
* to change, in which case we can be polite and release the
* mmap_lock in the first round to avoid potential starvation of other
* processes that also want the mmap_lock.
*
* Return: true if the page fault allows retry and this is the first
* attempt of the fault handling; false otherwise.
*/
static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
return (flags & FAULT_FLAG_ALLOW_RETRY) &&
(!(flags & FAULT_FLAG_TRIED));
}
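/*
* Illustrative sketch (loosely modelled on how the page cache drops the lock
* while waiting for a locked folio; the surrounding context is assumed):
*
* if (fault_flag_allow_retry_first(vmf->flags)) {
* if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
* return 0;
* release_fault_lock(vmf);
* ... sleep until the condition changes ...
* return VM_FAULT_RETRY;
* }
*/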
#define FAULT_FLAG_TRACE \
{ FAULT_FLAG_WRITE, "WRITE" }, \
{ FAULT_FLAG_MKWRITE, "MKWRITE" }, \
{ FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
{ FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
{ FAULT_FLAG_KILLABLE, "KILLABLE" }, \
{ FAULT_FLAG_TRIED, "TRIED" }, \
{ FAULT_FLAG_USER, "USER" }, \
{ FAULT_FLAG_REMOTE, "REMOTE" }, \
{ FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
{ FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \
{ FAULT_FLAG_VMA_LOCK, "VMA_LOCK" }
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
* MM layer fills up gfp_mask for page allocations but fault handler might
* alter it if its implementation requires a different allocation context.
*
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
const struct {
struct vm_area_struct *vma; /* Target VMA */
gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff; /* Logical page offset based on vma */
unsigned long address; /* Faulting virtual address - masked */
unsigned long real_address; /* Faulting virtual address - unmasked */
};
enum fault_flag flags; /* FAULT_FLAG_xxx flags
* XXX: should really be 'const' */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
pud_t *pud; /* Pointer to pud entry matching
* the 'address'
*/
union {
pte_t orig_pte; /* Value of PTE at the time of fault */
pmd_t orig_pmd; /* Value of PMD at the time of fault,
* used by PMD fault only.
*/
};
struct page *cow_page; /* Page handler may use for COW fault */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
* VM_FAULT_ERROR).
*/
/* These three entries are valid only while holding ptl lock */
pte_t *pte; /* Pointer to pte entry matching
* the 'address'. NULL if the page
* table hasn't been allocated.
*/
spinlock_t *ptl; /* Page table lock.
* Protects pte page table if 'pte'
* is not NULL, otherwise pmd.
*/
pgtable_t prealloc_pte; /* Pre-allocated pte page table.
* vm_ops->map_pages() sets up a page
* table from atomic context.
* do_fault_around() pre-allocates
* page table to avoid allocation from
* atomic context.
*/
};
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), and pointers
* to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
/**
* @close: Called when the VMA is being removed from the MM.
* Context: User context. May sleep. Caller holds mmap_lock.
*/
void (*close)(struct vm_area_struct * area);
/* Called any time before splitting to check if it's allowed */
int (*may_split)(struct vm_area_struct *area, unsigned long addr);
int (*mremap)(struct vm_area_struct *area);
/*
* Called by mprotect() to make driver-specific permission
* checks before mprotect() is finalised. The VMA must not
* be modified. Returns 0 if mprotect() can proceed.
*/
int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
/* notification that a previously read-only page is about to become
* writable; if an error is returned it will cause a SIGBUS */
vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs. See also generic_access_phys() for a generic
* implementation useful for any iomem mapping.
*/
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
/* Called by the /proc/PID/maps code to ask the vma whether it
* has a special name. Returning non-NULL will also cause this
* vma to be dumped unconditionally. */
const char *(*name)(struct vm_area_struct *vma);
#ifdef CONFIG_NUMA
/*
* set_policy() op must add a reference to any non-NULL @new mempolicy
* to hold the policy upon return. Caller should pass NULL @new to
* remove a policy and fall back to surrounding context--i.e. do not
* install a MPOL_DEFAULT policy, nor the task or system default
* mempolicy.
*/
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
/*
* get_policy() op must add reference [mpol_get()] to any policy at
* (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
* in mm/mempolicy.c will do this automatically.
* get_policy() must NOT add a ref if the policy at (vma,addr) is not
* marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
* If no [shared/vma] mempolicy exists at the addr, get_policy() op
* must return NULL--i.e., do not "fallback" to task or system default
* policy.
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
/*
* Called by vm_normal_page() for special PTEs in @vma at @addr. This
* allows for returning a "normal" page from vm_normal_page() even
* though the PTE indicates that the "struct page" either does not exist
* or should not be touched: "special".
*
* Do not add new users: this really only works when a "normal" page
* was mapped, but then the PTE got changed to something weird (and
* marked special) such that pte_pfn() no longer identifies the originally
* inserted page.
*/
struct page *(*find_normal_page)(struct vm_area_struct *vma,
unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};
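/*
* Minimal illustrative sketch of a driver supplying its own vm_ops; the
* example_* names and the example_dev structure are hypothetical, not part
* of the kernel API:
*
* static vm_fault_t example_fault(struct vm_fault *vmf)
* {
* struct example_dev *ed = vmf->vma->vm_private_data;
*
* if (vmf->pgoff >= ed->nr_pages)
* return VM_FAULT_SIGBUS;
* vmf->page = ed->pages[vmf->pgoff];
* get_page(vmf->page);
* return 0;
* }
*
* static const struct vm_operations_struct example_vm_ops = {
* .fault = example_fault,
* };
*/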
#ifdef CONFIG_NUMA_BALANCING
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
vma->numab_state = NULL;
}
static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
kfree(vma->numab_state);
}
#else
static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif /* CONFIG_NUMA_BALANCING */
/*
* These must be here rather than in mmap_lock.h as they depend on the vm_fault
* type, which is declared in this header.
*/
#ifdef CONFIG_PER_VMA_LOCK
static inline void release_fault_lock(struct vm_fault *vmf)
{
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
vma_end_read(vmf->vma);
else
mmap_read_unlock(vmf->vma->vm_mm);
}
static inline void assert_fault_locked(const struct vm_fault *vmf)
{
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
vma_assert_locked(vmf->vma);
else
mmap_assert_locked(vmf->vma->vm_mm);
}
#else
static inline void release_fault_lock(struct vm_fault *vmf)
{
mmap_read_unlock(vmf->vma->vm_mm);
}
static inline void assert_fault_locked(const struct vm_fault *vmf)
{
mmap_assert_locked(vmf->vma->vm_mm);
}
#endif /* CONFIG_PER_VMA_LOCK */
static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
static inline bool mm_flags_test_and_set(int flag, struct mm_struct *mm)
{
return test_and_set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
{
return test_and_clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
static inline void mm_flags_set(int flag, struct mm_struct *mm)
{
set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
static inline void mm_flags_clear(int flag, struct mm_struct *mm)
{
clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
static inline void mm_flags_clear_all(struct mm_struct *mm)
{
bitmap_zero(ACCESS_PRIVATE(&mm->flags, __mm_flags), NUM_MM_FLAG_BITS);
}
extern const struct vm_operations_struct vma_dummy_vm_ops;
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma_lock_init(vma, false);
}
/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
vm_flags_t flags)
{
ACCESS_PRIVATE(vma, __vm_flags) = flags;
}
/*
* Use when VMA is part of the VMA tree and modifications need coordination
* Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
* it should be locked explicitly beforehand.
*/
static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_assert_write_locked(vma);
vm_flags_init(vma, flags);
}
static inline void vm_flags_reset_once(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_assert_write_locked(vma);
WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
}
static inline void vm_flags_set(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) |= flags;
}
static inline void vm_flags_clear(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
}
/*
* Use only if VMA is not part of the VMA tree or has no other users and
* therefore needs no locking.
*/
static inline void __vm_flags_mod(struct vm_area_struct *vma,
vm_flags_t set, vm_flags_t clear)
{
vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
}
/*
* Use only when the order of set/clear operations is unimportant, otherwise
* use vm_flags_{set|clear} explicitly.
*/
static inline void vm_flags_mod(struct vm_area_struct *vma,
vm_flags_t set, vm_flags_t clear)
{
vma_start_write(vma);
__vm_flags_mod(vma, set, clear);
}
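/*
* Illustrative sketch: a VMA that is already in the tree must be modified
* under the mmap_lock held for write; vm_flags_set()/vm_flags_clear() take
* the per-VMA write lock internally via vma_start_write():
*
* mmap_write_lock(mm);
* vma = find_vma(mm, addr);
* if (vma)
* vm_flags_set(vma, VM_LOCKED);
* mmap_write_unlock(mm);
*/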
static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
vma->vm_ops = NULL;
}
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
/*
* Indicate if the VMA is a heap for the given task; for
* /proc/PID/maps that is the heap of the main task.
*/
static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
{
return vma->vm_start < vma->vm_mm->brk &&
vma->vm_end > vma->vm_mm->start_brk;
}
/*
* Indicate if the VMA is a stack for the given task; for
* /proc/PID/maps that is the stack of the main task.
*/
static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
{
/*
* We make no effort to guess what a given thread considers to be
* its "stack". It's not even well-defined for programs written
* languages like Go.
*/
return vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack;
}
static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
if (!maybe_stack)
return false;
if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
VM_STACK_INCOMPLETE_SETUP)
return true;
return false;
}
static inline bool vma_is_foreign(const struct vm_area_struct *vma)
{
if (!current->mm)
return true;
if (current->mm != vma->vm_mm)
return true;
return false;
}
static inline bool vma_is_accessible(const struct vm_area_struct *vma)
{
return vma->vm_flags & VM_ACCESS_FLAGS;
}
static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
(VM_SHARED | VM_MAYWRITE);
}
static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
{
return is_shared_maywrite(vma->vm_flags);
}
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
return mas_find(&vmi->mas, max - 1);
}
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
/*
* Uses mas_find() to get the first VMA when the iterator starts.
* Calling mas_next() could skip the first entry.
*/
return mas_find(&vmi->mas, ULONG_MAX);
}
static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
return mas_next_range(&vmi->mas, ULONG_MAX);
}
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
return mas_prev(&vmi->mas, 0);
}
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
__mas_set_range(&vmi->mas, start, end - 1);
mas_store_gfp(&vmi->mas, NULL, gfp);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
return 0;
}
/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
mas_destroy(&vmi->mas);
}
static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
vmi->mas.index = vma->vm_start;
vmi->mas.last = vma->vm_end - 1;
mas_store(&vmi->mas, vma);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
vma_mark_attached(vma);
return 0;
}
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
mas_pause(&vmi->mas);
}
static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
mas_set(&vmi->mas, addr);
}
#define for_each_vma(__vmi, __vma) \
while (((__vma) = vma_next(&(__vmi))) != NULL)
/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end) \
while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
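/*
* Illustrative sketch of walking every VMA of an mm with the iterator (the
* counting is just an example):
*
* VMA_ITERATOR(vmi, mm, 0);
* struct vm_area_struct *vma;
* unsigned long nr = 0;
*
* mmap_read_lock(mm);
* for_each_vma(vmi, vma)
* nr++;
* mmap_read_unlock(mm);
*/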
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
* paths in userfault.
*/
bool vma_is_shmem(const struct vm_area_struct *vma);
bool vma_is_anon_shmem(const struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
int vma_is_stack_for_current(const struct vm_area_struct *vma);
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
struct mmu_gather;
struct inode;
extern void prep_compound_page(struct page *page, unsigned int order);
static inline unsigned int folio_large_order(const struct folio *folio)
{
return folio->_flags_1 & 0xff;
}
#ifdef NR_PAGES_IN_LARGE_FOLIO
static inline unsigned long folio_large_nr_pages(const struct folio *folio)
{
return folio->_nr_pages;
}
#else
static inline unsigned long folio_large_nr_pages(const struct folio *folio)
{
return 1L << folio_large_order(folio);
}
#endif
/*
* compound_order() can be called without holding a reference, which means
* that niceties like page_folio() don't work. These callers should be
* prepared to handle wild return values. For example, PG_head may be
* set before the order is initialised, or this may be a tail page.
* See compaction.c for some good examples.
*/
static inline unsigned int compound_order(const struct page *page)
{
const struct folio *folio = (struct folio *)page;
if (!test_bit(PG_head, &folio->flags.f))
return 0;
return folio_large_order(folio);
}
/**
* folio_order - The allocation order of a folio.
* @folio: The folio.
*
* A folio is composed of 2^order pages. See get_order() for the definition
* of order.
*
* Return: The order of the folio.
*/
static inline unsigned int folio_order(const struct folio *folio)
{
if (!folio_test_large(folio))
return 0;
return folio_large_order(folio);
}
/**
* folio_reset_order - Reset the folio order and derived _nr_pages
* @folio: The folio.
*
* Reset the order and derived _nr_pages to 0. Must only be used in the
* process of splitting large folios.
*/
static inline void folio_reset_order(struct folio *folio)
{
if (WARN_ON_ONCE(!folio_test_large(folio)))
return;
folio->_flags_1 &= ~0xffUL;
#ifdef NR_PAGES_IN_LARGE_FOLIO
folio->_nr_pages = 0;
#endif
}
#include <linux/huge_mm.h>
/*
* Methods to modify the page usage count.
*
* What counts for a page usage:
* - cache mapping (page->mapping)
* - private data (page->private)
* - page mapped in a task's page tables, each mapping
* is counted separately
*
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
*/
/*
* Drop a ref, return true if the refcount fell to zero (the page has no users)
*/
static inline int put_page_testzero(struct page *page)
{
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
return page_ref_dec_and_test(page);
}
static inline int folio_put_testzero(struct folio *folio)
{
return put_page_testzero(&folio->page);
}
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
* This can be called when MMU is off so it must not access
* any of the virtual mappings.
*/
static inline bool get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}
static inline struct folio *folio_get_nontail_page(struct page *page)
{
if (unlikely(!get_page_unless_zero(page)))
return NULL;
return (struct folio *)page;
}
extern int page_is_ram(unsigned long pfn);
enum {
REGION_INTERSECTS,
REGION_DISJOINT,
REGION_MIXED,
};
int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
unsigned long desc);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);
/*
* Determine if an address is within the vmalloc range
*
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
return 0;
}
#endif
/*
* How many times the entire folio is mapped as a single unit (e.g. by a
* PMD or PUD entry). This is probably not what you want, except for
* debugging purposes or implementation of other core folio_*() primitives.
*/
static inline int folio_entire_mapcount(const struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
return 0;
return atomic_read(&folio->_entire_mapcount) + 1;
}
static inline int folio_large_mapcount(const struct folio *folio)
{
VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
return atomic_read(&folio->_large_mapcount) + 1;
}
/**
* folio_mapcount() - Number of mappings of this folio.
* @folio: The folio.
*
* The folio mapcount corresponds to the number of present user page table
* entries that reference any part of a folio. Each such present user page
* table entry must be paired with exactly one folio reference.
*
* For ordinary folios, each user page table entry (PTE/PMD/PUD/...) counts
* exactly once.
*
* For hugetlb folios, each abstracted "hugetlb" user page table entry that
* references the entire folio counts exactly once, even when such special
* page table entries are comprised of multiple ordinary page table entries.
*
* Will report 0 for pages which cannot be mapped into userspace, such as
* slab, page tables and similar.
*
* Return: The number of times this folio is mapped.
*/
static inline int folio_mapcount(const struct folio *folio)
{
int mapcount;
if (likely(!folio_test_large(folio))) {
mapcount = atomic_read(&folio->_mapcount) + 1;
if (page_mapcount_is_type(mapcount))
mapcount = 0;
return mapcount;
}
return folio_large_mapcount(folio);
}
/**
* folio_mapped - Is this folio mapped into userspace?
* @folio: The folio.
*
* Return: True if any page in this folio is referenced by user page tables.
*/
static inline bool folio_mapped(const struct folio *folio)
{
return folio_mapcount(folio) >= 1;
}
/*
* Return true if this page is mapped into pagetables.
* For a compound page it returns true if any sub-page of the compound page is
* mapped, even if this particular sub-page is not itself mapped by any PTE or PMD.
*/
static inline bool page_mapped(const struct page *page)
{
return folio_mapped(page_folio(page));
}
static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
return compound_head(page);
}
static inline struct folio *virt_to_folio(const void *x)
{
struct page *page = virt_to_page(x);
return page_folio(page);
}
void __folio_put(struct folio *folio);
void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);
int folio_mc_copy(struct folio *dst, struct folio *src);
unsigned long nr_free_buffer_pages(void);
/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(const struct page *page)
{
return PAGE_SIZE << compound_order(page);
}
/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
return PAGE_SHIFT + compound_order(page);
}
/**
* thp_order - Order of a transparent huge page.
* @page: Head page of a transparent huge page.
*/
static inline unsigned int thp_order(struct page *page)
{
VM_BUG_ON_PGFLAGS(PageTail(page), page);
return compound_order(page);
}
/**
* thp_size - Size of a transparent huge page.
* @page: Head page of a transparent huge page.
*
* Return: Number of bytes in this page.
*/
static inline unsigned long thp_size(struct page *page)
{
return PAGE_SIZE << thp_order(page);
}
#ifdef CONFIG_MMU
/*
* Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
* servicing faults for write access. In the normal case, we always want
* pte_mkwrite. But get_user_pages can cause write faults for mappings
* that do not have writing enabled, when used by access_process_vm.
*/
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pte = pte_mkwrite(pte, vma);
return pte;
}
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
void set_pte_range(struct vm_fault *vmf, struct folio *folio,
struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
#endif
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
* zeroes, and text pages of executables and shared libraries have
* only one copy in memory, at most, normally.
*
* For the non-reserved pages, page_count(page) denotes a reference count.
* page_count() == 0 means the page is free. page->lru is then used for
* freelist management in the buddy allocator.
* page_count() > 0 means the page has been allocated.
*
* Pages are allocated by the slab allocator in order to provide memory
* to kmalloc and kmem_cache_alloc. In this case, the management of the
* page, and the fields in 'struct page' are the responsibility of mm/slab.c
* unless a particular usage is carefully commented. (the responsibility of
* freeing the kmalloc memory is the caller's, of course).
*
* A page may be used by anyone else who does a __get_free_page().
* In this case, page_count still tracks the references, and should only
* be used through the normal accessor functions. The top bits of page->flags
* and page->virtual store page management information, but all other fields
* are unused and could be used privately, carefully. The management of this
* page is the responsibility of the one who allocated it, and those who have
* subsequently been given references to it.
*
* The other pages (we may call them "pagecache pages") are completely
* managed by the Linux memory manager: I/O, buffers, swapping etc.
* The following discussion applies only to them.
*
* A pagecache page contains an opaque `private' member, which belongs to the
* page's address_space. Usually, this is the address of a circular list of
* the page's disk buffers. PG_private must be set to tell the VM to call
* into the filesystem to release these pages.
*
* A folio may belong to an inode's memory mapping. In this case,
* folio->mapping points to the inode, and folio->index is the file
* offset of the folio, in units of PAGE_SIZE.
*
* If pagecache pages are not associated with an inode, they are said to be
* anonymous pages. These may become associated with the swapcache, and in that
* case PG_swapcache is set, and page->private is an offset into the swapcache.
*
* In either case (swapcache or inode backed), the pagecache itself holds one
* reference to the page. Setting PG_private should also increment the
* refcount. Each user mapping also holds a reference to the page.
*
* The pagecache pages are stored in a per-mapping radix tree, which is
* rooted at mapping->i_pages, and indexed by offset.
* Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
* lists, we instead now tag pages as dirty/writeback in the radix tree.
*
* All pagecache pages may be subject to I/O:
* - inode pages may need to be read from disk,
* - inode pages which have been modified and are MAP_SHARED may need
* to be written back to the inode on disk,
* - anonymous pages (including MAP_PRIVATE file mappings) which have been
* modified may need to be swapped out to swap space and (later) to be read
* back into memory.
*/
/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
((unsigned int) folio_ref_count(folio) + 127u <= 127u)
/**
* folio_get - Increment the reference count on a folio.
* @folio: The folio.
*
* Context: May be called in any context, as long as you know that
* you have a refcount on the folio. If you do not already have one,
* folio_try_get() may be the right interface for you to use.
*/
static inline void folio_get(struct folio *folio)
{
VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
folio_ref_inc(folio);
}
static inline void get_page(struct page *page)
{
struct folio *folio = page_folio(page);
if (WARN_ON_ONCE(folio_test_slab(folio)))
return;
if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
return;
folio_get(folio);
}
static inline __must_check bool try_get_page(struct page *page)
{
page = compound_head(page);
if (WARN_ON_ONCE(page_ref_count(page) <= 0))
return false;
page_ref_inc(page);
return true;
}
/**
* folio_put - Decrement the reference count on a folio.
* @folio: The folio.
*
* If the folio's reference count reaches zero, the memory will be
* released back to the page allocator and may be used by another
* allocation immediately. Do not access the memory or the struct folio
* after calling folio_put() unless you can be sure that it wasn't the
* last reference.
*
* Context: May be called in process or interrupt context, but not in NMI
* context. May be called while holding a spinlock.
*/
static inline void folio_put(struct folio *folio)
{
if (folio_put_testzero(folio))
__folio_put(folio);
}
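/*
* Illustrative sketch of a temporary reference (assumes the caller already
* has some means of finding the folio safely):
*
* folio_get(folio);
* ... read or modify the folio's contents ...
* folio_put(folio);
*
* After folio_put() the folio must not be touched again unless the caller
* still holds another reference.
*/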
/**
* folio_put_refs - Reduce the reference count on a folio.
* @folio: The folio.
* @refs: The amount to subtract from the folio's reference count.
*
* If the folio's reference count reaches zero, the memory will be
* released back to the page allocator and may be used by another
* allocation immediately. Do not access the memory or the struct folio
* after calling folio_put_refs() unless you can be sure that these weren't
* the last references.
*
* Context: May be called in process or interrupt context, but not in NMI
* context. May be called while holding a spinlock.
*/
static inline void folio_put_refs(struct folio *folio, int refs)
{
if (folio_ref_sub_and_test(folio, refs))
__folio_put(folio);
}
void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
/*
* union release_pages_arg - an array of pages or folios
*
* release_pages() releases a simple array of multiple pages, and
* accepts various different forms of said page array: either
* a regular old boring array of pages, an array of folios, or
* an array of encoded page pointers.
*
* The transparent union syntax for this kind of "any of these
* argument types" is all kinds of ugly, so look away.
*/
typedef union {
struct page **pages;
struct folio **folios;
struct encoded_page **encoded_pages;
} release_pages_arg __attribute__ ((__transparent_union__));
void release_pages(release_pages_arg, int nr);
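/*
* Example (sketch): thanks to the transparent union, an array of pages and
* an array of folios can both be passed directly:
*
* struct page *pages[16];
* struct folio *folios[16];
* ...
* release_pages(pages, nr_pages);
* release_pages(folios, nr_folios);
*/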
/**
* folios_put - Decrement the reference count on an array of folios.
* @folios: The folios.
*
* Like folio_put(), but for a batch of folios. This is more efficient
* than writing the loop yourself as it will optimise the locks which need
* to be taken if the folios are freed. The folios batch is returned
* empty and ready to be reused for another batch; there is no need to
* reinitialise it.
*
* Context: May be called in process or interrupt context, but not in NMI
* context. May be called while holding a spinlock.
*/
static inline void folios_put(struct folio_batch *folios)
{
folios_put_refs(folios, NULL);
}
static inline void put_page(struct page *page)
{
struct folio *folio = page_folio(page);
if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
return;
folio_put(folio);
}
/*
* GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
* the page's refcount so that two separate items are tracked: the original page
* reference count, and also a new count of how many pin_user_pages() calls were
* made against the page. ("gup-pinned" is another term for the latter).
*
* With this scheme, pin_user_pages() becomes special: such pages are marked as
* distinct from normal pages. As such, the unpin_user_page() call (and its
* variants) must be used in order to release gup-pinned pages.
*
* Choice of value:
*
* By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
* counts with respect to pin_user_pages() and unpin_user_page() becomes
* simpler, due to the fact that adding an even power of two to the page
* refcount has the effect of using only the upper N bits, for the code that
* counts up using the bias value. This means that the lower bits are left for
* the exclusive use of the original code that increments and decrements by one
* (or at least, by much smaller values than the bias value).
*
* Of course, once the lower bits overflow into the upper bits (and this is
* OK, because subtraction recovers the original values), then visual inspection
* no longer suffices to directly view the separate counts. However, for normal
* applications that don't have huge page reference counts, this won't be an
* issue.
*
* Locking: the lockless algorithm described in folio_try_get_rcu()
* provides safe operation for get_user_pages(), folio_mkclean() and
* other calls that race to set up page table entries.
*/
#define GUP_PIN_COUNTING_BIAS (1U << 10)
void unpin_user_page(struct page *page);
void unpin_folio(struct folio *folio);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
void unpin_user_folio(struct folio *folio, unsigned long npages);
void unpin_folios(struct folio **folios, unsigned long nfolios);
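/*
* Illustrative sketch of the pin/unpin pairing for a long-term DMA user;
* this assumes the current pin_user_pages() signature without a vmas
* argument and trims all error handling:
*
* struct page *pages[NR];
* long pinned;
*
* mmap_read_lock(current->mm);
* pinned = pin_user_pages(uaddr, NR, FOLL_WRITE | FOLL_LONGTERM, pages);
* mmap_read_unlock(current->mm);
* ... program the device to DMA into the pinned pages ...
* unpin_user_pages_dirty_lock(pages, pinned, true);
*/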
static inline bool is_cow_mapping(vm_flags_t flags)
{
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
#ifndef CONFIG_MMU
static inline bool is_nommu_shared_mapping(vm_flags_t flags)
{
/*
* NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
* R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
* a file mapping. R/O MAP_PRIVATE mappings might still modify
* underlying memory if ptrace is active, so this is only possible if
* ptrace does not apply. Note that there is no mprotect() to upgrade
* write permissions later.
*/
return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}
#endif
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
/*
* The identification function is mainly used by the buddy allocator for
* determining if two pages could be buddies. We are not really identifying
* the zone since we could be using the section number id if we do not have
* node id available in page flags.
* We only guarantee that it will return the same value for two combinable
* pages in a zone.
*/
static inline int page_zone_id(struct page *page)
{
return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
#ifdef NODE_NOT_IN_PAGE_FLAGS
int memdesc_nid(memdesc_flags_t mdf);
#else
static inline int memdesc_nid(memdesc_flags_t mdf)
{
return (mdf.f >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
static inline int page_to_nid(const struct page *page)
{
return memdesc_nid(PF_POISONED_CHECK(page)->flags);
}
static inline int folio_nid(const struct folio *folio)
{
return memdesc_nid(folio->flags);
}
#ifdef CONFIG_NUMA_BALANCING
/* page access time bits need to hold at least 4 seconds */
#define PAGE_ACCESS_TIME_MIN_BITS 12
#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
#define PAGE_ACCESS_TIME_BUCKETS \
(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
#else
#define PAGE_ACCESS_TIME_BUCKETS 0
#endif
#define PAGE_ACCESS_TIME_MASK \
(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}
static inline int cpupid_to_pid(int cpupid)
{
return cpupid & LAST__PID_MASK;
}
static inline int cpupid_to_cpu(int cpupid)
{
return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}
static inline int cpupid_to_nid(int cpupid)
{
return cpu_to_node(cpupid_to_cpu(cpupid));
}
static inline bool cpupid_pid_unset(int cpupid)
{
return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}
static inline bool cpupid_cpu_unset(int cpupid)
{
return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}
static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}
#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
static inline int folio_last_cpupid(struct folio *folio)
{
return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int folio_last_cpupid(struct folio *folio)
{
return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
static inline int folio_xchg_access_time(struct folio *folio, int time)
{
int last_time;
last_time = folio_xchg_last_cpupid(folio,
time >> PAGE_ACCESS_TIME_BUCKETS);
return last_time << PAGE_ACCESS_TIME_BUCKETS;
}
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
unsigned int pid_bit;
pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
}
}
bool folio_use_access_time(struct folio *folio);
#else /* !CONFIG_NUMA_BALANCING */
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
return folio_nid(folio); /* XXX */
}
static inline int folio_xchg_access_time(struct folio *folio, int time)
{
return 0;
}
static inline int folio_last_cpupid(struct folio *folio)
{
return folio_nid(folio); /* XXX */
}
static inline int cpupid_to_nid(int cpupid)
{
return -1;
}
static inline int cpupid_to_pid(int cpupid)
{
return -1;
}
static inline int cpupid_to_cpu(int cpupid)
{
return -1;
}
static inline int cpu_pid_to_cpupid(int nid, int pid)
{
return -1;
}
static inline bool cpupid_pid_unset(int cpupid)
{
return true;
}
static inline void page_cpupid_reset_last(struct page *page)
{
}
static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
return false;
}
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
}
static inline bool folio_use_access_time(struct folio *folio)
{
return false;
}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
/*
* KASAN per-page tags are stored xor'ed with 0xff. This makes it possible to
* avoid setting the tags of all pages to the native kernel tag value 0xff, as
* the default value 0x00 maps to 0xff.
*/
static inline u8 page_kasan_tag(const struct page *page)
{
u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
tag ^= 0xff;
}
return tag;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
unsigned long old_flags, flags;
if (!kasan_enabled())
return;
tag ^= 0xff;
old_flags = READ_ONCE(page->flags.f);
do {
flags = old_flags;
flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
} while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
{
if (kasan_enabled())
page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline u8 page_kasan_tag(const struct page *page)
{
return 0xff;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
static inline pg_data_t *page_pgdat(const struct page *page)
{
return NODE_DATA(page_to_nid(page));
}
static inline pg_data_t *folio_pgdat(const struct folio *folio)
{
return NODE_DATA(folio_nid(folio));
}
static inline struct zone *folio_zone(const struct folio *folio)
{
return &folio_pgdat(folio)->node_zones[folio_zonenum(folio)];
}
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}
static inline unsigned long memdesc_section(memdesc_flags_t mdf)
{
return (mdf.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#else /* !SECTION_IN_PAGE_FLAGS */
static inline unsigned long memdesc_section(memdesc_flags_t mdf)
{
return 0;
}
#endif /* SECTION_IN_PAGE_FLAGS */
/**
* folio_pfn - Return the Page Frame Number of a folio.
* @folio: The folio.
*
* A folio may contain multiple pages. The pages have consecutive
* Page Frame Numbers.
*
* Return: The Page Frame Number of the first page in the folio.
*/
static inline unsigned long folio_pfn(const struct folio *folio)
{
return page_to_pfn(&folio->page);
}
static inline struct folio *pfn_folio(unsigned long pfn)
{
return page_folio(pfn_to_page(pfn));
}
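/*
* Illustrative sketch (not part of this header): converting between a PFN and
* the folio that contains it. pfn_folio() returns the folio even when the PFN
* points at a tail page, so folio_pfn() of the result may be smaller than the
* PFN we started from:
*
*	struct folio *folio = pfn_folio(pfn);
*	unsigned long head_pfn = folio_pfn(folio);	// first page of the folio
*/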
#ifdef CONFIG_MMU
static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
{
return pfn_pte(page_to_pfn(page), pgprot);
}
/**
* folio_mk_pte - Create a PTE for this folio
* @folio: The folio to create a PTE for
* @pgprot: The page protection bits to use
*
* Create a page table entry for the first page of this folio.
* This is suitable for passing to set_ptes().
*
* Return: A page table entry suitable for mapping this folio.
*/
static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
{
return pfn_pte(folio_pfn(folio), pgprot);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/**
* folio_mk_pmd - Create a PMD for this folio
* @folio: The folio to create a PMD for
* @pgprot: The page protection bits to use
*
* Create a page table entry for the first page of this folio.
* This is suitable for passing to set_pmd_at().
*
* Return: A page table entry suitable for mapping this folio.
*/
static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
{
return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
/**
* folio_mk_pud - Create a PUD for this folio
* @folio: The folio to create a PUD for
* @pgprot: The page protection bits to use
*
* Create a page table entry for the first page of this folio.
* This is suitable for passing to set_pud_at().
*
* Return: A page table entry suitable for mapping this folio.
*/
static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
{
return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_MMU */
static inline bool folio_has_pincount(const struct folio *folio)
{
if (IS_ENABLED(CONFIG_64BIT))
return folio_test_large(folio);
return folio_order(folio) > 1;
}
/**
* folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
* @folio: The folio.
*
* This function checks if a folio has been pinned via a call to
* a function in the pin_user_pages() family.
*
* For small folios, the return value is partially fuzzy: false is not fuzzy,
* because it means "definitely not pinned for DMA", but true means "probably
* pinned for DMA, but possibly a false positive due to having at least
* GUP_PIN_COUNTING_BIAS worth of normal folio references".
*
* False positives are OK, because: a) it's unlikely for a folio to
* get that many refcounts, and b) all the callers of this routine are
* expected to be able to deal gracefully with a false positive.
*
* For most large folios, the result will be exactly correct. That's because
* we have more tracking data available: the _pincount field is used
* instead of the GUP_PIN_COUNTING_BIAS scheme.
*
* For more information, please see Documentation/core-api/pin_user_pages.rst.
*
* Return: True, if it is likely that the folio has been "dma-pinned".
* False, if the folio is definitely not dma-pinned.
*/
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
if (folio_has_pincount(folio))
return atomic_read(&folio->_pincount) > 0;
/*
* folio_ref_count() is signed. If that refcount overflows, then
* folio_ref_count() returns a negative value, and callers will avoid
* further incrementing the refcount.
*
* Here, for that overflow case, use the sign bit to count a little
* bit higher via unsigned math, and thus still get an accurate result.
*/
return ((unsigned int)folio_ref_count(folio)) >=
GUP_PIN_COUNTING_BIAS;
}
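/*
* Illustrative sketch (not part of this header): a caller that must not
* write-protect DMA-pinned anonymous memory might use the check like this,
* tolerating the documented false positives for small folios:
*
*	if (folio_test_anon(folio) && folio_maybe_dma_pinned(folio))
*		return -EBUSY;		// leave the mapping writable
*/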
/*
* This should most likely only be called during fork() to see whether we
* should break the cow immediately for an anon page on the src mm.
*
* The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
*/
static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
struct folio *folio)
{
VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
return false;
return folio_maybe_dma_pinned(folio);
}
/**
* is_zero_page - Query if a page is a zero page
* @page: The page to query
*
* This returns true if @page is one of the permanent zero pages.
*/
static inline bool is_zero_page(const struct page *page)
{
return is_zero_pfn(page_to_pfn(page));
}
/**
* is_zero_folio - Query if a folio is a zero page
* @folio: The folio to query
*
* This returns true if @folio is one of the permanent zero pages.
*/
static inline bool is_zero_folio(const struct folio *folio)
{
return is_zero_page(&folio->page);
}
/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
#ifdef CONFIG_MIGRATION
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
#ifdef CONFIG_CMA
int mt = folio_migratetype(folio);
if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
return false;
#endif
/* The zero page can be "pinned" but gets special handling. */
if (is_zero_folio(folio))
return true;
/* Coherent device memory must always allow eviction. */
if (folio_is_device_coherent(folio))
return false;
/*
* Filesystems can only tolerate transient delays to truncate and
* hole-punch operations
*/
if (folio_is_fsdax(folio))
return false;
/* Otherwise, non-movable zone folios can be pinned. */
return !folio_is_zone_movable(folio);
}
#else
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
return true;
}
#endif
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}
static inline void set_page_node(struct page *page, unsigned long node)
{
page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
}
static inline void set_page_links(struct page *page, enum zone_type zone,
unsigned long node, unsigned long pfn)
{
set_page_zone(page, zone);
set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
/**
* folio_nr_pages - The number of pages in the folio.
* @folio: The folio.
*
* Return: A positive power of two.
*/
static inline unsigned long folio_nr_pages(const struct folio *folio)
{
if (!folio_test_large(folio))
return 1;
return folio_large_nr_pages(folio);
}
#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
/*
* We don't expect any folios that exceed buddy sizes (and consequently
* memory sections).
*/
#define MAX_FOLIO_ORDER MAX_PAGE_ORDER
#elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
/*
* Only pages within a single memory section are guaranteed to be
* contiguous. By limiting folios to a single memory section, all folio
* pages are guaranteed to be contiguous.
*/
#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT
#elif defined(CONFIG_HUGETLB_PAGE)
/*
* There is no real limit on the folio size. We limit them to the maximum we
* currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
* no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
*/
#define MAX_FOLIO_ORDER get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
#else
/*
* Without hugetlb, gigantic folios that are bigger than a single PUD are
* currently impossible.
*/
#define MAX_FOLIO_ORDER PUD_ORDER
#endif
#define MAX_FOLIO_NR_PAGES (1UL << MAX_FOLIO_ORDER)
/*
* compound_nr() returns the number of pages in this potentially compound
* page. compound_nr() can be called on a tail page, and is defined to
* return 1 in that case.
*/
static inline unsigned long compound_nr(const struct page *page)
{
const struct folio *folio = (struct folio *)page;
if (!test_bit(PG_head, &folio->flags.f))
return 1;
return folio_large_nr_pages(folio);
}
/**
* folio_next - Move to the next physical folio.
* @folio: The folio we're currently operating on.
*
* If you have physically contiguous memory which may span more than
* one folio (eg a &struct bio_vec), use this function to move from one
* folio to the next. Do not use it if the memory is only virtually
* contiguous as the folios are almost certainly not adjacent to each
* other. This is the folio equivalent to writing ``page++``.
*
* Context: We assume that the folios are refcounted and/or locked at a
* higher level and do not adjust the reference counts.
* Return: The next struct folio.
*/
static inline struct folio *folio_next(struct folio *folio)
{
return (struct folio *)folio_page(folio, folio_nr_pages(folio));
}
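/*
* Illustrative sketch (not part of this header): walking a physically
* contiguous buffer one folio at a time, e.g. when it is described by a
* single &struct bio_vec (ignoring bv_offset for brevity):
*
*	struct folio *folio = page_folio(bvec->bv_page);
*	size_t remaining = bvec->bv_len;
*
*	while (remaining) {
*		size_t len = min(remaining, folio_size(folio));
*
*		process(folio, len);		// hypothetical per-folio helper
*		remaining -= len;
*		folio = folio_next(folio);
*	}
*/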
/**
* folio_shift - The size of the memory described by this folio.
* @folio: The folio.
*
* A folio represents a number of bytes which is a power-of-two in size.
* This function tells you which power-of-two the folio is. See also
* folio_size() and folio_order().
*
* Context: The caller should have a reference on the folio to prevent
* it from being split. It is not necessary for the folio to be locked.
* Return: The base-2 logarithm of the size of this folio.
*/
static inline unsigned int folio_shift(const struct folio *folio)
{
return PAGE_SHIFT + folio_order(folio);
}
/**
* folio_size - The number of bytes in a folio.
* @folio: The folio.
*
* Context: The caller should have a reference on the folio to prevent
* it from being split. It is not necessary for the folio to be locked.
* Return: The number of bytes in this folio.
*/
static inline size_t folio_size(const struct folio *folio)
{
return PAGE_SIZE << folio_order(folio);
}
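/*
* Illustrative sketch (not part of this header): folio_size() is always
* folio_nr_pages() * PAGE_SIZE, and folio_shift() is the matching log2, so
* an index within a range can be derived either way:
*
*	size_t bytes = folio_size(folio);	// e.g. 2 MiB for a PMD-sized folio with 4 KiB pages
*	unsigned int shift = folio_shift(folio);	// e.g. 21 for that same folio
*	pgoff_t idx = offset >> shift;		// equivalent to offset / bytes
*/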
/**
* folio_maybe_mapped_shared - Whether the folio is mapped into the page
* tables of more than one MM
* @folio: The folio.
*
* This function checks whether the folio may currently be mapped into more than one
* MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
* MM ("mapped exclusively").
*
* For KSM folios, this function also returns "mapped shared" when a folio is
* mapped multiple times into the same MM, because the individual page mappings
* are independent.
*
* For small anonymous folios and anonymous hugetlb folios, the return
* value will be exactly correct: non-KSM folios can only be mapped at most once
* into an MM, and they cannot be partially mapped. KSM folios are
* considered shared even if mapped multiple times into the same MM.
*
* For other folios, the result can be fuzzy:
* #. For partially-mappable large folios (THP), the return value can wrongly
* indicate "mapped shared" (false positive) if a folio was mapped by
* more than two MMs at one point in time.
* #. For pagecache folios (including hugetlb), the return value can wrongly
* indicate "mapped shared" (false positive) when two VMAs in the same MM
* cover the same file range.
*
* Further, this function only considers current page table mappings that
* are tracked using the folio mapcount(s).
*
* This function does not consider:
* #. If the folio might get mapped in the (near) future (e.g., swapcache,
* pagecache, temporary unmapping for migration).
* #. If the folio is mapped differently (VM_PFNMAP).
* #. If hugetlb page table sharing applies. Callers might want to check
* hugetlb_pmd_shared().
*
* Return: Whether the folio is estimated to be mapped into more than one MM.
*/
static inline bool folio_maybe_mapped_shared(struct folio *folio)
{
int mapcount = folio_mapcount(folio);
/* Only partially-mappable folios require more care. */
if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
return mapcount > 1;
/*
* vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ...
* simply assume "mapped shared", nobody should really care
* about this for arbitrary kernel allocations.
*/
if (!IS_ENABLED(CONFIG_MM_ID))
return true;
/*
* A single mapping implies "mapped exclusively", even if the
* folio flag says something different: it's easier to handle this
* case here instead of on the RMAP hot path.
*/
if (mapcount <= 1)
return false;
return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
}
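/*
* Illustrative sketch (not part of this header): a policy that only acts on
* memory believed to be exclusive to one MM can combine this helper with a
* mapping check, accepting the documented false positives:
*
*	if (folio_mapped(folio) && !folio_maybe_mapped_shared(folio))
*		consider_for_exclusive_treatment(folio);	// hypothetical
*/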
/**
* folio_expected_ref_count - calculate the expected folio refcount
* @folio: the folio
*
* Calculate the expected folio refcount, taking references from the pagecache,
* swapcache, PG_private and page table mappings into account. Useful in
* combination with folio_ref_count() to detect unexpected references (e.g.,
* GUP or other temporary references).
*
* Does currently not consider references from the LRU cache. If the folio
* was isolated from the LRU (which is the case during migration or split),
* the LRU cache does not apply.
*
* Calling this function on an unmapped folio -- !folio_mapped() -- that is
* locked will return a stable result.
*
* Calling this function on a mapped folio will not result in a stable result,
* because nothing stops additional page table mappings from coming (e.g.,
* fork()) or going (e.g., munmap()).
*
* Calling this function without the folio lock will also not result in a
* stable result: for example, the folio might get dropped from the swapcache
* concurrently.
*
* However, even when called without the folio lock or on a mapped folio,
* this function can be used to detect unexpected references early (for example,
* to decide whether it even makes sense to lock the folio and unmap it).
*
* The caller must add any reference (e.g., from folio_try_get()) it might be
* holding itself to the result.
*
* Returns the expected folio refcount.
*/
static inline int folio_expected_ref_count(const struct folio *folio)
{
const int order = folio_order(folio);
int ref_count = 0;
if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
return 0;
if (folio_test_anon(folio)) {
/* One reference per page from the swapcache. */
ref_count += folio_test_swapcache(folio) << order;
} else {
/* One reference per page from the pagecache. */
ref_count += !!folio->mapping << order;
/* One reference from PG_private. */
ref_count += folio_test_private(folio);
}
/* One reference per page table mapping. */
return ref_count + folio_mapcount(folio);
}
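/*
* Illustrative sketch (not part of this header): detecting unexpected (e.g.
* GUP) references on a locked, unmapped folio. The caller adds the reference
* it holds itself, as described above:
*
*	if (folio_ref_count(folio) != folio_expected_ref_count(folio) + 1)
*		return -EAGAIN;		// someone else holds an extra reference
*/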
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
static inline int arch_make_folio_accessible(struct folio *folio)
{
return 0;
}
#endif
/*
* Some inline functions in vmstat.h depend on page_zone()
*/
#include <linux/vmstat.h>
#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif
#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
page->virtual = address;
}
#define page_address_init() do { } while(0)
#endif
#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif
static __always_inline void *lowmem_page_address(const struct page *page)
{
return page_to_virt(page);
}
#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif
static inline void *folio_address(const struct folio *folio)
{
return page_address(&folio->page);
}
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
static inline bool page_is_pfmemalloc(const struct page *page)
{
/*
* lru.next has bit 1 set if the page is allocated from the
* pfmemalloc reserves. Callers may simply overwrite it if
* they do not need to preserve that information.
*/
return (uintptr_t)page->lru.next & BIT(1);
}
/*
* Return true only if the folio has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
static inline bool folio_is_pfmemalloc(const struct folio *folio)
{
/*
* lru.next has bit 1 set if the page is allocated from the
* pfmemalloc reserves. Callers may simply overwrite it if
* they do not need to preserve that information.
*/
return (uintptr_t)folio->lru.next & BIT(1);
}
/*
* Only to be called by the page allocator on a freshly allocated
* page.
*/
static inline void set_page_pfmemalloc(struct page *page)
{
page->lru.next = (void *)BIT(1);
}
static inline void clear_page_pfmemalloc(struct page *page)
{
page->lru.next = NULL;
}
/*
* Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
*/
extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
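/*
* Illustrative sketch (not part of this header): offset_in_folio() is the
* folio-sized analogue of offset_in_page(); for a single-page folio both
* return the same value. For example, with 4 KiB pages:
*
*	offset_in_page(0x12345678)		== 0x678
*	offset_in_folio(folio, 0x12345678)	== 0x5678 if folio_size() is 64 KiB
*/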
/*
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
struct folio *single_folio; /* Locked folio to be unmapped */
bool even_cows; /* Zap COWed private pages too? */
bool reclaim_pt; /* Need reclaim page tables? */
zap_flags_t zap_flags; /* Extra flags for zapping */
};
/*
* Whether to drop the pte markers, for example, the uffd-wp information for
* file-backed memory. This should only be specified when we will completely
* drop the page in the mm, either by truncation or unmapping of the vma. By
* default, the flag is not set.
*/
#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
#ifdef CONFIG_SCHED_MM_CID
void sched_mm_cid_before_execve(struct task_struct *t);
void sched_mm_cid_after_execve(struct task_struct *t);
void sched_mm_cid_fork(struct task_struct *t);
void sched_mm_cid_exit_signals(struct task_struct *t);
static inline int task_mm_cid(struct task_struct *t)
{
return t->mm_cid;
}
#else
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
static inline void sched_mm_cid_fork(struct task_struct *t) { }
static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
static inline int task_mm_cid(struct task_struct *t)
{
/*
* Use the processor id as a fall-back when the mm cid feature is
* disabled. This provides functional per-cpu data structure accesses
* in user-space, although it won't provide the memory usage benefits.
*/
return raw_smp_processor_id();
}
#endif
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t pud);
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details);
static inline void zap_vma_pages(struct vm_area_struct *vma)
{
zap_page_range_single(vma, vma->vm_start,
vma->vm_end - vma->vm_start, NULL);
}
void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long start,
unsigned long end, unsigned long tree_end, bool mm_wr_locked);
struct mmu_notifier_range;
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
struct follow_pfnmap_args {
/**
* Inputs:
* @vma: Pointer to @vm_area_struct struct
* @address: the virtual address to walk
*/
struct vm_area_struct *vma;
unsigned long address;
/**
* Internals:
*
* The caller shouldn't touch any of these.
*/
spinlock_t *lock;
pte_t *ptep;
/**
* Outputs:
*
* @pfn: the PFN of the address
* @addr_mask: address mask covering pfn
* @pgprot: the pgprot_t of the mapping
* @writable: whether the mapping is writable
* @special: whether the mapping is a special mapping (real PFN maps)
*/
unsigned long pfn;
unsigned long addr_mask;
pgprot_t pgprot;
bool writable;
bool special;
};
int follow_pfnmap_start(struct follow_pfnmap_args *args);
void follow_pfnmap_end(struct follow_pfnmap_args *args);
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int generic_error_remove_folio(struct address_space *mapping,
struct folio *folio);
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long address, struct pt_regs *regs);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct pt_regs *regs);
extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct pt_regs *regs)
{
/* should never happen if there's no MMU */
BUG();
return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
unsigned int fault_flags, bool *unlocked)
{
/* should never happen if there's no MMU */
BUG();
return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
{
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
unsigned long addr);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
#ifdef CONFIG_BPF_SYSCALL
extern int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
#endif
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked);
/*
* Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
*/
static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
unsigned long addr,
int gup_flags,
struct vm_area_struct **vmap)
{
struct page *page;
struct vm_area_struct *vma;
int got;
if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
return ERR_PTR(-EINVAL);
got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
if (got < 0)
return ERR_PTR(got);
vma = vma_lookup(mm, addr);
if (WARN_ON_ONCE(!vma)) {
put_page(page);
return ERR_PTR(-EINVAL);
}
*vmap = vma;
return page;
}
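/*
* Illustrative sketch (not part of this header): ptrace-style access paths
* can use this helper, with the mmap lock held, to get both the page and its
* VMA in one step:
*
*	mmap_read_lock(mm);
*	page = get_user_page_vma_remote(mm, addr, FOLL_FORCE, &vma);
*	if (IS_ERR(page)) {
*		mmap_read_unlock(mm);
*		return PTR_ERR(page);
*	}
*	... use page and vma ...
*	put_page(page);
*	mmap_read_unlock(mm);
*/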
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
struct folio **folios, unsigned int max_folios,
pgoff_t *offset);
int folio_add_pins(struct folio *folio, unsigned int pins);
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
const struct task_struct *task, bool bypass_rlim);
struct kvec;
struct page *get_dump_page(unsigned long addr, int *locked);
bool folio_mark_dirty(struct folio *folio);
bool folio_mark_dirty_lock(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
/*
* Flags used by change_protection(). For now we make it a bitmap so
* that we can pass in multiple flags just like parameters. However,
* for now, all the callers only use one of the flags at a time.
*/
/*
* Whether we should manually check if we can map individual PTEs writable,
* because something (e.g., COW, uffd-wp) blocks that from happening for all
* PTEs automatically in a writable mapping.
*/
#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
/* Whether this protection change is for NUMA hints */
#define MM_CP_PROT_NUMA (1UL << 1)
/* Whether this change is for write protecting */
#define MM_CP_UFFD_WP (1UL << 2) /* do wp */
#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long cp_flags);
extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, vm_flags_t newflags);
/*
* doesn't attempt to fault and will return short.
*/
int get_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
static inline bool get_user_page_fast_only(unsigned long addr,
unsigned int gup_flags, struct page **pagep)
{
return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}
/*
* per-process(per-mm_struct) statistics.
*/
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
return percpu_counter_read_positive(&mm->rss_stat[member]);
}
static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
{
return percpu_counter_sum_positive(&mm->rss_stat[member]);
}
void mm_trace_rss_stat(struct mm_struct *mm, int member);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
percpu_counter_add(&mm->rss_stat[member], value);
mm_trace_rss_stat(mm, member);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
percpu_counter_inc(&mm->rss_stat[member]);
mm_trace_rss_stat(mm, member);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
percpu_counter_dec(&mm->rss_stat[member]);
mm_trace_rss_stat(mm, member);
}
/* Optimized variant when folio is already known not to be anon */
static inline int mm_counter_file(struct folio *folio)
{
if (folio_test_swapbacked(folio))
return MM_SHMEMPAGES;
return MM_FILEPAGES;
}
static inline int mm_counter(struct folio *folio)
{
if (folio_test_anon(folio))
return MM_ANONPAGES;
return mm_counter_file(folio);
}
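/*
* Illustrative sketch (not part of this header): when a folio is mapped into
* or unmapped from an mm, the matching RSS counter is chosen with mm_counter()
* (or mm_counter_file() when the folio is known not to be anon) and adjusted
* by the number of pages involved:
*
*	long nr = folio_nr_pages(folio);
*
*	add_mm_counter(mm, mm_counter(folio), nr);	// on map
*	add_mm_counter(mm, mm_counter(folio), -nr);	// on unmap
*/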
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
return get_mm_counter(mm, MM_FILEPAGES) +
get_mm_counter(mm, MM_ANONPAGES) +
get_mm_counter(mm, MM_SHMEMPAGES);
}
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
return max(mm->hiwater_rss, get_mm_rss(mm));
}
static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
return max(mm->hiwater_vm, mm->total_vm);
}
static inline void update_hiwater_rss(struct mm_struct *mm)
{
unsigned long _rss = get_mm_rss(mm);
if (data_race(mm->hiwater_rss) < _rss)
data_race(mm->hiwater_rss = _rss);
}
static inline void update_hiwater_vm(struct mm_struct *mm)
{
if (mm->hiwater_vm < mm->total_vm)
mm->hiwater_vm = mm->total_vm;
}
static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
mm->hiwater_rss = get_mm_rss(mm);
}
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
struct mm_struct *mm)
{
unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
if (*maxrss < hiwater_rss)
*maxrss = hiwater_rss;
}
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
return 0;
}
static inline pte_t pte_mkspecial(pte_t pte)
{
return pte;
}
#endif
#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
return false;
}
static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
return pmd;
}
#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
return false;
}
static inline pud_t pud_mkspecial(pud_t pud)
{
return pud;
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pte_t *ptep;
__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
return ptep;
}
#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{
return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
if (mm_pud_folded(mm))
return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
if (mm_pud_folded(mm))
return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
unsigned long address)
{
return 0;
}
static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
if (mm_pmd_folded(mm))
return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
if (mm_pmd_folded(mm))
return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
atomic_long_set(&mm->pgtables_bytes, 0);
}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
return atomic_long_read(&mm->pgtables_bytes);
}
static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else
static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
return 0;
}
static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);
#if defined(CONFIG_MMU)
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? NULL : p4d_offset(pgd, address);
}
static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{
return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? NULL : pud_offset(p4d, address);
}
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ? NULL : pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */
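/*
* Illustrative sketch (not part of this header): the *_alloc() helpers above
* are typically chained when populating a page table down to the PTE level;
* each step either reuses an existing entry or allocates the next level:
*
*	p4d = p4d_alloc(mm, pgd_offset(mm, addr), addr);
*	pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
*	pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
*	if (!pmd)
*		return -ENOMEM;
*	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);	// see further below
*/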
enum pt_flags {
PT_reserved = PG_reserved,
/* High bits are used for zone/node/section */
};
static inline struct ptdesc *virt_to_ptdesc(const void *x)
{
return page_ptdesc(virt_to_page(x));
}
/**
* ptdesc_address - Virtual address of page table.
* @pt: Page table descriptor.
*
* Return: The first byte of the page table described by @pt.
*/
static inline void *ptdesc_address(const struct ptdesc *pt)
{
return folio_address(ptdesc_folio(pt));
}
static inline bool pagetable_is_reserved(struct ptdesc *pt)
{
return test_bit(PT_reserved, &pt->pt_flags.f);
}
/**
* pagetable_alloc - Allocate pagetables
* @gfp: GFP flags
* @order: desired pagetable order
*
* pagetable_alloc allocates memory for page tables as well as a page table
* descriptor to describe that memory.
*
* Return: The ptdesc describing the allocated page tables.
*/
static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
{
struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
return page_ptdesc(page);
}
#define pagetable_alloc(...) alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
/**
* pagetable_free - Free pagetables
* @pt: The page table descriptor
*
* pagetable_free frees the memory of all page tables described by a page
* table descriptor and the memory for the descriptor itself.
*/
static inline void pagetable_free(struct ptdesc *pt)
{
struct page *page = ptdesc_page(pt);
__free_pages(page, compound_order(page));
}
#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
void ptlock_free(struct ptdesc *ptdesc);
static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
return ptdesc->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}
static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
return true;
}
static inline void ptlock_free(struct ptdesc *ptdesc)
{
}
static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
return &ptdesc->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
return ptlock_ptr(virt_to_ptdesc(pte));
}
static inline bool ptlock_init(struct ptdesc *ptdesc)
{
/*
* prep_new_page() initializes page->private (and therefore page->ptl)
* with 0. Make sure nobody took it into use in between.
*
* This can happen if an arch tries to use slab for page table allocation:
* slab code uses page->slab_cache, which shares storage with page->ptl.
*/
VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
if (!ptlock_alloc(ptdesc))
return false;
spin_lock_init(ptlock_ptr(ptdesc));
return true;
}
#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static inline unsigned long ptdesc_nr_pages(const struct ptdesc *ptdesc)
{
return compound_nr(ptdesc_page(ptdesc));
}
static inline void __pagetable_ctor(struct ptdesc *ptdesc)
{
pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
__SetPageTable(ptdesc_page(ptdesc));
mod_node_page_state(pgdat, NR_PAGETABLE, ptdesc_nr_pages(ptdesc));
}
static inline void pagetable_dtor(struct ptdesc *ptdesc)
{
pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
ptlock_free(ptdesc);
__ClearPageTable(ptdesc_page(ptdesc));
mod_node_page_state(pgdat, NR_PAGETABLE, -ptdesc_nr_pages(ptdesc));
}
static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
{
pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
static inline bool pagetable_pte_ctor(struct mm_struct *mm,
struct ptdesc *ptdesc)
{
if (mm != &init_mm && !ptlock_init(ptdesc))
return false;
__pagetable_ctor(ptdesc);
return true;
}
pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
pmd_t *pmdvalp)
{
pte_t *pte;
__cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
return pte;
}
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
return __pte_offset_map(pmd, addr, NULL);
}
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp)
{
pte_t *pte;
__cond_lock(RCU, __cond_lock(*ptlp,
pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
return pte;
}
pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, pmd_t *pmdvalp,
spinlock_t **ptlp);
#define pte_unmap_unlock(pte, ptl) do { \
spin_unlock(ptl); \
pte_unmap(pte); \
} while (0)
#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
#define pte_alloc_map(mm, pmd, address) \
(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
(pte_alloc(mm, pmd) ? \
NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ? \
NULL : pte_offset_kernel(pmd, address))
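/*
* Illustrative sketch (not part of this header): the usual pattern for
* examining or modifying a single PTE with the split PTE lock held, coping
* with a concurrently freed or changed page table (NULL return):
*
*	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
*	if (!pte)
*		return 0;		// the page table went away; retry or ignore
*	... read or update *pte ...
*	pte_unmap_unlock(pte, ptl);
*/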
#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
return virt_to_page((void *)((unsigned long) pmd & mask));
}
static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
{
return page_ptdesc(pmd_pgtable_page(pmd));
}
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return ptlock_ptr(pmd_ptdesc(pmd));
}
static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
ptdesc->pmd_huge_pte = NULL;
#endif
return ptlock_init(ptdesc);
}
#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
#endif
static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
return ptl;
}
static inline bool pagetable_pmd_ctor(struct mm_struct *mm,
struct ptdesc *ptdesc)
{
if (mm != &init_mm && !pmd_ptlock_init(ptdesc))
return false;
ptdesc_pmd_pts_init(ptdesc);
__pagetable_ctor(ptdesc);
return true;
}
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
* considered ready to switch to split PUD locks yet; there may be places
* which need to be converted from page_table_lock.
*/
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
return &mm->page_table_lock;
}
static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
spinlock_t *ptl = pud_lockptr(mm, pud);
spin_lock(ptl);
return ptl;
}
static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
{
__pagetable_ctor(ptdesc);
}
static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
{
__pagetable_ctor(ptdesc);
}
static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
{
__pagetable_ctor(ptdesc);
}
extern void __init pagecache_init(void);
extern void free_initmem(void);
/*
* Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
* into the buddy system. The freed pages will be poisoned with the pattern
* "poison" if it is within the range [0, UCHAR_MAX].
* Returns the number of pages freed into the buddy system.
*/
extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);
extern void adjust_managed_page_count(struct page *page, long count);
extern void reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
void free_reserved_page(struct page *page);
static inline void mark_page_reserved(struct page *page)
{
SetPageReserved(page);
adjust_managed_page_count(page, -1);
}
static inline void free_reserved_ptdesc(struct ptdesc *pt)
{
free_reserved_page(ptdesc_page(pt));
}
/*
* Default method to free all the __init memory into the buddy system.
* The freed pages will be poisoned with the pattern "poison" if it is within
* the range [0, UCHAR_MAX].
* Returns the number of pages freed into the buddy system.
*/
static inline unsigned long free_initmem_default(int poison)
{
extern char __init_begin[], __init_end[];
return free_reserved_area(&__init_begin, &__init_end,
poison, "unused kernel image (initmem)");
}
static inline unsigned long get_num_physpages(void)
{
int nid;
unsigned long phys_pages = 0;
for_each_online_node(nid)
phys_pages += node_present_pages(nid);
return phys_pages;
}
/*
* Using memblock node mappings, an architecture may initialise its
* zones, allocate the backing mem_map and account for memory holes in an
* architecture independent manner.
*
* An architecture is expected to register the ranges of page frames backed by
* physical memory with memblock_add[_node]() before calling
* free_area_init(), passing in the PFN each zone ends at. For basic
* usage, an architecture is expected to do something like
*
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
* memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
extern void mem_init(void);
extern void __init mmap_init(void);
extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
static inline void show_mem(void)
{
__show_mem(0, NULL, MAX_NR_ZONES - 1);
}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct vm_area_struct *prev,
struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
unsigned long start, unsigned long last);
#define vma_interval_tree_foreach(vma, root, start, last) \
for (vma = vma_interval_tree_iter_first(root, start, last); \
vma; vma = vma_interval_tree_iter_next(vma, start, last))
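/*
* Illustrative sketch (not part of this header): iterating all VMAs of a
* file's address_space that overlap a page-offset range, as rmap-style
* walkers do (with the appropriate i_mmap lock held by the caller;
* pgoff_start/pgoff_end are hypothetical bounds):
*
*	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end)
*		handle_one_vma(vma);		// hypothetical per-VMA action
*/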
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif
#define anon_vma_interval_tree_foreach(avc, root, start, last) \
for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
/* mmap.c */
extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, bool write);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
unsigned long start,
unsigned long end_data,
unsigned long start_data)
{
if (rlim < RLIM_INFINITY) {
if (((new - start) + (end_data - start_data)) > rlim)
return -ENOSPC;
}
return 0;
}
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);
extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm);
struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
vm_flags_t vm_flags,
const struct vm_special_mapping *spec);
unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);
unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
static inline unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
/* Ignore errors */
(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
/* This takes the mm semaphore itself */
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
unsigned long flags;
unsigned long length;
unsigned long low_limit;
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
unsigned long start_gap;
};
extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
/*
* Look up the first VMA which intersects the interval [start_addr, end_addr)
* NULL if none. Assume start_addr < end_addr.
*/
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr, unsigned long end_addr);
/**
* vma_lookup() - Find a VMA at a specific address
* @mm: The process address space.
* @addr: The user address.
*
* Return: The vm_area_struct at the given address, %NULL otherwise.
*/
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
return mtree_load(&mm->mm_mt, addr);
}
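/*
* Illustrative sketch (not part of this header): unlike find_vma(),
* vma_lookup() only succeeds when @addr is actually inside a VMA, so no
* extra vm_start check is needed (the mmap lock must be held):
*
*	vma = vma_lookup(mm, addr);
*	if (!vma)
*		return -EFAULT;		// addr is not mapped at all
*/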
static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_GROWSDOWN)
return stack_guard_gap;
/* See reasoning around the VM_SHADOW_STACK definition */
if (vma->vm_flags & VM_SHADOW_STACK)
return PAGE_SIZE;
return 0;
}
static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
vm_start -= gap;
if (vm_start > vma->vm_start)
vm_start = 0;
return vm_start;
}
static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
if (vma->vm_flags & VM_GROWSUP) {
vm_end += stack_guard_gap;
if (vm_end < vma->vm_end)
vm_end = -PAGE_SIZE;
}
return vm_end;
}
static inline unsigned long vma_pages(const struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
struct vm_area_struct *vma = vma_lookup(mm, vm_start);
if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = NULL;
return vma;
}
static inline bool range_in_vma(const struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
return (vma && vma->vm_start <= start && end <= vma->vm_end);
}
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif
void vma_set_file(struct vm_area_struct *vma, struct file *file);
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#endif
struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
bool write);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
unsigned long addr, struct page *page)
{
int err = vm_insert_page(vma, addr, page);
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err < 0 && err != -EBUSY)
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn,
unsigned long size, pgprot_t prot)
{
return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
return VM_FAULT_OOM;
else if (err == -EHWPOISON)
return VM_FAULT_HWPOISON;
return VM_FAULT_SIGBUS;
}
/*
* Convert errno to return value for ->page_mkwrite() calls.
*
* This should eventually be merged with vmf_error() above, but will need a
* careful audit of all vmf_error() callers.
*/
static inline vm_fault_t vmf_fs_error(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
if (err == -EFAULT || err == -EAGAIN)
return VM_FAULT_NOPAGE;
if (err == -ENOMEM)
return VM_FAULT_OOM;
/* -ENOSPC, -EDQUOT, -EIO ... */
return VM_FAULT_SIGBUS;
}
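/*
 * Illustrative sketch (hypothetical fault handler): errno-returning helpers
 * can be bridged into the fault path with vmf_error(), while a
 * ->page_mkwrite() implementation uses vmf_fs_error() so that 0 maps to
 * VM_FAULT_LOCKED:
 *
 *     err = do_fs_work(vmf);          /. stand-in for the real helper ./
 *     if (err)
 *         return vmf_error(err);      /. -ENOMEM -> VM_FAULT_OOM, etc. ./
 *
 * do_fs_work() is not a real function; it stands in for whatever the driver
 * or filesystem actually calls.
 */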
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
return -ENOMEM;
if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -EFAULT;
return 0;
}
/*
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
unsigned int flags)
{
/*
* If callers don't want to honor NUMA hinting faults, no need to
* determine if we would actually have to trigger a NUMA hinting fault.
*/
if (!(flags & FOLL_HONOR_NUMA_FAULT))
return true;
/*
* NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
*
* Requiring a fault here even for inaccessible VMAs would mean that
* FOLL_FORCE cannot make any progress, because handle_mm_fault()
* refuses to process NUMA hinting faults in inaccessible VMAs.
*/
return !vma_is_accessible(vma);
}
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);
#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
extern bool _page_poisoning_enabled_early;
DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
static inline bool page_poisoning_enabled(void)
{
return _page_poisoning_enabled_early;
}
/*
* For use in fast paths after init_mem_debugging() has run, or when a
* false negative result is not harmful when called too early.
*/
static inline bool page_poisoning_enabled_static(void)
{
return static_branch_unlikely(&_page_poisoning_enabled);
}
static inline void kernel_poison_pages(struct page *page, int numpages)
{
if (page_poisoning_enabled_static())
__kernel_poison_pages(page, numpages);
}
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{
if (page_poisoning_enabled_static())
__kernel_unpoison_pages(page, numpages);
}
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline bool page_poisoning_enabled_static(void) { return false; }
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif
DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{
if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
&init_on_alloc))
return true;
return flags & __GFP_ZERO;
}
DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
&init_on_free);
}
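/*
 * Illustrative sketch (simplified, not the allocator's exact code): the page
 * allocator consults these helpers to decide whether pages must be zeroed:
 *
 *     if (want_init_on_alloc(gfp_flags))
 *         zero the pages on allocation (covers init_on_alloc=1 as well as
 *         an explicit __GFP_ZERO request);
 *     if (want_init_on_free())
 *         zero the pages again when they are freed;
 */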
extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
static inline bool debug_pagealloc_enabled(void)
{
return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
_debug_pagealloc_enabled_early;
}
/*
* For use in fast paths after mem_debugging_and_hardening_init() has run,
* or when a false negative result is not harmful when called too early.
*/
static inline bool debug_pagealloc_enabled_static(void)
{
if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
return false;
return static_branch_unlikely(&_debug_pagealloc_enabled);
}
/*
* To support DEBUG_PAGEALLOC architecture must ensure that
* __kernel_map_pages() never fails
*/
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 1);
}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 0);
}
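/*
 * Typical pairing (simplified sketch, not the exact allocator code): pages
 * are unmapped from the kernel direct map when freed and mapped back on
 * allocation, so stray accesses to free pages fault immediately:
 *
 *     debug_pagealloc_unmap_pages(page, 1 << order);   on free
 *     ...
 *     debug_pagealloc_map_pages(page, 1 << order);     on re-allocation
 */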
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
static inline unsigned int debug_guardpage_minorder(void)
{
return _debug_guardpage_minorder;
}
static inline bool debug_guardpage_enabled(void)
{
return static_branch_unlikely(&_debug_guardpage_enabled);
}
static inline bool page_is_guard(const struct page *page)
{
if (!debug_guardpage_enabled())
return false;
return PageGuard(page);
}
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order)
{
if (!debug_guardpage_enabled())
return false;
return __set_page_guard(zone, page, order);
}
void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order)
{
if (!debug_guardpage_enabled())
return;
__clear_page_guard(zone, page, order);
}
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(const struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
#endif /* __HAVE_ARCH_GATE_AREA */
bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm);
void drop_slab(void);
#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif
const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif
void *sparse_buffer_alloc(unsigned long size);
unsigned long section_map_size(void);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
struct vmem_altmap *altmap, unsigned long ptpfn,
unsigned long flags);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
unsigned long addr, unsigned long next);
int vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
unsigned long headsize);
int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
unsigned long headsize);
void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
unsigned long headsize);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
/* number of pfns from base where pfn_to_page() is valid */
if (altmap)
return altmap->reserve + altmap->free;
return 0;
}
static inline void vmem_altmap_free(struct vmem_altmap *altmap,
unsigned long nr_pfns)
{
altmap->alloc -= nr_pfns;
}
#else
static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
return 0;
}
static inline void vmem_altmap_free(struct vmem_altmap *altmap,
unsigned long nr_pfns)
{
}
#endif
#define VMEMMAP_RESERVE_NR 2
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{
unsigned long nr_pages;
unsigned long nr_vmemmap_pages;
if (!pgmap || !is_power_of_2(sizeof(struct page)))
return false;
nr_pages = pgmap_vmemmap_nr(pgmap);
nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
/*
* For vmemmap optimization with DAX we need minimum 2 vmemmap
* pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
*/
return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
}
/*
* If we don't have an architecture override, use the generic rule
*/
#ifndef vmemmap_can_optimize
#define vmemmap_can_optimize __vmemmap_can_optimize
#endif
#else
static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{
return false;
}
#endif
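/*
 * Worked example (illustrative numbers): with sizeof(struct page) == 64 and
 * PAGE_SIZE == 4096, a pgmap covering 2MB of device memory has
 * nr_pages = 512, so nr_vmemmap_pages = (512 * 64) >> 12 = 8. Since
 * 8 > VMEMMAP_RESERVE_NR (2) and no altmap is in use, the vmemmap for that
 * range can be optimized. A struct page size that is not a power of two
 * disables the optimization up front.
 */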
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
MF_UNPOISON = 1 << 4,
MF_SW_SIMULATED = 1 << 5,
MF_NO_RETRY = 1 << 6,
MF_MEM_PRE_REMOVE = 1 << 7,
};
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
/*
* Sysfs entries for memory failure handling statistics.
*/
extern const struct attribute_group memory_failure_attr_group;
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void num_poisoned_pages_inc(unsigned long pfn);
void num_poisoned_pages_sub(unsigned long pfn, long i);
#else
static inline void memory_failure_queue(unsigned long pfn, int flags)
{
}
static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared)
{
return 0;
}
static inline void num_poisoned_pages_inc(unsigned long pfn)
{
}
static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
}
#endif
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
extern void memblk_nr_poison_inc(unsigned long pfn);
extern void memblk_nr_poison_sub(unsigned long pfn, long i);
#else
static inline void memblk_nr_poison_inc(unsigned long pfn)
{
}
static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
{
}
#endif
#ifndef arch_memory_failure
static inline int arch_memory_failure(unsigned long pfn, int flags)
{
return -ENXIO;
}
#endif
#ifndef arch_is_platform_page
static inline bool arch_is_platform_page(u64 paddr)
{
return false;
}
#endif
/*
* Error handlers for various types of pages.
*/
enum mf_result {
MF_IGNORED, /* Error: cannot be handled */
MF_FAILED, /* Error: handling failed */
MF_DELAYED, /* Will be handled later */
MF_RECOVERED, /* Successfully recovered */
};
enum mf_action_page_type {
MF_MSG_KERNEL,
MF_MSG_KERNEL_HIGH_ORDER,
MF_MSG_DIFFERENT_COMPOUND,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
MF_MSG_GET_HWPOISON,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
MF_MSG_DIRTY_MLOCKED_LRU,
MF_MSG_CLEAN_MLOCKED_LRU,
MF_MSG_DIRTY_UNEVICTABLE_LRU,
MF_MSG_CLEAN_UNEVICTABLE_LRU,
MF_MSG_DIRTY_LRU,
MF_MSG_CLEAN_LRU,
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
MF_MSG_DAX,
MF_MSG_UNSPLIT_THP,
MF_MSG_ALREADY_POISONED,
MF_MSG_UNKNOWN,
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
void folio_zero_user(struct folio *folio, unsigned long addr_hint);
int copy_user_large_folio(struct folio *dst, struct folio *src,
unsigned long addr_hint,
struct vm_area_struct *vma);
long copy_folio_from_user(struct folio *dst_folio,
const void __user *usr_src,
bool allow_pagefault);
/**
* vma_is_special_huge - Are transhuge page-table entries considered special?
* @vma: Pointer to the struct vm_area_struct to consider
*
* Whether transhuge page-table entries are considered "special" following
* the definition in vm_normal_page().
*
* Return: true if transhuge page-table entries should be considered special,
* false otherwise.
*/
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
return vma_is_dax(vma) || (vma->vm_file &&
(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif
extern int memcmp_pages(struct page *page1, struct page *page2);
static inline int pages_identical(struct page *page1, struct page *page2)
{
return !memcmp_pages(page1, page2);
}
#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
pgoff_t first_index, pgoff_t nr,
pgoff_t bitmap_pgoff,
unsigned long *bitmap,
pgoff_t *start,
pgoff_t *end);
unsigned long wp_shared_mapping_range(struct address_space *mapping,
pgoff_t first_index, pgoff_t nr);
#endif
#ifdef CONFIG_ANON_VMA_NAME
int set_anon_vma_name(unsigned long addr, unsigned long size,
const char __user *uname);
#else
static inline
int set_anon_vma_name(unsigned long addr, unsigned long size,
const char __user *uname)
{
return -EINVAL;
}
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
void accept_memory(phys_addr_t start, unsigned long size);
#else
static inline bool range_contains_unaccepted_memory(phys_addr_t start,
unsigned long size)
{
return false;
}
static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
#endif
static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
{
return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
}
void vma_pgtable_walk_begin(struct vm_area_struct *vma);
void vma_pgtable_walk_end(struct vm_area_struct *vma);
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
int reserve_mem_release_by_name(const char *name);
#ifdef CONFIG_64BIT
int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
#else
static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
{
/* noop on 32 bit */
return 0;
}
#endif
/*
* user_alloc_needs_zeroing checks if a user folio from page allocator needs to
* be zeroed or not.
*/
static inline bool user_alloc_needs_zeroing(void)
{
/*
* for user folios, arch with cache aliasing requires cache flush and
* arc changes folio->flags to make icache coherent with dcache, so
* always return false to make caller use
* clear_user_page()/clear_user_highpage().
*/
return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
!static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
&init_on_alloc);
}
int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
/*
* mseal of userspace process's system mappings.
*/
#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
#define VM_SEALED_SYSMAP VM_SEALED
#else
#define VM_SEALED_SYSMAP VM_NONE
#endif
/*
* DMA mapping IDs for page_pool
*
* When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
* stashes it in the upper bits of page->pp_magic. We always want to be able to
* unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
* pages can have arbitrary kernel pointers stored in the same field as pp_magic
* (since it overlaps with page->lru.next), so we must ensure that we cannot
* mistake a valid kernel pointer for any of the values we write into this
* field.
*
* On architectures that set POISON_POINTER_DELTA, this is already ensured,
* since this value becomes part of PP_SIGNATURE; meaning we can just use the
* space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
* lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
* 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
* known at compile-time.
*
* If the value of PAGE_OFFSET is not known at compile time, or if it is too
* small to leave at least 8 bits available above PP_SIGNATURE, we define the
* number of bits to be 0, which turns off the DMA index tracking altogether
* (see page_pool_register_dma_index()).
*/
#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
#if POISON_POINTER_DELTA > 0
/* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
* index to not overlap with that if set
*/
#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
#else
/* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */
#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
!(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)
#endif
#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
PP_DMA_INDEX_SHIFT)
/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
* OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
* the head page of compound page and bit 1 for pfmemalloc page, as well as the
* bits used for the DMA index. page_is_pfmemalloc() is checked in
* __page_pool_put_page() to avoid recycling the pfmemalloc page.
*/
#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
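/*
 * Worked example (illustrative values only): on a configuration where
 * PP_SIGNATURE - POISON_POINTER_DELTA == 0x40 and
 * POISON_POINTER_DELTA == 0xdead000000000000, PP_DMA_INDEX_SHIFT is
 * 1 + __fls(0x40) = 7, and PP_DMA_INDEX_BITS is
 * MIN(32, __ffs(POISON_POINTER_DELTA) - 7) = MIN(32, 48 - 7) = 32, so the
 * DMA index occupies bits 7..38 of page->pp_magic. Bits 0 and 1 remain
 * reserved for the compound-head and pfmemalloc markers masked off by
 * PP_MAGIC_MASK.
 */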
#ifdef CONFIG_PAGE_POOL
static inline bool page_pool_page_is_pp(const struct page *page)
{
return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
}
#else
static inline bool page_pool_page_is_pp(const struct page *page)
{
return false;
}
#endif
#define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
#define PAGE_SNAPSHOT_PG_IDLE (1 << 2)
struct page_snapshot {
struct folio folio_snapshot;
struct page page_snapshot;
unsigned long pfn;
unsigned long idx;
unsigned long flags;
};
static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
{
return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
}
void snapshot_page(struct page_snapshot *ps, const struct page *page);
#endif /* _LINUX_MM_H */
/*
* kernel/cpuset.c
*
* Processor and Memory placement constraints for sets of tasks.
*
* Copyright (C) 2003 BULL SA.
* Copyright (C) 2004-2007 Silicon Graphics, Inc.
* Copyright (C) 2006 Google, Inc
*
* Portions derived from Patrick Mochel's sysfs code.
* sysfs is Copyright (c) 2001-3 Patrick Mochel
*
* 2003-10-10 Written by Simon Derr.
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
* 2006 Rework by Paul Menage to use generic cgroups
* 2008 Rework of the scheduler domains and CPU hotplug handling
* by Max Krasnyansky
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include "cpuset-internal.h"
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/task_work.h>
DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
/*
* There could be abnormal cpuset configurations for cpu or memory
* node binding; this key provides a quick, low-cost check for
* such situations.
*/
DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
static const char * const perr_strings[] = {
[PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
[PERR_INVPARENT] = "Parent is an invalid partition root",
[PERR_NOTPART] = "Parent is not a partition root",
[PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
[PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
[PERR_HOTPLUG] = "No cpu available due to hotplug",
[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
[PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
[PERR_ACCESS] = "Enable partition not permitted",
[PERR_REMOTE] = "Have remote partition underneath",
};
/*
* For local partitions, update to subpartitions_cpus & isolated_cpus is done
* in update_parent_effective_cpumask(). For remote partitions, it is done in
* the remote_partition_*() and remote_cpus_update() helpers.
*/
/*
* Exclusive CPUs distributed out to local or remote sub-partitions of
* top_cpuset
*/
static cpumask_var_t subpartitions_cpus;
/*
* Exclusive CPUs in isolated partitions
*/
static cpumask_var_t isolated_cpus;
/*
* Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
*/
static cpumask_var_t boot_hk_cpus;
static bool have_boot_isolcpus;
/* List of remote partition root children */
static struct list_head remote_children;
/*
* A flag to force sched domain rebuild at the end of an operation.
* It can be set in
* - update_partition_sd_lb()
* - update_cpumasks_hier()
* - cpuset_update_flag()
* - cpuset_hotplug_update_tasks()
* - cpuset_handle_hotplug()
*
* Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
*
* Note that update_relax_domain_level() in cpuset-v1.c can still call
* rebuild_sched_domains_locked() directly without using this flag.
*/
static bool force_sd_rebuild;
/*
* Partition root states:
*
* 0 - member (not a partition root)
* 1 - partition root
* 2 - partition root without load balancing (isolated)
* -1 - invalid partition root
* -2 - invalid isolated partition root
*
* There are 2 types of partitions - local or remote. Local partitions are
* those whose parents are partition roots themselves. Setting
* cpuset.cpus.exclusive is optional when setting up a local partition.
* Remote partitions are those whose parents are not partition roots. Passing
* down exclusive CPUs by setting cpuset.cpus.exclusive along the ancestor
* nodes is mandatory when creating a remote partition.
*
* For simplicity, a local partition can be created under a local or remote
* partition but a remote partition cannot have any partition root in its
* ancestor chain except the cgroup root.
*/
#define PRS_MEMBER 0
#define PRS_ROOT 1
#define PRS_ISOLATED 2
#define PRS_INVALID_ROOT -1
#define PRS_INVALID_ISOLATED -2
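/*
 * Illustrative mapping (cgroup v2 interface): these states correspond to
 * what user space writes to and reads from cpuset.cpus.partition, e.g.
 *
 *     echo root     > cpuset.cpus.partition   ->  PRS_ROOT
 *     echo isolated > cpuset.cpus.partition   ->  PRS_ISOLATED
 *     echo member   > cpuset.cpus.partition   ->  PRS_MEMBER
 *
 * An invalid partition is reported back on read as "root invalid (<reason>)"
 * or "isolated invalid (<reason>)".
 */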
/*
* Temporary cpumasks for working with partitions that are passed among
* functions to avoid memory allocation in inner functions.
*/
struct tmpmasks {
cpumask_var_t addmask, delmask; /* For partition root */
cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
};
void inc_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks++;
}
void dec_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks--;
}
static inline bool is_partition_valid(const struct cpuset *cs)
{
return cs->partition_root_state > 0;
}
static inline bool is_partition_invalid(const struct cpuset *cs)
{
return cs->partition_root_state < 0;
}
static inline bool cs_is_member(const struct cpuset *cs)
{
return cs->partition_root_state == PRS_MEMBER;
}
/*
* Callers should hold callback_lock to modify partition_root_state.
*/
static inline void make_partition_invalid(struct cpuset *cs)
{
if (cs->partition_root_state > 0)
cs->partition_root_state = -cs->partition_root_state;
}
/*
* Send a notification event whenever partition_root_state changes.
*/
static inline void notify_partition_change(struct cpuset *cs, int old_prs)
{
if (old_prs == cs->partition_root_state)
return;
cgroup_file_notify(&cs->partition_file);
/* Reset prs_err if not invalid */
if (is_partition_valid(cs))
WRITE_ONCE(cs->prs_err, PERR_NONE);
}
/*
* The top_cpuset is always synchronized to cpu_active_mask and we should avoid
* using cpu_online_mask as much as possible. An active CPU is always an online
* CPU, but not vice versa. cpu_active_mask and cpu_online_mask can differ
* during hotplug operations. A CPU is marked active at the last stage of CPU
* bringup (CPUHP_AP_ACTIVE). It is also the stage where cpuset hotplug code
* will be called to update the sched domains so that the scheduler can move
* a normal task to a newly active CPU or remove tasks away from a newly
* inactivated CPU. The online bit is set much earlier in the CPU bringup
* process and cleared much later in CPU teardown.
*
* If cpu_online_mask is used while a hotunplug operation is happening in
* parallel, we may leave an offline CPU in cpus_allowed or some other masks.
*/
static struct cpuset top_cpuset = {
.flags = BIT(CS_CPU_EXCLUSIVE) |
BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
.partition_root_state = PRS_ROOT,
.relax_domain_level = -1,
.remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
};
/*
* There are two global locks guarding cpuset structures - cpuset_mutex and
* callback_lock. The cpuset code uses only cpuset_mutex. Other kernel
* subsystems can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
* structures. Note that cpuset_mutex needs to be a mutex as it is used in
* paths that rely on priority inheritance (e.g. scheduler - on RT) for
* correctness.
*
* A task must hold both locks to modify cpusets. If a task holds
* cpuset_mutex, it blocks others, ensuring that it is the only task able to
* also acquire callback_lock and be able to modify cpusets. It can perform
* various checks on the cpuset structure first, knowing nothing will change.
* It can also allocate memory while just holding cpuset_mutex. While it is
* performing these checks, various callback routines can briefly acquire
* callback_lock to query cpusets. Once it is ready to make the changes, it
* takes callback_lock, blocking everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
* callback_lock, as that would risk double tripping on callback_lock
* from one of the callbacks into the cpuset code from within
* __alloc_pages().
*
* If a task is only holding callback_lock, then it has read-only
* access to cpusets.
*
* The task_struct fields mems_allowed and mempolicy may be changed by
* another task, so alloc_lock in the task_struct is used to protect
* them.
*
* The cpuset_common_seq_show() handlers only hold callback_lock across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*/
static DEFINE_MUTEX(cpuset_mutex);
/**
* cpuset_lock - Acquire the global cpuset mutex
*
* This locks the global cpuset mutex to prevent modifications to the cpuset
* hierarchy and configuration. Holding this lock alone is not sufficient to
* make modifications; see cpuset_full_lock() for that.
*/
void cpuset_lock(void)
{
mutex_lock(&cpuset_mutex);
}
void cpuset_unlock(void)
{
mutex_unlock(&cpuset_mutex);
}
/**
* cpuset_full_lock - Acquire full protection for cpuset modification
*
* Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
* to safely modify cpuset data.
*/
void cpuset_full_lock(void)
{
cpus_read_lock();
mutex_lock(&cpuset_mutex);
}
void cpuset_full_unlock(void)
{
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
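/*
 * Illustrative sketch (hypothetical write handler): configuration changes
 * take both locks via the helpers above, while external read-mostly users
 * only need cpuset_lock()/cpuset_unlock():
 *
 *     cpuset_full_lock();
 *     ... validate and apply the new cpus/mems configuration ...
 *     cpuset_full_unlock();
 */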
static DEFINE_SPINLOCK(callback_lock);
void cpuset_callback_lock_irq(void)
{
spin_lock_irq(&callback_lock);
}
void cpuset_callback_unlock_irq(void)
{
spin_unlock_irq(&callback_lock);
}
static struct workqueue_struct *cpuset_migrate_mm_wq;
static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
static inline void check_insane_mems_config(nodemask_t *nodes)
{
if (!cpusets_insane_config() &&
movable_only_nodes(nodes)) {
static_branch_enable_cpuslocked(&cpusets_insane_config_key);
pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
"Cpuset allocations might fail even with a lot of memory available.\n",
nodemask_pr_args(nodes));
}
}
/*
* decrease cs->attach_in_progress.
* wake_up cpuset_attach_wq if cs->attach_in_progress==0.
*/
static inline void dec_attach_in_progress_locked(struct cpuset *cs)
{
lockdep_assert_held(&cpuset_mutex);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
}
static inline void dec_attach_in_progress(struct cpuset *cs)
{
mutex_lock(&cpuset_mutex);
dec_attach_in_progress_locked(cs);
mutex_unlock(&cpuset_mutex);
}
static inline bool cpuset_v2(void)
{
return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
}
/*
* Cgroup v2 behavior is used on the "cpus" and "mems" control files when
* on default hierarchy or when the cpuset_v2_mode flag is set by mounting
* the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
* With v2 behavior, "cpus" and "mems" are always what the users have
* requested and won't be changed by hotplug events. Only the effective
* cpus or mems will be affected.
*/
static inline bool is_in_v2_mode(void)
{
return cpuset_v2() ||
(cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}
/**
* partition_is_populated - check if partition has tasks
* @cs: partition root to be checked
* @excluded_child: a child cpuset to be excluded in task checking
* Return: true if there are tasks, false otherwise
*
* It is assumed that @cs is a valid partition root. @excluded_child should
* be non-NULL when this cpuset is going to become a partition itself.
*/
static inline bool partition_is_populated(struct cpuset *cs,
struct cpuset *excluded_child)
{
struct cgroup_subsys_state *css;
struct cpuset *child;
if (cs->css.cgroup->nr_populated_csets)
return true;
if (!excluded_child && !cs->nr_subparts)
return cgroup_is_populated(cs->css.cgroup);
rcu_read_lock();
cpuset_for_each_child(child, css, cs) {
if (child == excluded_child)
continue;
if (is_partition_valid(child))
continue;
if (cgroup_is_populated(child->css.cgroup)) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}
/*
* Return in pmask the portion of a task's cpuset's cpus_allowed that
* are online and are capable of running the task. If none are found,
* walk up the cpuset hierarchy until we find one that does have some
* appropriate cpus.
*
* One way or another, we guarantee to return some non-empty subset
* of cpu_active_mask.
*
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_active_cpus(struct task_struct *tsk,
struct cpumask *pmask)
{
const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
struct cpuset *cs;
if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
cpumask_copy(pmask, cpu_active_mask);
rcu_read_lock();
cs = task_cs(tsk);
while (!cpumask_intersects(cs->effective_cpus, pmask))
cs = parent_cs(cs);
cpumask_and(pmask, pmask, cs->effective_cpus);
rcu_read_unlock();
}
/*
* Return in *pmask the portion of a cpuset's mems_allowed that
* are online, with memory. If none are online with memory, walk
* up the cpuset hierarchy until we find one that does have some
* online mems. The top cpuset always has some mems online.
*
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
cs = parent_cs(cs);
nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}
/**
* alloc_cpumasks - Allocate an array of cpumask variables
* @pmasks: Pointer to array of cpumask_var_t pointers
* @size: Number of cpumasks to allocate
* Return: 0 if successful, -ENOMEM otherwise.
*
* Allocates @size cpumasks and initializes them to empty. Returns 0 on
* success, -ENOMEM on allocation failure. On failure, any previously
* allocated cpumasks are freed.
*/
static inline int alloc_cpumasks(cpumask_var_t *pmasks[], u32 size)
{
int i;
for (i = 0; i < size; i++) {
if (!zalloc_cpumask_var(pmasks[i], GFP_KERNEL)) {
while (--i >= 0)
free_cpumask_var(*pmasks[i]);
return -ENOMEM;
}
}
return 0;
}
/**
* alloc_tmpmasks - Allocate temporary cpumasks for cpuset operations.
* @tmp: Pointer to tmpmasks structure to populate
* Return: 0 on success, -ENOMEM on allocation failure
*/
static inline int alloc_tmpmasks(struct tmpmasks *tmp)
{
/*
* Array of pointers to the three cpumask_var_t fields in tmpmasks.
* Note: Array size must match actual number of masks (3)
*/
cpumask_var_t *pmask[3] = {
&tmp->new_cpus,
&tmp->addmask,
&tmp->delmask
};
return alloc_cpumasks(pmask, ARRAY_SIZE(pmask));
}
/**
* free_tmpmasks - free cpumasks in a tmpmasks structure
* @tmp: the tmpmasks structure pointer
*/
static inline void free_tmpmasks(struct tmpmasks *tmp)
{
if (!tmp)
return;
free_cpumask_var(tmp->new_cpus);
free_cpumask_var(tmp->addmask);
free_cpumask_var(tmp->delmask);
}
/**
* dup_or_alloc_cpuset - Duplicate or allocate a new cpuset
* @cs: Source cpuset to duplicate (NULL for a fresh allocation)
*
* Creates a new cpuset by either:
* 1. Duplicating an existing cpuset (if @cs is non-NULL), or
* 2. Allocating a fresh cpuset with zero-initialized masks (if @cs is NULL)
*
* Return: Pointer to newly allocated cpuset on success, NULL on failure
*/
static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs)
{
struct cpuset *trial;
/* Allocate base structure */
trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) :
kzalloc(sizeof(*cs), GFP_KERNEL);
if (!trial)
return NULL;
/* Setup cpumask pointer array */
cpumask_var_t *pmask[4] = {
&trial->cpus_allowed,
&trial->effective_cpus,
&trial->effective_xcpus,
&trial->exclusive_cpus
};
if (alloc_cpumasks(pmask, ARRAY_SIZE(pmask))) {
kfree(trial);
return NULL;
}
/* Copy masks if duplicating */
if (cs) {
cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
cpumask_copy(trial->effective_cpus, cs->effective_cpus);
cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
}
return trial;
}
/**
* free_cpuset - free the cpuset
* @cs: the cpuset to be freed
*/
static inline void free_cpuset(struct cpuset *cs)
{
free_cpumask_var(cs->cpus_allowed);
free_cpumask_var(cs->effective_cpus);
free_cpumask_var(cs->effective_xcpus);
free_cpumask_var(cs->exclusive_cpus);
kfree(cs);
}
/* Return user specified exclusive CPUs */
static inline struct cpumask *user_xcpus(struct cpuset *cs)
{
return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
: cs->exclusive_cpus;
}
static inline bool xcpus_empty(struct cpuset *cs)
{
return cpumask_empty(cs->cpus_allowed) &&
cpumask_empty(cs->exclusive_cpus);
}
/*
* cpusets_are_exclusive() - check if two cpusets are exclusive
*
* Return true if exclusive, false if not
*/
static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
{
struct cpumask *xcpus1 = user_xcpus(cs1);
struct cpumask *xcpus2 = user_xcpus(cs2);
if (cpumask_intersects(xcpus1, xcpus2))
return false;
return true;
}
/**
* cpus_excl_conflict - Check if two cpusets have exclusive CPU conflicts
* @cs1: first cpuset to check
* @cs2: second cpuset to check
*
* Returns: true if CPU exclusivity conflict exists, false otherwise
*
* Conflict detection rules:
* 1. If either cpuset is CPU exclusive, they must be mutually exclusive
* 2. exclusive_cpus masks cannot intersect between cpusets
* 3. The allowed CPUs of one cpuset cannot be a subset of another's exclusive CPUs
*/
static inline bool cpus_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
{
/* If either cpuset is exclusive, check if they are mutually exclusive */
if (is_cpu_exclusive(cs1) || is_cpu_exclusive(cs2))
return !cpusets_are_exclusive(cs1, cs2);
/* Exclusive_cpus cannot intersect */
if (cpumask_intersects(cs1->exclusive_cpus, cs2->exclusive_cpus))
return true;
/* The cpus_allowed of one cpuset cannot be a subset of another cpuset's exclusive_cpus */
if (!cpumask_empty(cs1->cpus_allowed) &&
cpumask_subset(cs1->cpus_allowed, cs2->exclusive_cpus))
return true;
if (!cpumask_empty(cs2->cpus_allowed) &&
cpumask_subset(cs2->cpus_allowed, cs1->exclusive_cpus))
return true;
return false;
}
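/*
 * Example of rule 3 above: if cs1 has cpus_allowed = 2-3 and cs2 has
 * exclusive_cpus = 0-7, then cs1's allowed CPUs are a subset of cs2's
 * exclusive CPUs and the change is reported as a conflict, even though
 * neither cpuset has CS_CPU_EXCLUSIVE set.
 */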
static inline bool mems_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
{
if ((is_mem_exclusive(cs1) || is_mem_exclusive(cs2)))
return nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
return false;
}
/*
* validate_change() - Used to validate that any proposed cpuset change
* follows the structural rules for cpusets.
*
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
* cpuset_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
* cpuset in the list must use cur below, not trial.
*
* 'trial' is the address of bulk structure copy of cur, with
* perhaps one or more of the fields cpus_allowed, mems_allowed,
* or flags changed to new, trial values.
*
* Return 0 if valid, -errno if not.
*/
static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
struct cgroup_subsys_state *css;
struct cpuset *c, *par;
int ret = 0;
rcu_read_lock();
if (!is_in_v2_mode())
ret = cpuset1_validate_change(cur, trial);
if (ret)
goto out;
/* Remaining checks don't apply to root cpuset */
if (cur == &top_cpuset)
goto out;
par = parent_cs(cur);
/*
* Cpusets with tasks - existing or newly being attached - can't
* be changed to have empty cpus_allowed or mems_allowed.
*/
ret = -ENOSPC;
if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
if (!cpumask_empty(cur->cpus_allowed) &&
cpumask_empty(trial->cpus_allowed))
goto out;
if (!nodes_empty(cur->mems_allowed) &&
nodes_empty(trial->mems_allowed))
goto out;
}
/*
* We can't shrink if we won't have enough room for SCHED_DEADLINE
* tasks. This check is not done when scheduling is disabled as the
* users should know what they are doing.
*
* For v1, effective_cpus == cpus_allowed & user_xcpus() returns
* cpus_allowed.
*
* For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
* for non-isolated partition root. At this point, the target
* effective_cpus isn't computed yet. user_xcpus() is the best
* approximation.
*
* TBD: May need to precompute the real effective_cpus here in case
* incorrect scheduling of SCHED_DEADLINE tasks in a partition
* becomes an issue.
*/
ret = -EBUSY;
if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
!cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
goto out;
/*
* If either I or some sibling (!= me) is exclusive, we can't
* overlap. exclusive_cpus cannot overlap with each other if set.
*/
ret = -EINVAL;
cpuset_for_each_child(c, css, par) {
if (c == cur)
continue;
if (cpus_excl_conflict(trial, c))
goto out;
if (mems_excl_conflict(trial, c))
goto out;
}
ret = 0;
out:
rcu_read_unlock();
return ret;
}
#ifdef CONFIG_SMP
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping effective cpus_allowed masks?
*/
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}
static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
if (dattr->relax_domain_level < c->relax_domain_level)
dattr->relax_domain_level = c->relax_domain_level;
return;
}
static void update_domain_attr_tree(struct sched_domain_attr *dattr,
struct cpuset *root_cs)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
/* skip the whole subtree if @cp doesn't have any CPU */
if (cpumask_empty(cp->cpus_allowed)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
if (is_sched_load_balance(cp))
update_domain_attr(dattr, cp);
}
rcu_read_unlock();
}
/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
return static_key_count(&cpusets_enabled_key.key) + 1;
}
/*
* generate_sched_domains()
*
* This function builds a partial partition of the system's CPUs.
* A 'partial partition' is a set of non-overlapping subsets whose
* union is a subset of that set.
* The output of this function needs to be passed to kernel/sched/core.c
* partition_sched_domains() routine, which will rebuild the scheduler's
* load balancing domains (sched domains) as specified by that partial
* partition.
*
* See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
* for a background explanation of this.
*
* Does not return errors, on the theory that the callers of this
* routine would rather not worry about failures to rebuild sched
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
* Must be called with cpuset_mutex held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a
* top-down scan of all cpusets. For our purposes, rebuilding
* the scheduler's sched domains, we can ignore cpusets that are not
* is_sched_load_balance.
* csa - (for CpuSet Array) Array of pointers to all the cpusets
* that need to be load balanced, for convenient iterative
* access by the subsequent code that finds the best partition,
* i.e the set of domains (subsets) of CPUs such that the
* cpus_allowed of every cpuset marked is_sched_load_balance
* is a subset of one of these domains, while there are as
* many such domains as possible, each as small as possible.
* doms - Conversion of 'csa' to an array of cpumasks, for passing to
* the kernel/sched/core.c routine partition_sched_domains() in a
* convenient format, that can be easily compared to the prior
* value to determine what partition elements (sched domains)
* were changed (added or removed.)
*
* Finding the best partition (set of domains):
* The double nested loops below over i, j scan over the load
* balanced cpusets (using the array of cpuset pointers in csa[])
* looking for pairs of cpusets that have overlapping cpus_allowed
* and merging them using a union-find algorithm.
*
* The union of the cpus_allowed masks from the set of all cpusets
* having the same root then form the one element of the partition
* (one sched domain) to be passed to partition_sched_domains().
*
*/
static int generate_sched_domains(cpumask_var_t **domains,
struct sched_domain_attr **attributes)
{
struct cpuset *cp; /* top-down scan of cpusets */
struct cpuset **csa; /* array of all cpuset ptrs */
int csn; /* how many cpuset ptrs in csa so far */
int i, j; /* indices for partition finding loops */
cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms = 0; /* number of sched domains in result */
int nslot; /* next empty doms[] struct cpumask slot */
struct cgroup_subsys_state *pos_css;
bool root_load_balance = is_sched_load_balance(&top_cpuset);
bool cgrpv2 = cpuset_v2();
int nslot_update;
doms = NULL;
dattr = NULL;
csa = NULL;
/* Special case for the 99% of systems with one, full, sched domain */
if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
single_root_domain:
ndoms = 1;
doms = alloc_sched_domains(ndoms);
if (!doms)
goto done;
dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
if (dattr) {
*dattr = SD_ATTR_INIT;
update_domain_attr_tree(dattr, &top_cpuset);
}
cpumask_and(doms[0], top_cpuset.effective_cpus,
housekeeping_cpumask(HK_TYPE_DOMAIN));
goto done;
}
csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
if (!csa)
goto done;
csn = 0;
rcu_read_lock();
if (root_load_balance)
csa[csn++] = &top_cpuset;
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
if (cp == &top_cpuset)
continue;
if (cgrpv2)
goto v2;
/*
* v1:
* Continue traversing beyond @cp iff @cp has some CPUs and
* isn't load balancing. The former is obvious. The
* latter: All child cpusets contain a subset of the
* parent's cpus, so just skip them, and then we call
* update_domain_attr_tree() to calc relax_domain_level of
* the corresponding sched domain.
*/
if (!cpumask_empty(cp->cpus_allowed) &&
!(is_sched_load_balance(cp) &&
cpumask_intersects(cp->cpus_allowed,
housekeeping_cpumask(HK_TYPE_DOMAIN))))
continue;
if (is_sched_load_balance(cp) &&
!cpumask_empty(cp->effective_cpus))
csa[csn++] = cp;
/* skip @cp's subtree */
pos_css = css_rightmost_descendant(pos_css);
continue;
v2:
/*
* Only valid partition roots that are not isolated and with
* non-empty effective_cpus will be saved into csa[].
*/
if ((cp->partition_root_state == PRS_ROOT) &&
!cpumask_empty(cp->effective_cpus))
csa[csn++] = cp;
/*
* Skip @cp's subtree if not a partition root and has no
* exclusive CPUs to be granted to child cpusets.
*/
if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
pos_css = css_rightmost_descendant(pos_css);
}
rcu_read_unlock();
/*
* If there are only isolated partitions underneath the cgroup root,
* we can optimize out unneeded sched domains scanning.
*/
if (root_load_balance && (csn == 1))
goto single_root_domain;
for (i = 0; i < csn; i++)
uf_node_init(&csa[i]->node);
/* Merge overlapping cpusets */
for (i = 0; i < csn; i++) {
for (j = i + 1; j < csn; j++) {
if (cpusets_overlap(csa[i], csa[j])) {
/*
* Cgroup v2 shouldn't pass down overlapping
* partition root cpusets.
*/
WARN_ON_ONCE(cgrpv2);
uf_union(&csa[i]->node, &csa[j]->node);
}
}
}
/* Count the total number of domains */
for (i = 0; i < csn; i++) {
if (uf_find(&csa[i]->node) == &csa[i]->node)
ndoms++;
}
/*
* Now we know how many domains to create.
* Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
*/
doms = alloc_sched_domains(ndoms);
if (!doms)
goto done;
/*
* The rest of the code, including the scheduler, can deal with
* dattr==NULL case. No need to abort if alloc fails.
*/
dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
GFP_KERNEL);
/*
* Cgroup v2 doesn't support domain attributes, just set all of them
* to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
* subset of HK_TYPE_DOMAIN housekeeping CPUs.
*/
if (cgrpv2) {
for (i = 0; i < ndoms; i++) {
/*
* The top cpuset may contain some boot time isolated
* CPUs that need to be excluded from the sched domain.
*/
if (csa[i] == &top_cpuset)
cpumask_and(doms[i], csa[i]->effective_cpus,
housekeeping_cpumask(HK_TYPE_DOMAIN));
else
cpumask_copy(doms[i], csa[i]->effective_cpus);
if (dattr)
dattr[i] = SD_ATTR_INIT;
}
goto done;
}
for (nslot = 0, i = 0; i < csn; i++) {
nslot_update = 0;
for (j = i; j < csn; j++) {
if (uf_find(&csa[j]->node) == &csa[i]->node) {
struct cpumask *dp = doms[nslot];
if (i == j) {
nslot_update = 1;
cpumask_clear(dp);
if (dattr)
*(dattr + nslot) = SD_ATTR_INIT;
}
cpumask_or(dp, dp, csa[j]->effective_cpus);
cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
if (dattr)
update_domain_attr_tree(dattr + nslot, csa[j]);
}
}
if (nslot_update)
nslot++;
}
BUG_ON(nslot != ndoms);
done:
kfree(csa);
/*
* Fallback to the default domain if kmalloc() failed.
* See comments in partition_sched_domains().
*/
if (doms == NULL)
ndoms = 1;
*domains = doms;
*attributes = dattr;
return ndoms;
}
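/*
 * Worked example (cgroup v2, illustrative): on an 8-CPU system where the
 * root cgroup load balances and one child is a valid "root" partition with
 * effective_cpus = 2-3, csa[] holds top_cpuset and that child. They do not
 * overlap, so the result is ndoms = 2 with doms[] covering 0-1,4-7 and 2-3
 * (minus any HK_TYPE_DOMAIN housekeeping exclusions). An "isolated"
 * partition would instead contribute no sched domain at all.
 */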
static void dl_update_tasks_root_domain(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
if (cs->nr_deadline_tasks == 0)
return;
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it)))
dl_add_task_root_domain(task);
css_task_iter_end(&it);
}
void dl_rebuild_rd_accounting(void)
{
struct cpuset *cs = NULL;
struct cgroup_subsys_state *pos_css;
int cpu;
u64 cookie = ++dl_cookie;
lockdep_assert_held(&cpuset_mutex);
lockdep_assert_cpus_held();
lockdep_assert_held(&sched_domains_mutex);
rcu_read_lock();
for_each_possible_cpu(cpu) {
if (dl_bw_visited(cpu, cookie))
continue;
dl_clear_root_domain_cpu(cpu);
}
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
if (cpumask_empty(cs->effective_cpus)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
css_get(&cs->css);
rcu_read_unlock();
dl_update_tasks_root_domain(cs);
rcu_read_lock();
css_put(&cs->css);
}
rcu_read_unlock();
}
/*
* Rebuild scheduler domains.
*
* If the flag 'sched_load_balance' of any cpuset with non-empty
* 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
* which has that flag enabled, or if any cpuset with a non-empty
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
* Call with cpuset_mutex held. Takes cpus_read_lock().
*/
void rebuild_sched_domains_locked(void)
{
struct cgroup_subsys_state *pos_css;
struct sched_domain_attr *attr;
cpumask_var_t *doms;
struct cpuset *cs;
int ndoms;
lockdep_assert_cpus_held();
lockdep_assert_held(&cpuset_mutex);
force_sd_rebuild = false;
/*
* If we have raced with CPU hotplug, return early to avoid
* passing doms with offlined cpu to partition_sched_domains().
* Anyway, cpuset_handle_hotplug() will rebuild the sched domains.
*
* With no CPUs in any subpartitions, top_cpuset's effective CPUs
* should be the same as the active CPUs, so checking only top_cpuset
* is enough to detect racing CPU offlines.
*/
if (cpumask_empty(subpartitions_cpus) &&
!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
return;
/*
* With subpartition CPUs, however, the effective CPUs of a partition
* root should be only a subset of the active CPUs. Since a CPU in any
* partition root could be offlined, all must be checked.
*/
if (!cpumask_empty(subpartitions_cpus)) {
rcu_read_lock();
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
if (!is_partition_valid(cs)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
if (!cpumask_subset(cs->effective_cpus,
cpu_active_mask)) {
rcu_read_unlock();
return;
}
}
rcu_read_unlock();
}
/* Generate domain masks and attrs */
ndoms = generate_sched_domains(&doms, &attr);
/* Have scheduler rebuild the domains */
partition_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */
static void rebuild_sched_domains_cpuslocked(void)
{
mutex_lock(&cpuset_mutex);
rebuild_sched_domains_locked();
mutex_unlock(&cpuset_mutex);
}
void rebuild_sched_domains(void)
{
cpus_read_lock();
rebuild_sched_domains_cpuslocked();
cpus_read_unlock();
}
void cpuset_reset_sched_domains(void)
{
mutex_lock(&cpuset_mutex);
partition_sched_domains(1, NULL, NULL);
mutex_unlock(&cpuset_mutex);
}
/**
* cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
* @new_cpus: the temp variable for the new effective_cpus mask
*
* Iterate through each task of @cs updating its cpus_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*
* For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
* to make sure all offline CPUs are also included as hotplug code won't
* update cpumasks for tasks in top_cpuset.
*
* As task_cpu_possible_mask() can be task dependent in arm64, we have to
* do cpu masking per task instead of doing it once for all.
*/
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
{
struct css_task_iter it;
struct task_struct *task;
bool top_cs = cs == &top_cpuset;
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it))) {
const struct cpumask *possible_mask = task_cpu_possible_mask(task);
if (top_cs) {
/*
* PF_NO_SETAFFINITY tasks are ignored.
* All per cpu kthreads should have PF_NO_SETAFFINITY
* flag set, see kthread_set_per_cpu().
*/
if (task->flags & PF_NO_SETAFFINITY)
continue;
cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
} else {
cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
}
set_cpus_allowed_ptr(task, new_cpus);
}
css_task_iter_end(&it);
}
/**
* compute_effective_cpumask - Compute the effective cpumask of the cpuset
* @new_cpus: the temp variable for the new effective_cpus mask
* @cs: the cpuset that needs its effective_cpus mask recomputed
* @parent: the parent cpuset
*
* The result is valid only if the given cpuset isn't a partition root.
*/
static void compute_effective_cpumask(struct cpumask *new_cpus,
struct cpuset *cs, struct cpuset *parent)
{
cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
}
/*
* Commands for update_parent_effective_cpumask
*/
enum partition_cmd {
partcmd_enable, /* Enable partition root */
partcmd_enablei, /* Enable isolated partition root */
partcmd_disable, /* Disable partition root */
partcmd_update, /* Update parent's effective_cpus */
partcmd_invalidate, /* Make partition invalid */
};
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct tmpmasks *tmp);
/*
* Update partition exclusive flag
*
* Return: 0 if successful, an error code otherwise
*/
static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
{
bool exclusive = (new_prs > PRS_MEMBER);
if (exclusive && !is_cpu_exclusive(cs)) {
if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
return PERR_NOTEXCL;
} else if (!exclusive && is_cpu_exclusive(cs)) {
/* Turning off CS_CPU_EXCLUSIVE will not return error */
cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
}
return 0;
}
/*
* Update partition load balance flag and/or rebuild sched domain
*
* Changing load balance flag will automatically call
* rebuild_sched_domains_locked().
* This function is for cgroup v2 only.
*/
static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
{
int new_prs = cs->partition_root_state;
bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
bool new_lb;
/*
* If cs is not a valid partition root, the load balance state
* will follow its parent.
*/
if (new_prs > 0) {
new_lb = (new_prs != PRS_ISOLATED);
} else {
new_lb = is_sched_load_balance(parent_cs(cs));
}
if (new_lb != !!is_sched_load_balance(cs)) {
rebuild_domains = true;
if (new_lb)
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
else
clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}
if (rebuild_domains)
cpuset_force_rebuild();
}
/*
* tasks_nocpu_error - Return true if tasks will have no effective_cpus
*/
static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
struct cpumask *xcpus)
{
/*
* A populated partition (cs or parent) can't have empty effective_cpus
*/
return (cpumask_subset(parent->effective_cpus, xcpus) &&
partition_is_populated(parent, cs)) ||
(!cpumask_intersects(xcpus, cpu_active_mask) &&
partition_is_populated(cs, NULL));
}
static void reset_partition_data(struct cpuset *cs)
{
struct cpuset *parent = parent_cs(cs);
if (!cpuset_v2())
return;
lockdep_assert_held(&callback_lock);
cs->nr_subparts = 0;
if (cpumask_empty(cs->exclusive_cpus)) {
cpumask_clear(cs->effective_xcpus);
if (is_cpu_exclusive(cs))
clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}
if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
}
/*
* isolated_cpus_update - Update the isolated_cpus mask
* @old_prs: old partition_root_state
* @new_prs: new partition_root_state
* @xcpus: exclusive CPUs with state change
*/
static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
{
WARN_ON_ONCE(old_prs == new_prs);
if (new_prs == PRS_ISOLATED)
cpumask_or(isolated_cpus, isolated_cpus, xcpus);
else
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
}
/*
* partition_xcpus_add - Add new exclusive CPUs to partition
* @new_prs: new partition_root_state
* @parent: parent cpuset
* @xcpus: exclusive CPUs to be added
* Return: true if isolated_cpus modified, false otherwise
*
* Remote partition if parent == NULL
*/
static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
struct cpumask *xcpus)
{
bool isolcpus_updated;
WARN_ON_ONCE(new_prs < 0);
lockdep_assert_held(&callback_lock);
if (!parent)
parent = &top_cpuset;
if (parent == &top_cpuset)
cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
isolcpus_updated = (new_prs != parent->partition_root_state);
if (isolcpus_updated)
isolated_cpus_update(parent->partition_root_state, new_prs,
xcpus);
cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
return isolcpus_updated;
}
/*
* partition_xcpus_del - Remove exclusive CPUs from partition
* @old_prs: old partition_root_state
* @parent: parent cpuset
* @xcpus: exclusive CPUs to be removed
* Return: true if isolated_cpus modified, false otherwise
*
* Remote partition if parent == NULL
*/
static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
struct cpumask *xcpus)
{
bool isolcpus_updated;
WARN_ON_ONCE(old_prs < 0);
lockdep_assert_held(&callback_lock);
if (!parent)
parent = &top_cpuset;
if (parent == &top_cpuset)
cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
isolcpus_updated = (old_prs != parent->partition_root_state);
if (isolcpus_updated)
isolated_cpus_update(old_prs, parent->partition_root_state,
xcpus);
cpumask_and(xcpus, xcpus, cpu_active_mask);
cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
return isolcpus_updated;
}
static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
{
int ret;
lockdep_assert_cpus_held();
if (!isolcpus_updated)
return;
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
}
/**
* cpuset_cpu_is_isolated - Check if the given CPU is isolated
* @cpu: the CPU number to be checked
* Return: true if CPU is used in an isolated partition, false otherwise
*/
bool cpuset_cpu_is_isolated(int cpu)
{
return cpumask_test_cpu(cpu, isolated_cpus);
}
EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
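/*
 * Example (hypothetical caller, for illustration only): other subsystems can
 * consult this exported helper before pushing work onto a CPU, e.g.
 *
 *	if (!cpuset_cpu_is_isolated(cpu))
 *		queue_work_on(cpu, wq, work);
 *
 * to avoid disturbing CPUs that sit in an isolated partition.
 */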
/**
* rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
* @parent: Parent cpuset containing all siblings
* @cs: Current cpuset (will be skipped)
* @excpus: exclusive effective CPU mask to modify
*
* This function ensures the given @excpus mask doesn't include any CPUs that
* are exclusively allocated to sibling cpusets. It walks through all siblings
* of @cs under @parent and removes their exclusive CPUs from @excpus.
*/
static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs,
struct cpumask *excpus)
{
struct cgroup_subsys_state *css;
struct cpuset *sibling;
int retval = 0;
if (cpumask_empty(excpus))
return retval;
/*
* Exclude exclusive CPUs from siblings
*/
rcu_read_lock();
cpuset_for_each_child(sibling, css, parent) {
if (sibling == cs)
continue;
if (cpumask_intersects(excpus, sibling->exclusive_cpus)) {
cpumask_andnot(excpus, excpus, sibling->exclusive_cpus);
retval++;
continue;
}
if (cpumask_intersects(excpus, sibling->effective_xcpus)) {
cpumask_andnot(excpus, excpus, sibling->effective_xcpus);
retval++;
}
}
rcu_read_unlock();
return retval;
}
/*
* compute_excpus - compute effective exclusive CPUs
* @cs: cpuset
* @xcpus: effective exclusive CPUs value to be set
* Return: 0 if there is no sibling conflict, > 0 otherwise
*
* If exclusive_cpus isn't explicitly set, we have to scan the sibling cpusets
* and exclude their exclusive_cpus or effective_xcpus as well.
*/
static int compute_excpus(struct cpuset *cs, struct cpumask *excpus)
{
struct cpuset *parent = parent_cs(cs);
cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus);
if (!cpumask_empty(cs->exclusive_cpus))
return 0;
return rm_siblings_excl_cpus(parent, cs, excpus);
}
/*
* compute_trialcs_excpus - Compute effective exclusive CPUs for a trial cpuset
* @trialcs: The trial cpuset containing the proposed new configuration
* @cs: The original cpuset that the trial configuration is based on
* Return: 0 if successful with no sibling conflict, >0 if a conflict is found
*
* Computes the effective_xcpus for a trial configuration. @cs is the real
* cpuset that the trial configuration is based on.
*/
static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
{
struct cpuset *parent = parent_cs(trialcs);
struct cpumask *excpus = trialcs->effective_xcpus;
/* trialcs is member, cpuset.cpus has no impact to excpus */
if (cs_is_member(cs))
cpumask_and(excpus, trialcs->exclusive_cpus,
parent->effective_xcpus);
else
cpumask_and(excpus, user_xcpus(trialcs), parent->effective_xcpus);
return rm_siblings_excl_cpus(parent, cs, excpus);
}
static inline bool is_remote_partition(struct cpuset *cs)
{
return !list_empty(&cs->remote_sibling);
}
static inline bool is_local_partition(struct cpuset *cs)
{
return is_partition_valid(cs) && !is_remote_partition(cs);
}
/*
* remote_partition_enable - Enable current cpuset as a remote partition root
* @cs: the cpuset to update
* @new_prs: new partition_root_state
* @tmp: temporary masks
* Return: 0 if successful, errcode if error
*
* Enable the current cpuset to become a remote partition root taking CPUs
* directly from the top cpuset. cpuset_mutex must be held by the caller.
*/
static int remote_partition_enable(struct cpuset *cs, int new_prs,
struct tmpmasks *tmp)
{
bool isolcpus_updated;
/*
* The user must have sysadmin privilege.
*/
if (!capable(CAP_SYS_ADMIN))
return PERR_ACCESS;
/*
* The requested exclusive_cpus must not be allocated to other
* partitions and it can't use up all the root's effective_cpus.
*
* The effective_xcpus mask can contain offline CPUs, but there must
* be at least one online CPU present before it can be enabled.
*
* Note that creating a remote partition with any local partition root
* above it or remote partition root underneath it is not allowed.
*/
compute_excpus(cs, tmp->new_cpus);
WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
return PERR_INVCPUS;
spin_lock_irq(&callback_lock);
isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
list_add(&cs->remote_sibling, &remote_children);
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
cpuset_force_rebuild();
cs->prs_err = 0;
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
*/
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
return 0;
}
/*
* remote_partition_disable - Remove current cpuset from remote partition list
* @cs: the cpuset to update
* @tmp: temporary masks
*
* The effective_cpus is also updated.
*
* cpuset_mutex must be held by the caller.
*/
static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
{
bool isolcpus_updated;
WARN_ON_ONCE(!is_remote_partition(cs));
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
spin_lock_irq(&callback_lock);
list_del_init(&cs->remote_sibling);
isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
NULL, cs->effective_xcpus);
if (cs->prs_err)
cs->partition_root_state = -cs->partition_root_state;
else
cs->partition_root_state = PRS_MEMBER;
/* effective_xcpus may need to be changed */
compute_excpus(cs, cs->effective_xcpus);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
cpuset_force_rebuild();
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
*/
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
}
/*
* remote_cpus_update - cpus_exclusive change of remote partition
* @cs: the cpuset to be updated
* @xcpus: the new exclusive_cpus mask, if non-NULL
* @excpus: the new effective_xcpus mask
* @tmp: temporary masks
*
* top_cpuset and subpartitions_cpus will be updated or partition can be
* invalidated.
*/
static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
struct cpumask *excpus, struct tmpmasks *tmp)
{
bool adding, deleting;
int prs = cs->partition_root_state;
int isolcpus_updated = 0;
if (WARN_ON_ONCE(!is_remote_partition(cs)))
return;
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
if (cpumask_empty(excpus)) {
cs->prs_err = PERR_CPUSEMPTY;
goto invalidate;
}
adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
/*
* Adding remote CPUs is only allowed if those CPUs are
* not allocated to other partitions and there are effective_cpus
* left in the top cpuset.
*/
if (adding) {
WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
if (!capable(CAP_SYS_ADMIN))
cs->prs_err = PERR_ACCESS;
else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
cs->prs_err = PERR_NOCPUS;
if (cs->prs_err)
goto invalidate;
}
spin_lock_irq(&callback_lock);
if (adding)
isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
if (deleting)
isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
/*
* Need to update effective_xcpus and exclusive_cpus now as
* update_sibling_cpumasks() below may iterate back to the same cs.
*/
cpumask_copy(cs->effective_xcpus, excpus);
if (xcpus)
cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
if (adding || deleting)
cpuset_force_rebuild();
/*
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
*/
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
return;
invalidate:
remote_partition_disable(cs, tmp);
}
/*
* prstate_housekeeping_conflict - check for partition & housekeeping conflicts
* @prstate: partition root state to be checked
* @new_cpus: cpu mask
* Return: true if there is conflict, false otherwise
*
* CPUs outside of boot_hk_cpus, if defined, can only be used in an
* isolated partition.
*/
static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
{
if (!have_boot_isolcpus)
return false;
if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
return true;
return false;
}
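/*
 * Example (illustration): when CPUs are isolated at boot (e.g. via the
 * "isolcpus=" command line option), boot_hk_cpus holds the remaining
 * housekeeping CPUs. Requesting a "root" partition that includes one of the
 * boot-isolated CPUs then conflicts here and is rejected with PERR_HKEEPING,
 * while an "isolated" partition request may include them.
 */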
/**
* update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
* @cs: The cpuset that requests change in partition root state
* @cmd: Partition root state change command
* @newmask: Optional new cpumask for partcmd_update
* @tmp: Temporary addmask and delmask
* Return: 0 or a partition root state error code
*
* For partcmd_enable*, the cpuset is being transformed from a non-partition
* root to a partition root. The effective_xcpus (cpus_allowed if
* effective_xcpus not set) mask of the given cpuset will be taken away from
* parent's effective_cpus. The function will return 0 if all the CPUs listed
* in effective_xcpus can be granted or an error code will be returned.
*
* For partcmd_disable, the cpuset is being transformed from a partition
* root back to a non-partition root. Any CPUs in effective_xcpus will be
* given back to parent's effective_cpus. 0 will always be returned.
*
* For partcmd_update, if the optional newmask is specified, the cpu list is
* to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
* assumed to remain the same. The cpuset should either be a valid or invalid
* partition root. The partition root state may change from valid to invalid
* or vice versa. An error code will be returned if transitioning from
* invalid to valid violates the exclusivity rule.
*
* For partcmd_invalidate, the current partition will be made invalid.
*
* The partcmd_enable* and partcmd_disable commands are used by
* update_prstate(). An error code may be returned and the caller will check
* for error.
*
* The partcmd_update command is used by update_cpumasks_hier() with newmask
* NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
* by update_cpumask() with NULL newmask. In both cases, the callers won't
* check for error and so partition_root_state and prs_err will be updated
* directly.
*/
static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
struct cpumask *newmask,
struct tmpmasks *tmp)
{
struct cpuset *parent = parent_cs(cs);
int adding; /* Adding cpus to parent's effective_cpus */
int deleting; /* Deleting cpus from parent's effective_cpus */
int old_prs, new_prs;
int part_error = PERR_NONE; /* Partition error? */
int subparts_delta = 0;
int isolcpus_updated = 0;
struct cpumask *xcpus = user_xcpus(cs);
bool nocpu;
lockdep_assert_held(&cpuset_mutex);
WARN_ON_ONCE(is_remote_partition(cs)); /* For local partition only */
/*
* new_prs will only be changed for the partcmd_update and
* partcmd_invalidate commands.
*/
adding = deleting = false;
old_prs = new_prs = cs->partition_root_state;
if (cmd == partcmd_invalidate) {
if (is_partition_invalid(cs))
return 0;
/*
* Make the current partition invalid.
*/
if (is_partition_valid(parent))
adding = cpumask_and(tmp->addmask,
xcpus, parent->effective_xcpus);
if (old_prs > 0) {
new_prs = -old_prs;
subparts_delta--;
}
goto write_error;
}
/*
* The parent must be a partition root.
* The new cpumask, if present, or the current cpus_allowed must
* not be empty.
*/
if (!is_partition_valid(parent)) {
return is_partition_invalid(parent)
? PERR_INVPARENT : PERR_NOTPART;
}
if (!newmask && xcpus_empty(cs))
return PERR_CPUSEMPTY;
nocpu = tasks_nocpu_error(parent, cs, xcpus);
if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
/*
* Need to call compute_excpus() in case exclusive_cpus is not set.
* A sibling conflict should only happen if exclusive_cpus isn't set.
*/
xcpus = tmp->delmask;
if (compute_excpus(cs, xcpus))
WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
/*
* Enabling partition root is not allowed if its
* effective_xcpus is empty.
*/
if (cpumask_empty(xcpus))
return PERR_INVCPUS;
if (prstate_housekeeping_conflict(new_prs, xcpus))
return PERR_HKEEPING;
if (tasks_nocpu_error(parent, cs, xcpus))
return PERR_NOCPUS;
/*
* This function will only be called when all the preliminary
* checks have passed. At this point, the following condition
* should hold.
*
* (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
*
* Warn if it is not the case.
*/
cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
deleting = true;
subparts_delta++;
} else if (cmd == partcmd_disable) {
/*
* May need to add cpus back to parent's effective_cpus
* (and maybe removed from subpartitions_cpus/isolated_cpus)
* for valid partition root. xcpus may contain CPUs that
* shouldn't be removed from the two global cpumasks.
*/
if (is_partition_valid(cs)) {
cpumask_copy(tmp->addmask, cs->effective_xcpus);
adding = true;
subparts_delta--;
}
new_prs = PRS_MEMBER;
} else if (newmask) {
/*
* Empty cpumask is not allowed
*/
if (cpumask_empty(newmask)) {
part_error = PERR_CPUSEMPTY;
goto write_error;
}
/* Check newmask again, whether cpus are available for parent/cs */
nocpu |= tasks_nocpu_error(parent, cs, newmask);
/*
* partcmd_update with newmask:
*
* Compute add/delete mask to/from effective_cpus
*
* For valid partition:
* addmask = exclusive_cpus & ~newmask
* & parent->effective_xcpus
* delmask = newmask & ~exclusive_cpus
* & parent->effective_xcpus
*
* For invalid partition:
* delmask = newmask & parent->effective_xcpus
*/
if (is_partition_invalid(cs)) {
adding = false;
deleting = cpumask_and(tmp->delmask,
newmask, parent->effective_xcpus);
} else {
cpumask_andnot(tmp->addmask, xcpus, newmask);
adding = cpumask_and(tmp->addmask, tmp->addmask,
parent->effective_xcpus);
cpumask_andnot(tmp->delmask, newmask, xcpus);
deleting = cpumask_and(tmp->delmask, tmp->delmask,
parent->effective_xcpus);
}
/*
* The new CPUs to be removed from the parent's effective_cpus
* must currently be present there.
*/
if (deleting) {
cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
}
/*
* Make partition invalid if parent's effective_cpus could
* become empty and there are tasks in the parent.
*/
if (nocpu && (!adding ||
!cpumask_intersects(tmp->addmask, cpu_active_mask))) {
part_error = PERR_NOCPUS;
deleting = false;
adding = cpumask_and(tmp->addmask,
xcpus, parent->effective_xcpus);
}
} else {
/*
* partcmd_update w/o newmask
*
* delmask = effective_xcpus & parent->effective_cpus
*
* This can be called from:
* 1) update_cpumasks_hier()
* 2) cpuset_hotplug_update_tasks()
*
* Check to see if it can be transitioned from valid to
* invalid partition or vice versa.
*
* A partition error happens when parent has tasks and all
* its effective CPUs will have to be distributed out.
*/
if (nocpu) {
part_error = PERR_NOCPUS;
if (is_partition_valid(cs))
adding = cpumask_and(tmp->addmask,
xcpus, parent->effective_xcpus);
} else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
cpumask_subset(xcpus, parent->effective_xcpus)) {
struct cgroup_subsys_state *css;
struct cpuset *child;
bool exclusive = true;
/*
* Converting an invalid partition to a valid one has to
* pass the cpu exclusivity test.
*/
rcu_read_lock();
cpuset_for_each_child(child, css, parent) {
if (child == cs)
continue;
if (!cpusets_are_exclusive(cs, child)) {
exclusive = false;
break;
}
}
rcu_read_unlock();
if (exclusive)
deleting = cpumask_and(tmp->delmask,
xcpus, parent->effective_cpus);
else
part_error = PERR_NOTEXCL;
}
}
write_error:
if (part_error)
WRITE_ONCE(cs->prs_err, part_error);
if (cmd == partcmd_update) {
/*
* Check for possible transition between valid and invalid
* partition root.
*/
switch (cs->partition_root_state) {
case PRS_ROOT:
case PRS_ISOLATED:
if (part_error) {
new_prs = -old_prs;
subparts_delta--;
}
break;
case PRS_INVALID_ROOT:
case PRS_INVALID_ISOLATED:
if (!part_error) {
new_prs = -old_prs;
subparts_delta++;
}
break;
}
}
if (!adding && !deleting && (new_prs == old_prs))
return 0;
/*
* Transitioning between invalid to valid or vice versa may require
* changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
* validate_change() has already been successfully called and
* CPU lists in cs haven't been updated yet. So defer it to later.
*/
if ((old_prs != new_prs) && (cmd != partcmd_update)) {
int err = update_partition_exclusive_flag(cs, new_prs);
if (err)
return err;
}
/*
* Change the parent's effective_cpus & effective_xcpus (top cpuset
* only).
*
* Newly added CPUs will be removed from effective_cpus and
* newly deleted ones will be added back to effective_cpus.
*/
spin_lock_irq(&callback_lock);
if (old_prs != new_prs) {
cs->partition_root_state = new_prs;
if (new_prs <= 0)
cs->nr_subparts = 0;
}
/*
* Adding CPUs to the parent's effective_cpus means deleting them
* from cs and vice versa.
*/
if (adding)
isolcpus_updated += partition_xcpus_del(old_prs, parent,
tmp->addmask);
if (deleting)
isolcpus_updated += partition_xcpus_add(new_prs, parent,
tmp->delmask);
if (is_partition_valid(parent)) {
parent->nr_subparts += subparts_delta;
WARN_ON_ONCE(parent->nr_subparts < 0);
}
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive_flag(cs, new_prs);
if (adding || deleting) {
cpuset_update_tasks_cpumask(parent, tmp->addmask);
update_sibling_cpumasks(parent, cs, tmp);
}
/*
* For partcmd_update without newmask, it is being called from
* cpuset_handle_hotplug(). Update the load balance flag and
* scheduling domain accordingly.
*/
if ((cmd == partcmd_update) && !newmask)
update_partition_sd_lb(cs, old_prs);
notify_partition_change(cs, old_prs);
return 0;
}
/**
* compute_partition_effective_cpumask - compute effective_cpus for partition
* @cs: partition root cpuset
* @new_ecpus: previously computed effective_cpus to be updated
*
* Compute the effective_cpus of a partition root by scanning effective_xcpus
* of child partition roots and excluding their effective_xcpus.
*
* This has the side effect of invalidating valid child partition roots,
* if necessary. Since it is called from either cpuset_hotplug_update_tasks()
* or update_cpumasks_hier() where parent and children are modified
* successively, we don't need to call update_parent_effective_cpumask()
* and the child's effective_cpus will be updated in later iterations.
*
* Note that rcu_read_lock() is assumed to be held.
*/
static void compute_partition_effective_cpumask(struct cpuset *cs,
struct cpumask *new_ecpus)
{
struct cgroup_subsys_state *css;
struct cpuset *child;
bool populated = partition_is_populated(cs, NULL);
/*
* Check child partition roots to see if they should be
* invalidated when
* 1) the child's effective_xcpus is not a subset of the new
* exclusive_cpus
* 2) all the effective_cpus will be used up and cs
* has tasks
*/
compute_excpus(cs, new_ecpus);
cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
rcu_read_lock();
cpuset_for_each_child(child, css, cs) {
if (!is_partition_valid(child))
continue;
/*
* There shouldn't be a remote partition underneath another
* partition root.
*/
WARN_ON_ONCE(is_remote_partition(child));
child->prs_err = 0;
if (!cpumask_subset(child->effective_xcpus,
cs->effective_xcpus))
child->prs_err = PERR_INVCPUS;
else if (populated &&
cpumask_subset(new_ecpus, child->effective_xcpus))
child->prs_err = PERR_NOCPUS;
if (child->prs_err) {
int old_prs = child->partition_root_state;
/*
* Invalidate child partition
*/
spin_lock_irq(&callback_lock);
make_partition_invalid(child);
cs->nr_subparts--;
child->nr_subparts = 0;
spin_unlock_irq(&callback_lock);
notify_partition_change(child, old_prs);
continue;
}
cpumask_andnot(new_ecpus, new_ecpus,
child->effective_xcpus);
}
rcu_read_unlock();
}
/*
* update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
* @cs: the cpuset to consider
* @tmp: temp variables for calculating effective_cpus & partition setup
* @force: don't skip any descendant cpusets if set
*
* When configured cpumask is changed, the effective cpumasks of this cpuset
* and all its descendants need to be updated.
*
* On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
*
* Called with cpuset_mutex held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
bool force)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
bool need_rebuild_sched_domains = false;
int old_prs, new_prs;
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
struct cpuset *parent = parent_cs(cp);
bool remote = is_remote_partition(cp);
bool update_parent = false;
old_prs = new_prs = cp->partition_root_state;
/*
* For child remote partition root (!= cs), we need to call
* remote_cpus_update() if effective_xcpus will be changed.
* Otherwise, we can skip the whole subtree.
*
* remote_cpus_update() will reuse tmp->new_cpus only after
* its current value has been processed.
*/
if (remote && (cp != cs)) {
compute_excpus(cp, tmp->new_cpus);
if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
rcu_read_unlock();
remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
rcu_read_lock();
/* Remote partition may be invalidated */
new_prs = cp->partition_root_state;
remote = (new_prs == old_prs);
}
if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
compute_partition_effective_cpumask(cp, tmp->new_cpus);
else
compute_effective_cpumask(tmp->new_cpus, cp, parent);
if (remote)
goto get_css; /* Ready to update cpuset data */
/*
* A partition with no effective_cpus is allowed as long as
* there is no task associated with it. Call
* update_parent_effective_cpumask() to check it.
*/
if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
update_parent = true;
goto update_parent_effective;
}
/*
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some CPUs unless
* it is a partition root that has explicitly distributed
* out all its CPUs.
*/
if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
cpumask_copy(tmp->new_cpus, parent->effective_cpus);
/*
* Skip the whole subtree if
* 1) the cpumask remains the same,
* 2) it has no partition root state,
* 3) the force flag is not set, and
* 4) (for v2) its load balance state matches its parent's.
*/
if (!cp->partition_root_state && !force &&
cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
(!cpuset_v2() ||
(is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
update_parent_effective:
/*
* update_parent_effective_cpumask() should have been called
* for cs already in update_cpumask(). We should also call
* cpuset_update_tasks_cpumask() again for tasks in the parent
* cpuset if the parent's effective_cpus changes.
*/
if ((cp != cs) && old_prs) {
switch (parent->partition_root_state) {
case PRS_ROOT:
case PRS_ISOLATED:
update_parent = true;
break;
default:
/*
* When parent is not a partition root or is
* invalid, child partition roots become
* invalid too.
*/
if (is_partition_valid(cp))
new_prs = -cp->partition_root_state;
WRITE_ONCE(cp->prs_err,
is_partition_invalid(parent)
? PERR_INVPARENT : PERR_NOTPART);
break;
}
}
get_css:
if (!css_tryget_online(&cp->css))
continue;
rcu_read_unlock();
if (update_parent) {
update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
/*
* The cpuset partition_root_state may become
* invalid. Capture it.
*/
new_prs = cp->partition_root_state;
}
spin_lock_irq(&callback_lock);
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
cp->partition_root_state = new_prs;
if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs))
compute_excpus(cp, cp->effective_xcpus);
/*
* Make sure effective_xcpus is properly set for a valid
* partition root.
*/
if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
cpumask_and(cp->effective_xcpus,
cp->cpus_allowed, parent->effective_xcpus);
else if (new_prs < 0)
reset_partition_data(cp);
spin_unlock_irq(&callback_lock);
notify_partition_change(cp, old_prs);
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
/*
* On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
* from parent if current cpuset isn't a valid partition root
* and their load balance states differ.
*/
if (cpuset_v2() && !is_partition_valid(cp) &&
(is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
if (is_sched_load_balance(parent))
set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
else
clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
}
/*
* On legacy hierarchy, if the effective cpumask of any non-
* empty cpuset is changed, we need to rebuild sched domains.
* On default hierarchy, the cpuset needs to be a partition
* root as well.
*/
if (!cpumask_empty(cp->cpus_allowed) &&
is_sched_load_balance(cp) &&
(!cpuset_v2() || is_partition_valid(cp)))
need_rebuild_sched_domains = true;
rcu_read_lock();
css_put(&cp->css);
}
rcu_read_unlock();
if (need_rebuild_sched_domains)
cpuset_force_rebuild();
}
/**
* update_sibling_cpumasks - Update siblings' cpumasks
* @parent: Parent cpuset
* @cs: Current cpuset
* @tmp: Temp variables
*/
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct tmpmasks *tmp)
{
struct cpuset *sibling;
struct cgroup_subsys_state *pos_css;
lockdep_assert_held(&cpuset_mutex);
/*
* Check all its siblings and call update_cpumasks_hier()
* if their effective_cpus will need to be changed.
*
* It is possible that a change in the parent's effective_cpus, due to
* a change in a child partition's effective_xcpus, will impact its
* siblings even if they do not inherit the parent's effective_cpus
* directly.
*
* The update_cpumasks_hier() function may sleep. So we have to
* release the RCU read lock before calling it.
*/
rcu_read_lock();
cpuset_for_each_child(sibling, pos_css, parent) {
if (sibling == cs)
continue;
if (!is_partition_valid(sibling)) {
compute_effective_cpumask(tmp->new_cpus, sibling,
parent);
if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
continue;
} else if (is_remote_partition(sibling)) {
/*
* Change in a sibling cpuset won't affect a remote
* partition root.
*/
continue;
}
if (!css_tryget_online(&sibling->css))
continue;
rcu_read_unlock();
update_cpumasks_hier(sibling, tmp, false);
rcu_read_lock();
css_put(&sibling->css);
}
rcu_read_unlock();
}
static int parse_cpuset_cpulist(const char *buf, struct cpumask *out_mask)
{
int retval;
retval = cpulist_parse(buf, out_mask);
if (retval < 0)
return retval;
if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
return -EINVAL;
return 0;
}
/**
* validate_partition - Validate a cpuset partition configuration
* @cs: The cpuset to validate
* @trialcs: The trial cpuset containing proposed configuration changes
*
* If any validation check fails, the appropriate error code is set in the
* cpuset's prs_err field.
*
* Return: PRS error code (0 if valid, non-zero error code if invalid)
*/
static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs)
{
struct cpuset *parent = parent_cs(cs);
if (cs_is_member(trialcs))
return PERR_NONE;
if (cpumask_empty(trialcs->effective_xcpus))
return PERR_INVCPUS;
if (prstate_housekeeping_conflict(trialcs->partition_root_state,
trialcs->effective_xcpus))
return PERR_HKEEPING;
if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus))
return PERR_NOCPUS;
return PERR_NONE;
}
static int cpus_allowed_validate_change(struct cpuset *cs, struct cpuset *trialcs,
struct tmpmasks *tmp)
{
int retval;
struct cpuset *parent = parent_cs(cs);
retval = validate_change(cs, trialcs);
if ((retval == -EINVAL) && cpuset_v2()) {
struct cgroup_subsys_state *css;
struct cpuset *cp;
/*
* The -EINVAL error code indicates that the partition sibling
* CPU exclusivity rule has been violated. We still allow
* the cpumask change to proceed while invalidating the
* partition. However, any conflicting sibling partitions
* have to be marked as invalid too.
*/
trialcs->prs_err = PERR_NOTEXCL;
rcu_read_lock();
cpuset_for_each_child(cp, css, parent) {
struct cpumask *xcpus = user_xcpus(trialcs);
if (is_partition_valid(cp) &&
cpumask_intersects(xcpus, cp->effective_xcpus)) {
rcu_read_unlock();
update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, tmp);
rcu_read_lock();
}
}
rcu_read_unlock();
retval = 0;
}
return retval;
}
/**
* partition_cpus_change - Handle partition state changes due to CPU mask updates
* @cs: The target cpuset being modified
* @trialcs: The trial cpuset containing proposed configuration changes
* @tmp: Temporary masks for intermediate calculations
*
* This function handles partition state transitions triggered by CPU mask changes.
* CPU modifications may cause a partition to be disabled or require state updates.
*/
static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs,
struct tmpmasks *tmp)
{
enum prs_errcode prs_err;
if (cs_is_member(cs))
return;
prs_err = validate_partition(cs, trialcs);
if (prs_err)
trialcs->prs_err = cs->prs_err = prs_err;
if (is_remote_partition(cs)) {
if (trialcs->prs_err)
remote_partition_disable(cs, tmp);
else
remote_cpus_update(cs, trialcs->exclusive_cpus,
trialcs->effective_xcpus, tmp);
} else {
if (trialcs->prs_err)
update_parent_effective_cpumask(cs, partcmd_invalidate,
NULL, tmp);
else
update_parent_effective_cpumask(cs, partcmd_update,
trialcs->effective_xcpus, tmp);
}
}
/**
* update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
* @cs: the cpuset to consider
* @trialcs: trial cpuset
* @buf: buffer of cpu numbers written to this cpuset
*/
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
const char *buf)
{
int retval;
struct tmpmasks tmp;
bool force = false;
int old_prs = cs->partition_root_state;
retval = parse_cpuset_cpulist(buf, trialcs->cpus_allowed);
if (retval < 0)
return retval;
/* Nothing to do if the cpus didn't change */
if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
return 0;
if (alloc_tmpmasks(&tmp))
return -ENOMEM;
compute_trialcs_excpus(trialcs, cs);
trialcs->prs_err = PERR_NONE;
retval = cpus_allowed_validate_change(cs, trialcs, &tmp);
if (retval < 0)
goto out_free;
/*
* Check all the descendants in update_cpumasks_hier() if
* effective_xcpus is to be changed.
*/
force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
partition_cpus_change(cs, trialcs, &tmp);
spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
if ((old_prs > 0) && !is_partition_valid(cs))
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
/* effective_cpus/effective_xcpus will be updated here */
update_cpumasks_hier(cs, &tmp, force);
/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
if (cs->partition_root_state)
update_partition_sd_lb(cs, old_prs);
out_free:
free_tmpmasks(&tmp);
return retval;
}
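/*
 * Example flow (illustration): echoing "0-3" into a cgroup's cpuset.cpus
 * lands here via cpuset_write_resmask(). The list is parsed into the trial
 * cpuset, sibling-exclusive CPUs are factored out by compute_trialcs_excpus(),
 * validate_change() and partition_cpus_change() adjust any affected partition
 * state, and update_cpumasks_hier() then propagates the new effective_cpus
 * down the subtree.
 */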
/**
* update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
* @cs: the cpuset to consider
* @trialcs: trial cpuset
* @buf: buffer of cpu numbers written to this cpuset
*
* The tasks' cpumask will be updated if cs is a valid partition root.
*/
static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
const char *buf)
{
int retval;
struct tmpmasks tmp;
bool force = false;
int old_prs = cs->partition_root_state;
retval = parse_cpuset_cpulist(buf, trialcs->exclusive_cpus);
if (retval < 0)
return retval;
/* Nothing to do if the CPUs didn't change */
if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
return 0;
/*
* Reject the change if the requested exclusive CPUs conflict with
* those of the siblings.
*/
if (compute_trialcs_excpus(trialcs, cs))
return -EINVAL;
/*
* Check all the descendants in update_cpumasks_hier() if
* effective_xcpus is to be changed.
*/
force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
retval = validate_change(cs, trialcs);
if (retval)
return retval;
if (alloc_tmpmasks(&tmp))
return -ENOMEM;
trialcs->prs_err = PERR_NONE;
partition_cpus_change(cs, trialcs, &tmp);
spin_lock_irq(&callback_lock);
cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
if ((old_prs > 0) && !is_partition_valid(cs))
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
/*
* Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
* of the subtree when it is a valid partition root or effective_xcpus
* is updated.
*/
if (is_partition_valid(cs) || force)
update_cpumasks_hier(cs, &tmp, force);
/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
if (cs->partition_root_state)
update_partition_sd_lb(cs, old_prs);
free_tmpmasks(&tmp);
return 0;
}
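/*
 * Example (illustration): writing "4-5" to cpuset.cpus.exclusive reserves
 * those CPUs for this cpuset's (future) partition. If the requested CPUs
 * overlap a sibling's exclusive_cpus or effective_xcpus,
 * compute_trialcs_excpus() reports the conflict and the write fails with
 * -EINVAL; otherwise the new effective_xcpus is propagated through
 * update_cpumasks_hier() when needed.
 */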
/*
* Migrate memory region from one set of nodes to another. This is
* performed asynchronously as it can be called from process migration path
* holding locks involved in process management. All mm migrations are
* performed in the queued order and can be waited for by flushing
* cpuset_migrate_mm_wq.
*/
struct cpuset_migrate_mm_work {
struct work_struct work;
struct mm_struct *mm;
nodemask_t from;
nodemask_t to;
};
static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
struct cpuset_migrate_mm_work *mwork =
container_of(work, struct cpuset_migrate_mm_work, work);
/* on a wq worker, no need to worry about %current's mems_allowed */
do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
mmput(mwork->mm);
kfree(mwork);
}
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to)
{
struct cpuset_migrate_mm_work *mwork;
if (nodes_equal(*from, *to)) {
mmput(mm);
return;
}
mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
if (mwork) {
mwork->mm = mm;
mwork->from = *from;
mwork->to = *to;
INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
queue_work(cpuset_migrate_mm_wq, &mwork->work);
} else {
mmput(mm);
}
}
static void flush_migrate_mm_task_workfn(struct callback_head *head)
{
flush_workqueue(cpuset_migrate_mm_wq);
kfree(head);
}
static void schedule_flush_migrate_mm(void)
{
struct callback_head *flush_cb;
flush_cb = kzalloc(sizeof(struct callback_head), GFP_KERNEL);
if (!flush_cb)
return;
init_task_work(flush_cb, flush_migrate_mm_task_workfn);
if (task_work_add(current, flush_cb, TWA_RESUME))
kfree(flush_cb);
}
/*
* cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
* @tsk: the task to change
* @newmems: new nodes that the task will be bound to
*
* We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
* and to rebind the task's mempolicy, if any. If the task is allocating in
* parallel, it might temporarily see an empty intersection, which results in
* a seqlock check and retry before OOM or allocation failure.
*/
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
task_lock(tsk);
local_irq_disable();
write_seqcount_begin(&tsk->mems_allowed_seq);
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems);
tsk->mems_allowed = *newmems;
write_seqcount_end(&tsk->mems_allowed_seq);
local_irq_enable();
task_unlock(tsk);
}
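/*
 * Illustrative sketch (assumption: the reader side uses the
 * read_mems_allowed_begin()/read_mems_allowed_retry() helpers, as the page
 * allocator does) of how readers pair with the seqcount written above:
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		(consult current->mems_allowed for the allocation)
 *	} while (read_mems_allowed_retry(seq));
 */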
static void *cpuset_being_rebound;
/**
* cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
void cpuset_update_tasks_nodemask(struct cpuset *cs)
{
static nodemask_t newmems; /* protected by cpuset_mutex */
struct css_task_iter it;
struct task_struct *task;
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
guarantee_online_mems(cs, &newmems);
/*
* The mpol_rebind_mm() call takes mmap_lock, which we couldn't
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
* the global cpuset_mutex, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
*/
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it))) {
struct mm_struct *mm;
bool migrate;
cpuset_change_task_nodemask(task, &newmems);
mm = get_task_mm(task);
if (!mm)
continue;
migrate = is_memory_migrate(cs);
mpol_rebind_mm(mm, &cs->mems_allowed);
if (migrate)
cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
else
mmput(mm);
}
css_task_iter_end(&it);
/*
* All the tasks' nodemasks have been updated, update
* cs->old_mems_allowed.
*/
cs->old_mems_allowed = newmems;
/* We're done rebinding vmas to this cpuset's new mems_allowed. */
cpuset_being_rebound = NULL;
}
/*
* update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
* @cs: the cpuset to consider
* @new_mems: a temp variable for calculating new effective_mems
*
* When configured nodemask is changed, the effective nodemasks of this cpuset
* and all its descendants need to be updated.
*
* On legacy hierarchy, effective_mems will be the same as mems_allowed.
*
* Called with cpuset_mutex held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
struct cpuset *parent = parent_cs(cp);
nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
/*
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some MEMs.
*/
if (is_in_v2_mode() && nodes_empty(*new_mems))
*new_mems = parent->effective_mems;
/* Skip the whole subtree if the nodemask remains the same. */
if (nodes_equal(*new_mems, cp->effective_mems)) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
if (!css_tryget_online(&cp->css))
continue;
rcu_read_unlock();
spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
spin_unlock_irq(&callback_lock);
WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
cpuset_update_tasks_nodemask(cp);
rcu_read_lock();
css_put(&cp->css);
}
rcu_read_unlock();
}
/*
* Handle user request to change the 'mems' memory placement
* of a cpuset. Needs to validate the request, update the
* cpuset's mems_allowed, and, for each task in the cpuset,
* update its mems_allowed, rebind the task's mempolicy and any vma
* mempolicies, and, if the cpuset is marked 'memory_migrate',
* migrate the task's pages to the new memory.
*
* Call with cpuset_mutex held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
*/
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
const char *buf)
{
int retval;
/*
* An empty mems_allowed is ok iff there are no tasks in the cpuset.
* The validate_change() call ensures that cpusets with tasks have memory.
*/
retval = nodelist_parse(buf, trialcs->mems_allowed);
if (retval < 0)
goto done;
if (!nodes_subset(trialcs->mems_allowed,
top_cpuset.mems_allowed)) {
retval = -EINVAL;
goto done;
}
if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
retval = 0; /* Too easy - nothing to do */
goto done;
}
retval = validate_change(cs, trialcs);
if (retval < 0)
goto done;
check_insane_mems_config(&trialcs->mems_allowed);
spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
return retval;
}
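/*
 * Example (illustration): echoing "0-1" into a cgroup's cpuset.mems reaches
 * this function through cpuset_write_resmask(). After validation the new
 * mems_allowed is stored under callback_lock and update_nodemasks_hier()
 * recomputes effective_mems for the whole subtree, rebinding task and vma
 * mempolicies and, with memory_migrate set, queueing page migration.
 */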
bool current_cpuset_is_being_rebound(void)
{
bool ret;
rcu_read_lock();
ret = task_cs(current) == cpuset_being_rebound;
rcu_read_unlock();
return ret;
}
/*
* cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (see cpuset_flagbits_t)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
* Call with cpuset_mutex held.
*/
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
int turning_on)
{
struct cpuset *trialcs;
int balance_flag_changed;
int spread_flag_changed;
int err;
trialcs = dup_or_alloc_cpuset(cs);
if (!trialcs)
return -ENOMEM;
if (turning_on)
set_bit(bit, &trialcs->flags);
else
clear_bit(bit, &trialcs->flags);
err = validate_change(cs, trialcs);
if (err < 0)
goto out;
balance_flag_changed = (is_sched_load_balance(cs) !=
is_sched_load_balance(trialcs));
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
spin_unlock_irq(&callback_lock);
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
if (cpuset_v2())
cpuset_force_rebuild();
else
rebuild_sched_domains_locked();
}
if (spread_flag_changed)
cpuset1_update_tasks_flags(cs);
out:
free_cpuset(trialcs);
return err;
}
/**
* update_prstate - update partition_root_state
* @cs: the cpuset to update
* @new_prs: new partition root state
* Return: 0 if successful, != 0 if error
*
* Call with cpuset_mutex held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{
int err = PERR_NONE, old_prs = cs->partition_root_state;
struct cpuset *parent = parent_cs(cs);
struct tmpmasks tmpmask;
bool isolcpus_updated = false;
if (old_prs == new_prs)
return 0;
/*
* Treat a previously invalid partition root as if it is a "member".
*/
if (new_prs && is_partition_invalid(cs))
old_prs = PRS_MEMBER;
if (alloc_tmpmasks(&tmpmask))
return -ENOMEM;
err = update_partition_exclusive_flag(cs, new_prs);
if (err)
goto out;
if (!old_prs) {
/*
* cpus_allowed and exclusive_cpus cannot both be empty.
*/
if (xcpus_empty(cs)) {
err = PERR_CPUSEMPTY;
goto out;
}
/*
* We don't support the creation of a new local partition with
* a remote partition underneath it. This unsupported
* setting can happen only if parent is the top_cpuset because
* a remote partition cannot be created underneath an existing
* local or remote partition.
*/
if ((parent == &top_cpuset) &&
cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
err = PERR_REMOTE;
goto out;
}
/*
* If the parent is a valid partition root, enable a local partition.
* Otherwise, enable a remote partition.
*/
if (is_partition_valid(parent)) {
enum partition_cmd cmd = (new_prs == PRS_ROOT)
? partcmd_enable : partcmd_enablei;
err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
} else {
err = remote_partition_enable(cs, new_prs, &tmpmask);
}
} else if (old_prs && new_prs) {
/*
* A change in load balance state only, no change in cpumasks.
* Need to update isolated_cpus.
*/
isolcpus_updated = true;
} else {
/*
* Switching back to member is always allowed even if it
* disables child partitions.
*/
if (is_remote_partition(cs))
remote_partition_disable(cs, &tmpmask);
else
update_parent_effective_cpumask(cs, partcmd_disable,
NULL, &tmpmask);
/*
* Invalidation of child partitions will be done in
* update_cpumasks_hier().
*/
}
out:
/*
* Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
* happens.
*/
if (err) {
new_prs = -new_prs;
update_partition_exclusive_flag(cs, new_prs);
}
spin_lock_irq(&callback_lock);
cs->partition_root_state = new_prs;
WRITE_ONCE(cs->prs_err, err);
if (!is_partition_valid(cs))
reset_partition_data(cs);
else if (isolcpus_updated)
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
/* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
/* A newly created partition must have effective_xcpus set */
WARN_ON_ONCE(!old_prs && (new_prs > 0)
&& cpumask_empty(cs->effective_xcpus));
/* Update sched domains and load balance flag */
update_partition_sd_lb(cs, old_prs);
notify_partition_change(cs, old_prs);
if (force_sd_rebuild)
rebuild_sched_domains_locked();
free_tmpmasks(&tmpmask);
return 0;
}
static struct cpuset *cpuset_attach_old_cs;
/*
* Check to see if a cpuset can accept a new task
* For v1, cpus_allowed and mems_allowed can't be empty.
* For v2, effective_cpus can't be empty.
* Note that in v1, effective_cpus = cpus_allowed.
*/
static int cpuset_can_attach_check(struct cpuset *cs)
{
if (cpumask_empty(cs->effective_cpus) ||
(!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
return -ENOSPC;
return 0;
}
static void reset_migrate_dl_data(struct cpuset *cs)
{
cs->nr_migrate_dl_tasks = 0;
cs->sum_migrate_dl_bw = 0;
}
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs, *oldcs;
struct task_struct *task;
bool cpus_updated, mems_updated;
int ret;
/* used later by cpuset_attach() */
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
oldcs = cpuset_attach_old_cs;
cs = css_cs(css);
mutex_lock(&cpuset_mutex);
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
if (ret)
goto out_unlock;
cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
cgroup_taskset_for_each(task, css, tset) {
ret = task_can_attach(task);
if (ret)
goto out_unlock;
/*
* Skip the per-task permission check in v2 when nothing changes;
* migration permission derives from hierarchy ownership in
* cgroup_procs_write_permission().
*/
if (!cpuset_v2() || (cpus_updated || mems_updated)) {
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
}
if (dl_task(task)) {
cs->nr_migrate_dl_tasks++;
cs->sum_migrate_dl_bw += task->dl.dl_bw;
}
}
if (!cs->nr_migrate_dl_tasks)
goto out_success;
if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
if (unlikely(cpu >= nr_cpu_ids)) {
reset_migrate_dl_data(cs);
ret = -EINVAL;
goto out_unlock;
}
ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
if (ret) {
reset_migrate_dl_data(cs);
goto out_unlock;
}
}
out_success:
/*
* Mark attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
*/
cs->attach_in_progress++;
out_unlock:
mutex_unlock(&cpuset_mutex);
return ret;
}
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
mutex_lock(&cpuset_mutex);
dec_attach_in_progress_locked(cs);
if (cs->nr_migrate_dl_tasks) {
int cpu = cpumask_any(cs->effective_cpus);
dl_bw_free(cpu, cs->sum_migrate_dl_bw);
reset_migrate_dl_data(cs);
}
mutex_unlock(&cpuset_mutex);
}
/*
* Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
* but we can't allocate it dynamically there. Define it globally and
* allocate it from cpuset_init().
*/
static cpumask_var_t cpus_attach;
static nodemask_t cpuset_attach_nodemask_to;
static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
{
lockdep_assert_held(&cpuset_mutex);
if (cs != &top_cpuset)
guarantee_active_cpus(task, cpus_attach);
else
cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
subpartitions_cpus);
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
*/
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
cpuset1_update_task_spread_flags(cs, task);
}
static void cpuset_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct task_struct *leader;
struct cgroup_subsys_state *css;
struct cpuset *cs;
struct cpuset *oldcs = cpuset_attach_old_cs;
bool cpus_updated, mems_updated;
bool queue_task_work = false;
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
mutex_lock(&cpuset_mutex);
cpus_updated = !cpumask_equal(cs->effective_cpus,
oldcs->effective_cpus);
mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
/*
* In the default hierarchy, enabling cpuset in the child cgroups
* will trigger a number of cpuset_attach() calls with no change
* in effective cpus and mems. In that case, we can optimize out
* by skipping the task iteration and update.
*/
if (cpuset_v2() && !cpus_updated && !mems_updated) {
cpuset_attach_nodemask_to = cs->effective_mems;
goto out;
}
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cgroup_taskset_for_each(task, css, tset)
cpuset_attach_task(cs, task);
/*
* Change mm for all threadgroup leaders. This is expensive and may
* sleep and should be moved outside migration path proper. Skip it
* if there is no change in effective_mems and CS_MEMORY_MIGRATE is
* not set.
*/
cpuset_attach_nodemask_to = cs->effective_mems;
if (!is_memory_migrate(cs) && !mems_updated)
goto out;
cgroup_taskset_for_each_leader(leader, css, tset) {
struct mm_struct *mm = get_task_mm(leader);
if (mm) {
mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
/*
* old_mems_allowed is the same as mems_allowed
* here, except if this task is being moved
* automatically due to hotplug. In that case
* @mems_allowed has been updated and is empty, so
* @old_mems_allowed is the right nodemask to
* migrate the mm from.
*/
if (is_memory_migrate(cs)) {
cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
&cpuset_attach_nodemask_to);
queue_task_work = true;
} else
mmput(mm);
}
}
out:
if (queue_task_work)
schedule_flush_migrate_mm();
cs->old_mems_allowed = cpuset_attach_nodemask_to;
if (cs->nr_migrate_dl_tasks) {
cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
reset_migrate_dl_data(cs);
}
dec_attach_in_progress_locked(cs);
mutex_unlock(&cpuset_mutex);
}
/*
* Common handling for a write to a "cpus" or "mems" file.
*/
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cpuset *cs = css_cs(of_css(of));
struct cpuset *trialcs;
int retval = -ENODEV;
/* root is read-only */
if (cs == &top_cpuset)
return -EACCES;
buf = strstrip(buf);
cpuset_full_lock();
if (!is_cpuset_online(cs))
goto out_unlock;
trialcs = dup_or_alloc_cpuset(cs);
if (!trialcs) {
retval = -ENOMEM;
goto out_unlock;
}
switch (of_cft(of)->private) {
case FILE_CPULIST:
retval = update_cpumask(cs, trialcs, buf);
break;
case FILE_EXCLUSIVE_CPULIST:
retval = update_exclusive_cpumask(cs, trialcs, buf);
break;
case FILE_MEMLIST:
retval = update_nodemask(cs, trialcs, buf);
break;
default:
retval = -EINVAL;
break;
}
free_cpuset(trialcs);
if (force_sd_rebuild)
rebuild_sched_domains_locked();
out_unlock:
cpuset_full_unlock();
if (of_cft(of)->private == FILE_MEMLIST)
schedule_flush_migrate_mm();
return retval ?: nbytes;
}
/*
* These ascii lists should be read in a single call, by using a user
* buffer large enough to hold the entire map. If read in smaller
* chunks, there is no guarantee of atomicity. Since the display format
* used, list of ranges of sequential numbers, is variable length,
* and since these maps can change value dynamically, one could read
* gibberish by doing partial reads while a list was changing.
*/
int cpuset_common_seq_show(struct seq_file *sf, void *v)
{
struct cpuset *cs = css_cs(seq_css(sf));
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
spin_lock_irq(&callback_lock);
switch (type) {
case FILE_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
break;
case FILE_MEMLIST:
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
break;
case FILE_EFFECTIVE_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
break;
case FILE_EFFECTIVE_MEMLIST:
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
break;
case FILE_EXCLUSIVE_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
break;
case FILE_EFFECTIVE_XCPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
break;
case FILE_SUBPARTS_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
break;
case FILE_ISOLATED_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
break;
default:
ret = -EINVAL;
}
spin_unlock_irq(&callback_lock);
return ret;
}
static int cpuset_partition_show(struct seq_file *seq, void *v)
{
struct cpuset *cs = css_cs(seq_css(seq));
const char *err, *type = NULL;
switch (cs->partition_root_state) {
case PRS_ROOT:
seq_puts(seq, "root\n");
break;
case PRS_ISOLATED:
seq_puts(seq, "isolated\n");
break;
case PRS_MEMBER:
seq_puts(seq, "member\n");
break;
case PRS_INVALID_ROOT:
type = "root";
fallthrough;
case PRS_INVALID_ISOLATED:
if (!type)
type = "isolated";
err = perr_strings[READ_ONCE(cs->prs_err)];
if (err)
seq_printf(seq, "%s invalid (%s)\n", type, err);
else
seq_printf(seq, "%s invalid\n", type);
break;
}
return 0;
}
static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cpuset *cs = css_cs(of_css(of));
int val;
int retval = -ENODEV;
buf = strstrip(buf);
if (!strcmp(buf, "root"))
val = PRS_ROOT;
else if (!strcmp(buf, "member"))
val = PRS_MEMBER;
else if (!strcmp(buf, "isolated"))
val = PRS_ISOLATED;
else
return -EINVAL;
cpuset_full_lock();
if (is_cpuset_online(cs))
retval = update_prstate(cs, val);
cpuset_full_unlock();
return retval ?: nbytes;
}
/*
* This is currently a minimal set for the default hierarchy. It can be
* expanded later on by migrating more features and control files from v1.
*/
static struct cftype dfl_files[] = {
{
.name = "cpus",
.seq_show = cpuset_common_seq_show,
.write = cpuset_write_resmask,
.max_write_len = (100U + 6 * NR_CPUS),
.private = FILE_CPULIST,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "mems",
.seq_show = cpuset_common_seq_show,
.write = cpuset_write_resmask,
.max_write_len = (100U + 6 * MAX_NUMNODES),
.private = FILE_MEMLIST,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "cpus.effective",
.seq_show = cpuset_common_seq_show,
.private = FILE_EFFECTIVE_CPULIST,
},
{
.name = "mems.effective",
.seq_show = cpuset_common_seq_show,
.private = FILE_EFFECTIVE_MEMLIST,
},
{
.name = "cpus.partition",
.seq_show = cpuset_partition_show,
.write = cpuset_partition_write,
.private = FILE_PARTITION_ROOT,
.flags = CFTYPE_NOT_ON_ROOT,
.file_offset = offsetof(struct cpuset, partition_file),
},
{
.name = "cpus.exclusive",
.seq_show = cpuset_common_seq_show,
.write = cpuset_write_resmask,
.max_write_len = (100U + 6 * NR_CPUS),
.private = FILE_EXCLUSIVE_CPULIST,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "cpus.exclusive.effective",
.seq_show = cpuset_common_seq_show,
.private = FILE_EFFECTIVE_XCPULIST,
.flags = CFTYPE_NOT_ON_ROOT,
},
{
.name = "cpus.subpartitions",
.seq_show = cpuset_common_seq_show,
.private = FILE_SUBPARTS_CPULIST,
.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
},
{
.name = "cpus.isolated",
.seq_show = cpuset_common_seq_show,
.private = FILE_ISOLATED_CPULIST,
.flags = CFTYPE_ONLY_ON_ROOT,
},
{ } /* terminate */
};
/**
* cpuset_css_alloc - Allocate a cpuset css
* @parent_css: Parent css of the control group that the new cpuset will be
* part of
* Return: cpuset css on success, ERR_PTR(-ENOMEM) on failure.
*
* Allocate and initialize a new cpuset css for non-NULL @parent_css; return
* the top cpuset css otherwise.
*/
static struct cgroup_subsys_state *
cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cpuset *cs;
if (!parent_css)
return &top_cpuset.css;
cs = dup_or_alloc_cpuset(NULL);
if (!cs)
return ERR_PTR(-ENOMEM);
__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
fmeter_init(&cs->fmeter);
cs->relax_domain_level = -1;
INIT_LIST_HEAD(&cs->remote_sibling);
/* Set CS_MEMORY_MIGRATE for default hierarchy */
if (cpuset_v2())
__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
return &cs->css;
}
static int cpuset_css_online(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
struct cpuset *parent = parent_cs(cs);
struct cpuset *tmp_cs;
struct cgroup_subsys_state *pos_css;
if (!parent)
return 0;
cpuset_full_lock();
if (is_spread_page(parent))
set_bit(CS_SPREAD_PAGE, &cs->flags);
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
/*
* For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
*/
if (cpuset_v2() && !is_sched_load_balance(parent))
clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cpuset_inc();
spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
/*
* Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
* set. This flag handling is implemented in cgroup core for
* historical reasons - the flag may be specified during mount.
*
* Currently, if any sibling cpusets have exclusive cpus or mem, we
* refuse to clone the configuration - thereby refusing the task to
* be entered, and as a result refusing the sys_unshare() or
* clone() which initiated it. If this becomes a problem for some
* users who wish to allow that scenario, then this could be
* changed to grant parent->cpus_allowed-sibling_cpus_exclusive
* (and likewise for mems) to the new cgroup.
*/
rcu_read_lock();
cpuset_for_each_child(tmp_cs, pos_css, parent) {
if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
rcu_read_unlock();
goto out_unlock;
}
}
rcu_read_unlock();
spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
cpuset_full_unlock();
return 0;
}
/*
* If the cpuset being removed has its flag 'sched_load_balance'
* enabled, then simulate turning sched_load_balance off, which
* will call rebuild_sched_domains_locked(). That is not needed
* in the default hierarchy where only changes in partition
* will cause repartitioning.
*/
static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
cpuset_full_lock();
if (!cpuset_v2() && is_sched_load_balance(cs))
cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
cpuset_dec();
cpuset_full_unlock();
}
/*
* If a dying cpuset has the 'cpus.partition' enabled, turn it off by
* changing it back to member to free its exclusive CPUs back to the pool to
* be used by other online cpusets.
*/
static void cpuset_css_killed(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
cpuset_full_lock();
/* Reset valid partition back to member */
if (is_partition_valid(cs))
update_prstate(cs, PRS_MEMBER);
cpuset_full_unlock();
}
static void cpuset_css_free(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
free_cpuset(cs);
}
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
top_cpuset.mems_allowed = node_possible_map;
} else {
cpumask_copy(top_cpuset.cpus_allowed,
top_cpuset.effective_cpus);
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
spin_unlock_irq(&callback_lock);
mutex_unlock(&cpuset_mutex);
}
/*
* In case the child is cloned into a cpuset different from its parent,
* additional checks are done to see if the move is allowed.
*/
static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
{
struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
bool same_cs;
int ret;
rcu_read_lock();
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs)
return 0;
lockdep_assert_held(&cgroup_mutex);
mutex_lock(&cpuset_mutex);
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
if (ret)
goto out_unlock;
ret = task_can_attach(task);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
/*
* Mark attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
*/
cs->attach_in_progress++;
out_unlock:
mutex_unlock(&cpuset_mutex);
return ret;
}
static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
{
struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
bool same_cs;
rcu_read_lock();
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs)
return;
dec_attach_in_progress(cs);
}
/*
* Make sure the new task conforms to the current state of its parent,
* which could have been changed by cpuset just after it inherits the
* state from the parent and before it sits on the cgroup's task list.
*/
static void cpuset_fork(struct task_struct *task)
{
struct cpuset *cs;
bool same_cs;
rcu_read_lock();
cs = task_cs(task);
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs) {
if (cs == &top_cpuset)
return;
set_cpus_allowed_ptr(task, current->cpus_ptr);
task->mems_allowed = current->mems_allowed;
return;
}
/* CLONE_INTO_CGROUP */
mutex_lock(&cpuset_mutex);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cpuset_attach_task(cs, task);
dec_attach_in_progress_locked(cs);
mutex_unlock(&cpuset_mutex);
}
struct cgroup_subsys cpuset_cgrp_subsys = {
.css_alloc = cpuset_css_alloc,
.css_online = cpuset_css_online,
.css_offline = cpuset_css_offline,
.css_killed = cpuset_css_killed,
.css_free = cpuset_css_free,
.can_attach = cpuset_can_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
.bind = cpuset_bind,
.can_fork = cpuset_can_fork,
.cancel_fork = cpuset_cancel_fork,
.fork = cpuset_fork,
#ifdef CONFIG_CPUSETS_V1
.legacy_cftypes = cpuset1_files,
#endif
.dfl_cftypes = dfl_files,
.early_init = true,
.threaded = true,
};
/**
* cpuset_init - initialize cpusets at system boot
*
* Description: Initialize top_cpuset
**/
int __init cpuset_init(void)
{
BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
cpumask_setall(top_cpuset.cpus_allowed);
nodes_setall(top_cpuset.mems_allowed);
cpumask_setall(top_cpuset.effective_cpus);
cpumask_setall(top_cpuset.effective_xcpus);
cpumask_setall(top_cpuset.exclusive_cpus);
nodes_setall(top_cpuset.effective_mems);
fmeter_init(&top_cpuset.fmeter);
INIT_LIST_HEAD(&remote_children);
BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN);
if (have_boot_isolcpus) {
BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL));
cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN));
cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus);
}
return 0;
}
static void
hotplug_update_tasks(struct cpuset *cs,
struct cpumask *new_cpus, nodemask_t *new_mems,
bool cpus_updated, bool mems_updated)
{
/* A partition root is allowed to have empty effective cpus */
if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
spin_unlock_irq(&callback_lock);
if (cpus_updated)
cpuset_update_tasks_cpumask(cs, new_cpus);
if (mems_updated)
cpuset_update_tasks_nodemask(cs);
}
void cpuset_force_rebuild(void)
{
force_sd_rebuild = true;
}
/**
* cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
* @cs: cpuset in interest
* @tmp: the tmpmasks structure pointer
*
* Compare @cs's cpu and mem masks against top_cpuset and if some have gone
* offline, update @cs accordingly. If @cs ends up with no CPU or memory,
* all its tasks are moved to the nearest ancestor with both resources.
*/
static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
{
static cpumask_t new_cpus;
static nodemask_t new_mems;
bool cpus_updated;
bool mems_updated;
bool remote;
int partcmd = -1;
struct cpuset *parent;
retry:
wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
mutex_lock(&cpuset_mutex);
/*
* We have raced with task attaching. We wait until attaching
* is finished, so we won't attach a task to an empty cpuset.
*/
if (cs->attach_in_progress) {
mutex_unlock(&cpuset_mutex);
goto retry;
}
parent = parent_cs(cs);
compute_effective_cpumask(&new_cpus, cs, parent);
nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
if (!tmp || !cs->partition_root_state)
goto update_tasks;
/*
* Compute effective_cpus for valid partition root, may invalidate
* child partition roots if necessary.
*/
remote = is_remote_partition(cs);
if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
compute_partition_effective_cpumask(cs, &new_cpus);
if (remote && cpumask_empty(&new_cpus) &&
partition_is_populated(cs, NULL)) {
cs->prs_err = PERR_HOTPLUG;
remote_partition_disable(cs, tmp);
compute_effective_cpumask(&new_cpus, cs, parent);
remote = false;
}
/*
* Force the partition to become invalid if either one of
* the following conditions hold:
* 1) empty effective cpus but not valid empty partition.
* 2) parent is invalid or doesn't grant any cpus to child
* partitions.
*/
if (is_local_partition(cs) && (!is_partition_valid(parent) ||
tasks_nocpu_error(parent, cs, &new_cpus)))
partcmd = partcmd_invalidate;
/*
* On the other hand, an invalid partition root may be transitioned
* back to a regular one with a non-empty effective xcpus.
*/
else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
!cpumask_empty(cs->effective_xcpus))
partcmd = partcmd_update;
if (partcmd >= 0) {
update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
compute_partition_effective_cpumask(cs, &new_cpus);
cpuset_force_rebuild();
}
}
update_tasks:
cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
mems_updated = !nodes_equal(new_mems, cs->effective_mems);
if (!cpus_updated && !mems_updated)
goto unlock; /* Hotplug doesn't affect this cpuset */
if (mems_updated)
check_insane_mems_config(&new_mems);
if (is_in_v2_mode())
hotplug_update_tasks(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
else
cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
unlock:
mutex_unlock(&cpuset_mutex);
}
/**
* cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
*
* This function is called after either CPU or memory configuration has
* changed and updates cpuset accordingly. The top_cpuset is always
* synchronized to cpu_active_mask and N_MEMORY, which is necessary in
* order to make cpusets transparent (of no effect) on systems that are
* actively using CPU hotplug but making no active use of cpusets.
*
* Non-root cpusets are only affected by offlining. If any CPUs or memory
* nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
* all descendants.
*
* Note that CPU offlining during suspend is ignored. We don't modify
* cpusets across suspend/resume cycles at all.
*
* CPU / memory hotplug is handled synchronously.
*/
static void cpuset_handle_hotplug(void)
{
static cpumask_t new_cpus;
static nodemask_t new_mems;
bool cpus_updated, mems_updated;
bool on_dfl = is_in_v2_mode();
struct tmpmasks tmp, *ptmp = NULL;
if (on_dfl && !alloc_tmpmasks(&tmp))
ptmp = &tmp;
lockdep_assert_cpus_held();
mutex_lock(&cpuset_mutex);
/* fetch the available cpus/mems and find out which changed how */
cpumask_copy(&new_cpus, cpu_active_mask);
new_mems = node_states[N_MEMORY];
/*
* If subpartitions_cpus is populated, it is likely that the check
* below will produce a false positive on cpus_updated when the cpu
* list isn't changed. It is extra work, but it is better to be safe.
*/
cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
!cpumask_empty(subpartitions_cpus);
mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
/* For v1, synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
cpuset_force_rebuild();
spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
* Make sure that CPUs allocated to child partitions
* do not show up in effective_cpus. If no CPU is left,
* we clear the subpartitions_cpus & let the child partitions
* fight for the CPUs again.
*/
if (!cpumask_empty(subpartitions_cpus)) {
if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
top_cpuset.nr_subparts = 0;
cpumask_clear(subpartitions_cpus);
} else {
cpumask_andnot(&new_cpus, &new_cpus,
subpartitions_cpus);
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}
/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
spin_unlock_irq(&callback_lock);
cpuset_update_tasks_nodemask(&top_cpuset);
}
mutex_unlock(&cpuset_mutex);
/* if cpus or mems changed, we need to propagate to descendants */
if (cpus_updated || mems_updated) {
struct cpuset *cs;
struct cgroup_subsys_state *pos_css;
rcu_read_lock();
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
if (cs == &top_cpuset || !css_tryget_online(&cs->css))
continue;
rcu_read_unlock();
cpuset_hotplug_update_tasks(cs, ptmp);
rcu_read_lock();
css_put(&cs->css);
}
rcu_read_unlock();
}
/* rebuild sched domains if necessary */
if (force_sd_rebuild)
rebuild_sched_domains_cpuslocked();
free_tmpmasks(ptmp);
}
void cpuset_update_active_cpus(void)
{
/*
* We're inside a cpu hotplug critical region which usually nests
* inside cgroup synchronization; hotplug processing is handled
* synchronously here by calling cpuset_handle_hotplug() directly.
*/
cpuset_handle_hotplug();
}
/*
* Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
* Call this routine anytime after node_states[N_MEMORY] changes.
* See cpuset_update_active_cpus() for CPU hotplug handling.
*/
static int cpuset_track_online_nodes(struct notifier_block *self,
unsigned long action, void *arg)
{
cpuset_handle_hotplug();
return NOTIFY_OK;
}
/**
* cpuset_init_smp - initialize cpus_allowed
*
* Description: Finish top cpuset after cpu, node maps are initialized
*/
void __init cpuset_init_smp(void)
{
/*
* cpus_allowed/mems_allowed set to v2 values in the initial
* cpuset_bind() call will be reset to v1 values in another
* cpuset_bind() call when v1 cpuset is mounted.
*/
top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
top_cpuset.effective_mems = node_states[N_MEMORY];
hotplug_node_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
BUG_ON(!cpuset_migrate_mm_wq);
}
/**
* cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
* @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
* @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
*
* Description: Returns the cpumask_var_t cpus_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
* subset of cpu_active_mask, even if this means going outside the
* task's cpuset, except when the task is in the top cpuset.
**/
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;
struct cpuset *cs;
spin_lock_irqsave(&callback_lock, flags);
cs = task_cs(tsk);
if (cs != &top_cpuset)
guarantee_active_cpus(tsk, pmask);
/*
* Tasks in the top cpuset won't get updates to their cpumasks
* when a hotplug online/offline event happens. So we include all
* offline cpus in the allowed cpu list.
*/
if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
/*
* We first exclude cpus allocated to partitions. If there is no
* allowable online cpu left, we fall back to all possible cpus.
*/
cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
if (!cpumask_intersects(pmask, cpu_active_mask))
cpumask_copy(pmask, possible_mask);
}
spin_unlock_irqrestore(&callback_lock, flags);
}
/**
* cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
* @tsk: pointer to task_struct with which the scheduler is struggling
*
* Description: In the case that the scheduler cannot find an allowed cpu in
* tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
* mode however, this value is the same as task_cs(tsk)->effective_cpus,
* which will not contain a sane cpumask during cases such as cpu hotplugging.
* This is the absolute last resort for the scheduler and it is only used if
* _every_ other avenue has been traveled.
*
* Returns true if the affinity of @tsk was changed, false otherwise.
**/
bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
const struct cpumask *cs_mask;
bool changed = false;
rcu_read_lock();
cs_mask = task_cs(tsk)->cpus_allowed;
if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
do_set_cpus_allowed(tsk, cs_mask);
changed = true;
}
rcu_read_unlock();
/*
* We own tsk->cpus_allowed, nobody can change it under us.
*
* But we used cs && cs->cpus_allowed lockless and thus can
* race with cgroup_attach_task() or update_cpumask() and get
* the wrong tsk->cpus_allowed. However, both cases imply the
* subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
* which takes task_rq_lock().
*
* If we are called after it dropped the lock we must see all
* changes in task_cs()->cpus_allowed. Otherwise we can temporarily
* set any mask even if it is not right from task_cs() pov,
* the pending set_cpus_allowed_ptr() will fix things.
*
* select_fallback_rq() will fix things up and set cpu_possible_mask
* if required.
*/
return changed;
}
void __init cpuset_init_current_mems_allowed(void)
{
nodes_setall(current->mems_allowed);
}
/**
* cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
* @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
*
* Description: Returns the nodemask_t mems_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
* subset of node_states[N_MEMORY], even if this means going outside the
* task's cpuset.
**/
nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
nodemask_t mask;
unsigned long flags;
spin_lock_irqsave(&callback_lock, flags);
guarantee_online_mems(task_cs(tsk), &mask);
spin_unlock_irqrestore(&callback_lock, flags);
return mask;
}
/**
* cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
* @nodemask: the nodemask to be checked
*
* Are any of the nodes in the nodemask allowed in current->mems_allowed?
*/
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
return nodes_intersects(*nodemask, current->mems_allowed);
}
/*
* nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
* mem_hardwall ancestor to the specified cpuset. Call holding
* callback_lock. If no ancestor is mem_exclusive or mem_hardwall
* (an unusual configuration), then returns the root cpuset.
*/
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
cs = parent_cs(cs);
return cs;
}
/*
* cpuset_current_node_allowed - Can current task allocate on a memory node?
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
* If we're in interrupt, yes, we can always allocate. If @node is set in
* current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
* node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
* yes. If current has access to memory reserves as an oom victim, yes.
* Otherwise, no.
*
* GFP_USER allocations are marked with the __GFP_HARDWALL bit,
* and do not allow allocations outside the current tasks cpuset
* unless the task has been OOM killed.
* GFP_KERNEL allocations are not so marked, so can escape to the
* nearest enclosing hardwalled ancestor cpuset.
*
* Scanning up parent cpusets requires callback_lock. The
* __alloc_pages() routine only calls here with __GFP_HARDWALL bit
* _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
* current tasks mems_allowed came up empty on the first pass over
* the zonelist. So only GFP_KERNEL allocations, if all nodes in the
* cpuset are short of memory, might require taking the callback_lock.
*
* The first call here from mm/page_alloc:get_page_from_freelist()
* has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
* so no allocation on a node outside the cpuset is allowed (unless
* in interrupt, of course).
*
* The second pass through get_page_from_freelist() doesn't even call
* here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
* variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
* in alloc_flags. That logic and the checks below have the combined
* effect that:
* in_interrupt - any node ok (current task context irrelevant)
* GFP_ATOMIC - any node ok
* tsk_is_oom_victim - any node ok
* GFP_KERNEL - any node in enclosing hardwalled cpuset ok
* GFP_USER - only nodes in current tasks mems allowed ok.
*/
bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
{
struct cpuset *cs; /* current cpuset ancestors */
bool allowed; /* is allocation in zone z allowed? */
unsigned long flags;
if (in_interrupt())
return true;
if (node_isset(node, current->mems_allowed))
return true;
/*
* Allow tasks that have access to memory reserves because they have
* been OOM killed to get memory anywhere.
*/
if (unlikely(tsk_is_oom_victim(current)))
return true;
if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
return false;
if (current->flags & PF_EXITING) /* Let dying task have memory */
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
spin_lock_irqsave(&callback_lock, flags);
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}
bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;
bool allowed;
/*
* In v1, mem_cgroup and cpuset are unlikely to be in the same hierarchy
* and mems_allowed is likely to be empty even if we could get to it,
* so return true to avoid taking a global lock on the empty check.
*/
if (!cpuset_v2())
return true;
css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
if (!css)
return true;
/*
* Normally, accessing effective_mems would require the cpuset_mutex
* or callback_lock - but node_isset is atomic and the reference
* taken via cgroup_get_e_css is sufficient to protect css.
*
* Since this interface is intended for use by migration paths, we
* relax locking here to avoid taking global locks - while accepting
* there may be rare scenarios where the result may be inaccurate.
*
* Reclaim and migration are subject to these same race conditions, and
* cannot make strong isolation guarantees, so this is acceptable.
*/
cs = container_of(css, struct cpuset, css);
allowed = node_isset(nid, cs->effective_mems);
css_put(css);
return allowed;
}
/**
* cpuset_spread_node() - On which node to begin search for a page
* @rotor: round robin rotor
*
* If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
* tasks in a cpuset with is_spread_page or is_spread_slab set),
* and if the memory allocation used cpuset_mem_spread_node()
* to determine on which node to start looking, as it will for
* certain page cache or slab cache pages such as used for file
* system buffers and inode caches, then instead of starting on the
* local node to look for a free page, rather spread the starting
* node around the tasks mems_allowed nodes.
*
* We don't have to worry about the returned node being offline
* because "it can't happen", and even if it did, it would be ok.
*
* The routines calling guarantee_online_mems() are careful to
* only set nodes in task->mems_allowed that are online. So it
* should not be possible for the following code to return an
* offline node. But if it did, that would be ok, as this routine
* is not returning the node where the allocation must be, only
* the node where the search should start. The zonelist passed to
* __alloc_pages() will include all nodes. If the slab allocator
* is passed an offline node, it will fall back to the local node.
* See kmem_cache_alloc_node().
*/
static int cpuset_spread_node(int *rotor)
{
return *rotor = next_node_in(*rotor, current->mems_allowed);
}
/**
* cpuset_mem_spread_node() - On which node to begin search for a file page
*/
int cpuset_mem_spread_node(void)
{
if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
current->cpuset_mem_spread_rotor =
node_random(&current->mems_allowed);
return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}
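/*
 * A self-contained illustration of the rotor behaviour above (not kernel
 * code): next_node_in() picks the next set bit after *rotor and wraps
 * around, so successive calls cycle through the allowed nodes. Modelled
 * here with a plain bitmask standing in for mems_allowed:
 *
 *	#include <stdio.h>
 *
 *	static int next_allowed(int prev, unsigned long allowed, int nbits)
 *	{
 *		for (int i = 1; i <= nbits; i++) {
 *			int node = (prev + i) % nbits;
 *
 *			if (allowed & (1UL << node))
 *				return node;
 *		}
 *		return prev;		// no allowed node found
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long allowed = 0xA;	// nodes 1 and 3 allowed
 *		int rotor = 0;
 *
 *		for (int i = 0; i < 4; i++) {
 *			rotor = next_allowed(rotor, allowed, 4);
 *			printf("%d ", rotor);	// prints: 1 3 1 3
 *		}
 *		return 0;
 *	}
 */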
/**
* cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
* @tsk1: pointer to task_struct of some task.
* @tsk2: pointer to task_struct of some other task.
*
* Description: Return true if @tsk1's mems_allowed intersects the
* mems_allowed of @tsk2. Used by the OOM killer to determine if
* one of the task's memory usage might impact the memory available
* to the other.
**/
int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2)
{
return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
/**
* cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
*
* Description: Prints current's name, cpuset name, and cached copy of its
* mems_allowed to the kernel log.
*/
void cpuset_print_current_mems_allowed(void)
{
struct cgroup *cgrp;
rcu_read_lock();
cgrp = task_cs(current)->css.cgroup;
pr_cont(",cpuset=");
pr_cont_cgroup_name(cgrp);
pr_cont(",mems_allowed=%*pbl",
nodemask_pr_args(&current->mems_allowed));
rcu_read_unlock();
}
/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Mems_allowed:\t%*pb\n",
nodemask_pr_args(&task->mems_allowed));
seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
nodemask_pr_args(&task->mems_allowed));
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Scatterlist Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com)
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
*/
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount_types.h>
#include <linux/slab.h>
#include <linux/types.h>
/*
* Algorithm masks and types.
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_SIG 0x00000007
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
#define CRYPTO_ALG_DEAD 0x00000020
#define CRYPTO_ALG_DYING 0x00000040
#define CRYPTO_ALG_ASYNC 0x00000080
/*
* Set if the algorithm (or an algorithm which it uses) requires another
* algorithm of the same type to handle corner cases.
*/
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
* Set if the algorithm data structure should be duplicated into
* kmalloc memory before registration. This is useful for hardware
* that can be disconnected at will. Do not use this if the data
* structure is embedded into a bigger one. Duplicate the overall
* data structure in the driver in that case.
*/
#define CRYPTO_ALG_DUP_FIRST 0x00000200
/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
*/
#define CRYPTO_ALG_TESTED 0x00000400
/*
* Set if the algorithm is an instance that is built from templates.
*/
#define CRYPTO_ALG_INSTANCE 0x00000800
/* Set this bit if the algorithm provided is hardware accelerated but
* not directly available to userspace via an instruction set or similar.
*/
#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
/*
* Mark a cipher as a service implementation only usable by another
* cipher and never by a normal user of the kernel crypto API
*/
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
* Set if the algorithm has a ->setkey() method but can be used without
* calling it first, i.e. there is a default key.
*/
#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
/*
* Don't trigger module loading
*/
#define CRYPTO_NOLOAD 0x00008000
/*
* The algorithm may allocate memory during request processing, i.e. during
* encryption, decryption, or hashing. Users can request an algorithm with this
* flag unset if they can't handle memory allocation failures.
*
* This flag is currently only implemented for algorithms of type "skcipher",
* "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
* have this flag set even if they allocate memory.
*
* In some edge cases, algorithms can allocate memory regardless of this flag.
* To avoid these cases, users must obey the following usage constraints:
* skcipher:
* - The IV buffer and all scatterlist elements must be aligned to the
* algorithm's alignmask.
* - If the data were to be divided into chunks of size
* crypto_skcipher_walksize() (with any remainder going at the end), no
* chunk can cross a page boundary or a scatterlist element boundary.
* aead:
* - The IV buffer and all scatterlist elements must be aligned to the
* algorithm's alignmask.
* - The first scatterlist element must contain all the associated data,
* and its pages must be !PageHighMem.
* - If the plaintext/ciphertext were to be divided into chunks of size
* crypto_aead_walksize() (with the remainder going at the end), no chunk
* can cross a page boundary or a scatterlist element boundary.
* ahash:
* - crypto_ahash_finup() must not be used unless the algorithm implements
* ->finup() natively.
*/
#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
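/*
 * To request an implementation with this flag clear, pass the flag in the
 * mask argument of the allocation call. A sketch, assuming the skcipher
 * API from <crypto/skcipher.h>:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	// type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: only select an
 *	// implementation that does not allocate memory per request.
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */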
/*
* Mark an algorithm as a service implementation only usable by a
* template and never by a normal user of the kernel crypto API.
* This is intended to be used by algorithms that are themselves
* not FIPS-approved but may instead be used to implement parts of
* a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
*/
#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
/* Set if the algorithm supports virtual addresses. */
#define CRYPTO_ALG_REQ_VIRT 0x00040000
/* Set if the algorithm cannot have a fallback (e.g., phmac). */
#define CRYPTO_ALG_NO_FALLBACK 0x00080000
/* The high bits 0xff000000 are reserved for type-specific flags. */
/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
#define CRYPTO_TFM_REQ_ON_STACK 0x00000800
/*
* Miscellaneous stuff.
*/
#define CRYPTO_MAX_ALG_NAME 128
/*
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
* faults for C data types. On architectures that support non-cache coherent
* DMA, such as ARM or arm64, it also takes into account the minimal alignment
* that is required to ensure that the context struct member does not share any
* cachelines with the rest of the struct. This is needed to ensure that cache
* maintenance for non-coherent DMA (cache invalidation in particular) does not
* affect data that may be accessed by the CPU concurrently.
*/
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
struct crypto_tfm;
struct crypto_type;
struct module;
typedef void (*crypto_completion_t)(void *req, int err);
/**
* DOC: Block Cipher Context Data Structures
*
* These data structures define the operating context for each block cipher
* type.
*/
struct crypto_async_request {
struct list_head list;
crypto_completion_t complete;
void *data;
struct crypto_tfm *tfm;
u32 flags;
};
/**
* DOC: Block Cipher Algorithm Definitions
*
* These data structures define modular crypto algorithm implementations,
* managed via crypto_register_alg() and crypto_unregister_alg().
*/
/**
* struct cipher_alg - single-block symmetric ciphers definition
* @cia_min_keysize: Minimum key size supported by the transformation. This is
* the smallest key length supported by this transformation
* algorithm. This must be set to one of the pre-defined
* values as this is not hardware specific. Possible values
* for this field can be found via git grep "_MIN_KEY_SIZE"
* include/crypto/
* @cia_max_keysize: Maximum key size supported by the transformation. This is
* the largest key length supported by this transformation
* algorithm. This must be set to one of the pre-defined values
* as this is not hardware specific. Possible values for this
* field can be found via git grep "_MAX_KEY_SIZE"
* include/crypto/
* @cia_setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
* function does modify the transformation context. This function
* can be called multiple times during the existence of the
* transformation object, so one must make sure the key is properly
* reprogrammed into the hardware. This function is also
* responsible for checking the key length for validity.
* @cia_encrypt: Encrypt a single block. This function is used to encrypt a
* single block of data, which must be @cra_blocksize big. This
* always operates on a full @cra_blocksize and it is not possible
* to encrypt a block of smaller size. The supplied buffers must
* therefore also be at least of @cra_blocksize size. Both the
* input and output buffers are always aligned to @cra_alignmask.
* In case either of the input or output buffer supplied by user
* of the crypto API is not aligned to @cra_alignmask, the crypto
* API will re-align the buffers. The re-alignment means that a
* new buffer will be allocated, the data will be copied into the
* new buffer, then the processing will happen on the new buffer,
* then the data will be copied back into the original buffer and
* finally the new buffer will be freed. In case a software
* fallback was put in place in the @cra_init call, this function
* might need to use the fallback if the algorithm doesn't support
* all of the key sizes. In case the key was stored in
* transformation context, the key might need to be re-programmed
* into the hardware in this function. This function shall not
* modify the transformation context, as this function may be
* called in parallel with the same transformation object.
* @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
* @cia_encrypt, and the conditions are exactly the same.
*
* All fields are mandatory and must be filled.
*/
struct cipher_alg {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
#define cra_cipher cra_u.cipher
/**
* struct crypto_alg - definition of a cryptographic cipher algorithm
* @cra_flags: Flags describing this transformation. See include/linux/crypto.h
* CRYPTO_ALG_* flags for the flags which go in here. Those are
* used for fine-tuning the description of the transformation
* algorithm.
* @cra_blocksize: Minimum block size of this transformation. The size in bytes
* of the smallest possible unit which can be transformed with
* this algorithm. The users must respect this value.
* In case of HASH transformation, it is possible for a smaller
* block than @cra_blocksize to be passed to the crypto API for
* transformation, in case of any other transformation type, an
* error will be returned upon any attempt to transform smaller
* than @cra_blocksize chunks.
* @cra_ctxsize: Size of the operational context of the transformation. This
* value informs the kernel crypto API about the memory size
* needed to be allocated for the transformation context.
* @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
* 1 less than the alignment, in bytes, that the algorithm
* implementation requires for input and output buffers. When
* the crypto API is invoked with buffers that are not aligned
* to this alignment, the crypto API automatically utilizes
* appropriately aligned temporary buffers to comply with what
* the algorithm needs. (For scatterlists this happens only if
* the algorithm uses the skcipher_walk helper functions.) This
* misalignment handling carries a performance penalty, so it is
* preferred that algorithms do not set a nonzero alignmask.
* Also, crypto API users may wish to allocate buffers aligned
* to the alignmask of the algorithm being used, in order to
* avoid the API having to realign them. Note: the alignmask is
* not supported for hash algorithms and is always 0 for them.
* @cra_reqsize: Size of the request context for this algorithm.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
* @cra_priority.
* @cra_name: Generic name (usable by multiple implementations) of the
* transformation algorithm. This is the name of the transformation
* itself. This field is used by the kernel when looking up the
* providers of particular transformation.
* @cra_driver_name: Unique name of the transformation provider. This is the
* name of the provider of the transformation. This can be any
* arbitrary value, but in the usual case, this contains the
* name of the chip or provider and the name of the
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
* transformation types. There are multiple options, such as
* &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher.
* @cra_u: Callbacks implementing the transformation. This is a union of
* multiple structures. Depending on the type of transformation selected
* by @cra_type and @cra_flags above, the associated structure must be
* filled with callbacks. This field might be empty. This is the case
* for ahash, shash.
* @cra_init: Deprecated, do not use.
* @cra_exit: Deprecated, do not use.
* @cra_u.cipher: Union member which contains a single-block symmetric cipher
* definition. See @struct @cipher_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
* @cra_list: internally used
* @cra_users: internally used
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
*/
struct crypto_alg {
struct list_head cra_list;
struct list_head cra_users;
u32 cra_flags;
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
unsigned int cra_reqsize;
int cra_priority;
refcount_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
const struct crypto_type *cra_type;
union {
struct cipher_alg cipher;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
void (*cra_exit)(struct crypto_tfm *tfm);
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;
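/*
 * A minimal registration sketch for a single-block cipher built from the
 * fields above (hypothetical "demo" driver; struct demo_ctx and the
 * setkey/encrypt/decrypt bodies are placeholders, and the registration
 * helpers come from <crypto/algapi.h>):
 *
 *	static struct crypto_alg demo_alg = {
 *		.cra_name		= "demo-cipher",
 *		.cra_driver_name	= "demo-cipher-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct demo_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_cipher		= {
 *			.cia_min_keysize	= 16,
 *			.cia_max_keysize	= 32,
 *			.cia_setkey		= demo_setkey,
 *			.cia_encrypt		= demo_encrypt,
 *			.cia_decrypt		= demo_decrypt,
 *		},
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return crypto_register_alg(&demo_alg);
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		crypto_unregister_alg(&demo_alg);
 *	}
 */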
/*
* A helper struct for waiting for completion of async crypto ops
*/
struct crypto_wait {
struct completion completion;
int err;
};
/*
* Macro for declaring a crypto op async wait object on stack
*/
#define DECLARE_CRYPTO_WAIT(_wait) \
struct crypto_wait _wait = { \
COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
/*
* Async ops completion helper functions
*/
void crypto_req_done(void *req, int err);
static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
switch (err) {
case -EINPROGRESS:
case -EBUSY:
wait_for_completion(&wait->completion);
reinit_completion(&wait->completion);
err = wait->err;
break;
}
return err;
}
static inline void crypto_init_wait(struct crypto_wait *wait)
{
init_completion(&wait->completion);
}
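/*
 * Typical synchronous-wait usage of the helpers above (sketch; the request
 * setup calls belong to the respective algorithm API, here skcipher, and
 * req/src/dst/len/iv are assumed to be set up by the caller):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	skcipher_request_set_callback(req,
 *				      CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *
 *	// Blocks until the (possibly asynchronous) operation completes and
 *	// returns its final status.
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */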
/*
* Algorithm query interface.
*/
int crypto_has_alg(const char *name, u32 type, u32 mask);
/*
* Transforms: user-instantiated objects which encapsulate algorithms
* and core processing logic. Managed via crypto_alloc_*() and
* crypto_free_*(), as well as the various helpers below.
*/
struct crypto_tfm {
refcount_t refcnt;
u32 crt_flags;
int node;
struct crypto_tfm *fb;
void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
/*
* Transform user interface.
*/
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
return crypto_destroy_tfm(tfm, tfm);
}
/*
* Transform helpers which query the underlying algorithm.
*/
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_name;
}
static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_driver_name;
}
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
}
static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_alignmask;
}
static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_reqsize;
}
static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
return tfm->crt_flags;
}
static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
tfm->crt_flags |= flags;
}
static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
tfm->crt_flags &= ~flags;
}
static inline unsigned int crypto_tfm_ctx_alignment(void)
{
struct crypto_tfm *tfm;
return __alignof__(tfm->__crt_ctx);
}
static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}
static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}
static inline void crypto_request_set_callback(
struct crypto_async_request *req, u32 flags,
crypto_completion_t compl, void *data)
{
u32 keep = CRYPTO_TFM_REQ_ON_STACK;
req->complete = compl;
req->data = data;
req->flags &= keep;
req->flags |= flags & ~keep;
}
static inline void crypto_request_set_tfm(struct crypto_async_request *req,
struct crypto_tfm *tfm)
{
req->tfm = tfm;
req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}
struct crypto_async_request *crypto_request_clone(
struct crypto_async_request *req, size_t total, gfp_t gfp);
static inline void crypto_stack_request_init(struct crypto_async_request *req,
struct crypto_tfm *tfm)
{
req->flags = 0;
crypto_request_set_tfm(req, tfm);
req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}
#endif /* _LINUX_CRYPTO_H */
// SPDX-License-Identifier: GPL-2.0
/*
* class.c - basic device class management
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2003-2004 Greg Kroah-Hartman
* Copyright (c) 2003-2004 IBM Corp.
*/
#include <linux/device/class.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "base.h"
/* /sys/class */
static struct kset *class_kset;
#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
/**
* class_to_subsys - Turn a struct class into a struct subsys_private
*
* @class: pointer to the struct class to look up
*
* The driver core internals need to work on the subsys_private structure, not
* the external struct class pointer. This function walks the list of
* registered classes in the system and finds the matching one and returns the
* internal struct subsys_private that relates to that class.
*
* Note, the reference count of the return value is INCREMENTED if it is not
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
struct subsys_private *class_to_subsys(const struct class *class)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
if (!class || !class_kset)
return NULL;
spin_lock(&class_kset->list_lock);
if (list_empty(&class_kset->list))
goto done;
list_for_each_entry(kobj, &class_kset->list, entry) {
struct kset *kset = container_of(kobj, struct kset, kobj);
sp = container_of_const(kset, struct subsys_private, subsys);
if (sp->class == class)
goto done;
}
sp = NULL;
done:
sp = subsys_get(sp);
spin_unlock(&class_kset->list_lock);
return sp;
}
static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct class_attribute *class_attr = to_class_attr(attr);
struct subsys_private *cp = to_subsys_private(kobj);
ssize_t ret = -EIO;
if (class_attr->show)
ret = class_attr->show(cp->class, class_attr, buf);
return ret;
}
static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct class_attribute *class_attr = to_class_attr(attr);
struct subsys_private *cp = to_subsys_private(kobj);
ssize_t ret = -EIO;
if (class_attr->store)
ret = class_attr->store(cp->class, class_attr, buf, count);
return ret;
}
static void class_release(struct kobject *kobj)
{
struct subsys_private *cp = to_subsys_private(kobj);
const struct class *class = cp->class;
pr_debug("class '%s': release.\n", class->name);
if (class->class_release)
class->class_release(class);
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
lockdep_unregister_key(&cp->lock_key);
kfree(cp);
}
static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj)
{
const struct subsys_private *cp = to_subsys_private(kobj);
const struct class *class = cp->class;
return class->ns_type;
}
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
};
static const struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
.child_ns_type = class_child_ns_type,
};
int class_create_file_ns(const struct class *cls, const struct class_attribute *attr,
const void *ns)
{
struct subsys_private *sp = class_to_subsys(cls);
int error;
if (!sp)
return -EINVAL;
error = sysfs_create_file_ns(&sp->subsys.kobj, &attr->attr, ns);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(class_create_file_ns);
void class_remove_file_ns(const struct class *cls, const struct class_attribute *attr,
const void *ns)
{
struct subsys_private *sp = class_to_subsys(cls);
if (!sp)
return;
sysfs_remove_file_ns(&sp->subsys.kobj, &attr->attr, ns);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_remove_file_ns);
static struct device *klist_class_to_dev(struct klist_node *n)
{
struct device_private *p = to_device_private_class(n);
return p->device;
}
static void klist_class_dev_get(struct klist_node *n)
{
struct device *dev = klist_class_to_dev(n);
get_device(dev);
}
static void klist_class_dev_put(struct klist_node *n)
{
struct device *dev = klist_class_to_dev(n);
put_device(dev);
}
int class_register(const struct class *cls)
{
struct subsys_private *cp;
struct lock_class_key *key;
int error;
pr_debug("device class '%s': registering\n", cls->name);
if (cls->ns_type && !cls->namespace) {
pr_err("%s: class '%s' does not have namespace\n",
__func__, cls->name);
return -EINVAL;
}
if (!cls->ns_type && cls->namespace) {
pr_err("%s: class '%s' does not have ns_type\n",
__func__, cls->name);
return -EINVAL;
}
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
INIT_LIST_HEAD(&cp->interfaces);
kset_init(&cp->glue_dirs);
key = &cp->lock_key;
lockdep_register_key(key);
__mutex_init(&cp->mutex, "subsys mutex", key);
error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
if (error)
goto err_out;
cp->subsys.kobj.kset = class_kset;
cp->subsys.kobj.ktype = &class_ktype;
cp->class = cls;
error = kset_register(&cp->subsys);
if (error)
goto err_out;
error = sysfs_create_groups(&cp->subsys.kobj, cls->class_groups);
if (error) {
kobject_del(&cp->subsys.kobj);
kfree_const(cp->subsys.kobj.name);
goto err_out;
}
return 0;
err_out:
lockdep_unregister_key(key);
kfree(cp);
return error;
}
EXPORT_SYMBOL_GPL(class_register);
void class_unregister(const struct class *cls)
{
struct subsys_private *sp = class_to_subsys(cls);
if (!sp)
return;
pr_debug("device class '%s': unregistering\n", cls->name);
sysfs_remove_groups(&sp->subsys.kobj, cls->class_groups);
kset_unregister(&sp->subsys);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_unregister);
static void class_create_release(const struct class *cls)
{
pr_debug("%s called for %s\n", __func__, cls->name);
kfree(cls);
}
/**
* class_create - create a struct class structure
* @name: pointer to a string for the name of this class.
*
* This is used to create a struct class pointer that can then be used
* in calls to device_create().
*
* Returns &struct class pointer on success, or ERR_PTR() on error.
*
* Note, the pointer created here is to be destroyed when finished by
* making a call to class_destroy().
*/
struct class *class_create(const char *name)
{
struct class *cls;
int retval;
cls = kzalloc(sizeof(*cls), GFP_KERNEL);
if (!cls) {
retval = -ENOMEM;
goto error;
}
cls->name = name;
cls->class_release = class_create_release;
retval = class_register(cls);
if (retval)
goto error;
return cls;
error:
kfree(cls);
return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(class_create);
/**
* class_destroy - destroys a struct class structure
* @cls: pointer to the struct class that is to be destroyed
*
* Note, the pointer to be destroyed must have been created with a call
* to class_create().
*/
void class_destroy(const struct class *cls)
{
if (IS_ERR_OR_NULL(cls))
return;
class_unregister(cls);
}
EXPORT_SYMBOL_GPL(class_destroy);
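/*
 * A typical create/destroy pairing (sketch with hypothetical names; the
 * device_create()/device_destroy() calls come from the core device API
 * and demo_devt is assumed to be allocated elsewhere):
 *
 *	static struct class *demo_class;
 *	static dev_t demo_devt;
 *
 *	static int __init demo_init(void)
 *	{
 *		demo_class = class_create("demo");
 *		if (IS_ERR(demo_class))
 *			return PTR_ERR(demo_class);
 *		// Creates /sys/class/demo/demo0 and the matching device node.
 *		device_create(demo_class, NULL, demo_devt, NULL, "demo0");
 *		return 0;
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		device_destroy(demo_class, demo_devt);
 *		class_destroy(demo_class);
 *	}
 */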
/**
* class_dev_iter_init - initialize class device iterator
* @iter: class iterator to initialize
* @class: the class we want to iterate over
* @start: the device to start iterating from, if any
* @type: device_type of the devices to iterate over, NULL for all
*
* Initialize class iterator @iter such that it iterates over devices
* of @class. If @start is set, the list iteration will start there,
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
const struct device *start, const struct device_type *type)
{
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
memset(iter, 0, sizeof(*iter));
if (!sp) {
pr_crit("%s: class %p was not registered yet\n",
__func__, class);
return;
}
if (start)
start_knode = &start->p->knode_class;
klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
iter->sp = sp;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);
/**
* class_dev_iter_next - iterate to the next device
* @iter: class iterator to proceed
*
* Proceed @iter to the next device and return it. Returns NULL if
* iteration is complete.
*
* The returned device is referenced and won't be released till the
* iterator proceeds to the next device or exits. The caller is
* free to do whatever it wants to do with the device including
* calling back into class code.
*/
struct device *class_dev_iter_next(struct class_dev_iter *iter)
{
struct klist_node *knode;
struct device *dev;
if (!iter->sp)
return NULL;
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
dev = klist_class_to_dev(knode);
if (!iter->type || iter->type == dev->type)
return dev;
}
}
EXPORT_SYMBOL_GPL(class_dev_iter_next);
/**
* class_dev_iter_exit - finish iteration
* @iter: class iterator to finish
*
* Finish an iteration. Always call this function after iteration is
* complete whether the iteration ran till the end or not.
*/
void class_dev_iter_exit(struct class_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
subsys_put(iter->sp);
}
EXPORT_SYMBOL_GPL(class_dev_iter_exit);
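/*
 * Open-coded iteration using the three calls above (sketch with a
 * hypothetical demo_class; in many cases class_for_each_device() below
 * is the more convenient wrapper):
 *
 *	struct class_dev_iter iter;
 *	struct device *dev;
 *
 *	class_dev_iter_init(&iter, demo_class, NULL, NULL);
 *	while ((dev = class_dev_iter_next(&iter)))
 *		dev_info(dev, "visited\n");	// dev stays referenced here
 *	class_dev_iter_exit(&iter);
 */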
/**
* class_for_each_device - device iterator
* @class: the class we're iterating
* @start: the device to start with in the list, if any.
* @data: data for the callback
* @fn: function to be called for each device
*
* Iterate over @class's list of devices, and call @fn for each,
* passing it @data. If @start is set, the list iteration will start
* there, otherwise if it is NULL, the iteration starts at the
* beginning of the list.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*
* @fn is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
void *data, device_iter_t fn)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
struct device *dev;
int error = 0;
if (!class)
return -EINVAL;
if (!sp) {
WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return -EINVAL;
}
class_dev_iter_init(&iter, class, start, NULL);
while ((dev = class_dev_iter_next(&iter))) {
error = fn(dev, data);
if (error)
break;
}
class_dev_iter_exit(&iter);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(class_for_each_device);
/**
* class_find_device - device iterator for locating a particular device
* @class: the class we're iterating
* @start: Device to begin with
* @data: data for the match function
* @match: function to check device
*
* This is similar to the class_for_each_device() function above, but it
* returns a reference to a device that is 'found' for later use, as
* determined by the @match callback.
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*
* Note, you will need to drop the reference with put_device() after use.
*
* @match is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
struct device *class_find_device(const struct class *class, const struct device *start,
const void *data, device_match_t match)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
struct device *dev;
if (!class)
return NULL;
if (!sp) {
WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return NULL;
}
class_dev_iter_init(&iter, class, start, NULL);
while ((dev = class_dev_iter_next(&iter))) {
if (match(dev, data)) {
get_device(dev);
break;
}
}
class_dev_iter_exit(&iter);
subsys_put(sp);
return dev;
}
EXPORT_SYMBOL_GPL(class_find_device);
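/*
 * Illustrative sketch: matching by device name with class_find_device().
 * The class pointer and the name are hypothetical; note the reference taken
 * on a match must be dropped with put_device() after use.
 */
static int __maybe_unused example_match_by_name(struct device *dev, const void *data)
{
	/* Non-zero means "found"; class_find_device() then grabs a reference. */
	return sysfs_streq(dev_name(dev), data);
}

static struct device * __maybe_unused example_find_by_name(const struct class *example_class,
							    const char *name)
{
	return class_find_device(example_class, NULL, name,
				 example_match_by_name);
}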
int class_interface_register(struct class_interface *class_intf)
{
struct subsys_private *sp;
const struct class *parent;
struct class_dev_iter iter;
struct device *dev;
if (!class_intf || !class_intf->class)
return -ENODEV;
parent = class_intf->class;
sp = class_to_subsys(parent);
if (!sp)
return -EINVAL;
/*
* Reference in sp is now incremented and will be dropped when
* the interface is removed in the call to class_interface_unregister()
*/
mutex_lock(&sp->mutex);
list_add_tail(&class_intf->node, &sp->interfaces);
if (class_intf->add_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->add_dev(dev);
class_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(class_interface_register);
void class_interface_unregister(struct class_interface *class_intf)
{
struct subsys_private *sp;
const struct class *parent = class_intf->class;
struct class_dev_iter iter;
struct device *dev;
if (!parent)
return;
sp = class_to_subsys(parent);
if (!sp)
return;
mutex_lock(&sp->mutex);
list_del_init(&class_intf->node);
if (class_intf->remove_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->remove_dev(dev);
class_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
/*
* Decrement the reference count twice, once for the class_to_subsys()
* call in the start of this function, and the second one from the
* reference increment in class_interface_register()
*/
subsys_put(sp);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_interface_unregister);
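/*
 * Illustrative sketch: a minimal class_interface that is notified about every
 * device in a class. The callback signatures follow the single-argument
 * add_dev()/remove_dev() calls made above; the class pointer would be filled
 * in with the class of interest and is left as a placeholder here.
 */
static int example_intf_add_dev(struct device *dev)
{
	dev_info(dev, "device joined the class\n");
	return 0;
}

static void example_intf_remove_dev(struct device *dev)
{
	dev_info(dev, "device left the class\n");
}

static struct class_interface example_intf = {
	/* .class = &example_class, */
	.add_dev	= example_intf_add_dev,
	.remove_dev	= example_intf_remove_dev,
};
/*
 * class_interface_register(&example_intf) replays .add_dev() for devices that
 * already exist; class_interface_unregister() replays .remove_dev().
 */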
ssize_t show_class_attr_string(const struct class *class,
const struct class_attribute *attr, char *buf)
{
struct class_attribute_string *cs;
cs = container_of(attr, struct class_attribute_string, attr);
return sysfs_emit(buf, "%s\n", cs->str);
}
EXPORT_SYMBOL_GPL(show_class_attr_string);
struct class_compat {
struct kobject *kobj;
};
/**
* class_compat_register - register a compatibility class
* @name: the name of the class
*
* Compatibility classes are meant as a temporary user-space compatibility
* workaround when converting a family of class devices to bus devices.
*/
struct class_compat *class_compat_register(const char *name)
{
struct class_compat *cls;
cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
if (!cls)
return NULL;
cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
if (!cls->kobj) {
kfree(cls);
return NULL;
}
return cls;
}
EXPORT_SYMBOL_GPL(class_compat_register);
/**
* class_compat_unregister - unregister a compatibility class
* @cls: the class to unregister
*/
void class_compat_unregister(struct class_compat *cls)
{
kobject_put(cls->kobj);
kfree(cls);
}
EXPORT_SYMBOL_GPL(class_compat_unregister);
/**
* class_compat_create_link - create a compatibility class device link to
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
*/
int class_compat_create_link(struct class_compat *cls, struct device *dev)
{
return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
/**
* class_compat_remove_link - remove a compatibility class device link to
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
*/
void class_compat_remove_link(struct class_compat *cls, struct device *dev)
{
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
/**
* class_is_registered - determine if at this moment in time, a class is
* registered in the driver core or not.
* @class: the class to check
*
* Returns a boolean to state if the class is registered in the driver core
* or not. Note that the value could switch right after this call is made,
* so only use this in places where you "know" it is safe to do so (usually
* to determine if the specific class has been registered yet or not).
*
* Be careful in using this.
*/
bool class_is_registered(const struct class *class)
{
struct subsys_private *sp = class_to_subsys(class);
bool is_initialized = false;
if (sp) {
is_initialized = true;
subsys_put(sp);
}
return is_initialized;
}
EXPORT_SYMBOL_GPL(class_is_registered);
int __init classes_init(void)
{
class_kset = kset_create_and_add("class", NULL, NULL);
if (!class_kset)
return -ENOMEM;
return 0;
}
/*
* INETPEER - A storage for permanent information about peers
*
* This source is covered by the GNU GPL, the same as all kernel sources.
*
* Authors: Andrey V. Savochkin <saw@msu.ru>
*/
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
/*
* Theory of operations.
* We keep one entry for each peer IP address. Each node contains long-lived
* information about the peer which doesn't depend on routes.
*
* Nodes are removed only when their reference counter goes to 0.
* Once that has happened, a node may be removed after a sufficient amount of
* time has passed since its last use. The least-recently-used entries can
* also be removed if the pool is overloaded, i.e. if the total number of
* entries is greater than or equal to the threshold.
*
* Node pool is organised as an RB tree.
* Such an implementation has been chosen not just for fun. It's a way to
* prevent easy and efficient DoS attacks by creating hash collisions. A huge
* amount of long living nodes in a single hash slot would significantly delay
* lookups performed with disabled BHs.
*
* Serialisation issues.
* 1. Nodes may appear in the tree only with the pool lock held.
* 2. Nodes may disappear from the tree only with the pool lock held
* AND reference count being 0.
* 3. Global variable peer_total is modified under the pool lock.
* 4. struct inet_peer fields modification:
* rb_node: pool lock
* refcnt: atomically against modifications on other CPU;
* usually under some other lock to prevent node disappearing
* daddr: unchangeable
*/
static struct kmem_cache *peer_cachep __ro_after_init;
void inet_peer_base_init(struct inet_peer_base *bp)
{
bp->rb_root = RB_ROOT;
seqlock_init(&bp->lock);
bp->total = 0;
}
EXPORT_IPV6_MOD_GPL(inet_peer_base_init);
#define PEER_MAX_GC 32
/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly; /* start to throw entries more
* aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
u64 nr_entries;
/* 1% of physical memory */
nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));
inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
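/*
 * Worked example (illustrative): with 4 GiB of RAM and an L1-cache-aligned
 * struct inet_peer of, say, 192 bytes, 1% of memory corresponds to
 * (4 << 30) / (100 * 192) ~= 223696 entries, which clamp_val() above caps
 * at 65536 + 128 = 65664. Very small systems are floored at 4096 entries.
 */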
/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
struct inet_peer_base *base,
unsigned int seq,
struct inet_peer *gc_stack[],
unsigned int *gc_cnt,
struct rb_node **parent_p,
struct rb_node ***pp_p)
{
struct rb_node **pp, *parent, *next;
struct inet_peer *p;
u32 now;
pp = &base->rb_root.rb_node;
parent = NULL;
while (1) {
int cmp;
next = rcu_dereference_raw(*pp);
if (!next)
break;
parent = next;
p = rb_entry(parent, struct inet_peer, rb_node);
cmp = inetpeer_addr_cmp(daddr, &p->daddr);
if (cmp == 0) {
now = jiffies;
if (READ_ONCE(p->dtime) != now)
WRITE_ONCE(p->dtime, now);
return p;
}
if (gc_stack) {
if (*gc_cnt < PEER_MAX_GC)
gc_stack[(*gc_cnt)++] = p;
} else if (unlikely(read_seqretry(&base->lock, seq))) {
break;
}
if (cmp == -1)
pp = &next->rb_left;
else
pp = &next->rb_right;
}
*parent_p = parent;
*pp_p = pp;
return NULL;
}
/* perform garbage collect on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
struct inet_peer *gc_stack[],
unsigned int gc_cnt)
{
int peer_threshold, peer_maxttl, peer_minttl;
struct inet_peer *p;
__u32 delta, ttl;
int i;
peer_threshold = READ_ONCE(inet_peer_threshold);
peer_maxttl = READ_ONCE(inet_peer_maxttl);
peer_minttl = READ_ONCE(inet_peer_minttl);
if (base->total >= peer_threshold)
ttl = 0; /* be aggressive */
else
ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
base->total / peer_threshold * HZ;
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
delta = (__u32)jiffies - READ_ONCE(p->dtime);
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
gc_stack[i] = NULL;
}
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
if (p) {
rb_erase(&p->rb_node, &base->rb_root);
base->total--;
kfree_rcu(p, rcu);
}
}
}
/* Must be called under RCU : No refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
const struct inetpeer_addr *daddr)
{
struct inet_peer *p, *gc_stack[PEER_MAX_GC];
struct rb_node **pp, *parent;
unsigned int gc_cnt, seq;
/* Attempt a lockless lookup first.
* Because of a concurrent writer, we might not find an existing entry.
*/
seq = read_seqbegin(&base->lock);
p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
if (p)
return p;
/* Retry an exact lookup, this time taking the lock first.
* At least the nodes should now be hot in our cache.
*/
parent = NULL;
write_seqlock_bh(&base->lock);
gc_cnt = 0;
p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
if (!p) {
p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
if (p) {
p->daddr = *daddr;
p->dtime = (__u32)jiffies;
refcount_set(&p->refcnt, 1);
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
p->n_redirects = 0;
/* 60*HZ is arbitrary, but chosen high enough so that the first
* calculation of tokens is at its maximum.
*/
p->rate_last = jiffies - 60*HZ;
rb_link_node(&p->rb_node, parent, pp);
rb_insert_color(&p->rb_node, &base->rb_root);
base->total++;
}
}
if (gc_cnt)
inet_peer_gc(base, gc_stack, gc_cnt);
write_sequnlock_bh(&base->lock);
return p;
}
EXPORT_IPV6_MOD_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
if (refcount_dec_and_test(&p->refcnt))
kfree_rcu(p, rcu);
}
/*
* Check transmit rate limitation for given message.
* The rate information is held in the inet_peer entries now.
* This function is generic and could be used for other purposes
* too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
*
* Note that the same inet_peer fields are modified by functions in
* route.c too, but these work for packet destinations while xrlim_allow
* works for icmp destinations. This means the rate limiting information
* for one "ip object" is shared - and these ICMPs are twice limited:
* by source and by destination.
*
* RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
* SHOULD allow setting of rate limits
*
* Shared between ICMPv4 and ICMPv6.
*/
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
unsigned long now, token, otoken, delta;
bool rc = false;
if (!peer)
return true;
token = otoken = READ_ONCE(peer->rate_tokens);
now = jiffies;
delta = now - READ_ONCE(peer->rate_last);
if (delta) {
WRITE_ONCE(peer->rate_last, now);
token += delta;
if (token > XRLIM_BURST_FACTOR * timeout)
token = XRLIM_BURST_FACTOR * timeout;
}
if (token >= timeout) {
token -= timeout;
rc = true;
}
if (token != otoken)
WRITE_ONCE(peer->rate_tokens, token);
return rc;
}
EXPORT_IPV6_MOD(inet_peer_xrlim_allow);
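/*
 * Illustrative sketch: how an ICMP-style caller might use the token bucket
 * above. @timeout is the minimum spacing between messages in jiffies; up to
 * XRLIM_BURST_FACTOR messages can be sent back-to-back as a burst. The
 * one-second limit chosen here is a hypothetical example, not a kernel
 * default.
 */
static bool __maybe_unused example_icmp_may_send(struct inet_peer *peer)
{
	/* A NULL peer means "no rate state available", which allows sending. */
	return inet_peer_xrlim_allow(peer, HZ);
}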
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
struct rb_node *p = rb_first(&base->rb_root);
while (p) {
struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);
p = rb_next(p);
rb_erase(&peer->rb_node, &base->rb_root);
inet_putpeer(peer);
cond_resched();
}
base->total = 0;
}
EXPORT_IPV6_MOD(inetpeer_invalidate_tree);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/memcontrol.h>
/*
* pcpu_block_md is the metadata block struct.
* Each chunk's bitmap is split into a number of full blocks.
* All units are in terms of bits.
*
* The scan hint is the largest known contiguous area before the contig hint.
* It is not necessarily the actual largest contig hint though. There is an
* invariant that the scan_hint_start > contig_hint_start iff
* scan_hint == contig_hint. This is necessary because when scanning forward,
* we don't know if a new contig hint would be better than the current one.
*/
struct pcpu_block_md {
int scan_hint; /* scan hint for block */
int scan_hint_start; /* block relative starting
position of the scan hint */
int contig_hint; /* contig hint for block */
int contig_hint_start; /* block relative starting
position of the contig hint */
int left_free; /* size of free space along
the left side of the block */
int right_free; /* size of free space along
the right side of the block */
int first_free; /* block position of first free */
int nr_bits; /* total bits responsible for */
};
struct pcpuobj_ext {
#ifdef CONFIG_MEMCG
struct obj_cgroup *cgroup;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
union codetag_ref tag;
#endif
};
#if defined(CONFIG_MEMCG) || defined(CONFIG_MEM_ALLOC_PROFILING)
#define NEED_PCPUOBJ_EXT
#endif
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
int nr_alloc; /* # of allocations */
size_t max_alloc_size; /* largest allocation size */
#endif
struct list_head list; /* linked to pcpu_slot lists */
int free_bytes; /* free bytes in the chunk */
struct pcpu_block_md chunk_md;
unsigned long *bound_map; /* boundary map */
/*
* base_addr is the base address of this chunk.
* To reduce false sharing, the current layout is optimized to make sure
* that base_addr sits in a different cacheline from free_bytes and
* chunk_md.
*/
void *base_addr ____cacheline_aligned_in_smp;
unsigned long *alloc_map; /* allocation map */
struct pcpu_block_md *md_blocks; /* metadata blocks */
void *data; /* chunk data */
bool immutable; /* no [de]population allowed */
bool isolated; /* isolated from active chunk
slots */
int start_offset; /* the overlap with the previous
region to have a page aligned
base_addr */
int end_offset; /* additional area required to
have the region end page
aligned */
#ifdef NEED_PCPUOBJ_EXT
struct pcpuobj_ext *obj_exts; /* vector of object cgroups */
#endif
int nr_pages; /* # of pages served by this chunk */
int nr_populated; /* # of populated pages */
int nr_empty_pop_pages; /* # of empty populated pages */
unsigned long populated[]; /* populated bitmap */
};
static inline bool need_pcpuobj_ext(void)
{
if (IS_ENABLED(CONFIG_MEM_ALLOC_PROFILING))
return true;
if (!mem_cgroup_kmem_disabled())
return true;
return false;
}
extern spinlock_t pcpu_lock;
extern struct list_head *pcpu_chunk_lists;
extern int pcpu_nr_slots;
extern int pcpu_sidelined_slot;
extern int pcpu_to_depopulate_slot;
extern int pcpu_nr_empty_pop_pages;
extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
/**
* pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
* @chunk: chunk of interest
*
* This conversion is from the number of physical pages that the chunk
* serves to the number of bitmap blocks used.
*/
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}
/**
* pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
* @pages: number of physical pages
*
* This conversion is from physical pages to the number of bits
* required in the bitmap.
*/
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}
/**
* pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
* @chunk: chunk of interest
*
* This conversion is from the number of physical pages that the chunk
* serves to the number of bits in the bitmap.
*/
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}
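/*
 * Worked example (illustrative): assuming 4 KiB pages, the default
 * PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE and PCPU_MIN_ALLOC_SIZE == 4 bytes,
 * a chunk with nr_pages == 4 has pcpu_chunk_nr_blocks() == 4 metadata
 * blocks and pcpu_chunk_map_bits() == 4 * 4096 / 4 == 4096 allocation-map
 * bits, i.e. one bit per 4-byte allocation unit.
 */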
/**
* pcpu_obj_full_size - helper to calculate size of each accounted object
* @size: size of area to allocate in bytes
*
* For each accounted object there is an extra space which is used to store
* obj_cgroup membership if kmemcg is not disabled. Charge it too.
*/
static inline size_t pcpu_obj_full_size(size_t size)
{
size_t extra_size = 0;
#ifdef CONFIG_MEMCG
if (!mem_cgroup_kmem_disabled())
extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
#endif
return size * num_possible_cpus() + extra_size;
}
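/*
 * Worked example (illustrative): on a 16-CPU, 64-bit system with kmemcg
 * enabled and PCPU_MIN_ALLOC_SIZE == 4, a 64-byte request charges
 * 64 * 16 = 1024 bytes of percpu data plus 64 / 4 * 8 = 128 bytes for the
 * obj_cgroup pointers, i.e. pcpu_obj_full_size(64) == 1152.
 */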
#ifdef CONFIG_PERCPU_STATS
#include <linux/spinlock.h>
struct percpu_stats {
u64 nr_alloc; /* lifetime # of allocations */
u64 nr_dealloc; /* lifetime # of deallocations */
u64 nr_cur_alloc; /* current # of allocations */
u64 nr_max_alloc; /* max # of live allocations */
u32 nr_chunks; /* current # of live chunks */
u32 nr_max_chunks; /* max # of live chunks */
size_t min_alloc_size; /* min allocation size */
size_t max_alloc_size; /* max allocation size */
};
extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;
/*
* For debug purposes. We don't care about the flexible array.
*/
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
/* initialize min_alloc_size to unit_size */
pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}
/*
* pcpu_stats_area_alloc - increment area allocation stats
* @chunk: the location of the area being allocated
* @size: size of area to allocate in bytes
*
* CONTEXT:
* pcpu_lock.
*/
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
lockdep_assert_held(&pcpu_lock);
pcpu_stats.nr_alloc++;
pcpu_stats.nr_cur_alloc++;
pcpu_stats.nr_max_alloc =
max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
pcpu_stats.min_alloc_size =
min(pcpu_stats.min_alloc_size, size);
pcpu_stats.max_alloc_size =
max(pcpu_stats.max_alloc_size, size);
chunk->nr_alloc++;
chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}
/*
* pcpu_stats_area_dealloc - decrement allocation stats
* @chunk: the location of the area being deallocated
*
* CONTEXT:
* pcpu_lock.
*/
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
lockdep_assert_held(&pcpu_lock);
pcpu_stats.nr_dealloc++;
pcpu_stats.nr_cur_alloc--;
chunk->nr_alloc--;
}
/*
* pcpu_stats_chunk_alloc - increment chunk stats
*/
static inline void pcpu_stats_chunk_alloc(void)
{
unsigned long flags;
spin_lock_irqsave(&pcpu_lock, flags);
pcpu_stats.nr_chunks++;
pcpu_stats.nr_max_chunks =
max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
spin_unlock_irqrestore(&pcpu_lock, flags);
}
/*
* pcpu_stats_chunk_dealloc - decrement chunk stats
*/
static inline void pcpu_stats_chunk_dealloc(void)
{
unsigned long flags;
spin_lock_irqsave(&pcpu_lock, flags);
pcpu_stats.nr_chunks--;
spin_unlock_irqrestore(&pcpu_lock, flags);
}
#else
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}
static inline void pcpu_stats_chunk_alloc(void)
{
}
static inline void pcpu_stats_chunk_dealloc(void)
{
}
#endif /* !CONFIG_PERCPU_STATS */
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Symmetric key ciphers.
*
* Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
/* Set this bit if the lskcipher operation is a continuation. */
#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001
/* Set this bit if the lskcipher operation is final. */
#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002
/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */
/* Set this bit if the skcipher operation is a continuation. */
#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001
/* Set this bit if the skcipher operation is not final. */
#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
struct scatterlist;
/**
* struct skcipher_request - Symmetric key cipher request
* @cryptlen: Number of bytes to encrypt or decrypt
* @iv: Initialisation Vector
* @src: Source SG list
* @dst: Destination SG list
* @base: Underlying async request
* @__ctx: Start of private context data
*/
struct skcipher_request {
unsigned int cryptlen;
u8 *iv;
struct scatterlist *src;
struct scatterlist *dst;
struct crypto_async_request base;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_skcipher {
unsigned int reqsize;
struct crypto_tfm base;
};
struct crypto_sync_skcipher {
struct crypto_skcipher base;
};
struct crypto_lskcipher {
struct crypto_tfm base;
};
/*
* struct skcipher_alg_common - common properties of skcipher_alg
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MIN_KEY_SIZE" include/crypto/
* @max_keysize: Maximum key size supported by the transformation. This is the
* largest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MAX_KEY_SIZE" include/crypto/
* @ivsize: IV size applicable for transformation. The consumer must provide an
* IV of exactly that size to perform the encrypt or decrypt operation.
* @chunksize: Equal to the block size except for stream ciphers such as
* CTR where it is set to the underlying block size.
* @statesize: Size of the internal state for the algorithm.
* @base: Definition of a generic crypto algorithm.
*/
#define SKCIPHER_ALG_COMMON { \
unsigned int min_keysize; \
unsigned int max_keysize; \
unsigned int ivsize; \
unsigned int chunksize; \
unsigned int statesize; \
\
struct crypto_alg base; \
}
struct skcipher_alg_common SKCIPHER_ALG_COMMON;
/**
* struct skcipher_alg - symmetric key cipher definition
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
* function does modify the transformation context. This function can
* be called multiple times during the existence of the transformation
* object, so one must make sure the key is properly reprogrammed into
* the hardware. This function is also responsible for checking the key
* length for validity. In case a software fallback was put in place in
* the @cra_init call, this function might need to use the fallback if
* the algorithm doesn't support all of the key sizes.
* @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
* the supplied scatterlist containing the blocks of data. The crypto
* API consumer is responsible for aligning the entries of the
* scatterlist properly and making sure the chunks are correctly
* sized. In case a software fallback was put in place in the
* @cra_init call, this function might need to use the fallback if
* the algorithm doesn't support all of the key sizes. In case the
* key was stored in transformation context, the key might need to be
* re-programmed into the hardware in this function. This function
* shall not modify the transformation context, as this function may
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
* @export: Export partial state of the transformation. This function dumps the
* entire state of the ongoing transformation into a provided block of
* data so it can be @import 'ed back later on. This is useful in case
* you want to save partial result of the transformation after
* processing certain amount of data and reload this partial result
* multiple times later on for multiple re-use. No data processing
* happens at this point.
* @import: Import partial state of the transformation. This function loads the
* entire state of the ongoing transformation from a provided block of
* data so the transformation can continue from this point onward. No
* data processing happens at this point.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
* after the transformation context was allocated. In case the
* cryptographic hardware has some special requirements which need to
* be handled by software, this function shall check for the precise
* requirement of the transformation and put any software fallbacks
* in place.
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
* @co: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
*/
struct skcipher_alg {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
int (*export)(struct skcipher_request *req, void *out);
int (*import)(struct skcipher_request *req, const void *in);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
unsigned int walksize;
union {
struct SKCIPHER_ALG_COMMON;
struct skcipher_alg_common co;
};
};
/**
* struct lskcipher_alg - linear symmetric key cipher definition
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
* function does modify the transformation context. This function can
* be called multiple times during the existence of the transformation
* object, so one must make sure the key is properly reprogrammed into
* the hardware. This function is also responsible for checking the key
* length for validity. In case a software fallback was put in place in
* the @cra_init call, this function might need to use the fallback if
* the algorithm doesn't support all of the key sizes.
* @encrypt: Encrypt a number of bytes. This function is used to encrypt
* the supplied data. This function shall not modify
* the transformation context, as this function may be called
* in parallel with the same transformation object. Data
* may be left over if the length is not a multiple of the block size
* and there is more to come (final == false). The number of
* left-over bytes should be returned in case of success.
* The siv field shall be as long as ivsize + statesize with
* the IV placed at the front. The state will be used by the
* algorithm internally.
* @decrypt: Decrypt a number of bytes. This is a reverse counterpart to
* @encrypt and the conditions are exactly the same.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
* after the transformation context was allocated.
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
* @co: see struct skcipher_alg_common
*/
struct lskcipher_alg {
int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src,
u8 *dst, unsigned len, u8 *siv, u32 flags);
int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src,
u8 *dst, unsigned len, u8 *siv, u32 flags);
int (*init)(struct crypto_lskcipher *tfm);
void (*exit)(struct crypto_lskcipher *tfm);
struct skcipher_alg_common co;
};
#define MAX_SYNC_SKCIPHER_REQSIZE 384
/*
* This performs a type-check against the "_tfm" argument to make sure
* all users have the correct skcipher tfm for doing on-stack requests.
*/
#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
MAX_SYNC_SKCIPHER_REQSIZE \
] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = \
(((struct skcipher_request *)__##name##_desc)->base.tfm = \
crypto_sync_skcipher_tfm((_tfm)), \
(void *)__##name##_desc)
/**
* DOC: Symmetric Key Cipher API
*
* Symmetric key cipher API is used with the ciphers of type
* CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
*
* Asynchronous cipher operations imply that the function invocation for a
* cipher request returns immediately before the completion of the operation.
* The cipher request is scheduled as a separate kernel thread and therefore
* load-balanced on the different CPUs via the process scheduler. To allow
* the kernel crypto API to inform the caller about the completion of a cipher
* request, the caller must provide a callback function. That function is
* invoked with the cipher handle when the request completes.
*
* To support the asynchronous operation, more information than just the
* cipher handle must be supplied to the kernel crypto API. That additional
* information is given by filling in the skcipher_request data structure.
*
* For the symmetric key cipher API, the state is maintained with the tfm
* cipher handle. A single tfm can be used across multiple calls and in
* parallel. For asynchronous block cipher calls, context data supplied and
* only used by the caller can be referenced in the request data structure in
* addition to the IV used for the cipher request. The maintenance of such
* state information is important for a crypto driver implementer because,
* when the callback function is invoked upon completion of the cipher
* operation, that callback function may need some information about which
* operation just finished if multiple operations were invoked in parallel.
* This state information is unused by the kernel crypto API.
*
* An illustrative usage sketch appears at the end of this header.
*/
static inline struct crypto_skcipher *__crypto_skcipher_cast(
struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_skcipher, base);
}
/**
* crypto_alloc_skcipher() - allocate symmetric key cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher cipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for an skcipher. The returned struct
* crypto_skcipher is the cipher handle that is required for any subsequent
* API invocation for that skcipher.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
u32 type, u32 mask);
/**
* crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* lskcipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for an lskcipher. The returned struct
* crypto_lskcipher is the cipher handle that is required for any subsequent
* API invocation for that lskcipher.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
u32 type, u32 mask);
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
return &tfm->base;
}
static inline struct crypto_tfm *crypto_lskcipher_tfm(
struct crypto_lskcipher *tfm)
{
return &tfm->base;
}
static inline struct crypto_tfm *crypto_sync_skcipher_tfm(
struct crypto_sync_skcipher *tfm)
{
return crypto_skcipher_tfm(&tfm->base);
}
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
*
* If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
}
static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
{
crypto_free_skcipher(&tfm->base);
}
/**
* crypto_free_lskcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
*
* If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm)
{
crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm));
}
/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
* @type: specifies the type of the skcipher
* @mask: specifies the mask for the skcipher
*
* Return: true when the skcipher is known to the kernel crypto API; false
* otherwise
*/
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask);
static inline const char *crypto_skcipher_driver_name(
struct crypto_skcipher *tfm)
{
return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
}
static inline const char *crypto_lskcipher_driver_name(
struct crypto_lskcipher *tfm)
{
return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm));
}
static inline struct skcipher_alg_common *crypto_skcipher_alg_common(
struct crypto_skcipher *tfm)
{
return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
struct skcipher_alg_common, base);
}
static inline struct skcipher_alg *crypto_skcipher_alg(
struct crypto_skcipher *tfm)
{
return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
struct skcipher_alg, base);
}
static inline struct lskcipher_alg *crypto_lskcipher_alg(
struct crypto_lskcipher *tfm)
{
return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg,
struct lskcipher_alg, co.base);
}
/**
* crypto_skcipher_ivsize() - obtain IV size
* @tfm: cipher handle
*
* The size of the IV for the skcipher referenced by the cipher handle is
* returned. This IV size may be zero if the cipher does not need an IV.
*
* Return: IV size in bytes
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_common(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
struct crypto_sync_skcipher *tfm)
{
return crypto_skcipher_ivsize(&tfm->base);
}
/**
* crypto_lskcipher_ivsize() - obtain IV size
* @tfm: cipher handle
*
* The size of the IV for the lskcipher referenced by the cipher handle is
* returned. This IV size may be zero if the cipher does not need an IV.
*
* Return: IV size in bytes
*/
static inline unsigned int crypto_lskcipher_ivsize(
struct crypto_lskcipher *tfm)
{
return crypto_lskcipher_alg(tfm)->co.ivsize;
}
/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
* The block size for the skcipher referenced with the cipher handle is
* returned. The caller may use that information to allocate appropriate
* memory for the data returned by the encryption or decryption operation
*
* Return: block size of cipher
*/
static inline unsigned int crypto_skcipher_blocksize(
struct crypto_skcipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
/**
* crypto_lskcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
* The block size for the lskcipher referenced with the cipher handle is
* returned. The caller may use that information to allocate appropriate
* memory for the data returned by the encryption or decryption operation
*
* Return: block size of cipher
*/
static inline unsigned int crypto_lskcipher_blocksize(
struct crypto_lskcipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm));
}
/**
* crypto_skcipher_chunksize() - obtain chunk size
* @tfm: cipher handle
*
* The block size is set to one for ciphers such as CTR. However,
* you still need to provide incremental updates in multiples of
* the underlying block size as the IV does not have sub-block
* granularity. This is known in this API as the chunk size.
*
* Return: chunk size in bytes
*/
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_common(tfm)->chunksize;
}
/**
* crypto_lskcipher_chunksize() - obtain chunk size
* @tfm: cipher handle
*
* The block size is set to one for ciphers such as CTR. However,
* you still need to provide incremental updates in multiples of
* the underlying block size as the IV does not have sub-block
* granularity. This is known in this API as the chunk size.
*
* Return: chunk size in bytes
*/
static inline unsigned int crypto_lskcipher_chunksize(
struct crypto_lskcipher *tfm)
{
return crypto_lskcipher_alg(tfm)->co.chunksize;
}
/**
* crypto_skcipher_statesize() - obtain state size
* @tfm: cipher handle
*
* Some algorithms cannot be chained with the IV alone. They carry
* internal state which must be replicated if data is to be processed
* incrementally. The size of that state can be obtained with this
* function.
*
* Return: state size in bytes
*/
static inline unsigned int crypto_skcipher_statesize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_common(tfm)->statesize;
}
/**
* crypto_lskcipher_statesize() - obtain state size
* @tfm: cipher handle
*
* Some algorithms cannot be chained with the IV alone. They carry
* internal state which must be replicated if data is to be processed
* incrementally. The size of that state can be obtained with this
* function.
*
* Return: state size in bytes
*/
static inline unsigned int crypto_lskcipher_statesize(
struct crypto_lskcipher *tfm)
{
return crypto_lskcipher_alg(tfm)->co.statesize;
}
static inline unsigned int crypto_sync_skcipher_blocksize(
struct crypto_sync_skcipher *tfm)
{
return crypto_skcipher_blocksize(&tfm->base);
}
static inline unsigned int crypto_skcipher_alignmask(
struct crypto_skcipher *tfm)
{
return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
}
static inline unsigned int crypto_lskcipher_alignmask(
struct crypto_lskcipher *tfm)
{
return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm));
}
static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
{
return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
}
static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
u32 flags)
{
crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
}
static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
u32 flags)
{
crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
}
static inline u32 crypto_sync_skcipher_get_flags(
struct crypto_sync_skcipher *tfm)
{
return crypto_skcipher_get_flags(&tfm->base);
}
static inline void crypto_sync_skcipher_set_flags(
struct crypto_sync_skcipher *tfm, u32 flags)
{
crypto_skcipher_set_flags(&tfm->base, flags);
}
static inline void crypto_sync_skcipher_clear_flags(
struct crypto_sync_skcipher *tfm, u32 flags)
{
crypto_skcipher_clear_flags(&tfm->base, flags);
}
static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm)
{
return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm));
}
static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm,
u32 flags)
{
crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags);
}
static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm,
u32 flags)
{
crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags);
}
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
* @key: buffer holding the key
* @keylen: length of the key in bytes
*
* The caller provided key is set for the skcipher referenced by the cipher
* handle.
*
* Note, the key length determines the cipher variant. Many block ciphers select
* the variant based on the key size, such as AES-128 vs. AES-192 vs. AES-256.
* When providing a 16 byte key for an AES cipher handle, AES-128 is performed.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen);
static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
/**
* crypto_lskcipher_setkey() - set key for cipher
* @tfm: cipher handle
* @key: buffer holding the key
* @keylen: length of the key in bytes
*
* The caller provided key is set for the lskcipher referenced by the cipher
* handle.
*
* Note, the key length determines the cipher variant. Many block ciphers select
* the variant based on the key size, such as AES-128 vs. AES-192 vs. AES-256.
* When providing a 16 byte key for an AES cipher handle, AES-128 is performed.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm,
const u8 *key, unsigned int keylen);
static inline unsigned int crypto_skcipher_min_keysize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_common(tfm)->min_keysize;
}
static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
return crypto_skcipher_alg_common(tfm)->max_keysize;
}
static inline unsigned int crypto_lskcipher_min_keysize(
struct crypto_lskcipher *tfm)
{
return crypto_lskcipher_alg(tfm)->co.min_keysize;
}
static inline unsigned int crypto_lskcipher_max_keysize(
struct crypto_lskcipher *tfm)
{
return crypto_lskcipher_alg(tfm)->co.max_keysize;
}
/**
* crypto_skcipher_reqtfm() - obtain cipher handle from request
* @req: skcipher_request out of which the cipher handle is to be obtained
*
* Return the crypto_skcipher handle when furnishing an skcipher_request
* data structure.
*
* Return: crypto_skcipher handle
*/
static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
struct skcipher_request *req)
{
return __crypto_skcipher_cast(req->base.tfm);
}
static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
return container_of(tfm, struct crypto_sync_skcipher, base);
}
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
* needed to perform the cipher operation
*
* Encrypt plaintext data using the skcipher_request handle. That data
* structure and how it is filled with data is discussed with the
* skcipher_request_* functions.
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
int crypto_skcipher_encrypt(struct skcipher_request *req);
/**
* crypto_skcipher_decrypt() - decrypt ciphertext
* @req: reference to the skcipher_request handle that holds all information
* needed to perform the cipher operation
*
* Decrypt ciphertext data using the skcipher_request handle. That data
* structure and how it is filled with data is discussed with the
* skcipher_request_* functions.
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
* crypto_skcipher_export() - export partial state
* @req: reference to the skcipher_request handle that holds all information
* needed to perform the operation
* @out: output buffer of sufficient size that can hold the state
*
* Export partial state of the transformation. This function dumps the
* entire state of the ongoing transformation into a provided block of
* data so it can be @import 'ed back later on. This is useful in case
* you want to save partial result of the transformation after
* processing certain amount of data and reload this partial result
* multiple times later on for multiple re-use. No data processing
* happens at this point.
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
int crypto_skcipher_export(struct skcipher_request *req, void *out);
/**
* crypto_skcipher_import() - import partial state
* @req: reference to the skcipher_request handle that holds all information
* needed to perform the operation
* @in: buffer holding the state
*
* Import partial state of the transformation. This function loads the
* entire state of the ongoing transformation from a provided block of
* data so the transformation can continue from this point onward. No
* data processing happens at this point.
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
int crypto_skcipher_import(struct skcipher_request *req, const void *in);
/**
* crypto_lskcipher_encrypt() - encrypt plaintext
* @tfm: lskcipher handle
* @src: source buffer
* @dst: destination buffer
* @len: number of bytes to process
* @siv: IV + state for the cipher operation. The length of the IV must
* comply with the IV size defined by crypto_lskcipher_ivsize. The
* IV is then followed with a buffer with the length as specified by
* crypto_lskcipher_statesize.
* Encrypt plaintext data using the lskcipher handle.
*
* Return: >=0 if the cipher operation was successful, if positive
* then this many bytes have been left unprocessed;
* < 0 if an error occurred
*/
int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
u8 *dst, unsigned len, u8 *siv);
/**
* crypto_lskcipher_decrypt() - decrypt ciphertext
* @tfm: lskcipher handle
* @src: source buffer
* @dst: destination buffer
* @len: number of bytes to process
* @siv: IV + state for the cipher operation. The length of the IV must
* comply with the IV size defined by crypto_lskcipher_ivsize. The
* IV is then followed with a buffer with the length as specified by
* crypto_lskcipher_statesize.
*
* Decrypt ciphertext data using the lskcipher handle.
*
* Return: >=0 if the cipher operation was successful, if positive
* then this many bytes have been left unprocessed;
* < 0 if an error occurred
*/
int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
u8 *dst, unsigned len, u8 *siv);
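/*
 * Illustrative sketch (not part of the API above): a one-shot lskcipher
 * encryption over a linear buffer. The algorithm name is a hypothetical
 * placeholder and error handling is abbreviated; the caller-provided @siv
 * must be crypto_lskcipher_ivsize() + crypto_lskcipher_statesize() bytes,
 * with the IV at the front.
 */
static inline int example_lskcipher_encrypt_once(const u8 *key,
						 unsigned int keylen,
						 const u8 *src, u8 *dst,
						 unsigned int len, u8 *siv)
{
	struct crypto_lskcipher *tfm;
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_lskcipher_setkey(tfm, key, keylen);
	if (!err)
		/* A positive return value is the number of unprocessed bytes. */
		err = crypto_lskcipher_encrypt(tfm, src, dst, len, siv);

	crypto_free_lskcipher(tfm);
	return err;
}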
/**
* DOC: Symmetric Key Cipher Request Handle
*
* The skcipher_request data structure contains all pointers to data
* required for the symmetric key cipher operation. This includes the cipher
* handle (which can be used by multiple skcipher_request instances), pointer
* to plaintext and ciphertext, asynchronous callback function, etc. It acts
* as a handle to the skcipher_request_* API calls in a similar way as
* skcipher handle to the crypto_skcipher_* API calls.
*/
/**
* crypto_skcipher_reqsize() - obtain size of the request data structure
* @tfm: cipher handle
*
* Return: number of bytes
*/
static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
{
return tfm->reqsize;
}
/**
* skcipher_request_set_tfm() - update cipher handle reference in request
* @req: request handle to be modified
* @tfm: cipher handle that shall be added to the request handle
*
* Allow the caller to replace the existing skcipher handle in the request
* data structure with a different one.
*/
static inline void skcipher_request_set_tfm(struct skcipher_request *req,
struct crypto_skcipher *tfm)
{
req->base.tfm = crypto_skcipher_tfm(tfm);
}
static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
struct crypto_sync_skcipher *tfm)
{
skcipher_request_set_tfm(req, &tfm->base);
}
static inline struct skcipher_request *skcipher_request_cast(
struct crypto_async_request *req)
{
return container_of(req, struct skcipher_request, base);
}
/**
* skcipher_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
* @gfp: memory allocation flag that is handed to kmalloc by the API call.
*
* Allocate the request data structure that must be used with the skcipher
* encrypt and decrypt API calls. During the allocation, the provided skcipher
* handle is registered in the request data structure.
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct skcipher_request *skcipher_request_alloc_noprof(
struct crypto_skcipher *tfm, gfp_t gfp)
{
struct skcipher_request *req;
req = kmalloc_noprof(sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(tfm), gfp);
if (likely(req))
skcipher_request_set_tfm(req, tfm);
return req;
}
#define skcipher_request_alloc(...) alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__))
/**
* skcipher_request_free() - zeroize and free request data structure
* @req: request data structure cipher handle to be freed
*/
static inline void skcipher_request_free(struct skcipher_request *req)
{
kfree_sensitive(req);
}
static inline void skcipher_request_zero(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm));
}
/**
* skcipher_request_set_callback() - set asynchronous callback function
* @req: request handle
* @flags: specify zero or an ORing of the flags
* CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
* increase the wait queue beyond the initial maximum size;
* CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
* @compl: callback function pointer to be registered with the request handle
* @data: The data pointer refers to memory that is not used by the kernel
* crypto API, but provided to the callback function for it to use. Here,
* the caller can provide a reference to memory the callback function can
* operate on. As the callback function is invoked asynchronously to the
* related functionality, it may need to access data structures of the
* related functionality which can be referenced using this pointer. The
* callback function can access the memory via the "data" field in the
* crypto_async_request data structure provided to the callback function.
*
* This function allows setting the callback function that is triggered once the
* cipher operation completes.
*
* The callback function is registered with the skcipher_request handle and
* must comply with the following template::
*
* void callback_function(struct crypto_async_request *req, int error)
*/
static inline void skcipher_request_set_callback(struct skcipher_request *req,
u32 flags,
crypto_completion_t compl,
void *data)
{
req->base.complete = compl;
req->base.data = data;
req->base.flags = flags;
}
/**
* skcipher_request_set_crypt() - set data buffers
* @req: request handle
* @src: source scatter / gather list
* @dst: destination scatter / gather list
* @cryptlen: number of bytes to process from @src
* @iv: IV for the cipher operation which must comply with the IV size defined
* by crypto_skcipher_ivsize
*
* This function allows setting of the source data and destination data
* scatter / gather lists.
*
* For encryption, the source is treated as the plaintext and the
* destination is the ciphertext. For a decryption operation, the use is
* reversed - the source is the ciphertext and the destination is the plaintext.
*/
static inline void skcipher_request_set_crypt(
struct skcipher_request *req,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, void *iv)
{
req->src = src;
req->dst = dst;
req->cryptlen = cryptlen;
req->iv = iv;
}
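/*
 * Illustrative sketch (not part of the API above): a minimal synchronous
 * caller built on the asynchronous skcipher interface, waiting for
 * completion with crypto_wait_req(). The algorithm name, key handling and
 * scatterlists are hypothetical placeholders; a real user must size the IV
 * with crypto_skcipher_ivsize() and handle errors appropriately.
 */
static inline int example_skcipher_encrypt(struct scatterlist *src,
					   struct scatterlist *dst,
					   unsigned int len,
					   const u8 *key, unsigned int keylen,
					   u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a blocking wait. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}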
#endif /* _CRYPTO_SKCIPHER_H */
// SPDX-License-Identifier: GPL-2.0
/*
* Common Block IO controller cgroup interface
*
* Based on ideas and code from CFQ, CFS and BFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*
* Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
* Paolo Valente <paolo.valente@unimore.it>
*
* Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
* Nauman Rafique <nauman@google.com>
*
* For policy-specific per-blkcg data:
* Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
* Arianna Avanzini <avanzini.arianna@gmail.com>
*/
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"
static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);
/*
* blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
* blkcg_pol_register_mutex nests outside of it and synchronizes entire
* policy [un]register operations including cgroup file additions /
* removals. Putting cgroup file registration outside blkcg_pol_mutex
* allows grabbing it from cgroup callbacks.
*/
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);
struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);
struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
bool blkcg_debug_stats = false;
static DEFINE_RAW_SPINLOCK(blkg_stat_lock);
#define BLKG_DESTROY_BATCH_SIZE 64
/*
* Lockless lists for tracking IO stats update
*
* New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
* There are multiple blkg's (one for each block device) attached to each
* blkcg. The rstat code keeps track of which cpu has IO stats updated,
* but it doesn't know which blkg has the updated stats. If there are many
* block devices in a system, the cost of iterating all the blkg's to flush
* out the IO stats can be high. To reduce such overhead, a set of percpu
* lockless lists (lhead) per blkcg is used to track the set of recently
* updated iostat_cpu's since the last flush. An iostat_cpu will be put
* onto the lockless list on the update side [blk_cgroup_bio_start()] if
* not there yet and then removed when being flushed [blkcg_rstat_flush()].
* A reference to the blkg is taken and later put back in the process to
* protect against blkg removal.
*
* Return: 0 if successful or -ENOMEM if allocation fails.
*/
static int init_blkcg_llists(struct blkcg *blkcg)
{
int cpu;
blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
if (!blkcg->lhead)
return -ENOMEM;
for_each_possible_cpu(cpu)
init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
return 0;
}
/**
* blkcg_css - find the current css
*
* Find the css associated with either the kthread or the current task.
* This may return a dying css, so it is up to the caller to use tryget logic
* to confirm it is alive and well.
*/
static struct cgroup_subsys_state *blkcg_css(void)
{
struct cgroup_subsys_state *css;
css = kthread_blkcg();
if (css)
return css;
return task_css(current, io_cgrp_id);
}
static void blkg_free_workfn(struct work_struct *work)
{
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
free_work);
struct request_queue *q = blkg->q;
int i;
/*
* pd_free_fn() can also be called from blkcg_deactivate_policy(),
* in order to make sure pd_free_fn() is called in order, the deletion
* of the list blkg->q_node is delayed to here from blkg_destroy(), and
* blkcg_mutex is used to synchronize blkg_free_workfn() and
* blkcg_deactivate_policy().
*/
mutex_lock(&q->blkcg_mutex);
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
if (blkg->parent)
blkg_put(blkg->parent);
spin_lock_irq(&q->queue_lock);
list_del_init(&blkg->q_node);
spin_unlock_irq(&q->queue_lock);
mutex_unlock(&q->blkcg_mutex);
blk_put_queue(q);
free_percpu(blkg->iostat_cpu);
percpu_ref_exit(&blkg->refcnt);
kfree(blkg);
}
/**
* blkg_free - free a blkg
* @blkg: blkg to free
*
* Free @blkg which may be partially allocated.
*/
static void blkg_free(struct blkcg_gq *blkg)
{
if (!blkg)
return;
/*
* Both ->pd_free_fn() and request queue's release handler may
* sleep, so free us by scheduling one work func
*/
INIT_WORK(&blkg->free_work, blkg_free_workfn);
schedule_work(&blkg->free_work);
}
static void __blkg_release(struct rcu_head *rcu)
{
struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
struct blkcg *blkcg = blkg->blkcg;
int cpu;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif
/*
* Flush all the non-empty percpu lockless lists before releasing
* us, given these stats belong to us.
*
* blkg_stat_lock is for serializing blkg stat update
*/
for_each_possible_cpu(cpu)
__blkcg_rstat_flush(blkcg, cpu);
/* release the blkcg and parent blkg refs this blkg has been holding */
css_put(&blkg->blkcg->css);
blkg_free(blkg);
}
/*
* A group is RCU protected, but having an rcu lock does not mean that one
* can access all the fields of blkg and assume these are valid. For
* example, don't try to follow throtl_data and request queue links.
*
* Having a reference to blkg under an rcu allows accesses to only values
* local to groups like group stats and group rate limits.
*/
static void blkg_release(struct percpu_ref *ref)
{
struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
call_rcu(&blkg->rcu_head, __blkg_release);
}
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;
static void blkg_async_bio_workfn(struct work_struct *work)
{
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
async_bio_work);
struct bio_list bios = BIO_EMPTY_LIST;
struct bio *bio;
struct blk_plug plug;
bool need_plug = false;
/* as long as there are pending bios, @blkg can't go away */
spin_lock(&blkg->async_bio_lock);
bio_list_merge_init(&bios, &blkg->async_bios);
spin_unlock(&blkg->async_bio_lock);
/* start plug only when bio_list contains at least 2 bios */
if (bios.head && bios.head->bi_next) {
need_plug = true;
blk_start_plug(&plug);
}
while ((bio = bio_list_pop(&bios)))
submit_bio(bio);
if (need_plug)
blk_finish_plug(&plug);
}
/*
* When a shared kthread issues a bio for a cgroup, doing so synchronously can
* lead to priority inversions as the kthread can be trapped waiting for that
* cgroup. Use this helper instead of submit_bio to punt the actual issuing to
* a dedicated per-blkcg work item to avoid such priority inversions.
*/
void blkcg_punt_bio_submit(struct bio *bio)
{
struct blkcg_gq *blkg = bio->bi_blkg;
if (blkg->parent) {
spin_lock(&blkg->async_bio_lock);
bio_list_add(&blkg->async_bios, bio);
spin_unlock(&blkg->async_bio_lock);
queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
} else {
/* never bounce for the root cgroup */
submit_bio(bio);
}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);
static int __init blkcg_punt_bio_init(void)
{
blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
WQ_MEM_RECLAIM | WQ_FREEZABLE |
WQ_UNBOUND | WQ_SYSFS, 0);
if (!blkcg_punt_bio_wq)
return -ENOMEM;
return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */
/**
* bio_blkcg_css - return the blkcg CSS associated with a bio
* @bio: target bio
*
* This returns the CSS for the blkcg associated with a bio, or %NULL if not
* associated. Callers are expected to either handle %NULL or know association
* has been done prior to calling this.
*/
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
if (!bio || !bio->bi_blkg)
return NULL;
return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);
/**
* blkcg_parent - get the parent of a blkcg
* @blkcg: blkcg of interest
*
* Return the parent blkcg of @blkcg. Can be called anytime.
*/
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
return css_to_blkcg(blkcg->css.parent);
}
/**
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
* @disk: gendisk the new blkg is associated with
* @gfp_mask: allocation mask to use
*
* Allocate a new blkg associating @blkcg and @disk.
*/
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
gfp_t gfp_mask)
{
struct blkcg_gq *blkg;
int i, cpu;
/* alloc and init base part */
blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
if (!blkg)
return NULL;
if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
goto out_free_blkg;
blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
if (!blkg->iostat_cpu)
goto out_exit_refcnt;
if (!blk_get_queue(disk->queue))
goto out_free_iostat;
blkg->q = disk->queue;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
blkg->iostat.blkg = blkg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
spin_lock_init(&blkg->async_bio_lock);
bio_list_init(&blkg->async_bios);
INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif
u64_stats_init(&blkg->iostat.sync);
for_each_possible_cpu(cpu) {
u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
}
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
struct blkg_policy_data *pd;
if (!blkcg_policy_enabled(disk->queue, pol))
continue;
/* alloc per-policy data and attach it to blkg */
pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
if (!pd)
goto out_free_pds;
blkg->pd[i] = pd;
pd->blkg = blkg;
pd->plid = i;
pd->online = false;
}
return blkg;
out_free_pds:
while (--i >= 0)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
blk_put_queue(disk->queue);
out_free_iostat:
free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
kfree(blkg);
return NULL;
}
/*
* If @new_blkg is %NULL, this function tries to allocate a new one as
* necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
*/
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
struct blkcg_gq *new_blkg)
{
struct blkcg_gq *blkg;
int i, ret;
lockdep_assert_held(&disk->queue->queue_lock);
/* request_queue is dying, do not create/recreate a blkg */
if (blk_queue_dying(disk->queue)) {
ret = -ENODEV;
goto err_free_blkg;
}
/* blkg holds a reference to blkcg */
if (!css_tryget_online(&blkcg->css)) {
ret = -ENODEV;
goto err_free_blkg;
}
/* allocate */
if (!new_blkg) {
new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_css;
}
}
blkg = new_blkg;
/* link parent */
if (blkcg_parent(blkcg)) {
blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
if (WARN_ON_ONCE(!blkg->parent)) {
ret = -ENODEV;
goto err_put_css;
}
blkg_get(blkg->parent);
}
/* invoke per-policy init */
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && pol->pd_init_fn)
pol->pd_init_fn(blkg->pd[i]);
}
/* insert */
spin_lock(&blkcg->lock);
ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
if (likely(!ret)) {
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
list_add(&blkg->q_node, &disk->queue->blkg_list);
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i]) {
if (pol->pd_online_fn)
pol->pd_online_fn(blkg->pd[i]);
blkg->pd[i]->online = true;
}
}
}
blkg->online = true;
spin_unlock(&blkcg->lock);
if (!ret)
return blkg;
/* @blkg failed to be fully initialized, use the usual release path */
blkg_put(blkg);
return ERR_PTR(ret);
err_put_css:
css_put(&blkcg->css);
err_free_blkg:
if (new_blkg)
blkg_free(new_blkg);
return ERR_PTR(ret);
}
/**
* blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest
* @disk: gendisk of interest
*
* Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
* create one. blkg creation is performed recursively from blkcg_root such
* that all non-root blkg's have access to the parent blkg. This function
* should be called under RCU read lock and takes @disk->queue->queue_lock.
*
* Returns the blkg or the closest blkg if blkg_create() fails as it walks
* down from root.
*/
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blkcg_gq *blkg;
unsigned long flags;
WARN_ON_ONCE(!rcu_read_lock_held());
blkg = blkg_lookup(blkcg, q);
if (blkg)
return blkg;
spin_lock_irqsave(&q->queue_lock, flags);
blkg = blkg_lookup(blkcg, q);
if (blkg) {
if (blkcg != &blkcg_root &&
blkg != rcu_dereference(blkcg->blkg_hint))
rcu_assign_pointer(blkcg->blkg_hint, blkg);
goto found;
}
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
* non-root blkgs have access to their parents. Returns the closest
* blkg to the intended blkg should blkg_create() fail.
*/
while (true) {
struct blkcg *pos = blkcg;
struct blkcg *parent = blkcg_parent(blkcg);
struct blkcg_gq *ret_blkg = q->root_blkg;
while (parent) {
blkg = blkg_lookup(parent, q);
if (blkg) {
/* remember closest blkg */
ret_blkg = blkg;
break;
}
pos = parent;
parent = blkcg_parent(parent);
}
blkg = blkg_create(pos, disk, NULL);
if (IS_ERR(blkg)) {
blkg = ret_blkg;
break;
}
if (pos == blkcg)
break;
}
found:
spin_unlock_irqrestore(&q->queue_lock, flags);
return blkg;
}
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
int i;
lockdep_assert_held(&blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
/*
* blkg stays on the queue list until blkg_free_workfn(), see details in
* blkg_free_workfn(), hence this function can be called from
* blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
* blkg_free_workfn().
*/
if (hlist_unhashed(&blkg->blkcg_node))
return;
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && blkg->pd[i]->online) {
blkg->pd[i]->online = false;
if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[i]);
}
}
blkg->online = false;
radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
hlist_del_init_rcu(&blkg->blkcg_node);
/*
* Both setting lookup hint to and clearing it from @blkg are done
* under queue_lock. If it's not pointing to @blkg now, it never
* will. Hint assignment itself can race safely.
*/
if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
percpu_ref_kill(&blkg->refcnt);
}
static void blkg_destroy_all(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blkcg_gq *blkg;
int count = BLKG_DESTROY_BATCH_SIZE;
int i;
restart:
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
if (hlist_unhashed(&blkg->blkcg_node))
continue;
spin_lock(&blkcg->lock);
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
/*
* In order to avoid holding the spin lock for too long, release
* it when a batch of blkgs has been destroyed.
*/
if (!(--count)) {
count = BLKG_DESTROY_BATCH_SIZE;
spin_unlock_irq(&q->queue_lock);
cond_resched();
goto restart;
}
}
/*
* Mark the policies deactivated: policy offline has been done and the
* frees are scheduled, so future blkcg_deactivate_policy() calls can
* be bypassed.
*/
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (pol)
__clear_bit(pol->plid, q->blkcg_pols);
}
q->root_blkg = NULL;
spin_unlock_irq(&q->queue_lock);
}
static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
int i;
for (i = 0; i < BLKG_IOSTAT_NR; i++) {
dst->bytes[i] = src->bytes[i];
dst->ios[i] = src->ios[i];
}
}
static void __blkg_clear_stat(struct blkg_iostat_set *bis)
{
struct blkg_iostat cur = {0};
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&bis->sync);
blkg_iostat_set(&bis->cur, &cur);
blkg_iostat_set(&bis->last, &cur);
u64_stats_update_end_irqrestore(&bis->sync, flags);
}
static void blkg_clear_stat(struct blkcg_gq *blkg)
{
int cpu;
for_each_possible_cpu(cpu) {
struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
__blkg_clear_stat(s);
}
__blkg_clear_stat(&blkg->iostat);
}
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 val)
{
struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
int i;
pr_info_once("blkio.%s is deprecated\n", cftype->name);
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
/*
* Note that stat reset is racy - it doesn't synchronize against
* stat updates. This is a debug feature which shouldn't exist
* anyway. If you get hit by a race, retry.
*/
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
blkg_clear_stat(blkg);
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && pol->pd_reset_stats_fn)
pol->pd_reset_stats_fn(blkg->pd[i]);
}
}
spin_unlock_irq(&blkcg->lock);
mutex_unlock(&blkcg_pol_mutex);
return 0;
}
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
if (!blkg->q->disk)
return NULL;
return bdi_dev_name(blkg->q->disk->bdi);
}
/**
* blkcg_print_blkgs - helper for printing per-blkg data
* @sf: seq_file to print to
* @blkcg: blkcg of interest
* @prfill: fill function to print out a blkg
* @pol: policy in question
* @data: data to be passed to @prfill
* @show_total: to print out sum of prfill return values or not
*
* This function invokes @prfill on each blkg of @blkcg if pd for the
* policy specified by @pol exists. @prfill is invoked with @sf, the
* policy data and @data and the matching queue lock held. If @show_total
* is %true, the sum of the return values from @prfill is printed with
* "Total" label at the end.
*
* This is to be used to construct print functions for
* cftype->read_seq_string method.
*/
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
struct blkg_policy_data *, int),
const struct blkcg_policy *pol, int data,
bool show_total)
{
struct blkcg_gq *blkg;
u64 total = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
spin_lock_irq(&blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
if (show_total)
seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
/**
* __blkg_prfill_u64 - prfill helper for a single u64 value
* @sf: seq_file to print to
* @pd: policy private data of interest
* @v: value to print
*
* Print @v to @sf for the device associated with @pd.
*/
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
const char *dname = blkg_dev_name(pd->blkg);
if (!dname)
return 0;
seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
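/*
 * Illustrative sketch, not part of the original file: a prfill callback as
 * it would be passed to blkcg_print_blkgs() from a policy's seq_show
 * handler. "struct example_stat_pd" and its "nr_dispatched" field are
 * hypothetical stand-ins for whatever the policy tracks per blkg.
 */
struct example_stat_pd {
	struct blkg_policy_data pd;
	u64 nr_dispatched;
};

static u64 __maybe_unused example_prfill(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct example_stat_pd *epd = container_of(pd, struct example_stat_pd, pd);

	/* prints "<devname> <value>\n" and returns the value for "Total" */
	return __blkg_prfill_u64(sf, pd, epd->nr_dispatched);
}

/*
 * A policy's seq_show handler would then do something like (with
 * example_policy being that policy's registered struct blkcg_policy):
 *
 *	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), example_prfill,
 *			  &example_policy, 0, true);
 */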
/**
* blkg_conf_init - initialize a blkg_conf_ctx
* @ctx: blkg_conf_ctx to initialize
* @input: input string
*
* Initialize @ctx which can be used to parse blkg config input string @input.
* Once initialized, @ctx can be used with blkg_conf_open_bdev() and
* blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
*/
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
*ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);
/**
* blkg_conf_open_bdev - parse and open bdev for per-blkg config update
* @ctx: blkg_conf_ctx initialized with blkg_conf_init()
*
* Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
* @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
* set to point past the device node prefix.
*
* This function may be called multiple times on @ctx and the extra calls become
* NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
* explicitly if bdev access is needed without resolving the blkcg / policy part
* of @ctx->input. Returns -errno on error.
*/
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
char *input = ctx->input;
unsigned int major, minor;
struct block_device *bdev;
int key_len;
if (ctx->bdev)
return 0;
if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
return -EINVAL;
input += key_len;
if (!isspace(*input))
return -EINVAL;
input = skip_spaces(input);
bdev = blkdev_get_no_open(MKDEV(major, minor), false);
if (!bdev)
return -ENODEV;
if (bdev_is_partition(bdev)) {
blkdev_put_no_open(bdev);
return -ENODEV;
}
mutex_lock(&bdev->bd_queue->rq_qos_mutex);
if (!disk_live(bdev->bd_disk)) {
blkdev_put_no_open(bdev);
mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
return -ENODEV;
}
ctx->body = input;
ctx->bdev = bdev;
return 0;
}
/*
* Similar to blkg_conf_open_bdev(), but additionally freezes the queue and
* ensures the correct locking order between queue freezing and q->rq_qos_mutex.
*
* This function returns negative error on failure. On success it returns
* memflags which must be saved and later passed to blkg_conf_exit_frozen
* for restoring the memalloc scope.
*/
unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
{
int ret;
unsigned long memflags;
if (ctx->bdev)
return -EINVAL;
ret = blkg_conf_open_bdev(ctx);
if (ret < 0)
return ret;
/*
* At this point, we haven't started protecting anything related to QoS,
* so we release q->rq_qos_mutex here, which was first acquired in
* blkg_conf_open_bdev(). Later, we re-acquire q->rq_qos_mutex after
* freezing the queue to maintain the correct locking order.
*/
mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
return memflags;
}
/**
* blkg_conf_prep - parse and prepare for per-blkg config update
* @blkcg: target block cgroup
* @pol: target policy
* @ctx: blkg_conf_ctx initialized with blkg_conf_init()
*
* Parse per-blkg config update from @ctx->input and initialize @ctx
* accordingly. On success, @ctx->body points to the part of @ctx->input
* following MAJ:MIN, @ctx->bdev points to the target block device and
* @ctx->blkg to the blkg being configured.
*
* blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
* function returns with queue lock held and must be followed by
* blkg_conf_exit().
*/
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
struct blkg_conf_ctx *ctx)
__acquires(&bdev->bd_queue->queue_lock)
{
struct gendisk *disk;
struct request_queue *q;
struct blkcg_gq *blkg;
int ret;
ret = blkg_conf_open_bdev(ctx);
if (ret)
return ret;
disk = ctx->bdev->bd_disk;
q = disk->queue;
/* Prevent concurrent execution with blkcg_deactivate_policy() */
mutex_lock(&q->blkcg_mutex);
spin_lock_irq(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol)) {
ret = -EOPNOTSUPP;
goto fail_unlock;
}
blkg = blkg_lookup(blkcg, q);
if (blkg)
goto success;
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
* non-root blkgs have access to their parents.
*/
while (true) {
struct blkcg *pos = blkcg;
struct blkcg *parent;
struct blkcg_gq *new_blkg;
parent = blkcg_parent(blkcg);
while (parent && !blkg_lookup(parent, q)) {
pos = parent;
parent = blkcg_parent(parent);
}
/* Drop locks to do new blkg allocation with GFP_KERNEL. */
spin_unlock_irq(&q->queue_lock);
new_blkg = blkg_alloc(pos, disk, GFP_NOIO);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto fail_exit;
}
if (radix_tree_preload(GFP_KERNEL)) {
blkg_free(new_blkg);
ret = -ENOMEM;
goto fail_exit;
}
spin_lock_irq(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol)) {
blkg_free(new_blkg);
ret = -EOPNOTSUPP;
goto fail_preloaded;
}
blkg = blkg_lookup(pos, q);
if (blkg) {
blkg_free(new_blkg);
} else {
blkg = blkg_create(pos, disk, new_blkg);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_preloaded;
}
}
radix_tree_preload_end();
if (pos == blkcg)
goto success;
}
success:
mutex_unlock(&q->blkcg_mutex);
ctx->blkg = blkg;
return 0;
fail_preloaded:
radix_tree_preload_end();
fail_unlock:
spin_unlock_irq(&q->queue_lock);
fail_exit:
mutex_unlock(&q->blkcg_mutex);
/*
* If queue was bypassing, we should retry. Do so after a
* short msleep(). It isn't strictly necessary but queue
* can be bypassing for some time and it's always nice to
* avoid busy looping.
*/
if (ret == -EBUSY) {
msleep(10);
ret = restart_syscall();
}
return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
* blkg_conf_exit - clean up per-blkg config update
* @ctx: blkg_conf_ctx initialized with blkg_conf_init()
*
* Clean up after per-blkg config update. This function must be called on all
* blkg_conf_ctx's initialized with blkg_conf_init().
*/
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
__releases(&ctx->bdev->bd_queue->queue_lock)
__releases(&ctx->bdev->bd_queue->rq_qos_mutex)
{
if (ctx->blkg) {
spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
ctx->blkg = NULL;
}
if (ctx->bdev) {
mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
blkdev_put_no_open(ctx->bdev);
ctx->body = NULL;
ctx->bdev = NULL;
}
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
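/*
 * Illustrative sketch, not part of the original file: the typical flow a
 * policy's cgroup file write handler follows when parsing "MAJ:MIN <value>"
 * input with the blkg_conf_* helpers. The policy pointer argument and the
 * u64 body format are assumptions for illustration only.
 */
static int __maybe_unused example_conf_write(struct blkcg *blkcg,
					     const struct blkcg_policy *pol,
					     char *input)
{
	struct blkg_conf_ctx ctx;
	u64 val;
	int ret;

	blkg_conf_init(&ctx, input);

	/* resolves MAJ:MIN, looks up/creates the blkg, takes queue_lock */
	ret = blkg_conf_prep(blkcg, pol, &ctx);
	if (ret)
		goto out;

	/* ctx.blkg is the target blkg, ctx.body points past "MAJ:MIN" */
	ret = kstrtou64(ctx.body, 0, &val);

	/* ... on success, apply @val to ctx.blkg's per-policy data here ... */
out:
	blkg_conf_exit(&ctx);
	return ret;
}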
/*
* Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
* when blkg_conf_open_bdev_frozen is used to open the bdev.
*/
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
{
if (ctx->bdev) {
struct request_queue *q = ctx->bdev->bd_queue;
blkg_conf_exit(ctx);
blk_mq_unfreeze_queue(q, memflags);
}
}
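/*
 * Illustrative sketch, not part of the original file: pairing
 * blkg_conf_open_bdev_frozen() with blkg_conf_exit_frozen(). The returned
 * memflags must be kept and handed back so the memalloc scope and queue
 * freeze are undone correctly; the error check via IS_ERR_VALUE() and the
 * rq_qos update in the middle are assumptions for illustration.
 */
static int __maybe_unused example_update_frozen(struct blkg_conf_ctx *ctx)
{
	unsigned long memflags;

	memflags = blkg_conf_open_bdev_frozen(ctx);
	if (IS_ERR_VALUE(memflags))
		return (int)memflags;

	/* ... update rq_qos state while the queue is frozen ... */

	blkg_conf_exit_frozen(ctx, memflags);
	return 0;
}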
static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
int i;
for (i = 0; i < BLKG_IOSTAT_NR; i++) {
dst->bytes[i] += src->bytes[i];
dst->ios[i] += src->ios[i];
}
}
static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
int i;
for (i = 0; i < BLKG_IOSTAT_NR; i++) {
dst->bytes[i] -= src->bytes[i];
dst->ios[i] -= src->ios[i];
}
}
static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
struct blkg_iostat *last)
{
struct blkg_iostat delta;
unsigned long flags;
/* propagate percpu delta to global */
flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
blkg_iostat_set(&delta, cur);
blkg_iostat_sub(&delta, last);
blkg_iostat_add(&blkg->iostat.cur, &delta);
blkg_iostat_add(last, &delta);
u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
{
struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
struct llist_node *lnode;
struct blkg_iostat_set *bisc, *next_bisc;
unsigned long flags;
rcu_read_lock();
lnode = llist_del_all(lhead);
if (!lnode)
goto out;
/*
* Cover concurrent parent blkg updates from blkg_release().
*
* When flushing from cgroup, the subsystem rstat lock is always held,
* so this lock won't cause contention most of the time.
*/
raw_spin_lock_irqsave(&blkg_stat_lock, flags);
/*
* Iterate only the iostat_cpu's queued in the lockless list.
*/
llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
struct blkcg_gq *blkg = bisc->blkg;
struct blkcg_gq *parent = blkg->parent;
struct blkg_iostat cur;
unsigned int seq;
/*
* Order the assignment of `next_bisc` from `bisc->lnode.next` in
* llist_for_each_entry_safe against the clearing of `bisc->lqueued`,
* so that `next_bisc` cannot be assigned from a new next pointer added
* in blk_cgroup_bio_start() due to reordering.
*
* The paired barrier is implied in llist_add() in blk_cgroup_bio_start().
*/
smp_mb();
WRITE_ONCE(bisc->lqueued, false);
if (bisc == &blkg->iostat)
goto propagate_up; /* propagate up to parent only */
/* fetch the current per-cpu values */
do {
seq = u64_stats_fetch_begin(&bisc->sync);
blkg_iostat_set(&cur, &bisc->cur);
} while (u64_stats_fetch_retry(&bisc->sync, seq));
blkcg_iostat_update(blkg, &cur, &bisc->last);
propagate_up:
/* propagate global delta to parent (unless that's root) */
if (parent && parent->parent) {
blkcg_iostat_update(parent, &blkg->iostat.cur,
&blkg->iostat.last);
/*
* Queue parent->iostat to its blkcg's lockless
* list to propagate up to the grandparent if the
* iostat hasn't been queued yet.
*/
if (!parent->iostat.lqueued) {
struct llist_head *plhead;
plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
llist_add(&parent->iostat.lnode, plhead);
parent->iostat.lqueued = true;
}
}
}
raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
rcu_read_unlock();
}
static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
/* Root-level stats are sourced from system-wide IO stats */
if (cgroup_parent(css->cgroup))
__blkcg_rstat_flush(css_to_blkcg(css), cpu);
}
/*
* We source root cgroup stats from the system-wide stats to avoid
* tracking the same information twice and incurring overhead when no
* cgroups are defined. For that reason, css_rstat_flush in
* blkcg_print_stat does not actually fill out the iostat in the root
* cgroup's blkcg_gq.
*
* However, we would like to re-use the printing code between the root and
* non-root cgroups to the extent possible. For that reason, we simulate
* flushing the root cgroup's stats by explicitly filling in the iostat
* with disk level statistics.
*/
static void blkcg_fill_root_iostats(void)
{
struct class_dev_iter iter;
struct device *dev;
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
while ((dev = class_dev_iter_next(&iter))) {
struct block_device *bdev = dev_to_bdev(dev);
struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
struct blkg_iostat tmp;
int cpu;
unsigned long flags;
memset(&tmp, 0, sizeof(tmp));
for_each_possible_cpu(cpu) {
struct disk_stats *cpu_dkstats;
cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
tmp.ios[BLKG_IOSTAT_READ] +=
cpu_dkstats->ios[STAT_READ];
tmp.ios[BLKG_IOSTAT_WRITE] +=
cpu_dkstats->ios[STAT_WRITE];
tmp.ios[BLKG_IOSTAT_DISCARD] +=
cpu_dkstats->ios[STAT_DISCARD];
/* convert sectors to bytes */
tmp.bytes[BLKG_IOSTAT_READ] +=
cpu_dkstats->sectors[STAT_READ] << 9;
tmp.bytes[BLKG_IOSTAT_WRITE] +=
cpu_dkstats->sectors[STAT_WRITE] << 9;
tmp.bytes[BLKG_IOSTAT_DISCARD] +=
cpu_dkstats->sectors[STAT_DISCARD] << 9;
}
flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
blkg_iostat_set(&blkg->iostat.cur, &tmp);
u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
class_dev_iter_exit(&iter);
}
static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
struct blkg_iostat_set *bis = &blkg->iostat;
u64 rbytes, wbytes, rios, wios, dbytes, dios;
const char *dname;
unsigned seq;
int i;
if (!blkg->online)
return;
dname = blkg_dev_name(blkg);
if (!dname)
return;
seq_printf(s, "%s ", dname);
do {
seq = u64_stats_fetch_begin(&bis->sync);
rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
rios = bis->cur.ios[BLKG_IOSTAT_READ];
wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
} while (u64_stats_fetch_retry(&bis->sync, seq));
if (rbytes || wbytes || rios || wios) {
seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
rbytes, wbytes, rios, wios,
dbytes, dios);
}
if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
seq_printf(s, " use_delay=%d delay_nsec=%llu",
atomic_read(&blkg->use_delay),
atomic64_read(&blkg->delay_nsec));
}
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (!blkg->pd[i] || !pol->pd_stat_fn)
continue;
pol->pd_stat_fn(blkg->pd[i], s);
}
seq_puts(s, "\n");
}
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
struct blkcg_gq *blkg;
if (!seq_css(sf)->parent)
blkcg_fill_root_iostats();
else
css_rstat_flush(&blkcg->css);
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
spin_lock_irq(&blkg->q->queue_lock);
blkcg_print_one_stat(blkg, sf);
spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
return 0;
}
static struct cftype blkcg_files[] = {
{
.name = "stat",
.seq_show = blkcg_print_stat,
},
{ } /* terminate */
};
static struct cftype blkcg_legacy_files[] = {
{
.name = "reset_stats",
.write_u64 = blkcg_reset_stats,
},
{ } /* terminate */
};
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
{
return &css_to_blkcg(css)->cgwb_list;
}
#endif
/*
* blkcg destruction is a three-stage process.
*
* 1. Destruction starts. The blkcg_css_offline() callback is invoked
* which offlines writeback. Here we tie the next stage of blkg destruction
* to the completion of writeback associated with the blkcg. This lets us
* avoid punting potentially large amounts of outstanding writeback to root
* while maintaining any ongoing policies. The next stage is triggered when
* the nr_cgwbs count goes to zero.
*
* 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
* and handles the destruction of blkgs. Here the css reference held by
* the blkg is put back eventually allowing blkcg_css_free() to be called.
* This work may occur in cgwb_release_workfn() on the cgwb_release
* workqueue. Any submitted ios that fail to get the blkg ref will be
* punted to the root_blkg.
*
* 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
* This finally frees the blkcg.
*/
/**
* blkcg_destroy_blkgs - responsible for shooting down blkgs
* @blkcg: blkcg of interest
*
* blkgs should be removed while holding both q and blkcg locks. As blkcg lock
* is nested inside q lock, this function performs reverse double lock dancing.
* Destroying the blkgs releases the reference held on the blkcg's css allowing
* blkcg_css_free to eventually be called.
*
* This is the blkcg counterpart of ioc_release_fn().
*/
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
might_sleep();
spin_lock_irq(&blkcg->lock);
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
if (need_resched() || !spin_trylock(&q->queue_lock)) {
/*
* Given that the system can accumulate a huge number
* of blkgs in pathological cases, check to see if we
* need to reschedule to avoid a softlockup.
*/
spin_unlock_irq(&blkcg->lock);
cond_resched();
spin_lock_irq(&blkcg->lock);
continue;
}
blkg_destroy(blkg);
spin_unlock(&q->queue_lock);
}
spin_unlock_irq(&blkcg->lock);
}
/**
* blkcg_pin_online - pin online state
* @blkcg_css: blkcg of interest
*
* While pinned, a blkcg is kept online. This is primarily used to
* impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
* while an associated cgwb is still active.
*/
void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
{
refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
}
/**
* blkcg_unpin_online - unpin online state
* @blkcg_css: blkcg of interest
*
* This is primarily used to impedance-match blkg and cgwb lifetimes so
* that blkg doesn't go offline while an associated cgwb is still active.
* When this count goes to zero, all active cgwbs have finished so the
* blkcg can continue destruction by calling blkcg_destroy_blkgs().
*/
void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
{
struct blkcg *blkcg = css_to_blkcg(blkcg_css);
do {
struct blkcg *parent;
if (!refcount_dec_and_test(&blkcg->online_pin))
break;
parent = blkcg_parent(blkcg);
blkcg_destroy_blkgs(blkcg);
blkcg = parent;
} while (blkcg);
}
/**
* blkcg_css_offline - cgroup css_offline callback
* @css: css of interest
*
* This function is called when @css is about to go away. Here the cgwbs are
* offlined first and only once writeback associated with the blkcg has
* finished do we start step 2 (see above).
*/
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
/* this prevents anyone from attaching or migrating to this blkcg */
wb_blkcg_offline(css);
/* put the base online pin allowing step 2 to be triggered */
blkcg_unpin_online(css);
}
static void blkcg_css_free(struct cgroup_subsys_state *css)
{
struct blkcg *blkcg = css_to_blkcg(css);
int i;
mutex_lock(&blkcg_pol_mutex);
list_del(&blkcg->all_blkcgs_node);
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (blkcg->cpd[i])
blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
mutex_unlock(&blkcg_pol_mutex);
free_percpu(blkcg->lhead);
kfree(blkcg);
}
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct blkcg *blkcg;
int i;
mutex_lock(&blkcg_pol_mutex);
if (!parent_css) {
blkcg = &blkcg_root;
} else {
blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
if (!blkcg)
goto unlock;
}
if (init_blkcg_llists(blkcg))
goto free_blkcg;
for (i = 0; i < BLKCG_MAX_POLS ; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
struct blkcg_policy_data *cpd;
/*
* If the policy hasn't been attached yet, wait for it
* to be attached before doing anything else. Otherwise,
* check if the policy requires any specific per-cgroup
* data: if it does, allocate and initialize it.
*/
if (!pol || !pol->cpd_alloc_fn)
continue;
cpd = pol->cpd_alloc_fn(GFP_KERNEL);
if (!cpd)
goto free_pd_blkcg;
blkcg->cpd[i] = cpd;
cpd->blkcg = blkcg;
cpd->plid = i;
}
spin_lock_init(&blkcg->lock);
refcount_set(&blkcg->online_pin, 1);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
mutex_unlock(&blkcg_pol_mutex);
return &blkcg->css;
free_pd_blkcg:
for (i--; i >= 0; i--)
if (blkcg->cpd[i])
blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
free_percpu(blkcg->lhead);
free_blkcg:
if (blkcg != &blkcg_root)
kfree(blkcg);
unlock:
mutex_unlock(&blkcg_pol_mutex);
return ERR_PTR(-ENOMEM);
}
static int blkcg_css_online(struct cgroup_subsys_state *css)
{
struct blkcg *parent = blkcg_parent(css_to_blkcg(css));
/*
* blkcg_pin_online() is used to delay blkcg offline so that blkgs
* don't go offline while cgwbs are still active on them. Pin the
* parent so that offline always happens towards the root.
*/
if (parent)
blkcg_pin_online(&parent->css);
return 0;
}
void blkg_init_queue(struct request_queue *q)
{
INIT_LIST_HEAD(&q->blkg_list);
mutex_init(&q->blkcg_mutex);
}
int blkcg_init_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blkcg_gq *new_blkg, *blkg;
bool preloaded;
new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
if (!new_blkg)
return -ENOMEM;
preloaded = !radix_tree_preload(GFP_KERNEL);
/* Make sure the root blkg exists. */
/* spin_lock_irq can serve as RCU read-side critical section. */
spin_lock_irq(&q->queue_lock);
blkg = blkg_create(&blkcg_root, disk, new_blkg);
if (IS_ERR(blkg))
goto err_unlock;
q->root_blkg = blkg;
spin_unlock_irq(&q->queue_lock);
if (preloaded)
radix_tree_preload_end();
return 0;
err_unlock:
spin_unlock_irq(&q->queue_lock);
if (preloaded)
radix_tree_preload_end();
return PTR_ERR(blkg);
}
void blkcg_exit_disk(struct gendisk *disk)
{
blkg_destroy_all(disk);
blk_throtl_exit(disk);
}
static void blkcg_exit(struct task_struct *tsk)
{
if (tsk->throttle_disk)
put_disk(tsk->throttle_disk);
tsk->throttle_disk = NULL;
}
struct cgroup_subsys io_cgrp_subsys = {
.css_alloc = blkcg_css_alloc,
.css_online = blkcg_css_online,
.css_offline = blkcg_css_offline,
.css_free = blkcg_css_free,
.css_rstat_flush = blkcg_rstat_flush,
.dfl_cftypes = blkcg_files,
.legacy_cftypes = blkcg_legacy_files,
.legacy_name = "blkio",
.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
/*
* This ensures that, if available, memcg is automatically enabled
* together on the default hierarchy so that the owner cgroup can
* be retrieved from writeback pages.
*/
.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);
/**
* blkcg_activate_policy - activate a blkcg policy on a gendisk
* @disk: gendisk of interest
* @pol: blkcg policy to activate
*
* Activate @pol on @disk. Requires %GFP_KERNEL context. @disk goes through
* bypass mode to populate its blkgs with policy_data for @pol.
*
* Activation happens with @disk bypassed, so nobody would be accessing blkgs
* from IO path. Update of each blkg is protected by both queue and blkcg
* locks so that holding either lock and testing blkcg_policy_enabled() is
* always enough for dereferencing policy data.
*
* The caller is responsible for synchronizing [de]activations and policy
* [un]registerations. Returns 0 on success, -errno on failure.
*/
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
struct request_queue *q = disk->queue;
struct blkg_policy_data *pd_prealloc = NULL;
struct blkcg_gq *blkg, *pinned_blkg = NULL;
unsigned int memflags;
int ret;
if (blkcg_policy_enabled(q, pol))
return 0;
/*
* A policy is allowed to be registered without pd_alloc_fn/pd_free_fn,
* for example, ioprio. Such a policy works at the blkcg level, not the
* disk level, and doesn't need to be activated.
*/
if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn))
return -EINVAL;
if (queue_is_mq(q))
memflags = blk_mq_freeze_queue(q);
retry:
spin_lock_irq(&q->queue_lock);
/* blkgs are added at the head of blkg_list, so walk in reverse to initialize parents first */
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
if (blkg->pd[pol->plid])
continue;
/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
if (blkg == pinned_blkg) {
pd = pd_prealloc;
pd_prealloc = NULL;
} else {
pd = pol->pd_alloc_fn(disk, blkg->blkcg,
GFP_NOWAIT);
}
if (!pd) {
/*
* GFP_NOWAIT failed. Free the existing one and
* prealloc for @blkg w/ GFP_KERNEL.
*/
if (pinned_blkg)
blkg_put(pinned_blkg);
blkg_get(blkg);
pinned_blkg = blkg;
spin_unlock_irq(&q->queue_lock);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
GFP_KERNEL);
if (pd_prealloc)
goto retry;
else
goto enomem;
}
spin_lock(&blkg->blkcg->lock);
pd->blkg = blkg;
pd->plid = pol->plid;
blkg->pd[pol->plid] = pd;
if (pol->pd_init_fn)
pol->pd_init_fn(pd);
if (pol->pd_online_fn)
pol->pd_online_fn(pd);
pd->online = true;
spin_unlock(&blkg->blkcg->lock);
}
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
spin_unlock_irq(&q->queue_lock);
out:
if (queue_is_mq(q))
blk_mq_unfreeze_queue(q, memflags);
if (pinned_blkg)
blkg_put(pinned_blkg);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
return ret;
enomem:
/* alloc failed, take down everything */
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
struct blkg_policy_data *pd;
spin_lock(&blkcg->lock);
pd = blkg->pd[pol->plid];
if (pd) {
if (pd->online && pol->pd_offline_fn)
pol->pd_offline_fn(pd);
pd->online = false;
pol->pd_free_fn(pd);
blkg->pd[pol->plid] = NULL;
}
spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
ret = -ENOMEM;
goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
/**
* blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
* @disk: gendisk of interest
* @pol: blkcg policy to deactivate
*
* Deactivate @pol on @disk. Follows the same synchronization rules as
* blkcg_activate_policy().
*/
void blkcg_deactivate_policy(struct gendisk *disk,
const struct blkcg_policy *pol)
{
struct request_queue *q = disk->queue;
struct blkcg_gq *blkg;
unsigned int memflags;
if (!blkcg_policy_enabled(q, pol))
return;
if (queue_is_mq(q))
memflags = blk_mq_freeze_queue(q);
mutex_lock(&q->blkcg_mutex);
spin_lock_irq(&q->queue_lock);
__clear_bit(pol->plid, q->blkcg_pols);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
mutex_unlock(&q->blkcg_mutex);
if (queue_is_mq(q))
blk_mq_unfreeze_queue(q, memflags);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
struct blkcg *blkcg;
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
if (blkcg->cpd[pol->plid]) {
pol->cpd_free_fn(blkcg->cpd[pol->plid]);
blkcg->cpd[pol->plid] = NULL;
}
}
}
/**
* blkcg_policy_register - register a blkcg policy
* @pol: blkcg policy to register
*
* Register @pol with blkcg core. Might sleep and @pol may be modified on
* successful registration. Returns 0 on success and -errno on failure.
*/
int blkcg_policy_register(struct blkcg_policy *pol)
{
struct blkcg *blkcg;
int i, ret;
/*
* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs. A policy
* without pd_alloc_fn/pd_free_fn can't be activated.
*/
if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
return -EINVAL;
mutex_lock(&blkcg_pol_register_mutex);
mutex_lock(&blkcg_pol_mutex);
/* find an empty slot */
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (!blkcg_policy[i])
break;
if (i >= BLKCG_MAX_POLS) {
pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
ret = -ENOSPC;
goto err_unlock;
}
/* register @pol */
pol->plid = i;
blkcg_policy[pol->plid] = pol;
/* allocate and install cpd's */
if (pol->cpd_alloc_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
struct blkcg_policy_data *cpd;
cpd = pol->cpd_alloc_fn(GFP_KERNEL);
if (!cpd) {
ret = -ENOMEM;
goto err_free_cpds;
}
blkcg->cpd[pol->plid] = cpd;
cpd->blkcg = blkcg;
cpd->plid = pol->plid;
}
}
mutex_unlock(&blkcg_pol_mutex);
/* everything is in place, add intf files for the new policy */
if (pol->dfl_cftypes == pol->legacy_cftypes) {
WARN_ON(cgroup_add_cftypes(&io_cgrp_subsys,
pol->dfl_cftypes));
} else {
WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
pol->dfl_cftypes));
WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
pol->legacy_cftypes));
}
mutex_unlock(&blkcg_pol_register_mutex);
return 0;
err_free_cpds:
if (pol->cpd_free_fn)
blkcg_free_all_cpd(pol);
blkcg_policy[pol->plid] = NULL;
err_unlock:
mutex_unlock(&blkcg_pol_mutex);
mutex_unlock(&blkcg_pol_register_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
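/*
 * Illustrative sketch, not part of the original file: the minimal shape of
 * a policy that keeps per-(blkcg, disk) data and is registered with
 * blkcg_policy_register(). All "example_*" names are hypothetical; a real
 * policy would also provide cftypes and meaningful pd_init/online hooks.
 */
struct example_pd {
	struct blkg_policy_data pd;	/* must be embedded first */
	u64 nr_ios;
};

static struct blkg_policy_data *example_pd_alloc(struct gendisk *disk,
						 struct blkcg *blkcg,
						 gfp_t gfp)
{
	struct example_pd *epd;

	epd = kzalloc_node(sizeof(*epd), gfp, disk->queue->node);
	return epd ? &epd->pd : NULL;
}

static void example_pd_free(struct blkg_policy_data *pd)
{
	kfree(container_of(pd, struct example_pd, pd));
}

static struct blkcg_policy example_policy __maybe_unused = {
	.pd_alloc_fn	= example_pd_alloc,
	.pd_free_fn	= example_pd_free,
};

static int __init __maybe_unused example_policy_init(void)
{
	/* makes the policy known; blkcg_activate_policy() enables it per disk */
	return blkcg_policy_register(&example_policy);
}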
/**
* blkcg_policy_unregister - unregister a blkcg policy
* @pol: blkcg policy to unregister
*
* Undo blkcg_policy_register(@pol). Might sleep.
*/
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
mutex_lock(&blkcg_pol_register_mutex);
if (WARN_ON(blkcg_policy[pol->plid] != pol))
goto out_unlock;
/* kill the intf files first */
if (pol->dfl_cftypes)
cgroup_rm_cftypes(pol->dfl_cftypes);
if (pol->legacy_cftypes)
cgroup_rm_cftypes(pol->legacy_cftypes);
/* remove cpds and unregister */
mutex_lock(&blkcg_pol_mutex);
if (pol->cpd_free_fn)
blkcg_free_all_cpd(pol);
blkcg_policy[pol->plid] = NULL;
mutex_unlock(&blkcg_pol_mutex);
out_unlock:
mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
/*
* Scale the accumulated delay based on how long it has been since we updated
* the delay. We only call this when we are adding delay, in case it's been a
* while since we added delay, and when we are checking to see if we need to
* delay a task, to account for any delays that may have occurred.
*/
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
u64 old = atomic64_read(&blkg->delay_start);
/* negative use_delay means no scaling, see blkcg_set_delay() */
if (atomic_read(&blkg->use_delay) < 0)
return;
/*
* We only want to scale down every second. The idea here is that we
* want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
* time window. We only want to throttle tasks for recent delay that
* has occurred, in 1 second time windows since that's the maximum
* things can be throttled. We save the current delay window in
* blkg->last_delay so we know what amount is still left to be charged
* to the blkg from this point onward. blkg->last_use keeps track of
* the use_delay counter. The idea is if we're unthrottling the blkg we
* are ok with whatever is happening now, and we can take away more of
* the accumulated delay as we've already throttled enough that
* everybody is happy with their IO latencies.
*/
if (time_before64(old + NSEC_PER_SEC, now) &&
atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
u64 cur = atomic64_read(&blkg->delay_nsec);
u64 sub = min_t(u64, blkg->last_delay, now - old);
int cur_use = atomic_read(&blkg->use_delay);
/*
* We've been unthrottled, subtract a larger chunk of our
* accumulated delay.
*/
if (cur_use < blkg->last_use)
sub = max_t(u64, sub, blkg->last_delay >> 1);
/*
* This shouldn't happen, but handle it anyway. Our delay_nsec
* should only ever be growing except here where we subtract out
* min(last_delay, 1 second), but lord knows bugs happen and I'd
* rather not end up with negative numbers.
*/
if (unlikely(cur < sub)) {
atomic64_set(&blkg->delay_nsec, 0);
blkg->last_delay = 0;
} else {
atomic64_sub(sub, &blkg->delay_nsec);
blkg->last_delay = cur - sub;
}
blkg->last_use = cur_use;
}
}
/*
* This is called when we want to actually walk up the hierarchy and check to
* see if we need to throttle, and then actually throttle if there is some
* accumulated delay. This should only be called upon return to user space so
* we're not holding some lock that would induce a priority inversion.
*/
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
unsigned long pflags;
bool clamp = false;
u64 now = blk_time_get_ns();
u64 exp;
u64 delay_nsec = 0;
int tok;
while (blkg->parent) {
int use_delay = atomic_read(&blkg->use_delay);
if (use_delay) {
u64 this_delay;
blkcg_scale_delay(blkg, now);
this_delay = atomic64_read(&blkg->delay_nsec);
if (this_delay > delay_nsec) {
delay_nsec = this_delay;
clamp = use_delay > 0;
}
}
blkg = blkg->parent;
}
if (!delay_nsec)
return;
/*
* Let's not sleep for all eternity if we've amassed a huge delay.
* Swapping or metadata IO can accumulate 10's of seconds worth of
* delay, and we want userspace to be able to do _something_ so cap the
* delays at 0.25s. If there's 10's of seconds worth of delay then the
* tasks will be delayed for 0.25 second for every syscall. If
* blkcg_set_delay() was used as indicated by negative use_delay, the
* caller is responsible for regulating the range.
*/
if (clamp)
delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
if (use_memdelay)
psi_memstall_enter(&pflags);
exp = ktime_add_ns(now, delay_nsec);
tok = io_schedule_prepare();
do {
__set_current_state(TASK_KILLABLE);
if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
break;
} while (!fatal_signal_pending(current));
io_schedule_finish(tok);
if (use_memdelay)
psi_memstall_leave(&pflags);
}
/**
* blkcg_maybe_throttle_current - throttle the current task if it has been marked
*
* This is only called if we've been marked with set_notify_resume(). Obviously
* we can be set_notify_resume() for reasons other than blkcg throttling, so we
* check to see if current->throttle_disk is set and if not this doesn't do
* anything. This should only ever be called by the resume code; it's not meant
* to be called by people willy-nilly as it will actually do the work to
* throttle the task if it is set up for throttling.
*/
void blkcg_maybe_throttle_current(void)
{
struct gendisk *disk = current->throttle_disk;
struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool use_memdelay = current->use_memdelay;
if (!disk)
return;
current->throttle_disk = NULL;
current->use_memdelay = false;
rcu_read_lock();
blkcg = css_to_blkcg(blkcg_css());
if (!blkcg)
goto out;
blkg = blkg_lookup(blkcg, disk->queue);
if (!blkg)
goto out;
if (!blkg_tryget(blkg))
goto out;
rcu_read_unlock();
blkcg_maybe_throttle_blkg(blkg, use_memdelay);
blkg_put(blkg);
put_disk(disk);
return;
out:
rcu_read_unlock();
}
/**
* blkcg_schedule_throttle - this task needs to check for throttling
* @disk: disk to throttle
* @use_memdelay: do we charge this to memory delay for PSI
*
* This is called by the IO controller when we know there's delay accumulated
* for the blkg for this task. We do not pass the blkg because there are places
* we call this that may not have that information; the swapping code, for
* instance, will only have a block_device at that point. This sets the
* notify_resume for the task to check and see if it requires throttling before
* returning to user space.
*
* We will only schedule once per syscall. You can call this over and over
* again and it will only do the check once upon return to user space, and only
* throttle once. If the task needs to be throttled again it'll need to be
* re-set at the next time we see the task.
*/
void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
if (unlikely(current->flags & PF_KTHREAD))
return;
if (current->throttle_disk != disk) {
if (test_bit(GD_DEAD, &disk->state))
return;
get_device(disk_to_dev(disk));
if (current->throttle_disk)
put_disk(current->throttle_disk);
current->throttle_disk = disk;
}
if (use_memdelay)
current->use_memdelay = use_memdelay;
set_notify_resume(current);
}
/**
* blkcg_add_delay - add delay to this blkg
* @blkg: blkg of interest
* @now: the current time in nanoseconds
* @delta: how many nanoseconds of delay to add
*
* Charge @delta to the blkg's current delay accumulation. This is used to
* throttle tasks if an IO controller thinks we need more throttling.
*/
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
return;
blkcg_scale_delay(blkg, now);
atomic64_add(delta, &blkg->delay_nsec);
}
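/*
 * Illustrative sketch, not part of the original file: how an IO controller
 * might charge latency debt to a blkg and arm the current task to pay it
 * off on return to user space. The 2ms penalty is an arbitrary value for
 * illustration; a real controller also raises blkg->use_delay so that the
 * accumulated debt is actually applied by blkcg_maybe_throttle_blkg().
 */
static void __maybe_unused example_penalize_current(struct blkcg_gq *blkg,
						    struct gendisk *disk)
{
	/* grow the blkg's accumulated delay; blkcg_scale_delay() ages it */
	blkcg_add_delay(blkg, blk_time_get_ns(), 2 * NSEC_PER_MSEC);

	/* have the task check and sleep off the debt before user space */
	blkcg_schedule_throttle(disk, false);
}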
/**
* blkg_tryget_closest - try and get a blkg ref on the closest blkg
* @bio: target bio
* @css: target css
*
* As the failure mode here is to walk up the blkg tree, this ensures that the
* blkg->parent pointers are always valid. This returns the blkg that it ended
* up taking a reference on or %NULL if no reference was taken.
*/
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
struct cgroup_subsys_state *css)
{
struct blkcg_gq *blkg, *ret_blkg = NULL;
rcu_read_lock();
blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
while (blkg) {
if (blkg_tryget(blkg)) {
ret_blkg = blkg;
break;
}
blkg = blkg->parent;
}
rcu_read_unlock();
return ret_blkg;
}
/**
* bio_associate_blkg_from_css - associate a bio with a specified css
* @bio: target bio
* @css: target css
*
* Associate @bio with the blkg found by combining the css's blkg and the
* request_queue of the @bio. An association failure is handled by walking up
* the blkg tree. Therefore, the blkg associated can be anything between @blkg
* and q->root_blkg. This situation only happens when a cgroup is dying and
* then the remaining bios will spill to the closest alive blkg.
*
* A reference will be taken on the blkg and will be released when @bio is
* freed.
*/
void bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css)
{
if (bio->bi_blkg)
blkg_put(bio->bi_blkg);
if (css && css->parent) {
bio->bi_blkg = blkg_tryget_closest(bio, css);
} else {
blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
/**
* bio_associate_blkg - associate a bio with a blkg
* @bio: target bio
*
* Associate @bio with the blkg found from the bio's css and request_queue.
* If one is not found, blkg_lookup_create() creates the blkg. If a blkg is
* already associated, the css is reused and association redone as the
* request_queue may have changed.
*/
void bio_associate_blkg(struct bio *bio)
{
struct cgroup_subsys_state *css;
if (blk_op_is_passthrough(bio->bi_opf))
return;
rcu_read_lock();
if (bio->bi_blkg)
css = bio_blkcg_css(bio);
else
css = blkcg_css();
bio_associate_blkg_from_css(bio, css);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);
/**
* bio_clone_blkg_association - clone blkg association from src to dst bio
* @dst: destination bio
* @src: source bio
*/
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
if (src->bi_blkg)
bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
static int blk_cgroup_io_type(struct bio *bio)
{
if (op_is_discard(bio->bi_opf))
return BLKG_IOSTAT_DISCARD;
if (op_is_write(bio->bi_opf))
return BLKG_IOSTAT_WRITE;
return BLKG_IOSTAT_READ;
}
void blk_cgroup_bio_start(struct bio *bio)
{
struct blkcg *blkcg = bio->bi_blkg->blkcg;
int rwd = blk_cgroup_io_type(bio), cpu;
struct blkg_iostat_set *bis;
unsigned long flags;
if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
return;
/* Root-level stats are sourced from system-wide IO stats */
if (!cgroup_parent(blkcg->css.cgroup))
return;
cpu = get_cpu();
bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
flags = u64_stats_update_begin_irqsave(&bis->sync);
/*
* If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
* bio and we would have already accounted for the size of the bio.
*/
if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
bio_set_flag(bio, BIO_CGROUP_ACCT);
bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
}
bis->cur.ios[rwd]++;
/*
* If the iostat_cpu isn't in a lockless list, put it into the
* list to indicate that a stat update is pending.
*/
if (!READ_ONCE(bis->lqueued)) {
struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);
llist_add(&bis->lnode, lhead);
WRITE_ONCE(bis->lqueued, true);
}
u64_stats_update_end_irqrestore(&bis->sync, flags);
css_rstat_updated(&blkcg->css, cpu);
put_cpu();
}
bool blk_cgroup_congested(void)
{
struct blkcg *blkcg;
bool ret = false;
rcu_read_lock();
for (blkcg = css_to_blkcg(blkcg_css()); blkcg;
blkcg = blkcg_parent(blkcg)) {
if (atomic_read(&blkcg->congestion_count)) {
ret = true;
break;
}
}
rcu_read_unlock();
return ret;
}
module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
* block cgroup private header
*
* Based on ideas and code from CFQ, CFS and BFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*
* Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
* Paolo Valente <paolo.valente@unimore.it>
*
* Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
* Nauman Rafique <nauman@google.com>
*/
#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>
#include "blk.h"
struct blkcg_gq;
struct blkg_policy_data;
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
#ifdef CONFIG_BLK_CGROUP
enum blkg_iostat_type {
BLKG_IOSTAT_READ,
BLKG_IOSTAT_WRITE,
BLKG_IOSTAT_DISCARD,
BLKG_IOSTAT_NR,
};
struct blkg_iostat {
u64 bytes[BLKG_IOSTAT_NR];
u64 ios[BLKG_IOSTAT_NR];
};
struct blkg_iostat_set {
struct u64_stats_sync sync;
struct blkcg_gq *blkg;
struct llist_node lnode;
int lqueued; /* queued in llist */
struct blkg_iostat cur;
struct blkg_iostat last;
};
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
/* Pointer to the associated request_queue */
struct request_queue *q;
struct list_head q_node;
struct hlist_node blkcg_node;
struct blkcg *blkcg;
/* all non-root blkcg_gq's are guaranteed to have access to parent */
struct blkcg_gq *parent;
/* reference count */
struct percpu_ref refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
struct blkg_iostat_set __percpu *iostat_cpu;
struct blkg_iostat_set iostat;
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
spinlock_t async_bio_lock;
struct bio_list async_bios;
#endif
union {
struct work_struct async_bio_work;
struct work_struct free_work;
};
atomic_t use_delay;
atomic64_t delay_nsec;
atomic64_t delay_start;
u64 last_delay;
int last_use;
struct rcu_head rcu_head;
};
struct blkcg {
struct cgroup_subsys_state css;
spinlock_t lock;
refcount_t online_pin;
/* If there is block congestion on this cgroup. */
atomic_t congestion_count;
struct radix_tree_root blkg_tree;
struct blkcg_gq __rcu *blkg_hint;
struct hlist_head blkg_list;
struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
struct list_head all_blkcgs_node;
/*
* List of updated percpu blkg_iostat_set's since the last flush.
*/
struct llist_head __percpu *lhead;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
char fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
#endif
};
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
}
/*
* A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
* request_queue (q). This is used by blkcg policies which need to track
* information per blkcg - q pair.
*
* There can be multiple active blkcg policies and each blkg:policy pair is
* represented by a blkg_policy_data which is allocated and freed by each
* policy's pd_alloc/free_fn() methods. A policy can allocate private data
* area by allocating larger data structure which embeds blkg_policy_data
* at the beginning.
*/
struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
bool online;
};
/*
* Policies that need to keep per-blkcg data which is independent from any
* request_queue associated to it should implement cpd_alloc/free_fn()
* methods. A policy can allocate private data area by allocating larger
* data structure which embeds blkcg_policy_data at the beginning.
* cpd_init() is invoked to let each policy handle per-blkcg data.
*/
struct blkcg_policy_data {
/* the blkcg and policy id this per-policy data belongs to */
struct blkcg *blkcg;
int plid;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
struct seq_file *s);
struct blkcg_policy {
int plid;
/* cgroup files for the policy */
struct cftype *dfl_cftypes;
struct cftype *legacy_cftypes;
/* operations */
blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
blkcg_pol_free_cpd_fn *cpd_free_fn;
blkcg_pol_alloc_pd_fn *pd_alloc_fn;
blkcg_pol_init_pd_fn *pd_init_fn;
blkcg_pol_online_pd_fn *pd_online_fn;
blkcg_pol_offline_pd_fn *pd_offline_fn;
blkcg_pol_free_pd_fn *pd_free_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
blkcg_pol_stat_pd_fn *pd_stat_fn;
};
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;
void blkg_init_queue(struct request_queue *q);
int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);
/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
const struct blkcg_policy *pol);
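/*
 * Illustrative sketch only: a minimal registration flow, assuming
 * hypothetical foo_pd_alloc/init/free callbacks with the typedef'ed
 * signatures above.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 * blkcg_activate_policy() is then called per gendisk before the policy's
 * pd_* hooks are exercised for that disk's blkgs.
 */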
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
struct blkg_policy_data *, int),
const struct blkcg_policy *pol, int data,
bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
struct blkg_conf_ctx {
char *input;
char *body;
struct block_device *bdev;
struct blkcg_gq *blkg;
};
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
unsigned long blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags);
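/*
 * Illustrative sketch only: the usual blkg_conf_ctx lifecycle in a cgroup
 * file write handler; blkcg_policy_foo and foo_parse_and_apply() are
 * hypothetical.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, &ctx);
 *	if (!ret)
 *		ret = foo_parse_and_apply(ctx.blkg, ctx.body);
 *	blkg_conf_exit(&ctx);
 *	return ret ?: nbytes;
 */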
/**
* bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
* @bio: the target &bio
*
* Return: true if this bio needs to be submitted with the root blkg context.
*
* In order to avoid priority inversions we sometimes need to issue a bio as if
* it were attached to the root blkg, and then backcharge to the actual owning
* blkg. The idea is we do bio_blkcg_css() to look up the actual context for
* the bio and attach the appropriate blkg to the bio. Then we call this helper
* and if it is true run with the root blkg for that queue and then do any
* backcharging to the originating cgroup once the io is complete.
*/
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
/**
* blkg_lookup - lookup blkg for the specified blkcg - q pair
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
* Lookup blkg for the @blkcg - @q pair.
*
* Must be called in a RCU critical section.
*/
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
struct request_queue *q)
{
struct blkcg_gq *blkg;
if (blkcg == &blkcg_root)
return q->root_blkg;
blkg = rcu_dereference_check(blkcg->blkg_hint,
lockdep_is_held(&q->queue_lock));
if (blkg && blkg->q == q)
return blkg;
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
if (blkg && blkg->q != q)
blkg = NULL;
return blkg;
}
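/*
 * Illustrative usage sketch: blkg_lookup() itself takes no reference, so a
 * caller that needs the blkg past the RCU critical section pins it with
 * blkg_tryget()/blkg_put(); foo_use() is hypothetical.
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *	if (blkg) {
 *		foo_use(blkg);
 *		blkg_put(blkg);
 *	}
 */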
/**
* blkg_to_pd - get policy private data
* @blkg: blkg of interest
* @pol: policy of interest
*
* Return pointer to private data associated with the @blkg-@pol pair.
*/
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
struct blkcg_policy *pol)
{
return blkg ? blkg->pd[pol->plid] : NULL;
}
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
struct blkcg_policy *pol)
{
return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
* pd_to_blkg - get blkg associated with policy private data
* @pd: policy private data of interest
*
* @pd is policy private data. Determine the blkg it's associated with.
*/
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
return pd ? pd->blkg : NULL;
}
static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
return cpd ? cpd->blkcg : NULL;
}
/**
* blkg_get - get a blkg reference
* @blkg: blkg to get
*
* The caller should be holding an existing reference.
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
percpu_ref_get(&blkg->refcnt);
}
/**
* blkg_tryget - try and get a blkg reference
* @blkg: blkg to get
*
* This is for use when doing an RCU lookup of the blkg. We may be in the midst
* of freeing this blkg, so we can only use it if the refcnt is not zero.
*/
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
return blkg && percpu_ref_tryget(&blkg->refcnt);
}
/**
* blkg_put - put a blkg reference
* @blkg: blkg to put
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
percpu_ref_put(&blkg->refcnt);
}
/**
* blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
* @d_blkg: loop cursor pointing to the current descendant
* @pos_css: used for iteration
* @p_blkg: target blkg to walk descendants of
*
* Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
* read locked. If called under either blkcg or queue lock, the iteration
* is guaranteed to include all and only online blkgs. The caller may
* update @pos_css by calling css_rightmost_descendant() to skip subtree.
* @p_blkg is included in the iteration and the first node to be visited.
*/
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q)))
/**
* blkg_for_each_descendant_post - post-order walk of a blkg's descendants
* @d_blkg: loop cursor pointing to the current descendant
* @pos_css: used for iteration
* @p_blkg: target blkg to walk descendants of
*
* Similar to blkg_for_each_descendant_pre() but performs post-order
* traversal instead. Synchronization rules are the same. @p_blkg is
* included in the iteration and the last node to be visited.
*/
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q)))
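/*
 * Illustrative usage sketch: a pre-order subtree walk under RCU; the
 * per-blkg foo_update_stats() helper is hypothetical.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg)
 *		foo_update_stats(blkg);
 *	rcu_read_unlock();
 */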
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
return;
if (atomic_add_return(1, &blkg->use_delay) == 1)
atomic_inc(&blkg->blkcg->congestion_count);
}
static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
int old = atomic_read(&blkg->use_delay);
if (WARN_ON_ONCE(old < 0))
return 0;
if (old == 0)
return 0;
/*
* We do this song and dance because we can race with somebody else
* adding or removing delay. If we just did an atomic_dec we'd end up
* negative and we'd already be in trouble. We need to subtract 1 and
* then check to see if we were the last delay so we can drop the
* congestion count on the cgroup.
*/
while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
;
if (old == 0)
return 0;
if (old == 1)
atomic_dec(&blkg->blkcg->congestion_count);
return 1;
}
/**
* blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
* @blkg: target blkg
* @delay: delay duration in nsecs
*
* When enabled with this function, the delay is not decayed and must be
* explicitly cleared with blkcg_clear_delay(). Must not be mixed with
* blkcg_[un]use_delay() and blkcg_add_delay() usages.
*/
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
int old = atomic_read(&blkg->use_delay);
/* We only want 1 person setting the congestion count for this blkg. */
if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
atomic_inc(&blkg->blkcg->congestion_count);
atomic64_set(&blkg->delay_nsec, delay);
}
/**
* blkcg_clear_delay - Disable allocator delay mechanism
* @blkg: target blkg
*
* Disable use_delay mechanism. See blkcg_set_delay().
*/
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
int old = atomic_read(&blkg->use_delay);
/* We only want 1 person clearing the congestion count for this blkg. */
if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
atomic_dec(&blkg->blkcg->congestion_count);
}
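/*
 * Illustrative usage sketch: blkcg_set_delay() and blkcg_clear_delay() are
 * used as a matched pair (and never mixed with blkcg_[un]use_delay()); the
 * foo_over_limit() check and the 1ms value are hypothetical.
 *
 *	if (foo_over_limit(blkg))
 *		blkcg_set_delay(blkg, NSEC_PER_MSEC);
 *	else
 *		blkcg_clear_delay(blkg);
 */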
/**
* blk_cgroup_mergeable - Determine whether to allow or disallow merges
* @rq: request to merge into
* @bio: bio to merge
*
* @bio and @rq should belong to the same cgroup and their issue_as_root should
* match. The latter is necessary as we don't want to throttle e.g. a metadata
* update because it happens to be next to a regular IO.
*/
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
return rq->bio->bi_blkg == bio->bi_blkg &&
bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}
static inline bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
return pol && test_bit(pol->plid, q->blkcg_pols);
}
void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else /* CONFIG_BLK_CGROUP */
struct blkg_policy_data {
};
struct blkcg_policy_data {
};
struct blkcg_policy {
};
struct blkcg {
};
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline void blkg_init_queue(struct request_queue *q) { }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
const struct blkcg_policy *pol) { }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_PRIVATE_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the TCP protocol.
*
* Version: @(#)tcp.h 1.0.2 04/28/93
*
* Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*/
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H
#include <linux/skbuff.h>
#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <uapi/linux/tcp.h>
static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
return (struct tcphdr *)skb_transport_header(skb);
}
static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
return th->doff * 4;
}
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
return __tcp_hdrlen(tcp_hdr(skb));
}
static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
return (struct tcphdr *)skb_inner_transport_header(skb);
}
static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
return inner_tcp_hdr(skb)->doff * 4;
}
/**
* skb_tcp_all_headers - Returns size of all headers for a TCP packet
* @skb: buffer
*
* Used in TX path, for a packet known to be a TCP one.
*
* if (skb_is_gso(skb)) {
* int hlen = skb_tcp_all_headers(skb);
* ...
*/
static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
/**
* skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
* @skb: buffer
*
* Used in TX path, for a packet known to be a TCP one.
*
* if (skb_is_gso(skb) && skb->encapsulation) {
* int hlen = skb_inner_tcp_all_headers(skb);
* ...
*/
static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
return (tcp_hdr(skb)->doff - 5) * 4;
}
/* TCP Fast Open */
#define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_SIZE 8 /* the size employed by this impl. */
/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
__le64 val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))];
s8 len;
bool exp; /* In RFC6994 experimental option format */
};
/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
__be32 start_seq;
__be32 end_seq;
};
struct tcp_sack_block {
u32 start_seq;
u32 end_seq;
};
/* These are used to set the sack_ok field in struct tcp_options_received */
#define TCP_SACK_SEEN (1 << 0) /* 1 = peer is SACK capable, */
#define TCP_DSACK_SEEN (1 << 2) /* 1 = DSACK was received from peer */
struct tcp_options_received {
/* PAWS/RTTM data */
int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
u32 ts_recent; /* Time stamp to echo next */
u32 rcv_tsval; /* Time stamp value */
u32 rcv_tsecr; /* Time stamp echo reply */
u16 saw_tstamp : 1, /* Saw TIMESTAMP on last packet */
tstamp_ok : 1, /* TIMESTAMP seen on SYN packet */
dsack : 1, /* D-SACK is scheduled */
wscale_ok : 1, /* Wscale seen on SYN packet */
sack_ok : 3, /* SACK seen on SYN packet */
smc_ok : 1, /* SMC seen on SYN packet */
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
u8 accecn:6, /* AccECN index in header, 0=no options */
saw_unknown:1, /* Received unknown option */
unused:1;
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
};
static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
#if IS_ENABLED(CONFIG_SMC)
rx_opt->smc_ok = 0;
#endif
}
/* This is the max number of SACKs that we'll generate and process. It's safe
 * to increase this, although, since
 * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8),
 * only four SACK blocks will fit in a standard TCP header anyway. */
#define TCP_NUM_SACKS 4
struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
bool is_mptcp;
bool req_usec_ts;
#if IS_ENABLED(CONFIG_MPTCP)
bool drop_req;
#endif
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
u32 ts_off;
u32 snt_tsval_first;
u32 snt_tsval_last;
u32 last_oow_ack_time; /* last SYNACK */
u32 rcv_nxt; /* the ack # by SYNACK. For
* FastOpen it's the seq#
* after data-in-SYN.
*/
u8 syn_tos;
bool accecn_ok;
u8 syn_ect_snt: 2,
syn_ect_rcv: 2,
accecn_fail_mode:4;
u8 saw_accecn_opt :2;
#ifdef CONFIG_TCP_AO
u8 ao_keyid;
u8 ao_rcv_next;
bool used_tcp_ao;
#endif
};
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
return (struct tcp_request_sock *)req;
}
static inline bool tcp_rsk_used_ao(const struct request_sock *req)
{
#ifndef CONFIG_TCP_AO
return false;
#else
return tcp_rsk(req)->used_tcp_ao;
#endif
}
#define TCP_RMEM_TO_WIN_SCALE 8
struct tcp_sock {
/* Cacheline organization can be found documented in
* Documentation/networking/net_cachelines/tcp_sock.rst.
* Please update the document when adding new fields.
*/
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
/* TX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_tx);
u32 max_window; /* Maximal window ever seen from peer */
u32 rcv_ssthresh; /* Current window clamp */
u32 reordering; /* Packet reordering metric. */
u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u16 gso_segs; /* Max number of segs per GSO packet */
/* from STCP, retrans queue hinting */
struct sk_buff *retransmit_skb_hint;
#if defined(CONFIG_TLS_DEVICE)
void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
#endif
__cacheline_group_end(tcp_sock_read_tx);
/* TXRX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_txrx);
u32 tsoffset; /* timestamp offset */
u32 snd_wnd; /* The window we expect to receive */
u32 mss_cache; /* Cached effective mss, not including SACKS */
u32 snd_cwnd; /* Sending congestion window */
u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 lost_out; /* Lost packets */
u32 sacked_out; /* SACK'd packets */
u16 tcp_header_len; /* Bytes of tcp header to send */
u8 scaling_ratio; /* see tcp_win_from_space() */
u8 chrono_type : 2, /* current chronograph type */
repair : 1,
tcp_usec_ts : 1, /* TSval values in usec */
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
__cacheline_group_end(tcp_sock_read_txrx);
/* RX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_rx);
u32 copied_seq; /* Head of yet unread data */
u32 snd_wl1; /* Sequence for window update */
u32 tlp_high_seq; /* snd_nxt at the time of TLP */
u32 rttvar_us; /* smoothed mdev_max */
u32 retrans_out; /* Retransmitted packets out */
u16 advmss; /* Advertised MSS */
u16 urg_data; /* Saved octet of OOB data and control flags */
u32 lost; /* Total data packets lost incl. rexmits */
u32 snd_ssthresh; /* Slow start size threshold */
struct minmax rtt_min;
/* OOO segments go in this rbtree. Socket lock must be held. */
struct rb_root out_of_order_queue;
__cacheline_group_end(tcp_sock_read_rx);
/* TX read-write hotpath cache lines */
__cacheline_group_begin(tcp_sock_write_tx) ____cacheline_aligned;
u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
* The total number of segments sent.
*/
u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
* total number of data segments sent.
*/
u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
* total number of data bytes sent.
*/
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 mdev_us; /* medium deviation */
u32 rtt_seq; /* sequence number to update rttvar */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
u64 accecn_opt_tstamp; /* Last AccECN option sent timestamp */
struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
struct sk_buff *highest_sack; /* skb just after the highest
* skb with SACKed bit set
* (validity guaranteed only if
* sacked_out > 0)
*/
u8 ecn_flags; /* ECN status bits. */
__cacheline_group_end(tcp_sock_write_tx);
/* TXRX read-write hotpath cache lines */
__cacheline_group_begin(tcp_sock_write_txrx);
/*
* Header prediction flags
* 0x5?10 << 16 + snd_wnd in net byte order
*/
u8 nonagle : 4,/* Disable Nagle algorithm? */
rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
u8 received_ce_pending:4, /* Not yet transmit cnt of received_ce */
unused2:4;
u8 accecn_minlen:2,/* Minimum length of AccECN option sent */
est_ecnfield:2,/* ECN field for AccECN delivered estimates */
accecn_opt_demand:2,/* Demand AccECN option for n next ACKs */
prev_ecnfield:2; /* ECN bits from the previous segment */
__be32 pred_flags;
u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
u64 tcp_mstamp; /* most recent packet received/sent */
u32 rcv_nxt; /* What we want to receive next */
u32 snd_nxt; /* Next sequence we send */
u32 snd_una; /* First byte we want an ack for */
u32 window_clamp; /* Maximal window to advertise */
u32 srtt_us; /* smoothed round trip time << 3 in usecs */
u32 packets_out; /* Packets which are "in flight" */
u32 snd_up; /* Urgent pointer */
u32 delivered; /* Total data packets delivered incl. rexmits */
u32 delivered_ce; /* Like the above but only ECE marked packets */
u32 received_ce; /* Like the above but for rcvd CE marked pkts */
u32 received_ecn_bytes[3]; /* received byte counters for three ECN
* types: INET_ECN_ECT_1, INET_ECN_ECT_0,
* and INET_ECN_CE
*/
u32 app_limited; /* limited until "delivered" reaches this val */
u32 rcv_wnd; /* Current receiver window */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
struct tcp_options_received rx_opt;
__cacheline_group_end(tcp_sock_write_txrx);
/* RX read-write hotpath cache lines */
__cacheline_group_begin(tcp_sock_write_rx) __aligned(8);
u64 bytes_received;
/* RFC4898 tcpEStatsAppHCThruOctetsReceived
* sum(delta(rcv_nxt)), or how many bytes
* were acked.
*/
u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
* total number of segments in.
*/
u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
* total number of data segments in.
*/
u32 rcv_wup; /* rcv_nxt on last window update sent */
u32 max_packets_out; /* max packets_out in last window */
u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
u32 rate_delivered; /* saved rate sample: packets delivered */
u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_rtt_last_tsecr;
u32 delivered_ecn_bytes[3];
u64 first_tx_mstamp; /* start of window send phase */
u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
struct {
u32 rtt_us;
u32 seq;
u64 time;
} rcv_rtt_est;
/* Receiver queue space */
struct {
int space;
u32 seq;
u64 time;
} rcvq_space;
__cacheline_group_end(tcp_sock_write_rx);
/* End of Hot Path */
/*
* RFC793 variables by their proper names. This means you can
* read the code and the spec side by side (and laugh ...)
* See RFC793 and RFC1122. The RFC writes these in capitals.
*/
u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
* total number of DSACK blocks received
*/
u32 compressed_ack_rcv_nxt;
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
/* Information of the most recently (s)acked skb */
struct tcp_rack {
u64 mstamp; /* (Re)sent time of the skb */
u32 rtt_us; /* Associated RTT */
u32 end_seq; /* Ending TCP sequence of the skb */
u32 last_delivered; /* tp->delivered at last reo_wnd adj */
u8 reo_wnd_steps; /* Allowed reordering window */
#define TCP_RACK_RECOVERY_THRESH 16
u8 reo_wnd_persist:5, /* No. of recovery since last adj */
dsack_seen:1, /* Whether DSACK seen after last adj */
advanced:1; /* mstamp advanced since last lost marking */
} rack;
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
syn_ect_snt:2, /* AccECN ECT memory, only */
syn_ect_rcv:2; /* ... needed during 3WHS + first seqno */
u8 thin_lto : 1,/* Use linear timeouts for thin streams */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
fastopen_client_fail:2, /* reason why fastopen failed */
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
u8 save_syn:2, /* Save headers of SYN packet */
syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
syn_fastopen_child:1; /* created TFO passive child socket */
u8 keepalive_probes; /* num of allowed keep alive probes */
u8 accecn_fail_mode:4, /* AccECN failure handling */
saw_accecn_opt:2; /* An AccECN option was seen */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
/* RTT measurement */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 reord_seen; /* number of data packet reordering events */
/*
* Slow start and congestion control (see also Nagle, and Karn & Partridge)
*/
u32 snd_cwnd_cnt; /* Linear increase counter */
u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
u32 snd_cwnd_used;
u32 snd_cwnd_stamp;
u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
struct hrtimer pacing_timer;
struct hrtimer compressed_ack_timer;
struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
struct tcp_sack_block recv_sack_cache[4];
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
u32 retrans_stamp; /* Timestamp of the last retransmit,
* also used in SYN-SENT to remember stamp of
* the first SYN. */
u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
* while socket was owned by user.
*/
u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans
* Total data bytes retransmitted
*/
u32 total_retrans; /* Total retransmits for entire connection */
u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */
u16 total_rto; /* Total number of RTO timeouts, including
* SYN/SYN-ACK and recurring timeouts.
*/
u16 total_rto_recoveries; /* Total number of RTO recoveries,
* including any unfinished recovery.
*/
u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */
u32 urg_seq; /* Seq of received urgent pointer */
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
int linger2;
/* Sock_ops bpf program related variables */
#ifdef CONFIG_BPF
u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs
* values defined in uapi/linux/tcp.h
*/
u8 bpf_chg_cc_inprogress:1; /* In the middle of
* bpf_setsockopt(TCP_CONGESTION),
* it is to avoid the bpf_tcp_cc->init()
* to recur itself by calling
* bpf_setsockopt(TCP_CONGESTION, "itself").
*/
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
#else
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
#endif
u16 timeout_rehash; /* Timeout-triggered rehash attempts */
u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
/* TCP-specific MTU probe information. */
struct {
u32 probe_seq_start;
u32 probe_seq_end;
} mtu_probe;
u32 plb_rehash; /* PLB-triggered rehash attempts */
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
#if IS_ENABLED(CONFIG_SMC)
bool syn_smc; /* SYN includes SMC */
bool (*smc_hs_congested)(const struct sock *sk);
#endif
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */
const struct tcp_sock_af_ops *af_specific;
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 Signature Option information */
struct tcp_md5sig_info __rcu *md5sig_info;
#endif
#ifdef CONFIG_TCP_AO
struct tcp_ao_info __rcu *ao_info;
#endif
#endif
/* TCP fastopen related information */
struct tcp_fastopen_request *fastopen_req;
/* fastopen_rsk points to request_sock that resulted in this big
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock __rcu *fastopen_rsk;
struct saved_syn *saved_syn;
};
enum tsq_enum {
TSQ_THROTTLED,
TSQ_QUEUED,
TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */
TCP_WRITE_TIMER_DEFERRED, /* tcp_write_timer() found socket was owned */
TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
* tcp_v{4|6}_mtu_reduced()
*/
TCP_ACK_DEFERRED, /* TX pure ack is deferred */
};
enum tsq_flags {
TSQF_THROTTLED = BIT(TSQ_THROTTLED),
TSQF_QUEUED = BIT(TSQ_QUEUED),
TCPF_TSQ_DEFERRED = BIT(TCP_TSQ_DEFERRED),
TCPF_WRITE_TIMER_DEFERRED = BIT(TCP_WRITE_TIMER_DEFERRED),
TCPF_DELACK_TIMER_DEFERRED = BIT(TCP_DELACK_TIMER_DEFERRED),
TCPF_MTU_REDUCED_DEFERRED = BIT(TCP_MTU_REDUCED_DEFERRED),
TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED),
};
#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
/* Variant of tcp_sk() upgrading a const sock to a read/write tcp socket.
* Used in context of (lockless) tcp listeners.
*/
#define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
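/*
 * Illustrative usage sketch: tcp_sk() is how TCP-private state is reached
 * from a generic struct sock; lockless readers pair it with READ_ONCE() as
 * done elsewhere in this header.
 *
 *	const struct tcp_sock *tp = tcp_sk(sk);
 *	u32 notsent = READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_nxt);
 */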
struct tcp_timewait_sock {
struct inet_timewait_sock tw_sk;
#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
u32 tw_rcv_wnd;
u32 tw_ts_offset;
u32 tw_ts_recent;
/* The time we sent the last out-of-window ACK: */
u32 tw_last_oow_ack_time;
int tw_ts_recent_stamp;
u32 tw_tx_delay;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
#ifdef CONFIG_TCP_AO
struct tcp_ao_info __rcu *ao_info;
#endif
};
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
return (struct tcp_timewait_sock *)sk;
}
static inline bool tcp_passive_fastopen(const struct sock *sk)
{
return sk->sk_state == TCP_SYN_RECV &&
rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
}
static inline void fastopen_queue_tune(struct sock *sk, int backlog)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
}
static inline void tcp_move_syn(struct tcp_sock *tp,
struct request_sock *req)
{
tp->saved_syn = req->saved_syn;
req->saved_syn = NULL;
}
static inline void tcp_saved_syn_free(struct tcp_sock *tp)
{
kfree(tp->saved_syn);
tp->saved_syn = NULL;
}
static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
{
return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
saved_syn->tcp_hdrlen;
}
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
const struct sk_buff *orig_skb,
const struct sk_buff *ack_skb);
static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
/* We use READ_ONCE() here because socket might not be locked.
* This happens for listeners.
*/
u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
return (user_mss && user_mss < mss) ? user_mss : mss;
}
int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
int shiftlen);
void __tcp_sock_set_cork(struct sock *sk, bool on);
void tcp_sock_set_cork(struct sock *sk, bool on);
int tcp_sock_set_keepcnt(struct sock *sk, int val);
int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
int tcp_sock_set_keepidle(struct sock *sk, int val);
int tcp_sock_set_keepintvl(struct sock *sk, int val);
void __tcp_sock_set_nodelay(struct sock *sk, bool on);
void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
int tcp_sock_set_user_timeout(struct sock *sk, int val);
int tcp_sock_set_maxseg(struct sock *sk, int val);
static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
{
return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS);
}
#endif /* _LINUX_TCP_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Resizable, Scalable, Concurrent Hash Table
*
* Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
* Code partially derived from nft_hash
* Rewritten with rehash code from br_multicast plus single list
* pointer as suggested by Josh Triplett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <linux/bit_spinlock.h>
#include <linux/rhashtable-types.h>
/*
* Objects in an rhashtable have an embedded struct rhash_head
* which is linked into a hash chain from the hash table - or one
* of two or more hash tables when the rhashtable is being resized.
* The end of the chain is marked with a special nulls marker which has
* the least significant bit set but otherwise stores the address of
* the hash bucket. This allows us to be sure we've found the end
* of the right list.
* The value stored in the hash bucket has BIT(0) used as a lock bit.
* This bit must be atomically set before any changes are made to
* the chain. To avoid dereferencing this pointer without clearing
* the bit first, we use an opaque 'struct rhash_lock_head *' for the
* pointer stored in the bucket. This struct needs to be defined so
* that rcu_dereference() works on it, but it has no content so a
* cast is needed for it to be useful. This ensures it isn't
* used by mistake without clearing the lock bit first.
*/
struct rhash_lock_head {};
/* Maximum chain length before rehash
*
* The maximum (not average) chain length grows with the size of the hash
* table, at a rate of (log N)/(log log N).
*
* The value of 16 is selected so that even if the hash table grew to
* 2^32 you would not expect the maximum chain length to exceed it
* unless we are under attack (or extremely unlucky).
*
* As this limit is only to detect attacks, we don't need to set it to a
* lower value as you'd need the chain length to vastly exceed 16 to have
* any real effect on the system.
*/
#define RHT_ELASTICITY 16u
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
* @nest: Number of bits of first-level nested table.
* @hash_rnd: Random seed to fold into hash
* @walkers: List of active walkers
* @rcu: RCU structure for freeing the table
* @future_tbl: Table under construction during rehashing
* @dep_map: Lockdep map for the per-bucket bit spin locks
* @buckets: size * hash buckets
*/
struct bucket_table {
unsigned int size;
unsigned int nest;
u32 hash_rnd;
struct list_head walkers;
struct rcu_head rcu;
struct bucket_table __rcu *future_tbl;
struct lockdep_map dep_map;
struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/*
* NULLS_MARKER() expects a hash value with the low
* bits most likely to be significant, and it discards
* the msb.
* We give it an address, in which the bottom bit is
* always 0, and the msb might be significant.
* So we shift the address down one bit to align with
* expectations and avoid losing a significant bit.
*
* We never store the NULLS_MARKER in the hash table
* itself as we need the lsb for locking.
* Instead we store NULL when the chain is empty.
*/
#define RHT_NULLS_MARKER(ptr) \
((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr) \
((ptr) = NULL)
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
return ((unsigned long) ptr & 1);
}
static inline void *rht_obj(const struct rhashtable *ht,
const struct rhash_head *he)
{
return (char *)he - ht->p.head_offset;
}
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
unsigned int hash)
{
return hash & (tbl->size - 1);
}
static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
const void *key, const struct rhashtable_params params,
unsigned int hash_rnd)
{
unsigned int hash;
/* params must be equal to ht->p if it isn't constant. */
if (!__builtin_constant_p(params.key_len))
hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
else if (params.key_len) {
unsigned int key_len = params.key_len;
if (params.hashfn)
hash = params.hashfn(key, key_len, hash_rnd);
else if (key_len & (sizeof(u32) - 1))
hash = jhash(key, key_len, hash_rnd);
else
hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
} else {
unsigned int key_len = ht->p.key_len;
if (params.hashfn)
hash = params.hashfn(key, key_len, hash_rnd);
else
hash = jhash(key, key_len, hash_rnd);
}
return hash;
}
static __always_inline unsigned int rht_key_hashfn(
struct rhashtable *ht, const struct bucket_table *tbl,
const void *key, const struct rhashtable_params params)
{
unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
return rht_bucket_index(tbl, hash);
}
static __always_inline unsigned int rht_head_hashfn(
struct rhashtable *ht, const struct bucket_table *tbl,
const struct rhash_head *he, const struct rhashtable_params params)
{
const char *ptr = rht_obj(ht, he);
return likely(params.obj_hashfn) ?
rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
ht->p.key_len,
tbl->hash_rnd)) :
rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
* rht_grow_above_75 - returns true if nelems > 0.75 * table-size
* @ht: hash table
* @tbl: current table
*/
static inline bool rht_grow_above_75(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
/* Expand table when exceeding 75% load */
return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
(!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
* rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
* @ht: hash table
* @tbl: current table
*/
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
/* Shrink table beneath 30% load */
return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
tbl->size > ht->p.min_size;
}
/**
* rht_grow_above_100 - returns true if nelems > table-size
* @ht: hash table
* @tbl: current table
*/
static inline bool rht_grow_above_100(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
return atomic_read(&ht->nelems) > tbl->size &&
(!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
* rht_grow_above_max - returns true if table is above maximum
* @ht: hash table
* @tbl: current table
*/
static inline bool rht_grow_above_max(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
return atomic_read(&ht->nelems) >= ht->max_elems;
}
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
return 1;
}
static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
u32 hash)
{
return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj);
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
(void)rhashtable_walk_start_check(iter);
}
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
struct rhash_lock_head __rcu **rht_bucket_nested(
const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **__rht_bucket_nested(
const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
#define rht_dereference_rcu(p, ht) \
rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))
#define rht_dereference_bucket(p, tbl, hash) \
rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
#define rht_dereference_bucket_rcu(p, tbl, hash) \
rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
static inline struct rhash_lock_head __rcu *const *rht_bucket(
const struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
static inline struct rhash_lock_head __rcu **rht_bucket_var(
struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
static inline struct rhash_lock_head __rcu **rht_bucket_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
&tbl->buckets[hash];
}
/*
* We lock a bucket by setting BIT(0) in the pointer - this is always
* zero in real pointers. The NULLS mark is never stored in the bucket,
* rather we store NULL if the bucket is empty.
* bit_spin_locks do not handle contention well, but the whole point
* of the hashtable design is to achieve minimum per-bucket contention.
* A nested hash table might not have a bucket pointer. In that case
* we cannot get a lock. For remove and replace the bucket cannot be
* interesting and doesn't need locking.
* For insert we allocate the bucket if this is the last bucket_table,
* and then take the lock.
* Sometimes we unlock a bucket by writing a new pointer there. In that
* case we don't need to unlock, but we do need to reset state such as
* local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
* provides the same release semantics that bit_spin_unlock() provides,
* this is safe.
* When we write to a bucket without unlocking, we use rht_assign_locked().
*/
static inline unsigned long rht_lock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt)
{
unsigned long flags;
local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bkt);
lock_map_acquire(&tbl->dep_map);
return flags;
}
static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
{
unsigned long flags;
local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bucket);
lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
return flags;
}
static inline void rht_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
unsigned long flags)
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
local_irq_restore(flags);
}
static inline struct rhash_head *__rht_ptr(
struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
{
return (struct rhash_head *) ((unsigned long)p & ~BIT(0) ?:
(unsigned long)RHT_NULLS_MARKER(bkt));
}
/*
* Where 'bkt' is a bucket and might be locked:
* rht_ptr_rcu() dereferences that pointer and clears the lock bit.
* rht_ptr() dereferences in a context where the bucket is locked.
* rht_ptr_exclusive() dereferences in a context where exclusive
* access is guaranteed, such as when destroying the table.
*/
static inline struct rhash_head *rht_ptr_rcu(
struct rhash_lock_head __rcu *const *bkt)
{
return __rht_ptr(rcu_dereference_all(*bkt), bkt);
}
static inline struct rhash_head *rht_ptr(
struct rhash_lock_head __rcu *const *bkt,
struct bucket_table *tbl,
unsigned int hash)
{
return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
}
static inline struct rhash_head *rht_ptr_exclusive(
struct rhash_lock_head __rcu *const *bkt)
{
return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
}
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj)
{
if (rht_is_a_nulls(obj))
obj = NULL;
rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
}
static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj,
unsigned long flags)
{
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
local_irq_restore(flags);
}
/**
* rht_for_each_from - iterate over hash chain from given head
* @pos: the &struct rhash_head to use as a loop cursor.
* @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*/
#define rht_for_each_from(pos, head, tbl, hash) \
for (pos = head; \
!rht_is_a_nulls(pos); \
pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each - iterate over hash chain
* @pos: the &struct rhash_head to use as a loop cursor.
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*/
#define rht_for_each(pos, tbl, hash) \
rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
tbl, hash)
/**
* rht_for_each_entry_from - iterate over hash chain from given head
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
* @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
*/
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
for (pos = head; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each_entry - iterate over hash chain of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
*/
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
rht_for_each_entry_from(tpos, pos, \
rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
tbl, hash, member)
/**
* rht_for_each_entry_safe - safely iterate over hash chain of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
* @next: the &struct rhash_head to use as next in loop cursor.
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive allows for the looped code to
* remove the loop cursor from the list.
*/
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
next = !rht_is_a_nulls(pos) ? \
rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = next, \
next = !rht_is_a_nulls(pos) ? \
rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
* rht_for_each_rcu_from - iterate over rcu hash chain from given head
* @pos: the &struct rhash_head to use as a loop cursor.
* @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*
* This hash chain list-traversal primitive may safely run concurrently with
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_rcu_from(pos, head, tbl, hash) \
for (({barrier(); }), \
pos = head; \
!rht_is_a_nulls(pos); \
pos = rcu_dereference_all(pos->next))
/**
* rht_for_each_rcu - iterate over rcu hash chain
* @pos: the &struct rhash_head to use as a loop cursor.
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*
* This hash chain list-traversal primitive may safely run concurrently with
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_rcu(pos, tbl, hash) \
for (({barrier(); }), \
pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
!rht_is_a_nulls(pos); \
pos = rcu_dereference_all(pos->next))
/**
* rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
* @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive may safely run concurrently with
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
for (({barrier(); }), \
pos = head; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
* rht_for_each_entry_rcu - iterate over rcu hash chain of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive may safely run concurrently with
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
rht_for_each_entry_rcu_from(tpos, pos, \
rht_ptr_rcu(rht_bucket(tbl, hash)), \
tbl, hash, member)
/**
* rhl_for_each_rcu - iterate over rcu hash table list
* @pos: the &struct rhlist_head to use as a loop cursor.
* @list: the head of the list
*
* This hash chain list-traversal primitive should be used on the
* list returned by rhltable_lookup.
*/
#define rhl_for_each_rcu(pos, list) \
for (pos = list; pos; pos = rcu_dereference_all(pos->next))
/**
* rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhlist_head to use as a loop cursor.
* @list: the head of the list
* @member: name of the &struct rhlist_head within the hashable struct.
*
* This hash chain list-traversal primitive should be used on the
* list returned by rhltable_lookup.
*/
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
for (pos = list; pos && rht_entry(tpos, pos, member); \
pos = rcu_dereference_all(pos->next))
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
{
struct rhashtable *ht = arg->ht;
const char *ptr = obj;
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
/* Internal function, do not use. */
static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
.key = key,
};
struct rhash_lock_head __rcu *const *bkt;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
bkt = rht_bucket(tbl, hash);
do {
rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
if (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
continue;
return he;
}
/* An object might have been moved to a different hash chain,
* while we walk along it - better check and retry.
*/
} while (he != RHT_NULLS_MARKER(bkt));
/* Ensure we see any new tables. */
smp_rmb();
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(tbl))
goto restart;
return NULL;
}
/**
* rhashtable_lookup - search hash table
* @ht: hash table
* @key: the pointer to the key
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
* for an entry with an identical key. The first matching entry is returned.
*
* This must only be called under the RCU read lock.
*
* Returns the first entry on which the compare function returned true.
*/
static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params);
return he ? rht_obj(ht, he) : NULL;
}
/**
* rhashtable_lookup_fast - search hash table, without RCU read lock
* @ht: hash table
* @key: the pointer to the key
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
* for an entry with an identical key. The first matching entry is returned.
*
* Only use this function when you have other mechanisms guaranteeing
* that the object won't go away after the RCU read lock is released.
*
* Returns the first entry on which the compare function returned true.
*/
static __always_inline void *rhashtable_lookup_fast(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
void *obj;
rcu_read_lock();
obj = rhashtable_lookup(ht, key, params);
rcu_read_unlock();
return obj;
}
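/*
 * Illustrative sketch only: a typical fixed-key table and lookup; struct foo,
 * foo_params and foo_ht are hypothetical.
 *
 *	struct foo {
 *		u32 key;
 *		struct rhash_head node;
 *	};
 *
 *	static const struct rhashtable_params foo_params = {
 *		.key_len	= sizeof(u32),
 *		.key_offset	= offsetof(struct foo, key),
 *		.head_offset	= offsetof(struct foo, node),
 *		.automatic_shrinking = true,
 *	};
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&foo_ht, &key, foo_params);
 *	rcu_read_unlock();
 */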
/**
* rhltable_lookup - search hash list table
* @hlt: hash table
* @key: the pointer to the key
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
* for an entry with an identical key. All matching entries are returned
* in a list.
*
* This must only be called under the RCU read lock.
*
* Returns the list of entries that match the given key.
*/
static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
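/*
 * Illustrative usage sketch: iterating all entries that share one key in an
 * rhltable; struct bar (with an embedded struct rhlist_head named "node"),
 * bar_hlt and bar_params are hypothetical.
 *
 *	struct rhlist_head *list, *pos;
 *	struct bar *obj;
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&bar_hlt, &key, bar_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, node)
 *		bar_handle(obj);
 *	rcu_read_unlock();
 */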
/* Internal function, please use rhashtable_insert_fast() instead. This
* function returns the existing element already in the hash table if there is a clash,
* otherwise it returns an error via ERR_PTR().
*/
static __always_inline void *__rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params, bool rhlist)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
.key = key,
};
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
unsigned long flags;
unsigned int hash;
int elasticity;
void *data;
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
hash = rht_head_hashfn(ht, tbl, obj, params);
elasticity = RHT_ELASTICITY;
bkt = rht_bucket_insert(ht, tbl, hash);
data = ERR_PTR(-ENOMEM);
if (!bkt)
goto out;
pprev = NULL;
flags = rht_lock(tbl, bkt);
if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
rht_unlock(tbl, bkt, flags);
rcu_read_unlock();
return rhashtable_insert_slow(ht, key, obj);
}
rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *plist;
struct rhlist_head *list;
elasticity--;
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
rhashtable_compare(&arg, rht_obj(ht, head)))) {
pprev = &head->next;
continue;
}
data = rht_obj(ht, head);
if (!rhlist)
goto out_unlock;
list = container_of(obj, struct rhlist_head, rhead);
plist = container_of(head, struct rhlist_head, rhead);
RCU_INIT_POINTER(list->next, plist);
head = rht_dereference_bucket(head->next, tbl, hash);
RCU_INIT_POINTER(list->rhead.next, head);
if (pprev) {
rcu_assign_pointer(*pprev, obj);
rht_unlock(tbl, bkt, flags);
} else
rht_assign_unlock(tbl, bkt, obj, flags);
data = NULL;
goto out;
}
if (elasticity <= 0)
goto slow_path;
data = ERR_PTR(-E2BIG);
if (unlikely(rht_grow_above_max(ht, tbl)))
goto out_unlock;
if (unlikely(rht_grow_above_100(ht, tbl)))
goto slow_path;
/* Inserting at head of list makes unlocking free. */
head = rht_ptr(bkt, tbl, hash);
RCU_INIT_POINTER(obj->next, head);
if (rhlist) {
struct rhlist_head *list;
list = container_of(obj, struct rhlist_head, rhead);
RCU_INIT_POINTER(list->next, NULL);
}
atomic_inc(&ht->nelems);
rht_assign_unlock(tbl, bkt, obj, flags);
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
data = NULL;
out:
rcu_read_unlock();
return data;
out_unlock:
rht_unlock(tbl, bkt, flags);
goto out;
}
/**
* rhashtable_insert_fast - insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
* Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
* they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
static __always_inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
void *ret;
ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
if (IS_ERR(ret))
return PTR_ERR(ret);
return ret == NULL ? 0 : -EEXIST;
}
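/*
 * Illustrative sketch (not part of the original header): a minimal
 * fixed-key table keyed by a u32 embedded in the object. The names
 * "struct my_obj", "my_params", "my_table_setup" and "my_add" are
 * hypothetical and serve only this and the following sketches.
 */
struct my_obj {
	u32 key;
	struct rhash_head node;		/* links the object into the table */
	struct rcu_head rcu;		/* for kfree_rcu() in a later sketch */
};

static const struct rhashtable_params my_params = {
	.key_len	     = sizeof(u32),
	.key_offset	     = offsetof(struct my_obj, key),
	.head_offset	     = offsetof(struct my_obj, node),
	.automatic_shrinking = true,
};

static inline int my_table_setup(struct rhashtable *my_table)
{
	return rhashtable_init(my_table, &my_params);
}

static inline int my_add(struct rhashtable *my_table, struct my_obj *obj)
{
	/* 0 on success, -EEXIST if an entry with this key already exists. */
	return rhashtable_insert_fast(my_table, &obj->node, my_params);
}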
/**
* rhltable_insert_key - insert object into hash list table
* @hlt: hash list table
* @key: the pointer to the key
* @list: pointer to hash list head inside object
* @params: hash table parameters
*
* Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
* they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
static __always_inline int rhltable_insert_key(
struct rhltable *hlt, const void *key, struct rhlist_head *list,
const struct rhashtable_params params)
{
return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
params, true));
}
/**
* rhltable_insert - insert object into hash list table
* @hlt: hash list table
* @list: pointer to hash list head inside object
* @params: hash table parameters
*
* Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
* they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
static __always_inline int rhltable_insert(
struct rhltable *hlt, struct rhlist_head *list,
const struct rhashtable_params params)
{
const char *key = rht_obj(&hlt->ht, &list->rhead);
key += params.key_offset;
return rhltable_insert_key(hlt, key, list, params);
}
/**
* rhashtable_lookup_insert_fast - lookup and insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
* This lookup function may only be used for fixed key hash table (key_len
* parameter set). It will BUG() if used inappropriately.
*
* It is safe to call this function from atomic context.
*
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
static __always_inline int rhashtable_lookup_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
const char *key = rht_obj(ht, obj);
void *ret;
BUG_ON(ht->p.obj_hashfn);
ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
false);
if (IS_ERR(ret))
return PTR_ERR(ret);
return ret == NULL ? 0 : -EEXIST;
}
/**
* rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
* Just like rhashtable_lookup_insert_fast(), but this function returns the
* object if it exists, NULL if it did not and the insertion was successful,
* and an ERR_PTR otherwise.
*/
static __always_inline void *rhashtable_lookup_get_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
const char *key = rht_obj(ht, obj);
BUG_ON(ht->p.obj_hashfn);
return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
false);
}
/**
* rhashtable_lookup_insert_key - search and insert object to hash table
* with explicit key
* @ht: hash table
* @key: key
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
* Lookups may occur in parallel with hashtable mutations and resizing.
*
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*
* Returns zero on success.
*/
static __always_inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
void *ret;
	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
return PTR_ERR(ret);
return ret == NULL ? 0 : -EEXIST;
}
/**
* rhashtable_lookup_get_insert_key - lookup and insert object into hash table
* @ht: hash table
* @key: key
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
* Just like rhashtable_lookup_insert_key(), but this function returns the
* object if it exists, NULL if it does not and the insertion was successful,
* and an ERR_PTR otherwise.
*/
static __always_inline void *rhashtable_lookup_get_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}
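/*
 * Illustrative sketch (not part of the original header): handling the
 * three possible outcomes of rhashtable_lookup_get_insert_fast() /
 * rhashtable_lookup_get_insert_key(). It reuses the hypothetical
 * "struct my_obj" and "my_params" from the sketch above.
 */
static inline struct my_obj *my_get_or_insert(struct rhashtable *ht,
					      struct my_obj *candidate)
{
	void *old;

	old = rhashtable_lookup_get_insert_fast(ht, &candidate->node,
						my_params);
	if (IS_ERR(old))
		return old;	/* propagate -ENOMEM, -E2BIG, ... */
	if (old)
		return old;	/* an entry with this key already existed */
	return candidate;	/* our object is now in the table */
}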
/* Internal function, please use rhashtable_remove_fast() instead */
static __always_inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
{
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned long flags;
unsigned int hash;
int err = -ENOENT;
	hash = rht_head_hashfn(ht, tbl, obj, params);
	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
return -ENOENT;
	pprev = NULL;
	flags = rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *list;
list = container_of(he, struct rhlist_head, rhead);
if (he != obj) {
struct rhlist_head __rcu **lpprev;
pprev = &he->next;
if (!rhlist)
continue;
do {
lpprev = &list->next;
list = rht_dereference_bucket(list->next,
tbl, hash);
} while (list && obj != &list->rhead);
if (!list)
continue;
list = rht_dereference_bucket(list->next, tbl, hash);
RCU_INIT_POINTER(*lpprev, list);
err = 0;
break;
}
obj = rht_dereference_bucket(obj->next, tbl, hash);
err = 1;
if (rhlist) {
list = rht_dereference_bucket(list->next, tbl, hash);
if (list) {
RCU_INIT_POINTER(list->rhead.next, obj);
obj = &list->rhead;
err = 0;
}
}
if (pprev) {
rcu_assign_pointer(*pprev, obj);
rht_unlock(tbl, bkt, flags);
} else {
rht_assign_unlock(tbl, bkt, obj, flags);
}
goto unlocked;
}
rht_unlock(tbl, bkt, flags);
unlocked:
	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
rht_shrink_below_30(ht, tbl)))
schedule_work(&ht->run_work);
err = 0;
}
return err;
}
/* Internal function, please use rhashtable_remove_fast() instead */
static __always_inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params, bool rhlist)
{
struct bucket_table *tbl;
int err;
	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
/* Because we have already taken (and released) the bucket
* lock in old_tbl, if we find that future_tbl is not yet
* visible then that guarantees the entry to still be in
* the old tbl if it exists.
*/
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
;
	rcu_read_unlock();

	return err;
}
/**
* rhashtable_remove_fast - remove object from hash table
* @ht: hash table
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
* Since the hash chain is single linked, the removal operation needs to
* walk the bucket chain upon removal. The removal operation is thus
* considerably slower if the hash table is not correctly sized.
*
* Will automatically shrink the table if permitted when residency drops
* below 30%.
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
static __always_inline int rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
return __rhashtable_remove_fast(ht, obj, params, false);
}
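/*
 * Illustrative sketch (not part of the original header): remove an
 * object and free it only after an RCU grace period, since concurrent
 * lookups may still be walking the chain under rcu_read_lock(). It uses
 * the struct rcu_head embedded in the hypothetical "struct my_obj"
 * defined in the earlier sketch.
 */
static inline void my_del(struct rhashtable *ht, struct my_obj *obj)
{
	if (rhashtable_remove_fast(ht, &obj->node, my_params) == 0)
		kfree_rcu(obj, rcu);	/* deferred free after grace period */
}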
/**
* rhltable_remove - remove object from hash list table
* @hlt: hash list table
* @list: pointer to hash list head inside object
* @params: hash table parameters
*
* Since the hash chain is single linked, the removal operation needs to
* walk the bucket chain upon removal. The removal operation is thus
* considerably slower if the hash table is not correctly sized.
*
* Will automatically shrink the table if permitted when residency drops
* below 30%.
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
static __always_inline int rhltable_remove(
struct rhltable *hlt, struct rhlist_head *list,
const struct rhashtable_params params)
{
return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}
/* Internal function, please use rhashtable_replace_fast() instead */
static __always_inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
{
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned long flags;
unsigned int hash;
int err = -ENOENT;
/* Minimally, the old and new objects must have same hash
* (which should mean identifiers are the same).
*/
hash = rht_head_hashfn(ht, tbl, obj_old, params);
if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
return -EINVAL;
bkt = rht_bucket_var(tbl, hash);
if (!bkt)
return -ENOENT;
pprev = NULL;
flags = rht_lock(tbl, bkt);
rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (he != obj_old) {
pprev = &he->next;
continue;
}
rcu_assign_pointer(obj_new->next, obj_old->next);
if (pprev) {
rcu_assign_pointer(*pprev, obj_new);
rht_unlock(tbl, bkt, flags);
} else {
rht_assign_unlock(tbl, bkt, obj_new, flags);
}
err = 0;
goto unlocked;
}
rht_unlock(tbl, bkt, flags);
unlocked:
return err;
}
/**
* rhashtable_replace_fast - replace an object in hash table
* @ht: hash table
* @obj_old: pointer to hash head inside object being replaced
* @obj_new: pointer to hash head inside object which is new
* @params: hash table parameters
*
* Replacing an object doesn't affect the number of elements in the hash table
* or bucket, so we don't need to worry about shrinking or expanding the
* table here.
*
* Returns zero on success, -ENOENT if the entry could not be found,
* -EINVAL if hash is not the same for the old and new objects.
*/
static __always_inline int rhashtable_replace_fast(
struct rhashtable *ht, struct rhash_head *obj_old,
struct rhash_head *obj_new,
const struct rhashtable_params params)
{
struct bucket_table *tbl;
int err;
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
/* Because we have already taken (and released) the bucket
* lock in old_tbl, if we find that future_tbl is not yet
* visible then that guarantees the entry to still be in
* the old tbl if it exists.
*/
while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
obj_new, params)) &&
(tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
;
rcu_read_unlock();
return err;
}
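/*
 * Illustrative sketch (not part of the original header): swap in an
 * updated copy of an object that carries the same key, then release the
 * old copy after a grace period. Names follow the earlier hypothetical
 * sketches.
 */
static inline int my_update(struct rhashtable *ht, struct my_obj *old_obj,
			    struct my_obj *new_obj)
{
	int err;

	/* Both objects must carry the same key, i.e. hash identically. */
	err = rhashtable_replace_fast(ht, &old_obj->node, &new_obj->node,
				      my_params);
	if (!err)
		kfree_rcu(old_obj, rcu);
	return err;
}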
/**
* rhltable_walk_enter - Initialise an iterator
* @hlt: Table to walk over
* @iter: Hash table Iterator
*
* This function prepares a hash table walk.
*
* Note that if you restart a walk after rhashtable_walk_stop you
* may see the same object twice. Also, you may miss objects if
* there are removals in between rhashtable_walk_stop and the next
* call to rhashtable_walk_start.
*
* For a completely stable walk you should construct your own data
* structure outside the hash table.
*
* This function may be called from any process context, including
* non-preemptable context, but cannot be called from softirq or
* hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
*/
static inline void rhltable_walk_enter(struct rhltable *hlt,
struct rhashtable_iter *iter)
{
rhashtable_walk_enter(&hlt->ht, iter);
}
/**
* rhltable_free_and_destroy - free elements and destroy hash list table
* @hlt: the hash list table to destroy
* @free_fn: callback to release resources of element
* @arg: pointer passed to free_fn
*
* See documentation for rhashtable_free_and_destroy.
*/
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
void (*free_fn)(void *ptr,
void *arg),
void *arg)
{
rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}
static inline void rhltable_destroy(struct rhltable *hlt)
{
rhltable_free_and_destroy(hlt, NULL, NULL);
}
#endif /* _LINUX_RHASHTABLE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H
#ifndef __ASSEMBLY__
#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bitmap-str.h>
struct device;
/*
* bitmaps provide bit arrays that consume one or more unsigned
* longs. The bitmap interface and available operations are listed
* here, in bitmap.h
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Function implementations that are architecture
* specific are in various arch/<arch>/include/asm/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
*/
/**
* DOC: bitmap overview
*
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* The generated code is more efficient when nbits is known at
* compile-time and at most BITS_PER_LONG.
*
* ::
*
* bitmap_zero(dst, nbits) *dst = 0UL
* bitmap_fill(dst, nbits) *dst = ~0UL
* bitmap_copy(dst, src, nbits) *dst = *src
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
* bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
* bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
* bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
* bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
* bitmap_scatter(dst, src, mask, nbits) *dst = map(dense, sparse)(src)
* bitmap_gather(dst, src, mask, nbits) *dst = map(sparse, dense)(src)
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
* bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
* bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
* bitmap_read(map, start, nbits) Read an nbits-sized value from
* map at start
* bitmap_write(map, value, start, nbits) Write an nbits-sized value to
* map at start
*
* Note, bitmap_zero() and bitmap_fill() operate over the region of
* unsigned longs, that is, bits beyond nbits up to the unsigned long
* boundary will be zeroed or filled as well. Consider using
* bitmap_clear() or bitmap_set() for explicit zeroing or filling,
* respectively.
*/
/**
* DOC: bitmap bitops
*
* Also the following operations in asm/bitops.h apply to bitmaps.::
*
* set_bit(bit, addr) *addr |= bit
* clear_bit(bit, addr) *addr &= ~bit
* change_bit(bit, addr) *addr ^= bit
* test_bit(bit, addr) Is bit set in *addr?
* test_and_set_bit(bit, addr) Set bit and return old value
* test_and_clear_bit(bit, addr) Clear bit and return old value
* test_and_change_bit(bit, addr) Change bit and return old value
* find_first_zero_bit(addr, nbits) Position first zero bit in *addr
* find_first_bit(addr, nbits) Position first set bit in *addr
* find_next_zero_bit(addr, nbits, bit)
* Position next zero bit in *addr >= bit
* find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
* find_next_and_bit(addr1, addr2, nbits, bit)
* Same as find_next_bit, but in
* (*addr1 & *addr2)
*
*/
/**
* DOC: declare bitmap
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
*/
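/*
 * Illustrative sketch (not part of the original header): a statically
 * sized bitmap tracking 128 hypothetical channel IDs. The caller is
 * assumed to serialize access around the non-atomic __set_bit().
 */
#define MY_NR_CHANNELS 128

static DECLARE_BITMAP(my_channel_map, MY_NR_CHANNELS);

static inline int my_claim_channel(void)
{
	unsigned int ch = find_first_zero_bit(my_channel_map, MY_NR_CHANNELS);

	if (ch >= MY_NR_CHANNELS)
		return -ENOSPC;		/* all channels in use */
	__set_bit(ch, my_channel_map);
	return ch;
}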
/*
* Allocation and deallocation of bitmap.
* Provided in lib/bitmap.c to avoid circular dependency.
*/
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
void bitmap_free(const unsigned long *bitmap);
DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T))
/* Managed variants of the above. */
unsigned long *devm_bitmap_alloc(struct device *dev,
unsigned int nbits, gfp_t flags);
unsigned long *devm_bitmap_zalloc(struct device *dev,
unsigned int nbits, gfp_t flags);
/*
* lib/bitmap.c provides these functions:
*/
bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __pure __bitmap_or_equal(const unsigned long *src1,
const unsigned long *src2,
const unsigned long *src3,
unsigned int nbits);
void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut, unsigned int nbits);
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
const unsigned long *mask, unsigned int nbits);
bool __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset);
/**
* bitmap_find_next_zero_area - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offsets of all zero areas this function finds are multiples of that
* power of 2. An @align_mask of 0 means no alignment is required.
*/
static __always_inline
unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
}
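/*
 * Illustrative sketch (not part of the original header): reserve a run
 * of @count free bits aligned to a multiple of 4 (align_mask = 3) in a
 * caller-provided map. "my_reserve_slots" is a hypothetical helper and
 * locking is the caller's responsibility. __bitmap_set() is used here
 * because bitmap_set() is defined further down in this header.
 */
static inline int my_reserve_slots(unsigned long *map, unsigned long size,
				   unsigned int count)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(map, size, 0, count, 3);
	if (start >= size)
		return -ENOSPC;		/* no suitably aligned free run */
	__bitmap_set(map, start, count);
	return start;
}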
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = 0;
else
memset(dst, 0, len);
}
static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = ~0UL;
else
memset(dst, 0xff, len);
}
static __always_inline
void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = *src;
else
memcpy(dst, src, len);
}
/*
* Copy bitmap and clear tail bits in last word.
*/
static __always_inline
void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % BITS_PER_LONG)
dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy_and_extend(unsigned long *to,
const unsigned long *from,
unsigned int count, unsigned int size)
{
	unsigned int copy = BITS_TO_LONGS(count);
memcpy(to, from, copy * sizeof(long));
if (count % BITS_PER_LONG)
to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
}
/*
* On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
* machines the order of hi and lo parts of numbers match the bitmap structure.
* In both cases conversion is not needed when copying data from/to arrays of
* u32. But in LE64 case, typecast in bitmap_copy_clear_tail() may lead
* to out-of-bound access. To avoid that, both LE and BE variants of 64-bit
* architectures are not using bitmap_copy_clear_tail().
*/
#if BITS_PER_LONG == 64
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
unsigned int nbits);
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
unsigned int nbits);
#else
#define bitmap_from_arr32(bitmap, buf, nbits) \
bitmap_copy_clear_tail((unsigned long *) (bitmap), \
(const unsigned long *) (buf), (nbits))
#define bitmap_to_arr32(buf, bitmap, nbits) \
bitmap_copy_clear_tail((unsigned long *) (buf), \
(const unsigned long *) (bitmap), (nbits))
#endif
/*
* On 64-bit systems bitmaps are represented as u64 arrays internally. So,
* the conversion is not needed when copying data from/to arrays of u64.
*/
#if BITS_PER_LONG == 32
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
#else
#define bitmap_from_arr64(bitmap, buf, nbits) \
bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
#define bitmap_to_arr64(buf, bitmap, nbits) \
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
static __always_inline
bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
static __always_inline
void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
else
__bitmap_or(dst, src1, src2, nbits);
}
static __always_inline
void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
else
__bitmap_xor(dst, src1, src2, nbits);
}
static __always_inline
bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
static __always_inline
void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
else
__bitmap_complement(dst, src, nbits);
}
#ifdef __LITTLE_ENDIAN
#define BITMAP_MEM_ALIGNMENT 8
#else
#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
static __always_inline
bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
return !memcmp(src1, src2, nbits / 8);
return __bitmap_equal(src1, src2, nbits);
}
/**
* bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
* @src1: Pointer to bitmap 1
* @src2: Pointer to bitmap 2 will be or'ed with bitmap 1
* @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
* @nbits: number of bits in each of these bitmaps
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
static __always_inline
bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
const unsigned long *src3, unsigned int nbits)
{
if (!small_const_nbits(nbits))
return __bitmap_or_equal(src1, src2, src3, nbits);
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
static __always_inline
bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
else
return __bitmap_intersects(src1, src2, nbits);
}
static __always_inline
bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_subset(src1, src2, nbits);
}
static __always_inline
bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
return find_first_bit(src, nbits) == nbits;
}
static __always_inline
bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
return find_first_zero_bit(src, nbits) == nbits;
}
static __always_inline
unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
static __always_inline
unsigned long bitmap_weight_and(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight_and(src1, src2, nbits);
}
static __always_inline
unsigned long bitmap_weight_andnot(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight_andnot(src1, src2, nbits);
}
static __always_inline
void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
else if (small_const_nbits(start + nbits))
*map |= GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0xff, nbits / 8);
else
__bitmap_set(map, start, nbits);
}
static __always_inline
void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
else if (small_const_nbits(start + nbits))
*map &= ~GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0, nbits / 8);
else
__bitmap_clear(map, start, nbits);
}
static __always_inline
void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
else
__bitmap_shift_right(dst, src, shift, nbits);
}
static __always_inline
void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_shift_left(dst, src, shift, nbits);
}
static __always_inline
void bitmap_replace(unsigned long *dst,
const unsigned long *old,
const unsigned long *new,
const unsigned long *mask,
unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*old & ~(*mask)) | (*new & *mask);
else
__bitmap_replace(dst, old, new, mask, nbits);
}
/**
* bitmap_scatter - Scatter a bitmap according to the given mask
* @dst: scattered bitmap
* @src: gathered bitmap
* @mask: mask representing bits to assign to in the scattered bitmap
* @nbits: number of bits in each of these bitmaps
*
* Scatters bitmap with sequential bits according to the given @mask.
*
* Example:
* If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302.
*
* Or in binary form
* @src @mask @dst
* 0000000001011010 0001001100010011 0000001100000010
*
* (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12)
*
* A more 'visual' description of the operation::
*
* src: 0000000001011010
* ||||||
* +------+|||||
* | +----+||||
* | |+----+|||
* | || +-+||
* | || | ||
* mask: ...v..vv...v..vv
* ...0..11...0..10
* dst: 0000001100000010
*
* A relationship exists between bitmap_scatter() and bitmap_gather(). See
* bitmap_gather() for the bitmap gather detailed operations. TL;DR:
* bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
*/
static __always_inline
void bitmap_scatter(unsigned long *dst, const unsigned long *src,
const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
bitmap_zero(dst, nbits);
for_each_set_bit(bit, mask, nbits)
__assign_bit(bit, dst, test_bit(n++, src));
}
/**
* bitmap_gather - Gather a bitmap according to given mask
* @dst: gathered bitmap
* @src: scattered bitmap
* @mask: mask representing bits to extract from in the scattered bitmap
* @nbits: number of bits in each of these bitmaps
*
* Gathers bitmap with sparse bits according to the given @mask.
*
* Example:
* If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a.
*
* Or in binary form
* @src @mask @dst
* 0000001100000010 0001001100010011 0000000000011010
*
* (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5)
*
* A more 'visual' description of the operation::
*
* mask: ...v..vv...v..vv
* src: 0000001100000010
* ^ ^^ ^ 0
* | || | 10
* | || > 010
* | |+--> 1010
* | +--> 11010
* +----> 011010
* dst: 0000000000011010
*
* A relationship exists between bitmap_gather() and bitmap_scatter(). See
* bitmap_scatter() for the bitmap scatter detailed operations. TL;DR:
* bitmap_scatter() can be seen as the 'reverse' bitmap_gather() operation.
*
* Suppose scattered computed using bitmap_scatter(scattered, src, mask, n).
* The operation bitmap_gather(result, scattered, mask, n) leads to a result
* equal or equivalent to src.
*
* The result can be 'equivalent' because bitmap_scatter() and bitmap_gather()
* are not bijective.
* The result and src values are equivalent in that sense that a call to
* bitmap_scatter(res, src, mask, n) and a call to
* bitmap_scatter(res, result, mask, n) will lead to the same res value.
*/
static __always_inline
void bitmap_gather(unsigned long *dst, const unsigned long *src,
const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
bitmap_zero(dst, nbits);
for_each_set_bit(bit, mask, nbits)
__assign_bit(n++, dst, test_bit(bit, src));
}
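/*
 * Illustrative sketch (not part of the original header): the
 * scatter/gather round trip described above, on a 16-bit region with
 * the documented 0x005a / 0x1313 example values.
 */
static inline void my_scatter_gather_demo(void)
{
	DECLARE_BITMAP(src, 16)  = { 0x005aUL };
	DECLARE_BITMAP(mask, 16) = { 0x1313UL };
	DECLARE_BITMAP(scattered, 16);
	DECLARE_BITMAP(result, 16);

	bitmap_scatter(scattered, src, mask, 16);	/* scattered[0] == 0x0302 */
	bitmap_gather(result, scattered, mask, 16);	/* result[0] == 0x001a */
}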
static __always_inline
void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
unsigned int *re, unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
/**
* bitmap_release_region - release allocated bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to release
* @order: region size (log base 2 of number of bits) to release
*
* This is the complement to __bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*/
static __always_inline
void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
bitmap_clear(bitmap, pos, BIT(order));
}
/**
* bitmap_allocate_region - allocate bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to allocate
* @order: region size (log base 2 of number of bits) to allocate
*
* Allocate (set bits in) a specified region of a bitmap.
*
* Returns: 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
static __always_inline
int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
unsigned int len = BIT(order);
if (find_next_bit(bitmap, pos + len, pos) < pos + len)
return -EBUSY;
bitmap_set(bitmap, pos, len);
return 0;
}
/**
* bitmap_find_free_region - find a contiguous aligned mem region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @bits: number of bits in the bitmap
* @order: region size (log base 2 of number of bits) to find
*
* Find a region of free (zero) bits in a @bitmap of @bits bits and
* allocate them (set them to one). Only consider regions of length
* a power (@order) of two, aligned to that power of two, which
* makes the search algorithm much faster.
*
* Returns: the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
static __always_inline
int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
unsigned int pos, end; /* scans bitmap by regions of size order */
for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
if (!bitmap_allocate_region(bitmap, pos, order))
return pos;
}
return -ENOMEM;
}
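/*
 * Illustrative sketch (not part of the original header): carve an
 * aligned order-3 region (8 bits) out of a hypothetical 256-bit
 * resource map, and release it again later.
 */
static inline int my_grab_region(unsigned long *map)
{
	int pos = bitmap_find_free_region(map, 256, 3);

	if (pos < 0)
		return pos;	/* -ENOMEM: no aligned free region left */
	/* bits pos .. pos + 7 now belong to the caller */
	return pos;
}

static inline void my_put_region(unsigned long *map, int pos)
{
	bitmap_release_region(map, pos, 3);
}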
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value
*
* Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
* integers in 32-bit environment, and 64-bit integers in 64-bit one.
*
* There are four combinations of endianness and length of the word in linux
* ABIs: LE64, BE64, LE32 and BE32.
*
* On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in
* bitmaps and therefore don't require any special handling.
*
* On 32-bit kernels 32-bit LE ABI orders lo word of 64-bit number in memory
* prior to hi, and 32-bit BE orders hi word prior to lo. The bitmap on the
* other hand is represented as an array of 32-bit words and the position of
* bit N may therefore be calculated as: word #(N/32) and bit #(N%32) in that
* word. For example, bit #42 is located at 10th position of 2nd word.
* It matches 32-bit LE ABI, and we can simply let the compiler store 64-bit
* values in memory as it usually does. But for BE we need to swap hi and lo
* words manually.
*
* With all that, the macro BITMAP_FROM_U64() does explicit reordering of hi and
* lo parts of u64. For LE32 it does nothing, and for BE environment it swaps
* hi and lo words, as is expected by bitmap.
*/
#if __BITS_PER_LONG == 64
#define BITMAP_FROM_U64(n) (n)
#else
#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \
((unsigned long) ((u64)(n) >> 32))
#endif
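/*
 * Illustrative sketch (not part of the original header): initialising a
 * bitmap from 64-bit constants in a word-size independent way. The array
 * below covers 128 bits on both 32-bit and 64-bit kernels;
 * "my_default_mask" is a hypothetical name.
 */
static const unsigned long my_default_mask[] = {
	BITMAP_FROM_U64(0x00000000ffffffffULL),
	BITMAP_FROM_U64(0x0000ffff0000ffffULL),
};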
/**
* bitmap_from_u64 - Check and swap words within u64.
* @mask: source bitmap
* @dst: destination bitmap
*
* In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
* to read u64 mask, we will get the wrong word.
* That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
bitmap_from_arr64(dst, &mask, 64);
}
/**
* bitmap_read - read a value of n-bits from the memory region
* @map: address to the bitmap memory region
* @start: bit offset of the n-bit value
* @nbits: size of value in bits, nonzero, up to BITS_PER_LONG
*
* Returns: value of @nbits bits located at the @start bit offset within the
* @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
* value is undefined.
*/
static __always_inline
unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
size_t index = BIT_WORD(start);
unsigned long offset = start % BITS_PER_LONG;
unsigned long space = BITS_PER_LONG - offset;
unsigned long value_low, value_high;
if (unlikely(!nbits || nbits > BITS_PER_LONG))
return 0;
if (space >= nbits)
return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);
value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
return (value_low >> offset) | (value_high << space);
}
/**
* bitmap_write - write n-bit value within a memory region
* @map: address to the bitmap memory region
* @value: value to write, clamped to nbits
* @start: bit offset of the n-bit value
* @nbits: size of value in bits, nonzero, up to BITS_PER_LONG.
*
* bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(),
* i.e. bits beyond @nbits are ignored:
*
* for (bit = 0; bit < nbits; bit++)
* __assign_bit(start + bit, bitmap, val & BIT(bit));
*
* For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
static __always_inline
void bitmap_write(unsigned long *map, unsigned long value,
unsigned long start, unsigned long nbits)
{
size_t index;
unsigned long offset;
unsigned long space;
unsigned long mask;
bool fit;
if (unlikely(!nbits || nbits > BITS_PER_LONG))
return;
mask = BITMAP_LAST_WORD_MASK(nbits);
value &= mask;
offset = start % BITS_PER_LONG;
space = BITS_PER_LONG - offset;
fit = space >= nbits;
index = BIT_WORD(start);
map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
map[index] |= value << offset;
if (fit)
return;
map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
map[index + 1] |= (value >> space);
}
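/*
 * Illustrative sketch (not part of the original header): pack two small
 * hypothetical fields into a caller-provided map (covering at least
 * 9 bits) with bitmap_write() and read one back with bitmap_read().
 */
static inline void my_pack_fields(unsigned long *map)
{
	bitmap_write(map, 0x5, 0, 3);	/* 3-bit field at bit offset 0 */
	bitmap_write(map, 0x2a, 3, 6);	/* 6-bit field at bit offset 3 */
}

static inline unsigned long my_second_field(const unsigned long *map)
{
	return bitmap_read(map, 3, 6);	/* yields 0x2a */
}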
#define bitmap_get_value8(map, start) \
bitmap_read(map, start, BITS_PER_BYTE)
#define bitmap_set_value8(map, value, start) \
bitmap_write(map, value, start, BITS_PER_BYTE)
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* fs/eventpoll.c (Efficient event retrieval implementation)
* Copyright (C) 2001,...,2009 Davide Libenzi
*
* Davide Libenzi <davidel@xmailserver.org>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
#include <linux/capability.h>
#include <net/busy_poll.h>
/*
* LOCKING:
* There are three levels of locking required by epoll:
*
* 1) epnested_mutex (mutex)
* 2) ep->mtx (mutex)
* 3) ep->lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
* We need a spinlock (ep->lock) because we manipulate objects
* from inside the poll callback, that might be triggered from
* a wake_up() that in turn might be called from IRQ context.
* So we can't sleep inside the poll callback and hence we need
* a spinlock. During the event transfer loop (from kernel to
* user space) we could end up sleeping due a copy_to_user(), so
* we need a lock that will allow us to sleep. This lock is a
* mutex (ep->mtx). It is acquired during the event transfer loop,
* during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
* The epnested_mutex is acquired when inserting an epoll fd onto another
* epoll fd. We do this so that we walk the epoll tree and ensure that this
* insertion does not create a cycle of epoll file descriptors, which
* could lead to deadlock. We need a global mutex to prevent two
* simultaneous inserts (A into B and B into A) from racing and
* constructing a cycle without either insert observing that it is
* going to.
* It is necessary to acquire multiple "ep->mtx"es at once in the
* case when one epoll fd is added to another. In this case, we
* always acquire the locks in the order of nesting (i.e. after
* epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
* before e2->mtx). Since we disallow cycles of epoll file
* descriptors, this ensures that the mutexes are well-ordered. In
* order to communicate this nesting to lockdep, when walking a tree
* of epoll file descriptors, we use the current recursion depth as
* the lockdep subkey.
* It is possible to drop the "ep->mtx" and to use the global
* mutex "epnested_mutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
* Events that require holding "epnested_mutex" are very rare, while for
* normal operations the epoll private "ep->mtx" will guarantee
* better scalability.
*/
/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4
#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
#define EP_UNACTIVE_PTR ((void *) -1L)
#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
struct epoll_filefd {
struct file *file;
int fd;
} __packed;
/* Wait structure used by the poll hooks */
struct eppoll_entry {
/* List header used to link this structure to the "struct epitem" */
struct eppoll_entry *next;
/* The "base" pointer is set to the container "struct epitem" */
struct epitem *base;
/*
* Wait queue item that will be linked to the target file wait
* queue head.
*/
wait_queue_entry_t wait;
/* The wait queue head that linked the "wait" wait queue item */
wait_queue_head_t *whead;
};
/*
* Each file descriptor added to the eventpoll interface will
* have an entry of this type linked to the "rbr" RB tree.
* Avoid increasing the size of this struct, there can be many thousands
* of these on a server and we do not want this to take another cache line.
*/
struct epitem {
union {
/* RB tree node links this structure to the eventpoll RB tree */
struct rb_node rbn;
/* Used to free the struct epitem */
struct rcu_head rcu;
};
/* List header used to link this structure to the eventpoll ready list */
struct list_head rdllink;
/*
* Works together "struct eventpoll"->ovflist in keeping the
* single linked chain of items.
*/
struct epitem *next;
/* The file descriptor information this item refers to */
struct epoll_filefd ffd;
/*
* Protected by file->f_lock, true for to-be-released epitem already
* removed from the "struct file" items list; together with
* eventpoll->refcount orchestrates "struct eventpoll" disposal
*/
bool dying;
/* List containing poll wait queues */
struct eppoll_entry *pwqlist;
/* The "container" of this item */
struct eventpoll *ep;
/* List header used to link this item to the "struct file" items list */
struct hlist_node fllink;
/* wakeup_source used when EPOLLWAKEUP is set */
struct wakeup_source __rcu *ws;
/* The structure that describe the interested events and the source fd */
struct epoll_event event;
};
/*
* This structure is stored inside the "private_data" member of the file
* structure and represents the main data structure for the eventpoll
* interface.
*/
struct eventpoll {
/*
* This mutex is used to ensure that files are not removed
* while epoll is using them. This is held during the event
* collection loop, the file cleanup path, the epoll file exit
* code and the ctl operations.
*/
struct mutex mtx;
/* Wait queue used by sys_epoll_wait() */
wait_queue_head_t wq;
/* Wait queue used by file->poll() */
wait_queue_head_t poll_wait;
/* List of ready file descriptors */
struct list_head rdllist;
/* Lock which protects rdllist and ovflist */
spinlock_t lock;
/* RB tree root used to store monitored fd structs */
struct rb_root_cached rbr;
/*
* This is a single linked list that chains all the "struct epitem" that
* happened while transferring ready events to userspace w/out
* holding ->lock.
*/
struct epitem *ovflist;
/* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */
struct wakeup_source *ws;
/* The user that created the eventpoll descriptor */
struct user_struct *user;
struct file *file;
/* used to optimize loop detection check */
u64 gen;
struct hlist_head refs;
u8 loop_check_depth;
/*
* usage count, used together with epitem->dying to
* orchestrate the disposal of this struct
*/
refcount_t refcount;
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
unsigned int napi_id;
/* busy poll timeout */
u32 busy_poll_usecs;
/* busy poll packet budget */
u16 busy_poll_budget;
bool prefer_busy_poll;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* tracks wakeup nests for lockdep validation */
u8 nests;
#endif
};
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
poll_table pt;
struct epitem *epi;
};
/*
* Configuration options available inside /proc/sys/fs/epoll/
*/
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;
/* Used for cycles detection */
static DEFINE_MUTEX(epnested_mutex);
static u64 loop_check_gen = 0;
/* Used to check for epoll file descriptor inclusion loops */
static struct eventpoll *inserting_into;
/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __ro_after_init;
/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __ro_after_init;
/*
* List of files with newly added links, where we may need to limit the number
* of emanating paths. Protected by the epnested_mutex.
*/
struct epitems_head {
struct hlist_head epitems;
struct epitems_head *next;
};
static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;
static struct kmem_cache *ephead_cache __ro_after_init;
static inline void free_ephead(struct epitems_head *head)
{
if (head)
kmem_cache_free(ephead_cache, head);
}
static void list_file(struct file *file)
{
struct epitems_head *head;
head = container_of(file->f_ep, struct epitems_head, epitems);
if (!head->next) {
head->next = tfile_check_list;
tfile_check_list = head;
}
}
static void unlist_file(struct epitems_head *head)
{
struct epitems_head *to_free = head;
struct hlist_node *p = rcu_dereference(hlist_first_rcu(&head->epitems));
if (p) {
	struct epitem *epi = container_of(p, struct epitem, fllink);
spin_lock(&epi->ffd.file->f_lock);
if (!hlist_empty(&head->epitems))
to_free = NULL;
head->next = NULL;
spin_unlock(&epi->ffd.file->f_lock);
}
free_ephead(to_free);
}
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static long long_zero;
static long long_max = LONG_MAX;
static const struct ctl_table epoll_table[] = {
{
.procname = "max_user_watches",
.data = &max_user_watches,
.maxlen = sizeof(max_user_watches),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &long_zero,
.extra2 = &long_max,
},
};
static void __init epoll_sysctls_init(void)
{
register_sysctl("fs/epoll", epoll_table);
}
#else
#define epoll_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
static const struct file_operations eventpoll_fops;
static inline int is_file_epoll(struct file *f)
{
return f->f_op == &eventpoll_fops;
}
/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
struct file *file, int fd)
{
ffd->file = file;
ffd->fd = fd;
}
/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
struct epoll_filefd *p2)
{
return (p1->file > p2->file ? +1:
(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct epitem *epi)
{
return !list_empty(&epi->rdllink);
}
static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
{
return container_of(p, struct eppoll_entry, wait);
}
/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
{
return container_of(p, struct eppoll_entry, wait)->base;
}
/**
* ep_events_available - Checks if ready events might be available.
*
* @ep: Pointer to the eventpoll context.
*
* Return: a value different than %zero if ready events are available,
* or %zero otherwise.
*/
static inline int ep_events_available(struct eventpoll *ep)
{
return !list_empty_careful(&ep->rdllist) ||
READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/**
* busy_loop_ep_timeout - check if busy poll has timed out. The timeout value
* from the epoll instance ep is preferred, but if it is not set fall back to
* the system-wide global via busy_loop_timeout.
*
* @start_time: The start time used to compute the remaining time until timeout.
* @ep: Pointer to the eventpoll context.
*
* Return: true if the timeout has expired, false otherwise.
*/
static bool busy_loop_ep_timeout(unsigned long start_time,
struct eventpoll *ep)
{
unsigned long bp_usec = READ_ONCE(ep->busy_poll_usecs);
if (bp_usec) {
unsigned long end_time = start_time + bp_usec;
unsigned long now = busy_loop_current_time();
return time_after(now, end_time);
} else {
return busy_loop_timeout(start_time);
}
}
static bool ep_busy_loop_on(struct eventpoll *ep)
{
	return !!READ_ONCE(ep->busy_poll_usecs) ||
	       READ_ONCE(ep->prefer_busy_poll) ||
	       net_busy_loop_on();
}
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
struct eventpoll *ep = p;
return ep_events_available(ep) || busy_loop_ep_timeout(start_time, ep);
}
/*
* Busy poll if busy polling is enabled, a supporting socket was found and
* there are no events; the busy loop returns once need_resched() is set or
* ep_events_available() becomes true.
*
* We must do our busy polling with irqs enabled.
*/
static bool ep_busy_loop(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
u16 budget = READ_ONCE(ep->busy_poll_budget);
bool prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
if (!budget)
budget = BUSY_POLL_BUDGET;
if (napi_id_valid(napi_id) && ep_busy_loop_on(ep)) {
napi_busy_loop(napi_id, ep_busy_loop_end,
ep, prefer_busy_poll, budget);
if (ep_events_available(ep))
return true;
/*
* Busy poll timed out. Drop NAPI ID for now, we can add
* it back in when we have moved a socket with a valid NAPI
* ID onto the ready list.
*/
if (prefer_busy_poll)
napi_resume_irqs(napi_id);
ep->napi_id = 0;
return false;
}
return false;
}
/*
* Set epoll busy poll NAPI ID from sk.
*/
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
struct eventpoll *ep = epi->ep;
unsigned int napi_id;
struct socket *sock;
struct sock *sk;
if (!ep_busy_loop_on(ep))
return;
sock = sock_from_file(epi->ffd.file);
if (!sock)
return;
sk = sock->sk;
if (!sk)
return;
napi_id = READ_ONCE(sk->sk_napi_id);
	/*
	 * Non-NAPI IDs are rejected; there is also nothing to do if we
	 * already track this ID.
	 */
if (!napi_id_valid(napi_id) || napi_id == ep->napi_id)
return;
/* record NAPI ID for use in next busy poll */
ep->napi_id = napi_id;
}
static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct eventpoll *ep = file->private_data;
void __user *uarg = (void __user *)arg;
struct epoll_params epoll_params;
switch (cmd) {
case EPIOCSPARAMS:
if (copy_from_user(&epoll_params, uarg, sizeof(epoll_params)))
return -EFAULT;
/* pad byte must be zero */
if (epoll_params.__pad)
return -EINVAL;
if (epoll_params.busy_poll_usecs > S32_MAX)
return -EINVAL;
if (epoll_params.prefer_busy_poll > 1)
return -EINVAL;
if (epoll_params.busy_poll_budget > NAPI_POLL_WEIGHT &&
!capable(CAP_NET_ADMIN))
return -EPERM;
WRITE_ONCE(ep->busy_poll_usecs, epoll_params.busy_poll_usecs);
WRITE_ONCE(ep->busy_poll_budget, epoll_params.busy_poll_budget);
WRITE_ONCE(ep->prefer_busy_poll, epoll_params.prefer_busy_poll);
return 0;
case EPIOCGPARAMS:
memset(&epoll_params, 0, sizeof(epoll_params));
epoll_params.busy_poll_usecs = READ_ONCE(ep->busy_poll_usecs);
epoll_params.busy_poll_budget = READ_ONCE(ep->busy_poll_budget);
epoll_params.prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
if (copy_to_user(uarg, &epoll_params, sizeof(epoll_params)))
return -EFAULT;
return 0;
default:
return -ENOIOCTLCMD;
}
}
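/*
 * Illustrative user-space sketch (not part of the original source) of
 * driving the EPIOCSPARAMS/EPIOCGPARAMS handlers above; it assumes a
 * kernel and libc that expose struct epoll_params and the EPIOC*
 * ioctls via <sys/epoll.h>:
 *
 *	struct epoll_params params = {
 *		.busy_poll_usecs  = 64,
 *		.busy_poll_budget = 8,
 *		.prefer_busy_poll = 1,
 *	};
 *	int epfd = epoll_create1(0);
 *
 *	if (ioctl(epfd, EPIOCSPARAMS, &params) < 0)
 *		perror("EPIOCSPARAMS");
 */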
static void ep_suspend_napi_irqs(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
napi_suspend_irqs(napi_id);
}
static void ep_resume_napi_irqs(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
napi_resume_irqs(napi_id);
}
#else
static inline bool ep_busy_loop(struct eventpoll *ep)
{
return false;
}
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}
static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return -EOPNOTSUPP;
}
static void ep_suspend_napi_irqs(struct eventpoll *ep)
{
}
static void ep_resume_napi_irqs(struct eventpoll *ep)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
* As described in commit 0ccf831cb ("lockdep: annotate epoll"),
* the use of wait queues by epoll is done in a very controlled
* manner. Wake ups can nest inside each other, but are never done
* with the same locking. For example:
*
* dfd = socket(...);
* efd1 = epoll_create();
* efd2 = epoll_create();
* epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
* epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
*
* When a packet arrives to the device underneath "dfd", the net code will
* issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
* callback wakeup entry on that queue, and the wake_up() performed by the
* "dfd" net code will end up in ep_poll_callback(). At this point epoll
* (efd1) notices that it may have some event ready, so it needs to wake up
* the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
* that ends up in another wake_up(), after having checked the
* recursion constraints (no more than EP_MAX_NESTS levels), to avoid
* stack blasting.
*
* When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
* this special case of epoll.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
unsigned pollflags)
{
struct eventpoll *ep_src;
unsigned long flags;
u8 nests = 0;
/*
* To set the subclass or nesting level for spin_lock_irqsave_nested()
* it might be natural to create a per-cpu nest count. However, since
* we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
* schedule() in the -rt kernel, the per-cpu variable is no longer
* protected. Thus, we are introducing a per-eventpoll nest field.
* If we are not being called from ep_poll_callback(), epi is NULL and
* we are at the first level of nesting, 0. Otherwise, we are being
* called from ep_poll_callback() and if a previous wakeup source is
* not an epoll file itself, we are at depth 1 since the wakeup source
* is depth 0. If the wakeup source is a previous epoll file in the
* wakeup chain then we use its nests value and record ours as
* nests + 1. The previous epoll file's nests value is stable since it is
* already holding its own poll_wait.lock.
*/
if (epi) {
if (is_file_epoll(epi->ffd.file)) {
ep_src = epi->ffd.file->private_data;
nests = ep_src->nests;
} else {
nests = 1;
}
}
spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
ep->nests = nests + 1;
wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
ep->nests = 0;
spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}
#else
static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
__poll_t pollflags)
{
wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
}
#endif
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
wait_queue_head_t *whead;
rcu_read_lock();
/*
* If it is cleared by POLLFREE, it should be rcu-safe.
* If we read NULL we need a barrier paired with
* smp_store_release() in ep_poll_callback(), otherwise
* we rely on whead->lock.
*/
whead = smp_load_acquire(&pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
}
/*
* This function unregisters poll callbacks from the associated file
* descriptor. Must be called with "mtx" held.
*/
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
struct eppoll_entry **p = &epi->pwqlist;
struct eppoll_entry *pwq;
while ((pwq = *p) != NULL) {
*p = pwq->next;
ep_remove_wait_queue(pwq);
kmem_cache_free(pwq_cache, pwq);
}
}
/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}
/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
struct wakeup_source *ws = ep_wakeup_source(epi);
if (ws)
__pm_stay_awake(ws);
}
static inline bool ep_has_wakeup_source(struct epitem *epi)
{
return rcu_access_pointer(epi->ws) ? true : false;
}
/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
struct wakeup_source *ws;
rcu_read_lock();
ws = rcu_dereference(epi->ws);
if (ws)
__pm_stay_awake(ws);
rcu_read_unlock();
}
/*
* ep->mtx needs to be held because we could be hit by
* eventpoll_release_file() and epoll_ctl().
*/
static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
{
/*
* Steal the ready list, and re-init the original one to the
* empty list. Also, set ep->ovflist to NULL so that events
* happening while looping w/out locks are not lost. We cannot
* have the poll callback queue directly on ep->rdllist,
* because we want the "sproc" callback to be able to do it
* in a lockless way.
*/
lockdep_assert_irqs_enabled();
spin_lock_irq(&ep->lock);
list_splice_init(&ep->rdllist, txlist);
WRITE_ONCE(ep->ovflist, NULL);
spin_unlock_irq(&ep->lock);
}
static void ep_done_scan(struct eventpoll *ep,
struct list_head *txlist)
{
struct epitem *epi, *nepi;
spin_lock_irq(&ep->lock);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
* We re-insert them inside the main ready-list here.
*/
for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
/*
* We need to check if the item is already in the list.
* During the "sproc" callback execution time, items are
* queued into ->ovflist but the "txlist" might already
* contain them, and the list_splice() below takes care of them.
*/
if (!ep_is_linked(epi)) {
/*
* ->ovflist is LIFO, so we have to reverse it in order
* to keep it in FIFO order.
*/
list_add(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
}
}
/*
* We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
* releasing the lock, events will be queued in the normal way inside
* ep->rdllist.
*/
WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);
/*
* Quickly re-inject items left on "txlist".
*/
list_splice(txlist, &ep->rdllist);
__pm_relax(ep->ws);
if (!list_empty(&ep->rdllist)) {
if (waitqueue_active(&ep->wq))
wake_up(&ep->wq);
}
spin_unlock_irq(&ep->lock);
}
static void ep_get(struct eventpoll *ep)
{
refcount_inc(&ep->refcount);
}
/*
* Returns true if the event poll can be disposed
*/
static bool ep_refcount_dec_and_test(struct eventpoll *ep)
{
if (!refcount_dec_and_test(&ep->refcount))
return false;
WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
return true;
}
static void ep_free(struct eventpoll *ep)
{
ep_resume_napi_irqs(ep);
mutex_destroy(&ep->mtx);
free_uid(ep->user);
wakeup_source_unregister(ep->ws);
kfree(ep);
}
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
* all the associated resources. Must be called with "mtx" held.
* If the dying flag is set, do the removal only if force is true.
* This prevents ep_clear_and_put() from dropping all the ep references
* while running concurrently with eventpoll_release_file().
* Returns true if the eventpoll can be disposed.
*/
static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
{
struct file *file = epi->ffd.file;
struct epitems_head *to_free;
struct hlist_head *head;
lockdep_assert_irqs_enabled();
/*
* Removes poll wait queue hooks.
*/
ep_unregister_pollwait(ep, epi);
/* Remove the current item from the list of epoll hooks */
spin_lock(&file->f_lock);
if (epi->dying && !force) {
spin_unlock(&file->f_lock);
return false;
}
to_free = NULL;
head = file->f_ep;
if (head->first == &epi->fllink && !epi->fllink.next) {
/* See eventpoll_release() for details. */
WRITE_ONCE(file->f_ep, NULL);
if (!is_file_epoll(file)) {
struct epitems_head *v;
v = container_of(head, struct epitems_head, epitems);
if (!smp_load_acquire(&v->next))
to_free = v;
}
}
hlist_del_rcu(&epi->fllink);
spin_unlock(&file->f_lock);
free_ephead(to_free);
rb_erase_cached(&epi->rbn, &ep->rbr);
spin_lock_irq(&ep->lock);
if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
spin_unlock_irq(&ep->lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
* At this point it is safe to free the eventpoll item. Use the union
* field epi->rcu, since we are trying to minimize the size of
* 'struct epitem'. The 'rbn' field is no longer in use. Protected by
* ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
* use of the rbn field.
*/
kfree_rcu(epi, rcu);
percpu_counter_dec(&ep->user->epoll_watches);
return true;
}
/*
* ep_remove variant for callers owning an additional reference to the ep
*/
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
if (__ep_remove(ep, epi, false))
WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
}
static void ep_clear_and_put(struct eventpoll *ep)
{
struct rb_node *rbp, *next;
struct epitem *epi;
/* We need to release all tasks waiting for this file */
if (waitqueue_active(&ep->poll_wait))
ep_poll_safewake(ep, NULL, 0);
mutex_lock(&ep->mtx);
/*
* Walks through the whole tree by unregistering poll callbacks.
*/
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_unregister_pollwait(ep, epi);
cond_resched();
}
/*
* Walks through the whole tree and tries to free each "struct epitem".
* Note that ep_remove_safe() will not remove the epitem in case of a
* racing eventpoll_release_file(); the latter will do the removal.
* At this point we are sure no poll callbacks will be lingering around.
* Since we still own a reference to the eventpoll struct, the loop can't
* dispose it.
*/
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
next = rb_next(rbp);
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove_safe(ep, epi);
cond_resched();
}
mutex_unlock(&ep->mtx);
if (ep_refcount_dec_and_test(ep))
ep_free(ep);
}
static long ep_eventpoll_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret;
if (!is_file_epoll(file))
return -EINVAL;
switch (cmd) {
case EPIOCSPARAMS:
case EPIOCGPARAMS:
ret = ep_eventpoll_bp_ioctl(file, cmd, arg);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
struct eventpoll *ep = file->private_data;
if (ep)
ep_clear_and_put(ep);
return 0;
}
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);
static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)
{
struct eventpoll *ep = file->private_data;
LIST_HEAD(txlist);
struct epitem *epi, *tmp;
poll_table pt;
__poll_t res = 0;
init_poll_funcptr(&pt, NULL);
/* Insert inside our poll wait queue */
poll_wait(file, &ep->poll_wait, wait);
/*
* Proceed to find out if wanted events are really available inside
* the ready list.
*/
mutex_lock_nested(&ep->mtx, depth);
ep_start_scan(ep, &txlist);
list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
if (ep_item_poll(epi, &pt, depth + 1)) {
res = EPOLLIN | EPOLLRDNORM;
break;
} else {
/*
* Item has been dropped into the ready list by the poll
* callback, but it's not actually ready, as far as the
* caller-requested events go. We can remove it here.
*/
__pm_relax(ep_wakeup_source(epi));
list_del_init(&epi->rdllink);
}
}
ep_done_scan(ep, &txlist);
mutex_unlock(&ep->mtx);
return res;
}
/*
* The ffd.file pointer may be in the process of being torn down due to
* being closed, but we may not have finished eventpoll_release() yet.
*
* Normally, even with the atomic_long_inc_not_zero, the file may have
* been free'd and then gotten re-allocated to something else (since
* files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
*
* But for epoll, users hold the ep->mtx mutex, and as such any file in
* the process of being free'd will block in eventpoll_release_file()
* and thus the underlying file allocation will not be free'd, and the
* file re-use cannot happen.
*
* For the same reason we can avoid a rcu_read_lock() around the
* operation - 'ffd.file' cannot go away even if the refcount has
* reached zero (but we must still not call out to ->poll() functions
* etc).
*/
static struct file *epi_fget(const struct epitem *epi)
{
struct file *file;
file = epi->ffd.file;
if (!file_ref_get(&file->f_ref))
file = NULL;
return file;
}
/*
* Differs from ep_eventpoll_poll() in that internal callers already have
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
*/
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
int depth)
{
struct file *file = epi_fget(epi);
__poll_t res;
/*
* We could return EPOLLERR | EPOLLHUP or something, but let's
* treat this more as "file doesn't exist, poll didn't happen".
*/
if (!file)
return 0;
pt->_key = epi->event.events;
if (!is_file_epoll(file))
res = vfs_poll(file, pt);
else
res = __ep_eventpoll_poll(file, pt, depth);
fput(file);
return res & epi->event.events;
}
static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
return __ep_eventpoll_poll(file, wait, 0);
}
#ifdef CONFIG_PROC_FS
static void ep_show_fdinfo(struct seq_file *m, struct file *f)
{
struct eventpoll *ep = f->private_data;
struct rb_node *rbp;
mutex_lock(&ep->mtx);
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
struct inode *inode = file_inode(epi->ffd.file);
seq_printf(m, "tfd: %8d events: %8x data: %16llx "
" pos:%lli ino:%lx sdev:%x\n",
epi->ffd.fd, epi->event.events,
(long long)epi->event.data,
(long long)epi->ffd.file->f_pos,
inode->i_ino, inode->i_sb->s_dev);
if (seq_has_overflowed(m))
break;
}
mutex_unlock(&ep->mtx);
}
#endif
/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = ep_show_fdinfo,
#endif
.release = ep_eventpoll_release,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
.unlocked_ioctl = ep_eventpoll_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
/*
* This is called from eventpoll_release() to unlink files from the eventpoll
* interface. We need to have this facility to cleanup correctly files that are
* closed without being removed from the eventpoll interface.
*/
void eventpoll_release_file(struct file *file)
{
struct eventpoll *ep;
struct epitem *epi;
bool dispose;
/*
* Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
* touching the epitems list before eventpoll_release_file() can access
* the ep->mtx.
*/
again:
spin_lock(&file->f_lock);
if (file->f_ep && file->f_ep->first) {
epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
epi->dying = true;
spin_unlock(&file->f_lock);
/*
* ep access is safe as we still own a reference to the ep
* struct
*/
ep = epi->ep;
mutex_lock(&ep->mtx);
dispose = __ep_remove(ep, epi, true);
mutex_unlock(&ep->mtx);
if (dispose && ep_refcount_dec_and_test(ep))
ep_free(ep);
goto again;
}
spin_unlock(&file->f_lock);
}
static int ep_alloc(struct eventpoll **pep)
{
struct eventpoll *ep;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
return -ENOMEM;
mutex_init(&ep->mtx);
spin_lock_init(&ep->lock);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT_CACHED;
ep->ovflist = EP_UNACTIVE_PTR;
ep->user = get_current_user();
refcount_set(&ep->refcount, 1);
*pep = ep;
return 0;
}
/*
* Search the file inside the eventpoll tree. The RB tree operations
* are protected by the "mtx" mutex, and ep_find() must be called with
* "mtx" held.
*/
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
int kcmp;
struct rb_node *rbp;
struct epitem *epi, *epir = NULL;
struct epoll_filefd ffd;
ep_set_ffd(&ffd, file, fd);
for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
epi = rb_entry(rbp, struct epitem, rbn);
kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
if (kcmp > 0)
rbp = rbp->rb_right;
else if (kcmp < 0)
rbp = rbp->rb_left;
else {
epir = epi;
break;
}
}
return epir;
}
#ifdef CONFIG_KCMP
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
struct rb_node *rbp;
struct epitem *epi;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (epi->ffd.fd == tfd) {
if (toff == 0)
return epi;
else
toff--;
}
cond_resched();
}
return NULL;
}
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
unsigned long toff)
{
struct file *file_raw;
struct eventpoll *ep;
struct epitem *epi;
if (!is_file_epoll(file))
return ERR_PTR(-EINVAL);
ep = file->private_data;
mutex_lock(&ep->mtx);
epi = ep_find_tfd(ep, tfd, toff);
if (epi)
file_raw = epi->ffd.file;
else
file_raw = ERR_PTR(-ENOENT);
mutex_unlock(&ep->mtx);
return file_raw;
}
#endif /* CONFIG_KCMP */
/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
* have events to report.
*/
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
int pwake = 0;
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
__poll_t pollflags = key_to_poll(key);
unsigned long flags;
int ewake = 0;
spin_lock_irqsave(&ep->lock, flags);
ep_set_busy_poll_napi_id(epi);
/*
* If the event mask does not contain any poll(2) event, we consider the
* descriptor to be disabled. This condition is likely the effect of the
* EPOLLONESHOT bit that disables the descriptor when an event is received,
* until the next EPOLL_CTL_MOD will be issued.
*/
if (!(epi->event.events & ~EP_PRIVATE_BITS))
goto out_unlock;
/*
* Check the events coming with the callback. At this stage, not
* every device reports the events in the "key" parameter of the
* callback. We need to be able to handle both cases here, hence the
* test for "key" != NULL before the event match test.
*/
if (pollflags && !(pollflags & epi->event.events))
goto out_unlock;
/*
* If we are transferring events to userspace, we can hold no locks
* (because we're accessing user memory, and because of linux f_op->poll()
* semantics). All the events that happen during that period of time are
* chained in ep->ovflist and requeued later on.
*/
if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
if (epi->next == EP_UNACTIVE_PTR) {
epi->next = READ_ONCE(ep->ovflist);
WRITE_ONCE(ep->ovflist, epi);
ep_pm_stay_awake_rcu(epi);
}
} else if (!ep_is_linked(epi)) {
/* In the usual case, add event to ready list. */
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake_rcu(epi);
}
/*
* Wake up ( if active ) both the eventpoll wait list and the ->poll()
* wait list.
*/
if (waitqueue_active(&ep->wq)) {
if ((epi->event.events & EPOLLEXCLUSIVE) &&
!(pollflags & POLLFREE)) {
switch (pollflags & EPOLLINOUT_BITS) {
case EPOLLIN:
if (epi->event.events & EPOLLIN)
ewake = 1;
break;
case EPOLLOUT:
if (epi->event.events & EPOLLOUT)
ewake = 1;
break;
case 0:
ewake = 1;
break;
}
}
if (sync)
wake_up_sync(&ep->wq);
else
wake_up(&ep->wq);
}
if (waitqueue_active(&ep->poll_wait))
pwake++;
out_unlock:
spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
if (!(epi->event.events & EPOLLEXCLUSIVE))
ewake = 1;
if (pollflags & POLLFREE) {
/*
* If we race with ep_remove_wait_queue() it can miss
* ->whead = NULL and do another remove_wait_queue() after
* us, so we can't use __remove_wait_queue().
*/
list_del_init(&wait->entry);
/*
* ->whead != NULL protects us from the race with
* ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
* takes whead->lock held by the caller. Once we nullify it,
* nothing protects ep/epi or even wait.
*/
smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
}
return ewake;
}
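/*
* Illustration of the EPOLLEXCLUSIVE filtering above (userspace sketch):
* two epoll instances add the same listening socket with
*
* ev.events = EPOLLIN | EPOLLEXCLUSIVE;
* epoll_ctl(efd1, EPOLL_CTL_ADD, lsock, &ev);
* epoll_ctl(efd2, EPOLL_CTL_ADD, lsock, &ev);
*
* Both wait queue entries are added with add_wait_queue_exclusive() (see
* ep_ptable_queue_proc() below), so a single wakeup on the socket stops at
* the first callback that returns a non-zero ewake. If the reported event
* does not match the watcher's interest (e.g. EPOLLOUT delivered to an
* EPOLLIN-only watcher), ewake stays 0 and the wakeup moves on to the next
* exclusive waiter instead of being consumed by an epoll that would report
* nothing to userspace.
*/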
/*
* This is the callback that is used to add our wait queue to the
* target file wakeup lists.
*/
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
poll_table *pt)
{
struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);
struct epitem *epi = epq->epi;
struct eppoll_entry *pwq;
if (unlikely(!epi)) // an earlier allocation has failed
return;
pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);
if (unlikely(!pwq)) {
epq->epi = NULL;
return;
}
init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
pwq->whead = whead;
pwq->base = epi;
if (epi->event.events & EPOLLEXCLUSIVE)
add_wait_queue_exclusive(whead, &pwq->wait);
else
add_wait_queue(whead, &pwq->wait);
pwq->next = epi->pwqlist;
epi->pwqlist = pwq;
}
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
int kcmp;
struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
struct epitem *epic;
bool leftmost = true;
while (*p) {
parent = *p;
epic = rb_entry(parent, struct epitem, rbn);
kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
if (kcmp > 0) {
p = &parent->rb_right;
leftmost = false;
} else
p = &parent->rb_left;
}
rb_link_node(&epi->rbn, parent, p);
rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
}
#define PATH_ARR_SIZE 5
/*
* These are the numbers of paths of length 1 to 5 that we allow to emanate
* from a single file of interest. For example, we allow 1000 paths of length
* 1 to emanate from each file of interest. This essentially represents the
* potential wakeup paths, which need to be limited in order to avoid massive
* uncontrolled wakeup storms. The common use case should be a single ep which
* is connected to n file sources. In this case each file source has 1 path
* of length 1. Thus, the numbers below should be more than sufficient. These
* path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
* and delete can't add additional paths. Protected by the epnested_mutex.
*/
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];
static int path_count_inc(int nests)
{
/* Allow an arbitrary number of depth 1 paths */
if (nests == 0)
return 0;
if (++path_count[nests] > path_limits[nests])
return -1;
return 0;
}
static void path_count_init(void)
{
int i;
for (i = 0; i < PATH_ARR_SIZE; i++)
path_count[i] = 0;
}
static int reverse_path_check_proc(struct hlist_head *refs, int depth)
{
int error = 0;
struct epitem *epi;
if (depth > EP_MAX_NESTS) /* too deep nesting */
return -1;
/* CTL_DEL can remove links here, but that can't increase our count */
hlist_for_each_entry_rcu(epi, refs, fllink) {
struct hlist_head *refs = &epi->ep->refs;
if (hlist_empty(refs))
error = path_count_inc(depth);
else
error = reverse_path_check_proc(refs, depth + 1);
if (error != 0)
break;
}
return error;
}
/**
* reverse_path_check - The tfile_check_list is a list of epitems_head, which have
* links that are proposed to be newly added. We need to
* make sure that those added links don't add too many
* paths such that we will spend all our time waking up
* eventpoll objects.
*
* Return: %zero if the proposed links don't create too many paths,
* %-1 otherwise.
*/
static int reverse_path_check(void)
{
struct epitems_head *p;
for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {
int error;
path_count_init();
rcu_read_lock();
error = reverse_path_check_proc(&p->epitems, 0);
rcu_read_unlock();
if (error)
return error;
}
return 0;
}
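/*
* Illustrative example of the limits enforced above: a plain file that is
* watched directly by many epoll fds only produces depth-0 entries, which
* path_count_inc() exempts via its early return. But if more than
* path_limits[1] (500) of those epoll fds are themselves nested inside
* other epoll fds, the file gains more than 500 wakeup paths of length 2
* (file -> ep -> ep), reverse_path_check() returns -1, and the
* EPOLL_CTL_ADD that would create the excess link fails with -EINVAL in
* ep_insert() below.
*/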
static int ep_create_wakeup_source(struct epitem *epi)
{
struct name_snapshot n;
struct wakeup_source *ws;
if (!epi->ep->ws) {
epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
if (!epi->ep->ws)
return -ENOMEM;
}
take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
ws = wakeup_source_register(NULL, n.name.name);
release_dentry_name_snapshot(&n);
if (!ws)
return -ENOMEM;
rcu_assign_pointer(epi->ws, ws);
return 0;
}
/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
static noinline void ep_destroy_wakeup_source(struct epitem *epi)
{
struct wakeup_source *ws = ep_wakeup_source(epi);
RCU_INIT_POINTER(epi->ws, NULL);
/*
* wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
* used internally by wakeup_source_remove, too (called by
* wakeup_source_unregister), so we cannot use call_rcu
*/
synchronize_rcu();
wakeup_source_unregister(ws);
}
static int attach_epitem(struct file *file, struct epitem *epi)
{
struct epitems_head *to_free = NULL;
struct hlist_head *head = NULL;
struct eventpoll *ep = NULL;
if (is_file_epoll(file))
ep = file->private_data;
if (ep) {
head = &ep->refs;
} else if (!READ_ONCE(file->f_ep)) {
allocate:
to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL);
if (!to_free)
return -ENOMEM;
head = &to_free->epitems;
}
spin_lock(&file->f_lock);
if (!file->f_ep) {
if (unlikely(!head)) {
spin_unlock(&file->f_lock);
goto allocate;
}
/* See eventpoll_release() for details. */
WRITE_ONCE(file->f_ep, head);
to_free = NULL;
}
hlist_add_head_rcu(&epi->fllink, file->f_ep);
spin_unlock(&file->f_lock);
free_ephead(to_free);
return 0;
}
/*
* Must be called with "mtx" held.
*/
static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
struct file *tfile, int fd, int full_check)
{
int error, pwake = 0;
__poll_t revents;
struct epitem *epi;
struct ep_pqueue epq;
struct eventpoll *tep = NULL;
if (is_file_epoll(tfile))
tep = tfile->private_data;
lockdep_assert_irqs_enabled();
if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,
max_user_watches) >= 0))
return -ENOSPC;
percpu_counter_inc(&ep->user->epoll_watches);
if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {
percpu_counter_dec(&ep->user->epoll_watches);
return -ENOMEM;
}
/* Item initialization follows here ... */
INIT_LIST_HEAD(&epi->rdllink);
epi->ep = ep;
ep_set_ffd(&epi->ffd, tfile, fd);
epi->event = *event;
epi->next = EP_UNACTIVE_PTR;
if (tep)
mutex_lock_nested(&tep->mtx, 1);
/* Add the current item to the list of active epoll hooks for this file */
if (unlikely(attach_epitem(tfile, epi) < 0)) {
if (tep)
mutex_unlock(&tep->mtx);
kmem_cache_free(epi_cache, epi);
percpu_counter_dec(&ep->user->epoll_watches);
return -ENOMEM;
}
if (full_check && !tep)
list_file(tfile);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
ep_rbtree_insert(ep, epi);
if (tep)
mutex_unlock(&tep->mtx);
/*
* ep_remove_safe() calls in the later error paths can't lead to
* ep_free() as the ep file itself still holds an ep reference.
*/
ep_get(ep);
/* now check if we've created too many backpaths */
if (unlikely(full_check && reverse_path_check())) {
ep_remove_safe(ep, epi);
return -EINVAL;
}
if (epi->event.events & EPOLLWAKEUP) {
error = ep_create_wakeup_source(epi);
if (error) {
ep_remove_safe(ep, epi);
return error;
}
}
/* Initialize the poll table using the queue callback */
epq.epi = epi;
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
/*
* Attach the item to the poll hooks and get current event bits.
* We can safely use the file* here because its usage count has
* been increased by the caller of this function. Note that after
* this operation completes, the poll callback can start hitting
* the new item.
*/
revents = ep_item_poll(epi, &epq.pt, 1);
/*
* We have to check if something went wrong during the poll wait queue
* install process. Namely, an allocation for a wait queue failed due
* to high memory pressure.
*/
if (unlikely(!epq.epi)) {
ep_remove_safe(ep, epi);
return -ENOMEM;
}
/* We have to drop the new item inside our item list to keep track of it */
spin_lock_irq(&ep->lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
/* If the file is already "ready" we drop it inside the ready list */
if (revents && !ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irq(&ep->lock);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(ep, NULL, 0);
return 0;
}
/*
* Modify the interest event mask by dropping an event if the new mask
* has a match in the current file status. Must be called with "mtx" held.
*/
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
const struct epoll_event *event)
{
int pwake = 0;
poll_table pt;
lockdep_assert_irqs_enabled();
init_poll_funcptr(&pt, NULL);
/*
* Set the new event interest mask before calling f_op->poll();
* otherwise we might miss an event that happens between the
* f_op->poll() call and the new event set registering.
*/
epi->event.events = event->events; /* need barrier below */
epi->event.data = event->data; /* protected by mtx */
if (epi->event.events & EPOLLWAKEUP) {
if (!ep_has_wakeup_source(epi))
ep_create_wakeup_source(epi);
} else if (ep_has_wakeup_source(epi)) {
ep_destroy_wakeup_source(epi);
}
/*
* The following barrier has two effects:
*
* 1) Flush epi changes above to other CPUs. This ensures
* we do not miss events from ep_poll_callback if an
* event occurs immediately after we call f_op->poll().
* We need this because we did not take ep->lock while
* changing epi above (but ep_poll_callback does take
* ep->lock).
*
* 2) We also need to ensure we do not miss _past_ events
* when calling f_op->poll(). This barrier also
* pairs with the barrier in wq_has_sleeper (see
* comments for wq_has_sleeper).
*
* This barrier will now guarantee ep_poll_callback or f_op->poll
* (or both) will notice the readiness of an item.
*/
smp_mb();
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
* If the item is "hot" and it is not registered inside the ready
* list, push it inside.
*/
if (ep_item_poll(epi, &pt, 1)) {
spin_lock_irq(&ep->lock);
if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(ep, NULL, 0);
return 0;
}
static int ep_send_events(struct eventpoll *ep,
struct epoll_event __user *events, int maxevents)
{
struct epitem *epi, *tmp;
LIST_HEAD(txlist);
poll_table pt;
int res = 0;
/*
* Always short-circuit for fatal signals to allow threads to make a
* timely exit without the chance of finding more events available and
* fetching repeatedly.
*/
if (fatal_signal_pending(current))
return -EINTR;
init_poll_funcptr(&pt, NULL);
mutex_lock(&ep->mtx);
ep_start_scan(ep, &txlist);
/*
* We can loop without lock because we are passed a task private list.
* Items cannot vanish during the loop because we are holding ep->mtx.
*/
list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
struct wakeup_source *ws;
__poll_t revents;
if (res >= maxevents)
break;
/*
* Activate ep->ws before deactivating epi->ws to prevent
* triggering auto-suspend here (in case we reactivate epi->ws
* below).
*
* This could be rearranged to delay the deactivation of epi->ws
* instead, but then epi->ws would temporarily be out of sync
* with ep_is_linked().
*/
ws = ep_wakeup_source(epi);
if (ws) {
if (ws->active)
__pm_stay_awake(ep->ws);
__pm_relax(ws);
}
list_del_init(&epi->rdllink);
/*
* If the event mask intersects the caller-requested one,
* deliver the event to userspace. Again, we are holding ep->mtx,
* so no operations coming from userspace can change the item.
*/
revents = ep_item_poll(epi, &pt, 1);
if (!revents)
continue;
events = epoll_put_uevent(revents, epi->event.data, events);
if (!events) {
list_add(&epi->rdllink, &txlist);
ep_pm_stay_awake(epi);
if (!res)
res = -EFAULT;
break;
}
res++;
if (epi->event.events & EPOLLONESHOT)
epi->event.events &= EP_PRIVATE_BITS;
else if (!(epi->event.events & EPOLLET)) {
/*
* If this file has been added with Level
* Trigger mode, we need to insert back inside
* the ready list, so that the next call to
* epoll_wait() will check again the events
* availability. At this point, no one can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_send_events() holding "mtx" and the
* poll callback will queue them in ep->ovflist.
*/
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
}
}
ep_done_scan(ep, &txlist);
mutex_unlock(&ep->mtx);
return res;
}
static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
{
struct timespec64 now;
if (ms < 0)
return NULL;
if (!ms) {
to->tv_sec = 0;
to->tv_nsec = 0;
return to;
}
to->tv_sec = ms / MSEC_PER_SEC;
to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);
ktime_get_ts64(&now);
*to = timespec64_add_safe(now, *to);
return to;
}
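/*
* Worked example (illustrative): ms = 1500 yields the relative timespec
* { .tv_sec = 1, .tv_nsec = 500000000 }, which is then converted into an
* absolute deadline by adding the current CLOCK_MONOTONIC time. A negative
* ms returns NULL ("block forever") and ms == 0 returns a zeroed timespec
* ("do not block"), matching the @timeout semantics documented at ep_poll()
* below.
*/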
/*
* autoremove_wake_function, but remove even on failure to wake up, because we
* know that default_wake_function/ttwu will only fail if the thread is already
* woken, and in that case the ep_poll loop will remove the entry anyway, not
* try to reuse it.
*/
static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
unsigned int mode, int sync, void *key)
{
int ret = default_wake_function(wq_entry, mode, sync, key);
/*
* Pairs with list_empty_careful in ep_poll, and ensures future loop
* iterations see the cause of this wakeup.
*/
list_del_init_careful(&wq_entry->entry);
return ret;
}
static int ep_try_send_events(struct eventpoll *ep,
struct epoll_event __user *events, int maxevents)
{
int res;
/*
* Try to transfer events to user space. In case we get 0 events and
* there's still timeout left over, we try again in search of
* more luck.
*/
res = ep_send_events(ep, events, maxevents);
if (res > 0)
ep_suspend_napi_irqs(ep);
return res;
}
static int ep_schedule_timeout(ktime_t *to)
{
if (to)
return ktime_after(*to, ktime_get());
else
return 1;
}
/**
* ep_poll - Retrieves ready events, and delivers them to the caller-supplied
* event buffer.
*
* @ep: Pointer to the eventpoll context.
* @events: Pointer to the userspace buffer where the ready events should be
* stored.
* @maxevents: Size (in terms of number of events) of the caller event buffer.
* @timeout: Maximum timeout for the ready events fetch operation, in
* timespec. If the timeout is zero, the function will not block,
* while if the @timeout ptr is NULL, the function will block
* until at least one event has been retrieved (or an error
* occurred).
*
* Return: the number of ready events which have been fetched, or an
* error code, in case of error.
*/
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, struct timespec64 *timeout)
{
int res, eavail, timed_out = 0;
u64 slack = 0;
wait_queue_entry_t wait;
ktime_t expires, *to = NULL;
lockdep_assert_irqs_enabled();
if (timeout && (timeout->tv_sec | timeout->tv_nsec)) {
slack = select_estimate_accuracy(timeout);
to = &expires;
*to = timespec64_to_ktime(*timeout);
} else if (timeout) {
/*
* Avoid the unnecessary trip to the wait queue loop, if the
* caller specified a non blocking operation.
*/
timed_out = 1;
}
/*
* This call is racy: We may or may not see events that are being added
* to the ready list under the lock (e.g., in IRQ callbacks). For cases
* with a non-zero timeout, this thread will check the ready list under
* lock and will add to the wait queue. For cases with a zero
* timeout, the user by definition should not care and will have to
* recheck again.
*/
eavail = ep_events_available(ep);
while (1) {
if (eavail) {
res = ep_try_send_events(ep, events, maxevents);
if (res)
return res;
}
if (timed_out)
return 0;
eavail = ep_busy_loop(ep);
if (eavail)
continue;
if (signal_pending(current))
return -EINTR;
/*
* Internally init_wait() uses autoremove_wake_function(),
* thus the wait entry is removed from the wait queue on each
* wakeup. Why is this important? In case of several waiters,
* each new wakeup will hit the next waiter, giving it the
* chance to harvest new events. Otherwise a wakeup can be
* lost. This is also good performance-wise, because on the
* normal wakeup path there is no need to call __remove_wait_queue()
* explicitly, thus ep->lock is not taken, which would halt
* event delivery.
*
* In fact, we now use an even more aggressive function that
* unconditionally removes, because we don't reuse the wait
* entry between loop iterations. This lets us also avoid the
* performance issue if a process is killed, causing all of its
* threads to wake up without being removed normally.
*/
init_wait(&wait);
wait.func = ep_autoremove_wake_function;
spin_lock_irq(&ep->lock);
/*
* Barrierless variant, waitqueue_active() is called under
* the same lock on wakeup ep_poll_callback() side, so it
* is safe to avoid an explicit barrier.
*/
__set_current_state(TASK_INTERRUPTIBLE);
/*
* Do the final check under the lock. ep_start/done_scan()
* plays with two lists (->rdllist and ->ovflist) and there
* is always a race when both lists are empty for short
* period of time although events are pending, so lock is
* important.
*/
eavail = ep_events_available(ep);
if (!eavail)
__add_wait_queue_exclusive(&ep->wq, &wait);
spin_unlock_irq(&ep->lock);
if (!eavail)
timed_out = !ep_schedule_timeout(to) ||
!schedule_hrtimeout_range(to, slack,
HRTIMER_MODE_ABS);
__set_current_state(TASK_RUNNING);
/*
* We were woken up, thus go and try to harvest some events.
* If timed out and still on the wait queue, recheck eavail
* carefully under lock, below.
*/
eavail = 1;
if (!list_empty_careful(&wait.entry)) {
spin_lock_irq(&ep->lock);
/*
* If the thread timed out and is not on the wait queue,
* it means that the thread was woken up after its
* timeout expired before it could reacquire the lock.
* Thus, when wait.entry is empty, it needs to harvest
* events.
*/
if (timed_out)
eavail = list_empty(&wait.entry);
__remove_wait_queue(&ep->wq, &wait);
spin_unlock_irq(&ep->lock);
}
}
}
/**
* ep_loop_check_proc - verify that adding an epoll file @ep inside another
* epoll file does not create closed loops, and
* determine the depth of the subtree starting at @ep
*
* @ep: the &struct eventpoll to be currently checked.
* @depth: Current depth of the path being checked.
*
* Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.
*/
static int ep_loop_check_proc(struct eventpoll *ep, int depth)
{
int result = 0;
struct rb_node *rbp;
struct epitem *epi;
if (ep->gen == loop_check_gen)
return ep->loop_check_depth;
mutex_lock_nested(&ep->mtx, depth + 1);
ep->gen = loop_check_gen;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (unlikely(is_file_epoll(epi->ffd.file))) {
struct eventpoll *ep_tovisit;
ep_tovisit = epi->ffd.file->private_data;
if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
result = INT_MAX;
else
result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);
if (result > EP_MAX_NESTS)
break;
} else {
/*
* If we've reached a file that is not associated with
* an ep, then we need to check if the newly added
* links are going to add too many wakeup paths. We do
* this by adding it to the tfile_check_list, if it's
* not already there, and calling reverse_path_check()
* during ep_insert().
*/
list_file(epi->ffd.file);
}
}
ep->loop_check_depth = result;
mutex_unlock(&ep->mtx);
return result;
}
/* ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards */
static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)
{
int result = 0;
struct epitem *epi;
if (ep->gen == loop_check_gen)
return ep->loop_check_depth;
hlist_for_each_entry_rcu(epi, &ep->refs, fllink)
result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);
ep->gen = loop_check_gen;
ep->loop_check_depth = result;
return result;
}
/**
* ep_loop_check - Performs a check to verify that adding an epoll file (@to)
* into another epoll file (represented by @ep) does not create
* closed loops or too deep chains.
*
* @ep: Pointer to the epoll we are inserting into.
* @to: Pointer to the epoll to be inserted.
*
* Return: %zero if adding the epoll @to inside the epoll @ep
* does not violate the constraints, or %-1 otherwise.
*/
static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
{
int depth, upwards_depth;
inserting_into = ep;
/*
* Check how deep down we can get from @to, and whether it is possible
* to loop up to @ep.
*/
depth = ep_loop_check_proc(to, 0);
if (depth > EP_MAX_NESTS)
return -1;
/* Check how far up we can go from @ep. */
rcu_read_lock();
upwards_depth = ep_get_upwards_depth_proc(ep, 0);
rcu_read_unlock();
return (depth + 1 + upwards_depth > EP_MAX_NESTS) ? -1 : 0;
}
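/*
* Illustrative userspace sketch of what the check above rejects:
*
* efd1 = epoll_create1(0);
* efd2 = epoll_create1(0);
* epoll_ctl(efd1, EPOLL_CTL_ADD, efd2, &ev); // ok, simple nesting
* epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev); // rejected
*
* The second ADD would create the cycle efd1 -> efd2 -> efd1, which
* ep_loop_check_proc() detects through the inserting_into pointer; chains
* deeper than EP_MAX_NESTS are refused the same way. do_epoll_ctl()
* translates the failure into -ELOOP.
*/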
static void clear_tfile_check_list(void)
{
rcu_read_lock();
while (tfile_check_list != EP_UNACTIVE_PTR) {
struct epitems_head *head = tfile_check_list;
tfile_check_list = head->next;
unlist_file(head);
}
rcu_read_unlock();
}
/*
* Open an eventpoll file descriptor.
*/
static int do_epoll_create(int flags)
{
int error, fd;
struct eventpoll *ep = NULL;
struct file *file;
/* Check the EPOLL_* constant for consistency. */
BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
if (flags & ~EPOLL_CLOEXEC)
return -EINVAL;
/*
* Create the internal data structure ("struct eventpoll").
*/
error = ep_alloc(&ep);
if (error < 0)
return error;
/*
* Creates all the items needed to set up an eventpoll file. That is,
* a file structure and a free file descriptor.
*/
fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
if (fd < 0) {
error = fd;
goto out_free_ep;
}
file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
O_RDWR | (flags & O_CLOEXEC));
if (IS_ERR(file)) {
error = PTR_ERR(file);
goto out_free_fd;
}
ep->file = file;
fd_install(fd, file);
return fd;
out_free_fd:
put_unused_fd(fd);
out_free_ep:
ep_clear_and_put(ep);
return error;
}
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
return do_epoll_create(flags);
}
SYSCALL_DEFINE1(epoll_create, int, size)
{
if (size <= 0)
return -EINVAL;
return do_epoll_create(0);
}
#ifdef CONFIG_PM_SLEEP
static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
{
if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
epev->events &= ~EPOLLWAKEUP;
}
#else
static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
{
epev->events &= ~EPOLLWAKEUP;
}
#endif
static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
bool nonblock)
{
if (!nonblock) {
mutex_lock_nested(mutex, depth);
return 0;
}
if (mutex_trylock(mutex))
return 0;
return -EAGAIN;
}
int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
bool nonblock)
{
int error;
int full_check = 0;
struct eventpoll *ep;
struct epitem *epi;
struct eventpoll *tep = NULL;
CLASS(fd, f)(epfd);
if (fd_empty(f))
return -EBADF;
/* Get the "struct file *" for the target file */
CLASS(fd, tf)(fd);
if (fd_empty(tf))
return -EBADF;
/* The target file descriptor must support poll */
if (!file_can_poll(fd_file(tf)))
return -EPERM;
/* Check if EPOLLWAKEUP is allowed */
if (ep_op_has_event(op))
ep_take_care_of_epollwakeup(epds);
/*
* We have to check that the file structure underneath the file descriptor
* the user passed to us _is_ an eventpoll file. And also we do not permit
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
if (fd_file(f) == fd_file(tf) || !is_file_epoll(fd_file(f)))
goto error_tgt_fput;
/*
* epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
* so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
* Also, we do not currently support nested exclusive wakeups.
*/
if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
if (op == EPOLL_CTL_MOD)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD && (is_file_epoll(fd_file(tf)) ||
(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
goto error_tgt_fput;
}
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
ep = fd_file(f)->private_data;
/*
* When we insert an epoll file descriptor inside another epoll file
* descriptor, there is the chance of creating closed loops, which are
* better handled here than in more critical paths. While we are
* checking for loops we also determine the list of files reachable
* and hang them on the tfile_check_list, so we can check that we
* haven't created too many possible wakeup paths.
*
* We do not need to take the global 'epnested_mutex' on EPOLL_CTL_ADD when
* the epoll file descriptor is attaching directly to a wakeup source,
* unless the epoll file descriptor is nested. The purpose of taking the
* 'epnested_mutex' on add is to prevent complex topologies such as loops and
* deep wakeup paths from forming in parallel through multiple
* EPOLL_CTL_ADD operations.
*/
error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
if (error)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD) {
if (READ_ONCE(fd_file(f)->f_ep) || ep->gen == loop_check_gen ||
is_file_epoll(fd_file(tf))) {
mutex_unlock(&ep->mtx);
error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
if (error)
goto error_tgt_fput;
loop_check_gen++;
full_check = 1;
if (is_file_epoll(fd_file(tf))) {
tep = fd_file(tf)->private_data;
error = -ELOOP;
if (ep_loop_check(ep, tep) != 0)
goto error_tgt_fput;
}
error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
if (error)
goto error_tgt_fput;
}
}
/*
* Try to look up the file inside our RB tree. Since we grabbed "mtx"
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
epi = ep_find(ep, fd_file(tf), fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds->events |= EPOLLERR | EPOLLHUP;
error = ep_insert(ep, epds, fd_file(tf), fd, full_check);
} else
error = -EEXIST;
break;
case EPOLL_CTL_DEL:
if (epi) {
/*
* The eventpoll itself is still alive: the refcount
* can't go to zero here.
*/
ep_remove_safe(ep, epi);
error = 0;
} else {
error = -ENOENT;
}
break;
case EPOLL_CTL_MOD:
if (epi) {
if (!(epi->event.events & EPOLLEXCLUSIVE)) {
epds->events |= EPOLLERR | EPOLLHUP;
error = ep_modify(ep, epi, epds);
}
} else
error = -ENOENT;
break;
}
mutex_unlock(&ep->mtx);
error_tgt_fput:
if (full_check) {
clear_tfile_check_list();
loop_check_gen++;
mutex_unlock(&epnested_mutex);
}
return error;
}
/*
* The following function implements the controller interface for
* the eventpoll file that enables the insertion/removal/change of
* file descriptors inside the interest set.
*/
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
struct epoll_event epds;
if (ep_op_has_event(op) &&
copy_from_user(&epds, event, sizeof(struct epoll_event)))
return -EFAULT;
return do_epoll_ctl(epfd, op, fd, &epds, false);
}
static int ep_check_params(struct file *file, struct epoll_event __user *evs,
int maxevents)
{
/* The maximum number of events must be greater than zero */
if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
return -EINVAL;
/* Verify that the area passed by the user is writeable */
if (!access_ok(evs, maxevents * sizeof(struct epoll_event)))
return -EFAULT;
/*
* We have to check that the file structure underneath the fd
* the user passed to us _is_ an eventpoll file.
*/
if (!is_file_epoll(file))
return -EINVAL;
return 0;
}
int epoll_sendevents(struct file *file, struct epoll_event __user *events,
int maxevents)
{
struct eventpoll *ep;
int ret;
ret = ep_check_params(file, events, maxevents);
if (unlikely(ret))
return ret;
ep = file->private_data;
/*
* Racy call, but that's ok - it should get retried based on
* poll readiness anyway.
*/
if (ep_events_available(ep))
return ep_try_send_events(ep, events, maxevents);
return 0;
}
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
*/
static int do_epoll_wait(int epfd, struct epoll_event __user *events,
int maxevents, struct timespec64 *to)
{
struct eventpoll *ep;
int ret;
/* Get the "struct file *" for the eventpoll file */
CLASS(fd, f)(epfd);
if (fd_empty(f))
return -EBADF;
ret = ep_check_params(fd_file(f), events, maxevents);
if (unlikely(ret))
return ret;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
ep = fd_file(f)->private_data;
/* Time to fish for events ... */
return ep_poll(ep, events, maxevents, to);
}
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout)
{
struct timespec64 to;
return do_epoll_wait(epfd, events, maxevents,
ep_timeout_to_timespec(&to, timeout));
}
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_pwait(2).
*/
static int do_epoll_pwait(int epfd, struct epoll_event __user *events,
int maxevents, struct timespec64 *to,
const sigset_t __user *sigmask, size_t sigsetsize)
{
int error;
/*
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
error = set_user_sigmask(sigmask, sigsetsize);
if (error)
return error;
error = do_epoll_wait(epfd, events, maxevents, to);
restore_saved_sigmask_unless(error == -EINTR);
return error;
}
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout, const sigset_t __user *, sigmask,
size_t, sigsetsize)
{
struct timespec64 to;
return do_epoll_pwait(epfd, events, maxevents,
ep_timeout_to_timespec(&to, timeout),
sigmask, sigsetsize);
}
SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events,
int, maxevents, const struct __kernel_timespec __user *, timeout,
const sigset_t __user *, sigmask, size_t, sigsetsize)
{
struct timespec64 ts, *to = NULL;
if (timeout) {
if (get_timespec64(&ts, timeout))
return -EFAULT;
to = &ts;
if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
return -EINVAL;
}
return do_epoll_pwait(epfd, events, maxevents, to,
sigmask, sigsetsize);
}
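/*
* Userspace calling convention sketch for epoll_pwait2 (illustrative, using
* the libc wrapper of the same name):
*
* struct timespec ts = { .tv_sec = 0, .tv_nsec = 250000000 }; // 250ms
* sigset_t mask;
* sigemptyset(&mask);
* sigaddset(&mask, SIGINT); // keep SIGINT blocked while waiting
* int n = epoll_pwait2(epfd, events, maxevents, &ts, &mask);
*
* A NULL timeout blocks indefinitely, and an invalid timespec (e.g.
* tv_nsec >= NSEC_PER_SEC) is rejected with -EINVAL by
* poll_select_set_timeout() above.
*/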
#ifdef CONFIG_COMPAT
static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events,
int maxevents, struct timespec64 *timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize)
{
long err;
/*
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
err = set_compat_user_sigmask(sigmask, sigsetsize);
if (err)
return err;
err = do_epoll_wait(epfd, events, maxevents, timeout);
restore_saved_sigmask_unless(err == -EINTR);
return err;
}
COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
struct epoll_event __user *, events,
int, maxevents, int, timeout,
const compat_sigset_t __user *, sigmask,
compat_size_t, sigsetsize)
{
struct timespec64 to;
return do_compat_epoll_pwait(epfd, events, maxevents,
ep_timeout_to_timespec(&to, timeout),
sigmask, sigsetsize);
}
COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd,
struct epoll_event __user *, events,
int, maxevents,
const struct __kernel_timespec __user *, timeout,
const compat_sigset_t __user *, sigmask,
compat_size_t, sigsetsize)
{
struct timespec64 ts, *to = NULL;
if (timeout) {
if (get_timespec64(&ts, timeout))
return -EFAULT;
to = &ts;
if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
return -EINVAL;
}
return do_compat_epoll_pwait(epfd, events, maxevents, to,
sigmask, sigsetsize);
}
#endif
static int __init eventpoll_init(void)
{
struct sysinfo si;
si_meminfo(&si);
/*
* Allows top 4% of lowmem to be allocated for epoll watches (per user).
*/
max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
BUG_ON(max_user_watches < 0);
/*
* We can have many thousands of epitems, so prevent this from
* using an extra cache line on 64-bit (and smaller) CPUs
*/
BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
/* Allocates slab cache used to allocate "struct epitem" items */
epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
epoll_sysctls_init();
ephead_cache = kmem_cache_create("ep_head",
sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
return 0;
}
fs_initcall(eventpoll_init);
/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: low-level thread information
*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds and Dave Miller
*/
#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/types.h>
/*
* TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
* reserve at the top of the kernel stack. We do it because of a nasty
* 32-bit corner case. On x86_32, the hardware stack frame is
* variable-length. Except for vm86 mode, struct pt_regs assumes a
* maximum-length frame. If we enter from CPL 0, the top 8 bytes of
* pt_regs don't actually exist. Ordinarily this doesn't matter, but it
* does in at least one case:
*
* If we take an NMI early enough in SYSENTER, then we can end up with
* pt_regs that extends above sp0. On the way out, in the espfix code,
* we can read the saved SS value, but that value will be above sp0.
* Without this offset, that can result in a page fault. (We are
* careful that, in this case, the value we read doesn't matter.)
*
* In vm86 mode, the hardware frame is much longer still, so add 16
* bytes to make room for the real-mode segments.
*
* x86-64 has a fixed-length stack frame, but it depends on whether
* or not FRED is enabled. Future versions of FRED might make this
* dynamic, but for now it is always 2 words longer.
*/
#ifdef CONFIG_X86_32
# ifdef CONFIG_VM86
# define TOP_OF_KERNEL_STACK_PADDING 16
# else
# define TOP_OF_KERNEL_STACK_PADDING 8
# endif
#else /* x86-64 */
# ifdef CONFIG_X86_FRED
# define TOP_OF_KERNEL_STACK_PADDING (2 * 8)
# else
# define TOP_OF_KERNEL_STACK_PADDING 0
# endif
#endif
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
*/
#ifndef __ASSEMBLER__
struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
u32 status; /* thread synchronous flags */
#ifdef CONFIG_SMP
u32 cpu; /* current CPU */
#endif
};
#define INIT_THREAD_INFO(tsk) \
{ \
.flags = 0, \
}
#else /* !__ASSEMBLER__ */
#include <asm/asm-offsets.h>
#endif
/*
* Tell the generic TIF infrastructure which bits x86 supports
*/
#define HAVE_TIF_NEED_RESCHED_LAZY
#define HAVE_TIF_POLLING_NRFLAG
#define HAVE_TIF_SINGLESTEP
#include <asm-generic/thread_info_tif.h>
/* Architecture specific TIF space starts at 16 */
#define TIF_SSBD 16 /* Speculative store bypass disable */
#define TIF_SPEC_IB 17 /* Indirect branch speculation mitigation */
#define TIF_SPEC_L1D_FLUSH 18 /* Flush L1D on mm switches (processes) */
#define TIF_NEED_FPU_LOAD 19 /* load FPU on return to userspace */
#define TIF_NOCPUID 20 /* CPUID is not accessible in userland */
#define TIF_NOTSC 21 /* TSC is not accessible in userland */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
#define TIF_SPEC_FORCE_UPDATE 23 /* Force speculation MSR update in context switch */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_SINGLESTEP 25 /* reenable singlestep on user return*/
#define TIF_BLOCKSTEP 26 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
#define TIF_ADDR32 28 /* 32-bit address space on 64 bits */
#define _TIF_SSBD BIT(TIF_SSBD)
#define _TIF_SPEC_IB BIT(TIF_SPEC_IB)
#define _TIF_SPEC_L1D_FLUSH BIT(TIF_SPEC_L1D_FLUSH)
#define _TIF_NEED_FPU_LOAD BIT(TIF_NEED_FPU_LOAD)
#define _TIF_NOCPUID BIT(TIF_NOCPUID)
#define _TIF_NOTSC BIT(TIF_NOTSC)
#define _TIF_IO_BITMAP BIT(TIF_IO_BITMAP)
#define _TIF_SPEC_FORCE_UPDATE BIT(TIF_SPEC_FORCE_UPDATE)
#define _TIF_FORCED_TF BIT(TIF_FORCED_TF)
#define _TIF_BLOCKSTEP BIT(TIF_BLOCKSTEP)
#define _TIF_SINGLESTEP BIT(TIF_SINGLESTEP)
#define _TIF_LAZY_MMU_UPDATES BIT(TIF_LAZY_MMU_UPDATES)
#define _TIF_ADDR32 BIT(TIF_ADDR32)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE \
(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \
_TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
/*
* Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
*/
#ifdef CONFIG_SMP
# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
#else
# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
#endif
#ifdef CONFIG_X86_IOPL_IOPERM
# define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW| _TIF_USER_RETURN_NOTIFY | \
_TIF_IO_BITMAP)
#else
# define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW| _TIF_USER_RETURN_NOTIFY)
#endif
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
#define STACK_WARN (THREAD_SIZE/8)
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#ifndef __ASSEMBLER__
/*
* Walks up the stack frames to make sure that the specified object is
* entirely contained by a single stack frame.
*
* Returns:
* GOOD_FRAME if within a frame
* BAD_STACK if placed across a frame boundary (or outside stack)
* NOT_STACK unable to determine (no frame pointers, etc)
*
* This function reads pointers from the stack and dereferences them. The
* pointers may not have their KMSAN shadow set up properly, which may result
* in false positive reports. Disable instrumentation to avoid those.
*/
__no_kmsan_checks
static inline int arch_within_stack_frames(const void * const stack,
const void * const stackend,
const void *obj, unsigned long len)
{
#if defined(CONFIG_FRAME_POINTER)
const void *frame = NULL;
const void *oldframe;
oldframe = __builtin_frame_address(1);
if (oldframe)
frame = __builtin_frame_address(2);
/*
* low ----------------------------------------------> high
* [saved bp][saved ip][args][local vars][saved bp][saved ip]
* ^----------------^
* allow copies only within here
*/
while (stack <= frame && frame < stackend) {
/*
* If obj + len extends past the last frame, this
* check won't pass and the next frame will be 0,
* causing us to bail out and correctly report
* the copy as invalid.
*/
if (obj + len <= frame)
return obj >= oldframe + 2 * sizeof(void *) ?
GOOD_FRAME : BAD_STACK;
oldframe = frame;
frame = *(const void * const *)frame;
}
return BAD_STACK;
#else
return NOT_STACK;
#endif
}
#endif /* !__ASSEMBLER__ */
/*
* Thread-synchronous status.
*
* This is different from the flags in that nobody else
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
#ifndef __ASSEMBLER__
#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
#define arch_set_restart_data(restart) \
do { restart->arch_data = current_thread_info()->status; } while (0)
#endif
#ifdef CONFIG_X86_32
#define in_ia32_syscall() true
#else
#define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \
current_thread_info()->status & TS_COMPAT)
#endif
extern void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_THREAD_INFO_H */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Christian Brauner <brauner@kernel.org> */
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>
#include "internal.h"
/*
* Outside of this file vfs{g,u}id_t are always created from k{g,u}id_t,
* never from raw values. These are just internal helpers.
*/
#define VFSUIDT_INIT_RAW(val) (vfsuid_t){ val }
#define VFSGIDT_INIT_RAW(val) (vfsgid_t){ val }
struct mnt_idmap {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
refcount_t count;
};
/*
* Carries the initial idmapping of 0:0:4294967295 which is an identity
* mapping. This means that {g,u}id 0 is mapped to {g,u}id 0, {g,u}id 1 is
* mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
*/
struct mnt_idmap nop_mnt_idmap = {
.count = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);
/*
* Carries the invalid idmapping of a full 0-4294967295 {g,u}id range.
* This means that all {g,u}ids are mapped to INVALID_VFS{G,U}ID.
*/
struct mnt_idmap invalid_mnt_idmap = {
.count = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(invalid_mnt_idmap);
/**
* initial_idmapping - check whether this is the initial mapping
* @ns: idmapping to check
*
* Check whether this is the initial mapping, mapping 0 to 0, 1 to 1,
* [...], 1000 to 1000 [...].
*
* Return: true if this is the initial mapping, false if not.
*/
static inline bool initial_idmapping(const struct user_namespace *ns)
{
return ns == &init_user_ns;
}
/**
* make_vfsuid - map a filesystem kuid according to an idmapping
* @idmap: the mount's idmapping
* @fs_userns: the filesystem's idmapping
* @kuid : kuid to be mapped
*
* Take a @kuid and remap it from @fs_userns into @idmap. Use this
* function when preparing a @kuid to be reported to userspace.
*
* If @idmap is @nop_mnt_idmap the mount is not idmapped and we can
* simply return @kuid unchanged.
* If initial_idmapping() tells us that the filesystem is not mounted with an
* idmapping we know the value of @kuid won't change when calling
* from_kuid() so we can simply retrieve the value via __kuid_val()
* directly.
*
* Return: @kuid mapped according to @idmap.
* If @kuid has no mapping in either @idmap or @fs_userns INVALID_UID is
* returned.
*/
vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns,
kuid_t kuid)
{
uid_t uid;
if (idmap == &nop_mnt_idmap)
return VFSUIDT_INIT(kuid);
if (idmap == &invalid_mnt_idmap)
return INVALID_VFSUID;
if (initial_idmapping(fs_userns))
uid = __kuid_val(kuid);
else
uid = from_kuid(fs_userns, kuid);
if (uid == (uid_t)-1)
return INVALID_VFSUID;
return VFSUIDT_INIT_RAW(map_id_down(&idmap->uid_map, uid));
}
EXPORT_SYMBOL_GPL(make_vfsuid);
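/*
 * Illustrative sketch (not part of the original source): reporting an inode
 * owner to userspace. The helper name example_report_uid() and the use of
 * from_kuid_munged() on the result are assumptions for illustration only.
 */
static inline uid_t example_report_uid(struct mnt_idmap *idmap,
				       struct user_namespace *fs_userns,
				       kuid_t i_uid)
{
	vfsuid_t vfsuid = make_vfsuid(idmap, fs_userns, i_uid);

	/* Translate the mapped id into the caller's user namespace. */
	return from_kuid_munged(current_user_ns(), vfsuid_into_kuid(vfsuid));
}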
/**
* make_vfsgid - map a filesystem kgid according to an idmapping
* @idmap: the mount's idmapping
* @fs_userns: the filesystem's idmapping
* @kgid : kgid to be mapped
*
* Take a @kgid and remap it from @fs_userns into @idmap. Use this
* function when preparing a @kgid to be reported to userspace.
*
* If @idmap is @nop_mnt_idmap the mount is not idmapped and we can
* simply return @kgid unchanged.
* If initial_idmapping() tells us that the filesystem is not mounted with an
* idmapping we know the value of @kgid won't change when calling
* from_kgid() so we can simply retrieve the value via __kgid_val()
* directly.
*
* Return: @kgid mapped according to @idmap.
* If @kgid has no mapping in either @idmap or @fs_userns INVALID_GID is
* returned.
*/
vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, kgid_t kgid)
{
gid_t gid;
if (idmap == &nop_mnt_idmap)
return VFSGIDT_INIT(kgid);
if (idmap == &invalid_mnt_idmap)
return INVALID_VFSGID;
if (initial_idmapping(fs_userns))
gid = __kgid_val(kgid);
else
gid = from_kgid(fs_userns, kgid);
if (gid == (gid_t)-1)
return INVALID_VFSGID;
return VFSGIDT_INIT_RAW(map_id_down(&idmap->gid_map, gid));
}
EXPORT_SYMBOL_GPL(make_vfsgid);
/**
* from_vfsuid - map a vfsuid into the filesystem idmapping
* @idmap: the mount's idmapping
* @fs_userns: the filesystem's idmapping
* @vfsuid : vfsuid to be mapped
*
* Map @vfsuid into the filesystem idmapping. This function has to be used in
* order to e.g. write @vfsuid to inode->i_uid.
*
* Return: @vfsuid mapped into the filesystem idmapping
*/
kuid_t from_vfsuid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, vfsuid_t vfsuid)
{
uid_t uid;
if (idmap == &nop_mnt_idmap)
return AS_KUIDT(vfsuid);
if (idmap == &invalid_mnt_idmap)
return INVALID_UID;
uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid));
if (uid == (uid_t)-1)
return INVALID_UID;
if (initial_idmapping(fs_userns))
return KUIDT_INIT(uid);
return make_kuid(fs_userns, uid);
}
EXPORT_SYMBOL_GPL(from_vfsuid);
/**
* from_vfsgid - map a vfsgid into the filesystem idmapping
* @idmap: the mount's idmapping
* @fs_userns: the filesystem's idmapping
* @vfsgid : vfsgid to be mapped
*
* Map @vfsgid into the filesystem idmapping. This function has to be used in
* order to e.g. write @vfsgid to inode->i_gid.
*
* Return: @vfsgid mapped into the filesystem idmapping
*/
kgid_t from_vfsgid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, vfsgid_t vfsgid)
{
gid_t gid;
if (idmap == &nop_mnt_idmap)
return AS_KGIDT(vfsgid);
if (idmap == &invalid_mnt_idmap)
return INVALID_GID;
gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid));
if (gid == (gid_t)-1)
return INVALID_GID;
if (initial_idmapping(fs_userns))
return KGIDT_INIT(gid);
return make_kgid(fs_userns, gid);
}
EXPORT_SYMBOL_GPL(from_vfsgid);
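/*
 * Illustrative sketch (not part of the original source): mapping a
 * vfsuid/vfsgid pair back into the filesystem idmapping before storing it in
 * an inode. example_apply_ids() is a made-up helper name.
 */
static inline void example_apply_ids(struct mnt_idmap *idmap,
				     struct user_namespace *fs_userns,
				     struct inode *inode,
				     vfsuid_t vfsuid, vfsgid_t vfsgid)
{
	inode->i_uid = from_vfsuid(idmap, fs_userns, vfsuid);
	inode->i_gid = from_vfsgid(idmap, fs_userns, vfsgid);
}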
#ifdef CONFIG_MULTIUSER
/**
* vfsgid_in_group_p() - check whether a vfsgid matches the caller's groups
* @vfsgid: the mnt gid to match
*
* This function can be used to determine whether @vfsgid matches any of the
* caller's groups.
*
* Return: 1 if @vfsgid matches the caller's groups, 0 if not.
*/
int vfsgid_in_group_p(vfsgid_t vfsgid)
{
return in_group_p(AS_KGIDT(vfsgid));
}
#else
int vfsgid_in_group_p(vfsgid_t vfsgid)
{
return 1;
}
#endif
EXPORT_SYMBOL_GPL(vfsgid_in_group_p);
static int copy_mnt_idmap(struct uid_gid_map *map_from,
struct uid_gid_map *map_to)
{
struct uid_gid_extent *forward, *reverse;
u32 nr_extents = READ_ONCE(map_from->nr_extents);
/* Pairs with smp_wmb() when writing the idmapping. */
smp_rmb();
/*
* Don't copy anything if no idmapping has been written yet, i.e. if
* @nr_extents is zero. Someone could still be in the middle of writing
* the idmapping, and copying it now could leave us with inconsistent
* data. So just don't do anything at all.
*/
if (nr_extents == 0)
return -EINVAL;
/*
* Here we know that nr_extents is greater than zero which means
* a map has been written. Since idmappings can't be changed
* once they have been written we know that we can safely copy
* from @map_from into @map_to.
*/
if (nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
*map_to = *map_from;
return 0;
}
forward = kmemdup_array(map_from->forward, nr_extents,
sizeof(struct uid_gid_extent),
GFP_KERNEL_ACCOUNT);
if (!forward)
return -ENOMEM;
reverse = kmemdup_array(map_from->reverse, nr_extents,
sizeof(struct uid_gid_extent),
GFP_KERNEL_ACCOUNT);
if (!reverse) {
kfree(forward);
return -ENOMEM;
}
/*
* The idmapping isn't exposed anywhere so we don't need to care
* about ordering between extent pointers and @nr_extents
* initialization.
*/
map_to->forward = forward;
map_to->reverse = reverse;
map_to->nr_extents = nr_extents;
return 0;
}
static void free_mnt_idmap(struct mnt_idmap *idmap)
{
if (idmap->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
kfree(idmap->uid_map.forward);
kfree(idmap->uid_map.reverse);
}
if (idmap->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
kfree(idmap->gid_map.forward);
kfree(idmap->gid_map.reverse);
}
kfree(idmap);
}
struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
{
struct mnt_idmap *idmap;
int ret;
idmap = kzalloc(sizeof(struct mnt_idmap), GFP_KERNEL_ACCOUNT);
if (!idmap)
return ERR_PTR(-ENOMEM);
refcount_set(&idmap->count, 1);
ret = copy_mnt_idmap(&mnt_userns->uid_map, &idmap->uid_map);
if (!ret)
ret = copy_mnt_idmap(&mnt_userns->gid_map, &idmap->gid_map);
if (ret) {
free_mnt_idmap(idmap);
idmap = ERR_PTR(ret);
}
return idmap;
}
/**
* mnt_idmap_get - get a reference to an idmapping
* @idmap: the idmap to bump the reference on
*
* If @idmap is not the @nop_mnt_idmap bump the reference count.
*
* Return: @idmap, with its reference count bumped unless it is @nop_mnt_idmap
* or @invalid_mnt_idmap.
*/
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
{
	if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap)
		refcount_inc(&idmap->count);
	return idmap;
}
EXPORT_SYMBOL_GPL(mnt_idmap_get);
/**
* mnt_idmap_put - put a reference to an idmapping
* @idmap: the idmap to put the reference on
*
* If this is neither the initial nor the invalid idmapping, drop a reference
* when a mount is released and free the idmapping once the last user is gone.
*/
void mnt_idmap_put(struct mnt_idmap *idmap)
{
if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap &&
refcount_dec_and_test(&idmap->count))
free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map)
{
struct uid_gid_map *map, *map_up;
u32 idx, nr_mappings;
if (!is_valid_mnt_idmap(idmap))
return 0;
/*
* Idmappings are shown relative to the caller's idmapping.
* This is both the most intuitive and most useful solution.
*/
if (uid_map) {
map = &idmap->uid_map;
		map_up = &current_user_ns()->uid_map;
} else {
map = &idmap->gid_map;
		map_up = &current_user_ns()->gid_map;
}
for (idx = 0, nr_mappings = 0; idx < map->nr_extents; idx++) {
uid_t lower;
struct uid_gid_extent *extent;
if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
extent = &map->extent[idx];
else
extent = &map->forward[idx];
/*
* Verify that the whole range of the mapping can be
* resolved in the caller's idmapping. If it cannot be
* resolved skip the mapping.
*/
lower = map_id_range_up(map_up, extent->lower_first, extent->count);
if (lower == (uid_t) -1)
continue;
seq_printf(seq, "%u %u %u", extent->first, lower, extent->count);
seq->count++; /* mappings are separated by \0 */
if (seq_has_overflowed(seq))
return -EAGAIN;
nr_mappings++;
}
return nr_mappings;
}
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_MAPLE_TREE_H
#define _LINUX_MAPLE_TREE_H
/*
* Maple Tree - An RCU-safe adaptive tree for storing ranges
* Copyright (c) 2018-2022 Oracle
* Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
* Matthew Wilcox <willy@infradead.org>
*/
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
/* #define CONFIG_MAPLE_RCU_DISABLED */
/*
* Allocated nodes are mutable until they have been inserted into the tree,
* at which time they cannot change their type until they have been removed
* from the tree and an RCU grace period has passed.
*
* Removed nodes have their ->parent set to point to themselves. RCU readers
* check ->parent before relying on the value that they loaded from the
* slots array. This lets us reuse the slots array for the RCU head.
*
* Nodes in the tree point to their parent unless bit 0 is set.
*/
#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
/* 64bit sizes */
#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
#define MAPLE_NODE_MASK 255UL
/*
* The node->parent of the root node has bit 0 set and the rest of the pointer
* is a pointer to the tree itself. No more bits are available in this pointer
* (on m68k, the data structure may only be 2-byte aligned).
*
* Internal non-root nodes can only have maple_range_* nodes as parents. The
* parent pointer is 256B aligned like all other tree nodes. When storing a 32
* or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
* extra bit to store the offset. This extra bit comes from a reuse of the last
* bit in the node type. This is possible by using bit 1 to indicate if bit 2
* is part of the type or the slot.
*
* Once the type is decided, the decision of an allocation range type or a
* range type is done by examining the immutable tree flag for the
* MT_FLAGS_ALLOC_RANGE flag.
*
* Node types:
* 0b??1 = Root
* 0b?00 = 16 bit nodes
* 0b010 = 32 bit nodes
* 0b110 = 64 bit nodes
*
* Slot size and location in the parent pointer:
* type : slot location
* 0b??1 : Root
* 0b?00 : 16 bit values, type in 0-1, slot in 2-6
* 0b010 : 32 bit values, type in 0-2, slot in 3-6
* 0b110 : 64 bit values, type in 0-2, slot in 3-6
*/
/*
* This metadata is used to optimize the gap updating code and in reverse
* searching for gaps or any other code that needs to find the end of the data.
*/
struct maple_metadata {
unsigned char end; /* end of data */
unsigned char gap; /* offset of largest gap */
};
/*
* Leaf nodes do not store pointers to nodes, they store user data. Users may
* store almost any bit pattern. As noted above, the optimisation of storing an
* entry at 0 in the root pointer cannot be done for data which have the bottom
* two bits set to '10'. We also reserve values with the bottom two bits set to
* '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
* return errnos as a negative errno shifted left by two bits with the bottom
* two bits set to '10', and while choosing to store these values in the array
* is not an error, it may lead to confusion if you're testing for an error with
* mas_is_err().
*
* Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
* 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
*
* In regular B-Tree terms, pivots are called keys. The term pivot is used to
* indicate that the tree is specifying ranges. Pivots may appear in the
* subtree with an entry attached to the value whereas keys are unique to a
* specific position of a B-tree. Pivot values are inclusive of the slot with
* the same index.
*/
struct maple_range_64 {
struct maple_pnode *parent;
unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
union {
void __rcu *slot[MAPLE_RANGE64_SLOTS];
struct {
void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
struct maple_metadata meta;
};
};
};
/*
* At tree creation time, the user can specify that they're willing to trade off
* storing fewer entries in a tree in return for storing more information in
* each node.
*
* The maple tree supports recording the largest range of NULL entries available
* in this node, also called gaps. This optimises the tree for allocating a
* range.
*/
struct maple_arange_64 {
struct maple_pnode *parent;
unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
void __rcu *slot[MAPLE_ARANGE64_SLOTS];
unsigned long gap[MAPLE_ARANGE64_SLOTS];
struct maple_metadata meta;
};
struct maple_alloc {
unsigned long total;
unsigned char node_count;
unsigned int request_count;
struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
};
struct maple_topiary {
struct maple_pnode *parent;
struct maple_enode *next; /* Overlaps the pivot */
};
enum maple_type {
maple_dense,
maple_leaf_64,
maple_range_64,
maple_arange_64,
};
enum store_type {
wr_invalid,
wr_new_root,
wr_store_root,
wr_exact_fit,
wr_spanning_store,
wr_split_store,
wr_rebalance,
wr_append,
wr_node_store,
wr_slot_store,
};
/**
* DOC: Maple tree flags
*
* * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
* * MT_FLAGS_USE_RCU - Operate in RCU mode
* * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
* * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
* * MT_FLAGS_LOCK_MASK - How the mt_lock is used
* * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
* * MT_FLAGS_LOCK_BH - Acquired bh-safe
* * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
*
* MAPLE_HEIGHT_MAX The largest height that can be stored
*/
#define MT_FLAGS_ALLOC_RANGE 0x01
#define MT_FLAGS_USE_RCU 0x02
#define MT_FLAGS_HEIGHT_OFFSET 0x02
#define MT_FLAGS_HEIGHT_MASK 0x7C
#define MT_FLAGS_LOCK_MASK 0x300
#define MT_FLAGS_LOCK_IRQ 0x100
#define MT_FLAGS_LOCK_BH 0x200
#define MT_FLAGS_LOCK_EXTERN 0x300
#define MT_FLAGS_ALLOC_WRAPPED 0x0800
#define MAPLE_HEIGHT_MAX 31
#define MAPLE_NODE_TYPE_MASK 0x0F
#define MAPLE_NODE_TYPE_SHIFT 0x03
#define MAPLE_RESERVED_RANGE 4096
#ifdef CONFIG_LOCKDEP
#define mt_lock_is_held(mt) \
(!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
#define mt_write_lock_is_held(mt) \
(!(mt)->ma_external_lock || \
lock_is_held_type((mt)->ma_external_lock, 0))
#define mt_set_external_lock(mt, lock) \
(mt)->ma_external_lock = &(lock)->dep_map
#define mt_on_stack(mt) (mt).ma_external_lock = NULL
#else
#define mt_lock_is_held(mt) 1
#define mt_write_lock_is_held(mt) 1
#define mt_set_external_lock(mt, lock) do { } while (0)
#define mt_on_stack(mt) do { } while (0)
#endif
/*
* If the tree contains a single entry at index 0, it is usually stored in
* tree->ma_root. To optimise for the page cache, an entry which ends in '00',
* '01' or '11' is stored in the root, but an entry which ends in '10' will be
* stored in a node. Bits 3-6 are used to store enum maple_type.
*
* The flags are used both to store some immutable information about this tree
* (set at tree creation time) and dynamic information set under the spinlock.
*
* Another use of flags are to indicate global states of the tree. This is the
* case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in
* RCU mode. This mode was added to allow the tree to reuse nodes instead of
* re-allocating and RCU freeing nodes when there is a single user.
*/
struct maple_tree {
union {
spinlock_t ma_lock;
#ifdef CONFIG_LOCKDEP
struct lockdep_map *ma_external_lock;
#endif
};
unsigned int ma_flags;
void __rcu *ma_root;
};
/**
* MTREE_INIT() - Initialize a maple tree
* @name: The maple tree name
* @__flags: The maple tree flags
*
*/
#define MTREE_INIT(name, __flags) { \
.ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
.ma_flags = __flags, \
.ma_root = NULL, \
}
/**
* MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
* @name: The tree name
* @__flags: The maple tree flags
* @__lock: The external lock
*/
#ifdef CONFIG_LOCKDEP
#define MTREE_INIT_EXT(name, __flags, __lock) { \
.ma_external_lock = &(__lock).dep_map, \
.ma_flags = (__flags), \
.ma_root = NULL, \
}
#else
#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
#endif
#define DEFINE_MTREE(name) \
struct maple_tree name = MTREE_INIT(name, 0)
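/*
 * Illustrative sketch (not part of the original header): statically defining
 * trees with non-default flags. example_alloc_mt and example_rcu_mt are
 * made-up names used for illustration only.
 */
static struct maple_tree example_alloc_mt =
	MTREE_INIT(example_alloc_mt, MT_FLAGS_ALLOC_RANGE);
static struct maple_tree example_rcu_mt =
	MTREE_INIT(example_rcu_mt, MT_FLAGS_USE_RCU);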
#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
#define mtree_lock_nested(mt, subclass) \
spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
/*
* The Maple Tree squeezes various bits in at various points which aren't
* necessarily obvious. Usually, this is done by observing that pointers are
* N-byte aligned and thus the bottom log_2(N) bits are available for use. We
* don't use the high bits of pointers to store additional information because
* we don't know what bits are unused on any given architecture.
*
* Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
* low bits for our own purposes. Nodes are currently of 4 types:
* 1. Single pointer (Range is 0-0)
* 2. Non-leaf Allocation Range nodes
* 3. Non-leaf Range nodes
* 4. Leaf Range nodes
*
* All nodes consist of a number of node slots, pivots, and a parent
* pointer.
*/
struct maple_node {
union {
struct {
struct maple_pnode *parent;
void __rcu *slot[MAPLE_NODE_SLOTS];
};
struct {
void *pad;
struct rcu_head rcu;
struct maple_enode *piv_parent;
unsigned char parent_slot;
enum maple_type type;
unsigned char slot_len;
unsigned int ma_flags;
};
struct maple_range_64 mr64;
struct maple_arange_64 ma64;
struct maple_alloc alloc;
};
};
/*
* More complicated stores can cause two nodes to become one or three and
* potentially alter the height of the tree. Either half of the tree may need
* to be rebalanced against the other. The ma_topiary struct is used to track
* which nodes have been 'cut' from the tree so that the change can be done
* safely at a later date. This is done to support RCU.
*/
struct ma_topiary {
struct maple_enode *head;
struct maple_enode *tail;
struct maple_tree *mtree;
};
void *mtree_load(struct maple_tree *mt, unsigned long index);
int mtree_insert(struct maple_tree *mt, unsigned long index,
void *entry, gfp_t gfp);
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp);
int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long range_lo, unsigned long range_hi,
unsigned long *next, gfp_t gfp);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp);
int mtree_store_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp);
int mtree_store(struct maple_tree *mt, unsigned long index,
void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);
int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);
/**
* mtree_empty() - Determine if a tree has any present entries.
* @mt: Maple Tree.
*
* Context: Any context.
* Return: %true if the tree contains only NULL pointers.
*/
static inline bool mtree_empty(const struct maple_tree *mt)
{
return mt->ma_root == NULL;
}
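/*
 * Illustrative sketch (not part of the original header): basic use of the
 * simple API. One entry is stored over the range 10-20 inclusive, read back,
 * and erased again; example_simple_api() is a made-up helper name.
 */
static inline int example_simple_api(struct maple_tree *mt, void *entry)
{
	int ret;

	ret = mtree_store_range(mt, 10, 20, entry, GFP_KERNEL);
	if (ret)
		return ret;

	/* Any index inside the stored range resolves to @entry. */
	if (mtree_load(mt, 15) != entry)
		return -ENOENT;

	/* Erasing at any index within the range removes the whole entry. */
	mtree_erase(mt, 15);
	return 0;
}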
/* Advanced API */
/*
* Maple State Status
* ma_active means the maple state is pointing to a node and offset and can
* continue operating on the tree.
* ma_start means we have not searched the tree.
* ma_root means we have searched the tree and the entry we found lives in
* the root of the tree (ie it has index 0, length 1 and is the only entry in
* the tree).
* ma_none means we have searched the tree and there is no node in the
* tree for this entry. For example, we searched for index 1 in an empty
* tree. Or we have a tree which points to a full leaf node and we
* searched for an entry which is larger than can be contained in that
* leaf node.
* ma_pause means the data within the maple state may be stale; restart the
* operation.
* ma_overflow means the search has reached the upper limit of the search
* ma_underflow means the search has reached the lower limit of the search
* ma_error means there was an error, check the node for the error number.
*/
enum maple_status {
ma_active,
ma_start,
ma_root,
ma_none,
ma_pause,
ma_overflow,
ma_underflow,
ma_error,
};
/*
* The maple state is defined in the struct ma_state and is used to keep track
* of information during operations, and even between operations when using the
* advanced API.
*
* If state->node has bit 0 set then it references a tree location which is not
* a node (eg the root). If bit 1 is set, the rest of the bits are a negative
* errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
* node type.
*
* state->alloc either has a request number of nodes or an allocated node. If
* state->alloc has a requested number of nodes, the first bit will be set (0x1)
* and the remaining bits are the value. If state->alloc is a node, then the
* node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
* storing more allocated nodes, a total number of nodes allocated, and the
* node_count in this node. node_count is the number of allocated nodes in this
* node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
* nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
* by removing a node from the state->alloc node until state->alloc->node_count
* is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
* to state->alloc. Nodes are pushed onto state->alloc by putting the current
* state->alloc into the pushed node's slot[0].
*
* The state also contains the implied min/max of the state->node, the depth of
* this search, and the offset. The implied min/max are either from the parent
* node or are 0-oo for the root node. The depth is incremented or decremented
* every time a node is walked down or up. The offset is the slot/pivot of
* interest in the node - either for reading or writing.
*
* When returning a value the maple state index and last respectively contain
* the start and end of the range for the entry. Ranges are inclusive in the
* Maple Tree.
*
* The status of the state is used to determine how the next action should treat
* the state. For instance, if the status is ma_start then the next action
* should start at the root of the tree and walk down. If the status is
* ma_pause then the node may be stale data and should be discarded. If the
* status is ma_overflow, then the last action hit the upper limit.
*
*/
struct ma_state {
struct maple_tree *tree; /* The tree we're operating in */
unsigned long index; /* The index we're operating on - range start */
unsigned long last; /* The last index we're operating on - range end */
struct maple_enode *node; /* The node containing this entry */
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
struct maple_node *alloc; /* A single allocated node for fast path writes */
unsigned long node_request; /* The number of nodes to allocate for this operation */
enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
unsigned char mas_flags;
unsigned char end; /* The end of the node */
enum store_type store_type; /* The type of store needed for this operation */
};
struct ma_wr_state {
struct ma_state *mas;
struct maple_node *node; /* Decoded mas->node */
unsigned long r_min; /* range min */
unsigned long r_max; /* range max */
enum maple_type type; /* mas->node type */
unsigned char offset_end; /* The offset where the write ends */
unsigned long *pivots; /* mas->node->pivots pointer */
unsigned long end_piv; /* The pivot at the offset end */
void __rcu **slots; /* mas->node->slots pointer */
void *entry; /* The entry to write */
void *content; /* The existing entry that is being overwritten */
unsigned char vacant_height; /* Height of lowest node with free space */
unsigned char sufficient_height;/* Height of lowest node with min sufficiency + 1 nodes */
};
#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
#define mas_lock_nested(mas, subclass) \
spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
/*
* Special values for ma_state.node.
* MA_ERROR represents an errno. After dropping the lock and attempting
* to resolve the error, the walk would have to be restarted from the
* top of the tree as the tree may have been modified.
*/
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
/*
* When changing MA_STATE, remember to also change rust/kernel/maple_tree.rs
*/
#define MA_STATE(name, mt, first, end) \
struct ma_state name = { \
.tree = mt, \
.index = first, \
.last = end, \
.node = NULL, \
.status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
.sheaf = NULL, \
.alloc = NULL, \
.node_request = 0, \
.mas_flags = 0, \
.store_type = wr_invalid, \
}
#define MA_WR_STATE(name, ma_state, wr_entry) \
struct ma_wr_state name = { \
.mas = ma_state, \
.content = NULL, \
.entry = wr_entry, \
.vacant_height = 0, \
.sufficient_height = 0 \
}
#define MA_TOPIARY(name, tree) \
struct ma_topiary name = { \
.head = NULL, \
.tail = NULL, \
.mtree = tree, \
}
void *mas_walk(struct ma_state *mas);
void *mas_store(struct ma_state *mas, void *entry);
void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
void *entry, unsigned long range_lo, unsigned long range_hi,
unsigned long *next, gfp_t gfp);
bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long max);
void *mas_next(struct ma_state *mas, unsigned long max);
void *mas_next_range(struct ma_state *mas, unsigned long max);
int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
unsigned long size);
/*
* This finds an empty area from the highest address to the lowest.
* AKA "Topdown" version,
*/
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size);
static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
unsigned long addr)
{
memset(mas, 0, sizeof(struct ma_state));
mas->tree = tree;
mas->index = mas->last = addr;
mas->max = ULONG_MAX;
mas->status = ma_start;
mas->node = NULL;
}
static inline bool mas_is_active(struct ma_state *mas)
{
return mas->status == ma_active;
}
static inline bool mas_is_err(struct ma_state *mas)
{
return mas->status == ma_error;
}
/**
* mas_reset() - Reset a Maple Tree operation state.
* @mas: Maple Tree operation state.
*
* Resets the error or walk state of the @mas so future walks of the
* array will start from the root. Use this if you have dropped the
* lock and want to reuse the ma_state.
*
* Context: Any context.
*/
static __always_inline void mas_reset(struct ma_state *mas)
{
	mas->status = ma_start;
	mas->node = NULL;
}
/**
* mas_for_each() - Iterate over a range of the maple tree.
* @__mas: Maple Tree operation state (maple_state)
* @__entry: Entry retrieved from the tree
* @__max: maximum index to retrieve from the tree
*
* When returned, mas->index and mas->last will hold the entire range for the
* entry.
*
* Note: may return the zero entry.
*/
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
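/*
 * Illustrative sketch (not part of the original header): walking every entry
 * with the advanced API under the RCU read lock. example_iterate() and the
 * process() callback are assumptions for illustration only.
 */
static inline void example_iterate(struct maple_tree *mt,
				   void (*process)(void *entry))
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX)
		process(entry);
	rcu_read_unlock();
}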
/**
* mas_for_each_rev() - Iterate over a range of the maple tree in reverse order.
* @__mas: Maple Tree operation state (maple_state)
* @__entry: Entry retrieved from the tree
* @__min: minimum index to retrieve from the tree
*
* When returned, mas->index and mas->last will hold the entire range for the
* entry.
*
* Note: may return the zero entry.
*/
#define mas_for_each_rev(__mas, __entry, __min) \
while (((__entry) = mas_find_rev((__mas), (__min))) != NULL)
#ifdef CONFIG_DEBUG_MAPLE_TREE
enum mt_dump_format {
mt_dump_dec,
mt_dump_hex,
};
extern atomic_t maple_tree_tests_run;
extern atomic_t maple_tree_tests_passed;
void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
void mas_dump(const struct ma_state *mas);
void mas_wr_dump(const struct ma_wr_state *wr_mas);
void mt_validate(struct maple_tree *mt);
void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do { \
atomic_inc(&maple_tree_tests_run); \
if (__x) { \
pr_info("BUG at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
mt_dump(__tree, mt_dump_hex); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
dump_stack(); \
} else { \
atomic_inc(&maple_tree_tests_passed); \
} \
} while (0)
#define MAS_BUG_ON(__mas, __x) do { \
atomic_inc(&maple_tree_tests_run); \
if (__x) { \
pr_info("BUG at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
mas_dump(__mas); \
mt_dump((__mas)->tree, mt_dump_hex); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
dump_stack(); \
} else { \
atomic_inc(&maple_tree_tests_passed); \
} \
} while (0)
#define MAS_WR_BUG_ON(__wrmas, __x) do { \
atomic_inc(&maple_tree_tests_run); \
if (__x) { \
pr_info("BUG at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
mas_wr_dump(__wrmas); \
mas_dump((__wrmas)->mas); \
mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
dump_stack(); \
} else { \
atomic_inc(&maple_tree_tests_passed); \
} \
} while (0)
#define MT_WARN_ON(__tree, __x) ({ \
int ret = !!(__x); \
atomic_inc(&maple_tree_tests_run); \
if (ret) { \
pr_info("WARN at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
mt_dump(__tree, mt_dump_hex); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
dump_stack(); \
} else { \
atomic_inc(&maple_tree_tests_passed); \
} \
unlikely(ret); \
})
#define MAS_WARN_ON(__mas, __x) ({ \
int ret = !!(__x); \
atomic_inc(&maple_tree_tests_run); \
if (ret) { \
pr_info("WARN at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
mas_dump(__mas); \
mt_dump((__mas)->tree, mt_dump_hex); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
dump_stack(); \
} else { \
atomic_inc(&maple_tree_tests_passed); \
} \
unlikely(ret); \
})
#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
int ret = !!(__x); \
atomic_inc(&maple_tree_tests_run); \
if (ret) { \
pr_info("WARN at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
mas_wr_dump(__wrmas); \
mas_dump((__wrmas)->mas); \
mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
dump_stack(); \
} else { \
atomic_inc(&maple_tree_tests_passed); \
} \
unlikely(ret); \
})
#else
#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
#endif /* CONFIG_DEBUG_MAPLE_TREE */
/**
* __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
* current location.
* @mas: Maple Tree operation state.
* @start: New start of range in the Maple Tree.
* @last: New end of range in the Maple Tree.
*
* Set the internal maple state values to a sub-range.
* Please use mas_set_range() if you do not know where you are in the tree.
*/
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
unsigned long last)
{
/* Ensure the range starts within the current slot */
MAS_WARN_ON(mas, mas_is_active(mas) &&
(mas->index > start || mas->last < start));
mas->index = start;
mas->last = last;
}
/**
* mas_set_range() - Set up Maple Tree operation state for a different index.
* @mas: Maple Tree operation state.
* @start: New start of range in the Maple Tree.
* @last: New end of range in the Maple Tree.
*
* Move the operation state to refer to a different range. This will
* have the effect of starting a walk from the top; see mas_next()
* to move to an adjacent index.
*/
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
mas_reset(mas);
__mas_set_range(mas, start, last);
}
/**
* mas_set() - Set up Maple Tree operation state for a different index.
* @mas: Maple Tree operation state.
* @index: New index into the Maple Tree.
*
* Move the operation state to refer to a different index. This will
* have the effect of starting a walk from the top; see mas_next()
* to move to an adjacent index.
*/
static inline void mas_set(struct ma_state *mas, unsigned long index)
{
mas_set_range(mas, index, index);
}
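/*
 * Illustrative sketch (not part of the original header): re-using a single
 * ma_state for point lookups at several indices with mas_set() and
 * mas_walk(). example_lookup_two() is a made-up helper name.
 */
static inline bool example_lookup_two(struct maple_tree *mt,
				      unsigned long a, unsigned long b)
{
	MA_STATE(mas, mt, a, a);
	bool both;

	rcu_read_lock();
	both = mas_walk(&mas) != NULL;
	mas_set(&mas, b);
	both = both && mas_walk(&mas) != NULL;
	rcu_read_unlock();

	return both;
}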
static inline bool mt_external_lock(const struct maple_tree *mt)
{
return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
}
/**
* mt_init_flags() - Initialise an empty maple tree with flags.
* @mt: Maple Tree
* @flags: maple tree flags.
*
* If you need to initialise a Maple Tree with special flags (eg, an
* allocation tree), use this function.
*
* Context: Any context.
*/
static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
{
mt->ma_flags = flags;
if (!mt_external_lock(mt))
spin_lock_init(&mt->ma_lock);
rcu_assign_pointer(mt->ma_root, NULL);
}
/**
* mt_init() - Initialise an empty maple tree.
* @mt: Maple Tree
*
* An empty Maple Tree.
*
* Context: Any context.
*/
static inline void mt_init(struct maple_tree *mt)
{
mt_init_flags(mt, 0);
}
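/*
 * Illustrative sketch (not part of the original header): initialising an
 * allocation tree at runtime and asking it for a gap of 0x1000 free indices.
 * example_alloc_gap() is a made-up helper name.
 */
static inline int example_alloc_gap(struct maple_tree *mt, void *entry,
				    unsigned long *startp)
{
	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);

	/* On success, *startp holds the first index of the allocated range. */
	return mtree_alloc_range(mt, startp, entry, 0x1000, 0, ULONG_MAX,
				 GFP_KERNEL);
}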
static inline bool mt_in_rcu(struct maple_tree *mt)
{
#ifdef CONFIG_MAPLE_RCU_DISABLED
return false;
#endif
return mt->ma_flags & MT_FLAGS_USE_RCU;
}
/**
* mt_clear_in_rcu() - Switch the tree to non-RCU mode.
* @mt: The Maple Tree
*/
static inline void mt_clear_in_rcu(struct maple_tree *mt)
{
if (!mt_in_rcu(mt))
return;
	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
} else {
mtree_lock(mt);
mt->ma_flags &= ~MT_FLAGS_USE_RCU;
mtree_unlock(mt);
}
}
/**
* mt_set_in_rcu() - Switch the tree to RCU safe mode.
* @mt: The Maple Tree
*/
static inline void mt_set_in_rcu(struct maple_tree *mt)
{
if (mt_in_rcu(mt))
return;
	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags |= MT_FLAGS_USE_RCU;
} else {
mtree_lock(mt);
mt->ma_flags |= MT_FLAGS_USE_RCU;
mtree_unlock(mt);
}
}
static inline unsigned int mt_height(const struct maple_tree *mt)
{
return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
}
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
unsigned long max);
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
/**
* mt_for_each - Iterate over each entry starting at index until max.
* @__tree: The Maple Tree
* @__entry: The current entry
* @__index: The index to start the search from. Subsequently used as iterator.
* @__max: The maximum limit for @index
*
* This iterator skips all entries that resolve to a NULL pointer,
* e.g. entries which have been reserved with XA_ZERO_ENTRY.
*/
#define mt_for_each(__tree, __entry, __index, __max) \
for (__entry = mt_find(__tree, &(__index), __max); \
__entry; __entry = mt_find_after(__tree, &(__index), __max))
#endif /*_LINUX_MAPLE_TREE_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/* Provide a way to create a superblock configuration context within the kernel
* that allows a superblock to be set up prior to mounting.
*
* Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/security.h>
#include <linux/mnt_namespace.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>
#include <asm/sections.h>
#include "mount.h"
#include "internal.h"
enum legacy_fs_param {
LEGACY_FS_UNSET_PARAMS,
LEGACY_FS_MONOLITHIC_PARAMS,
LEGACY_FS_INDIVIDUAL_PARAMS,
};
struct legacy_fs_context {
char *legacy_data; /* Data page for legacy filesystems */
size_t data_size;
enum legacy_fs_param param_type;
};
static int legacy_init_fs_context(struct fs_context *fc);
static const struct constant_table common_set_sb_flag[] = {
{ "dirsync", SB_DIRSYNC },
{ "lazytime", SB_LAZYTIME },
{ "mand", SB_MANDLOCK },
{ "ro", SB_RDONLY },
{ "sync", SB_SYNCHRONOUS },
{ },
};
static const struct constant_table common_clear_sb_flag[] = {
{ "async", SB_SYNCHRONOUS },
{ "nolazytime", SB_LAZYTIME },
{ "nomand", SB_MANDLOCK },
{ "rw", SB_RDONLY },
{ },
};
/*
* Check for a common mount option that manipulates s_flags.
*/
static int vfs_parse_sb_flag(struct fs_context *fc, const char *key)
{
unsigned int token;
token = lookup_constant(common_set_sb_flag, key, 0);
if (token) {
fc->sb_flags |= token;
fc->sb_flags_mask |= token;
return 0;
}
token = lookup_constant(common_clear_sb_flag, key, 0);
if (token) {
fc->sb_flags &= ~token;
fc->sb_flags_mask |= token;
return 0;
}
return -ENOPARAM;
}
/**
* vfs_parse_fs_param_source - Handle setting "source" via parameter
* @fc: The filesystem context to modify
* @param: The parameter
*
* This is a simple helper for filesystems to verify that the "source" they
* accept is sane.
*
* Returns 0 on success, -ENOPARAM if this is not the "source" parameter, and
* -EINVAL otherwise. In the event of failure, supplementary error information
* is logged.
*/
int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param)
{
if (strcmp(param->key, "source") != 0)
return -ENOPARAM;
if (param->type != fs_value_is_string)
return invalf(fc, "Non-string source");
if (fc->source)
return invalf(fc, "Multiple sources");
fc->source = param->string;
param->string = NULL;
return 0;
}
EXPORT_SYMBOL(vfs_parse_fs_param_source);
/**
* vfs_parse_fs_param - Add a single parameter to a superblock config
* @fc: The filesystem context to modify
* @param: The parameter
*
* A single mount option in string form is applied to the filesystem context
* being set up. Certain standard options (for example "ro") are translated
* into flag bits without going to the filesystem. The active security module
* is allowed to observe and poach options. Any other options are passed over
* to the filesystem to parse.
*
* This may be called multiple times for a context.
*
* Returns 0 on success and a negative error code on failure. In the event of
* failure, supplementary error information may have been set.
*/
int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param)
{
int ret;
if (!param->key)
return invalf(fc, "Unnamed parameter\n");
ret = vfs_parse_sb_flag(fc, param->key);
if (ret != -ENOPARAM)
return ret;
ret = security_fs_context_parse_param(fc, param);
if (ret != -ENOPARAM)
/* Param belongs to the LSM or is disallowed by the LSM; so
* don't pass to the FS.
*/
return ret;
if (fc->ops->parse_param) {
ret = fc->ops->parse_param(fc, param);
if (ret != -ENOPARAM)
return ret;
}
/* If the filesystem doesn't take any arguments, give it the
* default handling of source.
*/
ret = vfs_parse_fs_param_source(fc, param);
if (ret != -ENOPARAM)
return ret;
return invalf(fc, "%s: Unknown parameter '%s'",
fc->fs_type->name, param->key);
}
EXPORT_SYMBOL(vfs_parse_fs_param);
/**
* vfs_parse_fs_qstr - Convenience function to just parse a string.
* @fc: Filesystem context.
* @key: Parameter name.
* @value: Parameter value, or NULL if the parameter is a flag.
*/
int vfs_parse_fs_qstr(struct fs_context *fc, const char *key,
const struct qstr *value)
{
int ret;
struct fs_parameter param = {
.key = key,
.type = fs_value_is_flag,
.size = value ? value->len : 0,
};
if (value) {
param.string = kmemdup_nul(value->name, value->len, GFP_KERNEL);
if (!param.string)
return -ENOMEM;
param.type = fs_value_is_string;
}
	ret = vfs_parse_fs_param(fc, &param);
kfree(param.string);
return ret;
}
EXPORT_SYMBOL(vfs_parse_fs_qstr);
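/*
 * Illustrative sketch (not part of the original source): setting a couple of
 * parameters on a context from inside the kernel before the superblock is
 * created. The option names and example_set_params() are examples only.
 */
static inline int example_set_params(struct fs_context *fc)
{
	int ret;

	ret = vfs_parse_fs_string(fc, "source", "none");
	if (ret < 0)
		return ret;

	/* A NULL value parses as a flag-style option. */
	return vfs_parse_fs_string(fc, "ro", NULL);
}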
/**
* vfs_parse_monolithic_sep - Parse key[=val][,key[=val]]* mount data
* @fc: The superblock configuration to fill in.
* @data: The data to parse
* @sep: callback for separating next option
*
* Parse a blob of data that's in key[=val][,key[=val]]* form with a custom
* option separator callback.
*
* Returns 0 on success or the error returned by the ->parse_param() fs_context
* operation on failure.
*/
int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
char *(*sep)(char **))
{
char *options = data, *key;
int ret = 0;
if (!options)
return 0;
ret = security_sb_eat_lsm_opts(options, &fc->security);
if (ret)
return ret;
while ((key = sep(&options)) != NULL) {
if (*key) {
char *value = strchr(key, '=');
if (value) {
if (unlikely(value == key))
continue;
*value++ = 0;
}
ret = vfs_parse_fs_string(fc, key, value);
if (ret < 0)
break;
}
}
return ret;
}
EXPORT_SYMBOL(vfs_parse_monolithic_sep);
static char *vfs_parse_comma_sep(char **s)
{
return strsep(s, ",");
}
/**
* generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
* @fc: The superblock configuration to fill in.
* @data: The data to parse
*
* Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
* used as the ->parse_monolithic() fs_context operation.
*
* Returns 0 on success or the error returned by the ->parse_param() fs_context
* operation on failure.
*/
int generic_parse_monolithic(struct fs_context *fc, void *data)
{
return vfs_parse_monolithic_sep(fc, data, vfs_parse_comma_sep);
}
EXPORT_SYMBOL(generic_parse_monolithic);
/**
* alloc_fs_context - Create a filesystem context.
* @fs_type: The filesystem type.
* @reference: The dentry from which this one derives (or NULL)
* @sb_flags: Filesystem/superblock flags (SB_*)
* @sb_flags_mask: Applicable members of @sb_flags
* @purpose: The purpose that this configuration shall be used for.
*
* Open a filesystem and create a mount context. The mount context is
* initialised with the supplied flags and, if a submount/automount from
* another superblock (referred to by @reference) is supplied, may have
* parameters such as namespaces copied across from that superblock.
*/
static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
struct dentry *reference,
unsigned int sb_flags,
unsigned int sb_flags_mask,
enum fs_context_purpose purpose)
{
int (*init_fs_context)(struct fs_context *);
struct fs_context *fc;
int ret = -ENOMEM;
fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL_ACCOUNT);
if (!fc)
return ERR_PTR(-ENOMEM);
fc->purpose = purpose;
fc->sb_flags = sb_flags;
fc->sb_flags_mask = sb_flags_mask;
fc->fs_type = get_filesystem(fs_type);
	fc->cred = get_current_cred();
	fc->net_ns = get_net(current->nsproxy->net_ns);
fc->log.prefix = fs_type->name;
mutex_init(&fc->uapi_mutex);
switch (purpose) {
case FS_CONTEXT_FOR_MOUNT:
fc->user_ns = get_user_ns(fc->cred->user_ns);
break;
case FS_CONTEXT_FOR_SUBMOUNT:
fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
break;
case FS_CONTEXT_FOR_RECONFIGURE:
atomic_inc(&reference->d_sb->s_active);
		fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
		fc->root = dget(reference);
break;
}
/* TODO: Make all filesystems support this unconditionally */
init_fs_context = fc->fs_type->init_fs_context;
if (!init_fs_context)
		init_fs_context = legacy_init_fs_context;
	ret = init_fs_context(fc);
if (ret < 0)
goto err_fc;
	fc->need_free = true;
	return fc;
err_fc:
put_fs_context(fc);
	return ERR_PTR(ret);
}
struct fs_context *fs_context_for_mount(struct file_system_type *fs_type,
unsigned int sb_flags)
{
return alloc_fs_context(fs_type, NULL, sb_flags, 0,
FS_CONTEXT_FOR_MOUNT);
}
EXPORT_SYMBOL(fs_context_for_mount);
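/*
 * Illustrative sketch (not part of the original source): the common pattern
 * built on fs_context_for_mount() for mounting a filesystem from inside the
 * kernel. example_kern_mount() is a made-up helper; fc_mount() and
 * put_fs_context() are the standard helpers used for this purpose.
 */
static inline struct vfsmount *example_kern_mount(struct file_system_type *type)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}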
struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
unsigned int sb_flags,
unsigned int sb_flags_mask)
{
return alloc_fs_context(dentry->d_sb->s_type, dentry, sb_flags,
sb_flags_mask, FS_CONTEXT_FOR_RECONFIGURE);
}
EXPORT_SYMBOL(fs_context_for_reconfigure);
/**
* fs_context_for_submount: allocate a new fs_context for a submount
* @type: file_system_type of the new context
* @reference: reference dentry from which to copy relevant info
*
* Allocate a new fs_context suitable for a submount. This also ensures that
* the fc->security object is inherited from @reference (if needed).
*/
struct fs_context *fs_context_for_submount(struct file_system_type *type,
struct dentry *reference)
{
struct fs_context *fc;
int ret;
fc = alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
if (IS_ERR(fc))
return fc;
ret = security_fs_context_submount(fc, reference->d_sb);
if (ret) {
put_fs_context(fc);
return ERR_PTR(ret);
}
return fc;
}
EXPORT_SYMBOL(fs_context_for_submount);
void fc_drop_locked(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
dput(fc->root);
fc->root = NULL;
deactivate_locked_super(sb);
}
static void legacy_fs_context_free(struct fs_context *fc);
/**
* vfs_dup_fs_context - Duplicate a filesystem context.
* @src_fc: The context to copy.
*/
struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
{
struct fs_context *fc;
int ret;
if (!src_fc->ops->dup)
return ERR_PTR(-EOPNOTSUPP);
fc = kmemdup(src_fc, sizeof(struct fs_context), GFP_KERNEL);
if (!fc)
return ERR_PTR(-ENOMEM);
mutex_init(&fc->uapi_mutex);
fc->fs_private = NULL;
fc->s_fs_info = NULL;
fc->source = NULL;
fc->security = NULL;
get_filesystem(fc->fs_type);
get_net(fc->net_ns);
get_user_ns(fc->user_ns);
get_cred(fc->cred);
if (fc->log.log)
refcount_inc(&fc->log.log->usage);
/* Can't call put until we've called ->dup */
ret = fc->ops->dup(fc, src_fc);
if (ret < 0)
goto err_fc;
ret = security_fs_context_dup(fc, src_fc);
if (ret < 0)
goto err_fc;
return fc;
err_fc:
put_fs_context(fc);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(vfs_dup_fs_context);
/**
* logfc - Log a message to a filesystem context
* @log: The filesystem context to log to, or NULL to use printk.
* @prefix: A string to prefix the output with, or NULL.
* @level: 'w' for a warning, 'e' for an error, 'i' for info. Anything else is a notice.
* @fmt: The format of the buffer.
*/
void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...)
{
va_list va;
struct va_format vaf = {.fmt = fmt, .va = &va};
va_start(va, fmt);
if (!log) {
switch (level) {
case 'w':
printk(KERN_WARNING "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
break;
case 'e':
printk(KERN_ERR "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
break;
case 'i':
printk(KERN_INFO "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
break;
default:
printk(KERN_NOTICE "%s%s%pV\n", prefix ? prefix : "",
prefix ? ": " : "", &vaf);
break;
}
} else {
unsigned int logsize = ARRAY_SIZE(log->buffer);
u8 index;
char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
prefix ? prefix : "",
prefix ? ": " : "", &vaf);
index = log->head & (logsize - 1);
BUILD_BUG_ON(sizeof(log->head) != sizeof(u8) ||
sizeof(log->tail) != sizeof(u8));
if ((u8)(log->head - log->tail) == logsize) {
/* The buffer is full, discard the oldest message */
if (log->need_free & (1 << index))
kfree(log->buffer[index]);
log->tail++;
}
log->buffer[index] = q ? q : "OOM: Can't store error string";
if (q)
log->need_free |= 1 << index;
else
log->need_free &= ~(1 << index);
log->head++;
}
va_end(va);
}
EXPORT_SYMBOL(logfc);
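/*
 * Illustrative sketch (not part of the original source): callers normally
 * reach logfc() through the infof()/warnf()/errorf()/invalf() wrappers from
 * <linux/fs_context.h>. The unsupported-blocksize message and
 * example_reject_blocksize() are examples only.
 */
static inline int example_reject_blocksize(struct fs_context *fc,
					   unsigned int blocksize)
{
	/* Logs an error against @fc and evaluates to -EINVAL. */
	return invalf(fc, "%s: unsupported block size %u",
		      fc->fs_type->name, blocksize);
}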
/*
* Free a logging structure.
*/
static void put_fc_log(struct fs_context *fc)
{
struct fc_log *log = fc->log.log;
int i;
	if (log) {
		if (refcount_dec_and_test(&log->usage)) {
			fc->log.log = NULL;
			for (i = 0; i < ARRAY_SIZE(log->buffer); i++)
				if (log->need_free & (1 << i))
					kfree(log->buffer[i]);
			kfree(log);
}
}
}
/**
* put_fs_context - Dispose of a superblock configuration context.
* @fc: The context to dispose of.
*/
void put_fs_context(struct fs_context *fc)
{
struct super_block *sb;
if (fc->root) {
sb = fc->root->d_sb;
dput(fc->root);
fc->root = NULL;
deactivate_super(sb);
}
	if (fc->need_free && fc->ops && fc->ops->free)
		fc->ops->free(fc);
	security_free_mnt_opts(&fc->security);
	put_net(fc->net_ns);
	put_user_ns(fc->user_ns);
	put_cred(fc->cred);
	put_fc_log(fc);
put_filesystem(fc->fs_type);
kfree(fc->source);
kfree(fc);
}
EXPORT_SYMBOL(put_fs_context);
/*
* Free the config for a filesystem that doesn't support fs_context.
*/
static void legacy_fs_context_free(struct fs_context *fc)
{
struct legacy_fs_context *ctx = fc->fs_private;
if (ctx) {
if (ctx->param_type == LEGACY_FS_INDIVIDUAL_PARAMS)
kfree(ctx->legacy_data);
kfree(ctx);
}
}
/*
* Duplicate a legacy config.
*/
static int legacy_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
{
struct legacy_fs_context *ctx;
struct legacy_fs_context *src_ctx = src_fc->fs_private;
ctx = kmemdup(src_ctx, sizeof(*src_ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
if (ctx->param_type == LEGACY_FS_INDIVIDUAL_PARAMS) {
ctx->legacy_data = kmemdup(src_ctx->legacy_data,
src_ctx->data_size, GFP_KERNEL);
if (!ctx->legacy_data) {
kfree(ctx);
return -ENOMEM;
}
}
fc->fs_private = ctx;
return 0;
}
/*
* Add a parameter to a legacy config. We build up a comma-separated list of
* options.
*/
static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct legacy_fs_context *ctx = fc->fs_private;
unsigned int size = ctx->data_size;
size_t len = 0;
int ret;
ret = vfs_parse_fs_param_source(fc, param);
if (ret != -ENOPARAM)
return ret;
if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS)
return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options");
switch (param->type) {
case fs_value_is_string:
len = 1 + param->size;
fallthrough;
case fs_value_is_flag:
len += strlen(param->key);
break;
default:
return invalf(fc, "VFS: Legacy: Parameter type for '%s' not supported",
param->key);
}
if (size + len + 2 > PAGE_SIZE)
return invalf(fc, "VFS: Legacy: Cumulative options too large");
if (strchr(param->key, ',') ||
(param->type == fs_value_is_string &&
memchr(param->string, ',', param->size)))
return invalf(fc, "VFS: Legacy: Option '%s' contained comma",
param->key);
if (!ctx->legacy_data) {
ctx->legacy_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ctx->legacy_data)
return -ENOMEM;
}
if (size)
ctx->legacy_data[size++] = ',';
len = strlen(param->key);
memcpy(ctx->legacy_data + size, param->key, len);
size += len;
if (param->type == fs_value_is_string) {
ctx->legacy_data[size++] = '=';
memcpy(ctx->legacy_data + size, param->string, param->size);
size += param->size;
}
ctx->legacy_data[size] = '\0';
ctx->data_size = size;
ctx->param_type = LEGACY_FS_INDIVIDUAL_PARAMS;
return 0;
}
/*
* Add monolithic mount data.
*/
static int legacy_parse_monolithic(struct fs_context *fc, void *data)
{
struct legacy_fs_context *ctx = fc->fs_private;
if (ctx->param_type != LEGACY_FS_UNSET_PARAMS) {
pr_warn("VFS: Can't mix monolithic and individual options\n");
return -EINVAL;
}
ctx->legacy_data = data;
ctx->param_type = LEGACY_FS_MONOLITHIC_PARAMS;
if (!ctx->legacy_data)
return 0;
if (fc->fs_type->fs_flags & FS_BINARY_MOUNTDATA)
return 0;
return security_sb_eat_lsm_opts(ctx->legacy_data, &fc->security);
}
/*
* Get a mountable root with the legacy mount command.
*/
static int legacy_get_tree(struct fs_context *fc)
{
struct legacy_fs_context *ctx = fc->fs_private;
struct super_block *sb;
struct dentry *root;
root = fc->fs_type->mount(fc->fs_type, fc->sb_flags,
fc->source, ctx->legacy_data);
if (IS_ERR(root))
return PTR_ERR(root);
sb = root->d_sb;
BUG_ON(!sb);
fc->root = root;
return 0;
}
/*
* Handle remount.
*/
static int legacy_reconfigure(struct fs_context *fc)
{
struct legacy_fs_context *ctx = fc->fs_private;
struct super_block *sb = fc->root->d_sb;
if (!sb->s_op->remount_fs)
return 0;
return sb->s_op->remount_fs(sb, &fc->sb_flags,
ctx ? ctx->legacy_data : NULL);
}
const struct fs_context_operations legacy_fs_context_ops = {
.free = legacy_fs_context_free,
.dup = legacy_fs_context_dup,
.parse_param = legacy_parse_param,
.parse_monolithic = legacy_parse_monolithic,
.get_tree = legacy_get_tree,
.reconfigure = legacy_reconfigure,
};
/*
* Initialise a legacy context for a filesystem that doesn't support
* fs_context.
*/
static int legacy_init_fs_context(struct fs_context *fc)
{
fc->fs_private = kzalloc(sizeof(struct legacy_fs_context), GFP_KERNEL_ACCOUNT);
if (!fc->fs_private)
return -ENOMEM;
fc->ops = &legacy_fs_context_ops;
return 0;
}
int parse_monolithic_mount_data(struct fs_context *fc, void *data)
{
int (*monolithic_mount_data)(struct fs_context *, void *);
monolithic_mount_data = fc->ops->parse_monolithic;
if (!monolithic_mount_data)
monolithic_mount_data = generic_parse_monolithic;
return monolithic_mount_data(fc, data);
}
/*
* Clean up a context after performing an action on it and put it into a state
* from where it can be used to reconfigure a superblock.
*
* Note that here we do only the parts that can't fail; the rest is in
* finish_clean_context() below and in between those fs_context is marked
* FS_CONTEXT_AWAITING_RECONF. The reason for the split is that after a
* successful mount or remount we need to report success to userland.
* Trying to do full reinit (for the sake of possible subsequent remount)
* and failing to allocate memory would've put us into a nasty situation.
* So here we only discard the old state and reinitialization is left
* until we actually try to reconfigure.
*/
void vfs_clean_context(struct fs_context *fc)
{
if (fc->need_free && fc->ops && fc->ops->free)
fc->ops->free(fc);
fc->need_free = false;
fc->fs_private = NULL;
fc->s_fs_info = NULL;
fc->sb_flags = 0;
security_free_mnt_opts(&fc->security);
kfree(fc->source);
fc->source = NULL;
fc->exclusive = false;
fc->purpose = FS_CONTEXT_FOR_RECONFIGURE;
fc->phase = FS_CONTEXT_AWAITING_RECONF;
}
int finish_clean_context(struct fs_context *fc)
{
int error;
if (fc->phase != FS_CONTEXT_AWAITING_RECONF)
return 0;
if (fc->fs_type->init_fs_context)
error = fc->fs_type->init_fs_context(fc);
else
error = legacy_init_fs_context(fc);
if (unlikely(error)) {
fc->phase = FS_CONTEXT_FAILED;
return error;
}
fc->need_free = true;
fc->phase = FS_CONTEXT_RECONF_PARAMS;
return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/namespace.c
*
* (C) Copyright Al Viro 2000, 2001
*
* Based on code from fs/super.c, copyright Linus Torvalds and others.
* Heavily rewritten.
*/
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h> /* init_rootfs */
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/pidfs.h>
#include <linux/nstree.h>
#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;
static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
if (!str)
return 0;
mhash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("mhash_entries=", set_mhash_entries);
static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
if (!str)
return 0;
mphash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("mphash_entries=", set_mphash_entries);
static char * __initdata initramfs_options;
static int __init initramfs_options_setup(char *str)
{
initramfs_options = str;
return 1;
}
__setup("initramfs_options=", initramfs_options_setup);
static u64 event;
static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);
/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;
static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static struct mnt_namespace *emptied_ns; /* protected by namespace_sem */
static inline void namespace_lock(void);
static void namespace_unlock(void);
DEFINE_LOCK_GUARD_0(namespace_excl, namespace_lock(), namespace_unlock())
DEFINE_LOCK_GUARD_0(namespace_shared, down_read(&namespace_sem),
up_read(&namespace_sem))
DEFINE_FREE(mntput, struct vfsmount *, if (!IS_ERR(_T)) mntput(_T))
#ifdef CONFIG_FSNOTIFY
LIST_HEAD(notify_list); /* protected by namespace_sem */
#endif
enum mount_kattr_flags_t {
MOUNT_KATTR_RECURSE = (1 << 0),
MOUNT_KATTR_IDMAP_REPLACE = (1 << 1),
};
struct mount_kattr {
unsigned int attr_set;
unsigned int attr_clr;
unsigned int propagation;
unsigned int lookup_flags;
enum mount_kattr_flags_t kflags;
struct user_namespace *mnt_userns;
struct mnt_idmap *mnt_idmap;
};
/* /sys/fs */
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
* vfsmount lock may be taken for read to prevent changes to the
* vfsmount hash, ie. during mountpoint lookups or walking back
* up the tree.
*
* It should be taken for write in all cases where the vfsmount
* tree or hash is modified or when a vfsmount structure is modified.
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static void mnt_ns_release(struct mnt_namespace *ns)
{
/* keep alive for {list,stat}mount() */
if (ns && refcount_dec_and_test(&ns->passive)) {
fsnotify_mntns_delete(ns);
put_user_ns(ns->user_ns);
kfree(ns);
}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *,
if (!IS_ERR(_T)) mnt_ns_release(_T))
static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
mnt_ns_release(container_of(rcu, struct mnt_namespace, ns.ns_rcu));
}
static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
/* remove from global mount namespace list */
if (ns_tree_active(ns))
ns_tree_remove(ns);
call_rcu(&ns->ns.ns_rcu, mnt_ns_release_rcu);
}
/*
* Lookup a mount namespace by id and take a passive reference count. Taking a
* passive reference means the mount namespace can be emptied if e.g., the last
* task holding an active reference exits. To access the mounts of the
* namespace the @namespace_sem must first be acquired. If the namespace has
* already shut down before acquiring @namespace_sem, {list,stat}mount() will
* see that the mount rbtree of the namespace is empty.
*
* Note the lookup is lockless protected by a sequence counter. We only
* need to guard against false negatives as false positives aren't
* possible. So if we didn't find a mount namespace and the sequence
* counter has changed we need to retry. If the sequence counter is
* still the same we know the search actually failed.
*/
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
struct mnt_namespace *mnt_ns;
struct ns_common *ns;
guard(rcu)();
ns = ns_tree_lookup_rcu(mnt_ns_id, CLONE_NEWNS);
if (!ns)
return NULL;
/*
* The last reference count is put with RCU delay so we can
* unconditionally acquire a reference here.
*/
mnt_ns = container_of(ns, struct mnt_namespace, ns);
refcount_inc(&mnt_ns->passive);
return mnt_ns;
}
static inline void lock_mount_hash(void)
{
write_seqlock(&mount_lock);
}
static inline void unlock_mount_hash(void)
{
write_sequnlock(&mount_lock);
}
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
tmp = tmp + (tmp >> m_hash_shift);
return &mount_hashtable[tmp & m_hash_mask];
}
static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
tmp = tmp + (tmp >> mp_hash_shift);
return &mountpoint_hashtable[tmp & mp_hash_mask];
}
static int mnt_alloc_id(struct mount *mnt)
{
int res;
xa_lock(&mnt_id_xa);
res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
if (!res)
mnt->mnt_id_unique = ++mnt_id_ctr;
xa_unlock(&mnt_id_xa);
return res;
}
static void mnt_free_id(struct mount *mnt)
{
xa_erase(&mnt_id_xa, mnt->mnt_id);
}
/*
* Allocate a new peer group ID
*/
static int mnt_alloc_group_id(struct mount *mnt)
{
int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
if (res < 0)
return res;
mnt->mnt_group_id = res;
return 0;
}
/*
* Release a peer group ID
*/
void mnt_release_group_id(struct mount *mnt)
{
ida_free(&mnt_group_ida, mnt->mnt_group_id);
mnt->mnt_group_id = 0;
}
/*
* vfsmount lock must be held for read
*/
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
preempt_disable();
mnt->mnt_count += n;
preempt_enable();
#endif
}
/*
* vfsmount lock must be held for write
*/
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
int count = 0;
int cpu;
for_each_possible_cpu(cpu) {
count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
}
return count;
#else
return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
if (mnt) {
int err;
err = mnt_alloc_id(mnt);
if (err)
goto out_free_cache;
if (name)
mnt->mnt_devname = kstrdup_const(name,
GFP_KERNEL_ACCOUNT);
else
mnt->mnt_devname = "none";
if (!mnt->mnt_devname)
goto out_free_id;
#ifdef CONFIG_SMP
mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
if (!mnt->mnt_pcp)
goto out_free_devname;
this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
mnt->mnt_count = 1;
mnt->mnt_writers = 0;
#endif
INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_mounts);
INIT_LIST_HEAD(&mnt->mnt_list);
INIT_LIST_HEAD(&mnt->mnt_expire);
INIT_LIST_HEAD(&mnt->mnt_share);
INIT_HLIST_HEAD(&mnt->mnt_slave_list);
INIT_HLIST_NODE(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
RB_CLEAR_NODE(&mnt->mnt_node);
mnt->mnt.mnt_idmap = &nop_mnt_idmap;
}
return mnt;
#ifdef CONFIG_SMP
out_free_devname:
kfree_const(mnt->mnt_devname);
#endif
out_free_id:
mnt_free_id(mnt);
out_free_cache:
kmem_cache_free(mnt_cache, mnt);
return NULL;
}
/*
* Most r/o checks on a fs are for operations that take
* discrete amounts of time, like a write() or unlink().
* We must keep track of when those operations start
* (for permission checks) and when they end, so that
* we can determine when writes are able to occur to
* a filesystem.
*/
/*
* __mnt_is_readonly: check whether a mount is read-only
* @mnt: the mount to check for its write status
*
* This shouldn't be used directly outside of the VFS.
* It does not guarantee that the filesystem will stay
* r/w, just that it is right *now*. This can not and
* should not be used in place of IS_RDONLY(inode).
* mnt_want/drop_write() will _keep_ the filesystem
* r/w.
*/
bool __mnt_is_readonly(const struct vfsmount *mnt)
{
return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
mnt->mnt_writers++;
#endif
}
static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
mnt->mnt_writers--;
#endif
}
static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
int cpu;
for_each_possible_cpu(cpu) {
count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
}
return count;
#else
return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(const struct vfsmount *mnt)
{
if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
return 1;
/*
* The barrier pairs with the barrier in sb_start_ro_state_change()
* making sure if we don't see s_readonly_remount set yet, we also will
* not see any superblock / mount flag changes done by remount.
* It also pairs with the barrier in sb_end_ro_state_change()
* assuring that if we see s_readonly_remount already cleared, we will
* see the values of superblock / mount flags updated by remount.
*/
smp_rmb();
return __mnt_is_readonly(mnt);
}
/*
* Most r/o & frozen checks on a fs are for operations that take discrete
* amounts of time, like a write() or unlink(). We must keep track of when
* those operations start (for permission checks) and when they end, so that we
* can determine when writes are able to occur to a filesystem.
*/
/**
* mnt_get_write_access - get write access to a mount without freeze protection
* @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is about to be performed to
* it, and makes sure that writes are allowed (mount is read-write) before
* returning success. This operation does not protect against filesystem being
* frozen. When the write operation is finished, mnt_put_write_access() must be
* called. This is effectively a refcount.
*/
int mnt_get_write_access(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
int ret = 0;
preempt_disable();
mnt_inc_writers(mnt);
/*
* The store to mnt_inc_writers must be visible before we pass
* WRITE_HOLD loop below, so that the slowpath can see our
* incremented count after it has set WRITE_HOLD.
*/
smp_mb();
might_lock(&mount_lock.lock);
while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
cpu_relax();
} else {
/*
* This prevents priority inversion, if the task
* setting WRITE_HOLD got preempted on a remote
* CPU, and it prevents livelock if the task setting
* WRITE_HOLD has a lower priority and is bound to
* the same CPU as the task that is spinning here.
*/
preempt_enable();
read_seqlock_excl(&mount_lock);
read_sequnlock_excl(&mount_lock);
preempt_disable();
}
}
/*
* The barrier pairs with the barrier sb_start_ro_state_change() making
* sure that if we see WRITE_HOLD cleared, we will also see
* s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
* mnt_is_readonly() and bail in case we are racing with remount
* read-only.
*/
smp_rmb();
if (mnt_is_readonly(m)) {
mnt_dec_writers(mnt);
ret = -EROFS;
}
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);
/**
* mnt_want_write - get write access to a mount
* @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is about to be performed to
* it, and makes sure that writes are allowed (mount is read-write, filesystem
* is not frozen) before returning success. When the write operation is
* finished, mnt_drop_write() must be called. This is effectively a refcount.
*/
int mnt_want_write(struct vfsmount *m)
{
int ret;
sb_start_write(m->mnt_sb);
ret = mnt_get_write_access(m);
if (ret)
sb_end_write(m->mnt_sb);
return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
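/*
 * Illustrative usage sketch (not part of the original source): a typical
 * caller brackets a modification of the filesystem with this pair, bailing
 * out early if the mount is (or has just become) read-only. The helper name
 * below is hypothetical:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = do_the_modification(path);	// hypothetical helper
 *	mnt_drop_write(path->mnt);
 *	return err;
 */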
/**
* mnt_get_write_access_file - get write access to a file's mount
* @file: the file whose mount on which to take a write
*
* This is like mnt_get_write_access, but if @file is already open for write it
* skips incrementing mnt_writers (since the open file already has a reference)
* and instead only does the check for emergency r/o remounts. This must be
* paired with mnt_put_write_access_file.
*/
int mnt_get_write_access_file(struct file *file)
{
if (file->f_mode & FMODE_WRITER) {
/*
* Superblock may have become readonly while there are still
* writable fd's, e.g. due to a fs error with errors=remount-ro
*/
if (__mnt_is_readonly(file->f_path.mnt))
return -EROFS;
return 0;
}
return mnt_get_write_access(file->f_path.mnt);
}
/**
* mnt_want_write_file - get write access to a file's mount
* @file: the file whose mount on which to take a write
*
* This is like mnt_want_write, but if the file is already open for writing it
* skips incrementing mnt_writers (since the open file already has a reference)
* and instead only does the freeze protection and the check for emergency r/o
* remounts. This must be paired with mnt_drop_write_file.
*/
int mnt_want_write_file(struct file *file)
{
int ret;
sb_start_write(file_inode(file)->i_sb);
ret = mnt_get_write_access_file(file);
if (ret)
sb_end_write(file_inode(file)->i_sb);
return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
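/*
 * Illustrative usage sketch (not part of the original source): ioctl-style
 * paths that modify the file's filesystem typically do the following. The
 * helper name below is hypothetical:
 *
 *	err = mnt_want_write_file(file);
 *	if (err)
 *		return err;
 *	err = do_the_update(file);		// hypothetical helper
 *	mnt_drop_write_file(file);
 *	return err;
 */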
/**
* mnt_put_write_access - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done
* performing writes to it. Must be matched with
* mnt_get_write_access() call above.
*/
void mnt_put_write_access(struct vfsmount *mnt)
{
preempt_disable();
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);
/**
* mnt_drop_write - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done performing writes to it and
* also allows filesystem to be frozen again. Must be matched with
* mnt_want_write() call above.
*/
void mnt_drop_write(struct vfsmount *mnt)
{
mnt_put_write_access(mnt);
sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
void mnt_put_write_access_file(struct file *file)
{
if (!(file->f_mode & FMODE_WRITER))
mnt_put_write_access(file->f_path.mnt);
}
void mnt_drop_write_file(struct file *file)
{
mnt_put_write_access_file(file);
sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
/**
* mnt_hold_writers - prevent write access to the given mount
* @mnt: mnt to prevent write access to
*
* Prevents write access to @mnt if there are no active writers for @mnt.
* This function needs to be called and return successfully before changing
* properties of @mnt that need to remain stable for callers with write access
* to @mnt.
*
* After this function has been called successfully, callers must pair it with
* a call to mnt_unhold_writers() in order to stop preventing write access to
* @mnt.
*
* Context: This function expects to be in mount_locked_reader scope serializing
* setting WRITE_HOLD.
* Return: On success 0 is returned.
* On error, -EBUSY is returned.
*/
static inline int mnt_hold_writers(struct mount *mnt)
{
set_write_hold(mnt);
/*
* After storing WRITE_HOLD, we'll read the counters. This store
* should be visible before we do.
*/
smp_mb();
/*
* With writers on hold, if this value is zero, then there are
* definitely no active writers (although held writers may subsequently
* increment the count, they'll have to wait, and decrement it after
* seeing MNT_READONLY).
*
* It is OK to have counter incremented on one CPU and decremented on
* another: the sum will add up correctly. The danger would be when we
* sum up each counter, if we read a counter before it is incremented,
* but then read another CPU's count which it has been subsequently
* decremented from -- we would see more decrements than we should.
* WRITE_HOLD protects against this scenario, because
* mnt_want_write first increments count, then smp_mb, then spins on
* WRITE_HOLD, so it can't be decremented by another CPU while
* we're counting up here.
*/
if (mnt_get_writers(mnt) > 0)
return -EBUSY;
return 0;
}
/**
* mnt_unhold_writers - stop preventing write access to the given mount
* @mnt: mnt to stop preventing write access to
*
* Stop preventing write access to @mnt allowing callers to gain write access
* to @mnt again.
*
* This function can only be called after a call to mnt_hold_writers().
*
* Context: This function expects to be in the same mount_locked_reader scope
* as the matching mnt_hold_writers().
*/
static inline void mnt_unhold_writers(struct mount *mnt)
{
if (!test_write_hold(mnt))
return;
/*
* MNT_READONLY must become visible before ~WRITE_HOLD, so writers
* that become unheld will see MNT_READONLY.
*/
smp_wmb();
clear_write_hold(mnt);
}
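/*
 * Illustrative pairing sketch (not part of the original source), mirroring
 * mnt_make_readonly() below: both calls are made within the same
 * mount_locked_reader scope, with the state change applied in between:
 *
 *	guard(mount_locked_reader)();
 *	ret = mnt_hold_writers(mnt);
 *	if (!ret)
 *		mnt->mnt.mnt_flags |= MNT_READONLY;
 *	mnt_unhold_writers(mnt);
 */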
static inline void mnt_del_instance(struct mount *m)
{
struct mount **p = m->mnt_pprev_for_sb;
struct mount *next = m->mnt_next_for_sb;
if (next)
next->mnt_pprev_for_sb = p;
*p = next;
}
static inline void mnt_add_instance(struct mount *m, struct super_block *s)
{
struct mount *first = s->s_mounts;
if (first)
first->mnt_pprev_for_sb = &m->mnt_next_for_sb;
m->mnt_next_for_sb = first;
m->mnt_pprev_for_sb = &s->s_mounts;
s->s_mounts = m;
}
static int mnt_make_readonly(struct mount *mnt)
{
int ret;
ret = mnt_hold_writers(mnt);
if (!ret)
mnt->mnt.mnt_flags |= MNT_READONLY;
mnt_unhold_writers(mnt);
return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
int err = 0;
/* Racy optimization. Recheck the counter under WRITE_HOLD */
if (atomic_long_read(&sb->s_remove_count))
return -EBUSY;
guard(mount_locked_reader)();
for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
if (!(m->mnt.mnt_flags & MNT_READONLY)) {
err = mnt_hold_writers(m);
if (err)
break;
}
}
if (!err && atomic_long_read(&sb->s_remove_count))
err = -EBUSY;
if (!err)
sb_start_ro_state_change(sb);
for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
if (test_write_hold(m))
clear_write_hold(m);
}
return err;
}
static void free_vfsmnt(struct mount *mnt)
{
mnt_idmap_put(mnt_idmap(&mnt->mnt));
kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
free_percpu(mnt->mnt_pcp);
#endif
kmem_cache_free(mnt_cache, mnt);
}
static void delayed_free_vfsmnt(struct rcu_head *head)
{
free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
struct mount *mnt;
if (read_seqretry(&mount_lock, seq))
return 1;
if (bastard == NULL)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
smp_mb(); // see mntput_no_expire() and do_umount()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
lock_mount_hash();
if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
mnt_add_count(mnt, -1);
unlock_mount_hash();
return 1;
}
unlock_mount_hash();
/* caller will mntput() */
return -1;
}
/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
int res = __legitimize_mnt(bastard, seq);
if (likely(!res))
return true;
if (unlikely(res < 0)) {
rcu_read_unlock();
mntput(bastard);
rcu_read_lock();
}
return false;
}
/**
* __lookup_mnt - mount hash lookup
* @mnt: parent mount
* @dentry: dentry of mountpoint
*
* If @mnt has a child mount @c mounted on @dentry find and return it.
* Caller must either hold the spinlock component of @mount_lock or
* hold rcu_read_lock(), sample the seqcount component before the call
* and recheck it afterwards.
*
* Return: The child of @mnt mounted on @dentry or %NULL.
*/
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
struct hlist_head *head = m_hash(mnt, dentry);
struct mount *p;
hlist_for_each_entry_rcu(p, head, mnt_hash)
if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
return p;
return NULL;
}
/**
* lookup_mnt - Return the child mount mounted at given location
* @path: location in the namespace
*
* Acquires and returns a new reference to mount at given location
* or %NULL if nothing is mounted there.
*/
struct vfsmount *lookup_mnt(const struct path *path)
{
struct mount *child_mnt;
struct vfsmount *m;
unsigned seq;
rcu_read_lock();
do {
seq = read_seqbegin(&mount_lock);
child_mnt = __lookup_mnt(path->mnt, path->dentry);
m = child_mnt ? &child_mnt->mnt : NULL;
} while (!legitimize_mnt(m, seq));
rcu_read_unlock();
return m;
}
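/*
 * Illustrative usage sketch (not part of the original source): the returned
 * mount carries its own reference, so callers drop it with mntput() once
 * they are done with it:
 *
 *	struct vfsmount *m = lookup_mnt(path);
 *	if (m) {
 *		// inspect or traverse into the child mount
 *		mntput(m);
 *	}
 */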
/*
* __is_local_mountpoint - Test to see if dentry is a mountpoint in the
* current mount namespace.
*
* The common case is dentries are not mountpoints at all and that
* test is handled inline. For the slow case when we are actually
* dealing with a mountpoint of some kind, walk through all of the
* mounts in the current mount namespace and test to see if the dentry
* is a mountpoint.
*
* The mount_hashtable is not usable in the context because we
* need to identify all mounts that may be in the current mount
* namespace not just a mount that happens to have some specified
* parent mount.
*/
bool __is_local_mountpoint(const struct dentry *dentry)
{
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
struct mount *mnt, *n;
guard(namespace_shared)();
rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node)
if (mnt->mnt_mountpoint == dentry)
return true;
return false;
}
struct pinned_mountpoint {
struct hlist_node node;
struct mountpoint *mp;
struct mount *parent;
};
static bool lookup_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
hlist_add_head(&m->node, &mp->m_list);
m->mp = mp;
return true;
}
}
return false;
}
static int get_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
struct mountpoint *mp __free(kfree) = NULL;
bool found;
int ret;
if (d_mountpoint(dentry)) {
/* might be worth a WARN_ON() */
if (d_unlinked(dentry))
return -ENOENT;
mountpoint:
read_seqlock_excl(&mount_lock);
found = lookup_mountpoint(dentry, m);
read_sequnlock_excl(&mount_lock);
if (found)
return 0;
}
if (!mp)
mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
if (!mp)
return -ENOMEM;
/* Exactly one process may set d_mounted */
ret = d_set_mounted(dentry);
/* Someone else set d_mounted? */
if (ret == -EBUSY)
goto mountpoint;
/* The dentry is not available as a mountpoint? */
if (ret)
return ret;
/* Add the new mountpoint to the hash table */
read_seqlock_excl(&mount_lock);
mp->m_dentry = dget(dentry);
hlist_add_head(&mp->m_hash, mp_hash(dentry));
INIT_HLIST_HEAD(&mp->m_list);
hlist_add_head(&m->node, &mp->m_list);
m->mp = no_free_ptr(mp);
read_sequnlock_excl(&mount_lock);
return 0;
}
/*
* vfsmount lock must be held. Additionally, the caller is responsible
* for serializing calls for given disposal list.
*/
static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list)
{
if (hlist_empty(&mp->m_list)) {
struct dentry *dentry = mp->m_dentry;
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
dput_to_list(dentry, list);
hlist_del(&mp->m_hash);
kfree(mp);
}
}
/*
* locks: mount_lock [read_seqlock_excl], namespace_sem [excl]
*/
static void unpin_mountpoint(struct pinned_mountpoint *m)
{
if (m->mp) {
hlist_del(&m->node);
maybe_free_mountpoint(m->mp, &ex_mountpoints);
}
}
static inline int check_mnt(const struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
static inline bool check_anonymous_mnt(struct mount *mnt)
{
u64 seq;
if (!is_anon_ns(mnt->mnt_ns))
return false;
seq = mnt->mnt_ns->seq_origin;
return !seq || (seq == current->nsproxy->mnt_ns->ns.ns_id);
}
/*
* vfsmount lock must be held for write
*/
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
if (ns) {
ns->event = ++event;
wake_up_interruptible(&ns->poll);
}
}
/*
* vfsmount lock must be held for write
*/
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
if (ns && ns->event != event) {
ns->event = event;
wake_up_interruptible(&ns->poll);
}
}
/*
* locks: mount_lock[write_seqlock]
*/
static void __umount_mnt(struct mount *mnt, struct list_head *shrink_list)
{
struct mountpoint *mp;
struct mount *parent = mnt->mnt_parent;
if (unlikely(parent->overmount == mnt))
parent->overmount = NULL;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
hlist_del_init_rcu(&mnt->mnt_hash);
hlist_del_init(&mnt->mnt_mp_list);
mp = mnt->mnt_mp;
mnt->mnt_mp = NULL;
maybe_free_mountpoint(mp, shrink_list);
}
/*
* locks: mount_lock[write_seqlock], namespace_sem[excl] (for ex_mountpoints)
*/
static void umount_mnt(struct mount *mnt)
{
__umount_mnt(mnt, &ex_mountpoints);
}
/*
* vfsmount lock must be held for write
*/
void mnt_set_mountpoint(struct mount *mnt,
struct mountpoint *mp,
struct mount *child_mnt)
{
child_mnt->mnt_mountpoint = mp->m_dentry;
child_mnt->mnt_parent = mnt;
child_mnt->mnt_mp = mp;
hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
static void make_visible(struct mount *mnt)
{
struct mount *parent = mnt->mnt_parent;
if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root))
parent->overmount = mnt;
hlist_add_head_rcu(&mnt->mnt_hash,
m_hash(&parent->mnt, mnt->mnt_mountpoint));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/**
* attach_mnt - mount a mount, attach to @mount_hashtable and parent's
* list of child mounts
* @parent: the parent
* @mnt: the new mount
* @mp: the new mountpoint
*
* Mount @mnt at @mp on @parent. Then attach @mnt
* to @parent's child mount list and to @mount_hashtable.
*
* Note, when make_visible() is called @mnt->mnt_parent already points
* to the correct parent.
*
* Context: This function expects namespace_lock() and lock_mount_hash()
* to have been acquired in that order.
*/
static void attach_mnt(struct mount *mnt, struct mount *parent,
struct mountpoint *mp)
{
mnt_set_mountpoint(parent, mp, mnt);
make_visible(mnt);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
struct mountpoint *old_mp = mnt->mnt_mp;
list_del_init(&mnt->mnt_child);
hlist_del_init(&mnt->mnt_mp_list);
hlist_del_init_rcu(&mnt->mnt_hash);
attach_mnt(mnt, parent, mp);
maybe_free_mountpoint(old_mp, &ex_mountpoints);
}
static inline struct mount *node_to_mount(struct rb_node *node)
{
return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}
static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
struct rb_node **link = &ns->mounts.rb_node;
struct rb_node *parent = NULL;
bool mnt_first_node = true, mnt_last_node = true;
WARN_ON(mnt_ns_attached(mnt));
mnt->mnt_ns = ns;
while (*link) {
parent = *link;
if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
link = &parent->rb_left;
mnt_last_node = false;
} else {
link = &parent->rb_right;
mnt_first_node = false;
}
}
if (mnt_last_node)
ns->mnt_last_node = &mnt->mnt_node;
if (mnt_first_node)
ns->mnt_first_node = &mnt->mnt_node;
rb_link_node(&mnt->mnt_node, parent, link);
rb_insert_color(&mnt->mnt_node, &ns->mounts);
mnt_notify_add(mnt);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
struct list_head *next = p->mnt_mounts.next;
if (next == &p->mnt_mounts) {
while (1) {
if (p == root)
return NULL;
next = p->mnt_child.next;
if (next != &p->mnt_parent->mnt_mounts)
break;
p = p->mnt_parent;
}
}
return list_entry(next, struct mount, mnt_child);
}
static struct mount *skip_mnt_tree(struct mount *p)
{
struct list_head *prev = p->mnt_mounts.prev;
while (prev != &p->mnt_mounts) {
p = list_entry(prev, struct mount, mnt_child);
prev = p->mnt_mounts.prev;
}
return p;
}
/*
* vfsmount lock must be held for write
*/
static void commit_tree(struct mount *mnt)
{
struct mnt_namespace *n = mnt->mnt_parent->mnt_ns;
if (!mnt_ns_attached(mnt)) {
for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
mnt_add_to_ns(n, m);
n->nr_mounts += n->pending_mounts;
n->pending_mounts = 0;
}
make_visible(mnt);
touch_mnt_namespace(n);
}
static void setup_mnt(struct mount *m, struct dentry *root)
{
struct super_block *s = root->d_sb;
atomic_inc(&s->s_active);
m->mnt.mnt_sb = s;
m->mnt.mnt_root = dget(root);
m->mnt_mountpoint = m->mnt.mnt_root;
m->mnt_parent = m;
guard(mount_locked_reader)();
mnt_add_instance(m, s);
}
/**
* vfs_create_mount - Create a mount for a configured superblock
* @fc: The configuration context with the superblock attached
*
* Create a mount to an already configured superblock. If necessary, the
* caller should invoke vfs_get_tree() before calling this.
*
* Note that this does not attach the mount to anything.
*/
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
struct mount *mnt;
if (!fc->root)
return ERR_PTR(-EINVAL);
mnt = alloc_vfsmnt(fc->source);
if (!mnt)
return ERR_PTR(-ENOMEM);
if (fc->sb_flags & SB_KERNMOUNT)
mnt->mnt.mnt_flags = MNT_INTERNAL;
setup_mnt(mnt, fc->root);
return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
struct vfsmount *fc_mount(struct fs_context *fc)
{
int err = vfs_get_tree(fc);
if (!err) {
up_write(&fc->root->d_sb->s_umount);
return vfs_create_mount(fc);
}
return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);
struct vfsmount *fc_mount_longterm(struct fs_context *fc)
{
struct vfsmount *mnt = fc_mount(fc);
if (!IS_ERR(mnt))
real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
return mnt;
}
EXPORT_SYMBOL(fc_mount_longterm);
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data)
{
struct fs_context *fc;
struct vfsmount *mnt;
int ret = 0;
if (!type)
return ERR_PTR(-EINVAL);
fc = fs_context_for_mount(type, flags);
if (IS_ERR(fc))
return ERR_CAST(fc);
if (name)
ret = vfs_parse_fs_string(fc, "source", name);
if (!ret)
ret = parse_monolithic_mount_data(fc, data);
if (!ret)
mnt = fc_mount(fc);
else
mnt = ERR_PTR(ret);
put_fs_context(fc);
return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
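/*
 * Illustrative usage sketch (not part of the original source): in-kernel
 * users typically create an internal mount for a known filesystem type and
 * drop it again with mntput() when done:
 *
 *	struct vfsmount *mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	...
 *	mntput(mnt);
 */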
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
struct mount *mnt;
int err;
mnt = alloc_vfsmnt(old->mnt_devname);
if (!mnt)
return ERR_PTR(-ENOMEM);
mnt->mnt.mnt_flags = READ_ONCE(old->mnt.mnt_flags) &
~MNT_INTERNAL_FLAGS;
if (flag & (CL_SLAVE | CL_PRIVATE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
mnt->mnt_group_id = old->mnt_group_id;
if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
err = mnt_alloc_group_id(mnt);
if (err)
goto out_free;
}
if (mnt->mnt_group_id)
set_mnt_shared(mnt);
mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
setup_mnt(mnt, root);
if (flag & CL_PRIVATE) // we are done with it
return mnt;
if (peers(mnt, old))
list_add(&mnt->mnt_share, &old->mnt_share);
if ((flag & CL_SLAVE) && old->mnt_group_id) {
hlist_add_head(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
} else if (IS_MNT_SLAVE(old)) {
hlist_add_behind(&mnt->mnt_slave, &old->mnt_slave);
mnt->mnt_master = old->mnt_master;
}
return mnt;
out_free:
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
struct hlist_node *p;
struct mount *m;
/*
* The warning here probably indicates that somebody messed
* up a mnt_want/drop_write() pair. If this happens, the
* filesystem was probably unable to make r/w->r/o transitions.
* The locking used to deal with mnt_count decrement provides barriers,
* so mnt_get_writers() below is safe.
*/
WARN_ON(mnt_get_writers(mnt));
if (unlikely(mnt->mnt_pins.first))
mnt_pin_kill(mnt);
hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
hlist_del(&m->mnt_umount);
mntput(&m->mnt);
}
fsnotify_vfsmount_delete(&mnt->mnt);
dput(mnt->mnt.mnt_root);
deactivate_super(mnt->mnt.mnt_sb);
mnt_free_id(mnt);
call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}
static void __cleanup_mnt(struct rcu_head *head)
{
cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}
static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
struct llist_node *node = llist_del_all(&delayed_mntput_list);
struct mount *m, *t;
llist_for_each_entry_safe(m, t, node, mnt_llist)
cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
LIST_HEAD(list);
int count;
rcu_read_lock();
if (likely(READ_ONCE(mnt->mnt_ns))) {
/*
* Since we don't do lock_mount_hash() here,
* ->mnt_ns can change under us. However, if it's
* non-NULL, then there's a reference that won't
* be dropped until after an RCU delay done after
* turning ->mnt_ns NULL. So if we observe it
* non-NULL under rcu_read_lock(), the reference
* we are dropping is not the final one.
*/
mnt_add_count(mnt, -1);
rcu_read_unlock();
return;
}
lock_mount_hash();
/*
* make sure that if __legitimize_mnt() has not seen us grab
* mount_lock, we'll see their refcount increment here.
*/
smp_mb();
mnt_add_count(mnt, -1);
count = mnt_get_count(mnt);
if (count != 0) {
WARN_ON(count < 0);
rcu_read_unlock();
unlock_mount_hash();
return;
}
if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
rcu_read_unlock();
unlock_mount_hash();
return;
}
mnt->mnt.mnt_flags |= MNT_DOOMED;
rcu_read_unlock();
mnt_del_instance(mnt);
if (unlikely(!list_empty(&mnt->mnt_expire)))
list_del(&mnt->mnt_expire);
if (unlikely(!list_empty(&mnt->mnt_mounts))) {
struct mount *p, *tmp;
list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
__umount_mnt(p, &list);
hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
}
}
unlock_mount_hash();
shrink_dentry_list(&list);
if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
struct task_struct *task = current;
if (likely(!(task->flags & PF_KTHREAD))) {
init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
return;
}
if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
schedule_delayed_work(&delayed_mntput_work, 1);
return;
}
cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
if (mnt) {
struct mount *m = real_mount(mnt);
/* avoid cacheline pingpong */
if (unlikely(m->mnt_expiry_mark))
WRITE_ONCE(m->mnt_expiry_mark, 0);
mntput_no_expire(m);
}
}
EXPORT_SYMBOL(mntput);
struct vfsmount *mntget(struct vfsmount *mnt)
{
if (mnt)
mnt_add_count(real_mount(mnt), 1);
return mnt;
}
EXPORT_SYMBOL(mntget);
/*
* Make a mount point inaccessible to new lookups.
* Because there may still be current users, the caller MUST WAIT
* for an RCU grace period before destroying the mount point.
*/
void mnt_make_shortterm(struct vfsmount *mnt)
{
if (mnt)
real_mount(mnt)->mnt_ns = NULL;
}
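/*
 * Illustrative sketch (not part of the original source) of the rule stated
 * above: detach the longterm reference, wait out current lockless users,
 * then drop the mount:
 *
 *	mnt_make_shortterm(mnt);
 *	synchronize_rcu();	// wait for concurrent RCU readers
 *	mntput(mnt);
 */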
/**
* path_is_mountpoint() - Check if path is a mount in the current namespace.
* @path: path to check
*
* d_mountpoint() can only be used reliably to establish if a dentry is
* not mounted in any namespace and that common case is handled inline.
* d_mountpoint() isn't aware of the possibility there may be multiple
* mounts using a given dentry in a different namespace. This function
* checks if the passed in path is a mountpoint rather than the dentry
* alone.
*/
bool path_is_mountpoint(const struct path *path)
{
unsigned seq;
bool res;
if (!d_mountpoint(path->dentry))
return false;
rcu_read_lock();
do {
seq = read_seqbegin(&mount_lock);
res = __path_is_mountpoint(path);
} while (read_seqretry(&mount_lock, seq));
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL(path_is_mountpoint);
struct vfsmount *mnt_clone_internal(const struct path *path)
{
struct mount *p;
p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
if (IS_ERR(p))
return ERR_CAST(p);
p->mnt.mnt_flags |= MNT_INTERNAL;
return &p->mnt;
}
/*
* Returns the mount which either has the specified mnt_id, or has the next
* smallest id after the specified one.
*/
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
struct rb_node *node = ns->mounts.rb_node;
struct mount *ret = NULL;
while (node) {
struct mount *m = node_to_mount(node);
if (mnt_id <= m->mnt_id_unique) {
ret = node_to_mount(node);
if (mnt_id == m->mnt_id_unique)
break;
node = node->rb_left;
} else {
node = node->rb_right;
}
}
return ret;
}
/*
* Returns the mount which either has the specified mnt_id, or has the next
* greater id before the specified one.
*/
static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{
struct rb_node *node = ns->mounts.rb_node;
struct mount *ret = NULL;
while (node) {
struct mount *m = node_to_mount(node);
if (mnt_id >= m->mnt_id_unique) {
ret = node_to_mount(node);
if (mnt_id == m->mnt_id_unique)
break;
node = node->rb_right;
} else {
node = node->rb_left;
}
}
return ret;
}
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_mounts *p = m->private;
down_read(&namespace_sem);
return mnt_find_id_at(p->ns, *pos);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct mount *next = NULL, *mnt = v;
struct rb_node *node = rb_next(&mnt->mnt_node);
++*pos;
if (node) {
next = node_to_mount(node);
*pos = next->mnt_id_unique;
}
return next;
}
static void m_stop(struct seq_file *m, void *v)
{
up_read(&namespace_sem);
}
static int m_show(struct seq_file *m, void *v)
{
struct proc_mounts *p = m->private;
struct mount *r = v;
return p->show(m, &r->mnt);
}
const struct seq_operations mounts_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = m_show,
};
#endif /* CONFIG_PROC_FS */
/**
* may_umount_tree - check if a mount tree is busy
* @m: root of mount tree
*
* This is called to check if a tree of mounts has any
* open files, pwds, chroots or sub mounts that are
* busy.
*/
int may_umount_tree(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
bool busy = false;
/* write lock needed for mnt_get_count */
lock_mount_hash();
for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
busy = true;
break;
}
}
unlock_mount_hash();
return !busy;
}
EXPORT_SYMBOL(may_umount_tree);
/**
* may_umount - check if a mount point is busy
* @mnt: root of mount
*
* This is called to check if a mount point has any
* open files, pwds, chroots or sub mounts. If the
* mount has sub mounts this will return busy
* regardless of whether the sub mounts are busy.
*
* Doesn't take quota and stuff into account. IOW, in some cases it will
* give false negatives. The main reason why it's here is that we need
* a non-destructive way to look for easily umountable filesystems.
*/
int may_umount(struct vfsmount *mnt)
{
int ret = 1;
down_read(&namespace_sem);
lock_mount_hash();
if (propagate_mount_busy(real_mount(mnt), 2))
ret = 0;
unlock_mount_hash();
up_read(&namespace_sem);
return ret;
}
EXPORT_SYMBOL(may_umount);
#ifdef CONFIG_FSNOTIFY
static void mnt_notify(struct mount *p)
{
if (!p->prev_ns && p->mnt_ns) {
fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
} else if (p->prev_ns && !p->mnt_ns) {
fsnotify_mnt_detach(p->prev_ns, &p->mnt);
} else if (p->prev_ns == p->mnt_ns) {
fsnotify_mnt_move(p->mnt_ns, &p->mnt);
} else {
fsnotify_mnt_detach(p->prev_ns, &p->mnt);
fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
}
p->prev_ns = p->mnt_ns;
}
static void notify_mnt_list(void)
{
struct mount *m, *tmp;
/*
* Notify about mounts that were added/reparented/detached/remain
* connected after unmount.
*/
list_for_each_entry_safe(m, tmp, ¬ify_list, to_notify) {
mnt_notify(m);
list_del_init(&m->to_notify);
}
}
static bool need_notify_mnt_list(void)
{
return !list_empty(¬ify_list);
}
#else
static void notify_mnt_list(void)
{
}
static bool need_notify_mnt_list(void)
{
return false;
}
#endif
static void free_mnt_ns(struct mnt_namespace *);
static void namespace_unlock(void)
{
struct hlist_head head;
struct hlist_node *p;
struct mount *m;
struct mnt_namespace *ns = emptied_ns;
LIST_HEAD(list);
hlist_move_list(&unmounted, &head);
list_splice_init(&ex_mountpoints, &list);
emptied_ns = NULL;
if (need_notify_mnt_list()) {
/*
* No point blocking out concurrent readers while notifications
* are sent. This will also allow statmount()/listmount() to run
* concurrently.
*/
downgrade_write(&namespace_sem);
notify_mnt_list();
up_read(&namespace_sem);
} else {
up_write(&namespace_sem);
}
if (unlikely(ns)) {
/* Make sure we notice when we leak mounts. */
VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
free_mnt_ns(ns);
}
shrink_dentry_list(&list);
if (likely(hlist_empty(&head)))
return;
synchronize_rcu_expedited();
hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
hlist_del(&m->mnt_umount);
mntput(&m->mnt);
}
}
static inline void namespace_lock(void)
{
down_write(&namespace_sem);
}
enum umount_tree_flags {
UMOUNT_SYNC = 1,
UMOUNT_PROPAGATE = 2,
UMOUNT_CONNECTED = 4,
};
static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
/* Leaving mounts connected is only valid for lazy umounts */
if (how & UMOUNT_SYNC)
return true;
/* A mount without a parent has nothing to be connected to */
if (!mnt_has_parent(mnt))
return true;
/* Because the reference counting rules change when mounts are
* unmounted and connected, umounted mounts may not be
* connected to mounted mounts.
*/
if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
return true;
/* Has it been requested that the mount remain connected? */
if (how & UMOUNT_CONNECTED)
return false;
/* Is the mount locked such that it needs to remain connected? */
if (IS_MNT_LOCKED(mnt))
return false;
/* By default disconnect the mount */
return true;
}
/*
* mount_lock must be held
* namespace_sem must be held for write
*/
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
LIST_HEAD(tmp_list);
struct mount *p;
if (how & UMOUNT_PROPAGATE)
propagate_mount_unlock(mnt);
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
if (mnt_ns_attached(p))
move_from_ns(p);
list_add_tail(&p->mnt_list, &tmp_list);
}
/* Hide the mounts from mnt_mounts */
list_for_each_entry(p, &tmp_list, mnt_list) {
list_del_init(&p->mnt_child);
}
/* Add propagated mounts to the tmp_list */
if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);
bulk_make_private(&tmp_list);
while (!list_empty(&tmp_list)) {
struct mnt_namespace *ns;
bool disconnect;
p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
ns = p->mnt_ns;
if (ns) {
ns->nr_mounts--;
__touch_mnt_namespace(ns);
}
p->mnt_ns = NULL;
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
disconnect = disconnect_mount(p, how);
if (mnt_has_parent(p)) {
if (!disconnect) {
/* Don't forget about p */
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
} else {
umount_mnt(p);
}
}
if (disconnect)
hlist_add_head(&p->mnt_umount, &unmounted);
/*
* At this point p->mnt_ns is NULL, notification will be queued
* only if
*
* - p->prev_ns is non-NULL *and*
* - p->prev_ns->n_fsnotify_marks is non-NULL
*
* This will preclude queuing the mount if this is a cleanup
* after a failed copy_tree() or destruction of an anonymous
* namespace, etc.
*/
mnt_notify_add(p);
}
}
static void shrink_submounts(struct mount *mnt);
static int do_umount_root(struct super_block *sb)
{
int ret = 0;
down_write(&sb->s_umount);
if (!sb_rdonly(sb)) {
struct fs_context *fc;
fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
SB_RDONLY);
if (IS_ERR(fc)) {
ret = PTR_ERR(fc);
} else {
ret = parse_monolithic_mount_data(fc, NULL);
if (!ret)
ret = reconfigure_super(fc);
put_fs_context(fc);
}
}
up_write(&sb->s_umount);
return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
struct super_block *sb = mnt->mnt.mnt_sb;
int retval;
retval = security_sb_umount(&mnt->mnt, flags);
if (retval)
return retval;
/*
* Allow userspace to request a mountpoint be expired rather than
* unmounting unconditionally. Unmount only happens if:
* (1) the mark is already set (the mark is cleared by mntput())
* (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
*/
if (flags & MNT_EXPIRE) {
if (&mnt->mnt == current->fs->root.mnt ||
flags & (MNT_FORCE | MNT_DETACH))
return -EINVAL;
/*
* probably don't strictly need the lock here if we examined
* all race cases, but it's a slowpath.
*/
lock_mount_hash();
if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
unlock_mount_hash();
return -EBUSY;
}
unlock_mount_hash();
if (!xchg(&mnt->mnt_expiry_mark, 1))
return -EAGAIN;
}
/*
* If we may have to abort operations to get out of this
* mount, and they will themselves hold resources we must
* allow the fs to do things. In the Unix tradition of
* 'Gee thats tricky lets do it in userspace' the umount_begin
* might fail to complete on the first run through as other tasks
* must return, and the like. That's for the mount program to worry
* about for the moment.
*/
if (flags & MNT_FORCE && sb->s_op->umount_begin) {
sb->s_op->umount_begin(sb);
}
/*
* No sense to grab the lock for this test, but test itself looks
* somewhat bogus. Suggestions for better replacement?
* Ho-hum... In principle, we might treat that as umount + switch
* to rootfs. GC would eventually take care of the old vfsmount.
* Actually it makes sense, especially if rootfs would contain a
* /reboot - static binary that would close all descriptors and
* call reboot(9). Then init(8) could umount root and exec /reboot.
*/
if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
/*
* Special case for "unmounting" root ...
* we just try to remount it readonly.
*/
if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
return -EPERM;
return do_umount_root(sb);
}
namespace_lock();
lock_mount_hash();
/* Repeat the earlier racy checks, now that we are holding the locks */
retval = -EINVAL;
if (!check_mnt(mnt))
goto out;
if (mnt->mnt.mnt_flags & MNT_LOCKED)
goto out;
if (!mnt_has_parent(mnt)) /* not the absolute root */
goto out;
event++;
if (flags & MNT_DETACH) {
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
smp_mb(); // paired with __legitimize_mnt()
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
}
out:
unlock_mount_hash();
namespace_unlock();
return retval;
}
/*
* __detach_mounts - lazily unmount all mounts on the specified dentry
*
* During unlink, rmdir, and d_drop it is possible to lose the path
* to an existing mountpoint, and wind up leaking the mount.
* detach_mounts allows lazily unmounting those mounts instead of
* leaking them.
*
* The caller may hold dentry->d_inode->i_rwsem.
*/
void __detach_mounts(struct dentry *dentry)
{
struct pinned_mountpoint mp = {};
struct mount *mnt;
guard(namespace_excl)();
guard(mount_writer)();
if (!lookup_mountpoint(dentry, &mp))
return;
event++;
while (mp.node.next) {
mnt = hlist_entry(mp.node.next, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
umount_mnt(mnt);
hlist_add_head(&mnt->mnt_umount, &unmounted);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
unpin_mountpoint(&mp);
}
/*
* Is the caller allowed to modify his namespace?
*/
bool may_mount(void)
{
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
static void warn_mandlock(void)
{
pr_warn_once("=======================================================\n"
"WARNING: The mand mount option has been deprecated and\n"
" and is ignored by this kernel. Remove the mand\n"
" option from the mount to silence this warning.\n"
"=======================================================\n");
}
static int can_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
struct super_block *sb = path->dentry->d_sb;
if (!may_mount())
return -EPERM;
if (!path_mounted(path))
return -EINVAL;
if (!check_mnt(mnt))
return -EINVAL;
if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
return -EINVAL;
if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
// caller is responsible for flags being sane
int path_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
int ret;
ret = can_umount(path, flags);
if (!ret)
ret = do_umount(mnt, flags);
/* we mustn't call path_put() as that would clear mnt_expiry_mark */
dput(path->dentry);
mntput_no_expire(mnt);
return ret;
}
static int ksys_umount(char __user *name, int flags)
{
int lookup_flags = LOOKUP_MOUNTPOINT;
struct path path;
int ret;
// basic validity checks done first
if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
return -EINVAL;
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
if (ret)
return ret;
return path_umount(&path, flags);
}
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
return ksys_umount(name, flags);
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT
/*
* The 2.0 compatible umount. No flags.
*/
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
return ksys_umount(name, 0);
}
#endif
static bool is_mnt_ns_file(struct dentry *dentry)
{
struct ns_common *ns;
/* Is this a proxy for a mount namespace? */
if (dentry->d_op != &ns_dentry_operations)
return false;
ns = d_inode(dentry)->i_private;
return ns->ops == &mntns_operations;
}
struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
return &mnt->ns;
}
struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous)
{
struct ns_common *ns;
guard(rcu)();
for (;;) {
ns = ns_tree_adjoined_rcu(mntns, previous);
if (IS_ERR(ns))
return ERR_CAST(ns);
mntns = to_mnt_ns(ns);
/*
* The last passive reference count is put with RCU
* delay so accessing the mount namespace is not just
* safe but all relevant members are still valid.
*/
if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
continue;
/*
* We need an active reference count as we're persisting
* the mount namespace and it might already be on its
* deathbed.
*/
if (!ns_ref_get(mntns))
continue;
return mntns;
}
}
struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry)
{
if (!is_mnt_ns_file(dentry))
return NULL;
return to_mnt_ns(get_proc_ns(dentry->d_inode));
}
static bool mnt_ns_loop(struct dentry *dentry)
{
/* Could bind mounting the mount namespace inode cause a
* mount namespace loop?
*/
struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry);
if (!mnt_ns)
return false;
return current->nsproxy->mnt_ns->ns.ns_id >= mnt_ns->ns.ns_id;
}
struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
int flag)
{
struct mount *res, *src_parent, *src_root_child, *src_mnt,
*dst_parent, *dst_mnt;
if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
return ERR_PTR(-EINVAL);
if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
return ERR_PTR(-EINVAL);
res = dst_mnt = clone_mnt(src_root, dentry, flag);
if (IS_ERR(dst_mnt))
return dst_mnt;
src_parent = src_root;
list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
continue;
for (src_mnt = src_root_child; src_mnt; src_mnt = next_mnt(src_mnt, src_root_child)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(src_mnt)) {
if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
/* Both unbindable and locked. */
dst_mnt = ERR_PTR(-EPERM);
goto out;
} else {
src_mnt = skip_mnt_tree(src_mnt);
continue;
}
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
src_mnt = skip_mnt_tree(src_mnt);
continue;
}
while (src_parent != src_mnt->mnt_parent) {
src_parent = src_parent->mnt_parent;
dst_mnt = dst_mnt->mnt_parent;
}
src_parent = src_mnt;
dst_parent = dst_mnt;
dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
if (IS_ERR(dst_mnt))
goto out;
lock_mount_hash();
if (src_mnt->mnt.mnt_flags & MNT_LOCKED)
dst_mnt->mnt.mnt_flags |= MNT_LOCKED;
if (unlikely(flag & CL_EXPIRE)) {
/* stick the duplicate mount on the same expiry
* list as the original if that was on one */
if (!list_empty(&src_mnt->mnt_expire))
list_add(&dst_mnt->mnt_expire,
&src_mnt->mnt_expire);
}
attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp);
unlock_mount_hash();
}
}
return res;
out:
if (res) {
lock_mount_hash();
umount_tree(res, UMOUNT_SYNC);
unlock_mount_hash();
}
return dst_mnt;
}
static inline bool extend_array(struct path **res, struct path **to_free,
unsigned n, unsigned *count, unsigned new_count)
{
struct path *p;
if (likely(n < *count))
return true;
p = kmalloc_array(new_count, sizeof(struct path), GFP_KERNEL);
if (p && *count)
memcpy(p, *res, *count * sizeof(struct path));
*count = new_count;
kfree(*to_free);
*to_free = *res = p;
return p;
}
const struct path *collect_paths(const struct path *path,
struct path *prealloc, unsigned count)
{
struct mount *root = real_mount(path->mnt);
struct mount *child;
struct path *res = prealloc, *to_free = NULL;
unsigned n = 0;
guard(namespace_shared)();
if (!check_mnt(root))
return ERR_PTR(-EINVAL);
if (!extend_array(&res, &to_free, 0, &count, 32))
return ERR_PTR(-ENOMEM);
res[n++] = *path;
list_for_each_entry(child, &root->mnt_mounts, mnt_child) {
if (!is_subdir(child->mnt_mountpoint, path->dentry))
continue;
for (struct mount *m = child; m; m = next_mnt(m, child)) {
if (!extend_array(&res, &to_free, n, &count, 2 * count))
return ERR_PTR(-ENOMEM);
res[n].mnt = &m->mnt;
res[n].dentry = m->mnt.mnt_root;
n++;
}
}
if (!extend_array(&res, &to_free, n, &count, count + 1))
return ERR_PTR(-ENOMEM);
memset(res + n, 0, (count - n) * sizeof(struct path));
for (struct path *p = res; p->mnt; p++)
path_get(p);
return res;
}
void drop_collected_paths(const struct path *paths, const struct path *prealloc)
{
for (const struct path *p = paths; p->mnt; p++)
path_put(p);
if (paths != prealloc)
kfree(paths);
}
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
void dissolve_on_fput(struct vfsmount *mnt)
{
struct mount *m = real_mount(mnt);
/*
* m used to be the root of anon namespace; if it still is one,
* we need to dissolve the mount tree and free that namespace.
* Let's try to avoid taking namespace_sem if we can determine
* that there's nothing to do without it - rcu_read_lock() is
* enough to make anon_ns_root() memory-safe and once m has
* left its namespace, it's no longer our concern, since it will
* never become a root of anon ns again.
*/
scoped_guard(rcu) {
if (!anon_ns_root(m))
return;
}
scoped_guard(namespace_excl) {
if (!anon_ns_root(m))
return;
emptied_ns = m->mnt_ns;
lock_mount_hash();
umount_tree(m, UMOUNT_CONNECTED);
unlock_mount_hash();
}
}
/* locks: namespace_shared && pinned(mnt) || mount_locked_reader */
static bool __has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
if (!is_subdir(child->mnt_mountpoint, dentry))
continue;
if (child->mnt.mnt_flags & MNT_LOCKED)
return true;
}
return false;
}
bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
guard(mount_locked_reader)();
return __has_locked_children(mnt, dentry);
}
/*
* Check that there aren't references to earlier/same mount namespaces in the
* specified subtree. Such references can act as pins for mount namespaces
* that aren't checked by the mount-cycle checking code, thereby allowing
* cycles to be made.
*
* locks: mount_locked_reader || namespace_shared && pinned(subtree)
*/
static bool check_for_nsfs_mounts(struct mount *subtree)
{
for (struct mount *p = subtree; p; p = next_mnt(p, subtree))
if (mnt_ns_loop(p->mnt.mnt_root))
return false;
return true;
}
/**
* clone_private_mount - create a private clone of a path
* @path: path to clone
*
* This creates a new vfsmount, which will be the clone of @path. The new mount
* will not be attached anywhere in the namespace and will be private (i.e.
* changes to the originating mount won't be propagated into this).
*
* This assumes caller has called or done the equivalent of may_mount().
*
* Release with mntput().
*/
struct vfsmount *clone_private_mount(const struct path *path)
{
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
guard(namespace_shared)();
if (IS_MNT_UNBINDABLE(old_mnt))
return ERR_PTR(-EINVAL);
/*
* Make sure the source mount is acceptable.
* Anything mounted in our mount namespace is allowed.
* Otherwise, it must be the root of an anonymous mount
* namespace, and we need to make sure no namespace
* loops get created.
*/
if (!check_mnt(old_mnt)) {
if (!anon_ns_root(old_mnt))
return ERR_PTR(-EINVAL);
if (!check_for_nsfs_mounts(old_mnt))
return ERR_PTR(-EINVAL);
}
if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
if (__has_locked_children(old_mnt, path->dentry))
return ERR_PTR(-EINVAL);
new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
if (IS_ERR(new_mnt))
return ERR_PTR(-EINVAL);
/* Longterm mount to be removed by kern_unmount*() */
new_mnt->mnt_ns = MNT_NS_INTERNAL;
return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
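/*
 * Illustrative sketch only (hypothetical caller, assumes may_mount() has
 * already been satisfied as documented above): clone_private_mount() hands
 * back a long-term, unattached mount, so it pairs with kern_unmount()
 * rather than a bare mntput(), per the MNT_NS_INTERNAL comment above.
 */
static inline int clone_private_mount_example(const struct path *path)
{
	struct vfsmount *m = clone_private_mount(path);

	if (IS_ERR(m))
		return PTR_ERR(m);
	/* ... resolve paths or open files below m->mnt_root ... */
	kern_unmount(m);
	return 0;
}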
static void lock_mnt_tree(struct mount *mnt)
{
struct mount *p;
for (p = mnt; p; p = next_mnt(p, mnt)) {
int flags = p->mnt.mnt_flags;
/* Don't allow unprivileged users to change mount flags */
flags |= MNT_LOCK_ATIME;
if (flags & MNT_READONLY)
flags |= MNT_LOCK_READONLY;
if (flags & MNT_NODEV)
flags |= MNT_LOCK_NODEV;
if (flags & MNT_NOSUID)
flags |= MNT_LOCK_NOSUID;
if (flags & MNT_NOEXEC)
flags |= MNT_LOCK_NOEXEC;
/* Don't allow unprivileged users to reveal what is under a mount */
if (list_empty(&p->mnt_expire) && p != mnt)
flags |= MNT_LOCKED;
p->mnt.mnt_flags = flags;
}
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
struct mount *p;
for (p = mnt; p != end; p = next_mnt(p, mnt)) {
if (p->mnt_group_id && !IS_MNT_SHARED(p))
mnt_release_group_id(p);
}
}
static int invent_group_ids(struct mount *mnt, bool recurse)
{
struct mount *p;
for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
if (!p->mnt_group_id) {
int err = mnt_alloc_group_id(p);
if (err) {
cleanup_group_ids(mnt, p);
return err;
}
}
}
return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
unsigned int max = READ_ONCE(sysctl_mount_max);
unsigned int mounts = 0;
struct mount *p;
if (ns->nr_mounts >= max)
return -ENOSPC;
max -= ns->nr_mounts;
if (ns->pending_mounts >= max)
return -ENOSPC;
max -= ns->pending_mounts;
for (p = mnt; p; p = next_mnt(p, mnt))
mounts++;
if (mounts > max)
return -ENOSPC;
ns->pending_mounts += mounts;
return 0;
}
enum mnt_tree_flags_t {
MNT_TREE_BENEATH = BIT(0),
MNT_TREE_PROPAGATION = BIT(1),
};
/**
* attach_recursive_mnt - attach a source mount tree
* @source_mnt: mount tree to be attached
* @dest: the context for mounting at the place where the tree should go
*
* NOTE: the table below explains the semantics when a source mount
* of a given type is attached to a destination mount of a given type.
* ---------------------------------------------------------------------------
* | BIND MOUNT OPERATION |
* |**************************************************************************
* | source-->| shared | private | slave | unbindable |
* | dest | | | | |
* | | | | | | |
* | v | | | | |
* |**************************************************************************
* | shared | shared (++) | shared (+) | shared(+++)| invalid |
* | | | | | |
* |non-shared| shared (+) | private | slave (*) | invalid |
* ***************************************************************************
* A bind operation clones the source mount and mounts the clone on the
* destination mount.
*
* (++) the cloned mount is propagated to all the mounts in the propagation
* tree of the destination mount and the cloned mount is added to
* the peer group of the source mount.
* (+) the cloned mount is created under the destination mount and is marked
* as shared. The cloned mount is added to the peer group of the source
* mount.
* (+++) the mount is propagated to all the mounts in the propagation tree
* of the destination mount and the cloned mount is made slave
* of the same master as that of the source mount. The cloned mount
* is marked as 'shared and slave'.
* (*) the cloned mount is made a slave of the same master as that of the
* source mount.
*
* ---------------------------------------------------------------------------
* | MOVE MOUNT OPERATION |
* |**************************************************************************
* | source-->| shared | private | slave | unbindable |
* | dest | | | | |
* | | | | | | |
* | v | | | | |
* |**************************************************************************
* | shared | shared (+) | shared (+) | shared(+++) | invalid |
* | | | | | |
* |non-shared| shared (+*) | private | slave (*) | unbindable |
* ***************************************************************************
*
* (+) the mount is moved to the destination. And is then propagated to
* all the mounts in the propagation tree of the destination mount.
* (+*) the mount is moved to the destination.
* (+++) the mount is moved to the destination and is then propagated to
* all the mounts belonging to the destination mount's propagation tree.
* the mount is marked as 'shared and slave'.
* (*) the mount continues to be a slave at the new location.
*
* if the source mount is a tree, the operations explained above are
* applied to each mount in the tree.
* Must be called without spinlocks held, since this function can sleep
* in allocations.
*
* Context: The function expects namespace_lock() to be held.
* Return: If @source_mnt was successfully attached 0 is returned.
* Otherwise a negative error code is returned.
*/
static int attach_recursive_mnt(struct mount *source_mnt,
const struct pinned_mountpoint *dest)
{
struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
struct mount *dest_mnt = dest->parent;
struct mountpoint *dest_mp = dest->mp;
HLIST_HEAD(tree_list);
struct mnt_namespace *ns = dest_mnt->mnt_ns;
struct pinned_mountpoint root = {};
struct mountpoint *shorter = NULL;
struct mount *child, *p;
struct mount *top;
struct hlist_node *n;
int err = 0;
bool moving = mnt_has_parent(source_mnt);
/*
* Preallocate a mountpoint in case the new mounts need to be
* mounted beneath mounts on the same mountpoint.
*/
for (top = source_mnt; unlikely(top->overmount); top = top->overmount) {
if (!shorter && is_mnt_ns_file(top->mnt.mnt_root))
shorter = top->mnt_mp;
}
err = get_mountpoint(top->mnt.mnt_root, &root);
if (err)
return err;
/* Is there space to add these mounts to the mount namespace? */
if (!moving) {
err = count_mounts(ns, source_mnt);
if (err)
goto out;
}
if (IS_MNT_SHARED(dest_mnt)) {
err = invent_group_ids(source_mnt, true);
if (err)
goto out;
err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
}
lock_mount_hash();
if (err)
goto out_cleanup_ids;
if (IS_MNT_SHARED(dest_mnt)) {
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
set_mnt_shared(p);
}
if (moving) {
umount_mnt(source_mnt);
mnt_notify_add(source_mnt);
/* if the mount is moved, it should no longer be expired
* automatically */
list_del_init(&source_mnt->mnt_expire);
} else {
if (source_mnt->mnt_ns) {
/* move from anon - the caller will destroy */
emptied_ns = source_mnt->mnt_ns;
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
move_from_ns(p);
}
}
mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
/*
* Now the original copy is in the same state as the secondaries -
* its root attached to mountpoint, but not hashed and all mounts
* in it are either in our namespace or in no namespace at all.
* Add the original to the list of copies and deal with the
* rest of work for all of them uniformly.
*/
hlist_add_head(&source_mnt->mnt_hash, &tree_list);
hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
struct mount *q;
hlist_del_init(&child->mnt_hash);
/* Notice when we are propagating across user namespaces */
if (child->mnt_parent->mnt_ns->user_ns != user_ns)
lock_mnt_tree(child);
q = __lookup_mnt(&child->mnt_parent->mnt,
child->mnt_mountpoint);
commit_tree(child);
if (q) {
struct mount *r = topmost_overmount(child);
struct mountpoint *mp = root.mp;
if (unlikely(shorter) && child != source_mnt)
mp = shorter;
mnt_change_mountpoint(r, mp, q);
}
}
unpin_mountpoint(&root);
unlock_mount_hash();
return 0;
out_cleanup_ids:
while (!hlist_empty(&tree_list)) {
child = hlist_entry(tree_list.first, struct mount, mnt_hash);
child->mnt_parent->mnt_ns->pending_mounts = 0;
umount_tree(child, UMOUNT_SYNC);
}
unlock_mount_hash();
cleanup_group_ids(source_mnt, NULL);
out:
ns->pending_mounts = 0;
read_seqlock_excl(&mount_lock);
unpin_mountpoint(&root);
read_sequnlock_excl(&mount_lock);
return err;
}
static inline struct mount *where_to_mount(const struct path *path,
struct dentry **dentry,
bool beneath)
{
struct mount *m;
if (unlikely(beneath)) {
m = topmost_overmount(real_mount(path->mnt));
*dentry = m->mnt_mountpoint;
return m->mnt_parent;
}
m = __lookup_mnt(path->mnt, path->dentry);
if (unlikely(m)) {
m = topmost_overmount(m);
*dentry = m->mnt.mnt_root;
return m;
}
*dentry = path->dentry;
return real_mount(path->mnt);
}
/**
* do_lock_mount - acquire environment for mounting
* @path: target path
* @res: context to set up
* @beneath: whether the intention is to mount beneath @path
*
* To mount something at given location, we need
* namespace_sem locked exclusive
* inode of dentry we are mounting on locked exclusive
* struct mountpoint for that dentry
* struct mount we are mounting on
*
* Results are stored in caller-supplied context (pinned_mountpoint);
* on success we have res->parent and res->mp pointing to parent and
* mountpoint respectively and res->node inserted into the ->m_list
* of the mountpoint, making sure the mountpoint won't disappear.
* On failure we have res->parent set to ERR_PTR(-E...), res->mp
* left NULL, res->node - empty.
* In case of success do_lock_mount returns with locks acquired (in
* proper order - inode lock nests outside of namespace_sem).
*
* A request to mount on an overmounted location is treated as "mount on
* top of whatever's overmounting it"; a request to mount beneath
* a location as "mount immediately beneath the topmost mount at that
* place".
*
* In all cases the location must not have been unmounted and the
* chosen mountpoint must be allowed to be mounted on. For "beneath"
* case we also require the location to be at the root of a mount
* that has a parent (i.e. is not a root of some namespace).
*/
static void do_lock_mount(const struct path *path,
struct pinned_mountpoint *res,
bool beneath)
{
int err;
if (unlikely(beneath) && !path_mounted(path)) {
res->parent = ERR_PTR(-EINVAL);
return;
}
do {
struct dentry *dentry, *d;
struct mount *m, *n;
scoped_guard(mount_locked_reader) {
m = where_to_mount(path, &dentry, beneath);
if (&m->mnt != path->mnt) {
mntget(&m->mnt);
dget(dentry);
}
}
inode_lock(dentry->d_inode);
namespace_lock();
// check if the chain of mounts (if any) has changed.
scoped_guard(mount_locked_reader)
n = where_to_mount(path, &d, beneath);
if (unlikely(n != m || dentry != d))
err = -EAGAIN; // something moved, retry
else if (unlikely(cant_mount(dentry) || !is_mounted(path->mnt)))
err = -ENOENT; // not to be mounted on
else if (beneath && &m->mnt == path->mnt && !m->overmount)
err = -EINVAL;
else
err = get_mountpoint(dentry, res);
if (unlikely(err)) {
res->parent = ERR_PTR(err);
namespace_unlock();
inode_unlock(dentry->d_inode);
} else {
res->parent = m;
}
/*
* Drop the temporary references. This is subtle - on success
* we are doing that under namespace_sem, which would normally
* be forbidden. However, in that case we are guaranteed that
* refcounts won't reach zero, since we know that path->mnt
* is mounted and thus all mounts reachable from it are pinned
* and stable, along with their mountpoints and roots.
*/
if (&m->mnt != path->mnt) {
dput(dentry);
mntput(&m->mnt);
}
} while (err == -EAGAIN);
}
static void __unlock_mount(struct pinned_mountpoint *m)
{
inode_unlock(m->mp->m_dentry->d_inode);
read_seqlock_excl(&mount_lock);
unpin_mountpoint(m);
read_sequnlock_excl(&mount_lock);
namespace_unlock();
}
static inline void unlock_mount(struct pinned_mountpoint *m)
{
if (!IS_ERR(m->parent))
__unlock_mount(m);
}
#define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath) \
struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
do_lock_mount((path), &mp, (beneath))
#define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false)
#define LOCK_MOUNT_EXACT(mp, path) \
struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
lock_mount_exact((path), &mp)
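/*
 * Illustrative usage sketch: the LOCK_MOUNT*() macros declare @mp in the
 * caller's scope with a __cleanup handler, so unlock_mount() runs on every
 * return path once do_lock_mount()/lock_mount_exact() has been attempted:
 *
 *	LOCK_MOUNT(mp, path);
 *	if (IS_ERR(mp.parent))
 *		return PTR_ERR(mp.parent);
 *	// mp.parent and mp.mp are valid here, locks held
 */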
static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp)
{
if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
return -EINVAL;
if (d_is_dir(mp->mp->m_dentry) !=
d_is_dir(mnt->mnt.mnt_root))
return -ENOTDIR;
return attach_recursive_mnt(mnt, mp);
}
static int may_change_propagation(const struct mount *m)
{
struct mnt_namespace *ns = m->mnt_ns;
// it must be mounted in some namespace
if (IS_ERR_OR_NULL(ns)) // is_mounted()
return -EINVAL;
// and the caller must be admin in userns of that namespace
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
/*
* Sanity check the flags to change_mnt_propagation.
*/
static int flags_to_propagation_type(int ms_flags)
{
int type = ms_flags & ~(MS_REC | MS_SILENT);
/* Fail if any non-propagation flags are set */
if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
return 0;
/* Only one propagation flag should be set */
if (!is_power_of_2(type))
return 0;
return type;
}
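/*
 * Worked example, derived from the checks above: MS_SHARED | MS_REC maps
 * to MS_SHARED, while MS_SHARED | MS_PRIVATE (two propagation bits) and a
 * request with no propagation bit at all both map to 0, which makes
 * do_change_type() below fail with -EINVAL.
 */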
/*
* recursively change the type of the mountpoint.
*/
static int do_change_type(const struct path *path, int ms_flags)
{
struct mount *m;
struct mount *mnt = real_mount(path->mnt);
int recurse = ms_flags & MS_REC;
int type;
int err;
if (!path_mounted(path))
return -EINVAL;
type = flags_to_propagation_type(ms_flags);
if (!type)
return -EINVAL;
guard(namespace_excl)();
err = may_change_propagation(mnt);
if (err)
return err;
if (type == MS_SHARED) {
err = invent_group_ids(mnt, recurse);
if (err)
return err;
}
for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
change_mnt_propagation(m, type);
return 0;
}
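/*
 * Illustrative userspace sketch (not kernel code): do_change_type() backs
 * the propagation requests issued through mount(2), e.g. the equivalents of
 * "mount --make-rprivate /" and "mount --make-shared /mnt" are
 *
 *	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);
 */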
/* may_copy_tree() - check if a mount tree can be copied
* @path: path to the mount tree to be copied
*
* This helper checks if the caller may copy the mount tree starting
* from @path->mnt. The caller may copy the mount tree under the
* following circumstances:
*
* (1) The caller is located in the mount namespace of the mount tree.
* This also implies that the mount does not belong to an anonymous
* mount namespace.
* (2) The caller tries to copy an nsfs mount referring to a mount
* namespace, i.e., the caller is trying to copy a mount namespace
* entry from nsfs.
* (3) The caller tries to copy a pidfs mount referring to a pidfd.
* (4) The caller is trying to copy a mount tree that belongs to an
* anonymous mount namespace.
*
* For that to be safe, this helper enforces that the origin mount
* namespace the anonymous mount namespace was created from is the
* same as the caller's mount namespace by comparing the sequence
* numbers.
*
* This is not strictly necessary. The current semantics of the new
* mount api enforce that the caller must be located in the same
* mount namespace as the mount tree it interacts with. Using the
* origin sequence number preserves these semantics even for
* anonymous mount namespaces. However, one could envision extending
* the api to directly operate across mount namespaces if needed.
*
* The ownership of a non-anonymous mount namespace such as the
* caller's cannot change.
* => We know that the caller's mount namespace is stable.
*
* If the origin sequence number of the anonymous mount namespace is
* the same as the sequence number of the caller's mount namespace.
* => The owning namespaces are the same.
*
* ==> The earlier capability check on the owning namespace of the
* caller's mount namespace ensures that the caller has the
* ability to copy the mount tree.
*
* Returns true if the mount tree can be copied, false otherwise.
*/
static inline bool may_copy_tree(const struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
const struct dentry_operations *d_op;
if (check_mnt(mnt))
return true;
d_op = path->dentry->d_op;
if (d_op == &ns_dentry_operations)
return true;
if (d_op == &pidfs_dentry_operations)
return true;
if (!is_mounted(path->mnt))
return false;
return check_anonymous_mnt(mnt);
}
static struct mount *__do_loopback(const struct path *old_path, int recurse)
{
struct mount *old = real_mount(old_path->mnt);
if (IS_MNT_UNBINDABLE(old))
return ERR_PTR(-EINVAL);
if (!may_copy_tree(old_path))
return ERR_PTR(-EINVAL);
if (!recurse && __has_locked_children(old, old_path->dentry))
return ERR_PTR(-EINVAL);
if (recurse)
return copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
else
return clone_mnt(old, old_path->dentry, 0);
}
/*
* do loopback mount.
*/
static int do_loopback(const struct path *path, const char *old_name,
int recurse)
{
struct path old_path __free(path_put) = {};
struct mount *mnt = NULL;
int err;
if (!old_name || !*old_name)
return -EINVAL;
err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
if (err)
return err;
if (mnt_ns_loop(old_path.dentry))
return -EINVAL;
LOCK_MOUNT(mp, path);
if (IS_ERR(mp.parent))
return PTR_ERR(mp.parent);
if (!check_mnt(mp.parent))
return -EINVAL;
mnt = __do_loopback(&old_path, recurse);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
err = graft_tree(mnt, &mp);
if (err) {
lock_mount_hash();
umount_tree(mnt, UMOUNT_SYNC);
unlock_mount_hash();
}
return err;
}
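/*
 * Illustrative userspace sketch (not kernel code): do_loopback() implements
 * bind mounts requested through mount(2), e.g.
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);		// one mount
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL);	// whole subtree
 */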
static struct mnt_namespace *get_detached_copy(const struct path *path, bool recursive)
{
struct mnt_namespace *ns, *mnt_ns = current->nsproxy->mnt_ns, *src_mnt_ns;
struct user_namespace *user_ns = mnt_ns->user_ns;
struct mount *mnt, *p;
ns = alloc_mnt_ns(user_ns, true);
if (IS_ERR(ns))
return ns;
guard(namespace_excl)();
/*
* Record the sequence number of the source mount namespace.
* This needs to hold namespace_sem to ensure that the mount
* doesn't get attached.
*/
if (is_mounted(path->mnt)) {
src_mnt_ns = real_mount(path->mnt)->mnt_ns;
if (is_anon_ns(src_mnt_ns))
ns->seq_origin = src_mnt_ns->seq_origin;
else
ns->seq_origin = src_mnt_ns->ns.ns_id;
}
mnt = __do_loopback(path, recursive);
if (IS_ERR(mnt)) {
emptied_ns = ns;
return ERR_CAST(mnt);
}
for (p = mnt; p; p = next_mnt(p, mnt)) {
mnt_add_to_ns(ns, p);
ns->nr_mounts++;
}
ns->root = mnt;
return ns;
}
static struct file *open_detached_copy(struct path *path, bool recursive)
{
struct mnt_namespace *ns = get_detached_copy(path, recursive);
struct file *file;
if (IS_ERR(ns))
return ERR_CAST(ns);
mntput(path->mnt);
path->mnt = mntget(&ns->root->mnt);
file = dentry_open(path, O_PATH, current_cred());
if (IS_ERR(file))
dissolve_on_fput(path->mnt);
else
file->f_mode |= FMODE_NEED_UNMOUNT;
return file;
}
static struct file *vfs_open_tree(int dfd, const char __user *filename, unsigned int flags)
{
int ret;
struct path path __free(path_put) = {};
int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
bool detached = flags & OPEN_TREE_CLONE;
BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
OPEN_TREE_CLOEXEC))
return ERR_PTR(-EINVAL);
if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
return ERR_PTR(-EINVAL);
if (flags & AT_NO_AUTOMOUNT)
lookup_flags &= ~LOOKUP_AUTOMOUNT;
if (flags & AT_SYMLINK_NOFOLLOW)
lookup_flags &= ~LOOKUP_FOLLOW;
if (flags & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
if (detached && !may_mount())
return ERR_PTR(-EPERM);
ret = user_path_at(dfd, filename, lookup_flags, &path);
if (unlikely(ret))
return ERR_PTR(ret);
if (detached)
return open_detached_copy(&path, flags & AT_RECURSIVE);
return dentry_open(&path, O_PATH, current_cred());
}
SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
{
int fd;
struct file *file __free(fput) = NULL;
file = vfs_open_tree(dfd, filename, flags);
if (IS_ERR(file))
return PTR_ERR(file);
fd = get_unused_fd_flags(flags & O_CLOEXEC);
if (fd < 0)
return fd;
fd_install(fd, no_free_ptr(file));
return fd;
}
/*
* Don't allow locked mount flags to be cleared.
*
* No locks need to be held here while testing the various MNT_LOCK
* flags because those flags can never be cleared once they are set.
*/
static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
{
unsigned int fl = mnt->mnt.mnt_flags;
if ((fl & MNT_LOCK_READONLY) &&
!(mnt_flags & MNT_READONLY))
return false;
if ((fl & MNT_LOCK_NODEV) &&
!(mnt_flags & MNT_NODEV))
return false;
if ((fl & MNT_LOCK_NOSUID) &&
!(mnt_flags & MNT_NOSUID))
return false;
if ((fl & MNT_LOCK_NOEXEC) &&
!(mnt_flags & MNT_NOEXEC))
return false;
if ((fl & MNT_LOCK_ATIME) &&
((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
return false;
return true;
}
static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
{
bool readonly_request = (mnt_flags & MNT_READONLY);
if (readonly_request == __mnt_is_readonly(&mnt->mnt))
return 0;
if (readonly_request)
return mnt_make_readonly(mnt);
mnt->mnt.mnt_flags &= ~MNT_READONLY;
return 0;
}
static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
{
mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
mnt->mnt.mnt_flags = mnt_flags;
touch_mnt_namespace(mnt->mnt_ns);
}
static void mnt_warn_timestamp_expiry(const struct path *mountpoint,
struct vfsmount *mnt)
{
struct super_block *sb = mnt->mnt_sb;
if (!__mnt_is_readonly(mnt) &&
(!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
(ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
char *buf, *mntpath;
buf = (char *)__get_free_page(GFP_KERNEL);
if (buf)
mntpath = d_path(mountpoint, buf, PAGE_SIZE);
else
mntpath = ERR_PTR(-ENOMEM);
if (IS_ERR(mntpath))
mntpath = "(unknown)";
pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
sb->s_type->name,
is_mounted(mnt) ? "remounted" : "mounted",
mntpath, &sb->s_time_max,
(unsigned long long)sb->s_time_max);
sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
if (buf)
free_page((unsigned long)buf);
}
}
/*
* Handle reconfiguration of the mountpoint only without alteration of the
* superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
* to mount(2).
*/
static int do_reconfigure_mnt(const struct path *path, unsigned int mnt_flags)
{
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
int ret;
if (!check_mnt(mnt))
return -EINVAL;
if (!path_mounted(path))
return -EINVAL;
if (!can_change_locked_flags(mnt, mnt_flags))
return -EPERM;
/*
* We're only checking whether the superblock is read-only not
* changing it, so only take down_read(&sb->s_umount).
*/
down_read(&sb->s_umount);
lock_mount_hash();
ret = change_mount_ro_state(mnt, mnt_flags);
if (ret == 0)
set_mount_attributes(mnt, mnt_flags);
unlock_mount_hash();
up_read(&sb->s_umount);
mnt_warn_timestamp_expiry(path, &mnt->mnt);
return ret;
}
/*
* change filesystem flags. dir should be a physical root of filesystem.
* If you've mounted a non-root directory somewhere and want to do remount
* on it - tough luck.
*/
static int do_remount(const struct path *path, int sb_flags,
int mnt_flags, void *data)
{
int err;
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
struct fs_context *fc;
if (!check_mnt(mnt))
return -EINVAL;
if (!path_mounted(path))
return -EINVAL;
if (!can_change_locked_flags(mnt, mnt_flags))
return -EPERM;
fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
if (IS_ERR(fc))
return PTR_ERR(fc);
/*
* Indicate to the filesystem that the remount request is coming
* from the legacy mount system call.
*/
fc->oldapi = true;
err = parse_monolithic_mount_data(fc, data);
if (!err) {
down_write(&sb->s_umount);
err = -EPERM;
if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
err = reconfigure_super(fc);
if (!err) {
lock_mount_hash();
set_mount_attributes(mnt, mnt_flags);
unlock_mount_hash();
}
}
up_write(&sb->s_umount);
}
mnt_warn_timestamp_expiry(path, &mnt->mnt);
put_fs_context(fc);
return err;
}
static inline int tree_contains_unbindable(struct mount *mnt)
{
struct mount *p;
for (p = mnt; p; p = next_mnt(p, mnt)) {
if (IS_MNT_UNBINDABLE(p))
return 1;
}
return 0;
}
static int do_set_group(const struct path *from_path, const struct path *to_path)
{
struct mount *from = real_mount(from_path->mnt);
struct mount *to = real_mount(to_path->mnt);
int err;
guard(namespace_excl)();
err = may_change_propagation(from);
if (err)
return err;
err = may_change_propagation(to);
if (err)
return err;
/* To and From paths should be mount roots */
if (!path_mounted(from_path))
return -EINVAL;
if (!path_mounted(to_path))
return -EINVAL;
/* Setting sharing groups is only allowed across same superblock */
if (from->mnt.mnt_sb != to->mnt.mnt_sb)
return -EINVAL;
/* From mount root should be wider than To mount root */
if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
return -EINVAL;
/* From mount should not have locked children in place of To's root */
if (__has_locked_children(from, to->mnt.mnt_root))
return -EINVAL;
/* Setting sharing groups is only allowed on private mounts */
if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
return -EINVAL;
/* From should not be private */
if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
return -EINVAL;
if (IS_MNT_SLAVE(from)) {
hlist_add_behind(&to->mnt_slave, &from->mnt_slave);
to->mnt_master = from->mnt_master;
}
if (IS_MNT_SHARED(from)) {
to->mnt_group_id = from->mnt_group_id;
list_add(&to->mnt_share, &from->mnt_share);
set_mnt_shared(to);
}
return 0;
}
/**
* path_overmounted - check if path is overmounted
* @path: path to check
*
* Check if path is overmounted, i.e., if there's a mount on top of
* @path->mnt with @path->dentry as mountpoint.
*
* Context: namespace_sem must be held at least shared.
* MUST NOT be called under lock_mount_hash() (there one should just
* call __lookup_mnt() and check if it returns NULL).
* Return: If path is overmounted true is returned, false if not.
*/
static inline bool path_overmounted(const struct path *path)
{
unsigned seq = read_seqbegin(&mount_lock);
bool no_child;
rcu_read_lock();
no_child = !__lookup_mnt(path->mnt, path->dentry);
rcu_read_unlock();
if (need_seqretry(&mount_lock, seq)) {
read_seqlock_excl(&mount_lock);
no_child = !__lookup_mnt(path->mnt, path->dentry);
read_sequnlock_excl(&mount_lock);
}
return unlikely(!no_child);
}
/*
* Check if there is a possibly empty chain of descent from p1 to p2.
* Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl).
*/
static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2)
{
while (p2 != p1 && mnt_has_parent(p2))
p2 = p2->mnt_parent;
return p2 == p1;
}
/**
* can_move_mount_beneath - check that we can mount beneath the top mount
* @mnt_from: mount we are trying to move
* @mnt_to: mount under which to mount
* @mp: mountpoint of @mnt_to
*
* - Make sure that nothing can be mounted beneath the caller's current
* root or the rootfs of the namespace.
* - Make sure that the caller can unmount the topmost mount ensuring
* that the caller could reveal the underlying mountpoint.
* - Ensure that nothing has been mounted on top of @mnt_from before we
* grabbed @namespace_sem to avoid creating pointless shadow mounts.
* - Prevent mounting beneath a mount if the propagation relationship
* between the source mount, parent mount, and top mount would lead to
* nonsensical mount trees.
*
* Context: This function expects namespace_lock() to be held.
* Return: On success 0, and on error a negative error code is returned.
*/
static int can_move_mount_beneath(const struct mount *mnt_from,
const struct mount *mnt_to,
const struct mountpoint *mp)
{
struct mount *parent_mnt_to = mnt_to->mnt_parent;
if (IS_MNT_LOCKED(mnt_to))
return -EINVAL;
/* Avoid creating shadow mounts during mount propagation. */
if (mnt_from->overmount)
return -EINVAL;
/*
* Mounting beneath the rootfs only makes sense when the
* semantics of pivot_root(".", ".") are used.
*/
if (&mnt_to->mnt == current->fs->root.mnt)
return -EINVAL;
if (parent_mnt_to == current->nsproxy->mnt_ns->root)
return -EINVAL;
if (mount_is_ancestor(mnt_to, mnt_from))
return -EINVAL;
/*
* If the parent mount propagates to the child mount this would
* mean mounting @mnt_from on @mnt_to->mnt_parent and then
* propagating a copy @c of @mnt_from on top of @mnt_to. This
* defeats the whole purpose of mounting beneath another mount.
*/
if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
return -EINVAL;
/*
* If @mnt_to->mnt_parent propagates to @mnt_from this would
* mean propagating a copy @c of @mnt_from on top of @mnt_from.
* Afterwards @mnt_from would be mounted on top of
* @mnt_to->mnt_parent and @mnt_to would be unmounted from
* @mnt->mnt_parent and remounted on @mnt_from. But since @c is
* already mounted on @mnt_from, @mnt_to would ultimately be
* remounted on top of @c. Afterwards, @mnt_from would be
* covered by a copy @c of @mnt_from and @c would be covered by
* @mnt_from itself. This defeats the whole purpose of mounting
* @mnt_from beneath @mnt_to.
*/
if (check_mnt(mnt_from) &&
propagation_would_overmount(parent_mnt_to, mnt_from, mp))
return -EINVAL;
return 0;
}
/* may_use_mount() - check if a mount tree can be used
* @mnt: vfsmount to be used
*
* This helper checks if the caller may use the mount tree starting
* from @path->mnt. The caller may use the mount tree under the
* following circumstances:
*
* (1) The caller is located in the mount namespace of the mount tree.
* This also implies that the mount does not belong to an anonymous
* mount namespace.
* (2) The caller is trying to use a mount tree that belongs to an
* anonymous mount namespace.
*
* For that to be safe, this helper enforces that the origin mount
* namespace the anonymous mount namespace was created from is the
* same as the caller's mount namespace by comparing the sequence
* numbers.
*
* The ownership of a non-anonymous mount namespace such as the
* caller's cannot change.
* => We know that the caller's mount namespace is stable.
*
* If the origin sequence number of the anonymous mount namespace is
* the same as the sequence number of the caller's mount namespace.
* => The owning namespaces are the same.
*
* ==> The earlier capability check on the owning namespace of the
* caller's mount namespace ensures that the caller has the
* ability to use the mount tree.
*
* Returns true if the mount tree can be used, false otherwise.
*/
static inline bool may_use_mount(struct mount *mnt)
{
if (check_mnt(mnt))
return true;
/*
* Make sure that no one unmounted the target path or somehow
* managed to get their hands on something purely kernel
* internal.
*/
if (!is_mounted(&mnt->mnt))
return false;
return check_anonymous_mnt(mnt);
}
static int do_move_mount(const struct path *old_path,
const struct path *new_path,
enum mnt_tree_flags_t flags)
{
struct mount *old = real_mount(old_path->mnt);
int err;
bool beneath = flags & MNT_TREE_BENEATH;
if (!path_mounted(old_path))
return -EINVAL;
if (d_is_dir(new_path->dentry) != d_is_dir(old_path->dentry))
return -EINVAL;
LOCK_MOUNT_MAYBE_BENEATH(mp, new_path, beneath);
if (IS_ERR(mp.parent))
return PTR_ERR(mp.parent);
if (check_mnt(old)) {
/* if the source is in our namespace... */
/* ... it should be detachable from parent */
if (!mnt_has_parent(old) || IS_MNT_LOCKED(old))
return -EINVAL;
/* ... which should not be shared */
if (IS_MNT_SHARED(old->mnt_parent))
return -EINVAL;
/* ... and the target should be in our namespace */
if (!check_mnt(mp.parent))
return -EINVAL;
} else {
/*
* otherwise the source must be the root of some anon namespace.
*/
if (!anon_ns_root(old))
return -EINVAL;
/*
* Bail out early if the target is within the same namespace -
* subsequent checks would've rejected that, but they lose
* some corner cases if we check it early.
*/
if (old->mnt_ns == mp.parent->mnt_ns)
return -EINVAL;
/*
* Target should be either in our namespace or in an acceptable
* anon namespace, sensu check_anonymous_mnt().
*/
if (!may_use_mount(mp.parent))
return -EINVAL;
}
if (beneath) {
struct mount *over = real_mount(new_path->mnt);
if (mp.parent != over->mnt_parent)
over = mp.parent->overmount;
err = can_move_mount_beneath(old, over, mp.mp);
if (err)
return err;
}
/*
* Don't move a mount tree containing unbindable mounts to a destination
* mount which is shared.
*/
if (IS_MNT_SHARED(mp.parent) && tree_contains_unbindable(old))
return -EINVAL;
if (!check_for_nsfs_mounts(old))
return -ELOOP;
if (mount_is_ancestor(old, mp.parent))
return -ELOOP;
return attach_recursive_mnt(old, &mp);
}
static int do_move_mount_old(const struct path *path, const char *old_name)
{
struct path old_path __free(path_put) = {};
int err;
if (!old_name || !*old_name)
return -EINVAL;
err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
if (err)
return err;
return do_move_mount(&old_path, path, 0);
}
/*
* add a mount into a namespace's mount tree
*/
static int do_add_mount(struct mount *newmnt, const struct pinned_mountpoint *mp,
int mnt_flags)
{
struct mount *parent = mp->parent;
if (IS_ERR(parent))
return PTR_ERR(parent);
mnt_flags &= ~MNT_INTERNAL_FLAGS;
if (unlikely(!check_mnt(parent))) {
/* that's acceptable only for automounts done in private ns */
if (!(mnt_flags & MNT_SHRINKABLE))
return -EINVAL;
/* ... and for those we'd better have mountpoint still alive */
if (!parent->mnt_ns)
return -EINVAL;
}
/* Refuse the same filesystem on the same mount point */
if (parent->mnt.mnt_sb == newmnt->mnt.mnt_sb &&
parent->mnt.mnt_root == mp->mp->m_dentry)
return -EBUSY;
if (d_is_symlink(newmnt->mnt.mnt_root))
return -EINVAL;
newmnt->mnt.mnt_flags = mnt_flags;
return graft_tree(newmnt, mp);
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
/*
* Create a new mount using a superblock configuration and request it
* be added to the namespace tree.
*/
static int do_new_mount_fc(struct fs_context *fc, const struct path *mountpoint,
unsigned int mnt_flags)
{
struct super_block *sb;
struct vfsmount *mnt __free(mntput) = fc_mount(fc);
int error;
if (IS_ERR(mnt))
return PTR_ERR(mnt);
sb = fc->root->d_sb;
error = security_sb_kern_mount(sb);
if (unlikely(error))
return error;
if (unlikely(mount_too_revealing(sb, &mnt_flags))) {
errorfcp(fc, "VFS", "Mount too revealing");
return -EPERM;
}
mnt_warn_timestamp_expiry(mountpoint, mnt);
LOCK_MOUNT(mp, mountpoint);
error = do_add_mount(real_mount(mnt), &mp, mnt_flags);
if (!error)
retain_and_null_ptr(mnt); // consumed on success
return error;
}
/*
* create a new mount for userspace and request it to be added into the
* namespace's tree
*/
static int do_new_mount(const struct path *path, const char *fstype,
int sb_flags, int mnt_flags,
const char *name, void *data)
{
struct file_system_type *type;
struct fs_context *fc;
const char *subtype = NULL;
int err = 0;
if (!fstype)
return -EINVAL;
type = get_fs_type(fstype);
if (!type)
return -ENODEV;
if (type->fs_flags & FS_HAS_SUBTYPE) {
subtype = strchr(fstype, '.');
if (subtype) {
subtype++;
if (!*subtype) {
put_filesystem(type);
return -EINVAL;
}
}
}
fc = fs_context_for_mount(type, sb_flags);
put_filesystem(type);
if (IS_ERR(fc))
return PTR_ERR(fc);
/*
* Indicate to the filesystem that the mount request is coming
* from the legacy mount system call.
*/
fc->oldapi = true;
if (subtype)
err = vfs_parse_fs_string(fc, "subtype", subtype);
if (!err && name)
err = vfs_parse_fs_string(fc, "source", name);
if (!err)
err = parse_monolithic_mount_data(fc, data);
if (!err && !mount_capable(fc))
err = -EPERM;
if (!err)
err = do_new_mount_fc(fc, path, mnt_flags);
put_fs_context(fc);
return err;
}
static void lock_mount_exact(const struct path *path,
struct pinned_mountpoint *mp)
{
struct dentry *dentry = path->dentry;
int err;
inode_lock(dentry->d_inode);
namespace_lock();
if (unlikely(cant_mount(dentry)))
err = -ENOENT;
else if (path_overmounted(path))
err = -EBUSY;
else
err = get_mountpoint(dentry, mp);
if (unlikely(err)) {
namespace_unlock();
inode_unlock(dentry->d_inode);
mp->parent = ERR_PTR(err);
} else {
mp->parent = real_mount(path->mnt);
}
}
int finish_automount(struct vfsmount *__m, const struct path *path)
{
struct vfsmount *m __free(mntput) = __m;
struct mount *mnt;
int err;
if (!m)
return 0;
if (IS_ERR(m))
return PTR_ERR(m);
mnt = real_mount(m);
if (m->mnt_root == path->dentry)
return -ELOOP;
/*
* we don't want to use LOCK_MOUNT() - in this case finding something
* that overmounts our mountpoint means "quietly drop what we've
* got", not "try to mount it on top".
*/
LOCK_MOUNT_EXACT(mp, path);
if (mp.parent == ERR_PTR(-EBUSY))
return 0;
err = do_add_mount(mnt, &mp, path->mnt->mnt_flags | MNT_SHRINKABLE);
if (likely(!err))
retain_and_null_ptr(m);
return err;
}
/**
* mnt_set_expiry - Put a mount on an expiration list
* @mnt: The mount to list.
* @expiry_list: The list to add the mount to.
*/
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
guard(mount_locked_reader)();
list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
}
EXPORT_SYMBOL(mnt_set_expiry);
/*
* process a list of expirable mountpoints with the intent of discarding any
* mountpoints that aren't in use and haven't been touched since last we came
* here
*/
void mark_mounts_for_expiry(struct list_head *mounts)
{
struct mount *mnt, *next;
LIST_HEAD(graveyard);
if (list_empty(mounts))
return;
guard(namespace_excl)();
guard(mount_writer)();
/* extract from the expiration list every vfsmount that matches the
* following criteria:
* - already mounted
* - only referenced by its parent vfsmount
* - still marked for expiry (marked on the last call here; marks are
* cleared by mntput())
*/
list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
if (!is_mounted(&mnt->mnt))
continue;
if (!xchg(&mnt->mnt_expiry_mark, 1) ||
propagate_mount_busy(mnt, 1))
continue;
list_move(&mnt->mnt_expire, &graveyard);
}
while (!list_empty(&graveyard)) {
mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
touch_mnt_namespace(mnt->mnt_ns);
umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
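/*
 * Illustrative sketch (hypothetical filesystem code): the expected automount
 * expiry pattern.  A filesystem keeps its own expiry list, puts each
 * shrinkable automount on it, and calls mark_mounts_for_expiry() from a
 * periodic worker; a mount is torn down on the second pass if nothing
 * cleared its expiry mark in between.
 *
 *	static LIST_HEAD(example_expiry_list);
 *
 *	// after setting up the automount @mnt:
 *	mnt_set_expiry(mnt, &example_expiry_list);
 *
 *	// from a periodic worker:
 *	mark_mounts_for_expiry(&example_expiry_list);
 */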
/*
* Ripoff of 'select_parent()'
*
* search the list of submounts for a given mountpoint, and move any
* shrinkable submounts to the 'graveyard' list.
*/
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
struct mount *this_parent = parent;
struct list_head *next;
int found = 0;
repeat:
next = this_parent->mnt_mounts.next;
resume:
while (next != &this_parent->mnt_mounts) {
struct list_head *tmp = next;
struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
next = tmp->next;
if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
continue;
/*
* Descend a level if the d_mounts list is non-empty.
*/
if (!list_empty(&mnt->mnt_mounts)) {
this_parent = mnt;
goto repeat;
}
if (!propagate_mount_busy(mnt, 1)) {
list_move_tail(&mnt->mnt_expire, graveyard);
found++;
}
}
/*
* All done at this level ... ascend and resume the search
*/
if (this_parent != parent) {
next = this_parent->mnt_child.next;
this_parent = this_parent->mnt_parent;
goto resume;
}
return found;
}
/*
* process a list of expirable mountpoints with the intent of discarding any
* submounts of a specific parent mountpoint
*
* mount_lock must be held for write
*/
static void shrink_submounts(struct mount *mnt)
{
LIST_HEAD(graveyard);
struct mount *m;
/* extract submounts of 'mountpoint' from the expiration list */
while (select_submounts(mnt, &graveyard)) {
while (!list_empty(&graveyard)) {
m = list_first_entry(&graveyard, struct mount,
mnt_expire);
touch_mnt_namespace(m->mnt_ns);
umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
}
}
static void *copy_mount_options(const void __user * data)
{
char *copy;
unsigned left, offset;
if (!data)
return NULL;
copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!copy)
return ERR_PTR(-ENOMEM);
left = copy_from_user(copy, data, PAGE_SIZE);
/*
* Not all architectures have an exact copy_from_user(). Resort to
* byte at a time.
*/
offset = PAGE_SIZE - left;
while (left) {
char c;
if (get_user(c, (const char __user *)data + offset))
break;
copy[offset] = c;
left--;
offset++;
}
if (left == PAGE_SIZE) {
kfree(copy);
return ERR_PTR(-EFAULT);
}
return copy;
}
static char *copy_mount_string(const void __user *data)
{
return data ? strndup_user(data, PATH_MAX) : NULL;
}
/*
* Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
* be given to the mount() call (ie: read-only, no-dev, no-suid etc).
*
* data is a (void *) that can point to any structure up to
* PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
* information (or be NULL).
*
* Pre-0.97 versions of mount() didn't have a flags word.
* When the flags word was introduced its top half was required
* to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
int path_mount(const char *dev_name, const struct path *path,
const char *type_page, unsigned long flags, void *data_page)
{
unsigned int mnt_flags = 0, sb_flags;
int ret;
/* Discard magic */
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
flags &= ~MS_MGC_MSK;
/* Basic sanity checks */
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;
if (flags & MS_NOUSER)
return -EINVAL;
ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
if (ret)
return ret;
if (!may_mount())
return -EPERM;
if (flags & SB_MANDLOCK)
warn_mandlock();
/* Default to relatime unless overridden */
if (!(flags & MS_NOATIME))
mnt_flags |= MNT_RELATIME;
/* Separate the per-mountpoint flags */
if (flags & MS_NOSUID)
mnt_flags |= MNT_NOSUID;
if (flags & MS_NODEV)
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
if (flags & MS_NOATIME)
mnt_flags |= MNT_NOATIME;
if (flags & MS_NODIRATIME)
mnt_flags |= MNT_NODIRATIME;
if (flags & MS_STRICTATIME)
mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
if (flags & MS_RDONLY)
mnt_flags |= MNT_READONLY;
if (flags & MS_NOSYMFOLLOW)
mnt_flags |= MNT_NOSYMFOLLOW;
/* The default atime for remount is preservation */
if ((flags & MS_REMOUNT) &&
((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
MS_STRICTATIME)) == 0)) {
mnt_flags &= ~MNT_ATIME_MASK;
mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
}
sb_flags = flags & (SB_RDONLY |
SB_SYNCHRONOUS |
SB_MANDLOCK |
SB_DIRSYNC |
SB_SILENT |
SB_POSIXACL |
SB_LAZYTIME |
SB_I_VERSION);
if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
return do_reconfigure_mnt(path, mnt_flags);
if (flags & MS_REMOUNT)
return do_remount(path, sb_flags, mnt_flags, data_page);
if (flags & MS_BIND)
return do_loopback(path, dev_name, flags & MS_REC);
if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
return do_change_type(path, flags);
if (flags & MS_MOVE)
return do_move_mount_old(path, dev_name);
return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
data_page);
}
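/*
 * Illustrative sketch (hypothetical kernel-internal caller): mounting on an
 * already-resolved path, the same way do_mount() below does after resolving
 * a userspace pathname.
 *
 *	struct path where;
 *	int err = kern_path("/tmp", LOOKUP_FOLLOW, &where);
 *	if (!err) {
 *		err = path_mount("none", &where, "tmpfs", MS_NOSUID, NULL);
 *		path_put(&where);
 *	}
 */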
int do_mount(const char *dev_name, const char __user *dir_name,
const char *type_page, unsigned long flags, void *data_page)
{
struct path path __free(path_put) = {};
int ret;
ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
if (ret)
return ret;
return path_mount(dev_name, &path, type_page, flags, data_page);
}
static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
{
return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
}
static void dec_mnt_namespaces(struct ucounts *ucounts)
{
dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
}
static void free_mnt_ns(struct mnt_namespace *ns)
{
if (!is_anon_ns(ns))
ns_common_free(ns);
dec_mnt_namespaces(ns->ucounts);
mnt_ns_tree_remove(ns);
}
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
{
struct mnt_namespace *new_ns;
struct ucounts *ucounts;
int ret;
ucounts = inc_mnt_namespaces(user_ns);
if (!ucounts)
return ERR_PTR(-ENOSPC);
new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
if (!new_ns) {
dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
}
if (anon)
ret = ns_common_init_inum(new_ns, MNT_NS_ANON_INO);
else
ret = ns_common_init(new_ns);
if (ret) {
kfree(new_ns);
dec_mnt_namespaces(ucounts);
return ERR_PTR(ret);
}
if (!anon)
ns_tree_gen_id(&new_ns->ns);
refcount_set(&new_ns->passive, 1);
new_ns->mounts = RB_ROOT;
init_waitqueue_head(&new_ns->poll);
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
return new_ns;
}
__latent_entropy
struct mnt_namespace *copy_mnt_ns(u64 flags, struct mnt_namespace *ns,
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
struct vfsmount *rootmnt __free(mntput) = NULL;
struct vfsmount *pwdmnt __free(mntput) = NULL;
struct mount *p, *q;
struct mount *old;
struct mount *new;
int copy_flags;
BUG_ON(!ns);
if (likely(!(flags & CLONE_NEWNS))) {
get_mnt_ns(ns);
return ns;
}
old = ns->root;
new_ns = alloc_mnt_ns(user_ns, false);
if (IS_ERR(new_ns))
return new_ns;
guard(namespace_excl)();
/* First pass: copy the tree topology */
copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
if (user_ns != ns->user_ns)
copy_flags |= CL_SLAVE;
new = copy_tree(old, old->mnt.mnt_root, copy_flags);
if (IS_ERR(new)) {
emptied_ns = new_ns;
return ERR_CAST(new);
}
if (user_ns != ns->user_ns) {
guard(mount_writer)();
lock_mnt_tree(new);
}
new_ns->root = new;
/*
* Second pass: switch the tsk->fs->* elements and mark new vfsmounts
* as belonging to new namespace. We have already acquired a private
* fs_struct, so tsk->fs->lock is not needed.
*/
p = old;
q = new;
while (p) {
mnt_add_to_ns(new_ns, q);
new_ns->nr_mounts++;
if (new_fs) {
if (&p->mnt == new_fs->root.mnt) {
new_fs->root.mnt = mntget(&q->mnt);
rootmnt = &p->mnt;
}
if (&p->mnt == new_fs->pwd.mnt) {
new_fs->pwd.mnt = mntget(&q->mnt);
pwdmnt = &p->mnt;
}
}
p = next_mnt(p, old);
q = next_mnt(q, new);
if (!q)
break;
// an mntns binding we'd skipped?
while (p->mnt.mnt_root != q->mnt.mnt_root)
p = next_mnt(skip_mnt_tree(p), old);
}
ns_tree_add_raw(new_ns);
return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *m, const char *name)
{
struct mount *mnt = real_mount(m);
struct mnt_namespace *ns;
struct super_block *s;
struct path path;
int err;
ns = alloc_mnt_ns(&init_user_ns, true);
if (IS_ERR(ns)) {
mntput(m);
return ERR_CAST(ns);
}
ns->root = mnt;
ns->nr_mounts++;
mnt_add_to_ns(ns, mnt);
err = vfs_path_lookup(m->mnt_root, m,
name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
put_mnt_ns(ns);
if (err)
return ERR_PTR(err);
/* trade a vfsmount reference for active sb one */
s = path.mnt->mnt_sb;
atomic_inc(&s->s_active);
mntput(path.mnt);
/* lock the sucker */
down_write(&s->s_umount);
/* ... and return the root of (sub)tree on it */
return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
char __user *, type, unsigned long, flags, void __user *, data)
{
int ret;
char *kernel_type;
char *kernel_dev;
void *options;
kernel_type = copy_mount_string(type);
ret = PTR_ERR(kernel_type);
if (IS_ERR(kernel_type))
goto out_type;
kernel_dev = copy_mount_string(dev_name);
ret = PTR_ERR(kernel_dev);
if (IS_ERR(kernel_dev))
goto out_dev;
options = copy_mount_options(data);
ret = PTR_ERR(options);
if (IS_ERR(options))
goto out_data;
ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
kfree(options);
out_data:
kfree(kernel_dev);
out_dev:
kfree(kernel_type);
out_type:
return ret;
}
#define FSMOUNT_VALID_FLAGS \
(MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
MOUNT_ATTR_NOSYMFOLLOW)
#define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
#define MOUNT_SETATTR_PROPAGATION_FLAGS \
(MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
{
unsigned int mnt_flags = 0;
if (attr_flags & MOUNT_ATTR_RDONLY)
mnt_flags |= MNT_READONLY;
if (attr_flags & MOUNT_ATTR_NOSUID)
mnt_flags |= MNT_NOSUID;
if (attr_flags & MOUNT_ATTR_NODEV)
mnt_flags |= MNT_NODEV;
if (attr_flags & MOUNT_ATTR_NOEXEC)
mnt_flags |= MNT_NOEXEC;
if (attr_flags & MOUNT_ATTR_NODIRATIME)
mnt_flags |= MNT_NODIRATIME;
if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
mnt_flags |= MNT_NOSYMFOLLOW;
return mnt_flags;
}
/*
* Create a kernel mount representation for a new, prepared superblock
* (specified by fs_fd) and attach to an open_tree-like file descriptor.
*/
SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
unsigned int, attr_flags)
{
struct mnt_namespace *ns;
struct fs_context *fc;
struct file *file;
struct path newmount;
struct mount *mnt;
unsigned int mnt_flags = 0;
long ret;
if (!may_mount())
return -EPERM;
if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
return -EINVAL;
if (attr_flags & ~FSMOUNT_VALID_FLAGS)
return -EINVAL;
mnt_flags = attr_flags_to_mnt_flags(attr_flags);
switch (attr_flags & MOUNT_ATTR__ATIME) {
case MOUNT_ATTR_STRICTATIME:
break;
case MOUNT_ATTR_NOATIME:
mnt_flags |= MNT_NOATIME;
break;
case MOUNT_ATTR_RELATIME:
mnt_flags |= MNT_RELATIME;
break;
default:
return -EINVAL;
}
CLASS(fd, f)(fs_fd);
if (fd_empty(f))
return -EBADF;
if (fd_file(f)->f_op != &fscontext_fops)
return -EINVAL;
fc = fd_file(f)->private_data;
ret = mutex_lock_interruptible(&fc->uapi_mutex);
if (ret < 0)
return ret;
/* There must be a valid superblock or we can't mount it */
ret = -EINVAL;
if (!fc->root)
goto err_unlock;
ret = -EPERM;
if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
errorfcp(fc, "VFS", "Mount too revealing");
goto err_unlock;
}
ret = -EBUSY;
if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
goto err_unlock;
if (fc->sb_flags & SB_MANDLOCK)
warn_mandlock();
newmount.mnt = vfs_create_mount(fc);
if (IS_ERR(newmount.mnt)) {
ret = PTR_ERR(newmount.mnt);
goto err_unlock;
}
newmount.dentry = dget(fc->root);
newmount.mnt->mnt_flags = mnt_flags;
/* We've done the mount bit - now move the file context into more or
* less the same state as if we'd done an fspick(). We don't want to
* do any memory allocation or anything like that at this point as we
* don't want to have to handle any errors incurred.
*/
vfs_clean_context(fc);
ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
if (IS_ERR(ns)) {
ret = PTR_ERR(ns);
goto err_path;
}
mnt = real_mount(newmount.mnt);
ns->root = mnt;
ns->nr_mounts = 1;
mnt_add_to_ns(ns, mnt);
mntget(newmount.mnt);
/* Attach to an apparent O_PATH fd with a note that we need to unmount
* it, not just simply put it.
*/
file = dentry_open(&newmount, O_PATH, fc->cred);
if (IS_ERR(file)) {
dissolve_on_fput(newmount.mnt);
ret = PTR_ERR(file);
goto err_path;
}
file->f_mode |= FMODE_NEED_UNMOUNT;
ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
if (ret >= 0)
fd_install(ret, file);
else
fput(file);
err_path:
path_put(&newmount);
err_unlock:
mutex_unlock(&fc->uapi_mutex);
return ret;
}
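/*
 * Illustrative userspace sketch (not kernel code, error handling omitted,
 * assumes libc exposes the new mount API wrappers): the fsopen()/fsconfig()
 * sequence that ends in fsmount() here, followed by move_mount() to attach
 * the result.
 *
 *	int fs = fsopen("ext4", FSOPEN_CLOEXEC);
 *	fsconfig(fs, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fs, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = fsmount(fs, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */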
static inline int vfs_move_mount(const struct path *from_path,
const struct path *to_path,
enum mnt_tree_flags_t mflags)
{
int ret;
ret = security_move_mount(from_path, to_path);
if (ret)
return ret;
if (mflags & MNT_TREE_PROPAGATION)
return do_set_group(from_path, to_path);
return do_move_mount(from_path, to_path, mflags);
}
/*
* Move a mount from one place to another. In combination with
* fsopen()/fsmount() this is used to install a new mount and in combination
* with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
* a mount subtree.
*
* Note the flags value is a combination of MOVE_MOUNT_* flags.
*/
SYSCALL_DEFINE5(move_mount,
int, from_dfd, const char __user *, from_pathname,
int, to_dfd, const char __user *, to_pathname,
unsigned int, flags)
{
struct path to_path __free(path_put) = {};
struct path from_path __free(path_put) = {};
struct filename *to_name __free(putname) = NULL;
struct filename *from_name __free(putname) = NULL;
unsigned int lflags, uflags;
enum mnt_tree_flags_t mflags = 0;
int ret = 0;
if (!may_mount())
return -EPERM;
if (flags & ~MOVE_MOUNT__MASK)
return -EINVAL;
if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) ==
(MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
return -EINVAL;
if (flags & MOVE_MOUNT_SET_GROUP)
mflags |= MNT_TREE_PROPAGATION;
if (flags & MOVE_MOUNT_BENEATH)
mflags |= MNT_TREE_BENEATH;
uflags = 0;
if (flags & MOVE_MOUNT_T_EMPTY_PATH)
uflags = AT_EMPTY_PATH;
to_name = getname_maybe_null(to_pathname, uflags);
if (IS_ERR(to_name))
return PTR_ERR(to_name);
if (!to_name && to_dfd >= 0) {
CLASS(fd_raw, f_to)(to_dfd);
if (fd_empty(f_to))
return -EBADF;
to_path = fd_file(f_to)->f_path;
path_get(&to_path);
} else {
lflags = 0;
if (flags & MOVE_MOUNT_T_SYMLINKS)
lflags |= LOOKUP_FOLLOW;
if (flags & MOVE_MOUNT_T_AUTOMOUNTS)
lflags |= LOOKUP_AUTOMOUNT;
ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL);
if (ret)
return ret;
}
uflags = 0;
if (flags & MOVE_MOUNT_F_EMPTY_PATH)
uflags = AT_EMPTY_PATH;
from_name = getname_maybe_null(from_pathname, uflags);
if (IS_ERR(from_name))
return PTR_ERR(from_name);
if (!from_name && from_dfd >= 0) {
CLASS(fd_raw, f_from)(from_dfd);
if (fd_empty(f_from))
return -EBADF;
return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags);
}
lflags = 0;
if (flags & MOVE_MOUNT_F_SYMLINKS)
lflags |= LOOKUP_FOLLOW;
if (flags & MOVE_MOUNT_F_AUTOMOUNTS)
lflags |= LOOKUP_AUTOMOUNT;
ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL);
if (ret)
return ret;
return vfs_move_mount(&from_path, &to_path, mflags);
}
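/*
 * Illustrative userspace sketch (not kernel code): the
 * open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) + move_mount() combination
 * mentioned above, copying a subtree and attaching the copy elsewhere.
 *
 *	int fd = open_tree(AT_FDCWD, "/src",
 *			   OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);
 *	move_mount(fd, "", AT_FDCWD, "/dst", MOVE_MOUNT_F_EMPTY_PATH);
 *	close(fd);
 */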
/*
* Return true if path is reachable from root
*
* locks: mount_locked_reader || namespace_shared && is_mounted(mnt)
*/
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
const struct path *root)
{
while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
dentry = mnt->mnt_mountpoint;
mnt = mnt->mnt_parent;
}
return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}
bool path_is_under(const struct path *path1, const struct path *path2)
{
guard(mount_locked_reader)();
return is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
}
EXPORT_SYMBOL(path_is_under);
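/*
 * Illustrative sketch (hypothetical caller): path_is_under() answers
 * "would @path1 still be reachable if @path2 were the root?", e.g.
 *
 *	struct path root;
 *	get_fs_root(current->fs, &root);
 *	bool inside = path_is_under(&file->f_path, &root);
 *	path_put(&root);
 */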
/*
* pivot_root Semantics:
* Moves the root file system of the current process to the directory put_old,
* makes new_root as the new root file system of the current process, and sets
* root/cwd of all processes which had them on the current root to new_root.
*
* Restrictions:
* The new_root and put_old must be directories, and must not be on the
* same file system as the current process root. The put_old must be
* underneath new_root, i.e. adding a non-zero number of /.. to the string
* pointed to by put_old must yield the same directory as new_root. No other
* file system may be mounted on put_old. After all, new_root is a mountpoint.
*
* Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
* See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
* in this situation.
*
* Notes:
* - we don't move root/cwd if they are not at the root (reason: if something
* cared enough to change them, it's probably wrong to force them elsewhere)
* - it's okay to pick a root that isn't the root of a file system, e.g.
* /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
* though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
* first.
*/
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
const char __user *, put_old)
{
struct path new __free(path_put) = {};
struct path old __free(path_put) = {};
struct path root __free(path_put) = {};
struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
int error;
if (!may_mount())
return -EPERM;
error = user_path_at(AT_FDCWD, new_root,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
if (error)
return error;
error = user_path_at(AT_FDCWD, put_old,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
if (error)
return error;
error = security_sb_pivotroot(&old, &new);
if (error)
return error;
get_fs_root(current->fs, &root);
LOCK_MOUNT(old_mp, &old);
old_mnt = old_mp.parent;
if (IS_ERR(old_mnt))
return PTR_ERR(old_mnt);
new_mnt = real_mount(new.mnt);
root_mnt = real_mount(root.mnt);
ex_parent = new_mnt->mnt_parent;
root_parent = root_mnt->mnt_parent;
if (IS_MNT_SHARED(old_mnt) ||
IS_MNT_SHARED(ex_parent) ||
IS_MNT_SHARED(root_parent))
return -EINVAL;
if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
return -EINVAL;
if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
return -EINVAL;
if (d_unlinked(new.dentry))
return -ENOENT;
if (new_mnt == root_mnt || old_mnt == root_mnt)
return -EBUSY; /* loop, on the same file system */
if (!path_mounted(&root))
return -EINVAL; /* not a mountpoint */
if (!mnt_has_parent(root_mnt))
return -EINVAL; /* absolute root */
if (!path_mounted(&new))
return -EINVAL; /* not a mountpoint */
if (!mnt_has_parent(new_mnt))
return -EINVAL; /* absolute root */
/* make sure we can reach put_old from new_root */
if (!is_path_reachable(old_mnt, old_mp.mp->m_dentry, &new))
return -EINVAL;
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
return -EINVAL;
lock_mount_hash();
umount_mnt(new_mnt);
if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
new_mnt->mnt.mnt_flags |= MNT_LOCKED;
root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
}
/* mount new_root on / */
attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp);
umount_mnt(root_mnt);
/* mount old root on put_old */
attach_mnt(root_mnt, old_mnt, old_mp.mp);
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
unlock_mount_hash();
mnt_notify_add(root_mnt);
mnt_notify_add(new_mnt);
chroot_fs_refs(&root, &new);
return 0;
}
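/*
 * Illustrative sketch (not part of the kernel): a userspace sequence that
 * satisfies the restrictions documented above. The paths are hypothetical,
 * and glibc provides no pivot_root() wrapper, so syscall(2) is used:
 *
 * mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 * mount("/new_root", "/new_root", NULL, MS_BIND, NULL);
 * mkdir("/new_root/put_old", 0700);
 * syscall(SYS_pivot_root, "/new_root", "/new_root/put_old");
 * chdir("/");
 * umount2("/put_old", MNT_DETACH);
 *
 * The bind mount makes new_root a mountpoint, put_old ends up underneath
 * new_root, and the old root is detached once it is no longer needed.
 */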
static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
{
unsigned int flags = mnt->mnt.mnt_flags;
/* flags to clear */
flags &= ~kattr->attr_clr;
/* flags to raise */
flags |= kattr->attr_set;
return flags;
}
static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
struct vfsmount *m = &mnt->mnt;
struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
if (!kattr->mnt_idmap)
return 0;
/*
* Creating an idmapped mount with the filesystem wide idmapping
* doesn't make sense so block that. We don't allow mushy semantics.
*/
if (kattr->mnt_userns == m->mnt_sb->s_user_ns)
return -EINVAL;
/*
* We only allow a mount to change its idmapping if it has
* never been accessible to userspace.
*/
if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE) && is_idmapped_mnt(m))
return -EPERM;
/* The underlying filesystem doesn't support idmapped mounts yet. */
if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
return -EINVAL;
/* The filesystem has turned off idmapped mounts. */
if (m->mnt_sb->s_iflags & SB_I_NOIDMAP)
return -EINVAL;
/* We're not controlling the superblock. */
if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
return -EPERM;
/* Mount has already been visible in the filesystem hierarchy. */
if (!is_anon_ns(mnt->mnt_ns))
return -EINVAL;
return 0;
}
/**
* mnt_allow_writers() - check whether the attribute change allows writers
* @kattr: the new mount attributes
* @mnt: the mount to which @kattr will be applied
*
* Check whether the new mount attributes in @kattr allow concurrent writers.
*
* Return: true if writers need to be held, false if not
*/
static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
const struct mount *mnt)
{
return (!(kattr->attr_set & MNT_READONLY) ||
(mnt->mnt.mnt_flags & MNT_READONLY)) &&
!kattr->mnt_idmap;
}
static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
{
struct mount *m;
int err;
for (m = mnt; m; m = next_mnt(m, mnt)) {
if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
err = -EPERM;
break;
}
err = can_idmap_mount(kattr, m);
if (err)
break;
if (!mnt_allow_writers(kattr, m)) {
err = mnt_hold_writers(m);
if (err) {
m = next_mnt(m, mnt);
break;
}
}
if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
return 0;
}
if (err) {
/* undo all mnt_hold_writers() we'd done */
for (struct mount *p = mnt; p != m; p = next_mnt(p, mnt))
mnt_unhold_writers(p);
}
return err;
}
static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
struct mnt_idmap *old_idmap;
if (!kattr->mnt_idmap)
return;
old_idmap = mnt_idmap(&mnt->mnt);
/* Pairs with smp_load_acquire() in mnt_idmap(). */
smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
mnt_idmap_put(old_idmap);
}
static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
{
struct mount *m;
for (m = mnt; m; m = next_mnt(m, mnt)) {
unsigned int flags;
do_idmap_mount(kattr, m);
flags = recalc_flags(kattr, m);
WRITE_ONCE(m->mnt.mnt_flags, flags);
/* If we had to hold writers unblock them. */
mnt_unhold_writers(m);
if (kattr->propagation)
change_mnt_propagation(m, kattr->propagation);
if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
break;
}
touch_mnt_namespace(mnt->mnt_ns);
}
static int do_mount_setattr(const struct path *path, struct mount_kattr *kattr)
{
struct mount *mnt = real_mount(path->mnt);
int err = 0;
if (!path_mounted(path))
return -EINVAL;
if (kattr->mnt_userns) {
struct mnt_idmap *mnt_idmap;
mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
if (IS_ERR(mnt_idmap))
return PTR_ERR(mnt_idmap);
kattr->mnt_idmap = mnt_idmap;
}
if (kattr->propagation) {
/*
* Only take namespace_lock() if we're actually changing
* propagation.
*/
namespace_lock();
if (kattr->propagation == MS_SHARED) {
err = invent_group_ids(mnt, kattr->kflags & MOUNT_KATTR_RECURSE);
if (err) {
namespace_unlock();
return err;
}
}
}
err = -EINVAL;
lock_mount_hash();
if (!anon_ns_root(mnt) && !check_mnt(mnt))
goto out;
/*
* First, we get the mount tree into a shape where we can change mount
* properties without failure. If that succeeds we commit all changes,
* otherwise we clean up.
*/
err = mount_setattr_prepare(kattr, mnt);
if (!err)
mount_setattr_commit(kattr, mnt);
out:
unlock_mount_hash();
if (kattr->propagation) {
if (err)
cleanup_group_ids(mnt, NULL);
namespace_unlock();
}
return err;
}
static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
struct mount_kattr *kattr)
{
struct ns_common *ns;
struct user_namespace *mnt_userns;
if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
return 0;
if (attr->attr_clr & MOUNT_ATTR_IDMAP) {
/*
* We can only remove an idmapping if it's never been
* exposed to userspace.
*/
if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE))
return -EINVAL;
/*
* Removal of idmappings is equivalent to setting
* nop_mnt_idmap.
*/
if (!(attr->attr_set & MOUNT_ATTR_IDMAP)) {
kattr->mnt_idmap = &nop_mnt_idmap;
return 0;
}
}
if (attr->userns_fd > INT_MAX)
return -EINVAL;
CLASS(fd, f)(attr->userns_fd);
if (fd_empty(f))
return -EBADF;
if (!proc_ns_file(fd_file(f)))
return -EINVAL;
ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ns_type != CLONE_NEWUSER)
return -EINVAL;
/*
* The initial idmapping cannot be used to create an idmapped
* mount. We use the initial idmapping as an indicator of a mount
* that is not idmapped. It can simply be passed into helpers that
* are aware of idmapped mounts as a convenient shortcut. A user
* can just create a dedicated identity mapping to achieve the same
* result.
*/
mnt_userns = container_of(ns, struct user_namespace, ns);
if (mnt_userns == &init_user_ns)
return -EPERM;
/* We're not controlling the target namespace. */
if (!ns_capable(mnt_userns, CAP_SYS_ADMIN))
return -EPERM;
kattr->mnt_userns = get_user_ns(mnt_userns);
return 0;
}
static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
struct mount_kattr *kattr)
{
if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
return -EINVAL;
if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
return -EINVAL;
kattr->propagation = attr->propagation;
if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
return -EINVAL;
kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
/*
* Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
* users wanting to transition to a different atime setting cannot
* simply specify the atime setting in @attr_set, but must also
* specify MOUNT_ATTR__ATIME in the @attr_clr field.
* So ensure that MOUNT_ATTR__ATIME can't be partially set in
* @attr_clr and that @attr_set can't have any atime bits set if
* MOUNT_ATTR__ATIME isn't set in @attr_clr.
*/
if (attr->attr_clr & MOUNT_ATTR__ATIME) {
if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
return -EINVAL;
/*
* Clear all previous time settings as they are mutually
* exclusive.
*/
kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
switch (attr->attr_set & MOUNT_ATTR__ATIME) {
case MOUNT_ATTR_RELATIME:
kattr->attr_set |= MNT_RELATIME;
break;
case MOUNT_ATTR_NOATIME:
kattr->attr_set |= MNT_NOATIME;
break;
case MOUNT_ATTR_STRICTATIME:
break;
default:
return -EINVAL;
}
} else {
if (attr->attr_set & MOUNT_ATTR__ATIME)
return -EINVAL;
}
return build_mount_idmapped(attr, usize, kattr);
}
static void finish_mount_kattr(struct mount_kattr *kattr)
{
if (kattr->mnt_userns) {
put_user_ns(kattr->mnt_userns);
kattr->mnt_userns = NULL;
}
if (kattr->mnt_idmap)
mnt_idmap_put(kattr->mnt_idmap);
}
static int wants_mount_setattr(struct mount_attr __user *uattr, size_t usize,
struct mount_kattr *kattr)
{
int ret;
struct mount_attr attr;
BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
if (unlikely(usize > PAGE_SIZE))
return -E2BIG;
if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
return -EINVAL;
if (!may_mount())
return -EPERM;
ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
if (ret)
return ret;
/* Don't bother walking through the mounts if this is a nop. */
if (attr.attr_set == 0 &&
attr.attr_clr == 0 &&
attr.propagation == 0)
return 0; /* Tell caller to not bother. */
ret = build_mount_kattr(&attr, usize, kattr);
if (ret < 0)
return ret;
return 1;
}
SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
unsigned int, flags, struct mount_attr __user *, uattr,
size_t, usize)
{
int err;
struct path target;
struct mount_kattr kattr;
unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
if (flags & ~(AT_EMPTY_PATH |
AT_RECURSIVE |
AT_SYMLINK_NOFOLLOW |
AT_NO_AUTOMOUNT))
return -EINVAL;
if (flags & AT_NO_AUTOMOUNT)
lookup_flags &= ~LOOKUP_AUTOMOUNT;
if (flags & AT_SYMLINK_NOFOLLOW)
lookup_flags &= ~LOOKUP_FOLLOW;
if (flags & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
kattr = (struct mount_kattr) {
.lookup_flags = lookup_flags,
};
if (flags & AT_RECURSIVE)
kattr.kflags |= MOUNT_KATTR_RECURSE;
err = wants_mount_setattr(uattr, usize, &kattr);
if (err <= 0)
return err;
err = user_path_at(dfd, path, kattr.lookup_flags, &target);
if (!err) {
err = do_mount_setattr(&target, &kattr);
path_put(&target);
}
finish_mount_kattr(&kattr);
return err;
}
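/*
 * Illustrative sketch (not part of the kernel): making an existing mount
 * tree read-only and switching it to relatime from userspace. The path
 * "/mnt/data" is a hypothetical mount point. As explained in
 * build_mount_kattr(), changing the atime mode additionally requires
 * setting MOUNT_ATTR__ATIME in attr_clr:
 *
 * struct mount_attr attr = {
 * .attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_RELATIME,
 * .attr_clr = MOUNT_ATTR__ATIME,
 * };
 *
 * syscall(SYS_mount_setattr, AT_FDCWD, "/mnt/data", AT_RECURSIVE,
 * &attr, sizeof(attr));
 */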
SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename,
unsigned, flags, struct mount_attr __user *, uattr,
size_t, usize)
{
struct file __free(fput) *file = NULL;
int fd;
if (!uattr && usize)
return -EINVAL;
file = vfs_open_tree(dfd, filename, flags);
if (IS_ERR(file))
return PTR_ERR(file);
if (uattr) {
int ret;
struct mount_kattr kattr = {};
if (flags & OPEN_TREE_CLONE)
kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE;
if (flags & AT_RECURSIVE)
kattr.kflags |= MOUNT_KATTR_RECURSE;
ret = wants_mount_setattr(uattr, usize, &kattr);
if (ret > 0) {
ret = do_mount_setattr(&file->f_path, &kattr);
finish_mount_kattr(&kattr);
}
if (ret)
return ret;
}
fd = get_unused_fd_flags(flags & O_CLOEXEC);
if (fd < 0)
return fd;
fd_install(fd, no_free_ptr(file));
return fd;
}
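/*
 * Illustrative sketch (not part of the kernel): open_tree_attr() combines
 * open_tree() and mount_setattr() in one call, e.g. to create a detached,
 * read-only copy of a mount tree. The path is hypothetical and the
 * SYS_open_tree_attr number is assumed to be provided by the libc headers:
 *
 * struct mount_attr attr = { .attr_set = MOUNT_ATTR_RDONLY };
 * int fd = syscall(SYS_open_tree_attr, AT_FDCWD, "/mnt/data",
 * OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE,
 * &attr, sizeof(attr));
 *
 * The returned fd can then be attached elsewhere with
 * move_mount(fd, "", AT_FDCWD, "/target", MOVE_MOUNT_F_EMPTY_PATH).
 */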
int show_path(struct seq_file *m, struct dentry *root)
{
if (root->d_sb->s_op->show_path)
return root->d_sb->s_op->show_path(m, root);
seq_dentry(m, root, " \t\n\\");
return 0;
}
static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
{
struct mount *mnt = mnt_find_id_at(ns, id);
if (!mnt || mnt->mnt_id_unique != id)
return NULL;
return &mnt->mnt;
}
struct kstatmount {
struct statmount __user *buf;
size_t bufsize;
struct vfsmount *mnt;
struct mnt_idmap *idmap;
u64 mask;
struct path root;
struct seq_file seq;
/* Must be last --ends in a flexible-array member. */
struct statmount sm;
};
static u64 mnt_to_attr_flags(struct vfsmount *mnt)
{
unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
u64 attr_flags = 0;
if (mnt_flags & MNT_READONLY)
attr_flags |= MOUNT_ATTR_RDONLY;
if (mnt_flags & MNT_NOSUID)
attr_flags |= MOUNT_ATTR_NOSUID;
if (mnt_flags & MNT_NODEV)
attr_flags |= MOUNT_ATTR_NODEV;
if (mnt_flags & MNT_NOEXEC)
attr_flags |= MOUNT_ATTR_NOEXEC;
if (mnt_flags & MNT_NODIRATIME)
attr_flags |= MOUNT_ATTR_NODIRATIME;
if (mnt_flags & MNT_NOSYMFOLLOW)
attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;
if (mnt_flags & MNT_NOATIME)
attr_flags |= MOUNT_ATTR_NOATIME;
else if (mnt_flags & MNT_RELATIME)
attr_flags |= MOUNT_ATTR_RELATIME;
else
attr_flags |= MOUNT_ATTR_STRICTATIME;
if (is_idmapped_mnt(mnt))
attr_flags |= MOUNT_ATTR_IDMAP;
return attr_flags;
}
static u64 mnt_to_propagation_flags(struct mount *m)
{
u64 propagation = 0;
if (IS_MNT_SHARED(m))
propagation |= MS_SHARED;
if (IS_MNT_SLAVE(m))
propagation |= MS_SLAVE;
if (IS_MNT_UNBINDABLE(m))
propagation |= MS_UNBINDABLE;
if (!propagation)
propagation |= MS_PRIVATE;
return propagation;
}
static void statmount_sb_basic(struct kstatmount *s)
{
struct super_block *sb = s->mnt->mnt_sb;
s->sm.mask |= STATMOUNT_SB_BASIC;
s->sm.sb_dev_major = MAJOR(sb->s_dev);
s->sm.sb_dev_minor = MINOR(sb->s_dev);
s->sm.sb_magic = sb->s_magic;
s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
}
static void statmount_mnt_basic(struct kstatmount *s)
{
struct mount *m = real_mount(s->mnt);
s->sm.mask |= STATMOUNT_MNT_BASIC;
s->sm.mnt_id = m->mnt_id_unique;
s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
s->sm.mnt_id_old = m->mnt_id;
s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
s->sm.mnt_propagation = mnt_to_propagation_flags(m);
s->sm.mnt_peer_group = m->mnt_group_id;
s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
}
static void statmount_propagate_from(struct kstatmount *s)
{
struct mount *m = real_mount(s->mnt);
s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
if (IS_MNT_SLAVE(m))
s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
}
static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
{
int ret;
size_t start = seq->count;
ret = show_path(seq, s->mnt->mnt_root);
if (ret)
return ret;
if (unlikely(seq_has_overflowed(seq)))
return -EAGAIN;
/*
* Unescape the result. It would be better if the supplied string were not
* escaped in the first place, but that's a pretty invasive change.
*/
seq->buf[seq->count] = '\0';
seq->count = start;
seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
return 0;
}
static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
{
struct vfsmount *mnt = s->mnt;
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
int err;
err = seq_path_root(seq, &mnt_path, &s->root, "");
return err == SEQ_SKIP ? 0 : err;
}
static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
{
struct super_block *sb = s->mnt->mnt_sb;
seq_puts(seq, sb->s_type->name);
return 0;
}
static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq)
{
struct super_block *sb = s->mnt->mnt_sb;
if (sb->s_subtype)
seq_puts(seq, sb->s_subtype);
}
static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
{
struct super_block *sb = s->mnt->mnt_sb;
struct mount *r = real_mount(s->mnt);
if (sb->s_op->show_devname) {
size_t start = seq->count;
int ret;
ret = sb->s_op->show_devname(seq, s->mnt->mnt_root);
if (ret)
return ret;
if (unlikely(seq_has_overflowed(seq)))
return -EAGAIN;
/* Unescape the result */
seq->buf[seq->count] = '\0';
seq->count = start;
seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
} else {
seq_puts(seq, r->mnt_devname);
}
return 0;
}
static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
{
s->sm.mask |= STATMOUNT_MNT_NS_ID;
s->sm.mnt_ns_id = ns->ns.ns_id;
}
static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
{
struct vfsmount *mnt = s->mnt;
struct super_block *sb = mnt->mnt_sb;
size_t start = seq->count;
int err;
err = security_sb_show_options(seq, sb);
if (err)
return err;
if (sb->s_op->show_options) {
err = sb->s_op->show_options(seq, mnt->mnt_root);
if (err)
return err;
}
if (unlikely(seq_has_overflowed(seq)))
return -EAGAIN;
if (seq->count == start)
return 0;
/* skip leading comma */
memmove(seq->buf + start, seq->buf + start + 1,
seq->count - start - 1);
seq->count--;
return 0;
}
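/*
 * Helper for the *_ARRAY variants below: turn the comma-separated,
 * octal-escaped option string emitted by the ->show_options()/LSM hooks
 * into a sequence of '\0'-terminated, unescaped strings and return how
 * many options there are. For example (illustrative), ",ro,noatime"
 * becomes "ro\0noatime\0" and the return value is 2.
 */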
static inline int statmount_opt_process(struct seq_file *seq, size_t start)
{
char *buf_end, *opt_end, *src, *dst;
int count = 0;
if (unlikely(seq_has_overflowed(seq)))
return -EAGAIN;
buf_end = seq->buf + seq->count;
dst = seq->buf + start;
src = dst + 1; /* skip initial comma */
if (src >= buf_end) {
seq->count = start;
return 0;
}
*buf_end = '\0';
for (; src < buf_end; src = opt_end + 1) {
opt_end = strchrnul(src, ',');
*opt_end = '\0';
dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1;
if (WARN_ON_ONCE(++count == INT_MAX))
return -EOVERFLOW;
}
seq->count = dst - 1 - seq->buf;
return count;
}
static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq)
{
struct vfsmount *mnt = s->mnt;
struct super_block *sb = mnt->mnt_sb;
size_t start = seq->count;
int err;
if (!sb->s_op->show_options)
return 0;
err = sb->s_op->show_options(seq, mnt->mnt_root);
if (err)
return err;
err = statmount_opt_process(seq, start);
if (err < 0)
return err;
s->sm.opt_num = err;
return 0;
}
static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq)
{
struct vfsmount *mnt = s->mnt;
struct super_block *sb = mnt->mnt_sb;
size_t start = seq->count;
int err;
err = security_sb_show_options(seq, sb);
if (err)
return err;
err = statmount_opt_process(seq, start);
if (err < 0)
return err;
s->sm.opt_sec_num = err;
return 0;
}
static inline int statmount_mnt_uidmap(struct kstatmount *s, struct seq_file *seq)
{
int ret;
ret = statmount_mnt_idmap(s->idmap, seq, true);
if (ret < 0)
return ret;
s->sm.mnt_uidmap_num = ret;
/*
* Always raise STATMOUNT_MNT_UIDMAP even if there are no valid
* mappings. This allows userspace to distinguish between a
* non-idmapped mount and an idmapped mount where none of the
* individual mappings are valid in the caller's idmapping.
*/
if (is_valid_mnt_idmap(s->idmap))
s->sm.mask |= STATMOUNT_MNT_UIDMAP;
return 0;
}
static inline int statmount_mnt_gidmap(struct kstatmount *s, struct seq_file *seq)
{
int ret;
ret = statmount_mnt_idmap(s->idmap, seq, false);
if (ret < 0)
return ret;
s->sm.mnt_gidmap_num = ret;
/*
* Always raise STATMOUNT_MNT_GIDMAP even if there are no valid
* mappings. This allows userspace to distinguish between a
* non-idmapped mount and an idmapped mount where none of the
* individual mappings are valid in the caller's idmapping.
*/
if (is_valid_mnt_idmap(s->idmap))
s->sm.mask |= STATMOUNT_MNT_GIDMAP;
return 0;
}
static int statmount_string(struct kstatmount *s, u64 flag)
{
int ret = 0;
size_t kbufsize;
struct seq_file *seq = &s->seq;
struct statmount *sm = &s->sm;
u32 start, *offp;
/* Reserve an empty string at the beginning for any unset offsets */
if (!seq->count)
seq_putc(seq, 0);
start = seq->count;
switch (flag) {
case STATMOUNT_FS_TYPE:
offp = &sm->fs_type;
ret = statmount_fs_type(s, seq);
break;
case STATMOUNT_MNT_ROOT:
offp = &sm->mnt_root;
ret = statmount_mnt_root(s, seq);
break;
case STATMOUNT_MNT_POINT:
offp = &sm->mnt_point;
ret = statmount_mnt_point(s, seq);
break;
case STATMOUNT_MNT_OPTS:
offp = &sm->mnt_opts;
ret = statmount_mnt_opts(s, seq);
break;
case STATMOUNT_OPT_ARRAY:
offp = &sm->opt_array;
ret = statmount_opt_array(s, seq);
break;
case STATMOUNT_OPT_SEC_ARRAY:
offp = &sm->opt_sec_array;
ret = statmount_opt_sec_array(s, seq);
break;
case STATMOUNT_FS_SUBTYPE:
offp = &sm->fs_subtype;
statmount_fs_subtype(s, seq);
break;
case STATMOUNT_SB_SOURCE:
offp = &sm->sb_source;
ret = statmount_sb_source(s, seq);
break;
case STATMOUNT_MNT_UIDMAP:
offp = &sm->mnt_uidmap;
ret = statmount_mnt_uidmap(s, seq);
break;
case STATMOUNT_MNT_GIDMAP:
offp = &sm->mnt_gidmap;
ret = statmount_mnt_gidmap(s, seq);
break;
default:
WARN_ON_ONCE(true);
return -EINVAL;
}
/*
* If nothing was emitted, return to avoid setting the flag
* and terminating the buffer.
*/
if (seq->count == start)
return ret;
if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
return -EOVERFLOW;
if (kbufsize >= s->bufsize)
return -EOVERFLOW;
/* signal a retry */
if (unlikely(seq_has_overflowed(seq)))
return -EAGAIN;
if (ret)
return ret;
seq->buf[seq->count++] = '\0';
sm->mask |= flag;
*offp = start;
return 0;
}
static int copy_statmount_to_user(struct kstatmount *s)
{
struct statmount *sm = &s->sm;
struct seq_file *seq = &s->seq;
char __user *str = ((char __user *)s->buf) + sizeof(*sm);
size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));
if (seq->count && copy_to_user(str, seq->buf, seq->count))
return -EFAULT;
/* Return the number of bytes copied to the buffer */
sm->size = copysize + seq->count;
if (copy_to_user(s->buf, sm, copysize))
return -EFAULT;
return 0;
}
static struct mount *listmnt_next(struct mount *curr, bool reverse)
{
struct rb_node *node;
if (reverse)
node = rb_prev(&curr->mnt_node);
else
node = rb_next(&curr->mnt_node);
return node_to_mount(node);
}
static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
{
struct mount *first, *child;
rwsem_assert_held(&namespace_sem);
/* We're looking at our own ns, just use get_fs_root. */
if (ns == current->nsproxy->mnt_ns) {
get_fs_root(current->fs, root);
return 0;
}
/*
* We have to find the first mount in our ns and use that; however, it
* may not exist, so handle that properly.
*/
if (mnt_ns_empty(ns))
return -ENOENT;
first = child = ns->root;
for (;;) {
child = listmnt_next(child, false);
if (!child)
return -ENOENT;
if (child->mnt_parent == first)
break;
}
root->mnt = mntget(&child->mnt);
root->dentry = dget(root->mnt->mnt_root);
return 0;
}
/* This must be updated whenever a new flag is added */
#define STATMOUNT_SUPPORTED (STATMOUNT_SB_BASIC | \
STATMOUNT_MNT_BASIC | \
STATMOUNT_PROPAGATE_FROM | \
STATMOUNT_MNT_ROOT | \
STATMOUNT_MNT_POINT | \
STATMOUNT_FS_TYPE | \
STATMOUNT_MNT_NS_ID | \
STATMOUNT_MNT_OPTS | \
STATMOUNT_FS_SUBTYPE | \
STATMOUNT_SB_SOURCE | \
STATMOUNT_OPT_ARRAY | \
STATMOUNT_OPT_SEC_ARRAY | \
STATMOUNT_SUPPORTED_MASK | \
STATMOUNT_MNT_UIDMAP | \
STATMOUNT_MNT_GIDMAP)
/* locks: namespace_shared */
static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
struct mnt_namespace *ns)
{
struct mount *m;
int err;
/* Has the namespace already been emptied? */
if (mnt_ns_id && mnt_ns_empty(ns))
return -ENOENT;
s->mnt = lookup_mnt_in_ns(mnt_id, ns);
if (!s->mnt)
return -ENOENT;
err = grab_requested_root(ns, &s->root);
if (err)
return err;
/*
* Don't trigger audit denials. We just want to determine what
* mounts to show users.
*/
m = real_mount(s->mnt);
if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
!ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
err = security_sb_statfs(s->mnt->mnt_root);
if (err)
return err;
/*
* Note that mount properties in mnt->mnt_flags, mnt->mnt_idmap
* can change concurrently as we only hold the read-side of the
* namespace semaphore and mount properties may change with only
* the mount lock held.
*
* We could sample the mount lock sequence counter to detect
* those changes and retry. But it's not worth it. Worst that
* happens is that the mnt->mnt_idmap pointer is already changed
* while mnt->mnt_flags isn't, or vice versa. So what.
*
* Both mnt->mnt_flags and mnt->mnt_idmap are set and retrieved
* via READ_ONCE()/WRITE_ONCE() and guard against theoretical
* torn read/write. That's all we care about right now.
*/
s->idmap = mnt_idmap(s->mnt);
if (s->mask & STATMOUNT_MNT_BASIC)
statmount_mnt_basic(s);
if (s->mask & STATMOUNT_SB_BASIC)
statmount_sb_basic(s);
if (s->mask & STATMOUNT_PROPAGATE_FROM)
statmount_propagate_from(s);
if (s->mask & STATMOUNT_FS_TYPE)
err = statmount_string(s, STATMOUNT_FS_TYPE);
if (!err && s->mask & STATMOUNT_MNT_ROOT)
err = statmount_string(s, STATMOUNT_MNT_ROOT);
if (!err && s->mask & STATMOUNT_MNT_POINT)
err = statmount_string(s, STATMOUNT_MNT_POINT);
if (!err && s->mask & STATMOUNT_MNT_OPTS)
err = statmount_string(s, STATMOUNT_MNT_OPTS);
if (!err && s->mask & STATMOUNT_OPT_ARRAY)
err = statmount_string(s, STATMOUNT_OPT_ARRAY);
if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY)
err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY);
if (!err && s->mask & STATMOUNT_FS_SUBTYPE)
err = statmount_string(s, STATMOUNT_FS_SUBTYPE);
if (!err && s->mask & STATMOUNT_SB_SOURCE)
err = statmount_string(s, STATMOUNT_SB_SOURCE);
if (!err && s->mask & STATMOUNT_MNT_UIDMAP)
err = statmount_string(s, STATMOUNT_MNT_UIDMAP);
if (!err && s->mask & STATMOUNT_MNT_GIDMAP)
err = statmount_string(s, STATMOUNT_MNT_GIDMAP);
if (!err && s->mask & STATMOUNT_MNT_NS_ID)
statmount_mnt_ns_id(s, ns);
if (!err && s->mask & STATMOUNT_SUPPORTED_MASK) {
s->sm.mask |= STATMOUNT_SUPPORTED_MASK;
s->sm.supported_mask = STATMOUNT_SUPPORTED;
}
if (err)
return err;
/* Are there bits in the return mask not present in STATMOUNT_SUPPORTED? */
WARN_ON_ONCE(~STATMOUNT_SUPPORTED & s->sm.mask);
return 0;
}
static inline bool retry_statmount(const long ret, size_t *seq_size)
{
if (likely(ret != -EAGAIN))
return false;
if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
return false;
if (unlikely(*seq_size > MAX_RW_COUNT))
return false;
return true;
}
#define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \
STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \
STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY | \
STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP)
static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
struct statmount __user *buf, size_t bufsize,
size_t seq_size)
{
if (!access_ok(buf, bufsize))
return -EFAULT;
memset(ks, 0, sizeof(*ks));
ks->mask = kreq->param;
ks->buf = buf;
ks->bufsize = bufsize;
if (ks->mask & STATMOUNT_STRING_REQ) {
if (bufsize == sizeof(ks->sm))
return -EOVERFLOW;
ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
if (!ks->seq.buf)
return -ENOMEM;
ks->seq.size = seq_size;
}
return 0;
}
static int copy_mnt_id_req(const struct mnt_id_req __user *req,
struct mnt_id_req *kreq)
{
int ret;
size_t usize;
BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);
ret = get_user(usize, &req->size);
if (ret)
return -EFAULT;
if (unlikely(usize > PAGE_SIZE))
return -E2BIG;
if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
return -EINVAL;
memset(kreq, 0, sizeof(*kreq));
ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
if (ret)
return ret;
if (kreq->mnt_ns_fd != 0 && kreq->mnt_ns_id)
return -EINVAL;
/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
return -EINVAL;
return 0;
}
/*
* If the user requested a specific mount namespace id, look that up and return
* that, or if not simply grab a passive reference on our mount namespace and
* return that.
*/
static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq)
{
struct mnt_namespace *mnt_ns;
if (kreq->mnt_ns_id) {
mnt_ns = lookup_mnt_ns(kreq->mnt_ns_id);
if (!mnt_ns)
return ERR_PTR(-ENOENT);
} else if (kreq->mnt_ns_fd) {
struct ns_common *ns;
CLASS(fd, f)(kreq->mnt_ns_fd);
if (fd_empty(f))
return ERR_PTR(-EBADF);
if (!proc_ns_file(fd_file(f)))
return ERR_PTR(-EINVAL);
ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ns_type != CLONE_NEWNS)
return ERR_PTR(-EINVAL);
mnt_ns = to_mnt_ns(ns);
refcount_inc(&mnt_ns->passive);
} else {
mnt_ns = current->nsproxy->mnt_ns;
refcount_inc(&mnt_ns->passive);
}
return mnt_ns;
}
SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
struct statmount __user *, buf, size_t, bufsize,
unsigned int, flags)
{
struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
struct kstatmount *ks __free(kfree) = NULL;
struct mnt_id_req kreq;
/* We currently support retrieval of 3 strings. */
size_t seq_size = 3 * PATH_MAX;
int ret;
if (flags)
return -EINVAL;
ret = copy_mnt_id_req(req, &kreq);
if (ret)
return ret;
ns = grab_requested_mnt_ns(&kreq);
if (IS_ERR(ns))
return PTR_ERR(ns);
if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
!ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
return -ENOENT;
ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
if (!ks)
return -ENOMEM;
retry:
ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
if (ret)
return ret;
scoped_guard(namespace_shared)
ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns);
if (!ret)
ret = copy_statmount_to_user(ks);
kvfree(ks->seq.buf);
path_put(&ks->root);
if (retry_statmount(ret, &seq_size))
goto retry;
return ret;
}
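/*
 * Illustrative sketch (not part of the kernel): querying a mount from
 * userspace. The buffer may be too small for the requested strings, in
 * which case statmount() fails with EOVERFLOW and the caller retries with
 * a larger buffer. The mount id would typically come from listmount() or
 * statx() with STATX_MNT_ID_UNIQUE; identifiers below are hypothetical:
 *
 * struct mnt_id_req req = {
 * .size = MNT_ID_REQ_SIZE_VER0,
 * .mnt_id = mnt_id,
 * .param = STATMOUNT_MNT_BASIC | STATMOUNT_MNT_POINT,
 * };
 * size_t bufsize = 4096;
 * struct statmount *sm = malloc(bufsize);
 *
 * while (syscall(SYS_statmount, &req, sm, bufsize, 0) < 0 &&
 * errno == EOVERFLOW)
 * sm = realloc(sm, bufsize *= 2);
 *
 * if (sm->mask & STATMOUNT_MNT_POINT)
 * printf("mount point: %s\n", sm->str + sm->mnt_point);
 */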
struct klistmount {
u64 last_mnt_id;
u64 mnt_parent_id;
u64 *kmnt_ids;
u32 nr_mnt_ids;
struct mnt_namespace *ns;
struct path root;
};
/* locks: namespace_shared */
static ssize_t do_listmount(struct klistmount *kls, bool reverse)
{
struct mnt_namespace *ns = kls->ns;
u64 mnt_parent_id = kls->mnt_parent_id;
u64 last_mnt_id = kls->last_mnt_id;
u64 *mnt_ids = kls->kmnt_ids;
size_t nr_mnt_ids = kls->nr_mnt_ids;
struct path orig;
struct mount *r, *first;
ssize_t ret;
rwsem_assert_held(&namespace_sem);
ret = grab_requested_root(ns, &kls->root);
if (ret)
return ret;
if (mnt_parent_id == LSMT_ROOT) {
orig = kls->root;
} else {
orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
if (!orig.mnt)
return -ENOENT;
orig.dentry = orig.mnt->mnt_root;
}
/*
* Don't trigger audit denials. We just want to determine what
* mounts to show users.
*/
if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) &&
!ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
ret = security_sb_statfs(orig.dentry);
if (ret)
return ret;
if (!last_mnt_id) {
if (reverse)
first = node_to_mount(ns->mnt_last_node);
else
first = node_to_mount(ns->mnt_first_node);
} else {
if (reverse)
first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
else
first = mnt_find_id_at(ns, last_mnt_id + 1);
}
for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
if (r->mnt_id_unique == mnt_parent_id)
continue;
if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
continue;
*mnt_ids = r->mnt_id_unique;
mnt_ids++;
nr_mnt_ids--;
ret++;
}
return ret;
}
static void __free_klistmount_free(const struct klistmount *kls)
{
path_put(&kls->root);
kvfree(kls->kmnt_ids);
mnt_ns_release(kls->ns);
}
static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
size_t nr_mnt_ids)
{
u64 last_mnt_id = kreq->param;
struct mnt_namespace *ns;
/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
return -EINVAL;
kls->last_mnt_id = last_mnt_id;
kls->nr_mnt_ids = nr_mnt_ids;
kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids),
GFP_KERNEL_ACCOUNT);
if (!kls->kmnt_ids)
return -ENOMEM;
ns = grab_requested_mnt_ns(kreq);
if (IS_ERR(ns))
return PTR_ERR(ns);
kls->ns = ns;
kls->mnt_parent_id = kreq->mnt_id;
return 0;
}
SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
{
struct klistmount kls __free(klistmount_free) = {};
const size_t maxcount = 1000000;
struct mnt_id_req kreq;
ssize_t ret;
if (flags & ~LISTMOUNT_REVERSE)
return -EINVAL;
/*
* If the mount namespace really has more than 1 million mounts the
* caller must iterate over the mount namespace (and reconsider their
* system design...).
*/
if (unlikely(nr_mnt_ids > maxcount))
return -EOVERFLOW;
if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
return -EFAULT;
ret = copy_mnt_id_req(req, &kreq);
if (ret)
return ret;
ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids);
if (ret)
return ret;
if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) &&
!ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN))
return -ENOENT;
/*
* We only need to guard against mount topology changes as
* listmount() doesn't care about any mount properties.
*/
scoped_guard(namespace_shared)
ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE));
if (ret <= 0)
return ret;
if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids)))
return -EFAULT;
return ret;
}
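/*
 * Illustrative sketch (not part of the kernel): walking all mounts in the
 * caller's namespace with listmount(), resuming from the last id seen
 * whenever the array fills up. LSMT_ROOT requests mounts reachable from
 * the namespace root, and .param carries the last mount id already seen
 * (0 to start from the beginning):
 *
 * struct mnt_id_req req = {
 * .size = MNT_ID_REQ_SIZE_VER0,
 * .mnt_id = LSMT_ROOT,
 * };
 * uint64_t ids[256];
 * ssize_t n;
 *
 * while ((n = syscall(SYS_listmount, &req, ids, 256, 0)) > 0) {
 * for (ssize_t i = 0; i < n; i++)
 * printf("%llu\n", (unsigned long long)ids[i]);
 * req.param = ids[n - 1];
 * }
 */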
struct mnt_namespace init_mnt_ns = {
.ns.inum = ns_init_inum(&init_mnt_ns),
.ns.ops = &mntns_operations,
.user_ns = &init_user_ns,
.ns.__ns_ref = REFCOUNT_INIT(1),
.ns.ns_type = ns_common_type(&init_mnt_ns),
.passive = REFCOUNT_INIT(1),
.mounts = RB_ROOT,
.poll = __WAIT_QUEUE_HEAD_INITIALIZER(init_mnt_ns.poll),
};
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
struct mount *m;
struct path root;
mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options);
if (IS_ERR(mnt))
panic("Can't create rootfs");
m = real_mount(mnt);
init_mnt_ns.root = m;
init_mnt_ns.nr_mounts = 1;
mnt_add_to_ns(&init_mnt_ns, m);
init_task.nsproxy->mnt_ns = &init_mnt_ns;
get_mnt_ns(&init_mnt_ns);
root.mnt = mnt;
root.dentry = mnt->mnt_root;
set_fs_pwd(current->fs, &root);
set_fs_root(current->fs, &root);
ns_tree_add(&init_mnt_ns);
}
void __init mnt_init(void)
{
int err;
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
mount_hashtable = alloc_large_system_hash("Mount-cache",
sizeof(struct hlist_head),
mhash_entries, 19,
HASH_ZERO,
&m_hash_shift, &m_hash_mask, 0, 0);
mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
sizeof(struct hlist_head),
mphash_entries, 19,
HASH_ZERO,
&mp_hash_shift, &mp_hash_mask, 0, 0);
if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n");
kernfs_init();
err = sysfs_init();
if (err)
printk(KERN_WARNING "%s: sysfs_init error: %d\n",
__func__, err);
fs_kobj = kobject_create_and_add("fs", NULL);
if (!fs_kobj)
printk(KERN_WARNING "%s: kobj create error\n", __func__);
shmem_init();
init_rootfs();
init_mount_tree();
}
void put_mnt_ns(struct mnt_namespace *ns)
{
if (!ns_ref_put(ns))
return;
guard(namespace_excl)();
emptied_ns = ns;
guard(mount_writer)();
umount_tree(ns->root, 0);
}
struct vfsmount *kern_mount(struct file_system_type *type)
{
struct vfsmount *mnt;
mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
if (!IS_ERR(mnt)) {
/*
* it is a long-term mount; don't release mnt until
* we unmount it, before the file system is unregistered
*/
real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
}
return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);
void kern_unmount(struct vfsmount *mnt)
{
/* release long term mount so mount point can be released */
if (!IS_ERR(mnt)) {
mnt_make_shortterm(mnt);
synchronize_rcu(); /* yecchhh... */
mntput(mnt);
}
}
EXPORT_SYMBOL(kern_unmount);
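/*
 * Illustrative sketch: the usual pairing for such a long-term internal
 * mount, e.g. from a filesystem's module init/exit. "example_fs_type" and
 * the function names are hypothetical:
 *
 * static struct vfsmount *example_mnt;
 *
 * static int __init example_init(void)
 * {
 * example_mnt = kern_mount(&example_fs_type);
 * return PTR_ERR_OR_ZERO(example_mnt);
 * }
 *
 * static void __exit example_exit(void)
 * {
 * kern_unmount(example_mnt);
 * }
 */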
void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
unsigned int i;
for (i = 0; i < num; i++)
mnt_make_shortterm(mnt[i]);
synchronize_rcu_expedited();
for (i = 0; i < num; i++)
mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);
bool our_mnt(struct vfsmount *mnt)
{
return check_mnt(real_mount(mnt));
}
bool current_chrooted(void)
{
/* Does the current process have a non-standard root */
struct path fs_root __free(path_put) = {};
struct mount *root;
get_fs_root(current->fs, &fs_root);
/* Find the namespace root */
guard(mount_locked_reader)();
root = topmost_overmount(current->nsproxy->mnt_ns->root);
return fs_root.mnt != &root->mnt || !path_mounted(&fs_root);
}
static bool mnt_already_visible(struct mnt_namespace *ns,
const struct super_block *sb,
int *new_mnt_flags)
{
int new_flags = *new_mnt_flags;
struct mount *mnt, *n;
guard(namespace_shared)();
rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
struct mount *child;
int mnt_flags;
if (mnt->mnt.mnt_sb->s_type != sb->s_type)
continue;
/* This mount is not fully visible if its root directory
* is not the root directory of the filesystem.
*/
if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
continue;
/* A local view of the mount flags */
mnt_flags = mnt->mnt.mnt_flags;
/* Don't miss readonly hidden in the superblock flags */
if (sb_rdonly(mnt->mnt.mnt_sb))
mnt_flags |= MNT_LOCK_READONLY;
/* Verify the mount flags are equal to or more permissive
* than the proposed new mount.
*/
if ((mnt_flags & MNT_LOCK_READONLY) &&
!(new_flags & MNT_READONLY))
continue;
if ((mnt_flags & MNT_LOCK_ATIME) &&
((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
continue;
/* This mount is not fully visible if there are any
* locked child mounts that cover anything except for
* empty directories.
*/
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
struct inode *inode = child->mnt_mountpoint->d_inode;
/* Only worry about locked mounts */
if (!(child->mnt.mnt_flags & MNT_LOCKED))
continue;
/* Is the directory permanently empty? */
if (!is_empty_dir_inode(inode))
goto next;
}
/* Preserve the locked attributes */
*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
MNT_LOCK_ATIME);
return true;
next: ;
}
return false;
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
unsigned long s_iflags;
if (ns->user_ns == &init_user_ns)
return false;
/* Can this filesystem be too revealing? */
s_iflags = sb->s_iflags;
if (!(s_iflags & SB_I_USERNS_VISIBLE))
return false;
if ((s_iflags & required_iflags) != required_iflags) {
WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
required_iflags);
return true;
}
return !mnt_already_visible(ns, sb, new_mnt_flags);
}
bool mnt_may_suid(struct vfsmount *mnt)
{
/*
* Foreign mounts (accessed via fchdir or through /proc
* symlinks) are always treated as if they are nosuid. This
* prevents namespaces from trusting potentially unsafe
* suid/sgid bits, file caps, or security labels that originate
* in other namespaces.
*/
return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
current_in_userns(mnt->mnt_sb->s_user_ns);
}
static struct ns_common *mntns_get(struct task_struct *task)
{
struct ns_common *ns = NULL;
struct nsproxy *nsproxy;
task_lock(task);
nsproxy = task->nsproxy;
if (nsproxy) {
ns = &nsproxy->mnt_ns->ns;
get_mnt_ns(to_mnt_ns(ns));
}
task_unlock(task);
return ns;
}
static void mntns_put(struct ns_common *ns)
{
put_mnt_ns(to_mnt_ns(ns));
}
static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
struct nsproxy *nsproxy = nsset->nsproxy;
struct fs_struct *fs = nsset->fs;
struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
struct user_namespace *user_ns = nsset->cred->user_ns;
struct path root;
int err;
if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(user_ns, CAP_SYS_CHROOT) ||
!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (is_anon_ns(mnt_ns))
return -EINVAL;
if (fs->users != 1)
return -EINVAL;
get_mnt_ns(mnt_ns);
old_mnt_ns = nsproxy->mnt_ns;
nsproxy->mnt_ns = mnt_ns;
/* Find the root */
err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
"/", LOOKUP_DOWN, &root);
if (err) {
/* revert to old namespace */
nsproxy->mnt_ns = old_mnt_ns;
put_mnt_ns(mnt_ns);
return err;
}
put_mnt_ns(old_mnt_ns);
/* Update the pwd and root */
set_fs_pwd(fs, &root);
set_fs_root(fs, &root);
path_put(&root);
return 0;
}
static struct user_namespace *mntns_owner(struct ns_common *ns)
{
return to_mnt_ns(ns)->user_ns;
}
const struct proc_ns_operations mntns_operations = {
.name = "mnt",
.get = mntns_get,
.put = mntns_put,
.install = mntns_install,
.owner = mntns_owner,
};
#ifdef CONFIG_SYSCTL
static const struct ctl_table fs_namespace_sysctls[] = {
{
.procname = "mount-max",
.data = &sysctl_mount_max,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
};
static int __init init_fs_namespace_sysctls(void)
{
register_sysctl_init("fs", fs_namespace_sysctls);
return 0;
}
fs_initcall(init_fs_namespace_sysctls);
#endif /* CONFIG_SYSCTL */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Link physical devices with ACPI devices support
*
* Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com>
* Copyright (c) 2005 Intel Corp.
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi_iort.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/platform_device.h>
#include "internal.h"
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
if (type && type->match && type->find_companion) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
pr_info("bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(register_acpi_bus_type);
int unregister_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return 0;
if (type) {
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
pr_info("bus type %s unregistered\n", type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{
struct acpi_bus_type *tmp, *ret = NULL;
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->match(dev)) {
ret = tmp;
break;
}
}
up_read(&bus_type_sem);
return ret;
}
#define FIND_CHILD_MIN_SCORE 1
#define FIND_CHILD_MID_SCORE 2
#define FIND_CHILD_MAX_SCORE 3
static int match_any(struct acpi_device *adev, void *not_used)
{
return 1;
}
static bool acpi_dev_has_children(struct acpi_device *adev)
{
return acpi_dev_for_each_child(adev, match_any, NULL) > 0;
}
static int find_child_checks(struct acpi_device *adev, bool check_children)
{
unsigned long long sta;
acpi_status status;
if (check_children && !acpi_dev_has_children(adev))
return -ENODEV;
status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
if (status == AE_NOT_FOUND) {
/*
* Special case: backlight device objects without _STA are
* preferred to other objects with the same _ADR value, because
* it is more likely that they are actually useful.
*/
if (adev->pnp.type.backlight)
return FIND_CHILD_MID_SCORE;
return FIND_CHILD_MIN_SCORE;
}
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return -ENODEV;
/*
* If the device has a _HID returning a valid ACPI/PNP device ID, it is
* better to make it look less attractive here, so that the other device
* with the same _ADR value (that may not have a valid device ID) can be
* matched going forward. [This means a second spec violation in a row,
* so whatever we do here is best effort anyway.]
*/
if (adev->pnp.type.platform_id)
return FIND_CHILD_MIN_SCORE;
return FIND_CHILD_MAX_SCORE;
}
struct find_child_walk_data {
struct acpi_device *adev;
u64 address;
int score;
bool check_sta;
bool check_children;
};
static int check_one_child(struct acpi_device *adev, void *data)
{
struct find_child_walk_data *wd = data;
int score;
if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address)
return 0;
if (!wd->adev) {
/*
* This is the first matching object, so save it. If it is not
* necessary to look for any other matching objects, stop the
* search.
*/
wd->adev = adev;
return !(wd->check_sta || wd->check_children);
}
/*
* There is more than one matching device object with the same _ADR
* value. That really is unexpected, so we are kind of beyond the scope
* of the spec here. We have to choose which one to return, though.
*
* First, get the score for the previously found object and terminate
* the walk if it is maximum.
*/
if (!wd->score) {
score = find_child_checks(wd->adev, wd->check_children);
if (score == FIND_CHILD_MAX_SCORE)
return 1;
wd->score = score;
}
/*
* Second, if the object that has just been found has a better score,
* replace the previously found one with it and terminate the walk if
* the new score is maximum.
*/
score = find_child_checks(adev, wd->check_children);
if (score > wd->score) {
wd->adev = adev;
if (score == FIND_CHILD_MAX_SCORE)
return 1;
wd->score = score;
}
/* Continue, because there may be better matches. */
return 0;
}
static struct acpi_device *acpi_find_child(struct acpi_device *parent,
u64 address, bool check_children,
bool check_sta)
{
struct find_child_walk_data wd = {
.address = address,
.check_children = check_children,
.check_sta = check_sta,
.adev = NULL,
.score = 0,
};
if (parent)
acpi_dev_for_each_child(parent, check_one_child, &wd);
return wd.adev;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children)
{
return acpi_find_child(parent, address, check_children, true);
}
EXPORT_SYMBOL_GPL(acpi_find_child_device);
struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev,
acpi_bus_address adr)
{
return acpi_find_child(adev, adr, false, false);
}
EXPORT_SYMBOL_GPL(acpi_find_child_by_adr);
static void acpi_physnode_link_name(char *buf, unsigned int node_id)
{
if (node_id > 0)
snprintf(buf, PHYSICAL_NODE_NAME_SIZE,
PHYSICAL_NODE_STRING "%u", node_id);
else
strcpy(buf, PHYSICAL_NODE_STRING);
}
int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
{
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
unsigned int node_id;
int retval = -EINVAL;
if (has_acpi_companion(dev)) {
if (acpi_dev) {
dev_warn(dev, "ACPI companion already set\n");
return -EINVAL;
} else {
acpi_dev = ACPI_COMPANION(dev);
}
}
if (!acpi_dev)
return -EINVAL;
acpi_dev_get(acpi_dev);
get_device(dev);
physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
goto err;
}
mutex_lock(&acpi_dev->physical_node_lock);
/*
* Keep the list sorted by node_id so that the IDs of removed nodes can
* be recycled easily.
*/
physnode_list = &acpi_dev->physical_node_list;
node_id = 0;
list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
/* Sanity check. */
if (pn->dev == dev) {
mutex_unlock(&acpi_dev->physical_node_lock);
dev_warn(dev, "Already associated with ACPI node\n");
kfree(physical_node);
if (ACPI_COMPANION(dev) != acpi_dev)
goto err;
put_device(dev);
acpi_dev_put(acpi_dev);
return 0;
}
if (pn->node_id == node_id) {
physnode_list = &pn->node;
node_id++;
}
}
physical_node->node_id = node_id;
physical_node->dev = dev;
list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
if (!has_acpi_companion(dev))
ACPI_COMPANION_SET(dev, acpi_dev);
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
physical_node_name);
if (retval)
dev_err(&acpi_dev->dev, "Failed to create link %s (%d)\n",
physical_node_name, retval);
retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
"firmware_node");
if (retval)
dev_err(dev, "Failed to create link firmware_node (%d)\n",
retval);
mutex_unlock(&acpi_dev->physical_node_lock);
if (acpi_dev->wakeup.flags.valid)
device_set_wakeup_capable(dev, true);
return 0;
err:
ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
acpi_dev_put(acpi_dev);
return retval;
}
EXPORT_SYMBOL_GPL(acpi_bind_one);
int acpi_unbind_one(struct device *dev)
{
struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
struct acpi_device_physical_node *entry;
if (!acpi_dev)
return 0;
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
if (entry->dev == dev) {
char physnode_name[PHYSICAL_NODE_NAME_SIZE];
list_del(&entry->node);
acpi_dev->physical_node_count--;
acpi_physnode_link_name(physnode_name, entry->node_id);
sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
ACPI_COMPANION_SET(dev, NULL);
/* Drop references taken by acpi_bind_one(). */
put_device(dev);
acpi_dev_put(acpi_dev);
kfree(entry);
break;
}
mutex_unlock(&acpi_dev->physical_node_lock);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
void acpi_device_notify(struct device *dev)
{
struct acpi_device *adev;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret) {
struct acpi_bus_type *type = acpi_get_bus_type(dev);
if (!type)
goto err;
adev = type->find_companion(dev);
if (!adev) {
dev_dbg(dev, "ACPI companion not found\n");
goto err;
}
ret = acpi_bind_one(dev, adev);
if (ret)
goto err;
if (type->setup) {
type->setup(dev);
goto done;
}
} else {
adev = ACPI_COMPANION(dev);
if (dev_is_pci(dev)) {
pci_acpi_setup(dev, adev);
goto done;
} else if (dev_is_platform(dev)) {
acpi_configure_pmsi_domain(dev);
}
}
if (adev->handler && adev->handler->bind)
adev->handler->bind(dev);
done:
acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n",
dev_name(dev));
return;
err:
dev_dbg(dev, "No ACPI support\n");
}
void acpi_device_notify_remove(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev)
return;
if (dev_is_pci(dev))
pci_acpi_cleanup(dev, adev);
else if (adev->handler && adev->handler->unbind)
adev->handler->unbind(dev);
acpi_unbind_one(dev);
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions for the 'struct ptr_ring' datastructure.
*
* Author:
* Michael S. Tsirkin <mst@redhat.com>
*
* Copyright (C) 2016 Red Hat, Inc.
*
* This is a limited-size FIFO maintaining pointers in FIFO order, with
* one CPU producing entries and another consuming entries from a FIFO.
*
* This implementation tries to minimize cache-contention when there is a
* single producer and a single consumer CPU.
*/
#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1
#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/errno.h>
#endif
struct ptr_ring {
int producer ____cacheline_aligned_in_smp;
spinlock_t producer_lock;
int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
int consumer_tail; /* next entry to invalidate */
spinlock_t consumer_lock;
/* Shared consumer/producer data */
/* Read-only by both the producer and the consumer */
int size ____cacheline_aligned_in_smp; /* max entries in queue */
int batch; /* number of entries to consume in a batch */
void **queue;
};
/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax().
*
* NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
* see e.g. ptr_ring_full.
*/
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
return r->queue[r->producer];
}
static inline bool ptr_ring_full(struct ptr_ring *r)
{
bool ret;
spin_lock(&r->producer_lock);
ret = __ptr_ring_full(r);
spin_unlock(&r->producer_lock);
return ret;
}
static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
bool ret;
spin_lock_irq(&r->producer_lock);
ret = __ptr_ring_full(r);
spin_unlock_irq(&r->producer_lock);
return ret;
}
static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
unsigned long flags;
bool ret;
spin_lock_irqsave(&r->producer_lock, flags);
ret = __ptr_ring_full(r);
spin_unlock_irqrestore(&r->producer_lock, flags);
return ret;
}
static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
bool ret;
spin_lock_bh(&r->producer_lock);
ret = __ptr_ring_full(r);
spin_unlock_bh(&r->producer_lock);
return ret;
}
/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax(). Callers must hold producer_lock.
* Callers are responsible for making sure the pointer that is being queued
* points to valid data.
*/
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
if (unlikely(!r->size) || r->queue[r->producer])
return -ENOSPC;
/* Make sure the pointer we are storing points to valid data. */
/* Pairs with the dependency ordering in __ptr_ring_consume. */
smp_wmb();
WRITE_ONCE(r->queue[r->producer++], ptr);
if (unlikely(r->producer >= r->size))
r->producer = 0;
return 0;
}
/*
* Note: resize (below) nests producer lock within consumer lock, so if you
* consume in interrupt or BH context, you must disable interrupts/BH when
* calling this.
*/
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
int ret;
spin_lock(&r->producer_lock);
ret = __ptr_ring_produce(r, ptr);
spin_unlock(&r->producer_lock);
return ret;
}
static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
int ret;
spin_lock_irq(&r->producer_lock);
ret = __ptr_ring_produce(r, ptr);
spin_unlock_irq(&r->producer_lock);
return ret;
}
static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&r->producer_lock, flags);
ret = __ptr_ring_produce(r, ptr);
spin_unlock_irqrestore(&r->producer_lock, flags);
return ret;
}
static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
int ret;
spin_lock_bh(&r->producer_lock);
ret = __ptr_ring_produce(r, ptr);
spin_unlock_bh(&r->producer_lock);
return ret;
}
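/*
 * Illustrative sketch (not part of this header): typical single-producer /
 * single-consumer usage, with one context queueing work items for another.
 * ptr_ring_init() and ptr_ring_cleanup() are defined further down in this
 * header; "item" and process() are hypothetical:
 *
 * struct ptr_ring ring;
 *
 * ptr_ring_init(&ring, 1024, GFP_KERNEL);
 *
 * producer context:
 * err = ptr_ring_produce(&ring, item); returns -ENOSPC if the ring is full
 *
 * consumer context:
 * while ((item = ptr_ring_consume(&ring)))
 * process(item);
 *
 * ptr_ring_cleanup(&ring, NULL);
 */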
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
if (likely(r->size))
return READ_ONCE(r->queue[r->consumer_head]);
return NULL;
}
/*
* Test ring empty status without taking any locks.
*
* NB: This is only safe to call if ring is never resized.
*
* However, if some other CPU consumes ring entries at the same time, the value
* returned is not guaranteed to be correct.
*
* In this case - to avoid incorrectly detecting the ring
* as empty - the CPU consuming the ring entries is responsible
* for either consuming all ring entries until the ring is empty,
* or synchronizing with some other CPU and causing it to
* re-test __ptr_ring_empty and/or consume the ring entries
* after the synchronization point.
*
* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax().
*/
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
if (likely(r->size))
return !r->queue[READ_ONCE(r->consumer_head)];
return true;
}
static inline bool ptr_ring_empty(struct ptr_ring *r)
{
bool ret;
spin_lock(&r->consumer_lock);
ret = __ptr_ring_empty(r);
spin_unlock(&r->consumer_lock);
return ret;
}
static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
bool ret;
spin_lock_irq(&r->consumer_lock);
ret = __ptr_ring_empty(r);
spin_unlock_irq(&r->consumer_lock);
return ret;
}
static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
unsigned long flags;
bool ret;
spin_lock_irqsave(&r->consumer_lock, flags);
ret = __ptr_ring_empty(r);
spin_unlock_irqrestore(&r->consumer_lock, flags);
return ret;
}
static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
bool ret;
spin_lock_bh(&r->consumer_lock);
ret = __ptr_ring_empty(r);
spin_unlock_bh(&r->consumer_lock);
return ret;
}
/* Zero entries from tail to specified head.
* NB: if consumer_head can be >= r->size, the tail needs to be fixed up later.
*/
static inline void __ptr_ring_zero_tail(struct ptr_ring *r, int consumer_head)
{
int head = consumer_head;
/* Zero out entries in the reverse order: this way we touch the
* cache line that producer might currently be reading the last;
* producer won't make progress and touch other cache lines
* besides the first one until we write out all entries.
*/
while (likely(head > r->consumer_tail))
r->queue[--head] = NULL;
r->consumer_tail = consumer_head;
}
/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
/* Fundamentally, what we want to do is update consumer
* index and zero out the entry so producer can reuse it.
* Doing it naively at each consume would be as simple as:
* consumer = r->consumer;
* r->queue[consumer++] = NULL;
* if (unlikely(consumer >= r->size))
* consumer = 0;
* r->consumer = consumer;
* but that is suboptimal when the ring is full as producer is writing
* out new entries in the same cache line. Defer these updates until a
* batch of entries has been consumed.
*/
/* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
* to work correctly.
*/
int consumer_head = r->consumer_head + 1;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
* We also do this when we reach end of the ring - not mandatory
* but helps keep the implementation simple.
*/
if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
consumer_head >= r->size))
__ptr_ring_zero_tail(r, consumer_head);
if (unlikely(consumer_head >= r->size)) {
consumer_head = 0;
r->consumer_tail = 0;
}
/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
WRITE_ONCE(r->consumer_head, consumer_head);
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;
/* The READ_ONCE in __ptr_ring_peek guarantees that anyone
* accessing data through the pointer is up to date. Pairs
* with smp_wmb in __ptr_ring_produce.
*/
ptr = __ptr_ring_peek(r);
if (ptr)
__ptr_ring_discard_one(r);
return ptr;
}
static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
void **array, int n)
{
void *ptr;
int i;
for (i = 0; i < n; i++) {
ptr = __ptr_ring_consume(r);
if (!ptr)
break;
array[i] = ptr;
}
return i;
}
/*
* Note: resize (below) nests producer lock within consumer lock, so if you
* call this in interrupt or BH context, you must disable interrupts/BH when
* producing.
*/
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;
spin_lock(&r->consumer_lock);
ptr = __ptr_ring_consume(r);
spin_unlock(&r->consumer_lock);
return ptr;
}
static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
void *ptr;
spin_lock_irq(&r->consumer_lock);
ptr = __ptr_ring_consume(r);
spin_unlock_irq(&r->consumer_lock);
return ptr;
}
static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
unsigned long flags;
void *ptr;
spin_lock_irqsave(&r->consumer_lock, flags);
ptr = __ptr_ring_consume(r);
spin_unlock_irqrestore(&r->consumer_lock, flags);
return ptr;
}
static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
void *ptr;
spin_lock_bh(&r->consumer_lock);
ptr = __ptr_ring_consume(r);
spin_unlock_bh(&r->consumer_lock);
return ptr;
}
static inline int ptr_ring_consume_batched(struct ptr_ring *r,
void **array, int n)
{
int ret;
spin_lock(&r->consumer_lock);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock(&r->consumer_lock);
return ret;
}
static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
void **array, int n)
{
int ret;
spin_lock_irq(&r->consumer_lock);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock_irq(&r->consumer_lock);
return ret;
}
static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
void **array, int n)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&r->consumer_lock, flags);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock_irqrestore(&r->consumer_lock, flags);
return ret;
}
static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
void **array, int n)
{
int ret;
spin_lock_bh(&r->consumer_lock);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock_bh(&r->consumer_lock);
return ret;
}
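/*
 * Illustrative sketch, not part of the original header: drain a ring from
 * BH context with the batched consumer above. Per the locking note below
 * (resize nests the producer lock within the consumer lock), the plain
 * *_bh variants are only sufficient if the producer also runs in BH or in
 * process context with BH disabled. The callback name is hypothetical.
 */
static inline void ptr_ring_example_drain_bh(struct ptr_ring *r,
					     void (*handle)(void *))
{
	void *batch[16];
	int n, i;

	do {
		n = ptr_ring_consume_batched_bh(r, batch, ARRAY_SIZE(batch));
		for (i = 0; i < n; i++)
			handle(batch[i]);
	} while (n == ARRAY_SIZE(batch));
}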
/* Cast to structure type and call a function without discarding from FIFO.
* Function must return a value.
* Callers must take consumer_lock.
*/
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))
#define PTR_RING_PEEK_CALL(r, f) ({ \
typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
\
spin_lock(&(r)->consumer_lock); \
__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
spin_unlock(&(r)->consumer_lock); \
__PTR_RING_PEEK_CALL_v; \
})
#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
\
spin_lock_irq(&(r)->consumer_lock); \
__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
spin_unlock_irq(&(r)->consumer_lock); \
__PTR_RING_PEEK_CALL_v; \
})
#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
\
spin_lock_bh(&(r)->consumer_lock); \
__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
spin_unlock_bh(&(r)->consumer_lock); \
__PTR_RING_PEEK_CALL_v; \
})
#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
unsigned long __PTR_RING_PEEK_CALL_f;\
\
spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
__PTR_RING_PEEK_CALL_v; \
})
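/*
 * Illustrative usage sketch, not part of the original header: the callback
 * passed to the PTR_RING_PEEK_CALL*() macros above is invoked with the
 * result of __ptr_ring_peek(), which is NULL when the ring is empty, so it
 * must tolerate a NULL argument. The struct and helper names below are
 * hypothetical.
 *
 *	struct example_item { int len; };
 *
 *	static int example_item_len(void *ptr)
 *	{
 *		struct example_item *item = ptr;
 *
 *		return item ? item->len : 0;
 *	}
 *
 *	... int len = PTR_RING_PEEK_CALL(&ring, example_item_len); ...
 */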
/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
* the vmalloc documentation for which of them are legal.
*/
static inline void **__ptr_ring_init_queue_alloc_noprof(unsigned int size, gfp_t gfp)
{
if (size > KMALLOC_MAX_SIZE / sizeof(void *))
return NULL;
return kvmalloc_array_noprof(size, sizeof(void *), gfp | __GFP_ZERO);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
{
r->size = size;
r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
/* We need to set batch at least to 1 to make logic
* in __ptr_ring_discard_one work correctly.
* Batching too much (because ring is small) would cause a lot of
* burstiness. Needs tuning, for now disable batching.
*/
if (r->batch > r->size / 2 || !r->batch)
r->batch = 1;
}
static inline int ptr_ring_init_noprof(struct ptr_ring *r, int size, gfp_t gfp)
{
r->queue = __ptr_ring_init_queue_alloc_noprof(size, gfp);
if (!r->queue)
return -ENOMEM;
__ptr_ring_set_size(r, size);
r->producer = r->consumer_head = r->consumer_tail = 0;
spin_lock_init(&r->producer_lock);
spin_lock_init(&r->consumer_lock);
return 0;
}
#define ptr_ring_init(...) alloc_hooks(ptr_ring_init_noprof(__VA_ARGS__))
/*
* Return entries into ring. Destroy entries that don't fit.
*
* Note: this is expected to be a rare slow path operation.
*
* Note: producer lock is nested within consumer lock, so if you
* resize you must make sure all uses nest correctly.
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
void (*destroy)(void *))
{
unsigned long flags;
spin_lock_irqsave(&r->consumer_lock, flags);
spin_lock(&r->producer_lock);
if (!r->size)
goto done;
/*
* Clean out buffered entries (for simplicity). This way following code
* can test entries for NULL and if not assume they are valid.
*/
__ptr_ring_zero_tail(r, r->consumer_head);
/*
* Go over entries in batch, start moving head back and copy entries.
* Stop when we run into previously unconsumed entries.
*/
while (n) {
int head = r->consumer_head - 1;
if (head < 0)
head = r->size - 1;
if (r->queue[head]) {
/* This batch entry will have to be destroyed. */
goto done;
}
r->queue[head] = batch[--n];
r->consumer_tail = head;
/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
WRITE_ONCE(r->consumer_head, head);
}
done:
/* Destroy all entries left in the batch. */
while (n)
destroy(batch[--n]);
spin_unlock(&r->producer_lock);
spin_unlock_irqrestore(&r->consumer_lock, flags);
}
static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
int size, gfp_t gfp,
void (*destroy)(void *))
{
int producer = 0;
void **old;
void *ptr;
while ((ptr = __ptr_ring_consume(r)))
if (producer < size)
queue[producer++] = ptr;
else if (destroy)
destroy(ptr);
if (producer >= size)
producer = 0;
__ptr_ring_set_size(r, size);
r->producer = producer;
r->consumer_head = 0;
r->consumer_tail = 0;
old = r->queue;
r->queue = queue;
return old;
}
/*
* Note: producer lock is nested within consumer lock, so if you
* resize you must make sure all uses nest correctly.
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp,
void (*destroy)(void *))
{
unsigned long flags;
void **queue = __ptr_ring_init_queue_alloc_noprof(size, gfp);
void **old;
if (!queue)
return -ENOMEM;
spin_lock_irqsave(&(r)->consumer_lock, flags);
spin_lock(&(r)->producer_lock);
old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
spin_unlock(&(r)->producer_lock);
spin_unlock_irqrestore(&(r)->consumer_lock, flags);
kvfree(old);
return 0;
}
#define ptr_ring_resize(...) alloc_hooks(ptr_ring_resize_noprof(__VA_ARGS__))
/*
* Note: producer lock is nested within consumer lock, so if you
* resize you must make sure all uses nest correctly.
* In particular if you consume ring in BH context, you must
* disable BH when doing so.
*/
static inline int ptr_ring_resize_multiple_bh_noprof(struct ptr_ring **rings,
unsigned int nrings,
int size, gfp_t gfp,
void (*destroy)(void *))
{
void ***queues;
int i;
queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;
for (i = 0; i < nrings; ++i) {
queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp);
if (!queues[i])
goto nomem;
}
for (i = 0; i < nrings; ++i) {
spin_lock_bh(&(rings[i])->consumer_lock);
spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
spin_unlock(&(rings[i])->producer_lock);
spin_unlock_bh(&(rings[i])->consumer_lock);
}
for (i = 0; i < nrings; ++i)
kvfree(queues[i]);
kfree(queues);
return 0;
nomem:
while (--i >= 0)
kvfree(queues[i]);
kfree(queues);
noqueues:
return -ENOMEM;
}
#define ptr_ring_resize_multiple_bh(...) \
alloc_hooks(ptr_ring_resize_multiple_bh_noprof(__VA_ARGS__))
static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
void *ptr;
if (destroy)
while ((ptr = ptr_ring_consume(r)))
destroy(ptr);
kvfree(r->queue);
}
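/*
 * Illustrative sketch, not part of the original header: the full lifecycle
 * of a ring. Assumes the producer-side helpers (ptr_ring_produce()) defined
 * earlier in this header; entries must be non-NULL pointers.
 */
static inline int ptr_ring_example_roundtrip(void *payload)
{
	struct ptr_ring ring;
	int err;

	err = ptr_ring_init(&ring, 64, GFP_KERNEL);
	if (err)
		return err;

	/* ptr_ring_produce() returns -ENOSPC once the ring is full. */
	if (!ptr_ring_produce(&ring, payload))
		WARN_ON(ptr_ring_consume(&ring) != payload);

	/* No destructor needed here: the ring is already empty again. */
	ptr_ring_cleanup(&ring, NULL);
	return 0;
}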
#endif /* _LINUX_PTR_RING_H */
// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/swap_state.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
* Swap reorganised 29.12.95, Stephen Tweedie
*
* Rewritten to use page cache, (C) 1998 Stephen Tweedie
*/
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap_table.h"
#include "swap.h"
/*
* swapper_space is a fiction, retained to simplify the path through
* vmscan's shrink_folio_list.
*/
static const struct address_space_operations swap_aops = {
.dirty_folio = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
.migrate_folio = migrate_folio,
#endif
};
/* Set swap_space as read only as swap cache is handled by swap table */
struct address_space swap_space __ro_after_init = {
.a_ops = &swap_aops,
};
static bool enable_vma_readahead __read_mostly = true;
#define SWAP_RA_ORDER_CEILING 5
#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK (~PAGE_MASK & ~SWAP_RA_HITS_MASK)
#define SWAP_RA_HITS(v) ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v) (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v) ((v) & PAGE_MASK)
#define SWAP_RA_VAL(addr, win, hits) \
(((addr) & PAGE_MASK) | \
(((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) | \
((hits) & SWAP_RA_HITS_MASK))
/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma) \
(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
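/*
 * Worked example of the encoding above (illustrative, assuming 4K pages so
 * PAGE_SHIFT == 12 and SWAP_RA_WIN_SHIFT == 6): SWAP_RA_VAL(addr, 4, 3)
 * keeps the page-aligned address in bits 12 and up, the window (4) in
 * bits 6-11 and the hit count (3) in bits 0-5, so SWAP_RA_ADDR(),
 * SWAP_RA_WIN() and SWAP_RA_HITS() recover the three fields independently.
 */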
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
printk("Free swap = %ldkB\n", K(get_nr_swap_pages()));
printk("Total swap = %lukB\n", K(total_swap_pages));
}
/**
* swap_cache_get_folio - Looks up a folio in the swap cache.
* @entry: swap entry used for the lookup.
*
* A found folio will be returned unlocked and with its refcount increased.
*
* Context: Caller must ensure @entry is valid and protect the swap device
* with reference count or locks.
* Return: Returns the found folio on success, NULL otherwise. The caller
* must lock and check if the folio still matches the swap entry before
* use (e.g., folio_matches_swap_entry).
*/
struct folio *swap_cache_get_folio(swp_entry_t entry)
{
unsigned long swp_tb;
struct folio *folio;
for (;;) {
swp_tb = swap_table_get(__swap_entry_to_cluster(entry),
swp_cluster_offset(entry));
if (!swp_tb_is_folio(swp_tb))
return NULL;
folio = swp_tb_to_folio(swp_tb);
if (likely(folio_try_get(folio)))
return folio;
}
return NULL;
}
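/*
 * Illustrative usage sketch, not part of this file: callers are expected to
 * lock the returned folio and re-check it against the entry, since the
 * lookup above runs without the folio lock. Assumes folio_matches_swap_entry()
 * as referenced in the comment above; this helper's name is hypothetical.
 */
static inline struct folio *example_swap_cache_lookup_locked(swp_entry_t entry)
{
	struct folio *folio = swap_cache_get_folio(entry);

	if (!folio)
		return NULL;
	folio_lock(folio);
	if (!folio_matches_swap_entry(folio, entry)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	return folio;	/* locked, reference held */
}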
/**
* swap_cache_get_shadow - Looks up a shadow in the swap cache.
* @entry: swap entry used for the lookup.
*
* Context: Caller must ensure @entry is valid and protect the swap device
* with reference count or locks.
* Return: Returns either NULL or an XA_VALUE (shadow).
*/
void *swap_cache_get_shadow(swp_entry_t entry)
{
unsigned long swp_tb;
swp_tb = swap_table_get(__swap_entry_to_cluster(entry),
swp_cluster_offset(entry));
if (swp_tb_is_shadow(swp_tb))
return swp_tb_to_shadow(swp_tb);
return NULL;
}
/**
* swap_cache_add_folio - Add a folio into the swap cache.
* @folio: The folio to be added.
* @entry: The swap entry corresponding to the folio.
* @shadowp: If a shadow is found, return the shadow.
*
* Context: Caller must ensure @entry is valid and protect the swap device
* with reference count or locks.
* The caller also needs to update the corresponding swap_map slots with
* SWAP_HAS_CACHE bit to avoid race or conflict.
*/
void swap_cache_add_folio(struct folio *folio, swp_entry_t entry, void **shadowp)
{
void *shadow = NULL;
unsigned long old_tb, new_tb;
struct swap_cluster_info *ci;
unsigned int ci_start, ci_off, ci_end;
unsigned long nr_pages = folio_nr_pages(folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_swapbacked(folio), folio);
new_tb = folio_to_swp_tb(folio);
ci_start = swp_cluster_offset(entry);
ci_end = ci_start + nr_pages;
ci_off = ci_start;
ci = swap_cluster_lock(__swap_entry_to_info(entry), swp_offset(entry));
do {
old_tb = __swap_table_xchg(ci, ci_off, new_tb);
WARN_ON_ONCE(swp_tb_is_folio(old_tb));
if (swp_tb_is_shadow(old_tb))
shadow = swp_tb_to_shadow(old_tb);
} while (++ci_off < ci_end);
folio_ref_add(folio, nr_pages);
folio_set_swapcache(folio);
folio->swap = entry;
swap_cluster_unlock(ci);
node_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr_pages);
if (shadowp)
*shadowp = shadow;
}
/**
* __swap_cache_del_folio - Removes a folio from the swap cache.
* @ci: The locked swap cluster.
* @folio: The folio.
* @entry: The first swap entry that the folio corresponds to.
* @shadow: shadow value to be filled in the swap cache.
*
* Removes a folio from the swap cache and fills a shadow in place.
* This won't put the folio's refcount. The caller has to do that.
*
* Context: Caller must ensure the folio is locked and in the swap cache
* using the index of @entry, and lock the cluster that holds the entries.
*/
void __swap_cache_del_folio(struct swap_cluster_info *ci, struct folio *folio,
swp_entry_t entry, void *shadow)
{
unsigned long old_tb, new_tb;
unsigned int ci_start, ci_off, ci_end;
unsigned long nr_pages = folio_nr_pages(folio);
VM_WARN_ON_ONCE(__swap_entry_to_cluster(entry) != ci);
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_writeback(folio), folio);
new_tb = shadow_swp_to_tb(shadow);
ci_start = swp_cluster_offset(entry);
ci_end = ci_start + nr_pages;
ci_off = ci_start;
do {
/* If shadow is NULL, we set an empty shadow */
old_tb = __swap_table_xchg(ci, ci_off, new_tb);
WARN_ON_ONCE(!swp_tb_is_folio(old_tb) ||
swp_tb_to_folio(old_tb) != folio);
} while (++ci_off < ci_end);
folio->swap.val = 0;
folio_clear_swapcache(folio);
node_stat_mod_folio(folio, NR_FILE_PAGES, -nr_pages);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr_pages);
}
/**
* swap_cache_del_folio - Removes a folio from the swap cache.
* @folio: The folio.
*
* Same as __swap_cache_del_folio, but handles lock and refcount. The
* caller must ensure the folio is either clean or has a swap count
* equal to zero, or it may cause data loss.
*
* Context: Caller must ensure the folio is locked and in the swap cache.
*/
void swap_cache_del_folio(struct folio *folio)
{
struct swap_cluster_info *ci;
swp_entry_t entry = folio->swap;
ci = swap_cluster_lock(__swap_entry_to_info(entry), swp_offset(entry));
__swap_cache_del_folio(ci, folio, entry, NULL);
swap_cluster_unlock(ci);
put_swap_folio(folio, entry);
folio_ref_sub(folio, folio_nr_pages(folio));
}
/**
* __swap_cache_replace_folio - Replace a folio in the swap cache.
* @ci: The locked swap cluster.
* @old: The old folio to be replaced.
* @new: The new folio.
*
* Replace an existing folio in the swap cache with a new folio. The
* caller is responsible for setting up the new folio's flag and swap
* entries. Replacement will take the new folio's swap entry value as
* the starting offset to override all slots covered by the new folio.
*
* Context: Caller must ensure both folios are locked, and lock the
* cluster that holds the old folio to be replaced.
*/
void __swap_cache_replace_folio(struct swap_cluster_info *ci,
struct folio *old, struct folio *new)
{
swp_entry_t entry = new->swap;
unsigned long nr_pages = folio_nr_pages(new);
unsigned int ci_off = swp_cluster_offset(entry);
unsigned int ci_end = ci_off + nr_pages;
unsigned long old_tb, new_tb;
VM_WARN_ON_ONCE(!folio_test_swapcache(old) || !folio_test_swapcache(new));
VM_WARN_ON_ONCE(!folio_test_locked(old) || !folio_test_locked(new));
VM_WARN_ON_ONCE(!entry.val);
/* Swap cache still stores N entries instead of a high-order entry */
new_tb = folio_to_swp_tb(new);
do {
old_tb = __swap_table_xchg(ci, ci_off, new_tb);
WARN_ON_ONCE(!swp_tb_is_folio(old_tb) || swp_tb_to_folio(old_tb) != old);
} while (++ci_off < ci_end);
/*
* If the old folio is partially replaced (e.g., splitting a large
* folio, the old folio is shrunk, and new split sub folios replace
* the shrunk part), ensure the new folio doesn't overlap it.
*/
if (IS_ENABLED(CONFIG_DEBUG_VM) &&
folio_order(old) != folio_order(new)) {
ci_off = swp_cluster_offset(old->swap);
ci_end = ci_off + folio_nr_pages(old);
while (ci_off++ < ci_end)
WARN_ON_ONCE(swp_tb_to_folio(__swap_table_get(ci, ci_off)) != old);
}
}
/**
* __swap_cache_clear_shadow - Clears a set of shadows in the swap cache.
* @entry: The starting index entry.
* @nr_ents: How many slots need to be cleared.
*
* Context: Caller must ensure the range is valid, all in one single cluster,
* not occupied by any folio, and lock the cluster.
*/
void __swap_cache_clear_shadow(swp_entry_t entry, int nr_ents)
{
struct swap_cluster_info *ci = __swap_entry_to_cluster(entry);
unsigned int ci_off = swp_cluster_offset(entry), ci_end;
unsigned long old;
ci_end = ci_off + nr_ents;
do {
old = __swap_table_xchg(ci, ci_off, null_to_swp_tb());
WARN_ON_ONCE(swp_tb_is_folio(old));
} while (++ci_off < ci_end);
}
/*
* If we are the only user, then try to free up the swap cache.
*
* It's ok to check the swapcache flag without the folio lock
* here because we are going to recheck again inside
* folio_free_swap() _with_ the lock.
* - Marcelo
*/
void free_swap_cache(struct folio *folio)
{
if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
folio_trylock(folio)) {
folio_free_swap(folio);
folio_unlock(folio);
}
}
/*
* Freeing a folio and also freeing any swap cache associated with
* this folio if it is the last user.
*/
void free_folio_and_swap_cache(struct folio *folio)
{
free_swap_cache(folio);
if (!is_huge_zero_folio(folio))
folio_put(folio);
}
/*
* Passed an array of pages, drop them all from swapcache and then release
* them. They are removed from the LRU and freed if this is their last use.
*/
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
struct folio_batch folios;
unsigned int refs[PAGEVEC_SIZE];
folio_batch_init(&folios);
for (int i = 0; i < nr; i++) {
struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
free_swap_cache(folio);
refs[folios.nr] = 1;
if (unlikely(encoded_page_flags(pages[i]) &
ENCODED_PAGE_BIT_NR_PAGES_NEXT))
refs[folios.nr] = encoded_nr_pages(pages[++i]);
if (folio_batch_add(&folios, folio) == 0)
folios_put_refs(&folios, refs);
}
if (folios.nr)
folios_put_refs(&folios, refs);
}
static inline bool swap_use_vma_readahead(void)
{
return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
/**
* swap_update_readahead - Update the readahead statistics of VMA or globally.
* @folio: the swap cache folio that just got hit.
* @vma: the VMA that should be updated, could be NULL for global update.
* @addr: the addr that triggered the swapin, ignored if @vma is NULL.
*/
void swap_update_readahead(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr)
{
bool readahead, vma_ra = swap_use_vma_readahead();
/*
* At the moment, we don't support PG_readahead for anon THP
* so let's bail out rather than confusing the readahead stat.
*/
if (unlikely(folio_test_large(folio)))
return;
readahead = folio_test_clear_readahead(folio);
if (vma && vma_ra) {
unsigned long ra_val;
int win, hits;
ra_val = GET_SWAP_RA_VAL(vma);
win = SWAP_RA_WIN(ra_val);
hits = SWAP_RA_HITS(ra_val);
if (readahead)
hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
atomic_long_set(&vma->swap_readahead_info,
SWAP_RA_VAL(addr, win, hits));
}
if (readahead) {
count_vm_event(SWAP_RA_HIT);
if (!vma || !vma_ra)
atomic_inc(&swapin_readahead_hits);
}
}
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
bool skip_if_exists)
{
struct swap_info_struct *si = __swap_entry_to_info(entry);
struct folio *folio;
struct folio *new_folio = NULL;
struct folio *result = NULL;
void *shadow = NULL;
*new_page_allocated = false;
for (;;) {
int err;
/*
* Check the swap cache first, if a cached folio is found,
* return it unlocked. The caller will lock and check it.
*/
folio = swap_cache_get_folio(entry);
if (folio)
goto got_folio;
/*
* Just skip read ahead for unused swap slot.
*/
if (!swap_entry_swapped(si, entry))
goto put_and_return;
/*
* Get a new folio to read into from swap. Allocate it now if
* new_folio does not exist yet, before marking swap_map
* SWAP_HAS_CACHE, when -EEXIST will cause any racers to loop
* around until we add it to the cache.
*/
if (!new_folio) {
new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
if (!new_folio)
goto put_and_return;
}
/*
* Swap entry may have been freed since our caller observed it.
*/
err = swapcache_prepare(entry, 1);
if (!err)
break;
else if (err != -EEXIST)
goto put_and_return;
/*
* Protect against a recursive call to __read_swap_cache_async()
* on the same entry waiting forever here because SWAP_HAS_CACHE
* is set but the folio is not the swap cache yet. This can
* happen today if mem_cgroup_swapin_charge_folio() below
* triggers reclaim through zswap, which may call
* __read_swap_cache_async() in the writeback path.
*/
if (skip_if_exists)
goto put_and_return;
/*
* We might race against __swap_cache_del_folio(), and
* stumble across a swap_map entry whose SWAP_HAS_CACHE
* has not yet been cleared. Or race against another
* __read_swap_cache_async(), which has set SWAP_HAS_CACHE
* in swap_map, but not yet added its folio to swap cache.
*/
schedule_timeout_uninterruptible(1);
}
/*
* The swap entry is ours to swap in. Prepare the new folio.
*/
__folio_set_locked(new_folio);
__folio_set_swapbacked(new_folio);
if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
goto fail_unlock;
swap_cache_add_folio(new_folio, entry, &shadow);
memcg1_swapin(entry, 1);
if (shadow)
workingset_refault(new_folio, shadow);
/* Caller will initiate read into locked new_folio */
folio_add_lru(new_folio);
*new_page_allocated = true;
folio = new_folio;
got_folio:
result = folio;
goto put_and_return;
fail_unlock:
put_swap_folio(new_folio, entry);
folio_unlock(new_folio);
put_and_return:
if (!(*new_page_allocated) && new_folio)
folio_put(new_folio);
return result;
}
/*
* Locate a page of swap in physical memory, reserving swap cache space
* and reading the disk if it is not already cached.
* A failure return means that either the page allocation failed or that
* the swap entry is no longer in use.
*
* get/put_swap_device() aren't needed to call this function, because
* __read_swap_cache_async() call them and swap_read_folio() holds the
* swap cache folio lock.
*/
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
struct swap_iocb **plug)
{
struct swap_info_struct *si;
bool page_allocated;
struct mempolicy *mpol;
pgoff_t ilx;
struct folio *folio;
si = get_swap_device(entry);
if (!si)
return NULL;
mpol = get_vma_policy(vma, addr, 0, &ilx);
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
mpol_cond_put(mpol);
if (page_allocated)
swap_read_folio(folio, plug);
put_swap_device(si);
return folio;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
unsigned long offset,
int hits,
int max_pages,
int prev_win)
{
unsigned int pages, last_ra;
/*
* This heuristic has been found to work well on both sequential and
* random loads, swapping to hard disk or to SSD: please don't ask
* what the "+ 2" means, it just happens to work well, that's all.
*/
pages = hits + 2;
if (pages == 2) {
/*
* We can have no readahead hits to judge by: but must not get
* stuck here forever, so check for an adjacent offset instead
* (and don't even bother to check whether swap type is same).
*/
if (offset != prev_offset + 1 && offset != prev_offset - 1)
pages = 1;
} else {
unsigned int roundup = 4;
while (roundup < pages)
roundup <<= 1;
pages = roundup;
}
if (pages > max_pages)
pages = max_pages;
/* Don't shrink readahead too fast */
last_ra = prev_win / 2;
if (pages < last_ra)
pages = last_ra;
return pages;
}
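/*
 * Worked example of the heuristic above (illustrative): with 3 recent
 * readahead hits, pages = 3 + 2 = 5, rounded up to the next power of two
 * (8), then clamped to max_pages; the window is also never allowed to
 * shrink below half of the previous window in one step.
 */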
static unsigned long swapin_nr_pages(unsigned long offset)
{
static unsigned long prev_offset;
unsigned int hits, pages, max_pages;
static atomic_t last_readahead_pages;
max_pages = 1 << READ_ONCE(page_cluster);
if (max_pages <= 1)
return 1;
hits = atomic_xchg(&swapin_readahead_hits, 0);
pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
max_pages,
atomic_read(&last_readahead_pages));
if (!hits)
WRITE_ONCE(prev_offset, offset);
atomic_set(&last_readahead_pages, pages);
return pages;
}
/**
* swap_cluster_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
* @gfp_mask: memory allocation flags
* @mpol: NUMA memory allocation policy to be applied
* @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
*
* Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
* because it doesn't cost us any seek time. We also make sure to queue
* the 'original' request together with the readahead ones...
*
* Note: it is intentional that the same NUMA policy and interleave index
* are used for every page of the readahead: neighbouring pages on swap
* are fairly likely to have been swapped out from the same node.
*/
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx)
{
struct folio *folio;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
unsigned long mask;
struct swap_info_struct *si = __swap_entry_to_info(entry);
struct blk_plug plug;
struct swap_iocb *splug = NULL;
bool page_allocated;
mask = swapin_nr_pages(offset) - 1;
if (!mask)
goto skip;
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
end_offset = offset | mask;
if (!start_offset) /* First page is swap header. */
start_offset++;
if (end_offset >= si->max)
end_offset = si->max - 1;
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
folio = __read_swap_cache_async(
swp_entry(swp_type(entry), offset),
gfp_mask, mpol, ilx, &page_allocated, false);
if (!folio)
continue;
if (page_allocated) {
swap_read_folio(folio, &splug);
if (offset != entry_offset) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
folio_put(folio);
}
blk_finish_plug(&plug);
swap_read_unplug(splug);
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
/* The page was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
return folio;
}
static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
unsigned long *end)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long ra_val;
unsigned long faddr, prev_faddr, left, right;
unsigned int max_win, hits, prev_win, win;
max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
if (max_win == 1)
return 1;
faddr = vmf->address;
ra_val = GET_SWAP_RA_VAL(vma);
prev_faddr = SWAP_RA_ADDR(ra_val);
prev_win = SWAP_RA_WIN(ra_val);
hits = SWAP_RA_HITS(ra_val);
win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
max_win, prev_win);
atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
if (win == 1)
return 1;
if (faddr == prev_faddr + PAGE_SIZE)
left = faddr;
else if (prev_faddr == faddr + PAGE_SIZE)
left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
else
left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
right = left + (win << PAGE_SHIFT);
if ((long)left < 0)
left = 0;
*start = max3(left, vma->vm_start, faddr & PMD_MASK);
*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);
return win;
}
/**
* swap_vma_readahead - swap in pages in hope we need them soon
* @targ_entry: swap entry of the targeted memory
* @gfp_mask: memory allocation flags
* @mpol: NUMA memory allocation policy to be applied
* @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
* @vmf: fault information
*
* Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read in a few pages whose
* virtual addresses are around the fault address in the same vma.
*
* Caller must hold read mmap_lock if vmf->vma is not NULL.
*
*/
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
struct blk_plug plug;
struct swap_iocb *splug = NULL;
struct folio *folio;
pte_t *pte = NULL, pentry;
int win;
unsigned long start, end, addr;
swp_entry_t entry;
pgoff_t ilx;
bool page_allocated;
win = swap_vma_ra_win(vmf, &start, &end);
if (win == 1)
goto skip;
ilx = targ_ilx - PFN_DOWN(vmf->address - start);
blk_start_plug(&plug);
for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
struct swap_info_struct *si = NULL;
if (!pte++) {
pte = pte_offset_map(vmf->pmd, addr);
if (!pte)
break;
}
pentry = ptep_get_lockless(pte);
if (!is_swap_pte(pentry))
continue;
entry = pte_to_swp_entry(pentry);
if (unlikely(non_swap_entry(entry)))
continue;
pte_unmap(pte);
pte = NULL;
/*
* Readahead entry may come from a device that we are not
* holding a reference to, try to grab a reference, or skip.
*/
if (swp_type(entry) != swp_type(targ_entry)) {
si = get_swap_device(entry);
if (!si)
continue;
}
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
if (si)
put_swap_device(si);
if (!folio)
continue;
if (page_allocated) {
swap_read_folio(folio, &splug);
if (addr != vmf->address) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
folio_put(folio);
}
if (pte)
pte_unmap(pte);
blk_finish_plug(&plug);
swap_read_unplug(splug);
lru_add_drain();
skip:
/* The folio was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
&page_allocated, false);
if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
return folio;
}
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
* @gfp_mask: memory allocation flags
* @vmf: fault information
*
* Returns the struct folio for entry and addr, after queueing swapin.
*
* It's the main entry function for swap readahead. Depending on the
* configuration, it reads ahead blocks using either cluster-based
* (i.e. physical disk based) or vma-based (i.e. virtual address based
* on the fault address) readahead.
*/
struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct mempolicy *mpol;
pgoff_t ilx;
struct folio *folio;
mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
folio = swap_use_vma_readahead() ?
swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
mpol_cond_put(mpol);
return folio;
}
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead));
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t ret;
ret = kstrtobool(buf, &enable_vma_readahead);
if (ret)
return ret;
return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
static struct attribute *swap_attrs[] = {
&vma_ra_enabled_attr.attr,
NULL,
};
static const struct attribute_group swap_attr_group = {
.attrs = swap_attrs,
};
static int __init swap_init(void)
{
int err;
struct kobject *swap_kobj;
swap_kobj = kobject_create_and_add("swap", mm_kobj);
if (!swap_kobj) {
pr_err("failed to create swap kobject\n");
return -ENOMEM;
}
err = sysfs_create_group(swap_kobj, &swap_attr_group);
if (err) {
pr_err("failed to register swap group\n");
goto delete_obj;
}
/* Swap cache writeback is LRU based, no tags for it */
mapping_set_no_writeback_tags(&swap_space);
return 0;
delete_obj:
kobject_put(swap_kobj);
return err;
}
subsys_initcall(swap_init);
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* linux/kernel/seccomp.c
*
* Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
*
* Copyright (C) 2012 Google, Inc.
* Will Drewry <wad@chromium.org>
*
* This defines a simple but solid secure-computing facility.
*
* Mode 1 uses a fixed list of allowed system calls.
* Mode 2 allows user-defined system call filters in the form
* of Berkeley Packet Filters/Linux Socket Filters.
*/
#define pr_fmt(fmt) "seccomp: " fmt
#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <asm/syscall.h>
/* Not exposed in headers: strictly internal use only. */
#define SECCOMP_MODE_DEAD (SECCOMP_MODE_FILTER + 1)
#ifdef CONFIG_SECCOMP_FILTER
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/lockdep.h>
/*
* When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced, it had the
* wrong direction flag in the ioctl number. This is the broken one,
* which the kernel needs to keep supporting until all userspaces stop
* using the wrong command number.
*/
#define SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR SECCOMP_IOR(2, __u64)
enum notify_state {
SECCOMP_NOTIFY_INIT,
SECCOMP_NOTIFY_SENT,
SECCOMP_NOTIFY_REPLIED,
};
struct seccomp_knotif {
/* The struct pid of the task whose filter triggered the notification */
struct task_struct *task;
/* The "cookie" for this request; this is unique for this filter. */
u64 id;
/*
* The seccomp data. This pointer is valid the entire time this
* notification is active, since it comes from __seccomp_filter which
* eclipses the entire lifecycle here.
*/
const struct seccomp_data *data;
/*
* Notification states. When SECCOMP_RET_USER_NOTIF is returned, a
* struct seccomp_knotif is created and starts out in INIT. Once the
* handler reads the notification off of an FD, it transitions to SENT.
* If a signal is received the state transitions back to INIT and
* another message is sent. When the userspace handler replies, state
* transitions to REPLIED.
*/
enum notify_state state;
/* The return values, only valid when in SECCOMP_NOTIFY_REPLIED */
int error;
long val;
u32 flags;
/*
* Signals when this has changed states, such as the listener
* dying, a new seccomp addfd message, or changing to REPLIED
*/
struct completion ready;
struct list_head list;
/* outstanding addfd requests */
struct list_head addfd;
};
/**
* struct seccomp_kaddfd - container for seccomp_addfd ioctl messages
*
* @file: A reference to the file to install in the other task
* @fd: The fd number to install it at. If the fd number is -1, it means the
* installing process should allocate the fd as normal.
* @flags: The flags for the new file descriptor. At the moment, only O_CLOEXEC
* is allowed.
* @ioctl_flags: The flags used for the seccomp_addfd ioctl.
* @setfd: whether or not SECCOMP_ADDFD_FLAG_SETFD was set during notify_addfd
* @ret: The return value of the installing process. It is set to the fd num
* upon success (>= 0).
* @completion: Indicates that the installing process has completed fd
* installation, or gone away (either due to successful
* reply, or signal)
* @list: list_head for chaining seccomp_kaddfd together.
*
*/
struct seccomp_kaddfd {
struct file *file;
int fd;
unsigned int flags;
__u32 ioctl_flags;
union {
bool setfd;
/* To only be set on reply */
int ret;
};
struct completion completion;
struct list_head list;
};
/**
* struct notification - container for seccomp userspace notifications. Since
* most seccomp filters will not have notification listeners attached and this
* structure is fairly large, we store the notification-specific stuff in a
* separate structure.
*
* @requests: A semaphore that users of this notification can wait on for
* changes. Actual reads and writes are still controlled with
* filter->notify_lock.
* @flags: A set of SECCOMP_USER_NOTIF_FD_* flags.
* @next_id: The id of the next request.
* @notifications: A list of struct seccomp_knotif elements.
*/
struct notification {
atomic_t requests;
u32 flags;
u64 next_id;
struct list_head notifications;
};
#ifdef SECCOMP_ARCH_NATIVE
/**
* struct action_cache - per-filter cache of seccomp actions per
* arch/syscall pair
*
* @allow_native: A bitmap where each bit represents whether the
* filter will always allow the syscall, for the
* native architecture.
* @allow_compat: A bitmap where each bit represents whether the
* filter will always allow the syscall, for the
* compat architecture.
*/
struct action_cache {
DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
#ifdef SECCOMP_ARCH_COMPAT
DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
#endif
};
#else
struct action_cache { };
static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
const struct seccomp_data *sd)
{
return false;
}
static inline void seccomp_cache_prepare(struct seccomp_filter *sfilter)
{
}
#endif /* SECCOMP_ARCH_NATIVE */
/**
* struct seccomp_filter - container for seccomp BPF programs
*
* @refs: Reference count to manage the object lifetime.
* A filter's reference count is incremented for each directly
* attached task, once for the dependent filter, and if
* requested for the user notifier. When @refs reaches zero,
* the filter can be freed.
* @users: A filter's @users count is incremented for each directly
* attached task (filter installation, fork(), thread_sync),
* and once for the dependent filter (tracked in filter->prev).
* When it reaches zero it indicates that no direct or indirect
* users of that filter exist. No new tasks can get associated with
* this filter after reaching 0. The @users count is always smaller
* or equal to @refs. Hence, reaching 0 for @users does not mean
* the filter can be freed.
* @cache: cache of arch/syscall mappings to actions
* @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
* @wait_killable_recv: Put notifying process in killable state once the
* notification is received by the userspace listener.
* @prev: points to a previously installed, or inherited, filter
* @prog: the BPF program to evaluate
* @notif: the struct that holds all notification related information
* @notify_lock: A lock for all notification-related accesses.
* @wqh: A wait queue for poll if a notifier is in use.
*
* seccomp_filter objects are organized in a tree linked via the @prev
* pointer. For any task, it appears to be a singly-linked list starting
* with current->seccomp.filter, the most recently attached or inherited filter.
* However, multiple filters may share a @prev node, by way of fork(), which
* results in a unidirectional tree existing in memory. This is similar to
* how namespaces work.
*
* seccomp_filter objects should never be modified after being attached
* to a task_struct (other than @refs).
*/
struct seccomp_filter {
refcount_t refs;
refcount_t users;
bool log;
bool wait_killable_recv;
struct action_cache cache;
struct seccomp_filter *prev;
struct bpf_prog *prog;
struct notification *notif;
struct mutex notify_lock;
wait_queue_head_t wqh;
};
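/*
 * Example of the tree shape described above (illustrative): if a task
 * installs filter A, forks, and then parent and child each install their
 * own filter, memory holds
 *
 *	B -> A <- C
 *
 * where B is the parent's current->seccomp.filter and C is the child's;
 * each task still sees a plain singly-linked list (B->A or C->A), while
 * both lists share the @prev node A.
 */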
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
/*
* Endianness is explicitly ignored and left for BPF program authors to manage
* as per the specific architecture.
*/
static void populate_seccomp_data(struct seccomp_data *sd)
{
/*
* Instead of using current_pt_regs(), we're already doing the work
* to safely fetch "current", so just use "task" everywhere below.
*/
struct task_struct *task = current;
struct pt_regs *regs = task_pt_regs(task);
unsigned long args[6];
sd->nr = syscall_get_nr(task, regs);
sd->arch = syscall_get_arch(task);
syscall_get_arguments(task, regs, args);
sd->args[0] = args[0];
sd->args[1] = args[1];
sd->args[2] = args[2];
sd->args[3] = args[3];
sd->args[4] = args[4];
sd->args[5] = args[5];
sd->instruction_pointer = KSTK_EIP(task);
}
/**
* seccomp_check_filter - verify seccomp filter code
* @filter: filter to verify
* @flen: length of filter
*
* Takes a previously checked filter (by bpf_check_classic) and
* redirects all filter code that loads struct sk_buff data
* and related data through seccomp_bpf_load. It also
* enforces length and alignment checking of those loads.
*
* Returns 0 if the rule set is legal or -EINVAL if not.
*/
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
int pc;
for (pc = 0; pc < flen; pc++) {
struct sock_filter *ftest = &filter[pc];
u16 code = ftest->code;
u32 k = ftest->k;
switch (code) {
case BPF_LD | BPF_W | BPF_ABS:
ftest->code = BPF_LDX | BPF_W | BPF_ABS;
/* 32-bit aligned and not out of bounds. */
if (k >= sizeof(struct seccomp_data) || k & 3)
return -EINVAL;
continue;
case BPF_LD | BPF_W | BPF_LEN:
ftest->code = BPF_LD | BPF_IMM;
ftest->k = sizeof(struct seccomp_data);
continue;
case BPF_LDX | BPF_W | BPF_LEN:
ftest->code = BPF_LDX | BPF_IMM;
ftest->k = sizeof(struct seccomp_data);
continue;
/* Explicitly include allowed calls. */
case BPF_RET | BPF_K:
case BPF_RET | BPF_A:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_NEG:
case BPF_LD | BPF_IMM:
case BPF_LDX | BPF_IMM:
case BPF_MISC | BPF_TAX:
case BPF_MISC | BPF_TXA:
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
case BPF_ST:
case BPF_STX:
case BPF_JMP | BPF_JA:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
continue;
default:
return -EINVAL;
}
}
return 0;
}
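/*
 * Illustrative sketch, not part of this file: the smallest program that
 * passes the checks above is a single unconditional return. In userspace
 * an "allow everything" filter would look like:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *
 * Any load from struct seccomp_data must use BPF_LD | BPF_W | BPF_ABS with
 * a 4-byte aligned, in-bounds offset, otherwise seccomp_check_filter()
 * rejects the program with -EINVAL.
 */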
#ifdef SECCOMP_ARCH_NATIVE
static inline bool seccomp_cache_check_allow_bitmap(const void *bitmap,
size_t bitmap_size,
int syscall_nr)
{
if (unlikely(syscall_nr < 0 || syscall_nr >= bitmap_size))
return false;
syscall_nr = array_index_nospec(syscall_nr, bitmap_size);
return test_bit(syscall_nr, bitmap);
}
/**
* seccomp_cache_check_allow - lookup seccomp cache
* @sfilter: The seccomp filter
* @sd: The seccomp data to lookup the cache with
*
* Returns true if the seccomp_data is cached and allowed.
*/
static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
const struct seccomp_data *sd)
{
int syscall_nr = sd->nr;
const struct action_cache *cache = &sfilter->cache;
#ifndef SECCOMP_ARCH_COMPAT
/* A native-only architecture doesn't need to check sd->arch. */
return seccomp_cache_check_allow_bitmap(cache->allow_native,
SECCOMP_ARCH_NATIVE_NR,
syscall_nr);
#else
if (likely(sd->arch == SECCOMP_ARCH_NATIVE))
return seccomp_cache_check_allow_bitmap(cache->allow_native,
SECCOMP_ARCH_NATIVE_NR,
syscall_nr);
if (likely(sd->arch == SECCOMP_ARCH_COMPAT))
return seccomp_cache_check_allow_bitmap(cache->allow_compat,
SECCOMP_ARCH_COMPAT_NR,
syscall_nr);
#endif /* SECCOMP_ARCH_COMPAT */
WARN_ON_ONCE(true);
return false;
}
#endif /* SECCOMP_ARCH_NATIVE */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
/**
* seccomp_run_filters - evaluates all seccomp filters against @sd
* @sd: optional seccomp data to be passed to filters
* @match: stores struct seccomp_filter that resulted in the return value,
* unless filter returned SECCOMP_RET_ALLOW, in which case it will
* be unchanged.
*
* Returns valid seccomp BPF response codes.
*/
static u32 seccomp_run_filters(const struct seccomp_data *sd,
struct seccomp_filter **match)
{
u32 ret = SECCOMP_RET_ALLOW;
/* Make sure cross-thread synced filter points somewhere sane. */
struct seccomp_filter *f =
READ_ONCE(current->seccomp.filter);
/* Ensure unexpected behavior doesn't result in failing open. */
if (WARN_ON(f == NULL))
return SECCOMP_RET_KILL_PROCESS;
if (seccomp_cache_check_allow(f, sd))
return SECCOMP_RET_ALLOW;
/*
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
*/
for (; f; f = f->prev) {
u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);
if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
ret = cur_ret;
*match = f;
}
}
return ret;
}
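/*
 * Worked example of the precedence above (illustrative): with two filters
 * installed, one returning SECCOMP_RET_ERRNO and the other
 * SECCOMP_RET_ALLOW, the ERRNO action is reported because its value,
 * masked with SECCOMP_RET_ACTION_FULL, is numerically lower than ALLOW.
 */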
#endif /* CONFIG_SECCOMP_FILTER */
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
assert_spin_locked(&current->sighand->siglock);
if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
return false;
return true;
}
void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
static inline void seccomp_assign_mode(struct task_struct *task,
unsigned long seccomp_mode,
unsigned long flags)
{
assert_spin_locked(&task->sighand->siglock);
task->seccomp.mode = seccomp_mode;
/*
* Make sure SYSCALL_WORK_SECCOMP cannot be set before the mode (and
* filter) is set.
*/
smp_mb__before_atomic();
/* Assume default seccomp processes want spec flaw mitigation. */
if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
arch_seccomp_spec_mitigate(task);
set_task_syscall_work(task, SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
struct seccomp_filter *child)
{
/* NULL is the root ancestor. */
if (parent == NULL)
return 1;
for (; child; child = child->prev)
if (child == parent)
return 1;
return 0;
}
/**
* seccomp_can_sync_threads: checks if all threads can be synchronized
*
* Expects sighand and cred_guard_mutex locks to be held.
*
* Returns 0 on success, -ve on error, or the pid of a thread which was
* either not in the correct seccomp mode or did not have an ancestral
* seccomp filter.
*/
static inline pid_t seccomp_can_sync_threads(void)
{
struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
assert_spin_locked(&current->sighand->siglock);
/* Validate all threads being eligible for synchronization. */
caller = current;
for_each_thread(caller, thread) {
pid_t failed;
/* Skip current, since it is initiating the sync. */
if (thread == caller)
continue;
/* Skip exited threads. */
if (thread->flags & PF_EXITING)
continue;
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
(thread->seccomp.mode == SECCOMP_MODE_FILTER &&
is_ancestor(thread->seccomp.filter,
caller->seccomp.filter)))
continue;
/* Return the first thread that cannot be synchronized. */
failed = task_pid_vnr(thread);
/* If the pid cannot be resolved, then return -ESRCH */
if (WARN_ON(failed == 0))
failed = -ESRCH;
return failed;
}
return 0;
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
if (filter) {
bpf_prog_destroy(filter->prog);
kfree(filter);
}
}
static void __seccomp_filter_orphan(struct seccomp_filter *orig)
{
while (orig && refcount_dec_and_test(&orig->users)) {
if (waitqueue_active(&orig->wqh))
wake_up_poll(&orig->wqh, EPOLLHUP);
orig = orig->prev;
}
}
static void __put_seccomp_filter(struct seccomp_filter *orig)
{
/* Clean up single-reference branches iteratively. */
while (orig && refcount_dec_and_test(&orig->refs)) {
struct seccomp_filter *freeme = orig;
orig = orig->prev;
seccomp_filter_free(freeme);
}
}
static void __seccomp_filter_release(struct seccomp_filter *orig)
{
/* Notify about any unused filters in the task's former filter tree. */
__seccomp_filter_orphan(orig);
/* Finally drop all references to the task's former tree. */
__put_seccomp_filter(orig);
}
/**
* seccomp_filter_release - Detach the task from its filter tree,
* drop its reference count, and notify
* about unused filters
*
* @tsk: task the filter should be released from.
*
* This function should only be called when the task is exiting as
* it detaches it from its filter tree. PF_EXITING has to be set
* for the task.
*/
void seccomp_filter_release(struct task_struct *tsk)
{
struct seccomp_filter *orig;
if (WARN_ON((tsk->flags & PF_EXITING) == 0))
return;
if (READ_ONCE(tsk->seccomp.filter) == NULL)
return;
spin_lock_irq(&tsk->sighand->siglock);
orig = tsk->seccomp.filter;
/* Detach task from its filter tree. */
tsk->seccomp.filter = NULL;
spin_unlock_irq(&tsk->sighand->siglock);
__seccomp_filter_release(orig);
}
/**
* seccomp_sync_threads: sets all threads to use current's filter
*
* @flags: SECCOMP_FILTER_FLAG_* flags to set during sync.
*
* Expects sighand and cred_guard_mutex locks to be held, and for
* seccomp_can_sync_threads() to have returned success already
* without dropping the locks.
*
*/
static inline void seccomp_sync_threads(unsigned long flags)
{
struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
assert_spin_locked(&current->sighand->siglock);
/*
* Don't touch any of the threads if the process is being killed.
* This allows for a lockless check in seccomp_filter_release.
*/
if (current->signal->flags & SIGNAL_GROUP_EXIT)
return;
/* Synchronize all threads. */
caller = current;
for_each_thread(caller, thread) {
/* Skip current, since it needs no changes. */
if (thread == caller)
continue;
/*
* Skip exited threads. seccomp_filter_release could have
* been already called for this task.
*/
if (thread->flags & PF_EXITING)
continue;
/* Get a task reference for the new leaf node. */
get_seccomp_filter(caller);
/*
* Drop the task reference to the shared ancestor since
* current's path will hold a reference. (This also
* allows a put before the assignment.)
*/
__seccomp_filter_release(thread->seccomp.filter);
/* Make our new filter tree visible. */
smp_store_release(&thread->seccomp.filter,
caller->seccomp.filter);
atomic_set(&thread->seccomp.filter_count,
atomic_read(&caller->seccomp.filter_count));
/*
* Don't let an unprivileged task work around
* the no_new_privs restriction by creating
* a thread that sets it up, enters seccomp,
* then dies.
*/
if (task_no_new_privs(caller))
task_set_no_new_privs(thread);
/*
* Opt the other thread into seccomp if needed.
* As threads are considered to be trust-realm
* equivalent (see ptrace_may_access), it is safe to
* allow one thread to transition the other.
*/
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
flags);
}
}
/**
* seccomp_prepare_filter: Prepares a seccomp filter for use.
* @fprog: BPF program to install
*
* Returns filter on success or an ERR_PTR on failure.
*/
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
struct seccomp_filter *sfilter;
int ret;
const bool save_orig =
#if defined(CONFIG_CHECKPOINT_RESTORE) || defined(SECCOMP_ARCH_NATIVE)
true;
#else
false;
#endif
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return ERR_PTR(-EINVAL);
BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
/*
* Installing a seccomp filter requires that the task has
* CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
* This avoids scenarios where unprivileged tasks can affect the
* behavior of privileged children.
*/
if (!task_no_new_privs(current) &&
!ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/* Allocate a new seccomp_filter */
sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
if (!sfilter)
return ERR_PTR(-ENOMEM);
mutex_init(&sfilter->notify_lock);
ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
seccomp_check_filter, save_orig);
if (ret < 0) {
kfree(sfilter);
return ERR_PTR(ret);
}
refcount_set(&sfilter->refs, 1);
refcount_set(&sfilter->users, 1);
init_waitqueue_head(&sfilter->wqh);
return sfilter;
}
/**
* seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
* @user_filter: pointer to the user data containing a sock_fprog.
*
* Returns the prepared filter on success, or an ERR_PTR on failure.
*/
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
struct sock_fprog fprog;
struct seccomp_filter *filter = ERR_PTR(-EFAULT);
#ifdef CONFIG_COMPAT
if (in_compat_syscall()) {
struct compat_sock_fprog fprog32;
if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
goto out;
fprog.len = fprog32.len;
fprog.filter = compat_ptr(fprog32.filter);
} else /* falls through to the if below. */
#endif
if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
goto out;
filter = seccomp_prepare_filter(&fprog);
out:
return filter;
}
#ifdef SECCOMP_ARCH_NATIVE
static bool seccomp_uprobe_exception(struct seccomp_data *sd)
{
#if defined __NR_uretprobe || defined __NR_uprobe
#ifdef SECCOMP_ARCH_COMPAT
if (sd->arch == SECCOMP_ARCH_NATIVE)
#endif
{
#ifdef __NR_uretprobe
if (sd->nr == __NR_uretprobe)
return true;
#endif
#ifdef __NR_uprobe
if (sd->nr == __NR_uprobe)
return true;
#endif
}
#endif
return false;
}
/**
* seccomp_is_const_allow - check if filter is constant allow with given data
* @fprog: The BPF programs
* @sd: The seccomp data to check against, only syscall number and arch
* number are considered constant.
*/
static bool seccomp_is_const_allow(struct sock_fprog_kern *fprog,
struct seccomp_data *sd)
{
unsigned int reg_value = 0;
unsigned int pc;
bool op_res;
if (WARN_ON_ONCE(!fprog))
return false;
/* Our single exception to filtering. */
if (seccomp_uprobe_exception(sd))
return true;
for (pc = 0; pc < fprog->len; pc++) {
struct sock_filter *insn = &fprog->filter[pc];
u16 code = insn->code;
u32 k = insn->k;
switch (code) {
case BPF_LD | BPF_W | BPF_ABS:
switch (k) {
case offsetof(struct seccomp_data, nr):
reg_value = sd->nr;
break;
case offsetof(struct seccomp_data, arch):
reg_value = sd->arch;
break;
default:
/* can't optimize (non-constant value load) */
return false;
}
break;
case BPF_RET | BPF_K:
/* reached return with constant values only, check allow */
return k == SECCOMP_RET_ALLOW;
case BPF_JMP | BPF_JA:
pc += insn->k;
break;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JSET | BPF_K:
switch (BPF_OP(code)) {
case BPF_JEQ:
op_res = reg_value == k;
break;
case BPF_JGE:
op_res = reg_value >= k;
break;
case BPF_JGT:
op_res = reg_value > k;
break;
case BPF_JSET:
op_res = !!(reg_value & k);
break;
default:
/* can't optimize (unknown jump) */
return false;
}
pc += op_res ? insn->jt : insn->jf;
break;
case BPF_ALU | BPF_AND | BPF_K:
reg_value &= k;
break;
default:
/* can't optimize (unknown insn) */
return false;
}
}
/* ran off the end of the filter?! */
WARN_ON(1);
return false;
}
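/*
 * Worked example for the emulation above (illustrative): a filter such as
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
 *	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read, 0, 1),
 *	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
 *
 * only loads constant fields (nr), so the emulation can prove it returns
 * ALLOW for __NR_read and something else for every other syscall number;
 * only the __NR_read bit stays set in the always-allow bitmap.
 */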
static void seccomp_cache_prepare_bitmap(struct seccomp_filter *sfilter,
void *bitmap, const void *bitmap_prev,
size_t bitmap_size, int arch)
{
struct sock_fprog_kern *fprog = sfilter->prog->orig_prog;
struct seccomp_data sd;
int nr;
if (bitmap_prev) {
/* The new filter must be as restrictive as the last. */
bitmap_copy(bitmap, bitmap_prev, bitmap_size);
} else {
/* Before any filters, all syscalls are always allowed. */
bitmap_fill(bitmap, bitmap_size);
}
for (nr = 0; nr < bitmap_size; nr++) {
/* No bitmap change: not a cacheable action. */
if (!test_bit(nr, bitmap))
continue;
sd.nr = nr;
sd.arch = arch;
/* No bitmap change: continue to always allow. */
if (seccomp_is_const_allow(fprog, &sd))
continue;
/*
* Not a cacheable action: always run filters.
* atomic clear_bit() not needed, filter not visible yet.
*/
__clear_bit(nr, bitmap);
}
}
/**
* seccomp_cache_prepare - emulate the filter to find cacheable syscalls
* @sfilter: The seccomp filter
*/
static void seccomp_cache_prepare(struct seccomp_filter *sfilter)
{
struct action_cache *cache = &sfilter->cache;
const struct action_cache *cache_prev =
sfilter->prev ? &sfilter->prev->cache : NULL;
seccomp_cache_prepare_bitmap(sfilter, cache->allow_native,
cache_prev ? cache_prev->allow_native : NULL,
SECCOMP_ARCH_NATIVE_NR,
SECCOMP_ARCH_NATIVE);
#ifdef SECCOMP_ARCH_COMPAT
seccomp_cache_prepare_bitmap(sfilter, cache->allow_compat,
cache_prev ? cache_prev->allow_compat : NULL,
SECCOMP_ARCH_COMPAT_NR,
SECCOMP_ARCH_COMPAT);
#endif /* SECCOMP_ARCH_COMPAT */
}
#endif /* SECCOMP_ARCH_NATIVE */
/**
* seccomp_attach_filter: validate and attach filter
* @flags: flags to change filter behavior
* @filter: seccomp filter to add to the current process
*
* Caller must be holding current->sighand->siglock lock.
*
* Returns 0 on success, -ve on error, or
* - in TSYNC mode: the pid of a thread which was either not in the correct
* seccomp mode or did not have an ancestral seccomp filter
* - in NEW_LISTENER mode: the fd of the new listener
*/
static long seccomp_attach_filter(unsigned int flags,
struct seccomp_filter *filter)
{
unsigned long total_insns;
struct seccomp_filter *walker;
assert_spin_locked(&current->sighand->siglock);
/* Validate resulting filter length. */
total_insns = filter->prog->len;
for (walker = current->seccomp.filter; walker; walker = walker->prev)
total_insns += walker->prog->len + 4; /* 4 instr penalty */
if (total_insns > MAX_INSNS_PER_PATH)
return -ENOMEM;
/* If thread sync has been requested, check that it is possible. */
if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
int ret;
ret = seccomp_can_sync_threads();
if (ret) {
if (flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
return -ESRCH;
else
return ret;
}
}
/* Set log flag, if present. */
if (flags & SECCOMP_FILTER_FLAG_LOG)
filter->log = true;
/* Set wait killable flag, if present. */
if (flags & SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV)
filter->wait_killable_recv = true;
/*
* If there is an existing filter, make it the prev and don't drop its
* task reference.
*/
filter->prev = current->seccomp.filter;
seccomp_cache_prepare(filter);
current->seccomp.filter = filter;
atomic_inc(&current->seccomp.filter_count);
/* Now that the new filter is in place, synchronize to all threads. */
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
seccomp_sync_threads(flags);
return 0;
}
static void __get_seccomp_filter(struct seccomp_filter *filter)
{
refcount_inc(&filter->refs);
}
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
if (!orig)
return;
__get_seccomp_filter(orig);
refcount_inc(&orig->users);
}
#endif /* CONFIG_SECCOMP_FILTER */
/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS (1 << 0)
#define SECCOMP_LOG_KILL_THREAD (1 << 1)
#define SECCOMP_LOG_TRAP (1 << 2)
#define SECCOMP_LOG_ERRNO (1 << 3)
#define SECCOMP_LOG_TRACE (1 << 4)
#define SECCOMP_LOG_LOG (1 << 5)
#define SECCOMP_LOG_ALLOW (1 << 6)
#define SECCOMP_LOG_USER_NOTIF (1 << 7)
static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
SECCOMP_LOG_KILL_THREAD |
SECCOMP_LOG_TRAP |
SECCOMP_LOG_ERRNO |
SECCOMP_LOG_USER_NOTIF |
SECCOMP_LOG_TRACE |
SECCOMP_LOG_LOG;
static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
bool requested)
{
bool log = false;
switch (action) {
case SECCOMP_RET_ALLOW:
break;
case SECCOMP_RET_TRAP:
log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
break;
case SECCOMP_RET_ERRNO:
log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
break;
case SECCOMP_RET_TRACE:
log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
break;
case SECCOMP_RET_USER_NOTIF:
log = requested && seccomp_actions_logged & SECCOMP_LOG_USER_NOTIF;
break;
case SECCOMP_RET_LOG:
log = seccomp_actions_logged & SECCOMP_LOG_LOG;
break;
case SECCOMP_RET_KILL_THREAD:
log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
break;
case SECCOMP_RET_KILL_PROCESS:
default:
log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
}
/*
* Emit an audit message when the action is RET_KILL_*, RET_LOG, or the
* FILTER_FLAG_LOG bit was set. The admin has the ability to silence
* any action from being logged by removing the action name from the
* seccomp_actions_logged sysctl.
*/
if (!log)
return;
audit_seccomp(syscall, signr, action);
}
/*
* Secure computing mode 1 allows only read/write/exit/sigreturn.
* To be fully secure this must be combined with rlimit
* to limit the stack allocations too.
*/
static const int mode1_syscalls[] = {
__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
#ifdef __NR_uretprobe
__NR_uretprobe,
#endif
#ifdef __NR_uprobe
__NR_uprobe,
#endif
-1, /* negative terminated */
};
static void __secure_computing_strict(int this_syscall)
{
const int *allowed_syscalls = mode1_syscalls;
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
allowed_syscalls = get_compat_mode1_syscalls();
#endif
do {
if (*allowed_syscalls == this_syscall)
return;
} while (*++allowed_syscalls != -1);
#ifdef SECCOMP_DEBUG
dump_stack();
#endif
current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
do_exit(SIGKILL);
}
#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
int mode = current->seccomp.mode;
if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return;
if (mode == SECCOMP_MODE_DISABLED)
return;
else if (mode == SECCOMP_MODE_STRICT)
__secure_computing_strict(this_syscall);
else
BUG();
}
int __secure_computing(void)
{
int this_syscall = syscall_get_nr(current, current_pt_regs());
secure_computing_strict(this_syscall);
return 0;
}
#else
#ifdef CONFIG_SECCOMP_FILTER
static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
{
/*
* Note: overflow is ok here, the id just needs to be unique per
* filter.
*/
lockdep_assert_held(&filter->notify_lock);
return filter->notif->next_id++;
}
static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_knotif *n)
{
int fd;
/*
* Remove the notification, and reset the list pointers, indicating
* that it has been handled.
*/
list_del_init(&addfd->list);
if (!addfd->setfd)
fd = receive_fd(addfd->file, NULL, addfd->flags);
else
fd = receive_fd_replace(addfd->fd, addfd->file, addfd->flags);
addfd->ret = fd;
if (addfd->ioctl_flags & SECCOMP_ADDFD_FLAG_SEND) {
/* If we fail reset and return an error to the notifier */
if (fd < 0) {
n->state = SECCOMP_NOTIFY_SENT;
} else {
/* Return the FD we just added */
n->flags = 0;
n->error = 0;
n->val = fd;
}
}
/*
* Mark the notification as completed. From this point, addfd mem
* might be invalidated and we can't safely read it anymore.
*/
complete(&addfd->completion);
}
static bool should_sleep_killable(struct seccomp_filter *match,
struct seccomp_knotif *n)
{
return match->wait_killable_recv && n->state >= SECCOMP_NOTIFY_SENT;
}
static int seccomp_do_user_notification(int this_syscall,
struct seccomp_filter *match,
const struct seccomp_data *sd)
{
int err;
u32 flags = 0;
long ret = 0;
struct seccomp_knotif n = {};
struct seccomp_kaddfd *addfd, *tmp;
mutex_lock(&match->notify_lock);
err = -ENOSYS;
if (!match->notif)
goto out;
n.task = current;
n.state = SECCOMP_NOTIFY_INIT;
n.data = sd;
n.id = seccomp_next_notify_id(match);
init_completion(&n.ready);
list_add_tail(&n.list, &match->notif->notifications);
INIT_LIST_HEAD(&n.addfd);
atomic_inc(&match->notif->requests);
if (match->notif->flags & SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP)
wake_up_poll_on_current_cpu(&match->wqh, EPOLLIN | EPOLLRDNORM);
else
wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
/*
* This is where we wait for a reply from userspace.
*/
do {
bool wait_killable = should_sleep_killable(match, &n);
mutex_unlock(&match->notify_lock);
if (wait_killable)
err = wait_for_completion_killable(&n.ready);
else
err = wait_for_completion_interruptible(&n.ready);
mutex_lock(&match->notify_lock);
if (err != 0) {
/*
* Check to see whether we should switch to wait
* killable. Only return the interrupted error if not.
*/
if (!(!wait_killable && should_sleep_killable(match, &n)))
goto interrupted;
}
addfd = list_first_entry_or_null(&n.addfd,
struct seccomp_kaddfd, list);
/* Check if we were woken up by an addfd message */
if (addfd)
seccomp_handle_addfd(addfd, &n);
} while (n.state != SECCOMP_NOTIFY_REPLIED);
ret = n.val;
err = n.error;
flags = n.flags;
interrupted:
/* If there were any pending addfd calls, clear them out */
list_for_each_entry_safe(addfd, tmp, &n.addfd, list) {
/* The process went away before we got a chance to handle it */
addfd->ret = -ESRCH;
list_del_init(&addfd->list);
complete(&addfd->completion);
}
/*
* Note that it's possible the listener died in between the time when
* we were notified of a response (or a signal) and when we were able to
* re-acquire the lock, so only delete from the list if the
* notification actually exists.
*
* Also note that this test is only valid because there's no way to
* *reattach* to a notifier right now. If one is added, we'll need to
* keep track of the notif itself and make sure they match here.
*/
if (match->notif)
list_del(&n.list);
out:
mutex_unlock(&match->notify_lock);
/* Userspace requests to continue the syscall. */
if (flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE)
return 0;
syscall_set_return_value(current, current_pt_regs(),
err, ret);
return -1;
}
static int __seccomp_filter(int this_syscall, const bool recheck_after_trace)
{
u32 filter_ret, action;
struct seccomp_data sd;
struct seccomp_filter *match = NULL;
int data;
/*
* Make sure that any changes to mode from another thread have
* been seen after SYSCALL_WORK_SECCOMP was seen.
*/
smp_rmb();
populate_seccomp_data(&sd);
filter_ret = seccomp_run_filters(&sd, &match);
data = filter_ret & SECCOMP_RET_DATA;
action = filter_ret & SECCOMP_RET_ACTION_FULL;
switch (action) {
case SECCOMP_RET_ERRNO:
/* Set low-order bits as an errno, capped at MAX_ERRNO. */
if (data > MAX_ERRNO)
data = MAX_ERRNO;
syscall_set_return_value(current, current_pt_regs(),
-data, 0);
goto skip;
case SECCOMP_RET_TRAP:
/* Show the handler the original registers. */
syscall_rollback(current, current_pt_regs());
/* Let the filter pass back 16 bits of data. */
force_sig_seccomp(this_syscall, data, false);
goto skip;
case SECCOMP_RET_TRACE:
/* We've been put in this state by the ptracer already. */
if (recheck_after_trace)
return 0;
/* ENOSYS these calls if there is no tracer attached. */
if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
syscall_set_return_value(current,
current_pt_regs(),
-ENOSYS, 0);
goto skip;
}
/* Allow the BPF to provide the event message */
ptrace_event(PTRACE_EVENT_SECCOMP, data);
/*
* The delivery of a fatal signal during event
* notification may silently skip tracer notification,
* which could leave us with a potentially unmodified
* syscall that the tracer would have liked to have
* changed. Since the process is about to die, we just
* force the syscall to be skipped and let the signal
* kill the process and correctly handle any tracer exit
* notifications.
*/
if (fatal_signal_pending(current))
goto skip;
/* Check if the tracer forced the syscall to be skipped. */
this_syscall = syscall_get_nr(current, current_pt_regs());
if (this_syscall < 0)
goto skip;
/*
* Recheck the syscall, since it may have changed. This
* intentionally uses a NULL struct seccomp_data to force
* a reload of all registers. This does not goto skip since
* a skip would have already been reported.
*/
if (__seccomp_filter(this_syscall, true))
return -1;
return 0;
case SECCOMP_RET_USER_NOTIF:
if (seccomp_do_user_notification(this_syscall, match, &sd))
goto skip;
return 0;
case SECCOMP_RET_LOG:
seccomp_log(this_syscall, 0, action, true);
return 0;
case SECCOMP_RET_ALLOW:
/*
* Note that the "match" filter will always be NULL for
* this action since SECCOMP_RET_ALLOW is the starting
* state in seccomp_run_filters().
*/
return 0;
case SECCOMP_RET_KILL_THREAD:
case SECCOMP_RET_KILL_PROCESS:
default:
current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGSYS, action, true);
/* Dump core only if this is the last remaining thread. */
if (action != SECCOMP_RET_KILL_THREAD ||
(atomic_read(&current->signal->live) == 1)) {
/* Show the original registers in the dump. */
syscall_rollback(current, current_pt_regs());
/* Trigger a coredump with SIGSYS */
force_sig_seccomp(this_syscall, data, true);
} else {
do_exit(SIGSYS);
}
return -1; /* skip the syscall, go directly to signal handling */
}
unreachable();
skip:
seccomp_log(this_syscall, 0, action, match ? match->log : false);
return -1;
}
#else
static int __seccomp_filter(int this_syscall, const bool recheck_after_trace)
{
BUG();
return -1;
}
#endif
int __secure_computing(void)
{
int mode = current->seccomp.mode;
int this_syscall;
if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return 0;
this_syscall = syscall_get_nr(current, current_pt_regs());
switch (mode) {
case SECCOMP_MODE_STRICT:
__secure_computing_strict(this_syscall); /* may call do_exit */
return 0;
case SECCOMP_MODE_FILTER:
return __seccomp_filter(this_syscall, false);
/* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
case SECCOMP_MODE_DEAD:
WARN_ON_ONCE(1);
do_exit(SIGKILL);
return -1;
default:
BUG();
}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
long prctl_get_seccomp(void)
{
return current->seccomp.mode;
}
/**
* seccomp_set_mode_strict: internal function for setting strict seccomp
*
* Once current->seccomp.mode is non-zero, it may not be changed.
*
* Returns 0 on success or -EINVAL on failure.
*/
static long seccomp_set_mode_strict(void)
{
const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
long ret = -EINVAL;
spin_lock_irq(&current->sighand->siglock);
if (!seccomp_may_assign_mode(seccomp_mode))
goto out;
#ifdef TIF_NOTSC
disable_TSC();
#endif
seccomp_assign_mode(current, seccomp_mode, 0);
ret = 0;
out:
spin_unlock_irq(&current->sighand->siglock);
return ret;
}
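/*
* Illustrative only (userspace sketch, not part of this file): strict
* mode is typically entered through prctl(), e.g.:
*
*	#include <sys/prctl.h>
*	#include <linux/seccomp.h>
*
*	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0))
*		perror("prctl(PR_SET_SECCOMP)");
*
* After this only the mode1_syscalls listed earlier remain usable; any
* other syscall kills the calling thread with SIGKILL.
*/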
#ifdef CONFIG_SECCOMP_FILTER
static void seccomp_notify_free(struct seccomp_filter *filter)
{
kfree(filter->notif);
filter->notif = NULL;
}
static void seccomp_notify_detach(struct seccomp_filter *filter)
{
struct seccomp_knotif *knotif;
if (!filter)
return;
mutex_lock(&filter->notify_lock);
/*
* If this file is being closed because e.g. the task who owned it
* died, let's wake everyone up who was waiting on us.
*/
list_for_each_entry(knotif, &filter->notif->notifications, list) {
if (knotif->state == SECCOMP_NOTIFY_REPLIED)
continue;
knotif->state = SECCOMP_NOTIFY_REPLIED;
knotif->error = -ENOSYS;
knotif->val = 0;
/*
* We do not need to wake up any pending addfd messages, as
* the notifier will do that for us, as this just looks
* like a standard reply.
*/
complete(&knotif->ready);
}
seccomp_notify_free(filter);
mutex_unlock(&filter->notify_lock);
}
static int seccomp_notify_release(struct inode *inode, struct file *file)
{
struct seccomp_filter *filter = file->private_data;
seccomp_notify_detach(filter);
__put_seccomp_filter(filter);
return 0;
}
/* must be called with notif_lock held */
static inline struct seccomp_knotif *
find_notification(struct seccomp_filter *filter, u64 id)
{
struct seccomp_knotif *cur;
lockdep_assert_held(&filter->notify_lock);
list_for_each_entry(cur, &filter->notif->notifications, list) {
if (cur->id == id)
return cur;
}
return NULL;
}
static int recv_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
void *key)
{
/* Avoid a wakeup if event not interesting for us. */
if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR | EPOLLHUP)))
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
static int recv_wait_event(struct seccomp_filter *filter)
{
DEFINE_WAIT_FUNC(wait, recv_wake_function);
int ret;
if (refcount_read(&filter->users) == 0)
return 0;
if (atomic_dec_if_positive(&filter->notif->requests) >= 0)
return 0;
for (;;) {
ret = prepare_to_wait_event(&filter->wqh, &wait, TASK_INTERRUPTIBLE);
if (atomic_dec_if_positive(&filter->notif->requests) >= 0)
break;
if (refcount_read(&filter->users) == 0)
break;
if (ret)
return ret;
schedule();
}
finish_wait(&filter->wqh, &wait);
return 0;
}
static long seccomp_notify_recv(struct seccomp_filter *filter,
void __user *buf)
{
struct seccomp_knotif *knotif = NULL, *cur;
struct seccomp_notif unotif;
ssize_t ret;
/* Verify that we're not given garbage to keep struct extensible. */
ret = check_zeroed_user(buf, sizeof(unotif));
if (ret < 0)
return ret;
if (!ret)
return -EINVAL;
memset(&unotif, 0, sizeof(unotif));
ret = recv_wait_event(filter);
if (ret < 0)
return ret;
mutex_lock(&filter->notify_lock);
list_for_each_entry(cur, &filter->notif->notifications, list) {
if (cur->state == SECCOMP_NOTIFY_INIT) {
knotif = cur;
break;
}
}
/*
* If we didn't find a notification, it could be that the task was
* interrupted by a fatal signal between the time we were woken and
* when we were able to acquire the notify_lock mutex.
*/
if (!knotif) {
ret = -ENOENT;
goto out;
}
unotif.id = knotif->id;
unotif.pid = task_pid_vnr(knotif->task);
unotif.data = *(knotif->data);
knotif->state = SECCOMP_NOTIFY_SENT;
wake_up_poll(&filter->wqh, EPOLLOUT | EPOLLWRNORM);
ret = 0;
out:
mutex_unlock(&filter->notify_lock);
if (ret == 0 && copy_to_user(buf, &unotif, sizeof(unotif))) {
ret = -EFAULT;
/*
* Userspace screwed up. To make sure that we keep this
* notification alive, let's reset it back to INIT. It
* may have died when we released the lock, so we need to make
* sure it's still around.
*/
mutex_lock(&filter->notify_lock);
knotif = find_notification(filter, unotif.id);
if (knotif) {
/* Reset the process to make sure it's not stuck */
if (should_sleep_killable(filter, knotif))
complete(&knotif->ready);
knotif->state = SECCOMP_NOTIFY_INIT;
atomic_inc(&filter->notif->requests);
wake_up_poll(&filter->wqh, EPOLLIN | EPOLLRDNORM);
}
mutex_unlock(&filter->notify_lock);
}
return ret;
}
static long seccomp_notify_send(struct seccomp_filter *filter,
void __user *buf)
{
struct seccomp_notif_resp resp = {};
struct seccomp_knotif *knotif;
long ret;
if (copy_from_user(&resp, buf, sizeof(resp)))
return -EFAULT;
if (resp.flags & ~SECCOMP_USER_NOTIF_FLAG_CONTINUE)
return -EINVAL;
if ((resp.flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE) &&
(resp.error || resp.val))
return -EINVAL;
ret = mutex_lock_interruptible(&filter->notify_lock);
if (ret < 0)
return ret;
knotif = find_notification(filter, resp.id);
if (!knotif) {
ret = -ENOENT;
goto out;
}
/* Allow exactly one reply. */
if (knotif->state != SECCOMP_NOTIFY_SENT) {
ret = -EINPROGRESS;
goto out;
}
ret = 0;
knotif->state = SECCOMP_NOTIFY_REPLIED;
knotif->error = resp.error;
knotif->val = resp.val;
knotif->flags = resp.flags;
if (filter->notif->flags & SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP)
complete_on_current_cpu(&knotif->ready);
else
complete(&knotif->ready);
out:
mutex_unlock(&filter->notify_lock);
return ret;
}
static long seccomp_notify_id_valid(struct seccomp_filter *filter,
void __user *buf)
{
struct seccomp_knotif *knotif;
u64 id;
long ret;
if (copy_from_user(&id, buf, sizeof(id)))
return -EFAULT;
ret = mutex_lock_interruptible(&filter->notify_lock);
if (ret < 0)
return ret;
knotif = find_notification(filter, id);
if (knotif && knotif->state == SECCOMP_NOTIFY_SENT)
ret = 0;
else
ret = -ENOENT;
mutex_unlock(&filter->notify_lock);
return ret;
}
static long seccomp_notify_set_flags(struct seccomp_filter *filter,
unsigned long flags)
{
long ret;
if (flags & ~SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP)
return -EINVAL;
ret = mutex_lock_interruptible(&filter->notify_lock);
if (ret < 0)
return ret;
filter->notif->flags = flags;
mutex_unlock(&filter->notify_lock);
return 0;
}
static long seccomp_notify_addfd(struct seccomp_filter *filter,
struct seccomp_notif_addfd __user *uaddfd,
unsigned int size)
{
struct seccomp_notif_addfd addfd;
struct seccomp_knotif *knotif;
struct seccomp_kaddfd kaddfd;
int ret;
BUILD_BUG_ON(sizeof(addfd) < SECCOMP_NOTIFY_ADDFD_SIZE_VER0);
BUILD_BUG_ON(sizeof(addfd) != SECCOMP_NOTIFY_ADDFD_SIZE_LATEST);
if (size < SECCOMP_NOTIFY_ADDFD_SIZE_VER0 || size >= PAGE_SIZE)
return -EINVAL;
ret = copy_struct_from_user(&addfd, sizeof(addfd), uaddfd, size);
if (ret)
return ret;
if (addfd.newfd_flags & ~O_CLOEXEC)
return -EINVAL;
if (addfd.flags & ~(SECCOMP_ADDFD_FLAG_SETFD | SECCOMP_ADDFD_FLAG_SEND))
return -EINVAL;
if (addfd.newfd && !(addfd.flags & SECCOMP_ADDFD_FLAG_SETFD))
return -EINVAL;
kaddfd.file = fget(addfd.srcfd);
if (!kaddfd.file)
return -EBADF;
kaddfd.ioctl_flags = addfd.flags;
kaddfd.flags = addfd.newfd_flags;
kaddfd.setfd = addfd.flags & SECCOMP_ADDFD_FLAG_SETFD;
kaddfd.fd = addfd.newfd;
init_completion(&kaddfd.completion);
ret = mutex_lock_interruptible(&filter->notify_lock);
if (ret < 0)
goto out;
knotif = find_notification(filter, addfd.id);
if (!knotif) {
ret = -ENOENT;
goto out_unlock;
}
/*
* We do not want to allow for FD injection to occur before the
* notification has been picked up by a userspace handler, or after
* the notification has been replied to.
*/
if (knotif->state != SECCOMP_NOTIFY_SENT) {
ret = -EINPROGRESS;
goto out_unlock;
}
if (addfd.flags & SECCOMP_ADDFD_FLAG_SEND) {
/*
* Disallow queuing an atomic addfd + send reply while there are
* some addfd requests still to process.
*
* There is no clear reason to support it, and disallowing it keeps
* the loop on the other side straightforward.
*/
if (!list_empty(&knotif->addfd)) {
ret = -EBUSY;
goto out_unlock;
}
/* Allow exactly one reply */
knotif->state = SECCOMP_NOTIFY_REPLIED;
}
list_add(&kaddfd.list, &knotif->addfd);
complete(&knotif->ready);
mutex_unlock(&filter->notify_lock);
/* Now we wait for it to be processed or be interrupted */
ret = wait_for_completion_interruptible(&kaddfd.completion);
if (ret == 0) {
/*
* We had a successful completion. The other side has already
* removed us from the addfd queue, and
* wait_for_completion_interruptible has a memory barrier upon
* success that lets us read this value directly without
* locking.
*/
ret = kaddfd.ret;
goto out;
}
mutex_lock(&filter->notify_lock);
/*
* Even though we were woken up by a signal and not a successful
* completion, a completion may have happened in the mean time.
*
* We need to check again if the addfd request has been handled,
* and if not, we will remove it from the queue.
*/
if (list_empty(&kaddfd.list))
ret = kaddfd.ret;
else
list_del(&kaddfd.list);
out_unlock:
mutex_unlock(&filter->notify_lock);
out:
fput(kaddfd.file);
return ret;
}
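/*
* Illustrative only (supervisor-side sketch, not part of this file;
* listener_fd, req and fd_to_inject are hypothetical names): injecting
* a file descriptor into the target in response to a notification
* received earlier with SECCOMP_IOCTL_NOTIF_RECV:
*
*	struct seccomp_notif_addfd addfd = {
*		.id = req.id,
*		.srcfd = fd_to_inject,		// fd open in the supervisor
*		.newfd = 0,
*		.newfd_flags = O_CLOEXEC,
*		.flags = 0,			// or SECCOMP_ADDFD_FLAG_SETFD
*						// and/or SECCOMP_ADDFD_FLAG_SEND
*	};
*	int target_fd = ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
*
* On success ioctl() returns the descriptor number installed in the
* target. As enforced above, the injection is only accepted while the
* notification is in the SENT state.
*/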
static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct seccomp_filter *filter = file->private_data;
void __user *buf = (void __user *)arg;
/* Fixed-size ioctls */
switch (cmd) {
case SECCOMP_IOCTL_NOTIF_RECV:
return seccomp_notify_recv(filter, buf);
case SECCOMP_IOCTL_NOTIF_SEND:
return seccomp_notify_send(filter, buf);
case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR:
case SECCOMP_IOCTL_NOTIF_ID_VALID:
return seccomp_notify_id_valid(filter, buf);
case SECCOMP_IOCTL_NOTIF_SET_FLAGS:
return seccomp_notify_set_flags(filter, arg);
}
/* Extensible Argument ioctls */
#define EA_IOCTL(cmd) ((cmd) & ~(IOC_INOUT | IOCSIZE_MASK))
switch (EA_IOCTL(cmd)) {
case EA_IOCTL(SECCOMP_IOCTL_NOTIF_ADDFD):
return seccomp_notify_addfd(filter, buf, _IOC_SIZE(cmd));
default:
return -EINVAL;
}
}
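/*
* Illustrative only (supervisor-side sketch, not part of this file;
* listener_fd is a hypothetical name): the minimal receive/reply cycle
* over the ioctls dispatched above. The receive buffer must be zeroed,
* since seccomp_notify_recv() rejects non-zero input to keep the struct
* extensible:
*
*	struct seccomp_notif req;
*	struct seccomp_notif_resp resp;
*
*	memset(&req, 0, sizeof(req));
*	if (ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_RECV, &req) == 0) {
*		memset(&resp, 0, sizeof(resp));
*		resp.id = req.id;
*		resp.error = 0;
*		resp.val = 0;		// emulated syscall return value
*		resp.flags = 0;		// or SECCOMP_USER_NOTIF_FLAG_CONTINUE
*		ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
*	}
*/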
static __poll_t seccomp_notify_poll(struct file *file,
struct poll_table_struct *poll_tab)
{
struct seccomp_filter *filter = file->private_data;
__poll_t ret = 0;
struct seccomp_knotif *cur;
poll_wait(file, &filter->wqh, poll_tab);
if (mutex_lock_interruptible(&filter->notify_lock) < 0)
return EPOLLERR;
list_for_each_entry(cur, &filter->notif->notifications, list) {
if (cur->state == SECCOMP_NOTIFY_INIT)
ret |= EPOLLIN | EPOLLRDNORM;
if (cur->state == SECCOMP_NOTIFY_SENT)
ret |= EPOLLOUT | EPOLLWRNORM;
if ((ret & EPOLLIN) && (ret & EPOLLOUT))
break;
}
mutex_unlock(&filter->notify_lock);
if (refcount_read(&filter->users) == 0)
ret |= EPOLLHUP;
return ret;
}
static const struct file_operations seccomp_notify_ops = {
.poll = seccomp_notify_poll,
.release = seccomp_notify_release,
.unlocked_ioctl = seccomp_notify_ioctl,
.compat_ioctl = seccomp_notify_ioctl,
};
static struct file *init_listener(struct seccomp_filter *filter)
{
struct file *ret;
ret = ERR_PTR(-ENOMEM);
filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL);
if (!filter->notif)
goto out;
filter->notif->next_id = get_random_u64();
INIT_LIST_HEAD(&filter->notif->notifications);
ret = anon_inode_getfile("seccomp notify", &seccomp_notify_ops,
filter, O_RDWR);
if (IS_ERR(ret))
goto out_notif;
/* The file has a reference to it now */
__get_seccomp_filter(filter);
out_notif:
if (IS_ERR(ret))
seccomp_notify_free(filter);
out:
return ret;
}
/*
* Does @new_child have a listener while an ancestor also has a listener?
* If so, we'll want to reject this filter.
* This only has to be tested for the current process, even in the TSYNC case,
* because TSYNC installs @new_child with the same parent on all threads.
* Note that @new_child is not hooked up to its parent at this point yet, so
* we use current->seccomp.filter.
*/
static bool has_duplicate_listener(struct seccomp_filter *new_child)
{
struct seccomp_filter *cur;
/* must be protected against concurrent TSYNC */
lockdep_assert_held(&current->sighand->siglock);
if (!new_child->notif)
return false;
for (cur = current->seccomp.filter; cur; cur = cur->prev) {
if (cur->notif)
return true;
}
return false;
}
/**
* seccomp_set_mode_filter: internal function for setting seccomp filter
* @flags: flags to change filter behavior
* @filter: struct sock_fprog containing filter
*
* This function may be called repeatedly to install additional filters.
* Every filter successfully installed will be evaluated (in reverse order)
* for each system call the task makes.
*
* Once current->seccomp.mode is non-zero, it may not be changed.
*
* Returns 0 on success, the new listener fd when
* SECCOMP_FILTER_FLAG_NEW_LISTENER was requested, or a negative error
* code on failure.
*/
static long seccomp_set_mode_filter(unsigned int flags,
const char __user *filter)
{
const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
struct seccomp_filter *prepared = NULL;
long ret = -EINVAL;
int listener = -1;
struct file *listener_f = NULL;
/* Validate flags. */
if (flags & ~SECCOMP_FILTER_FLAG_MASK)
return -EINVAL;
/*
* In the successful case, NEW_LISTENER returns the new listener fd.
* But in the failure case, TSYNC returns the thread that died. If you
* combine these two flags, there's no way to tell whether something
* succeeded or failed. So, let's disallow this combination if the user
* has not explicitly requested no errors from TSYNC.
*/
if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
(flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) &&
((flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH) == 0))
return -EINVAL;
/*
* The SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV flag doesn't make sense
* without the SECCOMP_FILTER_FLAG_NEW_LISTENER flag.
*/
if ((flags & SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV) &&
((flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) == 0))
return -EINVAL;
/* Prepare the new filter before holding any locks. */
prepared = seccomp_prepare_user_filter(filter);
if (IS_ERR(prepared))
return PTR_ERR(prepared);
if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
listener = get_unused_fd_flags(O_CLOEXEC);
if (listener < 0) {
ret = listener;
goto out_free;
}
listener_f = init_listener(prepared);
if (IS_ERR(listener_f)) {
put_unused_fd(listener);
ret = PTR_ERR(listener_f);
goto out_free;
}
}
/*
* Make sure we cannot change seccomp or nnp state via TSYNC
* while another thread is in the middle of calling exec.
*/
if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
mutex_lock_killable(&current->signal->cred_guard_mutex))
goto out_put_fd;
spin_lock_irq(&current->sighand->siglock);
if (!seccomp_may_assign_mode(seccomp_mode))
goto out;
if (has_duplicate_listener(prepared)) {
ret = -EBUSY;
goto out;
}
ret = seccomp_attach_filter(flags, prepared);
if (ret)
goto out;
/* Do not free the successfully attached filter. */
prepared = NULL;
seccomp_assign_mode(current, seccomp_mode, flags);
out:
spin_unlock_irq(&current->sighand->siglock);
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
mutex_unlock(&current->signal->cred_guard_mutex);
out_put_fd:
if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
if (ret) {
listener_f->private_data = NULL;
fput(listener_f);
put_unused_fd(listener);
seccomp_notify_detach(prepared);
} else {
fd_install(listener, listener_f);
ret = listener;
}
}
out_free:
seccomp_filter_free(prepared);
return ret;
}
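/*
* Illustrative only (userspace sketch, not part of this file; headers
* <linux/seccomp.h>, <linux/filter.h> and <sys/syscall.h> assumed):
* installing a filter via seccomp(2) and requesting a listener fd.
* no_new_privs (or CAP_SYS_ADMIN) must be set first or
* seccomp_may_assign_mode() refuses the filter:
*
*	struct sock_filter insns[] = {
*		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
*			 offsetof(struct seccomp_data, nr)),
*		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
*		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_USER_NOTIF),
*		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
*	};
*	struct sock_fprog prog = {
*		.len = sizeof(insns) / sizeof(insns[0]),
*		.filter = insns,
*	};
*	int listener;
*
*	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
*	listener = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
*			   SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
*
* On success the return value is the new listener fd (installed in
* out_put_fd above); getpid() in this task will then block in
* seccomp_do_user_notification() until a supervisor replies.
*/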
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
const char __user *filter)
{
return -EINVAL;
}
#endif
static long seccomp_get_action_avail(const char __user *uaction)
{
u32 action;
if (copy_from_user(&action, uaction, sizeof(action)))
return -EFAULT;
switch (action) {
case SECCOMP_RET_KILL_PROCESS:
case SECCOMP_RET_KILL_THREAD:
case SECCOMP_RET_TRAP:
case SECCOMP_RET_ERRNO:
case SECCOMP_RET_USER_NOTIF:
case SECCOMP_RET_TRACE:
case SECCOMP_RET_LOG:
case SECCOMP_RET_ALLOW:
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static long seccomp_get_notif_sizes(void __user *usizes)
{
struct seccomp_notif_sizes sizes = {
.seccomp_notif = sizeof(struct seccomp_notif),
.seccomp_notif_resp = sizeof(struct seccomp_notif_resp),
.seccomp_data = sizeof(struct seccomp_data),
};
if (copy_to_user(usizes, &sizes, sizeof(sizes)))
return -EFAULT;
return 0;
}
/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
void __user *uargs)
{
switch (op) {
case SECCOMP_SET_MODE_STRICT:
if (flags != 0 || uargs != NULL)
return -EINVAL;
return seccomp_set_mode_strict();
case SECCOMP_SET_MODE_FILTER:
return seccomp_set_mode_filter(flags, uargs);
case SECCOMP_GET_ACTION_AVAIL:
if (flags != 0)
return -EINVAL;
return seccomp_get_action_avail(uargs);
case SECCOMP_GET_NOTIF_SIZES:
if (flags != 0)
return -EINVAL;
return seccomp_get_notif_sizes(uargs);
default:
return -EINVAL;
}
}
SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
void __user *, uargs)
{
return do_seccomp(op, flags, uargs);
}
/**
* prctl_set_seccomp: configures current->seccomp.mode
* @seccomp_mode: requested mode to use
* @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
*
* Returns 0 on success or -EINVAL on failure.
*/
long prctl_set_seccomp(unsigned long seccomp_mode, void __user *filter)
{
unsigned int op;
void __user *uargs;
switch (seccomp_mode) {
case SECCOMP_MODE_STRICT:
op = SECCOMP_SET_MODE_STRICT;
/*
* Setting strict mode through prctl has always ignored the filter argument,
* so make sure it is always NULL here to pass the internal
* check in do_seccomp().
*/
uargs = NULL;
break;
case SECCOMP_MODE_FILTER:
op = SECCOMP_SET_MODE_FILTER;
uargs = filter;
break;
default:
return -EINVAL;
}
/* prctl interface doesn't have flags, so they are always zero. */
return do_seccomp(op, 0, uargs);
}
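/*
* Illustrative only (userspace sketch, not part of this file): the
* legacy prctl() entry point. Because the prctl path always passes zero
* flags, TSYNC/NEW_LISTENER behaviour is only reachable via seccomp(2):
*
*	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
*	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
*
* where 'prog' is a struct sock_fprog as in the sketch after
* seccomp_set_mode_filter() above.
*/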
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
unsigned long filter_off)
{
struct seccomp_filter *orig, *filter;
unsigned long count;
/*
* Note: this is only correct because the caller should be the (ptrace)
* tracer of the task, otherwise lock_task_sighand is needed.
*/
spin_lock_irq(&task->sighand->siglock);
if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
spin_unlock_irq(&task->sighand->siglock);
return ERR_PTR(-EINVAL);
}
orig = task->seccomp.filter;
__get_seccomp_filter(orig);
spin_unlock_irq(&task->sighand->siglock);
count = 0;
for (filter = orig; filter; filter = filter->prev)
count++;
if (filter_off >= count) {
filter = ERR_PTR(-ENOENT);
goto out;
}
count -= filter_off;
for (filter = orig; filter && count > 1; filter = filter->prev)
count--;
if (WARN_ON(count != 1 || !filter)) {
filter = ERR_PTR(-ENOENT);
goto out;
}
__get_seccomp_filter(filter);
out:
__put_seccomp_filter(orig);
return filter;
}
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
void __user *data)
{
struct seccomp_filter *filter;
struct sock_fprog_kern *fprog;
long ret;
if (!capable(CAP_SYS_ADMIN) ||
current->seccomp.mode != SECCOMP_MODE_DISABLED) {
return -EACCES;
}
filter = get_nth_filter(task, filter_off);
if (IS_ERR(filter))
return PTR_ERR(filter);
fprog = filter->prog->orig_prog;
if (!fprog) {
/* This must be a new non-cBPF filter, since we save
* every cBPF filter's orig_prog above when
* CONFIG_CHECKPOINT_RESTORE is enabled.
*/
ret = -EMEDIUMTYPE;
goto out;
}
ret = fprog->len;
if (!data)
goto out;
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
out:
__put_seccomp_filter(filter);
return ret;
}
long seccomp_get_metadata(struct task_struct *task,
unsigned long size, void __user *data)
{
long ret;
struct seccomp_filter *filter;
struct seccomp_metadata kmd = {};
if (!capable(CAP_SYS_ADMIN) ||
current->seccomp.mode != SECCOMP_MODE_DISABLED) {
return -EACCES;
}
size = min_t(unsigned long, size, sizeof(kmd));
if (size < sizeof(kmd.filter_off))
return -EINVAL;
if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
return -EFAULT;
filter = get_nth_filter(task, kmd.filter_off);
if (IS_ERR(filter))
return PTR_ERR(filter);
if (filter->log)
kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
ret = size;
if (copy_to_user(data, &kmd, size))
ret = -EFAULT;
__put_seccomp_filter(filter);
return ret;
}
#endif
#ifdef CONFIG_SYSCTL
/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME "kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME "kill_thread"
#define SECCOMP_RET_TRAP_NAME "trap"
#define SECCOMP_RET_ERRNO_NAME "errno"
#define SECCOMP_RET_USER_NOTIF_NAME "user_notif"
#define SECCOMP_RET_TRACE_NAME "trace"
#define SECCOMP_RET_LOG_NAME "log"
#define SECCOMP_RET_ALLOW_NAME "allow"
static const char seccomp_actions_avail[] =
SECCOMP_RET_KILL_PROCESS_NAME " "
SECCOMP_RET_KILL_THREAD_NAME " "
SECCOMP_RET_TRAP_NAME " "
SECCOMP_RET_ERRNO_NAME " "
SECCOMP_RET_USER_NOTIF_NAME " "
SECCOMP_RET_TRACE_NAME " "
SECCOMP_RET_LOG_NAME " "
SECCOMP_RET_ALLOW_NAME;
struct seccomp_log_name {
u32 log;
const char *name;
};
static const struct seccomp_log_name seccomp_log_names[] = {
{ SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
{ SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
{ SECCOMP_LOG_USER_NOTIF, SECCOMP_RET_USER_NOTIF_NAME },
{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
{ }
};
static bool seccomp_names_from_actions_logged(char *names, size_t size,
u32 actions_logged,
const char *sep)
{
const struct seccomp_log_name *cur;
bool append_sep = false;
for (cur = seccomp_log_names; cur->name && size; cur++) {
ssize_t ret;
if (!(actions_logged & cur->log))
continue;
if (append_sep) {
ret = strscpy(names, sep, size);
if (ret < 0)
return false;
names += ret;
size -= ret;
} else
append_sep = true;
ret = strscpy(names, cur->name, size);
if (ret < 0)
return false;
names += ret;
size -= ret;
}
return true;
}
static bool seccomp_action_logged_from_name(u32 *action_logged,
const char *name)
{
const struct seccomp_log_name *cur;
for (cur = seccomp_log_names; cur->name; cur++) {
if (!strcmp(cur->name, name)) {
*action_logged = cur->log;
return true;
}
}
return false;
}
static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
char *name;
*actions_logged = 0;
while ((name = strsep(&names, " ")) && *name) {
u32 action_logged = 0;
if (!seccomp_action_logged_from_name(&action_logged, name))
return false;
*actions_logged |= action_logged;
}
return true;
}
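/*
* Illustrative only: the sysctl consumes a space-separated list of the
* action names above. Writing, say,
*
*	"kill_process kill_thread errno"
*
* to /proc/sys/kernel/seccomp/actions_logged yields
* SECCOMP_LOG_KILL_PROCESS | SECCOMP_LOG_KILL_THREAD | SECCOMP_LOG_ERRNO,
* while any list containing "allow" is rejected by write_actions_logged()
* below.
*/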
static int read_actions_logged(const struct ctl_table *ro_table, void *buffer,
size_t *lenp, loff_t *ppos)
{
char names[sizeof(seccomp_actions_avail)];
struct ctl_table table;
memset(names, 0, sizeof(names));
if (!seccomp_names_from_actions_logged(names, sizeof(names),
seccomp_actions_logged, " "))
return -EINVAL;
table = *ro_table;
table.data = names;
table.maxlen = sizeof(names);
return proc_dostring(&table, 0, buffer, lenp, ppos);
}
static int write_actions_logged(const struct ctl_table *ro_table, void *buffer,
size_t *lenp, loff_t *ppos, u32 *actions_logged)
{
char names[sizeof(seccomp_actions_avail)];
struct ctl_table table;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
memset(names, 0, sizeof(names));
table = *ro_table;
table.data = names;
table.maxlen = sizeof(names);
ret = proc_dostring(&table, 1, buffer, lenp, ppos);
if (ret)
return ret;
if (!seccomp_actions_logged_from_names(actions_logged, table.data))
return -EINVAL;
if (*actions_logged & SECCOMP_LOG_ALLOW)
return -EINVAL;
seccomp_actions_logged = *actions_logged;
return 0;
}
static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
int ret)
{
char names[sizeof(seccomp_actions_avail)];
char old_names[sizeof(seccomp_actions_avail)];
const char *new = names;
const char *old = old_names;
if (!audit_enabled)
return;
memset(names, 0, sizeof(names));
memset(old_names, 0, sizeof(old_names));
if (ret)
new = "?";
else if (!actions_logged)
new = "(none)";
else if (!seccomp_names_from_actions_logged(names, sizeof(names),
actions_logged, ","))
new = "?";
if (!old_actions_logged)
old = "(none)";
else if (!seccomp_names_from_actions_logged(old_names,
sizeof(old_names),
old_actions_logged, ","))
old = "?";
return audit_seccomp_actions_logged(new, old, !ret);
}
static int seccomp_actions_logged_handler(const struct ctl_table *ro_table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
if (write) {
u32 actions_logged = 0;
u32 old_actions_logged = seccomp_actions_logged;
ret = write_actions_logged(ro_table, buffer, lenp, ppos,
&actions_logged);
audit_actions_logged(actions_logged, old_actions_logged, ret);
} else
ret = read_actions_logged(ro_table, buffer, lenp, ppos);
return ret;
}
static const struct ctl_table seccomp_sysctl_table[] = {
{
.procname = "actions_avail",
.data = (void *) &seccomp_actions_avail,
.maxlen = sizeof(seccomp_actions_avail),
.mode = 0444,
.proc_handler = proc_dostring,
},
{
.procname = "actions_logged",
.mode = 0644,
.proc_handler = seccomp_actions_logged_handler,
},
};
static int __init seccomp_sysctl_init(void)
{
register_sysctl_init("kernel/seccomp", seccomp_sysctl_table);
return 0;
}
device_initcall(seccomp_sysctl_init)
#endif /* CONFIG_SYSCTL */
#ifdef CONFIG_SECCOMP_CACHE_DEBUG
/* Currently CONFIG_SECCOMP_CACHE_DEBUG implies SECCOMP_ARCH_NATIVE */
static void proc_pid_seccomp_cache_arch(struct seq_file *m, const char *name,
const void *bitmap, size_t bitmap_size)
{
int nr;
for (nr = 0; nr < bitmap_size; nr++) {
bool cached = test_bit(nr, bitmap);
char *status = cached ? "ALLOW" : "FILTER";
seq_printf(m, "%s %d %s\n", name, nr, status);
}
}
int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
struct seccomp_filter *f;
unsigned long flags;
/*
* We don't want some sandboxed process to know what their seccomp
* filters consist of.
*/
if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
return -EACCES;
if (!lock_task_sighand(task, &flags))
return -ESRCH;
f = READ_ONCE(task->seccomp.filter);
if (!f) {
unlock_task_sighand(task, &flags);
return 0;
}
/* prevent filter from being freed while we are printing it */
__get_seccomp_filter(f);
unlock_task_sighand(task, &flags);
proc_pid_seccomp_cache_arch(m, SECCOMP_ARCH_NATIVE_NAME,
f->cache.allow_native,
SECCOMP_ARCH_NATIVE_NR);
#ifdef SECCOMP_ARCH_COMPAT
proc_pid_seccomp_cache_arch(m, SECCOMP_ARCH_COMPAT_NAME,
f->cache.allow_compat,
SECCOMP_ARCH_COMPAT_NR);
#endif /* SECCOMP_ARCH_COMPAT */
__put_seccomp_filter(f);
return 0;
}
#endif /* CONFIG_SECCOMP_CACHE_DEBUG */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Fast Userspace Mutexes (which I call "Futexes!").
* (C) Rusty Russell, IBM 2002
*
* Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
* (C) Copyright 2003 Red Hat Inc, All Rights Reserved
*
* Removed page pinning, fix privately mapped COW pages and other cleanups
* (C) Copyright 2003, 2004 Jamie Lokier
*
* Robust futex support started by Ingo Molnar
* (C) Copyright 2006 Red Hat Inc, All Rights Reserved
* Thanks to Thomas Gleixner for suggestions, analysis and fixes.
*
* PI-futex support started by Ingo Molnar and Thomas Gleixner
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* PRIVATE futexes by Eric Dumazet
* Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
*
* Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
* Copyright (C) IBM Corporation, 2009
* Thanks to Thomas Gleixner for conceptual design and careful reviews.
*
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
*
* "The futexes are also cursed."
* "But they come in a choice of three flavours!"
*/
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/plist.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>
#include <linux/prctl.h>
#include <linux/mempolicy.h>
#include <linux/mmap_lock.h>
#include "futex.h"
#include "../locking/rtmutex_common.h"
/*
* The base of the bucket array and its size are always used together
* (after initialization only in futex_hash()), so ensure that they
* reside in the same cacheline.
*/
static struct {
unsigned long hashmask;
unsigned int hashshift;
struct futex_hash_bucket *queues[MAX_NUMNODES];
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_hashmask (__futex_data.hashmask)
#define futex_hashshift (__futex_data.hashshift)
#define futex_queues (__futex_data.queues)
struct futex_private_hash {
int state;
unsigned int hash_mask;
struct rcu_head rcu;
void *mm;
bool custom;
struct futex_hash_bucket queues[];
};
/*
* Fault injections for futexes.
*/
#ifdef CONFIG_FAIL_FUTEX
static struct {
struct fault_attr attr;
bool ignore_private;
} fail_futex = {
.attr = FAULT_ATTR_INITIALIZER,
.ignore_private = false,
};
static int __init setup_fail_futex(char *str)
{
return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);
bool should_fail_futex(bool fshared)
{
if (fail_futex.ignore_private && !fshared)
return false;
return should_fail(&fail_futex.attr, 1);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init fail_futex_debugfs(void)
{
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
dir = fault_create_debugfs_attr("fail_futex", NULL,
&fail_futex.attr);
if (IS_ERR(dir))
return PTR_ERR(dir);
debugfs_create_bool("ignore-private", mode, dir,
&fail_futex.ignore_private);
return 0;
}
late_initcall(fail_futex_debugfs);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
#endif /* CONFIG_FAIL_FUTEX */
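/*
* Illustrative only (assuming the common fault_attr boot syntax
* "<interval>,<probability>,<space>,<times>"): booting with
*
*	fail_futex=1,100,0,-1
*
* makes should_fail_futex() report a failure on every call; with
* CONFIG_FAULT_INJECTION_DEBUG_FS the same knobs, plus the
* "ignore-private" bool created below, can be tuned at runtime under
* /sys/kernel/debug/fail_futex/.
*/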
static struct futex_hash_bucket *
__futex_hash(union futex_key *key, struct futex_private_hash *fph);
#ifdef CONFIG_FUTEX_PRIVATE_HASH
static bool futex_ref_get(struct futex_private_hash *fph);
static bool futex_ref_put(struct futex_private_hash *fph);
static bool futex_ref_is_dead(struct futex_private_hash *fph);
enum { FR_PERCPU = 0, FR_ATOMIC };
static inline bool futex_key_is_private(union futex_key *key)
{
/*
* Relies on get_futex_key() to set either bit for shared
* futexes -- see comment with union futex_key.
*/
return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED));
}
static bool futex_private_hash_get(struct futex_private_hash *fph)
{
return futex_ref_get(fph);
}
void futex_private_hash_put(struct futex_private_hash *fph)
{
if (futex_ref_put(fph))
wake_up_var(fph->mm);
}
/**
* futex_hash_get - Get an additional reference for the local hash.
* @hb: ptr to a bucket in the private local hash.
*
* Obtain an additional reference for the already obtained hash bucket. The
* caller must already own a reference.
*/
void futex_hash_get(struct futex_hash_bucket *hb)
{
struct futex_private_hash *fph = hb->priv;
if (!fph)
return;
WARN_ON_ONCE(!futex_private_hash_get(fph));
}
void futex_hash_put(struct futex_hash_bucket *hb)
{
struct futex_private_hash *fph = hb->priv;
if (!fph)
return;
futex_private_hash_put(fph);
}
static struct futex_hash_bucket *
__futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
{
u32 hash;
if (!futex_key_is_private(key))
return NULL;
if (!fph)
fph = rcu_dereference(key->private.mm->futex_phash);
if (!fph || !fph->hash_mask)
return NULL;
hash = jhash2((void *)&key->private.address,
sizeof(key->private.address) / 4,
key->both.offset);
return &fph->queues[hash & fph->hash_mask];
}
static void futex_rehash_private(struct futex_private_hash *old,
struct futex_private_hash *new)
{
struct futex_hash_bucket *hb_old, *hb_new;
unsigned int slots = old->hash_mask + 1;
unsigned int i;
for (i = 0; i < slots; i++) {
struct futex_q *this, *tmp;
hb_old = &old->queues[i];
spin_lock(&hb_old->lock);
plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) {
plist_del(&this->list, &hb_old->chain);
futex_hb_waiters_dec(hb_old);
WARN_ON_ONCE(this->lock_ptr != &hb_old->lock);
hb_new = __futex_hash(&this->key, new);
futex_hb_waiters_inc(hb_new);
/*
* The new pointer isn't published yet but an already
* moved user can be unqueued due to timeout or signal.
*/
spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING);
plist_add(&this->list, &hb_new->chain);
this->lock_ptr = &hb_new->lock;
spin_unlock(&hb_new->lock);
}
spin_unlock(&hb_old->lock);
}
}
static bool __futex_pivot_hash(struct mm_struct *mm,
struct futex_private_hash *new)
{
struct futex_private_hash *fph;
WARN_ON_ONCE(mm->futex_phash_new);
fph = rcu_dereference_protected(mm->futex_phash,
lockdep_is_held(&mm->futex_hash_lock));
if (fph) {
if (!futex_ref_is_dead(fph)) {
mm->futex_phash_new = new;
return false;
}
futex_rehash_private(fph, new);
}
new->state = FR_PERCPU;
scoped_guard(rcu) {
mm->futex_batches = get_state_synchronize_rcu();
rcu_assign_pointer(mm->futex_phash, new);
}
kvfree_rcu(fph, rcu);
return true;
}
static void futex_pivot_hash(struct mm_struct *mm)
{
scoped_guard(mutex, &mm->futex_hash_lock) {
struct futex_private_hash *fph;
fph = mm->futex_phash_new;
if (fph) {
mm->futex_phash_new = NULL;
__futex_pivot_hash(mm, fph);
}
}
}
struct futex_private_hash *futex_private_hash(void)
{
struct mm_struct *mm = current->mm;
/*
* Ideally we don't loop. If there is a replacement in progress
* then a new private hash is already prepared and a reference can't be
* obtained once the last user dropped its reference.
* In that case we block on mm_struct::futex_hash_lock and either have
* to perform the replacement or wait while someone else is doing the
* job. Either way, on the second iteration we acquire a reference on the
* new private hash or loop again because a new replacement has been
* requested.
*/
again:
scoped_guard(rcu) {
struct futex_private_hash *fph;
fph = rcu_dereference(mm->futex_phash);
if (!fph)
return NULL;
if (futex_private_hash_get(fph))
return fph;
}
futex_pivot_hash(mm);
goto again;
}
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
struct futex_private_hash *fph;
struct futex_hash_bucket *hb;
again:
scoped_guard(rcu) {
hb = __futex_hash(key, NULL);
fph = hb->priv;
if (!fph || futex_private_hash_get(fph))
return hb;
}
futex_pivot_hash(key->private.mm);
goto again;
}
#else /* !CONFIG_FUTEX_PRIVATE_HASH */
static struct futex_hash_bucket *
__futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
{
return NULL;
}
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
return __futex_hash(key, NULL);
}
#endif /* CONFIG_FUTEX_PRIVATE_HASH */
#ifdef CONFIG_FUTEX_MPOL
static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma = vma_lookup(mm, addr);
struct mempolicy *mpol;
int node = FUTEX_NO_NODE;
if (!vma)
return FUTEX_NO_NODE;
mpol = vma_policy(vma);
if (!mpol)
return FUTEX_NO_NODE;
switch (mpol->mode) {
case MPOL_PREFERRED:
node = first_node(mpol->nodes);
break;
case MPOL_PREFERRED_MANY:
case MPOL_BIND:
if (mpol->home_node != NUMA_NO_NODE)
node = mpol->home_node;
break;
default:
break;
}
return node;
}
static int futex_key_to_node_opt(struct mm_struct *mm, unsigned long addr)
{
int seq, node;
guard(rcu)();
if (!mmap_lock_speculate_try_begin(mm, &seq))
return -EBUSY;
node = __futex_key_to_node(mm, addr);
if (mmap_lock_speculate_retry(mm, seq))
return -EAGAIN;
return node;
}
static int futex_mpol(struct mm_struct *mm, unsigned long addr)
{
int node;
node = futex_key_to_node_opt(mm, addr);
if (node >= FUTEX_NO_NODE)
return node;
guard(mmap_read_lock)(mm);
return __futex_key_to_node(mm, addr);
}
#else /* !CONFIG_FUTEX_MPOL */
static int futex_mpol(struct mm_struct *mm, unsigned long addr)
{
return FUTEX_NO_NODE;
}
#endif /* CONFIG_FUTEX_MPOL */
/**
* __futex_hash - Return the hash bucket
* @key: Pointer to the futex key for which the hash is calculated
* @fph: Pointer to private hash if known
*
* We hash on the keys returned from get_futex_key (see below) and return the
* corresponding hash bucket.
* If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the
* private hash) is returned if one exists. Otherwise a hash bucket from the
* global hash is returned.
*/
static struct futex_hash_bucket *
__futex_hash(union futex_key *key, struct futex_private_hash *fph)
{
int node = key->both.node;
u32 hash;
if (node == FUTEX_NO_NODE) {
struct futex_hash_bucket *hb;
hb = __futex_hash_private(key, fph);
if (hb)
return hb;
}
hash = jhash2((u32 *)key,
offsetof(typeof(*key), both.offset) / sizeof(u32),
key->both.offset);
if (node == FUTEX_NO_NODE) {
/*
* In case of !FLAGS_NUMA, use some unused hash bits to pick a
* node -- this ensures regular futexes are interleaved across
* the nodes and avoids having to allocate multiple
* hash-tables.
*
* NOTE: this isn't perfectly uniform, but it is fast and
* handles sparse node masks.
*/
node = (hash >> futex_hashshift) % nr_node_ids;
if (!node_possible(node)) {
node = find_next_bit_wrap(node_possible_map.bits,
nr_node_ids, node);
}
}
return &futex_queues[node][hash & futex_hashmask];
}
/**
* futex_setup_timer - set up the sleeping hrtimer.
* @time: ptr to the given timeout value
* @timeout: the hrtimer_sleeper structure to be set up
* @flags: futex flags
* @range_ns: optional range in ns
*
* Return: Initialized hrtimer_sleeper structure or NULL if no timeout
* value given
*/
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
int flags, u64 range_ns)
{
if (!time)
return NULL;
hrtimer_setup_sleeper_on_stack(timeout,
(flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
/*
* If range_ns is 0, calling hrtimer_set_expires_range_ns() is
* effectively the same as calling hrtimer_set_expires().
*/
hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
return timeout;
}
/*
* Generate a machine wide unique identifier for this inode.
*
* This relies on u64 not wrapping in the life-time of the machine; which with
* 1ns resolution means almost 585 years.
*
* This further relies on the fact that a well formed program will not unmap
* the file while it has a (shared) futex waiting on it. This mapping will have
* a file reference which pins the mount and inode.
*
* If for some reason an inode gets evicted and read back in again, it will get
* a new sequence number and will _NOT_ match, even though it is the exact same
* file.
*
* It is important that futex_match() will never have a false-positive, esp.
* for PI futexes that can mess up the state. The above argues that false-negatives
* are only possible for malformed programs.
*/
static u64 get_inode_sequence_number(struct inode *inode)
{
static atomic64_t i_seq;
u64 old;
/* Does the inode already have a sequence number? */
old = atomic64_read(&inode->i_sequence);
if (likely(old))
return old;
for (;;) {
u64 new = atomic64_inc_return(&i_seq);
if (WARN_ON_ONCE(!new))
continue;
old = 0;
if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
return old;
return new;
}
}
/**
* get_futex_key() - Get parameters which are the keys for a futex
* @uaddr: virtual address of the futex
* @flags: FLAGS_*
* @key: address where result is stored.
* @rw: mapping needs to be read/write (values: FUTEX_READ,
* FUTEX_WRITE)
*
* Return: a negative error code or 0
*
* The key words are stored in @key on success.
*
* For shared mappings (when FLAGS_SHARED is set in @flags), the key is:
*
* ( inode->i_sequence, page offset within mapping, offset_within_page )
*
* [ also see get_inode_sequence_number() ]
*
* For private mappings (when FLAGS_SHARED is not set in @flags), the key is:
*
* ( current->mm, address, 0 )
*
* This allows (cross process, where applicable) identification of the futex
* without keeping the page pinned for the duration of the FUTEX_WAIT.
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
enum futex_access rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page;
struct folio *folio;
struct address_space *mapping;
int node, err, size, ro = 0;
bool node_updated = false;
bool fshared;
fshared = flags & FLAGS_SHARED;
size = futex_size(flags);
if (flags & FLAGS_NUMA)
size *= 2;
/*
* The futex address must be "naturally" aligned.
*/
key->both.offset = address % PAGE_SIZE;
if (unlikely((address % size) != 0))
return -EINVAL;
address -= key->both.offset;
if (unlikely(!access_ok(uaddr, size)))
return -EFAULT;
if (unlikely(should_fail_futex(fshared)))
return -EFAULT;
node = FUTEX_NO_NODE;
if (flags & FLAGS_NUMA) {
u32 __user *naddr = (void *)uaddr + size / 2;
if (futex_get_value(&node, naddr))
return -EFAULT;
if ((node != FUTEX_NO_NODE) &&
((unsigned int)node >= MAX_NUMNODES || !node_possible(node)))
return -EINVAL;
}
if (node == FUTEX_NO_NODE && (flags & FLAGS_MPOL)) {
node = futex_mpol(mm, address);
node_updated = true;
}
if (flags & FLAGS_NUMA) {
u32 __user *naddr = (void *)uaddr + size / 2;
if (node == FUTEX_NO_NODE) {
node = numa_node_id();
node_updated = true;
}
if (node_updated && futex_put_value(node, naddr))
return -EFAULT;
}
key->both.node = node;
/*
* PROCESS_PRIVATE futexes are fast.
* As the mm cannot disappear under us and the 'key' only needs
* virtual address, we dont even have to find the underlying vma.
* Note : We do have to check 'uaddr' is a valid user address,
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
/*
* On no-MMU, shared futexes are treated as private, therefore
* we must not include the current process in the key. Since
* there is only one address space, the address is a unique key
* on its own.
*/
if (IS_ENABLED(CONFIG_MMU))
key->private.mm = mm;
else
key->private.mm = NULL;
key->private.address = address;
return 0;
}
again:
/* Ignore any VERIFY_READ mapping (futex common case) */
if (unlikely(should_fail_futex(true)))
return -EFAULT;
err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
/*
* If write access is not required (eg. FUTEX_WAIT), try
* and get read-only access.
*/
if (err == -EFAULT && rw == FUTEX_READ) {
err = get_user_pages_fast(address, 1, 0, &page);
ro = 1;
}
if (err < 0)
return err;
else
err = 0;
/*
* The treatment of mapping from this point on is critical. The folio
* lock protects many things but in this context the folio lock
* stabilizes mapping, prevents inode freeing in the shared
* file-backed region case and guards against movement to swap cache.
*
* Strictly speaking the folio lock is not needed in all cases being
* considered here and the folio lock forces unnecessary serialization.
* From this point on, mapping will be re-verified if necessary and
* the folio lock will be acquired only if it is unavoidable.
*
* Mapping checks require the folio so it is looked up now. For
* anonymous pages, it does not matter if the folio is split
* in the future as the key is based on the address. For
* filesystem-backed pages, the precise page is required as the
* index of the page determines the key.
*/
folio = page_folio(page);
mapping = READ_ONCE(folio->mapping);
/*
* If folio->mapping is NULL, then it cannot be an anonymous
* page; but it might be the ZERO_PAGE or in the gate area or
* in a special mapping (all cases which we are happy to fail);
* or it may have been a good file page when get_user_pages_fast
* found it, but truncated or holepunched or subjected to
* invalidate_complete_page2 before we got the folio lock (also
* cases which we are happy to fail). And we hold a reference,
* so refcount care in invalidate_inode_page's remove_mapping
* prevents drop_caches from setting mapping to NULL beneath us.
*
* The case we do have to guard against is when memory pressure made
* shmem_writepage move it from filecache to swapcache beneath us:
* an unlikely race, but we do need to retry for folio->mapping.
*/
if (unlikely(!mapping)) {
int shmem_swizzled;
/*
* Folio lock is required to identify which special case above
* applies. If this is really a shmem page then the folio lock
* will prevent unexpected transitions.
*/
folio_lock(folio);
shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
folio_unlock(folio);
folio_put(folio);
if (shmem_swizzled)
goto again;
return -EFAULT;
}
/*
* Private mappings are handled in a simple way.
*
* If the futex key is stored in anonymous memory, then the associated
* object is the mm which is implicitly pinned by the calling process.
*
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
* the object not the particular process.
*/
if (folio_test_anon(folio)) {
/*
* A read-only anonymous page will never change and thus doesn't make
* sense for futex operations.
*/
if (unlikely(should_fail_futex(true)) || ro) {
err = -EFAULT;
goto out;
}
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
} else {
struct inode *inode;
/*
* The associated futex object in this case is the inode and
* the folio->mapping must be traversed. Ordinarily this should
* be stabilised under folio lock but it's not strictly
* necessary in this case as we just want to pin the inode, not
* update i_pages or anything like that.
*
* The RCU read lock is taken as the inode is finally freed
* under RCU. If the mapping still matches expectations then the
* mapping->host can be safely accessed as being a valid inode.
*/
rcu_read_lock();
if (READ_ONCE(folio->mapping) != mapping) {
rcu_read_unlock();
folio_put(folio);
goto again;
}
inode = READ_ONCE(mapping->host);
if (!inode) {
rcu_read_unlock();
folio_put(folio);
goto again;
}
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.i_seq = get_inode_sequence_number(inode);
key->shared.pgoff = page_pgoff(folio, page);
rcu_read_unlock();
}
out:
folio_put(folio);
return err;
}
/**
* fault_in_user_writeable() - Fault in user address and verify RW access
* @uaddr: pointer to faulting user space address
*
* Slow path to fixup the fault we just took in the atomic write
* access to @uaddr.
*
* We have no generic implementation of a non-destructive write to the
* user address. We know that we faulted in the atomic pagefault
* disabled section so we can as well avoid the #PF overhead by
* calling get_user_pages() right away.
*/
int fault_in_user_writeable(u32 __user *uaddr)
{
struct mm_struct *mm = current->mm;
int ret;
mmap_read_lock(mm);
ret = fixup_user_fault(mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE, NULL);
mmap_read_unlock(mm);
return ret < 0 ? ret : 0;
}
/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
* @key: the futex key (to distinguish it from other futex futex_q's)
*
* Must be called with the hb lock held.
*/
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
struct futex_q *this;
plist_for_each_entry(this, &hb->chain, list) {
if (futex_match(&this->key, key))
return this;
}
return NULL;
}
/**
* wait_for_owner_exiting - Block until the owner has exited
* @ret: owner's current futex lock status
* @exiting: Pointer to the exiting task
*
* Caller must hold a refcount on @exiting.
*/
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
if (ret != -EBUSY) {
WARN_ON_ONCE(exiting);
return;
}
if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
return;
mutex_lock(&exiting->futex_exit_mutex);
/*
* No point in doing state checking here. If the waiter got here
* while the task was in exec()->exec_futex_release() then it can
* have any FUTEX_STATE_* value when the waiter has acquired the
* mutex. OK, if running, EXITING or DEAD if it reached exit()
* already. Highly unlikely and not a problem. Just one more round
* through the futex maze.
*/
mutex_unlock(&exiting->futex_exit_mutex);
put_task_struct(exiting);
}
/**
* __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be NULL and must be held by the caller.
*/
void __futex_unqueue(struct futex_q *q)
{
struct futex_hash_bucket *hb;
if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
return;
lockdep_assert_held(q->lock_ptr);
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
futex_hb_waiters_dec(hb);
}
/* The key must be already stored in q->key. */
void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb)
__acquires(&hb->lock)
{
/*
* Increment the counter before taking the lock so that
* a potential waker won't miss a to-be-slept task that is
* waiting for the spinlock. This is safe as all futex_q_lock()
* users end up calling futex_queue(). Similarly, for housekeeping,
* decrement the counter at futex_q_unlock() when some error has
* occurred and we don't end up adding the task to the list.
*/
futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
}
void futex_q_unlock(struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
futex_hb_waiters_dec(hb);
spin_unlock(&hb->lock);
}
void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
struct task_struct *task)
{
int prio;
/*
* The priority used to register this element is
* - either the real thread-priority for the real-time threads
* (i.e. threads with a priority lower than MAX_RT_PRIO)
* - or MAX_RT_PRIO for non-RT threads.
* Thus, all RT-threads are woken first in priority order, and
* the others are woken last, in FIFO order.
*/
prio = min(current->normal_prio, MAX_RT_PRIO);
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
q->task = task;
}
/**
* futex_unqueue() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
* be paired with exactly one earlier call to futex_queue().
*
* Return:
* - 1 - if the futex_q was still queued (and we unqueued it);
* - 0 - if the futex_q was already removed by the waking thread
*/
int futex_unqueue(struct futex_q *q)
{
spinlock_t *lock_ptr;
int ret = 0;
/* RCU so lock_ptr is not going away during locking. */
guard(rcu)();
/* In the common case we don't take the spinlock, which is nice. */
retry:
/*
* q->lock_ptr can change between this read and the following spin_lock.
* Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
* optimizing lock_ptr out of the logic below.
*/
lock_ptr = READ_ONCE(q->lock_ptr);
if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
* spin_lock(), causing us to take the wrong lock. This
* corrects the race condition.
*
* Reasoning goes like this: if we have the wrong lock,
* q->lock_ptr must have changed (maybe several times)
* between reading it and the spin_lock(). It can
* change again after the spin_lock() but only if it was
* already changed before the spin_lock(). It cannot,
* however, change back to the original value. Therefore
* we can detect whether we acquired the correct lock.
*/
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
goto retry;
}
__futex_unqueue(q);
BUG_ON(q->pi_state);
spin_unlock(lock_ptr);
ret = 1;
}
return ret;
}
void futex_q_lockptr_lock(struct futex_q *q)
{
spinlock_t *lock_ptr;
/*
* See futex_unqueue() why lock_ptr can change.
*/
guard(rcu)();
retry:
lock_ptr = READ_ONCE(q->lock_ptr);
spin_lock(lock_ptr);
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
goto retry;
}
}
/*
* PI futexes can not be requeued and must remove themselves from the hash
* bucket. The hash bucket lock (i.e. lock_ptr) is held.
*/
void futex_unqueue_pi(struct futex_q *q)
{
/*
* If the lock was not acquired (due to timeout or signal) then the
* rt_waiter is removed before futex_q is. If this is observed by
* an unlocker after dropping the rtmutex wait lock and before
* acquiring the hash bucket lock, then the unlocker dequeues the
* futex_q from the hash bucket list to guarantee consistent state
* vs. userspace. Therefore the dequeue here must be conditional.
*/
if (!plist_node_empty(&q->list))
__futex_unqueue(q);
BUG_ON(!q->pi_state);
put_pi_state(q->pi_state);
q->pi_state = NULL;
}
/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING true
#define HANDLE_DEATH_LIST false
/*
* Process a futex-list entry, check whether it's owned by the
* dying task, and do notification if so:
*/
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
bool pi, bool pending_op)
{
u32 uval, nval, mval;
pid_t owner;
int err;
/* Futex address must be 32bit aligned */
if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
return -1;
retry:
if (get_user(uval, uaddr))
return -1;
/*
* Special case for regular (non PI) futexes. The unlock path in
* user space has two race scenarios:
*
* 1. The unlock path releases the user space futex value and
* before it can execute the futex() syscall to wake up
* waiters it is killed.
*
* 2. A woken up waiter is killed before it can acquire the
* futex in user space.
*
* In the second case, the wake up notification could be generated
* by the unlock path in user space after setting the futex value
* to zero or by the kernel after setting the OWNER_DIED bit below.
*
* In both cases the TID validation below prevents a wakeup of
* potential waiters which can cause these waiters to block
* forever.
*
* In both cases the following conditions are met:
*
* 1) task->robust_list->list_op_pending != NULL
* @pending_op == true
* 2) The owner part of user space futex value == 0
* 3) Regular futex: @pi == false
*
* If these conditions are met, it is safe to attempt waking up a
* potential waiter without touching the user space futex value and
* trying to set the OWNER_DIED bit. If the futex value is zero,
* the rest of the user space mutex state is consistent, so a woken
* waiter will just take over the uncontended futex. Setting the
* OWNER_DIED bit would create inconsistent state and malfunction
* of the user space owner died handling. Otherwise, the OWNER_DIED
* bit is already set, and the woken waiter is expected to deal with
* this.
*/
owner = uval & FUTEX_TID_MASK;
if (pending_op && !pi && !owner) {
futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
FUTEX_BITSET_MATCH_ANY);
return 0;
}
if (owner != task_pid_vnr(curr))
return 0;
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
* via cmpxchg, and if the value had FUTEX_WAITERS
* set, wake up a waiter (if any). (We have to do a
* futex_wake() even if OWNER_DIED is already set -
* to handle the rare but possible case of recursive
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
/*
* We are not holding a lock here, but we want to have
* the pagefault_disable/enable() protection because
* we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
switch (err) {
case -EFAULT:
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
case -EAGAIN:
cond_resched();
goto retry;
default:
WARN_ON_ONCE(1);
return err;
}
}
if (nval != uval)
goto retry;
/*
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS)) {
futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
FUTEX_BITSET_MATCH_ANY);
}
return 0;
}
/*
* Fetch a robust-list pointer. Bit 0 signals PI futexes:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
unsigned int *pi)
{
unsigned long uentry;
if (get_user(uentry, (unsigned long __user *)head))
return -EFAULT;
*entry = (void __user *)(uentry & ~1UL);
*pi = uentry & 1;
return 0;
}
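/*
 * Illustrative sketch (not part of the original source): how a userspace
 * locking library might publish a robust-list entry so that the bit-0
 * decoding above sees a PI futex. "struct my_mutex" and the helper are
 * hypothetical; only struct robust_list / robust_list_head come from the
 * UAPI <linux/futex.h>. The head's futex_offset must point from the entry
 * to the futex word (here: offsetof(struct my_mutex, futex_val)).
 *
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *
 *	struct my_mutex {
 *		struct robust_list list;	// linked into the robust list
 *		uint32_t futex_val;		// the futex word itself
 *	};
 *
 *	static void publish_pending(struct robust_list_head *head,
 *				    struct my_mutex *m, int is_pi)
 *	{
 *		uintptr_t entry = (uintptr_t)&m->list;
 *
 *		if (is_pi)
 *			entry |= 1;	// bit 0 marks a PI futex
 *		head->list_op_pending = (struct robust_list *)entry;
 *	}
 */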
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
* We silently return on any sign of list-walking problem.
*/
static void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int next_pi;
unsigned long futex_offset;
int rc;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (fetch_robust_entry(&entry, &head->list.next, &pi))
return;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
return;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;
next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
/*
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* don't process it twice:
*/
if (entry != pending) {
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi, HANDLE_DEATH_LIST))
return;
}
if (rc)
return;
entry = next_entry;
pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
if (!--limit)
break;
cond_resched();
}
if (pending) {
handle_futex_death((void __user *)pending + futex_offset,
curr, pip, HANDLE_DEATH_PENDING);
}
}
#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
compat_long_t futex_offset)
{
compat_uptr_t base = ptr_to_compat(entry);
void __user *uaddr = compat_ptr(base + futex_offset);
return uaddr;
}
/*
* Fetch a robust-list pointer. Bit 0 signals PI futexes:
*/
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
compat_uptr_t __user *head, unsigned int *pi)
{
if (get_user(*uentry, head))
return -EFAULT;
*entry = compat_ptr((*uentry) & ~1);
*pi = (unsigned int)(*uentry) & 1;
return 0;
}
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
* We silently return on any sign of list-walking problem.
*/
static void compat_exit_robust_list(struct task_struct *curr)
{
struct compat_robust_list_head __user *head = curr->compat_robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int next_pi;
compat_uptr_t uentry, next_uentry, upending;
compat_long_t futex_offset;
int rc;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
return;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
return;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (compat_fetch_robust_entry(&upending, &pending,
&head->list_op_pending, &pip))
return;
next_entry = NULL; /* avoid warning with gcc */
while (entry != (struct robust_list __user *) &head->list) {
/*
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
(compat_uptr_t __user *)&entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* don't process it twice:
*/
if (entry != pending) {
void __user *uaddr = futex_uaddr(entry, futex_offset);
if (handle_futex_death(uaddr, curr, pi,
HANDLE_DEATH_LIST))
return;
}
if (rc)
return;
uentry = next_uentry;
entry = next_entry;
pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
if (!--limit)
break;
cond_resched();
}
if (pending) {
void __user *uaddr = futex_uaddr(pending, futex_offset);
handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
}
}
#endif
#ifdef CONFIG_FUTEX_PI
/*
* This task is holding PI mutexes at exit time => bad.
* Kernel cleans up PI-state, but userspace is likely hosed.
* (Robust-futex cleanup is separate and might save the day for userspace.)
*/
static void exit_pi_state_list(struct task_struct *curr)
{
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
union futex_key key = FUTEX_KEY_INIT;
/*
* The mutex mm_struct::futex_hash_lock might be acquired.
*/
might_sleep();
/*
* Ensure the hash remains stable (no resize) during the while loop
* below. The hb pointer is acquired under the pi_lock so we can't block
* on the mutex.
*/
WARN_ON(curr != current);
guard(private_hash)();
/*
* We are a ZOMBIE and nobody can enqueue itself on
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
if (1) {
CLASS(hb, hb)(&key);
/*
* We can race against put_pi_state() removing itself from the
* list (a waiter going away). put_pi_state() will first
* decrement the reference count and then modify the list, so
* it's possible to see the list entry but fail this reference
* acquire.
*
* In that case; drop the locks to let put_pi_state() make
* progress and retry the loop.
*/
if (!refcount_inc_not_zero(&pi_state->refcount)) {
raw_spin_unlock_irq(&curr->pi_lock);
cpu_relax();
raw_spin_lock_irq(&curr->pi_lock);
continue;
}
raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
raw_spin_lock(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
*/
if (head->next != next) {
/* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
put_pi_state(pi_state);
continue;
}
WARN_ON(pi_state->owner != curr);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
raw_spin_unlock(&curr->pi_lock);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
}
rt_mutex_futex_unlock(&pi_state->pi_mutex);
put_pi_state(pi_state);
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif
static void futex_cleanup(struct task_struct *tsk)
{
if (unlikely(tsk->robust_list)) {
exit_robust_list(tsk);
tsk->robust_list = NULL;
}
#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
compat_exit_robust_list(tsk);
tsk->compat_robust_list = NULL;
}
#endif
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
}
/**
* futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
* @tsk: task to set the state on
*
* Set the futex exit state of the task lockless. The futex waiter code
* observes that state when a task is exiting and loops until the task has
* actually finished the futex cleanup. The worst case for this is that the
* waiter runs through the wait loop until the state becomes visible.
*
* This is called from the recursive fault handling path in make_task_dead().
*
* This is best effort. Either the futex exit code has run already or
* not. If the OWNER_DIED bit has been set on the futex then the waiter can
* take it over. If not, the problem is pushed back to user space. If the
* futex exit code did not run yet, then an already queued waiter might
* block forever, but there is nothing which can be done about that.
*/
void futex_exit_recursive(struct task_struct *tsk)
{
/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
if (tsk->futex_state == FUTEX_STATE_EXITING)
mutex_unlock(&tsk->futex_exit_mutex);
tsk->futex_state = FUTEX_STATE_DEAD;
}
static void futex_cleanup_begin(struct task_struct *tsk)
{
/*
* Prevent various race issues against a concurrent incoming waiter
* including live locks by forcing the waiter to block on
* tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
* attach_to_pi_owner().
*/
mutex_lock(&tsk->futex_exit_mutex);
/*
* Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
*
* This ensures that all subsequent checks of tsk->futex_state in
* attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
* tsk->pi_lock held.
*
* It guarantees also that a pi_state which was queued right before
* the state change under tsk->pi_lock by a concurrent waiter must
* be observed in exit_pi_state_list().
*/
raw_spin_lock_irq(&tsk->pi_lock);
tsk->futex_state = FUTEX_STATE_EXITING;
raw_spin_unlock_irq(&tsk->pi_lock);
}
static void futex_cleanup_end(struct task_struct *tsk, int state)
{
/*
* Lockless store. The only side effect is that an observer might
* take another loop until it becomes visible.
*/
tsk->futex_state = state;
/*
* Drop the exit protection. This unblocks waiters which observed
* FUTEX_STATE_EXITING to reevaluate the state.
*/
mutex_unlock(&tsk->futex_exit_mutex);
}
void futex_exec_release(struct task_struct *tsk)
{
/*
* The state handling is done for consistency, but in the case of
* exec() there is no way to prevent further damage as the PID stays
* the same. But for the unlikely and arguably buggy case that a
* futex is held on exec(), this provides at least as much state
* consistency protection as is possible.
*/
futex_cleanup_begin(tsk);
futex_cleanup(tsk);
/*
* Reset the state to FUTEX_STATE_OK. The task is alive and about to
* exec a new binary.
*/
futex_cleanup_end(tsk, FUTEX_STATE_OK);
}
void futex_exit_release(struct task_struct *tsk)
{
futex_cleanup_begin(tsk);
futex_cleanup(tsk);
futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}
static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
struct futex_private_hash *fph)
{
#ifdef CONFIG_FUTEX_PRIVATE_HASH
fhb->priv = fph;
#endif
atomic_set(&fhb->waiters, 0);
plist_head_init(&fhb->chain);
spin_lock_init(&fhb->lock);
}
#define FH_CUSTOM 0x01
#ifdef CONFIG_FUTEX_PRIVATE_HASH
/*
* futex-ref
*
* Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
* code because it just doesn't fit right.
*
* Dual counter, per-cpu / atomic approach like percpu-refcount, except it
* re-initializes the state automatically, such that the fph swizzle is also a
* transition back to per-cpu.
*/
static void futex_ref_rcu(struct rcu_head *head);
static void __futex_ref_atomic_begin(struct futex_private_hash *fph)
{
struct mm_struct *mm = fph->mm;
/*
* The counter we're about to switch to must have fully switched;
* otherwise it would be impossible for it to have reported success
* from futex_ref_is_dead().
*/
WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0);
/*
* Set the atomic to the bias value such that futex_ref_{get,put}()
* will never observe 0. Will be fixed up in __futex_ref_atomic_end()
* when folding in the percpu count.
*/
atomic_long_set(&mm->futex_atomic, LONG_MAX);
smp_store_release(&fph->state, FR_ATOMIC);
call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
}
static void __futex_ref_atomic_end(struct futex_private_hash *fph)
{
struct mm_struct *mm = fph->mm;
unsigned int count = 0;
long ret;
int cpu;
/*
* Per __futex_ref_atomic_begin() the state of the fph must be ATOMIC
* and per this RCU callback, everybody must now observe this state and
* use the atomic variable.
*/
WARN_ON_ONCE(fph->state != FR_ATOMIC);
/*
* Therefore the per-cpu counter is now stable, sum and reset.
*/
for_each_possible_cpu(cpu) {
unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
count += *ptr;
*ptr = 0;
}
/*
* Re-init for the next cycle.
*/
this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
/*
* Add actual count, subtract bias and initial refcount.
*
* The moment this atomic operation happens, futex_ref_is_dead() can
* become true.
*/
ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
if (!ret)
wake_up_var(mm);
WARN_ON_ONCE(ret < 0);
mmput_async(mm);
}
static void futex_ref_rcu(struct rcu_head *head)
{
struct mm_struct *mm = container_of(head, struct mm_struct, futex_rcu);
struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash);
if (fph->state == FR_PERCPU) {
/*
* Per this extra grace-period, everybody must now observe
* fph as the current fph and no previously observed fph's
* are in-flight.
*
* Notably, nobody will now rely on the atomic
* futex_ref_is_dead() state anymore so we can begin the
* migration of the per-cpu counter into the atomic.
*/
__futex_ref_atomic_begin(fph);
return;
}
__futex_ref_atomic_end(fph);
}
/*
* Drop the initial refcount and transition to atomics.
*/
static void futex_ref_drop(struct futex_private_hash *fph)
{
struct mm_struct *mm = fph->mm;
/*
* Can only transition the current fph;
*/
WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);
/*
* We enqueue at least one RCU callback. Ensure mm stays if the task
* exits before the transition is completed.
*/
mmget(mm);
/*
* In order to avoid the following scenario:
*
* futex_hash()                          __futex_pivot_hash()
*   guard(rcu);                           guard(mm->futex_hash_lock);
*   fph = mm->futex_phash;
*                                         rcu_assign_pointer(&mm->futex_phash, new);
* futex_hash_allocate()
*                                         futex_ref_drop()
*                                           fph->state = FR_ATOMIC;
*                                           atomic_set(, BIAS);
*
*   futex_private_hash_get(fph); // OOPS
*
* where an old fph (which is already FR_ATOMIC and should therefore fail
* inc_not_zero) will nevertheless succeed, because a new transition has
* been started and the atomic is biased away from 0.
*
* There must be at least one full grace-period between publishing a
* new fph and trying to replace it.
*/
if (poll_state_synchronize_rcu(mm->futex_batches)) {
/*
* There was a grace-period, we can begin now.
*/
__futex_ref_atomic_begin(fph);
return;
}
call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
}
static bool futex_ref_get(struct futex_private_hash *fph)
{
struct mm_struct *mm = fph->mm;
guard(preempt)();
if (READ_ONCE(fph->state) == FR_PERCPU) {
__this_cpu_inc(*mm->futex_ref);
return true;
}
return atomic_long_inc_not_zero(&mm->futex_atomic);
}
static bool futex_ref_put(struct futex_private_hash *fph)
{
struct mm_struct *mm = fph->mm;
guard(preempt)();
if (READ_ONCE(fph->state) == FR_PERCPU) {
__this_cpu_dec(*mm->futex_ref);
return false;
}
return atomic_long_dec_and_test(&mm->futex_atomic);
}
static bool futex_ref_is_dead(struct futex_private_hash *fph)
{
struct mm_struct *mm = fph->mm;
guard(rcu)();
if (smp_load_acquire(&fph->state) == FR_PERCPU)
return false;
return atomic_long_read(&mm->futex_atomic) == 0;
}
int futex_mm_init(struct mm_struct *mm)
{
mutex_init(&mm->futex_hash_lock);
RCU_INIT_POINTER(mm->futex_phash, NULL);
mm->futex_phash_new = NULL;
/* futex-ref */
mm->futex_ref = NULL;
atomic_long_set(&mm->futex_atomic, 0);
mm->futex_batches = get_state_synchronize_rcu();
return 0;
}
void futex_hash_free(struct mm_struct *mm)
{
struct futex_private_hash *fph;
free_percpu(mm->futex_ref);
kvfree(mm->futex_phash_new);
fph = rcu_dereference_raw(mm->futex_phash);
if (fph)
kvfree(fph);
}
static bool futex_pivot_pending(struct mm_struct *mm)
{
struct futex_private_hash *fph;
guard(rcu)();
if (!mm->futex_phash_new)
return true;
fph = rcu_dereference(mm->futex_phash);
return futex_ref_is_dead(fph);
}
static bool futex_hash_less(struct futex_private_hash *a,
struct futex_private_hash *b)
{
/* user provided always wins */
if (!a->custom && b->custom)
return true;
if (a->custom && !b->custom)
return false;
/* zero-sized hash wins */
if (!b->hash_mask)
return true;
if (!a->hash_mask)
return false;
/* keep the biggest */
if (a->hash_mask < b->hash_mask)
return true;
if (a->hash_mask > b->hash_mask)
return false;
return false; /* equal */
}
static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
{
struct mm_struct *mm = current->mm;
struct futex_private_hash *fph;
bool custom = flags & FH_CUSTOM;
int i;
if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots)))
return -EINVAL;
/*
* Once we've disabled the global hash there is no way back.
*/
scoped_guard(rcu) {
fph = rcu_dereference(mm->futex_phash);
if (fph && !fph->hash_mask) {
if (custom)
return -EBUSY;
return 0;
}
}
if (!mm->futex_ref) {
/*
* This will always be allocated by the first thread and
* therefore requires no locking.
*/
mm->futex_ref = alloc_percpu(unsigned int);
if (!mm->futex_ref)
return -ENOMEM;
this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
}
fph = kvzalloc(struct_size(fph, queues, hash_slots),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!fph)
return -ENOMEM;
fph->hash_mask = hash_slots ? hash_slots - 1 : 0;
fph->custom = custom;
fph->mm = mm;
for (i = 0; i < hash_slots; i++)
futex_hash_bucket_init(&fph->queues[i], fph);
if (custom) {
/*
* Only let prctl() wait / retry; don't unduly delay clone().
*/
again:
wait_var_event(mm, futex_pivot_pending(mm));
}
scoped_guard(mutex, &mm->futex_hash_lock) {
struct futex_private_hash *free __free(kvfree) = NULL;
struct futex_private_hash *cur, *new;
cur = rcu_dereference_protected(mm->futex_phash,
lockdep_is_held(&mm->futex_hash_lock));
new = mm->futex_phash_new;
mm->futex_phash_new = NULL;
if (fph) {
if (cur && !cur->hash_mask) {
/*
* If two threads simultaneously request the global
* hash then the first one performs the switch,
* the second one returns here.
*/
free = fph;
mm->futex_phash_new = new;
return -EBUSY;
}
if (cur && !new) {
/*
* If we have an existing hash, but have not yet
* allocated a replacement hash, drop the initial
* reference on the existing hash.
*/
futex_ref_drop(cur);
}
if (new) {
/*
* Two updates raced; throw out the lesser one.
*/
if (futex_hash_less(new, fph)) {
free = new;
new = fph;
} else {
free = fph;
}
} else {
new = fph;
}
fph = NULL;
}
if (new) {
/*
* Will set mm->futex_phash_new on failure;
* futex_private_hash_get() will try again.
*/
if (!__futex_pivot_hash(mm, new) && custom)
goto again;
}
}
return 0;
}
int futex_hash_allocate_default(void)
{
unsigned int threads, buckets, current_buckets = 0;
struct futex_private_hash *fph;
if (!current->mm)
return 0;
scoped_guard(rcu) {
threads = min_t(unsigned int,
get_nr_threads(current),
num_online_cpus());
fph = rcu_dereference(current->mm->futex_phash);
if (fph) {
if (fph->custom)
return 0;
current_buckets = fph->hash_mask + 1;
}
}
/*
* The default allocation will remain within
* 16 <= threads * 4 <= global hash size
*/
buckets = roundup_pow_of_two(4 * threads);
buckets = clamp(buckets, 16, futex_hashmask + 1);
if (current_buckets >= buckets)
return 0;
return futex_hash_allocate(buckets, 0);
}
static int futex_hash_get_slots(void)
{
struct futex_private_hash *fph;
guard(rcu)();
fph = rcu_dereference(current->mm->futex_phash);
if (fph && fph->hash_mask)
return fph->hash_mask + 1;
return 0;
}
#else
static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
{
return -EINVAL;
}
static int futex_hash_get_slots(void)
{
return 0;
}
#endif
int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
unsigned int flags = FH_CUSTOM;
int ret;
switch (arg2) {
case PR_FUTEX_HASH_SET_SLOTS:
if (arg4)
return -EINVAL;
ret = futex_hash_allocate(arg3, flags);
break;
case PR_FUTEX_HASH_GET_SLOTS:
ret = futex_hash_get_slots();
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
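/*
 * Illustrative userspace sketch (not part of the original source): driving
 * the interface above through prctl(). It assumes the PR_FUTEX_HASH option
 * and the PR_FUTEX_HASH_* sub-commands exposed via <linux/prctl.h> alongside
 * this code; slot counts must be 0 or a power of two greater than 1, and
 * arg4 must be 0 for SET_SLOTS.
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *	#include <stdio.h>
 *
 *	static void resize_private_futex_hash(unsigned long slots)
 *	{
 *		if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, slots, 0) == -1)
 *			perror("PR_FUTEX_HASH_SET_SLOTS");
 *
 *		// 0 means the process is (still) using the global hash.
 *		printf("private futex hash slots: %d\n",
 *		       (int)prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0));
 *	}
 */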
static int __init futex_init(void)
{
unsigned long hashsize, i;
unsigned int order, n;
unsigned long size;
#ifdef CONFIG_BASE_SMALL
hashsize = 16;
#else
hashsize = 256 * num_possible_cpus();
hashsize /= num_possible_nodes();
hashsize = max(4, hashsize);
hashsize = roundup_pow_of_two(hashsize);
#endif
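/*
 * Worked example of the sizing above (illustrative): with 8 possible CPUs on
 * a single node, hashsize = 256 * 8 / 1 = 2048, already a power of two, so
 * each node gets a 2048-bucket table; with 6 CPUs spread over 2 nodes,
 * 256 * 6 / 2 = 768 is rounded up to 1024 buckets per node.
 */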
futex_hashshift = ilog2(hashsize);
size = sizeof(struct futex_hash_bucket) * hashsize;
order = get_order(size);
for_each_node(n) {
struct futex_hash_bucket *table;
if (order > MAX_PAGE_ORDER)
table = vmalloc_huge_node(size, GFP_KERNEL, n);
else
table = alloc_pages_exact_nid(n, size, GFP_KERNEL);
BUG_ON(!table);
for (i = 0; i < hashsize; i++)
futex_hash_bucket_init(&table[i], NULL);
futex_queues[n] = table;
}
futex_hashmask = hashsize - 1;
pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n",
hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024,
order > MAX_PAGE_ORDER ? "vmalloc" : "linear");
return 0;
}
core_initcall(futex_init);
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* AEAD: Authenticated Encryption with Associated Data
*
* Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
#ifndef _CRYPTO_AEAD_H
#define _CRYPTO_AEAD_H
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/types.h>
/**
* DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
*
* The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
* (listed as type "aead" in /proc/crypto)
*
* The most prominent examples for this type of encryption are GCM and CCM.
* However, the kernel supports other types of AEAD ciphers which are defined
* with the following cipher string:
*
* authenc(keyed message digest, block cipher)
*
* For example: authenc(hmac(sha256), cbc(aes))
*
* The example code provided for the symmetric key cipher operation applies
* here as well. Naturally all *skcipher* symbols must be exchanged for the
* *aead* counterparts discussed in the following. In addition, for the AEAD
* operation, the aead_request_set_ad function must be used to set the
* pointer to the associated data memory location before performing the
* encryption or decryption operation. Another deviation from the
* asynchronous block cipher operation is that the caller should explicitly
* check for -EBADMSG returned by crypto_aead_decrypt. That error indicates
* an authentication error, i.e.
* a breach in the integrity of the message. In essence, that -EBADMSG error
* code is the key bonus an AEAD cipher has over "standard" block chaining
* modes.
*
* Memory Structure:
*
* The source scatterlist must contain the concatenation of
* associated data || plaintext or ciphertext.
*
* The destination scatterlist has the same layout, except that the plaintext
* (resp. ciphertext) will grow (resp. shrink) by the authentication tag size
* during encryption (resp. decryption). The authentication tag is generated
* during the encryption operation and appended to the ciphertext. During
* decryption, the authentication tag is consumed along with the ciphertext and
* used to verify the integrity of the plaintext and the associated data.
*
* In-place encryption/decryption is enabled by using the same scatterlist
* pointer for both the source and destination.
*
* Even in the out-of-place case, space must be reserved in the destination for
* the associated data, even though it won't be written to. This makes the
* in-place and out-of-place cases more consistent. It is permissible for the
* "destination" associated data to alias the "source" associated data.
*
* As with the other scatterlist crypto APIs, zero-length scatterlist elements
* are not allowed in the used part of the scatterlist. Thus, if there is no
* associated data, the first element must point to the plaintext/ciphertext.
*
* To meet the needs of IPsec, a special quirk applies to rfc4106, rfc4309,
* rfc4543, and rfc7539esp ciphers. For these ciphers, the final 'ivsize' bytes
* of the associated data buffer must contain a second copy of the IV. This is
* in addition to the copy passed to aead_request_set_crypt(). These two IV
* copies must not differ; different implementations of the same algorithm may
* behave differently in that case. Note that the algorithm might not actually
* treat the IV as associated data; nevertheless the length passed to
* aead_request_set_ad() must include it.
*/
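/*
 * Illustrative usage sketch (not part of the original header): one-shot,
 * in-place AES-128-GCM encryption following the memory layout described
 * above. The buffer sizes, key/IV contents and the synchronous wait via the
 * crypto_wait helpers from <linux/crypto.h> are assumptions made for this
 * example; the scatterlist helper comes from <linux/scatterlist.h>. Error
 * handling is trimmed for brevity.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 key[16] = { ... }, iv[12] = { ... };
 *	u8 buf[8 + 32 + 16];	// assoc (8) || plaintext (32) || room for tag
 *	int err;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, key, sizeof(key));
 *	err = err ?: crypto_aead_setauthsize(tfm, 16);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				  crypto_req_done, &wait);
 *	aead_request_set_ad(req, 8);			// assoc data length
 *	aead_request_set_crypt(req, &sg, &sg, 32, iv);	// plaintext length
 *	err = err ?: crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *	// On success buf holds: assoc data || ciphertext || 16-byte tag.
 *
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */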
struct crypto_aead;
struct scatterlist;
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
* @assoclen: Length in bytes of associated data for authentication
* @cryptlen: Length of data to be encrypted or decrypted
* @iv: Initialisation vector
* @src: Source data
* @dst: Destination data
* @__ctx: Start of private context data
*/
struct aead_request {
struct crypto_async_request base;
unsigned int assoclen;
unsigned int cryptlen;
u8 *iv;
struct scatterlist *src;
struct scatterlist *dst;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
* As the authentication tag is a message digest to ensure the
* integrity of the encrypted data, a consumer typically wants the
* largest authentication tag possible as defined by this
* variable.
* @setauthsize: Set authentication size for the AEAD transformation. This
* function is used to specify the consumer requested size of the
* authentication tag to be either generated by the transformation
* during encryption or the size of the authentication tag to be
* supplied during the decryption operation. This function is also
* responsible for checking the authentication tag size for
* validity.
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
* after the transformation context was allocated. In case the
* cryptographic hardware has some special requirements which need to
* be handled by software, this function shall check for the precise
* requirement of the transformation and put any software fallbacks
* in place.
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
* @base: Definition of a generic crypto cipher algorithm.
*
* All fields except @ivsize are mandatory and must be filled.
*/
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
int (*encrypt)(struct aead_request *req);
int (*decrypt)(struct aead_request *req);
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
struct crypto_alg base;
};
struct crypto_aead {
unsigned int authsize;
unsigned int reqsize;
struct crypto_tfm base;
};
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_aead, base);
}
/**
* crypto_alloc_aead() - allocate AEAD cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* AEAD cipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for an AEAD. The returned struct
* crypto_aead is the cipher handle that is required for any subsequent
* API invocation for that AEAD.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
return &tfm->base;
}
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
*
* If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
/**
* crypto_has_aead() - Search for the availability of an aead.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* aead
* @type: specifies the type of the aead
* @mask: specifies the mask for the aead
*
* Return: true when the aead is known to the kernel crypto API; false
* otherwise
*/
int crypto_has_aead(const char *alg_name, u32 type, u32 mask);
static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm)
{
return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
}
static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
return container_of(crypto_aead_tfm(tfm)->__crt_alg,
struct aead_alg, base);
}
static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
{
return alg->ivsize;
}
/**
* crypto_aead_ivsize() - obtain IV size
* @tfm: cipher handle
*
* The size of the IV for the aead referenced by the cipher handle is
* returned. This IV size may be zero if the cipher does not need an IV.
*
* Return: IV size in bytes
*/
static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
{
return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
}
/**
* crypto_aead_authsize() - obtain maximum authentication data size
* @tfm: cipher handle
*
* The maximum size of the authentication data for the AEAD cipher referenced
* by the AEAD cipher handle is returned. The authentication data size may be
* zero if the cipher implements a hard-coded maximum.
*
* The authentication data may also be known as "tag value".
*
* Return: authentication data size / tag size in bytes
*/
static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
{
return tfm->authsize;
}
static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
return alg->maxauthsize;
}
static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
{
return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
* The block size for the AEAD referenced with the cipher handle is returned.
* The caller may use that information to allocate appropriate memory for the
* data returned by the encryption or decryption operation.
*
* Return: block size of cipher
*/
static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
{
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}
static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
}
static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
{
return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
}
static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
{
crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
}
static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
{
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
/**
* crypto_aead_setkey() - set key for cipher
* @tfm: cipher handle
* @key: buffer holding the key
* @keylen: length of the key in bytes
*
* The caller provided key is set for the AEAD referenced by the cipher
* handle.
*
* Note, the key length determines the cipher type. Many block ciphers support
* several key sizes which select different cipher variants, such as AES-128
* vs. AES-192 vs. AES-256. When providing a 16 byte key for an AES cipher
* handle, AES-128 is performed.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen);
/**
* crypto_aead_setauthsize() - set authentication data size
* @tfm: cipher handle
* @authsize: size of the authentication data / tag in bytes
*
* Set the authentication data size / tag size. AEAD requires an authentication
* tag (or MAC) in addition to the associated data.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
return __crypto_aead_cast(req->base.tfm);
}
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
* needed to perform the cipher operation
*
* Encrypt plaintext data using the aead_request handle. That data structure
* and how it is filled with data is discussed with the aead_request_*
* functions.
*
* IMPORTANT NOTE The encryption operation creates the authentication data /
* tag. That data is concatenated with the created ciphertext.
* The ciphertext memory size is therefore the given number of
* block cipher blocks + the size defined by the
* crypto_aead_setauthsize invocation. The caller must ensure
* that sufficient memory is available for the ciphertext and
* the authentication tag.
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
int crypto_aead_encrypt(struct aead_request *req);
/**
* crypto_aead_decrypt() - decrypt ciphertext
* @req: reference to the aead_request handle that holds all information
* needed to perform the cipher operation
*
* Decrypt ciphertext data using the aead_request handle. That data structure
* and how it is filled with data is discussed with the aead_request_*
* functions.
*
* IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
* authentication data / tag. That authentication data / tag
* must have the size defined by the crypto_aead_setauthsize
* invocation.
*
* Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
* cipher operation performs the authentication of the data during the
* decryption operation. Therefore, the function returns this error if
* the authentication of the ciphertext was unsuccessful (i.e. the
* integrity of the ciphertext or the associated data was violated);
* < 0 if an error occurred.
*/
int crypto_aead_decrypt(struct aead_request *req);
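/*
 * Illustrative sketch (not part of the original header): distinguishing an
 * authentication failure from other errors, assuming a request set up as in
 * the encryption example above and a synchronous wait via crypto_wait_req().
 *
 *	err = crypto_wait_req(crypto_aead_decrypt(req), &wait);
 *	if (err == -EBADMSG)
 *		pr_warn("AEAD: message failed authentication\n");
 *	else if (err)
 *		pr_err("AEAD: decryption failed: %d\n", err);
 */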
/**
* DOC: Asynchronous AEAD Request Handle
*
* The aead_request data structure contains all pointers to data required for
* the AEAD cipher operation. This includes the cipher handle (which can be
* used by multiple aead_request instances), pointer to plaintext and
* ciphertext, asynchronous callback function, etc. It acts as a handle to the
* aead_request_* API calls in a similar way as AEAD handle to the
* crypto_aead_* API calls.
*/
/**
* crypto_aead_reqsize() - obtain size of the request data structure
* @tfm: cipher handle
*
* Return: number of bytes
*/
static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
return tfm->reqsize;
}
/**
* aead_request_set_tfm() - update cipher handle reference in request
* @req: request handle to be modified
* @tfm: cipher handle that shall be added to the request handle
*
* Allow the caller to replace the existing aead handle in the request
* data structure with a different one.
*/
static inline void aead_request_set_tfm(struct aead_request *req,
struct crypto_aead *tfm)
{
req->base.tfm = crypto_aead_tfm(tfm);
}
/**
* aead_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
* @gfp: memory allocation flag that is handed to kmalloc by the API call.
*
* Allocate the request data structure that must be used with the AEAD
* encrypt and decrypt API calls. During the allocation, the provided aead
* handle is registered in the request data structure.
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
gfp_t gfp)
{
struct aead_request *req;
req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
if (likely(req))
aead_request_set_tfm(req, tfm);
return req;
}
/**
* aead_request_free() - zeroize and free request data structure
* @req: request data structure cipher handle to be freed
*/
static inline void aead_request_free(struct aead_request *req)
{
kfree_sensitive(req);
}
/**
* aead_request_set_callback() - set asynchronous callback function
* @req: request handle
* @flags: specify zero or an ORing of the flags
* CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
* increase the wait queue beyond the initial maximum size;
* CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
* @compl: callback function pointer to be registered with the request handle
* @data: The data pointer refers to memory that is not used by the kernel
* crypto API, but provided to the callback function for it to use. Here,
* the caller can provide a reference to memory the callback function can
* operate on. As the callback function is invoked asynchronously to the
* related functionality, it may need to access data structures of the
* related functionality which can be referenced using this pointer. The
* callback function can access the memory via the "data" field in the
* crypto_async_request data structure provided to the callback function.
*
* Setting the callback function that is triggered once the cipher operation
* completes
*
* The callback function is registered with the aead_request handle and
* must comply with the following template::
*
* void callback_function(struct crypto_async_request *req, int error)
*/
static inline void aead_request_set_callback(struct aead_request *req,
u32 flags,
crypto_completion_t compl,
void *data)
{
req->base.complete = compl;
req->base.data = data;
req->base.flags = flags;
}
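/*
 * Illustrative sketch (not part of the original header): because an AEAD
 * implementation may be asynchronous, crypto_aead_encrypt()/decrypt() can
 * return -EINPROGRESS or -EBUSY and signal completion through this callback.
 * Callers that want synchronous behaviour commonly use the crypto_wait
 * helpers from <linux/crypto.h>:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				       CRYPTO_TFM_REQ_MAY_SLEEP,
 *				  crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */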
/**
* aead_request_set_crypt - set data buffers
* @req: request handle
* @src: source scatter / gather list
* @dst: destination scatter / gather list
* @cryptlen: number of bytes to process from @src
* @iv: IV for the cipher operation which must comply with the IV size defined
* by crypto_aead_ivsize()
*
* Setting the source data and destination data scatter / gather lists which
* hold the associated data concatenated with the plaintext or ciphertext. See
* below for the authentication tag.
*
* For encryption, the source is treated as the plaintext and the
* destination is the ciphertext. For a decryption operation, the use is
* reversed - the source is the ciphertext and the destination is the plaintext.
*
* The memory structure for cipher operation has the following structure:
*
* - AEAD encryption input: assoc data || plaintext
* - AEAD encryption output: assoc data || ciphertext || auth tag
* - AEAD decryption input: assoc data || ciphertext || auth tag
* - AEAD decryption output: assoc data || plaintext
*
* Although the kernel requires the presence of the AAD buffer, the kernel
* does not fill the AAD buffer in the output case. If the caller wants to
* have that data buffer filled, the caller must use an in-place cipher
* operation (i.e. the same memory location for the input and output).
*/
static inline void aead_request_set_crypt(struct aead_request *req,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int cryptlen, u8 *iv)
{
req->src = src;
req->dst = dst;
req->cryptlen = cryptlen;
req->iv = iv;
}
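/*
 * Illustrative sketch (not part of the original header): building the source
 * and destination scatterlists for an out-of-place operation where the assoc
 * data and the payload live in separate buffers. Buffer names and lengths are
 * assumptions; the destination reserves room for the tag, and the destination
 * AAD slot may alias the source AAD as described above.
 *
 *	struct scatterlist src[2], dst[2];
 *
 *	sg_init_table(src, 2);
 *	sg_set_buf(&src[0], assoc, assoclen);
 *	sg_set_buf(&src[1], ptext, ptlen);
 *
 *	sg_init_table(dst, 2);
 *	sg_set_buf(&dst[0], assoc, assoclen);
 *	sg_set_buf(&dst[1], ctext, ptlen + crypto_aead_authsize(tfm));
 *
 *	aead_request_set_crypt(req, src, dst, ptlen, iv);
 *	aead_request_set_ad(req, assoclen);
 */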
/**
* aead_request_set_ad - set associated data information
* @req: request handle
* @assoclen: number of bytes in associated data
*
* Setting the AD information. This function sets the length of
* the associated data.
*/
static inline void aead_request_set_ad(struct aead_request *req,
unsigned int assoclen)
{
req->assoclen = assoclen;
}
#endif /* _CRYPTO_AEAD_H */
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include <trace/events/mmap_lock.h>
#include <linux/mm.h>
#include <linux/cgroup.h>
#include <linux/memcontrol.h>
#include <linux/mmap_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/trace_events.h>
#include <linux/local_lock.h>
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
#ifdef CONFIG_TRACING
/*
* Trace calls must be in a separate file, as otherwise there's a circular
* dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
*/
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
trace_mmap_lock_start_locking(mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
bool success)
{
trace_mmap_lock_acquire_returned(mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
trace_mmap_lock_released(mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MMU
#ifdef CONFIG_PER_VMA_LOCK
static inline bool __vma_enter_locked(struct vm_area_struct *vma, bool detaching)
{
unsigned int tgt_refcnt = VMA_LOCK_OFFSET;
/* Additional refcnt if the vma is attached. */
if (!detaching)
tgt_refcnt++;
/*
* If vma is detached then only vma_mark_attached() can raise the
* vm_refcnt. mmap_write_lock prevents racing with vma_mark_attached().
*/
if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt))
return false;
rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
rcuwait_wait_event(&vma->vm_mm->vma_writer_wait,
refcount_read(&vma->vm_refcnt) == tgt_refcnt,
TASK_UNINTERRUPTIBLE);
lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
return true;
}
static inline void __vma_exit_locked(struct vm_area_struct *vma, bool *detached)
{
*detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
}
void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq)
{
bool locked;
/*
* __vma_enter_locked() returns false immediately if the vma is not
* attached, otherwise it waits until refcnt is indicating that vma
* is attached with no readers.
*/
locked = __vma_enter_locked(vma, false);
/*
* We should use WRITE_ONCE() here because we can have concurrent reads
* from the early lockless pessimistic check in vma_start_read().
* We don't really care about the correctness of that early check, but
* we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
*/
WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
if (locked) {
bool detached;
__vma_exit_locked(vma, &detached);
WARN_ON_ONCE(detached); /* vma should remain attached */
}
}
EXPORT_SYMBOL_GPL(__vma_start_write);
void vma_mark_detached(struct vm_area_struct *vma)
{
vma_assert_write_locked(vma);
vma_assert_attached(vma);
/*
* We are the only writer, so no need to use vma_refcount_put().
* The condition below is unlikely because the vma has been already
* write-locked and readers can increment vm_refcnt only temporarily
* before they check vm_lock_seq, realize the vma is locked and drop
* back the vm_refcnt. That is a narrow window for observing a raised
* vm_refcnt.
*/
if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
/* Wait until vma is detached with no readers. */
if (__vma_enter_locked(vma, true)) {
bool detached;
__vma_exit_locked(vma, &detached);
WARN_ON_ONCE(!detached);
}
}
}
/*
* Try to read-lock a vma. The function is allowed to occasionally yield a false
* locked result to avoid performance overhead, in which case we fall back to
* using mmap_lock. The function should never yield a false unlocked result.
* A false locked result is possible if mm_lock_seq overflows or if the vma gets
* reused and attached to a different mm before we lock it.
* Returns the vma on success, NULL on failure to lock, and ERR_PTR(-EAGAIN) if
* the vma got detached.
*
* IMPORTANT: RCU lock must be held upon entering the function, but upon error
* IT IS RELEASED. The caller must handle this correctly.
*/
static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
struct vm_area_struct *vma)
{
struct mm_struct *other_mm;
int oldcnt;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
/*
* Check before locking. A race might cause false locked result.
* We can use READ_ONCE() for the mm_lock_seq here, and don't need
* ACQUIRE semantics, because this is just a lockless check whose result
* we don't rely on for anything - the mm_lock_seq read against which we
* need ordering is below.
*/
if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) {
vma = NULL;
goto err;
}
/*
* If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire()
* will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET.
* Acquire fence is required here to avoid reordering against later
* vm_lock_seq check and checks inside lock_vma_under_rcu().
*/
if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
VMA_REF_LIMIT))) {
/* return EAGAIN if vma got detached from under us */
vma = oldcnt ? NULL : ERR_PTR(-EAGAIN);
goto err;
}
rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
if (unlikely(vma->vm_mm != mm))
goto err_unstable;
/*
* Overflow of vm_lock_seq/mm_lock_seq might produce false locked result.
* False unlocked result is impossible because we modify and check
* vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq
* modification invalidates all existing locks.
*
* We must use ACQUIRE semantics for the mm_lock_seq so that if we are
* racing with vma_end_write_all(), we only start reading from the VMA
* after it has been unlocked.
* This pairs with RELEASE semantics in vma_end_write_all().
*/
if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
vma_refcount_put(vma);
vma = NULL;
goto err;
}
return vma;
err:
rcu_read_unlock();
return vma;
err_unstable:
/*
* If vma got attached to another mm from under us, that mm is not
* stable and can be freed in the narrow window after vma->vm_refcnt
* is dropped and before rcuwait_wake_up(mm) is called. Grab it before
* releasing vma->vm_refcnt.
*/
other_mm = vma->vm_mm; /* use a copy as vma can be freed after we drop vm_refcnt */
/* __mmdrop() is a heavy operation, do it after dropping RCU lock. */
rcu_read_unlock();
mmgrab(other_mm);
vma_refcount_put(vma);
mmdrop(other_mm);
return NULL;
}
/*
* Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
* stable and not isolated. If the VMA is not found or is being modified the
* function returns NULL.
*/
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address)
{
MA_STATE(mas, &mm->mm_mt, address, address);
struct vm_area_struct *vma;
retry:
rcu_read_lock();
vma = mas_walk(&mas);
if (!vma) {
rcu_read_unlock();
goto inval;
}
vma = vma_start_read(mm, vma);
if (IS_ERR_OR_NULL(vma)) {
/* Check if the VMA got isolated after we found it */
if (PTR_ERR(vma) == -EAGAIN) {
count_vm_vma_lock_event(VMA_LOCK_MISS);
/* The area was replaced with another one */
mas_set(&mas, address);
goto retry;
}
/* Failed to lock the VMA */
goto inval;
}
/*
* At this point, we have a stable reference to a VMA: The VMA is
* locked and we know it hasn't already been isolated.
* From here on, we can access the VMA without worrying about which
* fields are accessible for RCU readers.
*/
rcu_read_unlock();
/* Check if the vma we locked is the right one. */
if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
vma_end_read(vma);
goto inval;
}
return vma;
inval:
count_vm_vma_lock_event(VMA_LOCK_ABORT);
return NULL;
}
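/*
* Illustrative sketch (not from the original source): a typical caller
* pattern for lock_vma_under_rcu() in a fault path. "do_fault_work" is a
* hypothetical placeholder; real page-fault handlers add retry and
* accounting logic around this.
*
*	vma = lock_vma_under_rcu(mm, address);
*	if (!vma)
*		goto fall_back_to_mmap_lock;
*
*	/@ inspect vma->vm_flags, then do_fault_work(vma, address) @/
*
*	vma_end_read(vma);
*/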
static struct vm_area_struct *lock_next_vma_under_mmap_lock(struct mm_struct *mm,
struct vma_iterator *vmi,
unsigned long from_addr)
{
struct vm_area_struct *vma;
int ret;
ret = mmap_read_lock_killable(mm);
if (ret)
return ERR_PTR(ret);
/* Lookup the vma at the last position again under mmap_read_lock */
vma_iter_set(vmi, from_addr);
vma = vma_next(vmi);
if (vma) {
/* Very unlikely vma->vm_refcnt overflow case */
if (unlikely(!vma_start_read_locked(vma)))
vma = ERR_PTR(-EAGAIN);
}
mmap_read_unlock(mm);
return vma;
}
struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
struct vma_iterator *vmi,
unsigned long from_addr)
{
struct vm_area_struct *vma;
unsigned int mm_wr_seq;
bool mmap_unlocked;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu read lock held");
retry:
/* Start mmap_lock speculation in case we need to verify the vma later */
mmap_unlocked = mmap_lock_speculate_try_begin(mm, &mm_wr_seq);
vma = vma_next(vmi);
if (!vma)
return NULL;
vma = vma_start_read(mm, vma);
if (IS_ERR_OR_NULL(vma)) {
/*
* Retry immediately if the vma gets detached from under us.
* Infinite loop should not happen because the vma we find will
* have to be constantly knocked out from under us.
*/
if (PTR_ERR(vma) == -EAGAIN) {
/* reset to search from the last address */
rcu_read_lock();
vma_iter_set(vmi, from_addr);
goto retry;
}
goto fallback;
}
/* Verify the vma is not behind the last search position. */
if (unlikely(from_addr >= vma->vm_end))
goto fallback_unlock;
/*
* vma can be ahead of the last search position but we need to verify
* it was not shrunk after we found it and another vma has not been
* installed ahead of it. Otherwise we might observe a gap that should
* not be there.
*/
if (from_addr < vma->vm_start) {
/* Verify only if the address space might have changed since vma lookup. */
if (!mmap_unlocked || mmap_lock_speculate_retry(mm, mm_wr_seq)) {
vma_iter_set(vmi, from_addr);
if (vma != vma_next(vmi))
goto fallback_unlock;
}
}
return vma;
fallback_unlock:
rcu_read_unlock();
vma_end_read(vma);
fallback:
vma = lock_next_vma_under_mmap_lock(mm, vmi, from_addr);
rcu_read_lock();
/* Reinitialize the iterator after re-entering rcu read section */
vma_iter_set(vmi, IS_ERR_OR_NULL(vma) ? from_addr : vma->vm_end);
return vma;
}
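/*
* Illustrative sketch (not from the original source): walking all VMAs with
* lock_next_vma() under RCU, roughly as a /proc-style reader might. The
* bookkeeping is simplified; real callers also distinguish ERR_PTR() returns
* from the mmap_lock fallback.
*
*	VMA_ITERATOR(vmi, mm, 0);
*	unsigned long addr = 0;
*	struct vm_area_struct *vma;
*
*	rcu_read_lock();
*	while ((vma = lock_next_vma(mm, &vmi, addr)) != NULL) {
*		if (IS_ERR(vma))
*			break;
*		addr = vma->vm_end;
*		/@ report vma @/
*		vma_end_read(vma);
*	}
*	rcu_read_unlock();
*/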
#endif /* CONFIG_PER_VMA_LOCK */
#ifdef CONFIG_LOCK_MM_AND_FIND_VMA
#include <linux/extable.h>
static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
{
if (likely(mmap_read_trylock(mm)))
return true;
if (regs && !user_mode(regs)) {
unsigned long ip = exception_ip(regs);
if (!search_exception_tables(ip))
return false;
}
return !mmap_read_lock_killable(mm);
}
static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
{
/*
* We don't have this operation yet.
*
* It should be easy enough to do: it's basically a
* atomic_long_try_cmpxchg_acquire()
* from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
* it also needs the proper lockdep magic etc.
*/
return false;
}
static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
{
mmap_read_unlock(mm);
if (regs && !user_mode(regs)) {
unsigned long ip = exception_ip(regs);
if (!search_exception_tables(ip))
return false;
}
return !mmap_write_lock_killable(mm);
}
/*
* Helper for page fault handling.
*
* This is kind of equivalent to "mmap_read_lock()" followed
* by "find_extend_vma()", except it's a lot more careful about
* the locking (and will drop the lock on failure).
*
* For example, if we have a kernel bug that causes a page
* fault, we don't want to just use mmap_read_lock() to get
* the mm lock, because that would deadlock if the bug were
* to happen while we're holding the mm lock for writing.
*
* So this checks the exception tables on kernel faults in
* order to only do this all for instructions that are actually
* expected to fault.
*
* We can also actually take the mm lock for writing if we
* need to extend the vma, which helps the VM layer a lot.
*/
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long addr, struct pt_regs *regs)
{
struct vm_area_struct *vma;
if (!get_mmap_lock_carefully(mm, regs))
return NULL;
vma = find_vma(mm, addr);
if (likely(vma && (vma->vm_start <= addr)))
return vma;
/*
* Well, dang. We might still be successful, but only
* if we can extend a vma to do so.
*/
if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
mmap_read_unlock(mm);
return NULL;
}
/*
* We can try to upgrade the mmap lock atomically,
* in which case we can continue to use the vma
* we already looked up.
*
* Otherwise we'll have to drop the mmap lock and
* re-take it, and also look up the vma again,
* re-checking it.
*/
if (!mmap_upgrade_trylock(mm)) {
if (!upgrade_mmap_lock_carefully(mm, regs))
return NULL;
vma = find_vma(mm, addr);
if (!vma)
goto fail;
if (vma->vm_start <= addr)
goto success;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto fail;
}
if (expand_stack_locked(vma, addr))
goto fail;
success:
mmap_write_downgrade(mm);
return vma;
fail:
mmap_write_unlock(mm);
return NULL;
}
#endif /* CONFIG_LOCK_MM_AND_FIND_VMA */
#else /* CONFIG_MMU */
/*
* At least xtensa ends up having protection faults even with no
* MMU.. No stack expansion, at least.
*/
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long addr, struct pt_regs *regs)
{
struct vm_area_struct *vma;
mmap_read_lock(mm);
vma = vma_lookup(mm, addr);
if (!vma)
mmap_read_unlock(mm);
return vma;
}
#endif /* CONFIG_MMU */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GENERIC_NETLINK_H
#define __NET_GENERIC_NETLINK_H
#include <linux/net.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <uapi/linux/genetlink.h>
#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
/* Non-parallel generic netlink requests are serialized by a global lock. */
void genl_lock(void);
void genl_unlock(void);
#define MODULE_ALIAS_GENL_FAMILY(family) \
MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
/* Binding to multicast group requires %CAP_NET_ADMIN */
#define GENL_MCAST_CAP_NET_ADMIN BIT(0)
/* Binding to multicast group requires %CAP_SYS_ADMIN */
#define GENL_MCAST_CAP_SYS_ADMIN BIT(1)
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
* @flags: GENL_MCAST_* flags
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
u8 flags;
};
struct genl_split_ops;
struct genl_info;
/**
* struct genl_family - generic netlink family
* @hdrsize: length of user specific header in bytes
* @name: name of family
* @version: protocol version
* @maxattr: maximum number of attributes supported
* @policy: netlink policy
* @netnsok: set to true if the family can handle network
* namespaces and should be presented in all of them
* @parallel_ops: operations can be called in parallel and aren't
* synchronized by the core genetlink code
* @pre_doit: called before an operation's doit callback, it may
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
* @bind: called when family multicast group is added to a netlink socket
* @unbind: called when family multicast group is removed from a netlink socket
* @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
* @resv_start_op: first operation for which reserved fields of the header
* can be validated and policies are required (see below);
* new families should leave this field at zero
* @ops: the operations supported by this family
* @n_ops: number of operations supported by this family
* @small_ops: the small-struct operations supported by this family
* @n_small_ops: number of small-struct operations supported by this family
* @split_ops: the split do/dump form of operation definition
* @n_split_ops: number of entries in @split_ops, note that with split do/dump
* ops the number of entries is not the same as number of commands
* @sock_priv_size: the size of per-socket private memory
* @sock_priv_init: the per-socket private memory initializer
* @sock_priv_destroy: the per-socket private memory destructor
*
* Attribute policies (the combination of @policy and @maxattr fields)
* can be attached at the family level or at the operation level.
* If both are present the per-operation policy takes precedence.
* For operations before @resv_start_op lack of policy means that the core
* will perform no attribute parsing or validation. For newer operations
* if policy is not provided core will reject all TLV attributes.
*/
struct genl_family {
unsigned int hdrsize;
char name[GENL_NAMSIZ];
unsigned int version;
unsigned int maxattr;
u8 netnsok:1;
u8 parallel_ops:1;
u8 n_ops;
u8 n_small_ops;
u8 n_split_ops;
u8 n_mcgrps;
u8 resv_start_op;
const struct nla_policy *policy;
int (*pre_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
void (*post_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
int (*bind)(int mcgrp);
void (*unbind)(int mcgrp);
const struct genl_ops * ops;
const struct genl_small_ops *small_ops;
const struct genl_split_ops *split_ops;
const struct genl_multicast_group *mcgrps;
struct module *module;
size_t sock_priv_size;
void (*sock_priv_init)(void *priv);
void (*sock_priv_destroy)(void *priv);
/* private: internal use only */
/* protocol family identifier */
int id;
/* starting number of multicast group IDs in this family */
unsigned int mcgrp_offset;
/* list of per-socket privs */
struct xarray *sock_privs;
};
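/*
* Illustrative sketch (not from the original header): a minimal family
* definition and registration. All "my_"/"MY_" identifiers are hypothetical,
* as are the command and attribute enums they reference.
*
*	static const struct genl_ops my_ops[] = {
*		{
*			.cmd	= MY_CMD_GET,
*			.doit	= my_doit,
*			.flags	= GENL_ADMIN_PERM,
*		},
*	};
*
*	static struct genl_family my_family = {
*		.name		= "my_family",
*		.version	= 1,
*		.maxattr	= MY_ATTR_MAX,
*		.policy		= my_policy,
*		.module		= THIS_MODULE,
*		.ops		= my_ops,
*		.n_ops		= ARRAY_SIZE(my_ops),
*		.resv_start_op	= MY_CMD_GET + 1,
*	};
*
*	err = genl_register_family(&my_family);
*/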
/**
* struct genl_info - receiving information
* @snd_seq: sending sequence number
* @snd_portid: netlink portid of sender
* @family: generic netlink family
* @nlhdr: netlink message header
* @genlhdr: generic netlink message header
* @attrs: netlink attributes
* @_net: network namespace
* @ctx: storage space for the use by the family
* @user_ptr: user pointers (deprecated, use ctx instead)
* @extack: extended ACK report struct
*/
struct genl_info {
u32 snd_seq;
u32 snd_portid;
const struct genl_family *family;
const struct nlmsghdr * nlhdr;
struct genlmsghdr * genlhdr;
struct nlattr ** attrs;
possible_net_t _net;
union {
u8 ctx[NETLINK_CTX_SIZE];
void * user_ptr[2];
};
struct netlink_ext_ack *extack;
};
static inline struct net *genl_info_net(const struct genl_info *info)
{
return read_pnet(&info->_net);
}
static inline void genl_info_net_set(struct genl_info *info, struct net *net)
{
write_pnet(&info->_net, net);
}
static inline void *genl_info_userhdr(const struct genl_info *info)
{
return (u8 *)info->genlhdr + GENL_HDRLEN;
}
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
#define GENL_SET_ERR_MSG_FMT(info, msg, args...) \
NL_SET_ERR_MSG_FMT((info)->extack, msg, ##args)
/* Report that a root attribute is missing */
#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
const struct genl_info *__info = (info); \
\
NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
})
enum genl_validate_flags {
GENL_DONT_VALIDATE_STRICT = BIT(0),
GENL_DONT_VALIDATE_DUMP = BIT(1),
GENL_DONT_VALIDATE_DUMP_STRICT = BIT(2),
};
/**
* struct genl_small_ops - generic netlink operations (small version)
* @cmd: command identifier
* @internal_flags: flags used by the family
* @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
* @validate: validation flags from enum genl_validate_flags
* @doit: standard command callback
* @dumpit: callback for dumpers
*
* This is a cut-down version of struct genl_ops for users who don't need
* most of the ancillary infra and want to save space.
*/
struct genl_small_ops {
int (*doit)(struct sk_buff *skb, struct genl_info *info);
int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb);
u8 cmd;
u8 internal_flags;
u8 flags;
u8 validate;
};
/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
* @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
* @maxattr: maximum number of attributes supported
* @policy: netlink policy (takes precedence over family policy)
* @validate: validation flags from enum genl_validate_flags
* @doit: standard command callback
* @start: start callback for dumps
* @dumpit: callback for dumpers
* @done: completion callback for dumps
*/
struct genl_ops {
int (*doit)(struct sk_buff *skb,
struct genl_info *info);
int (*start)(struct netlink_callback *cb);
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
const struct nla_policy *policy;
unsigned int maxattr;
u8 cmd;
u8 internal_flags;
u8 flags;
u8 validate;
};
/**
* struct genl_split_ops - generic netlink operations (do/dump split version)
* @cmd: command identifier
* @internal_flags: flags used by the family
* @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
* @validate: validation flags from enum genl_validate_flags
* @policy: netlink policy (takes precedence over family policy)
* @maxattr: maximum number of attributes supported
*
* Do callbacks:
* @pre_doit: called before an operation's @doit callback, it may
* do additional, common, filtering and return an error
* @doit: standard command callback
* @post_doit: called after an operation's @doit callback, it may
* undo operations done by pre_doit, for example release locks
*
* Dump callbacks:
* @start: start callback for dumps
* @dumpit: callback for dumpers
* @done: completion callback for dumps
*
* Do callbacks can be used if %GENL_CMD_CAP_DO is set in @flags.
* Dump callbacks can be used if %GENL_CMD_CAP_DUMP is set in @flags.
* Exactly one of those flags must be set.
*/
struct genl_split_ops {
union {
struct {
int (*pre_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
int (*doit)(struct sk_buff *skb,
struct genl_info *info);
void (*post_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
};
struct {
int (*start)(struct netlink_callback *cb);
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
};
};
const struct nla_policy *policy;
unsigned int maxattr;
u8 cmd;
u8 internal_flags;
u8 flags;
u8 validate;
};
/**
* struct genl_dumpit_info - info that is available during dumpit op call
* @op: generic netlink ops - for internal genl code usage
* @attrs: netlink attributes
* @info: struct genl_info describing the request
*/
struct genl_dumpit_info {
struct genl_split_ops op;
struct genl_info info;
};
static inline const struct genl_dumpit_info *
genl_dumpit_info(struct netlink_callback *cb)
{
return cb->data;
}
static inline const struct genl_info *
genl_info_dump(struct netlink_callback *cb)
{
return &genl_dumpit_info(cb)->info;
}
/**
* genl_info_init_ntf() - initialize genl_info for notifications
* @info: genl_info struct to set up
* @family: pointer to the genetlink family
* @cmd: command to be used in the notification
*
* Initialize a locally declared struct genl_info to pass to various APIs.
* Intended to be used when creating notifications.
*/
static inline void
genl_info_init_ntf(struct genl_info *info, const struct genl_family *family,
u8 cmd)
{
struct genlmsghdr *hdr = (void *) &info->user_ptr[0];
memset(info, 0, sizeof(*info));
info->family = family;
info->genlhdr = hdr;
hdr->cmd = cmd;
}
static inline bool genl_info_is_ntf(const struct genl_info *info)
{
return !info->nlhdr;
}
void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk);
void *genl_sk_priv_get(struct genl_family *family, struct sock *sk);
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
struct genl_info *info, u32 group, gfp_t flags);
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
const struct genl_family *family, int flags, u8 cmd);
static inline void *
__genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags)
{
return genlmsg_put(skb, info->snd_portid, info->snd_seq, info->family,
flags, info->genlhdr->cmd);
}
/**
* genlmsg_iput - start genetlink message based on genl_info
* @skb: skb in which message header will be placed
* @info: genl_info as provided to do/dump handlers
*
* Convenience wrapper which starts a genetlink message based on
* information in user request. @info should be either the struct passed
* by genetlink core to do/dump handlers (when constructing replies to
* such requests) or a struct initialized by genl_info_init_ntf()
* when constructing notifications.
*
* Returns: pointer to new genetlink header.
*/
static inline void *
genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
{
return __genlmsg_iput(skb, info, 0);
}
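/*
* Illustrative sketch (not from the original header): building a notification
* with genl_info_init_ntf() and genlmsg_iput(). "my_family", MY_CMD_NOTIFY and
* MY_ATTR_VALUE are hypothetical names, and a multicast group at offset 0 is
* assumed.
*
*	struct genl_info info;
*	struct sk_buff *msg;
*	void *hdr;
*
*	genl_info_init_ntf(&info, &my_family, MY_CMD_NOTIFY);
*	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
*	if (!msg)
*		return -ENOMEM;
*	hdr = genlmsg_iput(msg, &info);
*	if (!hdr || nla_put_u32(msg, MY_ATTR_VALUE, val)) {
*		nlmsg_free(msg);
*		return -EMSGSIZE;
*	}
*	genlmsg_end(msg, hdr);
*	genlmsg_multicast(&my_family, msg, 0, 0, GFP_KERNEL);
*/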
/**
* genlmsg_nlhdr - Obtain netlink header from user specified header
* @user_hdr: user header as returned from genlmsg_put()
*
* Returns: pointer to netlink header.
*/
static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr)
{
return (struct nlmsghdr *)((char *)user_hdr -
GENL_HDRLEN -
NLMSG_HDRLEN);
}
/**
* genlmsg_parse_deprecated - parse attributes of a genetlink message
* @nlh: netlink message header
* @family: genetlink message family
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*/
static inline int genlmsg_parse_deprecated(const struct nlmsghdr *nlh,
const struct genl_family *family,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
policy, NL_VALIDATE_LIBERAL, extack);
}
/**
* genlmsg_parse - parse attributes of a genetlink message
* @nlh: netlink message header
* @family: genetlink message family
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*/
static inline int genlmsg_parse(const struct nlmsghdr *nlh,
const struct genl_family *family,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
policy, NL_VALIDATE_STRICT, extack);
}
/**
* genl_dump_check_consistent - check if sequence is consistent and advertise if not
* @cb: netlink callback structure that stores the sequence number
* @user_hdr: user header as returned from genlmsg_put()
*
* Cf. nl_dump_check_consistent(), this just provides a wrapper to make it
* simpler to use with generic netlink.
*/
static inline void genl_dump_check_consistent(struct netlink_callback *cb,
void *user_hdr)
{
nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr));
}
/**
* genlmsg_put_reply - Add generic netlink header to a reply message
* @skb: socket buffer holding the message
* @info: receiver info
* @family: generic netlink family
* @flags: netlink message flags
* @cmd: generic netlink command
*
* Returns: pointer to user specific header
*/
static inline void *genlmsg_put_reply(struct sk_buff *skb,
struct genl_info *info,
const struct genl_family *family,
int flags, u8 cmd)
{
return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
flags, cmd);
}
/**
* genlmsg_end - Finalize a generic netlink message
* @skb: socket buffer the message is stored in
* @hdr: user specific header
*/
static inline void genlmsg_end(struct sk_buff *skb, void *hdr)
{
nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
}
/**
* genlmsg_cancel - Cancel construction of a generic netlink message
* @skb: socket buffer the message is stored in
* @hdr: generic netlink message header
*/
static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
{
if (hdr)
nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
}
/**
* genlmsg_multicast_netns_filtered - multicast a netlink message
* to a specific netns with filter
* function
* @family: the generic netlink family
* @net: the net namespace
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
* @flags: allocation flags
* @filter: filter function
* @filter_data: filter function private data
*
* Return: 0 on success, negative error code for failure.
*/
static inline int
genlmsg_multicast_netns_filtered(const struct genl_family *family,
struct net *net, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags,
netlink_filter_fn filter,
void *filter_data)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
return nlmsg_multicast_filtered(net->genl_sock, skb, portid, group,
flags, filter, filter_data);
}
/**
* genlmsg_multicast_netns - multicast a netlink message to a specific netns
* @family: the generic netlink family
* @net: the net namespace
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
* @flags: allocation flags
*/
static inline int genlmsg_multicast_netns(const struct genl_family *family,
struct net *net, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags)
{
return genlmsg_multicast_netns_filtered(family, net, skb, portid,
group, flags, NULL, NULL);
}
/**
* genlmsg_multicast - multicast a netlink message to the default netns
* @family: the generic netlink family
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
* @flags: allocation flags
*/
static inline int genlmsg_multicast(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags)
{
return genlmsg_multicast_netns(family, &init_net, skb,
portid, group, flags);
}
/**
* genlmsg_multicast_allns - multicast a netlink message to all net namespaces
* @family: the generic netlink family
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
*
* This function must hold the RTNL or rcu_read_lock().
*/
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group);
/**
* genlmsg_unicast - unicast a netlink message
* @net: network namespace to look up @portid in
* @skb: netlink message as socket buffer
* @portid: netlink portid of the destination socket
*/
static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid)
{
return nlmsg_unicast(net->genl_sock, skb, portid);
}
/**
* genlmsg_reply - reply to a request
* @skb: netlink message to be sent back
* @info: receiver information
*/
static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
{
return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
}
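/*
* Illustrative sketch (not from the original header): a doit handler that
* replies to the sender, tying GENL_REQ_ATTR_CHECK(), genlmsg_new(),
* genlmsg_put_reply(), genlmsg_end() and genlmsg_reply() together. All
* "my_"/"MY_" identifiers are hypothetical.
*
*	static int my_doit(struct sk_buff *skb, struct genl_info *info)
*	{
*		struct sk_buff *msg;
*		void *hdr;
*
*		if (GENL_REQ_ATTR_CHECK(info, MY_ATTR_ID))
*			return -EINVAL;
*
*		msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
*		if (!msg)
*			return -ENOMEM;
*		hdr = genlmsg_put_reply(msg, info, &my_family, 0, MY_CMD_GET);
*		if (!hdr || nla_put_u32(msg, MY_ATTR_ID, 42)) {
*			nlmsg_free(msg);
*			return -EMSGSIZE;
*		}
*		genlmsg_end(msg, hdr);
*		return genlmsg_reply(msg, info);
*	}
*/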
/**
* genlmsg_data - head of message payload
* @gnlh: genetlink message header
*/
static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
{
return ((unsigned char *) gnlh + GENL_HDRLEN);
}
/**
* genlmsg_len - length of message payload
* @gnlh: genetlink message header
*/
static inline int genlmsg_len(const struct genlmsghdr *gnlh)
{
struct nlmsghdr *nlh = (struct nlmsghdr *)((unsigned char *)gnlh -
NLMSG_HDRLEN);
return (nlh->nlmsg_len - GENL_HDRLEN - NLMSG_HDRLEN);
}
/**
* genlmsg_msg_size - length of genetlink message not including padding
* @payload: length of message payload
*/
static inline int genlmsg_msg_size(int payload)
{
return GENL_HDRLEN + payload;
}
/**
* genlmsg_total_size - length of genetlink message including padding
* @payload: length of message payload
*/
static inline int genlmsg_total_size(int payload)
{
return NLMSG_ALIGN(genlmsg_msg_size(payload));
}
/**
* genlmsg_new - Allocate a new generic netlink message
* @payload: size of the message payload
* @flags: the type of memory to allocate.
*/
static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
{
return nlmsg_new(genlmsg_total_size(payload), flags);
}
/**
* genl_set_err - report error to genetlink broadcast listeners
* @family: the generic netlink family
* @net: the network namespace to report the error to
* @portid: the PORTID of a process that we want to skip (if any)
* @group: the broadcast group that will notice the error
* (this is the offset of the multicast group in the groups array)
* @code: error code, must be negative (as usual in kernelspace)
*
* This function returns the number of broadcast listeners that have set the
* NETLINK_RECV_NO_ENOBUFS socket option.
*/
static inline int genl_set_err(const struct genl_family *family,
struct net *net, u32 portid,
u32 group, int code)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
return netlink_set_err(net->genl_sock, portid, group, code);
}
static inline int genl_has_listeners(const struct genl_family *family,
struct net *net, unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
return netlink_has_listeners(net->genl_sock, group);
}
#endif /* __NET_GENERIC_NETLINK_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ERR_H
#define _LINUX_ERR_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/errno.h>
/*
* Kernel pointers have redundant information, so we can use a
* scheme where we can return either an error code or a normal
* pointer with the same return value.
*
* This should be a per-architecture thing, to allow different
* error and pointer decisions.
*/
#define MAX_ERRNO 4095
#ifndef __ASSEMBLY__
/**
* IS_ERR_VALUE - Detect an error pointer.
* @x: The pointer to check.
*
* Like IS_ERR(), but does not generate a compiler warning if result is unused.
*/
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
/**
* ERR_PTR - Create an error pointer.
* @error: A negative error code.
*
* Encodes @error into a pointer value. Users should consider the result
* opaque and not assume anything about how the error is encoded.
*
* Return: A pointer with @error encoded within its value.
*/
static inline void * __must_check ERR_PTR(long error)
{
return (void *) error;
}
/* Return the pointer in the percpu address space. */
#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
/* Cast an error pointer to __iomem. */
#define IOMEM_ERR_PTR(error) (__force void __iomem *)ERR_PTR(error)
/**
* PTR_ERR - Extract the error code from an error pointer.
* @ptr: An error pointer.
* Return: The error code within @ptr.
*/
static inline long __must_check PTR_ERR(__force const void *ptr)
{
return (long) ptr;
}
/* Read an error pointer from the percpu address space. */
#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr)))
/**
* IS_ERR - Detect an error pointer.
* @ptr: The pointer to check.
* Return: true if @ptr is an error pointer, false otherwise.
*/
static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
/* Read an error pointer from the percpu address space. */
#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr)))
/**
* IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
* @ptr: The pointer to check.
*
* Like IS_ERR(), but also returns true for a null pointer.
*/
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
}
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
*
* Explicitly cast an error-valued pointer to another pointer type in such a
* way as to make it clear that's what's going on.
*/
static inline void * __must_check ERR_CAST(__force const void *ptr)
{
/* cast away the const */
return (void *) ptr;
}
/**
* PTR_ERR_OR_ZERO - Extract the error code from a pointer if it has one.
* @ptr: A potential error pointer.
*
* Convenience function that can be used inside a function that returns
* an error code to propagate errors received as error pointers.
* For example, ``return PTR_ERR_OR_ZERO(ptr);`` replaces:
*
* .. code-block:: c
*
* if (IS_ERR(ptr))
* return PTR_ERR(ptr);
* else
* return 0;
*
* Return: The error code within @ptr if it is an error pointer; 0 otherwise.
*/
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
else
return 0;
}
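/*
* Illustrative sketch (not from the original header): the typical
* producer/consumer pattern for error pointers. "my_alloc_thing" and
* "struct thing" are hypothetical.
*
*	static struct thing *my_alloc_thing(void)
*	{
*		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
*
*		if (!t)
*			return ERR_PTR(-ENOMEM);
*		return t;
*	}
*
*	t = my_alloc_thing();
*	if (IS_ERR(t))
*		return PTR_ERR(t);
*/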
#endif
#endif /* _LINUX_ERR_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* klist.c - Routines for manipulating klists.
*
* Copyright (C) 2005 Patrick Mochel
*
* This klist interface provides a couple of structures that wrap around
* struct list_head to provide explicit list "head" (struct klist) and list
* "node" (struct klist_node) objects. For struct klist, a spinlock is
* included that protects access to the actual list itself. struct
* klist_node provides a pointer to the klist that owns it and a kref
* reference count that indicates the number of current users of that node
* in the list.
*
* The entire point is to provide an interface for iterating over a list
* that is safe and allows for modification of the list during the
* iteration (e.g. insertion and removal), including modification of the
* current node on the list.
*
* It works using a 3rd object type - struct klist_iter - that is declared
* and initialized before an iteration. klist_next() is used to acquire the
* next element in the list. It returns NULL if there are no more items.
* Internally, that routine takes the klist's lock, decrements the
* reference count of the previous klist_node and increments the count of
* the next klist_node. It then drops the lock and returns.
*
* There are primitives for adding and removing nodes to/from a klist.
* When deleting, klist_del() will simply decrement the reference count.
* Only when the count goes to 0 is the node removed from the list.
* klist_remove() will try to delete the node from the list and block until
* it is actually removed. This is useful for objects (like devices) that
* have been removed from the system and must be freed (but must wait until
* all accessors have finished).
*/
#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>
/*
* Use the lowest bit of n_klist to mark deleted nodes and exclude
* dead ones from iteration.
*/
#define KNODE_DEAD 1LU
#define KNODE_KLIST_MASK ~KNODE_DEAD
static struct klist *knode_klist(struct klist_node *knode)
{
return (struct klist *)
((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}
static bool knode_dead(struct klist_node *knode)
{
return (unsigned long)knode->n_klist & KNODE_DEAD;
}
static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
knode->n_klist = klist;
/* no knode deserves to start its life dead */
WARN_ON(knode_dead(knode));
}
static void knode_kill(struct klist_node *knode)
{
/* and no knode should die twice ever either, see we're very humane */
WARN_ON(knode_dead(knode));
*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}
/**
* klist_init - Initialize a klist structure.
* @k: The klist we're initializing.
* @get: The get function for the embedding object (NULL if none)
* @put: The put function for the embedding object (NULL if none)
*
* Initialises the klist structure. If the klist_node structures are
* going to be embedded in refcounted objects (necessary for safe
* deletion) then the get/put arguments are used to initialise
* functions that take and release references on the embedding
* objects.
*/
void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *))
{
INIT_LIST_HEAD(&k->k_list);
spin_lock_init(&k->k_lock);
k->get = get;
k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);
static void add_head(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
static void add_tail(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
static void klist_node_init(struct klist *k, struct klist_node *n)
{
INIT_LIST_HEAD(&n->n_node);
kref_init(&n->n_ref);
knode_set_klist(n, k);
if (k->get)
k->get(n);
}
/**
* klist_add_head - Initialize a klist_node and add it to front.
* @n: node we're adding.
* @k: klist it's going on.
*/
void klist_add_head(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);
/**
* klist_add_tail - Initialize a klist_node and add it to back.
* @n: node we're adding.
* @k: klist it's going on.
*/
void klist_add_tail(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);
/**
* klist_add_behind - Init a klist_node and add it after an existing node
* @n: node we're adding.
* @pos: node to put @n after
*/
void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
list_add(&n->n_node, &pos->n_node);
spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_behind);
/**
* klist_add_before - Init a klist_node and add it before an existing node
* @n: node we're adding.
* @pos: node to put @n before
*/
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &pos->n_node);
spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);
struct klist_waiter {
struct list_head list;
struct klist_node *node;
struct task_struct *process;
int woken;
};
static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);
static void klist_release(struct kref *kref)
{
struct klist_waiter *waiter, *tmp;
struct klist_node *n = container_of(kref, struct klist_node, n_ref);
WARN_ON(!knode_dead(n));
list_del(&n->n_node);
spin_lock(&klist_remove_lock);
list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
if (waiter->node != n)
continue;
list_del(&waiter->list);
waiter->woken = 1;
mb();
wake_up_process(waiter->process);
}
spin_unlock(&klist_remove_lock);
knode_set_klist(n, NULL);
}
static int klist_dec_and_del(struct klist_node *n)
{
return kref_put(&n->n_ref, klist_release);
}
static void klist_put(struct klist_node *n, bool kill)
{
struct klist *k = knode_klist(n);
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
if (kill)
knode_kill(n);
if (!klist_dec_and_del(n))
put = NULL;
spin_unlock(&k->k_lock);
if (put)
put(n);
}
/**
* klist_del - Decrement the reference count of node and try to remove.
* @n: node we're deleting.
*/
void klist_del(struct klist_node *n)
{
klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);
/**
* klist_remove - Decrement the refcount of node and wait for it to go away.
* @n: node we're removing.
*/
void klist_remove(struct klist_node *n)
{
struct klist_waiter waiter;
waiter.node = n;
waiter.process = current;
waiter.woken = 0;
spin_lock(&klist_remove_lock);
list_add(&waiter.list, &klist_remove_waiters);
spin_unlock(&klist_remove_lock);
klist_del(n);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (waiter.woken)
break;
schedule();
}
__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);
/**
* klist_node_attached - Say whether a node is bound to a list or not.
* @n: Node that we're testing.
*/
int klist_node_attached(struct klist_node *n)
{
return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);
/**
* klist_iter_init_node - Initialize a klist_iter structure.
* @k: klist we're iterating.
* @i: klist_iter we're filling.
* @n: node to start with.
*
* Similar to klist_iter_init(), but starts the action off with @n,
* instead of with the list head.
*/
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n)
{
i->i_klist = k;
i->i_cur = NULL;
if (n && kref_get_unless_zero(&n->n_ref))
i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);
/**
* klist_iter_init - Initialize a klist_iter structure.
* @k: klist we're iterating.
* @i: klist_iter structure we're filling.
*
* Similar to klist_iter_init_node(), but start with the list head.
*/
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);
/**
* klist_iter_exit - Finish a list iteration.
* @i: Iterator structure.
*
* Must be called when done iterating over list, as it decrements the
* refcount of the current node. Necessary in case iteration exited before
* the end of the list was reached, and always good form.
*/
void klist_iter_exit(struct klist_iter *i)
{
if (i->i_cur) {
klist_put(i->i_cur, false);
i->i_cur = NULL;
}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);
static struct klist_node *to_klist_node(struct list_head *n)
{
return container_of(n, struct klist_node, n_node);
}
/**
* klist_prev - Ante up prev node in list.
* @i: Iterator structure.
*
* First grab list lock. Decrement the reference count of the previous
* node, if there was one. Grab the prev node, increment its reference
* count, drop the lock, and return that prev node.
*/
struct klist_node *klist_prev(struct klist_iter *i)
{
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
unsigned long flags;
spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
if (!klist_dec_and_del(last))
put = NULL;
} else
prev = to_klist_node(i->i_klist->k_list.prev);
i->i_cur = NULL;
while (prev != to_klist_node(&i->i_klist->k_list)) {
if (likely(!knode_dead(prev))) {
kref_get(&prev->n_ref);
i->i_cur = prev;
break;
}
prev = to_klist_node(prev->n_node.prev);
}
spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_prev);
/**
* klist_next - Ante up next node in list.
* @i: Iterator structure.
*
* First grab list lock. Decrement the reference count of the previous
* node, if there was one. Grab the next node, increment its reference
* count, drop the lock, and return that next node.
*/
struct klist_node *klist_next(struct klist_iter *i)
{
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
unsigned long flags;
spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
if (!klist_dec_and_del(last))
put = NULL;
} else
next = to_klist_node(i->i_klist->k_list.next);
i->i_cur = NULL;
while (next != to_klist_node(&i->i_klist->k_list)) {
if (likely(!knode_dead(next))) {
kref_get(&next->n_ref);
i->i_cur = next;
break;
}
next = to_klist_node(next->n_node.next);
}
spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
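/*
* Illustrative sketch (not from the original file): the iteration pattern
* described in the header comment, using klist_iter_init(), klist_next() and
* klist_iter_exit(). "my_klist" and "handle_node" are hypothetical.
*
*	struct klist_iter iter;
*	struct klist_node *n;
*
*	klist_iter_init(&my_klist, &iter);
*	while ((n = klist_next(&iter)))
*		handle_node(n);
*	klist_iter_exit(&iter);
*/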
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2002 Andi Kleen
*
* This handles calls from both 32bit and 64bit mode.
*
* Lock order:
* context.ldt_usr_sem
* mmap_lock
* context.lock
*/
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable_areas.h>
#include <xen/xen.h>
/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
static inline void *ldt_slot_va(int slot)
{
return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}
void load_mm_ldt(struct mm_struct *mm)
{
struct ldt_struct *ldt;
/* READ_ONCE synchronizes with smp_store_release */
ldt = READ_ONCE(mm->context.ldt);
/*
* Any change to mm->context.ldt is followed by an IPI to all
* CPUs with the mm active. The LDT will not be freed until
* after the IPI is handled by all such CPUs. This means that
* if the ldt_struct changes before we return, the values we see
* will be safe, and the new values will be loaded before we run
* any user code.
*
* NB: don't try to convert this to use RCU without extreme care.
* We would still need IRQs off, because we don't want to change
* the local LDT after an IPI loaded a newer value than the one
* that we can see.
*/
if (unlikely(ldt)) {
if (static_cpu_has(X86_FEATURE_PTI)) {
if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
/*
* Whoops -- either the new LDT isn't mapped
* (if slot == -1) or is mapped into a bogus
* slot (if slot > 1).
*/
clear_LDT();
return;
}
/*
* If page table isolation is enabled, ldt->entries
* will not be mapped in the userspace pagetables.
* Tell the CPU to access the LDT through the alias
* at ldt_slot_va(ldt->slot).
*/
set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
} else {
set_ldt(ldt->entries, ldt->nr_entries);
}
} else {
clear_LDT();
}
}
void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
/*
* Load the LDT if either the old or new mm had an LDT.
*
* An mm will never go from having an LDT to not having an LDT. Two
* mms never share an LDT, so we don't gain anything by checking to
* see whether the LDT changed. There's also no guarantee that
* prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
* then prev->context.ldt will also be non-NULL.
*
* If we really cared, we could optimize the case where prev == next
* and we're exiting lazy mode. Most of the time, if this happens,
* we don't actually need to reload LDTR, but modify_ldt() is mostly
* used by legacy code and emulators where we don't need this level of
* performance.
*
* This uses | instead of || because it generates better code.
*/
if (unlikely((unsigned long)prev->context.ldt |
(unsigned long)next->context.ldt))
load_mm_ldt(next);
DEBUG_LOCKS_WARN_ON(preemptible());
}
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
unsigned short sel;
/*
* Make sure that the cached DS and ES descriptors match the updated
* LDT.
*/
savesegment(ds, sel);
if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
loadsegment(ds, sel);
savesegment(es, sel);
if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
loadsegment(es, sel);
#endif
}
/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
struct mm_struct *mm = __mm;
if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
return;
load_mm_ldt(mm);
refresh_ldt_segments();
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
struct ldt_struct *new_ldt;
unsigned int alloc_size;
if (num_entries > LDT_ENTRIES)
return NULL;
new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL_ACCOUNT);
if (!new_ldt)
return NULL;
BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
alloc_size = num_entries * LDT_ENTRY_SIZE;
/*
* Xen is very picky: it requires a page-aligned LDT that has no
* trailing nonzero bytes in any page that contains LDT descriptors.
* Keep it simple: zero the whole allocation and never allocate less
* than PAGE_SIZE.
*/
if (alloc_size > PAGE_SIZE)
new_ldt->entries = __vmalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
else
new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!new_ldt->entries) {
kfree(new_ldt);
return NULL;
}
/* The new LDT isn't aliased for PTI yet. */
new_ldt->slot = -1;
new_ldt->nr_entries = num_entries;
return new_ldt;
}
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
static void do_sanity_check(struct mm_struct *mm,
bool had_kernel_mapping,
bool had_user_mapping)
{
if (mm->context.ldt) {
/*
* We already had an LDT. The top-level entry should already
* have been allocated and synchronized with the usermode
* tables.
*/
WARN_ON(!had_kernel_mapping);
if (boot_cpu_has(X86_FEATURE_PTI))
WARN_ON(!had_user_mapping);
} else {
/*
* This is the first time we're mapping an LDT for this process.
* Sync the pgd to the usermode tables.
*/
WARN_ON(had_kernel_mapping);
if (boot_cpu_has(X86_FEATURE_PTI))
WARN_ON(had_user_mapping);
}
}
#ifdef CONFIG_X86_PAE
static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
p4d_t *p4d;
pud_t *pud;
if (pgd->pgd == 0)
return NULL;
p4d = p4d_offset(pgd, va);
if (p4d_none(*p4d))
return NULL;
pud = pud_offset(p4d, va);
if (pud_none(*pud))
return NULL;
return pmd_offset(pud, va);
}
static void map_ldt_struct_to_user(struct mm_struct *mm)
{
pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
pmd_t *k_pmd, *u_pmd;
k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pmd(u_pmd, *k_pmd);
}
static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
bool had_kernel, had_user;
pmd_t *k_pmd, *u_pmd;
k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
had_kernel = (k_pmd->pmd != 0);
had_user = (u_pmd->pmd != 0);
do_sanity_check(mm, had_kernel, had_user);
}
#else /* !CONFIG_X86_PAE */
static void map_ldt_struct_to_user(struct mm_struct *mm)
{
pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}
static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
bool had_kernel = (pgd->pgd != 0);
bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);
do_sanity_check(mm, had_kernel, had_user);
}
#endif /* CONFIG_X86_PAE */
/*
* If PTI is enabled, this maps the LDT into the kernelmode and
* usermode tables for the given mm.
*/
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
unsigned long va;
bool is_vmalloc;
spinlock_t *ptl;
int i, nr_pages;
if (!boot_cpu_has(X86_FEATURE_PTI))
return 0;
/*
* Any given ldt_struct should have map_ldt_struct() called at most
* once.
*/
WARN_ON(ldt->slot != -1);
/* Check if the current mappings are sane */
sanity_check_ldt_mapping(mm);
is_vmalloc = is_vmalloc_addr(ldt->entries);
nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
for (i = 0; i < nr_pages; i++) {
unsigned long offset = i << PAGE_SHIFT;
const void *src = (char *)ldt->entries + offset;
unsigned long pfn;
pgprot_t pte_prot;
pte_t pte, *ptep;
va = (unsigned long)ldt_slot_va(slot) + offset;
pfn = is_vmalloc ? vmalloc_to_pfn(src) :
page_to_pfn(virt_to_page(src));
/*
* Treat the PTI LDT range as a *userspace* range.
* get_locked_pte() will allocate all needed pagetables
* and account for them in this mm.
*/
ptep = get_locked_pte(mm, va, &ptl);
if (!ptep)
return -ENOMEM;
/*
* Map it RO so the easy to find address is not a primary
* target via some kernel interface which misses a
* permission check.
*/
pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
/* Filter out unsupported __PAGE_KERNEL* bits: */
pgprot_val(pte_prot) &= __supported_pte_mask;
pte = pfn_pte(pfn, pte_prot);
set_pte_at(mm, va, ptep, pte);
pte_unmap_unlock(ptep, ptl);
}
/* Propagate LDT mapping to the user page-table */
map_ldt_struct_to_user(mm);
ldt->slot = slot;
return 0;
}
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
unsigned long va;
int i, nr_pages;
if (!ldt)
return;
/* LDT map/unmap is only required for PTI */
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
for (i = 0; i < nr_pages; i++) {
unsigned long offset = i << PAGE_SHIFT;
spinlock_t *ptl;
pte_t *ptep;
va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
ptep = get_locked_pte(mm, va, &ptl);
if (!WARN_ON_ONCE(!ptep)) {
pte_clear(mm, va, ptep);
pte_unmap_unlock(ptep, ptl);
}
}
va = (unsigned long)ldt_slot_va(ldt->slot);
flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}
#else /* !CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
return 0;
}
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
struct mmu_gather tlb;
unsigned long start = LDT_BASE_ADDR;
unsigned long end = LDT_END_ADDR;
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
/*
* Although free_pgd_range() is intended for freeing user
* page-tables, it also works out for kernel mappings on x86.
* We use tlb_gather_mmu_fullmm() to avoid confusing the
* range-tracking logic in __tlb_adjust_range().
*/
tlb_gather_mmu_fullmm(&tlb, mm);
free_pgd_range(&tlb, start, end, start, end);
tlb_finish_mmu(&tlb);
#endif
}
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}
static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
mutex_lock(&mm->context.lock);
/* Synchronizes with READ_ONCE in load_mm_ldt. */
smp_store_release(&mm->context.ldt, ldt);
/* Activate the LDT for all CPUs using currents mm. */
on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
mutex_unlock(&mm->context.lock);
}
static void free_ldt_struct(struct ldt_struct *ldt)
{
if (likely(!ldt))
return;
paravirt_free_ldt(ldt->entries, ldt->nr_entries);
if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree_atomic(ldt->entries);
else
free_page((unsigned long)ldt->entries);
kfree(ldt);
}
/*
* Called on fork from arch_dup_mmap(). Just copy the current LDT state,
* the new task is not running, so nothing can be installed.
*/
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ldt_struct *new_ldt;
int retval = 0;
if (!old_mm)
return 0;
mutex_lock(&old_mm->context.lock);
if (!old_mm->context.ldt)
goto out_unlock;
new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
if (!new_ldt) {
retval = -ENOMEM;
goto out_unlock;
}
memcpy(new_ldt->entries, old_mm->context.ldt->entries,
new_ldt->nr_entries * LDT_ENTRY_SIZE);
finalize_ldt_struct(new_ldt);
retval = map_ldt_struct(mm, new_ldt, 0);
if (retval) {
free_ldt_pgtables(mm);
free_ldt_struct(new_ldt);
goto out_unlock;
}
mm->context.ldt = new_ldt;
out_unlock:
mutex_unlock(&old_mm->context.lock);
return retval;
}
/*
* No need to lock the MM as we are the last user
*
* 64bit: Don't touch the LDT register - we're already in the next thread.
*/
void destroy_context_ldt(struct mm_struct *mm)
{
free_ldt_struct(mm->context.ldt);
mm->context.ldt = NULL;
}
void ldt_arch_exit_mmap(struct mm_struct *mm)
{
free_ldt_pgtables(mm);
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
struct mm_struct *mm = current->mm;
unsigned long entries_size;
int retval;
down_read(&mm->context.ldt_usr_sem);
if (!mm->context.ldt) {
retval = 0;
goto out_unlock;
}
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
if (entries_size > bytecount)
entries_size = bytecount;
if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
retval = -EFAULT;
goto out_unlock;
}
if (entries_size != bytecount) {
/* Zero-fill the rest and pretend we read bytecount bytes. */
if (clear_user(ptr + entries_size, bytecount - entries_size)) {
retval = -EFAULT;
goto out_unlock;
}
}
retval = bytecount;
out_unlock:
up_read(&mm->context.ldt_usr_sem);
return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
unsigned long size = 5 * sizeof(struct desc_struct);
#else
unsigned long size = 128;
#endif
if (bytecount > size)
bytecount = size;
if (clear_user(ptr, bytecount))
return -EFAULT;
return bytecount;
}
static bool allow_16bit_segments(void)
{
if (!IS_ENABLED(CONFIG_X86_16BIT))
return false;
#ifdef CONFIG_XEN_PV
/*
* Xen PV does not implement ESPFIX64, which means that 16-bit
* segments will not work correctly. Until either Xen PV implements
* ESPFIX64 and can signal this fact to the guest or unless someone
* provides compelling evidence that allowing broken 16-bit segments
* is worthwhile, disallow 16-bit segments under Xen PV.
*/
if (xen_pv_domain()) {
pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
return false;
}
#endif
return true;
}
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
struct mm_struct *mm = current->mm;
struct ldt_struct *new_ldt, *old_ldt;
unsigned int old_nr_entries, new_nr_entries;
struct user_desc ldt_info;
struct desc_struct ldt;
int error;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
goto out;
error = -EFAULT;
if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
goto out;
error = -EINVAL;
if (ldt_info.entry_number >= LDT_ENTRIES)
goto out;
if (ldt_info.contents == 3) {
if (oldmode)
goto out;
if (ldt_info.seg_not_present == 0)
goto out;
}
if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
LDT_empty(&ldt_info)) {
/* The user wants to clear the entry. */
memset(&ldt, 0, sizeof(ldt));
} else {
if (!ldt_info.seg_32bit && !allow_16bit_segments()) {
error = -EINVAL;
goto out;
}
fill_ldt(&ldt, &ldt_info);
if (oldmode)
ldt.avl = 0;
}
if (down_write_killable(&mm->context.ldt_usr_sem))
return -EINTR;
old_ldt = mm->context.ldt;
old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);
error = -ENOMEM;
new_ldt = alloc_ldt_struct(new_nr_entries);
if (!new_ldt)
goto out_unlock;
if (old_ldt)
memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
new_ldt->entries[ldt_info.entry_number] = ldt;
finalize_ldt_struct(new_ldt);
/*
* If we are using PTI, map the new LDT into the userspace pagetables.
* If there is already an LDT, use the other slot so that other CPUs
* will continue to use the old LDT until install_ldt() switches
* them over to the new LDT.
*/
error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
if (error) {
/*
* This only can fail for the first LDT setup. If an LDT is
* already installed then the PTE page is already
* populated. Mop up a half populated page table.
*/
if (!WARN_ON_ONCE(old_ldt))
free_ldt_pgtables(mm);
free_ldt_struct(new_ldt);
goto out_unlock;
}
install_ldt(mm, new_ldt);
unmap_ldt_struct(mm, old_ldt);
free_ldt_struct(old_ldt);
error = 0;
out_unlock:
up_write(&mm->context.ldt_usr_sem);
out:
return error;
}
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
unsigned long , bytecount)
{
int ret = -ENOSYS;
switch (func) {
case 0:
ret = read_ldt(ptr, bytecount);
break;
case 1:
ret = write_ldt(ptr, bytecount, 1);
break;
case 2:
ret = read_default_ldt(ptr, bytecount);
break;
case 0x11:
ret = write_ldt(ptr, bytecount, 0);
break;
}
/*
* The SYSCALL_DEFINE() macros give us an 'unsigned long'
* return type, but the ABI for sys_modify_ldt() expects
* 'int'. This cast gives us an int-sized value in %rax
* for the return code. The 'unsigned' is necessary so
* the compiler does not try to sign-extend the negative
* return codes into the high half of the register when
* taking the value from int->long.
*/
return (unsigned int)ret;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* A generic implementation of binary search for the Linux kernel
*
* Copyright (C) 2008-2009 Ksplice, Inc.
* Author: Tim Abbott <tabbott@ksplice.com>
*/
#include <linux/export.h>
#include <linux/bsearch.h>
#include <linux/kprobes.h>
/*
* bsearch - binary search an array of elements
* @key: pointer to item being searched for
* @base: pointer to first element to search
* @num: number of elements
* @size: size of each element
* @cmp: pointer to comparison function
*
* This function does a binary search on the given array. The
* contents of the array should already be in ascending sorted order
* under the provided comparison function.
*
* Note that the key need not have the same type as the elements in
* the array, e.g. key could be a string and the comparison function
* could compare the string with the struct's name field. However, if
* the key and elements in the array are of the same type, you can use
* the same comparison function for both sort() and bsearch().
*/
void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
{
return __inline_bsearch(key, base, num, size, cmp);
}
EXPORT_SYMBOL(bsearch);
NOKPROBE_SYMBOL(bsearch);
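/*
 * Illustrative sketch (not part of the original file): searching a sorted
 * array of ints with bsearch(). The comparison function follows the usual
 * negative/zero/positive contract described above; all example_* names are
 * hypothetical.
 */
static int example_cmp_int(const void *key, const void *elt)
{
const int *k = key, *e = elt;
if (*k < *e)
return -1;
if (*k > *e)
return 1;
return 0;
}
static int example_index_of(const int *sorted, size_t num, int needle)
{
const int *found = bsearch(&needle, sorted, num, sizeof(*sorted),
example_cmp_int);
return found ? found - sorted : -1; /* index, or -1 if absent */
}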
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>
/**
* idr_alloc_u32() - Allocate an ID.
* @idr: IDR handle.
* @ptr: Pointer to be associated with the new ID.
* @nextid: Pointer to an ID.
* @max: The maximum ID to allocate (inclusive).
* @gfp: Memory allocation flags.
*
* Allocates an unused ID in the range specified by @nextid and @max.
* Note that @max is inclusive whereas the @end parameter to idr_alloc()
* is exclusive. The new ID is assigned to @nextid before the pointer
* is inserted into the IDR, so if @nextid points into the object pointed
* to by @ptr, a concurrent lookup will not find an uninitialised ID.
*
* The caller should provide their own locking to ensure that two
* concurrent modifications to the IDR are not possible. Read-only
* accesses to the IDR may be done under the RCU read lock or may
* exclude simultaneous writers.
*
* Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
* or -ENOSPC if no free IDs could be found. If an error occurred,
* @nextid is unchanged.
*/
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
unsigned long max, gfp_t gfp)
{
struct radix_tree_iter iter;
void __rcu **slot;
unsigned int base = idr->idr_base;
unsigned int id = *nextid;
if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
idr->idr_rt.xa_flags |= IDR_RT_MARKER;
id = (id < base) ? 0 : id - base;
radix_tree_iter_init(&iter, id);
slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
if (IS_ERR(slot))
return PTR_ERR(slot);
*nextid = iter.index + base;
/* there is a memory barrier inside radix_tree_iter_replace() */
radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
/**
* idr_alloc() - Allocate an ID.
* @idr: IDR handle.
* @ptr: Pointer to be associated with the new ID.
* @start: The minimum ID (inclusive).
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
* Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
*
* The caller should provide their own locking to ensure that two
* concurrent modifications to the IDR are not possible. Read-only
* accesses to the IDR may be done under the RCU read lock or may
* exclude simultaneous writers.
*
* Return: The newly allocated ID, -ENOMEM if memory allocation failed,
* or -ENOSPC if no free IDs could be found.
*/
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
u32 id = start;
int ret;
if (WARN_ON_ONCE(start < 0))
return -EINVAL;
ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
if (ret)
return ret;
return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
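/*
 * Illustrative sketch (not part of the original file): a common pattern for
 * storing objects in an IDR under a caller-provided lock, using idr_preload()
 * so the GFP_NOWAIT allocation under the spinlock is unlikely to fail. All
 * example_* names are hypothetical.
 */
struct example_obj {
int id;
};
static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);
static int example_insert(struct example_obj *obj)
{
int id;
idr_preload(GFP_KERNEL);
spin_lock(&example_lock);
/* Allocate an ID in [1, 99]; note that @end is exclusive. */
id = idr_alloc(&example_idr, obj, 1, 100, GFP_NOWAIT);
spin_unlock(&example_lock);
idr_preload_end();
if (id < 0)
return id;
obj->id = id;
return 0;
}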
/**
* idr_alloc_cyclic() - Allocate an ID cyclically.
* @idr: IDR handle.
* @ptr: Pointer to be associated with the new ID.
* @start: The minimum ID (inclusive).
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
* Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
* wrap around to @start if no free IDs are found before reaching @end.
*
* The caller should provide their own locking to ensure that two
* concurrent modifications to the IDR are not possible. Read-only
* accesses to the IDR may be done under the RCU read lock or may
* exclude simultaneous writers.
*
* Return: The newly allocated ID, -ENOMEM if memory allocation failed,
* or -ENOSPC if no free IDs could be found.
*/
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
u32 id = idr->idr_next;
int err, max = end > 0 ? end - 1 : INT_MAX;
if ((int)id < start)
id = start;
err = idr_alloc_u32(idr, ptr, &id, max, gfp);
if ((err == -ENOSPC) && (id > start)) {
id = start;
err = idr_alloc_u32(idr, ptr, &id, max, gfp);
}
if (err)
return err;
idr->idr_next = id + 1;
return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
/**
* idr_remove() - Remove an ID from the IDR.
* @idr: IDR handle.
* @id: Pointer ID.
*
* Removes this ID from the IDR. If the ID was not previously in the IDR,
* this function returns %NULL.
*
* Since this function modifies the IDR, the caller should provide their
* own locking to ensure that concurrent modification of the same IDR is
* not possible.
*
* Return: The pointer formerly associated with this ID.
*/
void *idr_remove(struct idr *idr, unsigned long id)
{
return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);
/**
* idr_find() - Return pointer for given ID.
* @idr: IDR handle.
* @id: Pointer ID.
*
* Looks up the pointer associated with this ID. A %NULL pointer may
* indicate that @id is not allocated or that the %NULL pointer was
* associated with this ID.
*
* This function can be called under rcu_read_lock(), given that the leaf
* pointers lifetimes are correctly managed.
*
* Return: The pointer associated with this ID.
*/
void *idr_find(const struct idr *idr, unsigned long id)
{
return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
/**
* idr_for_each() - Iterate through all stored pointers.
* @idr: IDR handle.
* @fn: Function to be called for each pointer.
* @data: Data passed to callback function.
*
* The callback function will be called for each entry in @idr, passing
* the ID, the entry and @data.
*
* If @fn returns anything other than %0, the iteration stops and that
* value is returned from this function.
*
* idr_for_each() can be called concurrently with idr_alloc() and
* idr_remove() if protected by RCU. Newly added entries may not be
* seen and deleted entries may be seen, but adding and removing entries
* will not cause other entries to be skipped, nor spurious ones to be seen.
*/
int idr_for_each(const struct idr *idr,
int (*fn)(int id, void *p, void *data), void *data)
{
struct radix_tree_iter iter;
void __rcu **slot;
int base = idr->idr_base;
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
int ret;
unsigned long id = iter.index + base;
if (WARN_ON_ONCE(id > INT_MAX))
break;
ret = fn(id, rcu_dereference_raw(*slot), data);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(idr_for_each);
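/*
 * Illustrative sketch (not part of the original file): a callback suitable
 * for idr_for_each(), continuing the hypothetical example above. Returning a
 * non-zero value would stop the iteration, as documented.
 */
static int example_count_cb(int id, void *p, void *data)
{
unsigned int *count = data;
(*count)++;
return 0; /* keep iterating */
}
/* Usage: unsigned int n = 0; idr_for_each(&example_idr, example_count_cb, &n); */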
/**
* idr_get_next_ul() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
* Returns the next populated entry in the tree with an ID greater than
* or equal to the value pointed to by @nextid. On exit, @nextid is updated
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
void *entry = NULL;
unsigned long base = idr->idr_base;
unsigned long id = *nextid;
id = (id < base) ? 0 : id - base;
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
entry = rcu_dereference_raw(*slot);
if (!entry)
continue;
if (!xa_is_internal(entry))
break;
if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
break;
slot = radix_tree_iter_retry(&iter);
}
if (!slot)
return NULL;
*nextid = iter.index + base;
return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
/**
* idr_get_next() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
* Returns the next populated entry in the tree with an ID greater than
* or equal to the value pointed to by @nextid. On exit, @nextid is updated
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
void *idr_get_next(struct idr *idr, int *nextid)
{
unsigned long id = *nextid;
void *entry = idr_get_next_ul(idr, &id);
if (WARN_ON_ONCE(id > INT_MAX))
return NULL;
*nextid = id;
return entry;
}
EXPORT_SYMBOL(idr_get_next);
/**
* idr_replace() - replace pointer for given ID.
* @idr: IDR handle.
* @ptr: New pointer to associate with the ID.
* @id: ID to change.
*
* Replace the pointer registered with an ID and return the old value.
* This function can be called under the RCU read lock concurrently with
* idr_alloc() and idr_remove() (as long as the ID being removed is not
* the one being replaced!).
*
* Returns: the old value on success. %-ENOENT indicates that @id was not
* found. %-EINVAL indicates that @ptr was not valid.
*/
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
struct radix_tree_node *node;
void __rcu **slot = NULL;
void *entry;
id -= idr->idr_base;
entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
__radix_tree_replace(&idr->idr_rt, node, slot, ptr);
return entry;
}
EXPORT_SYMBOL(idr_replace);
/**
* DOC: IDA description
*
* The IDA is an ID allocator which does not provide the ability to
* associate an ID with a pointer. As such, it only needs to store one
* bit per ID, and so is more space efficient than an IDR. To use an IDA,
* define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
* then initialise it using ida_init()). To allocate a new ID, call
* ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
* To free an ID, call ida_free().
*
* ida_destroy() can be used to dispose of an IDA without needing to
* free the individual IDs in it. You can use ida_is_empty() to find
* out whether the IDA has any IDs currently allocated.
*
* The IDA handles its own locking. It is safe to call any of the IDA
* functions without synchronisation in your code.
*
* IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
* limitation, it should be quite straightforward to raise the maximum.
*/
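/*
 * Illustrative sketch (not part of the original file) of the usage pattern
 * described above; the example_* names are hypothetical. No external locking
 * is needed around the calls.
 */
static DEFINE_IDA(example_ida);
static int example_get_minor(void)
{
/* Any ID in [0, 255]; returns -ENOSPC once the range is exhausted. */
return ida_alloc_range(&example_ida, 0, 255, GFP_KERNEL);
}
static void example_put_minor(int minor)
{
ida_free(&example_ida, minor);
}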
/*
* Developer's notes:
*
* The IDA uses the functionality provided by the XArray to store bitmaps in
* each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
* have been set.
*
* I considered telling the XArray that each slot is an order-10 node
* and indexing by bit number, but the XArray can't allow a single multi-index
* entry in the head, which would significantly increase memory consumption
* for the IDA. So instead we divide the index by the number of bits in the
* leaf bitmap before doing a radix tree lookup.
*
* As an optimisation, if there are only a few low bits set in any given
* leaf, instead of allocating a 128-byte bitmap, we store the bits
* as a value entry. Value entries never have the XA_FREE_MARK cleared
* because we can always convert them into a bitmap entry.
*
* It would be possible to optimise further; once we've run out of a
* single 128-byte bitmap, we currently switch to a 576-byte node, put
* the 128-byte bitmap in the first entry and then start allocating extra
* 128-byte entries. We could instead use the 512 bytes of the node's
* data as a bitmap before moving to that scheme. I do not believe this
* is a worthwhile optimisation; Rasmus Villemoes surveyed the current
* users of the IDA and almost none of them use more than 1024 entries.
* Those that do use more than the 8192 IDs that the 512 bytes would
* provide.
*
* The IDA always uses a lock to alloc/free. If we add a 'test_bit'
* equivalent, it will still need locking. Going to RCU lookup would require
* using RCU to free bitmaps, and that's not trivial without embedding an
* RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
* bitmap, which is excessive.
*/
/**
* ida_alloc_range() - Allocate an unused ID.
* @ida: IDA handle.
* @min: Lowest ID to allocate.
* @max: Highest ID to allocate.
* @gfp: Memory allocation flags.
*
* Allocate an ID between @min and @max, inclusive. The allocated ID will
* not exceed %INT_MAX, even if @max is larger.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
gfp_t gfp)
{
XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
unsigned bit = min % IDA_BITMAP_BITS;
unsigned long flags;
struct ida_bitmap *bitmap, *alloc = NULL;
if ((int)min < 0)
return -ENOSPC;
if ((int)max < 0)
max = INT_MAX;
retry:
xas_lock_irqsave(&xas, flags);
next:
bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
if (xas.xa_index > min / IDA_BITMAP_BITS)
bit = 0;
if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
goto nospc;
if (xa_is_value(bitmap)) {
unsigned long tmp = xa_to_value(bitmap);
if (bit < BITS_PER_XA_VALUE) {
bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
goto nospc;
if (bit < BITS_PER_XA_VALUE) {
tmp |= 1UL << bit;
xas_store(&xas, xa_mk_value(tmp));
goto out;
}
}
bitmap = alloc;
if (!bitmap)
bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
if (!bitmap)
goto alloc;
bitmap->bitmap[0] = tmp;
xas_store(&xas, bitmap);
if (xas_error(&xas)) {
bitmap->bitmap[0] = 0;
goto out;
}
}
if (bitmap) {
bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
goto nospc;
if (bit == IDA_BITMAP_BITS)
goto next;
__set_bit(bit, bitmap->bitmap);
if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
xas_clear_mark(&xas, XA_FREE_MARK);
} else {
if (bit < BITS_PER_XA_VALUE) {
bitmap = xa_mk_value(1UL << bit);
} else {
bitmap = alloc;
if (!bitmap)
bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
if (!bitmap)
goto alloc;
__set_bit(bit, bitmap->bitmap);
}
xas_store(&xas, bitmap);
}
out:
xas_unlock_irqrestore(&xas, flags);
if (xas_nomem(&xas, gfp)) {
xas.xa_index = min / IDA_BITMAP_BITS;
bit = min % IDA_BITMAP_BITS;
goto retry;
}
if (bitmap != alloc)
kfree(alloc);
if (xas_error(&xas))
return xas_error(&xas);
return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
xas_unlock_irqrestore(&xas, flags);
alloc = kzalloc(sizeof(*bitmap), gfp);
if (!alloc)
return -ENOMEM;
xas_set(&xas, min / IDA_BITMAP_BITS);
bit = min % IDA_BITMAP_BITS;
goto retry;
nospc:
xas_unlock_irqrestore(&xas, flags);
kfree(alloc);
return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
/**
* ida_find_first_range - Get the lowest used ID.
* @ida: IDA handle.
* @min: Lowest ID to get.
* @max: Highest ID to get.
*
* Get the lowest used ID between @min and @max, inclusive. The returned
* ID will not exceed %INT_MAX, even if @max is larger.
*
* Context: Any context. Takes and releases the xa_lock.
* Return: The lowest used ID, or errno if no used ID is found.
*/
int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max)
{
unsigned long index = min / IDA_BITMAP_BITS;
unsigned int offset = min % IDA_BITMAP_BITS;
unsigned long *addr, size, bit;
unsigned long tmp = 0;
unsigned long flags;
void *entry;
int ret;
if ((int)min < 0)
return -EINVAL;
if ((int)max < 0)
max = INT_MAX;
xa_lock_irqsave(&ida->xa, flags);
entry = xa_find(&ida->xa, &index, max / IDA_BITMAP_BITS, XA_PRESENT);
if (!entry) {
ret = -ENOENT;
goto err_unlock;
}
if (index > min / IDA_BITMAP_BITS)
offset = 0;
if (index * IDA_BITMAP_BITS + offset > max) {
ret = -ENOENT;
goto err_unlock;
}
if (xa_is_value(entry)) {
tmp = xa_to_value(entry);
addr = &tmp;
size = BITS_PER_XA_VALUE;
} else {
addr = ((struct ida_bitmap *)entry)->bitmap;
size = IDA_BITMAP_BITS;
}
bit = find_next_bit(addr, size, offset);
xa_unlock_irqrestore(&ida->xa, flags);
if (bit == size ||
index * IDA_BITMAP_BITS + bit > max)
return -ENOENT;
return index * IDA_BITMAP_BITS + bit;
err_unlock:
xa_unlock_irqrestore(&ida->xa, flags);
return ret;
}
EXPORT_SYMBOL(ida_find_first_range);
/**
* ida_free() - Release an allocated ID.
* @ida: IDA handle.
* @id: Previously allocated ID.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
*/
void ida_free(struct ida *ida, unsigned int id)
{
XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
unsigned bit = id % IDA_BITMAP_BITS;
struct ida_bitmap *bitmap;
unsigned long flags;
if ((int)id < 0)
return;
xas_lock_irqsave(&xas, flags);
bitmap = xas_load(&xas);
if (xa_is_value(bitmap)) {
unsigned long v = xa_to_value(bitmap);
if (bit >= BITS_PER_XA_VALUE)
goto err;
if (!(v & (1UL << bit)))
goto err;
v &= ~(1UL << bit);
if (!v)
goto delete;
xas_store(&xas, xa_mk_value(v));
} else {
if (!bitmap || !test_bit(bit, bitmap->bitmap))
goto err;
__clear_bit(bit, bitmap->bitmap);
xas_set_mark(&xas, XA_FREE_MARK);
if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
kfree(bitmap);
delete:
xas_store(&xas, NULL);
}
}
xas_unlock_irqrestore(&xas, flags);
return;
err:
xas_unlock_irqrestore(&xas, flags);
WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);
/**
* ida_destroy() - Free all IDs.
* @ida: IDA handle.
*
* Calling this function frees all IDs and releases all resources used
* by an IDA. When this call returns, the IDA is empty and can be reused
* or freed. If the IDA is already empty, there is no need to call this
* function.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
*/
void ida_destroy(struct ida *ida)
{
XA_STATE(xas, &ida->xa, 0);
struct ida_bitmap *bitmap;
unsigned long flags;
xas_lock_irqsave(&xas, flags);
xas_for_each(&xas, bitmap, ULONG_MAX) {
if (!xa_is_value(bitmap))
kfree(bitmap);
xas_store(&xas, NULL);
}
xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS)
static void ida_dump_entry(void *entry, unsigned long index)
{
unsigned long i;
if (!entry)
return;
if (xa_is_node(entry)) {
struct xa_node *node = xa_to_node(entry);
unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
XA_CHUNK_SHIFT;
xa_dump_index(index * IDA_BITMAP_BITS, shift);
xa_dump_node(node);
for (i = 0; i < XA_CHUNK_SIZE; i++)
ida_dump_entry(node->slots[i],
index | (i << node->shift));
} else if (xa_is_value(entry)) {
xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
} else {
struct ida_bitmap *bitmap = entry;
xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
pr_cont("bitmap: %p data", bitmap);
for (i = 0; i < IDA_BITMAP_LONGS; i++)
pr_cont(" %lx", bitmap->bitmap[i]);
pr_cont("\n");
}
}
static void ida_dump(struct ida *ida)
{
struct xarray *xa = &ida->xa;
pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
xa->xa_flags >> ROOT_TAG_SHIFT);
ida_dump_entry(xa->xa_head, 0);
}
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright (C) 2016 - 2020 Christoph Hellwig
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"
/* Should we allow writing to mounted block devices? */
static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED);
struct bdev_inode {
struct block_device bdev;
struct inode vfs_inode;
};
static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
return container_of(inode, struct bdev_inode, vfs_inode);
}
static inline struct inode *BD_INODE(struct block_device *bdev)
{
return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
}
struct block_device *I_BDEV(struct inode *inode)
{
return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);
struct block_device *file_bdev(struct file *bdev_file)
{
return I_BDEV(bdev_file->f_mapping->host);
}
EXPORT_SYMBOL(file_bdev);
static void bdev_write_inode(struct block_device *bdev)
{
struct inode *inode = BD_INODE(bdev);
int ret;
spin_lock(&inode->i_lock);
while (inode->i_state & I_DIRTY) {
spin_unlock(&inode->i_lock);
ret = write_inode_now(inode, true);
if (ret)
pr_warn_ratelimited(
"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
bdev, ret);
spin_lock(&inode->i_lock);
}
spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
struct address_space *mapping = bdev->bd_mapping;
if (mapping_empty(mapping))
return;
invalidate_bh_lrus();
truncate_inode_pages(mapping, 0);
}
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
struct address_space *mapping = bdev->bd_mapping;
if (mapping->nrpages) {
invalidate_bh_lrus();
lru_add_drain_all(); /* make sure all lru add caches are flushed */
invalidate_mapping_pages(mapping, 0, -1);
}
}
EXPORT_SYMBOL(invalidate_bdev);
/*
* Drop all buffers & page cache for given bdev range. This function bails
* with error if bdev has other exclusive owner (such as filesystem).
*/
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
loff_t lstart, loff_t lend)
{
/*
* If we don't hold exclusive handle for the device, upgrade to it
* while we discard the buffer cache to avoid discarding buffers
* under live filesystem.
*/
if (!(mode & BLK_OPEN_EXCL)) {
int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
if (err)
goto invalidate;
}
truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;
invalidate:
/*
* Someone else has the handle exclusively open. Try invalidating instead.
* The 'end' argument is inclusive so the rounding is safe.
*/
return invalidate_inode_pages2_range(bdev->bd_mapping,
lstart >> PAGE_SHIFT,
lend >> PAGE_SHIFT);
}
static void set_init_blocksize(struct block_device *bdev)
{
unsigned int bsize = bdev_logical_block_size(bdev);
loff_t size = i_size_read(BD_INODE(bdev));
while (bsize < PAGE_SIZE) {
if (size & bsize)
break;
bsize <<= 1;
}
BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
mapping_set_folio_min_order(BD_INODE(bdev)->i_mapping,
get_order(bsize));
}
/**
* bdev_validate_blocksize - check that this block size is acceptable
* @bdev: blockdevice to check
* @block_size: block size to check
*
* For block device users that do not use buffer heads or the block device
* page cache, make sure that this block size can be used with the device.
*
* Return: On success zero is returned, negative error code on failure.
*/
int bdev_validate_blocksize(struct block_device *bdev, int block_size)
{
if (blk_validate_block_size(block_size))
return -EINVAL;
/* Size cannot be smaller than the size supported by the device */
if (block_size < bdev_logical_block_size(bdev))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(bdev_validate_blocksize);
int set_blocksize(struct file *file, int size)
{
struct inode *inode = file->f_mapping->host;
struct block_device *bdev = I_BDEV(inode);
int ret;
ret = bdev_validate_blocksize(bdev, size);
if (ret)
return ret;
if (!file->private_data)
return -EINVAL;
/* Don't change the size if it is the same as the current one */
if (inode->i_blkbits != blksize_bits(size)) {
/*
* Flush and truncate the pagecache before we reconfigure the
* mapping geometry because folio sizes are variable now. If a
* reader has already allocated a folio whose size is smaller
* than the new min_order but invokes readahead after the new
* min_order becomes visible, readahead will think there are
* "zero" blocks per folio and crash. Take the inode and
* invalidation locks to avoid racing with
* read/write/fallocate.
*/
inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
sync_blockdev(bdev);
kill_bdev(bdev);
inode->i_blkbits = blksize_bits(size);
mapping_set_folio_min_order(inode->i_mapping, get_order(size));
kill_bdev(bdev);
filemap_invalidate_unlock(inode->i_mapping);
inode_unlock(inode);
}
return 0;
}
EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
if (!(sb->s_type->fs_flags & FS_LBS) && size > PAGE_SIZE)
return 0;
if (set_blocksize(sb->s_bdev_file, size))
return 0;
/* If we get here, we know size is validated */
sb->s_blocksize = size;
sb->s_blocksize_bits = blksize_bits(size);
return sb->s_blocksize;
}
EXPORT_SYMBOL(sb_set_blocksize);
int __must_check sb_min_blocksize(struct super_block *sb, int size)
{
int minsize = bdev_logical_block_size(sb->s_bdev);
if (size < minsize)
size = minsize;
return sb_set_blocksize(sb, size);
}
EXPORT_SYMBOL(sb_min_blocksize);
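/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * fill_super() implementation would typically pick its block size like this,
 * letting sb_min_blocksize() round up to the device's logical block size.
 * The example_* name is hypothetical.
 */
static int example_setup_blocksize(struct super_block *sb)
{
if (!sb_min_blocksize(sb, 1024))
return -EINVAL; /* rejected by set_blocksize() */
return 0;
}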
int sync_blockdev_nowait(struct block_device *bdev)
{
if (!bdev)
return 0;
return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
/*
* Write out and wait upon all the dirty data associated with a block
* device via its mapping. Does not take the superblock lock.
*/
int sync_blockdev(struct block_device *bdev)
{
if (!bdev)
return 0;
return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
return filemap_write_and_wait_range(bdev->bd_mapping,
lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
/**
* bdev_freeze - lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
*
* If a superblock is found on this device, we take the s_umount semaphore
* on it to make sure nobody unmounts until the snapshot creation is done.
* The reference counter (bd_fsfreeze_count) guarantees that only the last
* unfreeze process can actually unfreeze the frozen filesystem when multiple
* freeze requests arrive simultaneously. It counts up in bdev_freeze() and
* counts down in bdev_thaw(). Only when it reaches 0 does bdev_thaw()
* actually unfreeze the filesystem.
*
* Return: On success zero is returned, negative error code on failure.
*/
int bdev_freeze(struct block_device *bdev)
{
int error = 0;
mutex_lock(&bdev->bd_fsfreeze_mutex);
if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) {
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return 0;
}
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) {
error = bdev->bd_holder_ops->freeze(bdev);
lockdep_assert_not_held(&bdev->bd_holder_lock);
} else {
mutex_unlock(&bdev->bd_holder_lock);
error = sync_blockdev(bdev);
}
if (error)
atomic_dec(&bdev->bd_fsfreeze_count);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return error;
}
EXPORT_SYMBOL(bdev_freeze);
/**
* bdev_thaw - unlock filesystem
* @bdev: blockdevice to unlock
*
* Unlocks the filesystem and marks it writeable again after bdev_freeze().
*
* Return: On success zero is returned, negative error code on failure.
*/
int bdev_thaw(struct block_device *bdev)
{
int error = -EINVAL, nr_freeze;
mutex_lock(&bdev->bd_fsfreeze_mutex);
/*
* If this returns < 0 it means that @bd_fsfreeze_count was
* already 0 and no decrement was performed.
*/
nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count);
if (nr_freeze < 0)
goto out;
error = 0;
if (nr_freeze > 0)
goto out;
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) {
error = bdev->bd_holder_ops->thaw(bdev);
lockdep_assert_not_held(&bdev->bd_holder_lock);
} else {
mutex_unlock(&bdev->bd_holder_lock);
}
if (error)
atomic_inc(&bdev->bd_fsfreeze_count);
out:
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return error;
}
EXPORT_SYMBOL(bdev_thaw);
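/*
 * Illustrative sketch (not part of the original file): bdev_freeze() and
 * bdev_thaw() pair up and nest via bd_fsfreeze_count, so a snapshot path can
 * simply bracket its work with them. The example_* name is hypothetical.
 */
static int example_snapshot(struct block_device *bdev)
{
int error;
error = bdev_freeze(bdev);
if (error)
return error;
/* ... capture the snapshot while the filesystem is quiesced ... */
return bdev_thaw(bdev);
}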
/*
* pseudo-fs
*/
static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache *bdev_cachep __ro_after_init;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);
if (!ei)
return NULL;
memset(&ei->bdev, 0, sizeof(ei->bdev));
if (security_bdev_alloc(&ei->bdev)) {
kmem_cache_free(bdev_cachep, ei);
return NULL;
}
return &ei->vfs_inode;
}
static void bdev_free_inode(struct inode *inode)
{
struct block_device *bdev = I_BDEV(inode);
free_percpu(bdev->bd_stats);
kfree(bdev->bd_meta_info);
security_bdev_free(bdev);
if (!bdev_is_partition(bdev)) {
if (bdev->bd_disk && bdev->bd_disk->bdi)
bdi_put(bdev->bd_disk->bdi);
kfree(bdev->bd_disk);
}
if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
blk_free_ext_minor(MINOR(bdev->bd_dev));
kmem_cache_free(bdev_cachep, BDEV_I(inode));
}
static void init_once(void *data)
{
struct bdev_inode *ei = data;
inode_init_once(&ei->vfs_inode);
}
static void bdev_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode); /* is it needed here? */
clear_inode(inode);
}
static const struct super_operations bdev_sops = {
.statfs = simple_statfs,
.alloc_inode = bdev_alloc_inode,
.free_inode = bdev_free_inode,
.drop_inode = inode_just_drop,
.evict_inode = bdev_evict_inode,
};
static int bd_init_fs_context(struct fs_context *fc)
{
struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
if (!ctx)
return -ENOMEM;
fc->s_iflags |= SB_I_CGROUPWB;
ctx->ops = &bdev_sops;
return 0;
}
static struct file_system_type bd_type = {
.name = "bdev",
.init_fs_context = bd_init_fs_context,
.kill_sb = kill_anon_super,
};
struct super_block *blockdev_superblock __ro_after_init;
static struct vfsmount *blockdev_mnt __ro_after_init;
EXPORT_SYMBOL_GPL(blockdev_superblock);
void __init bdev_cache_init(void)
{
int err;
bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_ACCOUNT|SLAB_PANIC),
init_once);
err = register_filesystem(&bd_type);
if (err)
panic("Cannot register bdev pseudo-fs");
blockdev_mnt = kern_mount(&bd_type);
if (IS_ERR(blockdev_mnt))
panic("Cannot create bdev pseudo-fs");
blockdev_superblock = blockdev_mnt->mnt_sb; /* For writeback */
}
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
struct block_device *bdev;
struct inode *inode;
inode = new_inode(blockdev_superblock);
if (!inode)
return NULL;
inode->i_mode = S_IFBLK;
inode->i_rdev = 0;
inode->i_data.a_ops = &def_blk_aops;
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
bdev = I_BDEV(inode);
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
mutex_init(&bdev->bd_holder_lock);
atomic_set(&bdev->__bd_flags, partno);
bdev->bd_mapping = &inode->i_data;
bdev->bd_queue = disk->queue;
if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO))
bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO);
bdev->bd_stats = alloc_percpu(struct disk_stats);
if (!bdev->bd_stats) {
iput(inode);
return NULL;
}
bdev->bd_disk = disk;
return bdev;
}
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
struct inode *inode = BD_INODE(bdev);
if (bdev_stable_writes(bdev))
mapping_set_stable_writes(bdev->bd_mapping);
bdev->bd_dev = dev;
inode->i_rdev = dev;
inode->i_ino = dev;
insert_inode_hash(inode);
}
void bdev_unhash(struct block_device *bdev)
{
remove_inode_hash(BD_INODE(bdev));
}
void bdev_drop(struct block_device *bdev)
{
iput(BD_INODE(bdev));
}
long nr_blockdev_pages(void)
{
struct inode *inode;
long ret = 0;
spin_lock(&blockdev_superblock->s_inode_list_lock);
list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
ret += inode->i_mapping->nrpages;
spin_unlock(&blockdev_superblock->s_inode_list_lock);
return ret;
}
/**
* bd_may_claim - test whether a block device can be claimed
* @bdev: block device of interest
* @holder: holder trying to claim @bdev
* @hops: holder ops
*
* Test whether @bdev can be claimed by @holder.
*
* RETURNS:
* %true if @bdev can be claimed, %false otherwise.
*/
static bool bd_may_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops)
{
struct block_device *whole = bdev_whole(bdev);
lockdep_assert_held(&bdev_lock);
if (bdev->bd_holder) {
/*
* The same holder can always re-claim.
*/
if (bdev->bd_holder == holder) {
if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
return false;
return true;
}
return false;
}
/*
* If the whole device's holder is set to bd_may_claim, a partition on
* the device is claimed, but not the whole device.
*/
if (whole != bdev &&
whole->bd_holder && whole->bd_holder != bd_may_claim)
return false;
return true;
}
/**
* bd_prepare_to_claim - claim a block device
* @bdev: block device of interest
* @holder: holder trying to claim @bdev
* @hops: holder ops.
*
* Claim @bdev. This function fails if @bdev is already claimed by another
* holder and waits if another claiming is in progress. On successful return,
* has ownership of bd_claiming and bd_holder[s].
*
* RETURNS:
* 0 if @bdev can be claimed, -EBUSY otherwise.
*/
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops)
{
struct block_device *whole = bdev_whole(bdev);
if (WARN_ON_ONCE(!holder))
return -EINVAL;
retry:
mutex_lock(&bdev_lock);
/* if someone else claimed, fail */
if (!bd_may_claim(bdev, holder, hops)) {
mutex_unlock(&bdev_lock);
return -EBUSY;
}
/* if claiming is already in progress, wait for it to finish */
if (whole->bd_claiming) {
wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
DEFINE_WAIT(wait);
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
mutex_unlock(&bdev_lock);
schedule();
finish_wait(wq, &wait);
goto retry;
}
/* yay, all mine */
whole->bd_claiming = holder;
mutex_unlock(&bdev_lock);
return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
static void bd_clear_claiming(struct block_device *whole, void *holder)
{
lockdep_assert_held(&bdev_lock);
/* tell others that we're done */
BUG_ON(whole->bd_claiming != holder);
whole->bd_claiming = NULL;
wake_up_var(&whole->bd_claiming);
}
/**
* bd_finish_claiming - finish claiming of a block device
* @bdev: block device of interest
* @holder: holder that has claimed @bdev
* @hops: block device holder operations
*
* Finish exclusive open of a block device. Mark the device as exclusively
* open by the holder and wake up all waiters for exclusive open to finish.
*/
static void bd_finish_claiming(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops)
{
struct block_device *whole = bdev_whole(bdev);
mutex_lock(&bdev_lock);
BUG_ON(!bd_may_claim(bdev, holder, hops));
/*
* Note that for a whole device bd_holders will be incremented twice,
* and bd_holder will be set to bd_may_claim before being set to holder
*/
whole->bd_holders++;
whole->bd_holder = bd_may_claim;
bdev->bd_holders++;
mutex_lock(&bdev->bd_holder_lock);
bdev->bd_holder = holder;
bdev->bd_holder_ops = hops;
mutex_unlock(&bdev->bd_holder_lock);
bd_clear_claiming(whole, holder);
mutex_unlock(&bdev_lock);
}
/**
* bd_abort_claiming - abort claiming of a block device
* @bdev: block device of interest
* @holder: holder that has claimed @bdev
*
* Abort claiming of a block device when the exclusive open failed. This can
* also be used when exclusive open is not actually desired and we just needed
* to block other exclusive openers for a while.
*/
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
mutex_lock(&bdev_lock);
bd_clear_claiming(bdev_whole(bdev), holder);
mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
static void bd_end_claim(struct block_device *bdev, void *holder)
{
struct block_device *whole = bdev_whole(bdev);
bool unblock = false;
/*
* Release a claim on the device. The holder fields are protected with
* bdev_lock. open_mutex is used to synchronize disk_holder unlinking.
*/
mutex_lock(&bdev_lock);
WARN_ON_ONCE(bdev->bd_holder != holder);
WARN_ON_ONCE(--bdev->bd_holders < 0);
WARN_ON_ONCE(--whole->bd_holders < 0);
if (!bdev->bd_holders) {
mutex_lock(&bdev->bd_holder_lock);
bdev->bd_holder = NULL;
bdev->bd_holder_ops = NULL;
mutex_unlock(&bdev->bd_holder_lock);
if (bdev_test_flag(bdev, BD_WRITE_HOLDER))
unblock = true;
}
if (!whole->bd_holders)
whole->bd_holder = NULL;
mutex_unlock(&bdev_lock);
/*
* If this was the last claim, remove holder link and unblock event polling if
* it was a write holder.
*/
if (unblock) {
disk_unblock_events(bdev->bd_disk);
bdev_clear_flag(bdev, BD_WRITE_HOLDER);
}
}
static void blkdev_flush_mapping(struct block_device *bdev)
{
WARN_ON_ONCE(bdev->bd_holders);
sync_blockdev(bdev);
kill_bdev(bdev);
bdev_write_inode(bdev);
}
static void blkdev_put_whole(struct block_device *bdev)
{
if (atomic_dec_and_test(&bdev->bd_openers))
blkdev_flush_mapping(bdev);
if (bdev->bd_disk->fops->release)
bdev->bd_disk->fops->release(bdev->bd_disk);
}
static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
int ret;
if (disk->fops->open) {
ret = disk->fops->open(disk, mode);
if (ret) {
/* avoid ghost partitions on a removed medium */
if (ret == -ENOMEDIUM &&
test_bit(GD_NEED_PART_SCAN, &disk->state))
bdev_disk_changed(disk, true);
return ret;
}
}
if (!atomic_read(&bdev->bd_openers))
set_init_blocksize(bdev);
atomic_inc(&bdev->bd_openers);
if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
/*
* Only return scanning errors if we are called from contexts
* that explicitly want them, e.g. the BLKRRPART ioctl.
*/
ret = bdev_disk_changed(disk, false);
if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
blkdev_put_whole(bdev);
return ret;
}
}
return 0;
}
static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
struct gendisk *disk = part->bd_disk;
int ret;
ret = blkdev_get_whole(bdev_whole(part), mode);
if (ret)
return ret;
ret = -ENXIO;
if (!bdev_nr_sectors(part))
goto out_blkdev_put;
if (!atomic_read(&part->bd_openers)) {
disk->open_partitions++;
set_init_blocksize(part);
}
atomic_inc(&part->bd_openers);
return 0;
out_blkdev_put:
blkdev_put_whole(bdev_whole(part));
return ret;
}
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder)
{
int ret;
ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
MAJOR(dev), MINOR(dev),
((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
if (ret)
return ret;
/* Blocking writes requires exclusive opener */
if (mode & BLK_OPEN_RESTRICT_WRITES && !holder)
return -EINVAL;
/*
* We're using error pointers to indicate to ->release() when we
* failed to open that block device. Also this doesn't make sense.
*/
if (WARN_ON_ONCE(IS_ERR(holder)))
return -EINVAL;
return 0;
}
static void blkdev_put_part(struct block_device *part)
{
struct block_device *whole = bdev_whole(part);
if (atomic_dec_and_test(&part->bd_openers)) {
blkdev_flush_mapping(part);
whole->bd_disk->open_partitions--;
}
blkdev_put_whole(whole);
}
struct block_device *blkdev_get_no_open(dev_t dev, bool autoload)
{
struct block_device *bdev;
struct inode *inode;
inode = ilookup(blockdev_superblock, dev);
if (!inode && autoload && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
blk_request_module(dev);
inode = ilookup(blockdev_superblock, dev);
if (inode)
pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
}
if (!inode)
return NULL;
/* switch from the inode reference to a device mode one: */
bdev = &BDEV_I(inode)->bdev;
if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
bdev = NULL;
iput(inode);
return bdev;
}
void blkdev_put_no_open(struct block_device *bdev)
{
put_device(&bdev->bd_device);
}
static bool bdev_writes_blocked(struct block_device *bdev)
{
return bdev->bd_writers < 0;
}
static void bdev_block_writes(struct block_device *bdev)
{
bdev->bd_writers--;
}
static void bdev_unblock_writes(struct block_device *bdev)
{
bdev->bd_writers++;
}
static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
{
if (bdev_allow_write_mounted)
return true;
/* Writes blocked? */
if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev))
return false;
if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0)
return false;
return true;
}
static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
{
if (bdev_allow_write_mounted)
return;
/* Claim exclusive or shared write access. */
if (mode & BLK_OPEN_RESTRICT_WRITES)
bdev_block_writes(bdev);
else if (mode & BLK_OPEN_WRITE)
bdev->bd_writers++;
}
static inline bool bdev_unclaimed(const struct file *bdev_file)
{
return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host);
}
static void bdev_yield_write_access(struct file *bdev_file)
{
struct block_device *bdev;
if (bdev_allow_write_mounted)
return;
if (bdev_unclaimed(bdev_file))
return;
bdev = file_bdev(bdev_file);
if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED)
bdev_unblock_writes(bdev);
else if (bdev_file->f_mode & FMODE_WRITE)
bdev->bd_writers--;
}
/**
* bdev_open - open a block device
* @bdev: block device to open
* @mode: open mode (BLK_OPEN_*)
* @holder: exclusive holder identifier
* @hops: holder operations
* @bdev_file: file for the block device
*
* Open the block device. If @holder is not %NULL, the block device is opened
* with exclusive access. Exclusive opens may nest for the same @holder.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* zero on success, -errno on failure.
*/
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops, struct file *bdev_file)
{
bool unblock_events = true;
struct gendisk *disk = bdev->bd_disk;
int ret;
if (holder) {
mode |= BLK_OPEN_EXCL;
ret = bd_prepare_to_claim(bdev, holder, hops);
if (ret)
return ret;
} else {
if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL))
return -EIO;
}
disk_block_events(disk);
mutex_lock(&disk->open_mutex);
ret = -ENXIO;
if (!disk_live(disk))
goto abort_claiming;
if (!try_module_get(disk->fops->owner))
goto abort_claiming;
ret = -EBUSY;
if (!bdev_may_open(bdev, mode))
goto put_module;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
ret = blkdev_get_whole(bdev, mode);
if (ret)
goto put_module;
bdev_claim_write_access(bdev, mode);
if (holder) {
bd_finish_claiming(bdev, holder, hops);
/*
* Block event polling for write claims if requested. Any write
* holder makes the write_holder state stick until all are
* released. This is good enough and tracking individual
* writeable references is too fragile given the way @mode is
* used in blkdev_get/put().
*/
if ((mode & BLK_OPEN_WRITE) &&
!bdev_test_flag(bdev, BD_WRITE_HOLDER) &&
(disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
bdev_set_flag(bdev, BD_WRITE_HOLDER);
unblock_events = false;
}
}
mutex_unlock(&disk->open_mutex);
if (unblock_events)
disk_unblock_events(disk);
bdev_file->f_flags |= O_LARGEFILE;
bdev_file->f_mode |= FMODE_CAN_ODIRECT;
if (bdev_nowait(bdev))
bdev_file->f_mode |= FMODE_NOWAIT;
if (mode & BLK_OPEN_RESTRICT_WRITES)
bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
bdev_file->f_mapping = bdev->bd_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;
return 0;
put_module:
module_put(disk->fops->owner);
abort_claiming:
if (holder)
bd_abort_claiming(bdev, holder);
mutex_unlock(&disk->open_mutex);
disk_unblock_events(disk);
return ret;
}
/*
* If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk
* associated with the floppy driver where it has allowed ioctls if the
* file was opened for writing, but does not allow reads or writes.
* Make sure that this quirk is reflected in @f_flags.
*
* It can also happen if a block device is opened as O_RDWR | O_WRONLY.
*/
static unsigned blk_to_file_flags(blk_mode_t mode)
{
unsigned int flags = 0;
if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) ==
(BLK_OPEN_READ | BLK_OPEN_WRITE))
flags |= O_RDWR;
else if (mode & BLK_OPEN_WRITE_IOCTL)
flags |= O_RDWR | O_WRONLY;
else if (mode & BLK_OPEN_WRITE)
flags |= O_WRONLY;
else if (mode & BLK_OPEN_READ)
flags |= O_RDONLY; /* homeopathic, because O_RDONLY is 0 */
else
WARN_ON_ONCE(true);
if (mode & BLK_OPEN_NDELAY)
flags |= O_NDELAY;
return flags;
}
struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops)
{
struct file *bdev_file;
struct block_device *bdev;
unsigned int flags;
int ret;
ret = bdev_permission(dev, mode, holder);
if (ret)
return ERR_PTR(ret);
bdev = blkdev_get_no_open(dev, true);
if (!bdev)
return ERR_PTR(-ENXIO);
flags = blk_to_file_flags(mode);
bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
if (IS_ERR(bdev_file)) {
blkdev_put_no_open(bdev);
return bdev_file;
}
ihold(BD_INODE(bdev));
ret = bdev_open(bdev, mode, holder, hops, bdev_file);
if (ret) {
/* We failed to open the block device. Let ->release() know. */
bdev_file->private_data = ERR_PTR(ret);
fput(bdev_file);
return ERR_PTR(ret);
}
return bdev_file;
}
EXPORT_SYMBOL(bdev_file_open_by_dev);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
void *holder,
const struct blk_holder_ops *hops)
{
struct file *file;
dev_t dev;
int error;
error = lookup_bdev(path, &dev);
if (error)
return ERR_PTR(error);
file = bdev_file_open_by_dev(dev, mode, holder, hops);
if (!IS_ERR(file) && (mode & BLK_OPEN_WRITE)) {
if (bdev_read_only(file_bdev(file))) {
fput(file);
file = ERR_PTR(-EACCES);
}
}
return file;
}
EXPORT_SYMBOL(bdev_file_open_by_path);
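/*
 * Illustrative sketch (not part of the original file): exclusive open of a
 * block device by path. A non-NULL @holder requests an exclusive claim, which
 * is dropped again by bdev_fput() (or a plain fput()) on the returned file.
 * The example_* name is hypothetical.
 */
static struct file *example_open_exclusive(const char *path, void *holder)
{
struct file *bdev_file;
bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
holder, NULL);
if (IS_ERR(bdev_file))
return bdev_file;
/* ... do I/O through file_bdev(bdev_file), then bdev_fput(bdev_file) ... */
return bdev_file;
}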
static inline void bd_yield_claim(struct file *bdev_file)
{
struct block_device *bdev = file_bdev(bdev_file);
void *holder = bdev_file->private_data;
lockdep_assert_held(&bdev->bd_disk->open_mutex);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder)))
return;
if (!bdev_unclaimed(bdev_file))
bd_end_claim(bdev, holder);
}
void bdev_release(struct file *bdev_file)
{
struct block_device *bdev = file_bdev(bdev_file);
void *holder = bdev_file->private_data;
struct gendisk *disk = bdev->bd_disk;
/* We failed to open that block device. */
if (IS_ERR(holder))
goto put_no_open;
/*
* Sync early if it looks like we're the last one. If someone else
* opens the block device between now and the decrement of bd_openers
* then we did a sync that we didn't need to, but that's not the end
* of the world and we want to avoid long (could be several minute)
* syncs while holding the mutex.
*/
if (atomic_read(&bdev->bd_openers) == 1)
sync_blockdev(bdev);
mutex_lock(&disk->open_mutex);
bdev_yield_write_access(bdev_file);
if (holder)
bd_yield_claim(bdev_file);
/*
* Trigger event checking and tell drivers to flush MEDIA_CHANGE
* event. This is to ensure detection of media removal commanded
* from userland - e.g. eject(1).
*/
disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);
if (bdev_is_partition(bdev))
blkdev_put_part(bdev);
else
blkdev_put_whole(bdev);
mutex_unlock(&disk->open_mutex);
module_put(disk->fops->owner);
put_no_open:
blkdev_put_no_open(bdev);
}
/**
* bdev_fput - yield claim to the block device and put the file
* @bdev_file: open block device
*
* Yield claim on the block device and put the file. Ensure that the
* block device can be reclaimed before the file is closed, which is a
* deferred operation.
*/
void bdev_fput(struct file *bdev_file)
{
if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops))
return;
if (bdev_file->private_data) {
struct block_device *bdev = file_bdev(bdev_file);
struct gendisk *disk = bdev->bd_disk;
mutex_lock(&disk->open_mutex);
bdev_yield_write_access(bdev_file);
bd_yield_claim(bdev_file);
/*
* Tell release we already gave up our hold on the
* device and if write restrictions are available that
* we already gave up write access to the device.
*/
bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host);
mutex_unlock(&disk->open_mutex);
}
fput(bdev_file);
}
EXPORT_SYMBOL(bdev_fput);
/**
* lookup_bdev() - Look up a struct block_device by name.
* @pathname: Name of the block device in the filesystem.
* @dev: Pointer to the block device's dev_t, if found.
*
* Lookup the block device's dev_t at @pathname in the current
* namespace if possible and return it in @dev.
*
* Context: May sleep.
* Return: 0 if succeeded, negative errno otherwise.
*/
int lookup_bdev(const char *pathname, dev_t *dev)
{
struct inode *inode;
struct path path;
int error;
if (!pathname || !*pathname)
return -EINVAL;
error = kern_path(pathname, LOOKUP_FOLLOW, &path);
if (error)
return error;
inode = d_backing_inode(path.dentry);
error = -ENOTBLK;
if (!S_ISBLK(inode->i_mode))
goto out_path_put;
error = -EACCES;
if (!may_open_dev(&path))
goto out_path_put;
*dev = inode->i_rdev;
error = 0;
out_path_put:
path_put(&path);
return error;
}
EXPORT_SYMBOL(lookup_bdev);
/**
* bdev_mark_dead - mark a block device as dead
* @bdev: block device to operate on
* @surprise: indicate a surprise removal
*
* Tell the file system that this device or media is dead. If @surprise is set
* to %true the device or media is already gone, if not we are preparing for an
* orderly removal.
*
* This calls into the file system, which then typically syncs out all dirty data
* and writes back inodes and then invalidates any cached data in the inodes on
* the file system. In addition we also invalidate the block device mapping.
*/
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
bdev->bd_holder_ops->mark_dead(bdev, surprise);
else {
mutex_unlock(&bdev->bd_holder_lock);
sync_blockdev(bdev);
}
invalidate_bdev(bdev);
}
/*
* New drivers should not use this directly. There are some drivers, however,
* that need this for historical reasons. For example, the DASD driver has
* historically had a shutdown to offline mode that doesn't actually remove the
* gendisk that otherwise looks a lot like a safe device removal.
*/
EXPORT_SYMBOL_GPL(bdev_mark_dead);
void sync_bdevs(bool wait)
{
struct inode *inode, *old_inode = NULL;
spin_lock(&blockdev_superblock->s_inode_list_lock);
list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
struct address_space *mapping = inode->i_mapping;
struct block_device *bdev;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
mapping->nrpages == 0) {
spin_unlock(&inode->i_lock);
continue;
}
__iget(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&blockdev_superblock->s_inode_list_lock);
/*
* We hold a reference to 'inode' so it couldn't have been
* removed from s_inodes list while we dropped the
* s_inode_list_lock. We cannot iput the inode now as we can
* be holding the last reference and we cannot iput it under
* s_inode_list_lock. So we keep the reference and iput it
* later.
*/
iput(old_inode);
old_inode = inode;
bdev = I_BDEV(inode);
mutex_lock(&bdev->bd_disk->open_mutex);
if (!atomic_read(&bdev->bd_openers)) {
; /* skip */
} else if (wait) {
/*
* We keep the error status of individual mapping so
* that applications can catch the writeback error using
* fsync(2). See filemap_fdatawait_keep_errors() for
* details.
*/
filemap_fdatawait_keep_errors(inode->i_mapping);
} else {
filemap_fdatawrite(inode->i_mapping);
}
mutex_unlock(&bdev->bd_disk->open_mutex);
spin_lock(&blockdev_superblock->s_inode_list_lock);
}
spin_unlock(&blockdev_superblock->s_inode_list_lock);
iput(old_inode);
}
/*
* Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices.
*/
void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask)
{
struct block_device *bdev;
/*
* Note that d_backing_inode() returns the block device node inode, not
* the block device's internal inode. Therefore it is *not* valid to
* use I_BDEV() here; the block device has to be looked up by i_rdev
* instead.
*/
bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false);
if (!bdev)
return;
if (request_mask & STATX_DIOALIGN) {
stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
stat->dio_offset_align = bdev_logical_block_size(bdev);
stat->result_mask |= STATX_DIOALIGN;
}
if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) {
struct request_queue *bd_queue = bdev->bd_queue;
generic_fill_statx_atomic_writes(stat,
queue_atomic_write_unit_min_bytes(bd_queue),
queue_atomic_write_unit_max_bytes(bd_queue),
0);
}
stat->blksize = bdev_io_min(bdev);
blkdev_put_no_open(bdev);
}
bool disk_live(struct gendisk *disk)
{
return !inode_unhashed(BD_INODE(disk->part0));
}
EXPORT_SYMBOL_GPL(disk_live);
unsigned int block_size(struct block_device *bdev)
{
return 1 << BD_INODE(bdev)->i_blkbits;
}
EXPORT_SYMBOL_GPL(block_size);
static int __init setup_bdev_allow_write_mounted(char *str)
{
if (kstrtobool(str, &bdev_allow_write_mounted))
pr_warn("Invalid option string for bdev_allow_write_mounted:"
" '%s'\n", str);
return 1;
}
__setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1994 Linus Torvalds
*
* Pentium III FXSR, SSE support
* General FPU state handling cleanups
* Gareth Hughes <gareth@valinux.com>, May 2000
* x86-64 work by Andi Kleen 2002
*/
#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>
#include <asm/fpu/types.h>
/*
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
* disables preemption and softirq processing, so be careful if you intend to
* use it for long periods of time. Kernel-mode FPU cannot be used in all
* contexts -- see irq_fpu_usable() for details.
*/
/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387 _BITUL(0) /* 387 state will be initialized */
#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */
extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);
/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
/*
* Any 64-bit code that uses 387 instructions must explicitly request
* KFPU_387.
*/
kernel_fpu_begin_mask(KFPU_MXCSR);
#else
/*
* 32-bit kernel code may use 387 operations as well as SSE2, etc,
* as long as it checks that the CPU has the required capability.
*/
kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}
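/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for kernel-mode SIMD use. irq_fpu_usable() guards contexts where the FPU
 * may not be used, and preemption stays disabled between begin and end, so
 * the region should stay short. The example_* name is hypothetical.
 */
static inline bool example_simd_op(void *dst, const void *src, unsigned int len)
{
if (!irq_fpu_usable())
return false; /* caller falls back to a scalar implementation */
kernel_fpu_begin();
/* ... SSE/AVX work on dst/src ... */
kernel_fpu_end();
return true;
}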
/*
* Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate, or while
* using the FPU in kernel mode. A context switch will (and softirq might) save
* CPU's FPU registers to fpu->fpstate.regs and set TIF_NEED_FPU_LOAD leaving
* CPU's FPU registers in a random state.
*
* local_bh_disable() protects against both preemption and soft interrupts
* on !RT kernels.
*
* On RT kernels local_bh_disable() is not sufficient because it only
* serializes soft interrupt related sections via a local lock, but stays
* preemptible. Disabling preemption is the right choice here as bottom
* half processing is always in thread context on RT kernels so it
* implicitly prevents bottom half processing as well.
*/
static inline void fpregs_lock(void)
{
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_bh_disable();
else
preempt_disable();
}
static inline void fpregs_unlock(void)
{
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_bh_enable();
else
preempt_enable();
}
/*
* FPU state gets lazily restored before returning to userspace. So when in the
* kernel, the valid FPU state may be kept in the buffer. This function will force
* restore all the fpu state to the registers early if needed, and lock them from
* being automatically saved/restored. Then FPU state can be modified safely in the
* registers, before unlocking with fpregs_unlock().
*/
void fpregs_lock_and_load(void);
#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif
/*
* Load the task FPU state before returning to userspace.
*/
extern void switch_fpu_return(void);
/*
* Query the presence of one or more xfeatures. Works on any legacy CPU as well.
*
* If 'feature_name' is set then put a human-readable description of
* the feature there as well - this can be used to print error (or success)
* messages.
*/
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
/* Trap handling */
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
extern void fpu_reset_from_exception_fixup(void);
/* Boot, hotplug and resume */
extern void fpu__init_cpu(void);
extern void fpu__init_system(void);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
/* State tracking */
DECLARE_PER_CPU(bool, kernel_fpu_allowed);
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/* Process cleanup */
#ifdef CONFIG_X86_64
extern void fpstate_free(struct fpu *fpu);
#else
static inline void fpstate_free(struct fpu *fpu) { }
#endif
/* fpstate-related functions which are exported to KVM */
extern void fpstate_clear_xstate_component(struct fpstate *fpstate, unsigned int xfeature);
extern u64 xstate_get_guest_group_perm(void);
extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures);
#ifdef CONFIG_X86_64
extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd);
extern void fpu_sync_guest_vmexit_xfd_state(void);
#else
static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { }
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif
extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
unsigned int size, u64 xfeatures, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
{
gfpu->fpstate->is_confidential = true;
}
static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
{
return gfpu->fpstate->is_confidential;
}
/* prctl */
extern long fpu_xstate_prctl(int option, unsigned long arg2);
extern void fpu_idle_fpregs(void);
#endif /* _ASM_X86_FPU_API_H */
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RCUREF_H
#define _LINUX_RCUREF_H
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#define RCUREF_ONEREF 0x00000000U
#define RCUREF_MAXREF 0x7FFFFFFFU
#define RCUREF_SATURATED 0xA0000000U
#define RCUREF_RELEASED 0xC0000000U
#define RCUREF_DEAD 0xE0000000U
#define RCUREF_NOREF 0xFFFFFFFFU
/**
* rcuref_init - Initialize a rcuref reference count with the given reference count
* @ref: Pointer to the reference count
* @cnt: The initial reference count, typically '1'
*/
static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
{
atomic_set(&ref->refcnt, cnt - 1);
}
/**
* rcuref_read - Read the number of held reference counts of a rcuref
* @ref: Pointer to the reference count
*
* Return: The number of held references (0 ... N). The value 0 does not
* indicate that it is safe to schedule the object, protected by this reference
* counter, for deconstruction.
* If you want to know if the reference counter has been marked DEAD (as
* signaled by rcuref_put()) please use rcuref_is_dead().
*/
static inline unsigned int rcuref_read(rcuref_t *ref)
{
unsigned int c = atomic_read(&ref->refcnt);
/* Return 0 if within the DEAD zone. */
return c >= RCUREF_RELEASED ? 0 : c + 1;
}
/**
* rcuref_is_dead - Check if the rcuref has been already marked dead
* @ref: Pointer to the reference count
*
* Return: True if the object has been marked DEAD. This signals that a previous
* invocation of rcuref_put() returned true on this reference counter meaning
* the protected object can safely be scheduled for deconstruction.
* Otherwise, returns false.
*/
static inline bool rcuref_is_dead(rcuref_t *ref)
{
unsigned int c = atomic_read(&ref->refcnt);
return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF);
}
extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);
/**
* rcuref_get - Acquire one reference on a rcuref reference count
* @ref: Pointer to the reference count
*
* Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See documentation in lib/rcuref.c
*
* Return:
* False if the attempt to acquire a reference failed. This happens
* when the last reference has been put already
*
* True if a reference was successfully acquired
*/
static inline __must_check bool rcuref_get(rcuref_t *ref)
{
/*
* Unconditionally increase the reference count. The saturation and
* dead zones provide enough tolerance for this.
*/
if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
return true;
/* Handle the cases inside the saturation and dead zones */
return rcuref_get_slowpath(ref);
}
extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
/*
* Internal helper. Do not invoke directly.
*/
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
int cnt;
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
"suspicious rcuref_put_rcusafe() usage");
/*
* Unconditionally decrease the reference count. The saturation and
* dead zones provide enough tolerance for this.
*/
cnt = atomic_sub_return_release(1, &ref->refcnt);
if (likely(cnt >= 0))
return false;
/*
* Handle the last reference drop and cases inside the saturation
* and dead zones.
*/
return rcuref_put_slowpath(ref, cnt);
}
/**
* rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe
* @ref: Pointer to the reference count
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Can be invoked from contexts, which guarantee that no grace period can
* happen which would free the object concurrently if the decrement drops
* the last reference and the slowpath races against a concurrent get() and
* put() pair. rcu_read_lock()'ed and atomic contexts qualify.
*
* Return:
* True if this was the last reference with no future references
* possible. This signals the caller that it can safely release the
* object which is protected by the reference counter.
*
* False if there are still active references or the put() raced
* with a concurrent get()/put() pair. Caller is not allowed to
* release the protected object.
*/
static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
{
return __rcuref_put(ref);
}
/**
* rcuref_put -- Release one reference for a rcuref reference count
* @ref: Pointer to the reference count
*
* Can be invoked from any context.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Return:
*
* True if this was the last reference with no future references
* possible. This signals the caller that it can safely schedule the
* object, which is protected by the reference counter, for
* deconstruction.
*
* False if there are still active references or the put() raced
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
static inline __must_check bool rcuref_put(rcuref_t *ref)
{
bool released;
preempt_disable();
	released = __rcuref_put(ref);
	preempt_enable();
	return released;
}
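/*
 * Minimal lifecycle sketch (illustrative only, not part of the API). The
 * struct and function names are hypothetical. A real user must keep the
 * object memory valid for concurrent rcuref_get() callers, typically by
 * freeing it only after an RCU grace period.
 */
struct example_obj {
	rcuref_t ref;
};

static inline void example_obj_init(struct example_obj *obj)
{
	/* The creator starts out holding exactly one reference. */
	rcuref_init(&obj->ref, 1);
}

static inline bool example_obj_tryget(struct example_obj *obj)
{
	/* Caller is expected to be within an RCU read-side critical section. */
	return rcuref_get(&obj->ref);
}

static inline bool example_obj_put(struct example_obj *obj)
{
	/*
	 * True means the last reference is gone and the caller may schedule
	 * the object for RCU-deferred deconstruction.
	 */
	return rcuref_put(&obj->ref);
}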
#endif
/* CPU control.
* (C) 2001, 2002, 2003, 2004 Rusty Russell
*
* This code is licenced under the GPL.
*/
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <linux/cc_platform.h>
#include <linux/parser.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
#include "smpboot.h"
/**
* struct cpuhp_cpu_state - Per cpu hotplug state storage
* @state: The current cpu state
* @target: The target state
* @fail: Current CPU hotplug callback state
* @thread: Pointer to the hotplug thread
* @should_run: Thread should execute
* @rollback: Perform a rollback
* @single: Single callback invocation
* @bringup: Single callback bringup or teardown selector
* @node: Remote CPU node; for multi-instance, do a
* single entry callback for install/remove
* @last: For multi-instance rollback, remember how far we got
* @cb_state: The state for a single callback (install/uninstall)
* @result: Result of the operation
* @ap_sync_state: State for AP synchronization
* @done_up: Signal completion to the issuer of the task for cpu-up
* @done_down: Signal completion to the issuer of the task for cpu-down
*/
struct cpuhp_cpu_state {
enum cpuhp_state state;
enum cpuhp_state target;
enum cpuhp_state fail;
#ifdef CONFIG_SMP
struct task_struct *thread;
bool should_run;
bool rollback;
bool single;
bool bringup;
struct hlist_node *node;
struct hlist_node *last;
enum cpuhp_state cb_state;
int result;
atomic_t ap_sync_state;
struct completion done_up;
struct completion done_down;
#endif
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
.fail = CPUHP_INVALID,
};
#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
static inline void cpuhp_lock_acquire(bool bringup)
{
lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
static inline void cpuhp_lock_release(bool bringup)
{
lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else
static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }
#endif
/**
* struct cpuhp_step - Hotplug state machine step
* @name: Name of the step
* @startup: Startup function of the step
* @teardown: Teardown function of the step
* @cant_stop: Bringup/teardown can't be stopped at this step
* @multi_instance: State has multiple instances which get added afterwards
*/
struct cpuhp_step {
const char *name;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} startup;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} teardown;
/* private: */
struct hlist_head list;
/* public: */
bool cant_stop;
bool multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
return cpuhp_hp_states + state;
}
static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
{
return bringup ? !step->startup.single : !step->teardown.single;
}
/**
* cpuhp_invoke_callback - Invoke the callbacks for a given state
* @cpu: The cpu for which the callback should be invoked
* @state: The state to do callbacks for
* @bringup: True if the bringup callback should be invoked
* @node: For multi-instance, do a single entry callback for install/remove
* @lastp: For multi-instance rollback, remember how far we got
*
* Called from cpu hotplug and from the state register machinery.
*
* Return: %0 on success or a negative errno code
*/
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
bool bringup, struct hlist_node *node,
struct hlist_node **lastp)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct cpuhp_step *step = cpuhp_get_step(state);
int (*cbm)(unsigned int cpu, struct hlist_node *node);
int (*cb)(unsigned int cpu);
int ret, cnt;
if (st->fail == state) {
st->fail = CPUHP_INVALID;
return -EAGAIN;
}
if (cpuhp_step_empty(bringup, step)) {
WARN_ON_ONCE(1);
return 0;
}
if (!step->multi_instance) {
WARN_ON_ONCE(lastp && *lastp);
cb = bringup ? step->startup.single : step->teardown.single;
trace_cpuhp_enter(cpu, st->target, state, cb);
ret = cb(cpu);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
cbm = bringup ? step->startup.multi : step->teardown.multi;
/* Single invocation for instance add/remove */
if (node) {
WARN_ON_ONCE(lastp && *lastp);
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
/* State transition. Invoke on all instances */
cnt = 0;
hlist_for_each(node, &step->list) {
if (lastp && node == *lastp)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
if (ret) {
if (!lastp)
goto err;
*lastp = node;
return ret;
}
cnt++;
}
if (lastp)
*lastp = NULL;
return 0;
err:
/* Rollback the instances if one failed */
cbm = !bringup ? step->startup.multi : step->teardown.multi;
if (!cbm)
return ret;
hlist_for_each(node, &step->list) {
if (!cnt--)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
/*
* Rollback must not fail.
*/
WARN_ON_ONCE(ret);
}
return ret;
}
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
/*
* The extra check for CPUHP_TEARDOWN_CPU is only for documentation
* purposes as that state is handled explicitly in cpu_down.
*/
return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
wait_for_completion(done);
}
static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
complete(done);
}
/*
* The former STARTING/DYING states run with IRQs disabled and must not fail.
*/
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
/* Synchronization state management */
enum cpuhp_sync_state {
SYNC_STATE_DEAD,
SYNC_STATE_KICKED,
SYNC_STATE_SHOULD_DIE,
SYNC_STATE_ALIVE,
SYNC_STATE_SHOULD_ONLINE,
SYNC_STATE_ONLINE,
};
#ifdef CONFIG_HOTPLUG_CORE_SYNC
/**
* cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
* @state: The synchronization state to set
*
* No synchronization point. Just update of the synchronization state, but implies
* a full barrier so that the AP changes are visible before the control CPU proceeds.
*/
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
{
atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
(void)atomic_xchg(st, state);
}
void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
enum cpuhp_sync_state next_state)
{
atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
ktime_t now, end, start = ktime_get();
int sync;
end = start + 10ULL * NSEC_PER_SEC;
sync = atomic_read(st);
while (1) {
if (sync == state) {
if (!atomic_try_cmpxchg(st, &sync, next_state))
continue;
return true;
}
now = ktime_get();
if (now > end) {
/* Timeout. Leave the state unchanged */
return false;
} else if (now - start < NSEC_PER_MSEC) {
/* Poll for one millisecond */
arch_cpuhp_sync_state_poll();
} else {
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
}
sync = atomic_read(st);
}
return true;
}
#else /* CONFIG_HOTPLUG_CORE_SYNC */
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC */
#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
/**
* cpuhp_ap_report_dead - Update synchronization state to DEAD
*
* No synchronization point. Just update of the synchronization state.
*/
void cpuhp_ap_report_dead(void)
{
cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
}
void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
/*
* Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
* because the AP cannot issue complete() at this stage.
*/
static void cpuhp_bp_sync_dead(unsigned int cpu)
{
atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
int sync = atomic_read(st);
do {
/* CPU can have reported dead already. Don't overwrite that! */
if (sync == SYNC_STATE_DEAD)
break;
} while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
/* CPU reached dead state. Invoke the cleanup function */
arch_cpuhp_cleanup_dead_cpu(cpu);
return;
}
/* No further action possible. Emit message and give up. */
pr_err("CPU%u failed to report dead state\n", cpu);
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
#ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
/**
* cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
*
* Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
* for the BP to release it.
*/
void cpuhp_ap_sync_alive(void)
{
atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
/* Wait for the control CPU to release it. */
while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
cpu_relax();
}
static bool cpuhp_can_boot_ap(unsigned int cpu)
{
atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
int sync = atomic_read(st);
again:
switch (sync) {
case SYNC_STATE_DEAD:
/* CPU is properly dead */
break;
case SYNC_STATE_KICKED:
/* CPU did not come up in previous attempt */
break;
case SYNC_STATE_ALIVE:
/* CPU is stuck in cpuhp_ap_sync_alive(). */
break;
default:
/* CPU failed to report online or dead and is in limbo state. */
return false;
}
/* Prepare for booting */
if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
goto again;
return true;
}
void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
/*
* Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
* because the AP cannot issue complete() so early in the bringup.
*/
static int cpuhp_bp_sync_alive(unsigned int cpu)
{
int ret = 0;
if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
return 0;
if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
pr_err("CPU%u failed to report alive state\n", cpu);
ret = -EIO;
}
/* Let the architecture cleanup the kick alive mechanics. */
arch_cpuhp_cleanup_kick_cpu(cpu);
return ret;
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
* The following two APIs (cpu_maps_update_begin/done) must be used when
* attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
*/
void cpu_maps_update_begin(void)
{
mutex_lock(&cpu_add_remove_lock);
}
void cpu_maps_update_done(void)
{
mutex_unlock(&cpu_add_remove_lock);
}
/*
* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
*/
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
static bool cpu_hotplug_offline_disabled __ro_after_init;
void cpus_read_lock(void)
{
percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);
int cpus_read_trylock(void)
{
return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);
void cpus_read_unlock(void)
{
percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
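/*
 * Illustrative sketch only (hypothetical helper, not used by the hotplug
 * core): code that needs a stable view of the online mask while walking the
 * CPUs brackets the walk with cpus_read_lock()/cpus_read_unlock().
 */
static unsigned int __maybe_unused example_count_online_cpus_stable(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		cnt++;
	cpus_read_unlock();

	return cnt;
}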
void cpus_write_lock(void)
{
percpu_down_write(&cpu_hotplug_lock);
}
void cpus_write_unlock(void)
{
percpu_up_write(&cpu_hotplug_lock);
}
void lockdep_assert_cpus_held(void)
{
/*
* We can't have hotplug operations before userspace starts running,
* and some init codepaths will knowingly not take the hotplug lock.
* This is all valid, so mute lockdep until it makes sense to report
* unheld locks.
*/
if (system_state < SYSTEM_RUNNING)
return;
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(lockdep_assert_cpus_held);
#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif
static void lockdep_acquire_cpus_lock(void)
{
rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}
static void lockdep_release_cpus_lock(void)
{
rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}
/* Declare CPU offlining not supported */
void cpu_hotplug_disable_offlining(void)
{
cpu_maps_update_begin();
cpu_hotplug_offline_disabled = true;
cpu_maps_update_done();
}
/*
* Wait for currently running CPU hotplug operations to complete (if any) and
* disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
* the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
* hotplug path before performing hotplug operations. So acquiring that lock
* guarantees mutual exclusion from any currently running hotplug operations.
*/
void cpu_hotplug_disable(void)
{
cpu_maps_update_begin();
cpu_hotplug_disabled++;
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
static void __cpu_hotplug_enable(void)
{
if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
return;
cpu_hotplug_disabled--;
}
void cpu_hotplug_enable(void)
{
cpu_maps_update_begin();
__cpu_hotplug_enable();
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
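/*
 * Illustrative pairing sketch (hypothetical helper, not part of this file):
 * a caller that must keep the set of online CPUs fixed across a longer
 * operation disables hotplug up front and re-enables it afterwards. Every
 * cpu_hotplug_disable() has to be balanced by a cpu_hotplug_enable().
 */
static void __maybe_unused example_run_with_hotplug_disabled(void (*fn)(void))
{
	cpu_hotplug_disable();
	fn();
	cpu_hotplug_enable();
}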
#else
static void lockdep_acquire_cpus_lock(void)
{
}
static void lockdep_release_cpus_lock(void)
{
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
* Architectures that need SMT-specific errata handling during SMT hotplug
* should override this.
*/
void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
static unsigned int cpu_smt_max_threads __ro_after_init;
unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
void __init cpu_smt_disable(bool force)
{
if (!cpu_smt_possible())
return;
if (force) {
pr_info("SMT: Force disabled\n");
cpu_smt_control = CPU_SMT_FORCE_DISABLED;
} else {
pr_info("SMT: disabled\n");
cpu_smt_control = CPU_SMT_DISABLED;
}
cpu_smt_num_threads = 1;
}
/*
* The decision whether SMT is supported can only be done after the full
* CPU identification. Called from architecture code.
*/
void __init cpu_smt_set_num_threads(unsigned int num_threads,
unsigned int max_threads)
{
WARN_ON(!num_threads || (num_threads > max_threads));
if (max_threads == 1)
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
cpu_smt_max_threads = max_threads;
/*
* If SMT has been disabled via the kernel command line or SMT is
* not supported, set cpu_smt_num_threads to 1 for consistency.
* If enabled, take the architecture requested number of threads
* to bring up into account.
*/
if (cpu_smt_control != CPU_SMT_ENABLED)
cpu_smt_num_threads = 1;
else if (num_threads < cpu_smt_num_threads)
cpu_smt_num_threads = num_threads;
}
static int __init smt_cmdline_disable(char *str)
{
cpu_smt_disable(str && !strcmp(str, "force"));
return 0;
}
early_param("nosmt", smt_cmdline_disable);
/*
* For architectures supporting partial SMT states, check if the thread is allowed.
* Otherwise this has already been checked through cpu_smt_max_threads when
* setting the SMT level.
*/
static inline bool cpu_smt_thread_allowed(unsigned int cpu)
{
#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
return topology_smt_thread_allowed(cpu);
#else
return true;
#endif
}
static inline bool cpu_bootable(unsigned int cpu)
{
if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
return true;
/* All CPUs are bootable if controls are not configured */
if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
return true;
/* All CPUs are bootable if CPU is not SMT capable */
if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return true;
if (topology_is_primary_thread(cpu))
return true;
/*
* On x86 it's required to boot all logical CPUs at least once so
* that the init code can get a chance to set CR4.MCE on each
* CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
* core will shut down the machine.
*/
return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}
/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_bootable(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
bool bringup = st->state < target;
st->rollback = false;
st->last = NULL;
st->target = target;
st->single = false;
st->bringup = bringup;
if (cpu_dying(cpu) != !bringup)
set_cpu_dying(cpu, !bringup);
return prev_state;
}
static inline void
cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state prev_state)
{
bool bringup = !st->bringup;
st->target = prev_state;
/*
* Already rolling back. No need to invert the bringup value or to change
* the current state.
*/
if (st->rollback)
return;
st->rollback = true;
/*
* If we have st->last we need to undo partial multi_instance of this
* state first. Otherwise start undo at the previous state.
*/
if (!st->last) {
if (st->bringup)
st->state--;
else
st->state++;
}
st->bringup = bringup;
if (cpu_dying(cpu) != !bringup)
set_cpu_dying(cpu, !bringup);
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
if (!st->single && st->state == st->target)
return;
st->result = 0;
/*
* Make sure the above stores are visible before should_run becomes
* true. Paired with the smp_mb() in cpuhp_thread_fun().
*/
smp_mb();
st->should_run = true;
wake_up_process(st->thread);
wait_for_ap_thread(st, st->bringup);
}
static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state;
int ret;
prev_state = cpuhp_set_state(cpu, st, target);
__cpuhp_kick_ap(st);
if ((ret = st->result)) {
cpuhp_reset_state(cpu, st, prev_state);
__cpuhp_kick_ap(st);
}
return ret;
}
static int bringup_wait_for_ap_online(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
wait_for_ap_thread(st, true);
if (WARN_ON_ONCE((!cpu_online(cpu))))
return -ECANCELED;
/* Unpark the hotplug thread of the target cpu */
kthread_unpark(st->thread);
/*
* SMT soft disabling on X86 requires to bring the CPU out of the
* BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
* CPU marked itself as booted_once in notify_cpu_starting() so the
* cpu_bootable() check will now return false if this is not the
* primary sibling.
*/
if (!cpu_bootable(cpu))
return -ECANCELED;
return 0;
}
#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
static int cpuhp_kick_ap_alive(unsigned int cpu)
{
if (!cpuhp_can_boot_ap(cpu))
return -EAGAIN;
return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
}
static int cpuhp_bringup_ap(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int ret;
/*
* Some architectures have to walk the irq descriptors to
* setup the vector space for the cpu which comes online.
* Prevent irq alloc/free across the bringup.
*/
irq_lock_sparse();
ret = cpuhp_bp_sync_alive(cpu);
if (ret)
goto out_unlock;
ret = bringup_wait_for_ap_online(cpu);
if (ret)
goto out_unlock;
irq_unlock_sparse();
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;
return cpuhp_kick_ap(cpu, st, st->target);
out_unlock:
irq_unlock_sparse();
return ret;
}
#else
static int bringup_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct task_struct *idle = idle_thread_get(cpu);
int ret;
if (!cpuhp_can_boot_ap(cpu))
return -EAGAIN;
/*
* Some architectures have to walk the irq descriptors to
* setup the vector space for the cpu which comes online.
*
* Prevent irq alloc/free across the bringup by acquiring the
* sparse irq lock. Hold it until the upcoming CPU completes the
* startup in cpuhp_online_idle() which allows to avoid
* intermediate synchronization points in the architecture code.
*/
irq_lock_sparse();
ret = __cpu_up(cpu, idle);
if (ret)
goto out_unlock;
ret = cpuhp_bp_sync_alive(cpu);
if (ret)
goto out_unlock;
ret = bringup_wait_for_ap_online(cpu);
if (ret)
goto out_unlock;
irq_unlock_sparse();
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;
return cpuhp_kick_ap(cpu, st, st->target);
out_unlock:
irq_unlock_sparse();
return ret;
}
#endif
static int finish_cpu(unsigned int cpu)
{
struct task_struct *idle = idle_thread_get(cpu);
struct mm_struct *mm = idle->active_mm;
/*
* sched_force_init_mm() ensured the use of &init_mm,
* drop that refcount now that the CPU has stopped.
*/
WARN_ON(mm != &init_mm);
idle->active_mm = NULL;
mmdrop_lazy_tlb(mm);
return 0;
}
/*
* Hotplug state machine related functions
*/
/*
* Get the next state to run. Empty ones will be skipped. Returns true if a
* state must be run.
*
* st->state will be modified ahead of time, to match state_to_run, as if it
* had already run.
*/
static bool cpuhp_next_state(bool bringup,
enum cpuhp_state *state_to_run,
struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
do {
if (bringup) {
if (st->state >= target)
return false;
*state_to_run = ++st->state;
} else {
if (st->state <= target)
return false;
*state_to_run = st->state--;
}
if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
break;
} while (true);
return true;
}
static int __cpuhp_invoke_callback_range(bool bringup,
unsigned int cpu,
struct cpuhp_cpu_state *st,
enum cpuhp_state target,
bool nofail)
{
enum cpuhp_state state;
int ret = 0;
while (cpuhp_next_state(bringup, &state, st, target)) {
int err;
err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
if (!err)
continue;
if (nofail) {
pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
cpu, bringup ? "UP" : "DOWN",
cpuhp_get_step(st->state)->name,
st->state, err);
ret = -1;
} else {
ret = err;
break;
}
}
return ret;
}
static inline int cpuhp_invoke_callback_range(bool bringup,
unsigned int cpu,
struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
}
static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
unsigned int cpu,
struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
}
static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
return true;
/*
* When CPU hotplug is disabled, then taking the CPU down is not
* possible because takedown_cpu() and the architecture and
* subsystem specific mechanisms are not available. So the CPU
* which would be completely unplugged again needs to stay around
* in the current state.
*/
return st->state <= CPUHP_BRINGUP_CPU;
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
int ret = 0;
ret = cpuhp_invoke_callback_range(true, cpu, st, target);
if (ret) {
pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
ret, cpu, cpuhp_get_step(st->state)->name,
st->state);
cpuhp_reset_state(cpu, st, prev_state);
if (can_rollback_cpu(st))
WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
prev_state));
}
return ret;
}
/*
* The cpu hotplug threads manage the bringup and teardown of the cpus
*/
static int cpuhp_should_run(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
return st->should_run;
}
/*
* Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
* callbacks when a state gets [un]installed at runtime.
*
* Each invocation of this function by the smpboot thread does a single AP
* state callback.
*
* It has 3 modes of operation:
* - single: runs st->cb_state
* - up: runs ++st->state, while st->state < st->target
* - down: runs st->state--, while st->state > st->target
*
* When complete or on error, should_run is cleared and the completion is fired.
*/
static void cpuhp_thread_fun(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
bool bringup = st->bringup;
enum cpuhp_state state;
if (WARN_ON_ONCE(!st->should_run))
return;
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();
/*
* The BP holds the hotplug lock, but we're now running on the AP,
* ensure that anybody asserting the lock is held, will actually find
* it so.
*/
lockdep_acquire_cpus_lock();
cpuhp_lock_acquire(bringup);
if (st->single) {
state = st->cb_state;
st->should_run = false;
} else {
st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
if (!st->should_run)
goto end;
}
WARN_ON_ONCE(!cpuhp_is_ap_state(state));
if (cpuhp_is_atomic_state(state)) {
local_irq_disable();
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
local_irq_enable();
/*
* STARTING/DYING must not fail!
*/
WARN_ON_ONCE(st->result);
} else {
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
}
if (st->result) {
/*
* If we fail on a rollback, we're up a creek without a
* paddle, no way forward, no way back. We lose, thanks for
* playing.
*/
WARN_ON_ONCE(st->rollback);
st->should_run = false;
}
end:
cpuhp_lock_release(bringup);
lockdep_release_cpus_lock();
if (!st->should_run)
complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int ret;
if (!cpu_online(cpu))
return 0;
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
/*
* If we are up and running, use the hotplug thread. For early calls
* we invoke the thread function directly.
*/
if (!st->thread)
return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
st->rollback = false;
st->last = NULL;
st->node = node;
st->bringup = bringup;
st->cb_state = state;
st->single = true;
__cpuhp_kick_ap(st);
/*
* If we failed and did a partial, do a rollback.
*/
if ((ret = st->result) && st->last) {
st->rollback = true;
st->bringup = !bringup;
__cpuhp_kick_ap(st);
}
/*
* Clean up the leftovers so the next hotplug operation won't use stale
* data.
*/
st->node = st->last = NULL;
return ret;
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
enum cpuhp_state prev_state = st->state;
int ret;
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
ret = cpuhp_kick_ap(cpu, st, st->target);
trace_cpuhp_exit(cpu, st->state, prev_state, ret);
return ret;
}
static struct smp_hotplug_thread cpuhp_threads = {
.store = &cpuhp_state.thread,
.thread_should_run = cpuhp_should_run,
.thread_fn = cpuhp_thread_fun,
.thread_comm = "cpuhp/%u",
.selfparking = true,
};
static __init void cpuhp_init_state(void)
{
struct cpuhp_cpu_state *st;
int cpu;
for_each_possible_cpu(cpu) {
st = per_cpu_ptr(&cpuhp_state, cpu);
init_completion(&st->done_up);
init_completion(&st->done_down);
}
}
void __init cpuhp_threads_init(void)
{
cpuhp_init_state();
BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
*
* This function walks all processes, finds a valid mm struct for each one and
* then clears a corresponding bit in mm's cpumask. While this all sounds
* trivial, there are various non-obvious corner cases, which this function
* tries to solve in a safe manner.
*
* Also note that the function uses a somewhat relaxed locking scheme, so it may
* be called only for an already offlined CPU.
*/
void clear_tasks_mm_cpumask(int cpu)
{
struct task_struct *p;
/*
* This function is called after the cpu is taken down and marked
* offline, so it's not like new tasks will ever get this cpu set in
* their mm mask. -- Peter Zijlstra
* Thus, we may use rcu_read_lock() here, instead of grabbing
* full-fledged tasklist_lock.
*/
WARN_ON(cpu_online(cpu));
rcu_read_lock();
for_each_process(p) {
struct task_struct *t;
/*
* Main thread might exit, but other threads may still have
* a valid mm. Find one.
*/
t = find_lock_task_mm(p);
if (!t)
continue;
arch_clear_mm_cpumask_cpu(cpu, t->mm);
task_unlock(t);
}
rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
int err, cpu = smp_processor_id();
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
return err;
/*
* Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
* down, that the current state is CPUHP_TEARDOWN_CPU - 1.
*/
WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
/*
* Invoke the former CPU_DYING callbacks. DYING must not fail!
*/
cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
/* Park the stopper thread */
stop_machine_park(cpu);
return 0;
}
static int takedown_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
/* Park the smpboot threads */
kthread_park(st->thread);
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
* interrupt affinities.
*/
irq_lock_sparse();
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
if (err) {
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
kthread_unpark(st->thread);
return err;
}
BUG_ON(cpu_online(cpu));
/*
* The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
* all runnable tasks from the CPU, there's only the idle task left now
* that the migration thread is done doing the stop_machine thing.
*
* Wait for the stop thread to go away.
*/
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
hotplug_cpu__broadcast_tick_pull(cpu);
/* This actually kills the CPU. */
__cpu_die(cpu);
cpuhp_bp_sync_dead(cpu);
lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));
/*
* Callbacks must be re-integrated right away to the RCU state machine.
* Otherwise an RCU callback could block a further teardown function
* waiting for its completion.
*/
rcutree_migrate_callbacks(cpu);
return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
struct cpuhp_cpu_state *st = arg;
complete_ap_thread(st, false);
}
void cpuhp_report_idle_dead(void)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
BUG_ON(st->state != CPUHP_AP_OFFLINE);
tick_assert_timekeeping_handover();
rcutree_report_cpu_dead();
st->state = CPUHP_AP_IDLE_DEAD;
/*
* We cannot call complete after rcutree_report_cpu_dead() so we delegate it
* to an online cpu.
*/
smp_call_function_single(cpumask_first(cpu_online_mask),
cpuhp_complete_idle_dead, st, 0);
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
int ret = 0;
ret = cpuhp_invoke_callback_range(false, cpu, st, target);
if (ret) {
pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
ret, cpu, cpuhp_get_step(st->state)->name,
st->state);
cpuhp_reset_state(cpu, st, prev_state);
if (st->state < prev_state)
WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
prev_state));
}
return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
enum cpuhp_state target)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
if (num_online_cpus() == 1)
return -EBUSY;
if (!cpu_present(cpu))
return -EINVAL;
cpus_write_lock();
cpuhp_tasks_frozen = tasks_frozen;
prev_state = cpuhp_set_state(cpu, st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread.
*/
if (st->state > CPUHP_TEARDOWN_CPU) {
st->target = max((int)target, CPUHP_TEARDOWN_CPU);
ret = cpuhp_kick_ap_work(cpu);
/*
* The AP side has done the error rollback already. Just
* return the error code.
*/
if (ret)
goto out;
/*
* We might have stopped while still in the range of the AP
* hotplug thread. Nothing to do anymore.
*/
if (st->state > CPUHP_TEARDOWN_CPU)
goto out;
st->target = target;
}
/*
* The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target);
if (ret && st->state < prev_state) {
if (st->state == CPUHP_TEARDOWN_CPU) {
cpuhp_reset_state(cpu, st, prev_state);
__cpuhp_kick_ap(st);
} else {
WARN(1, "DEAD callback error for CPU%d", cpu);
}
}
out:
cpus_write_unlock();
arch_smt_update();
return ret;
}
struct cpu_down_work {
unsigned int cpu;
enum cpuhp_state target;
};
static long __cpu_down_maps_locked(void *arg)
{
struct cpu_down_work *work = arg;
return _cpu_down(work->cpu, 0, work->target);
}
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
struct cpu_down_work work = { .cpu = cpu, .target = target, };
/*
* If the platform does not support hotplug, report it explicitly to
* differentiate it from a transient offlining failure.
*/
if (cpu_hotplug_offline_disabled)
return -EOPNOTSUPP;
if (cpu_hotplug_disabled)
return -EBUSY;
/*
* Ensure that the control task does not run on the to be offlined
* CPU to prevent a deadlock against cfs_b->period_timer.
* Also keep at least one housekeeping cpu onlined to avoid generating
* an empty sched_domain span.
*/
for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
if (cpu != work.cpu)
return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
}
return -EBUSY;
}
static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
int err;
cpu_maps_update_begin();
err = cpu_down_maps_locked(cpu, target);
cpu_maps_update_done();
return err;
}
/**
* cpu_device_down - Bring down a cpu device
* @dev: Pointer to the cpu device to offline
*
* This function is meant to be used by device core cpu subsystem only.
*
* Other subsystems should use remove_cpu() instead.
*
* Return: %0 on success or a negative errno code
*/
int cpu_device_down(struct device *dev)
{
return cpu_down(dev->id, CPUHP_OFFLINE);
}
int remove_cpu(unsigned int cpu)
{
int ret;
lock_device_hotplug();
ret = device_offline(get_cpu_device(cpu));
unlock_device_hotplug();
return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);
void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
unsigned int cpu;
int error;
cpu_maps_update_begin();
/*
* Make certain the cpu I'm about to reboot on is online.
*
* This is in line with what migrate_to_reboot_cpu() already does.
*/
if (!cpu_online(primary_cpu))
primary_cpu = cpumask_first(cpu_online_mask);
for_each_online_cpu(cpu) {
if (cpu == primary_cpu)
continue;
error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
if (error) {
pr_err("Failed to offline CPU%d - error=%d",
cpu, error);
break;
}
}
/*
* Ensure all but the reboot CPU are offline.
*/
BUG_ON(num_online_cpus() > 1);
/*
* Make sure the CPUs won't be enabled by someone else after this
* point. Kexec will reboot to a new kernel shortly resetting
* everything along the way.
*/
cpu_hotplug_disabled++;
cpu_maps_update_done();
}
#else
#define takedown_cpu NULL
#endif /*CONFIG_HOTPLUG_CPU*/
/**
* notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
* @cpu: cpu that just started
*
* It must be called by the arch code on the new cpu, before the new cpu
* enables interrupts and before the "boot" cpu returns from __cpu_up().
*/
void notify_cpu_starting(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
cpumask_set_cpu(cpu, &cpus_booted_once_mask);
/*
* STARTING must not fail!
*/
cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
}
/*
* Called from the idle task. Wake up the controlling task which brings the
* hotplug thread of the upcoming CPU up and then delegates the rest of the
* online bringup to the hotplug thread.
*/
void cpuhp_online_idle(enum cpuhp_state state)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
/* Happens for the boot cpu */
if (state != CPUHP_AP_ONLINE_IDLE)
return;
cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
/*
* Unpark the stopper thread before we start the idle loop (and start
* scheduling); this ensures the stopper task is always available.
*/
stop_machine_unpark(smp_processor_id());
st->state = CPUHP_AP_ONLINE_IDLE;
complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct task_struct *idle;
int ret = 0;
cpus_write_lock();
if (!cpu_present(cpu)) {
ret = -EINVAL;
goto out;
}
/*
* The caller of cpu_up() might have raced with another
* caller. Nothing to do.
*/
if (st->state >= target)
goto out;
if (st->state == CPUHP_OFFLINE) {
/* Let it fail before we try to bring the cpu up */
idle = idle_thread_get(cpu);
if (IS_ERR(idle)) {
ret = PTR_ERR(idle);
goto out;
}
/*
* Reset stale stack state from the last time this CPU was online.
*/
scs_task_reset(idle);
kasan_unpoison_task_stack(idle);
}
cpuhp_tasks_frozen = tasks_frozen;
cpuhp_set_state(cpu, st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread once more.
*/
if (st->state > CPUHP_BRINGUP_CPU) {
ret = cpuhp_kick_ap_work(cpu);
/*
* The AP side has done the error rollback already. Just
* return the error code.
*/
if (ret)
goto out;
}
/*
* Try to reach the target state. We max out on the BP at
* CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
* responsible for bringing it up to the target state.
*/
target = min((int)target, CPUHP_BRINGUP_CPU);
ret = cpuhp_up_callbacks(cpu, st, target);
out:
cpus_write_unlock();
arch_smt_update();
return ret;
}
static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
int err = 0;
if (!cpu_possible(cpu)) {
pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
cpu);
return -EINVAL;
}
err = try_online_node(cpu_to_node(cpu));
if (err)
return err;
cpu_maps_update_begin();
if (cpu_hotplug_disabled) {
err = -EBUSY;
goto out;
}
if (!cpu_bootable(cpu)) {
err = -EPERM;
goto out;
}
err = _cpu_up(cpu, 0, target);
out:
cpu_maps_update_done();
return err;
}
/**
* cpu_device_up - Bring up a cpu device
* @dev: Pointer to the cpu device to online
*
* This function is meant to be used by device core cpu subsystem only.
*
* Other subsystems should use add_cpu() instead.
*
* Return: %0 on success or a negative errno code
*/
int cpu_device_up(struct device *dev)
{
return cpu_up(dev->id, CPUHP_ONLINE);
}
int add_cpu(unsigned int cpu)
{
int ret;
lock_device_hotplug();
ret = device_online(get_cpu_device(cpu));
unlock_device_hotplug();
return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);
/**
* bringup_hibernate_cpu - Bring up the CPU that we hibernated on
* @sleep_cpu: The cpu we hibernated on and should be brought up.
*
* On some architectures like arm64, we can hibernate on any CPU, but on
* wake up the CPU we hibernated on might be offline as a side effect of
* using maxcpus= for example.
*
* Return: %0 on success or a negative errno code
*/
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
int ret;
if (!cpu_online(sleep_cpu)) {
pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
if (ret) {
pr_err("Failed to bring hibernate-CPU up!\n");
return ret;
}
}
return 0;
}
static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
enum cpuhp_state target)
{
unsigned int cpu;
for_each_cpu(cpu, mask) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
/*
* If this failed then cpu_up() might have only
* rolled back to CPUHP_BP_KICK_AP for the final
* online. Clean it up. NOOP if already rolled back.
*/
WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
}
if (!--ncpus)
break;
}
}
#ifdef CONFIG_HOTPLUG_PARALLEL
static bool __cpuhp_parallel_bringup __ro_after_init = true;
static int __init parallel_bringup_parse_param(char *arg)
{
return kstrtobool(arg, &__cpuhp_parallel_bringup);
}
early_param("cpuhp.parallel", parallel_bringup_parse_param);
#ifdef CONFIG_HOTPLUG_SMT
static inline bool cpuhp_smt_aware(void)
{
return cpu_smt_max_threads > 1;
}
static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
return cpu_primary_thread_mask;
}
#else
static inline bool cpuhp_smt_aware(void)
{
return false;
}
static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
return cpu_none_mask;
}
#endif
bool __weak arch_cpuhp_init_parallel_bringup(void)
{
return true;
}
/*
* On architectures which have enabled parallel bringup this invokes all BP
* prepare states for each of the to be onlined APs first. The last state
* sends the startup IPI to the APs. The APs proceed through the low level
* bringup code in parallel and then wait for the control CPU to release
* them one by one for the final onlining procedure.
*
* This avoids waiting for each AP to respond to the startup IPI in
* CPUHP_BRINGUP_CPU.
*/
static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
{
const struct cpumask *mask = cpu_present_mask;
if (__cpuhp_parallel_bringup)
__cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
if (!__cpuhp_parallel_bringup)
return false;
if (cpuhp_smt_aware()) {
const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
static struct cpumask tmp_mask __initdata;
/*
* For various reasons x86 requires that the SMT siblings are not
* brought up while the primary thread performs a microcode update.
* Bring the primary threads up first.
*/
cpumask_and(&tmp_mask, mask, pmask);
cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
/* Account for the online CPUs */
ncpus -= num_online_cpus();
if (!ncpus)
return true;
/* Create the mask for secondary CPUs */
cpumask_andnot(&tmp_mask, mask, pmask);
mask = &tmp_mask;
}
/* Bring the not-yet started CPUs up */
cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
return true;
}
#else
static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
#endif /* CONFIG_HOTPLUG_PARALLEL */
void __init bringup_nonboot_cpus(unsigned int max_cpus)
{
if (!max_cpus)
return;
/* Try parallel bringup optimization if enabled */
if (cpuhp_bringup_cpus_parallel(max_cpus))
return;
/* Full per CPU serialized bringup */
cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
}
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
int freeze_secondary_cpus(int primary)
{
int cpu, error = 0;
cpu_maps_update_begin();
if (primary == -1) {
primary = cpumask_first(cpu_online_mask);
if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
primary = housekeeping_any_cpu(HK_TYPE_TIMER);
} else {
if (!cpu_online(primary))
primary = cpumask_first(cpu_online_mask);
}
/*
* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
pr_info("Disabling non-boot CPUs ...\n");
for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
if (!cpu_online(cpu) || cpu == primary)
continue;
if (pm_wakeup_pending()) {
pr_info("Wakeup pending. Abort CPU freeze\n");
error = -EBUSY;
break;
}
trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
if (!error)
cpumask_set_cpu(cpu, frozen_cpus);
else {
pr_err("Error taking CPU%d down: %d\n", cpu, error);
break;
}
}
if (!error)
BUG_ON(num_online_cpus() > 1);
else
pr_err("Non-boot CPUs are not disabled\n");
/*
* Make sure the CPUs won't be enabled by someone else. We need to do
* this even in case of failure as all freeze_secondary_cpus() users are
* supposed to do thaw_secondary_cpus() on the failure path.
*/
cpu_hotplug_disabled++;
cpu_maps_update_done();
return error;
}
void __weak arch_thaw_secondary_cpus_begin(void)
{
}
void __weak arch_thaw_secondary_cpus_end(void)
{
}
void thaw_secondary_cpus(void)
{
int cpu, error;
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
__cpu_hotplug_enable();
if (cpumask_empty(frozen_cpus))
goto out;
pr_info("Enabling non-boot CPUs ...\n");
arch_thaw_secondary_cpus_begin();
for_each_cpu(cpu, frozen_cpus) {
trace_suspend_resume(TPS("CPU_ON"), cpu, true);
error = _cpu_up(cpu, 1, CPUHP_ONLINE);
trace_suspend_resume(TPS("CPU_ON"), cpu, false);
if (!error) {
pr_info("CPU%d is up\n", cpu);
continue;
}
pr_warn("Error taking CPU%d up: %d\n", cpu, error);
}
arch_thaw_secondary_cpus_end();
cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
return -ENOMEM;
return 0;
}
core_initcall(alloc_frozen_cpus);
/*
* When callbacks for CPU hotplug notifications are being executed, we must
* ensure that the state of the system with respect to the tasks being frozen
* or not, as reported by the notification, remains unchanged *throughout the
* duration* of the execution of the callbacks.
* Hence we need to prevent the freezer from racing with regular CPU hotplug.
*
* This synchronization is implemented by mutually excluding regular CPU
* hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
* Hibernate notifications.
*/
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
unsigned long action, void *ptr)
{
switch (action) {
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
cpu_hotplug_disable();
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
cpu_hotplug_enable();
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
/*
* cpu_hotplug_pm_callback has a higher priority than the x86
* bsp_pm_callback, which relies on cpu_hotplug_pm_callback
* having disabled cpu hotplug to avoid a cpu hotplug race.
*/
pm_notifier(cpu_hotplug_pm_callback, 0);
return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */
int __boot_cpu_id;
#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
[CPUHP_OFFLINE] = {
.name = "offline",
.startup.single = NULL,
.teardown.single = NULL,
},
#ifdef CONFIG_SMP
[CPUHP_CREATE_THREADS]= {
.name = "threads:prepare",
.startup.single = smpboot_create_threads,
.teardown.single = NULL,
.cant_stop = true,
},
[CPUHP_RANDOM_PREPARE] = {
.name = "random:prepare",
.startup.single = random_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_WORKQUEUE_PREP] = {
.name = "workqueue:prepare",
.startup.single = workqueue_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_HRTIMERS_PREPARE] = {
.name = "hrtimers:prepare",
.startup.single = hrtimers_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_SMPCFD_PREPARE] = {
.name = "smpcfd:prepare",
.startup.single = smpcfd_prepare_cpu,
.teardown.single = smpcfd_dead_cpu,
},
[CPUHP_RELAY_PREPARE] = {
.name = "relay:prepare",
.startup.single = relay_prepare_cpu,
.teardown.single = NULL,
},
[CPUHP_RCUTREE_PREP] = {
.name = "RCU/tree:prepare",
.startup.single = rcutree_prepare_cpu,
.teardown.single = rcutree_dead_cpu,
},
/*
* On the tear-down path, timers_dead_cpu() must be invoked
* before blk_mq_queue_reinit_notify() from notify_dead(),
* otherwise an RCU stall occurs.
*/
[CPUHP_TIMERS_PREPARE] = {
.name = "timers:prepare",
.startup.single = timers_prepare_cpu,
.teardown.single = timers_dead_cpu,
},
#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
/*
* Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until
* the next step will release it.
*/
[CPUHP_BP_KICK_AP] = {
.name = "cpu:kick_ap",
.startup.single = cpuhp_kick_ap_alive,
},
/*
* Waits for the AP to reach cpuhp_ap_sync_alive() and then
* releases it for the complete bringup.
*/
[CPUHP_BRINGUP_CPU] = {
.name = "cpu:bringup",
.startup.single = cpuhp_bringup_ap,
.teardown.single = finish_cpu,
.cant_stop = true,
},
#else
/*
* All-in-one CPU bringup state which includes the kick alive.
*/
[CPUHP_BRINGUP_CPU] = {
.name = "cpu:bringup",
.startup.single = bringup_cpu,
.teardown.single = finish_cpu,
.cant_stop = true,
},
#endif
/* Final state before CPU kills itself */
[CPUHP_AP_IDLE_DEAD] = {
.name = "idle:dead",
},
/*
* Last state before CPU enters the idle loop to die. Transient state
* for synchronization.
*/
[CPUHP_AP_OFFLINE] = {
.name = "ap:offline",
.cant_stop = true,
},
/* First state is scheduler control. Interrupts are disabled */
[CPUHP_AP_SCHED_STARTING] = {
.name = "sched:starting",
.startup.single = sched_cpu_starting,
.teardown.single = sched_cpu_dying,
},
[CPUHP_AP_RCUTREE_DYING] = {
.name = "RCU/tree:dying",
.startup.single = NULL,
.teardown.single = rcutree_dying_cpu,
},
[CPUHP_AP_SMPCFD_DYING] = {
.name = "smpcfd:dying",
.startup.single = NULL,
.teardown.single = smpcfd_dying_cpu,
},
[CPUHP_AP_HRTIMERS_DYING] = {
.name = "hrtimers:dying",
.startup.single = hrtimers_cpu_starting,
.teardown.single = hrtimers_cpu_dying,
},
[CPUHP_AP_TICK_DYING] = {
.name = "tick:dying",
.startup.single = NULL,
.teardown.single = tick_cpu_dying,
},
/*
 * Entry state on starting. Interrupts enabled from here on. Transient
 * state for synchronization.
 */
[CPUHP_AP_ONLINE] = {
.name = "ap:online",
},
/*
* Handled on control processor until the plugged processor manages
* this itself.
*/
[CPUHP_TEARDOWN_CPU] = {
.name = "cpu:teardown",
.startup.single = NULL,
.teardown.single = takedown_cpu,
.cant_stop = true,
},
[CPUHP_AP_SCHED_WAIT_EMPTY] = {
.name = "sched:waitempty",
.startup.single = NULL,
.teardown.single = sched_cpu_wait_empty,
},
/* Handle smpboot threads park/unpark */
[CPUHP_AP_SMPBOOT_THREADS] = {
.name = "smpboot/threads:online",
.startup.single = smpboot_unpark_threads,
.teardown.single = smpboot_park_threads,
},
[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
.name = "irq/affinity:online",
.startup.single = irq_affinity_online_cpu,
.teardown.single = NULL,
},
[CPUHP_AP_PERF_ONLINE] = {
.name = "perf:online",
.startup.single = perf_event_init_cpu,
.teardown.single = perf_event_exit_cpu,
},
[CPUHP_AP_WATCHDOG_ONLINE] = {
.name = "lockup_detector:online",
.startup.single = lockup_detector_online_cpu,
.teardown.single = lockup_detector_offline_cpu,
},
[CPUHP_AP_WORKQUEUE_ONLINE] = {
.name = "workqueue:online",
.startup.single = workqueue_online_cpu,
.teardown.single = workqueue_offline_cpu,
},
[CPUHP_AP_RANDOM_ONLINE] = {
.name = "random:online",
.startup.single = random_online_cpu,
.teardown.single = NULL,
},
[CPUHP_AP_RCUTREE_ONLINE] = {
.name = "RCU/tree:online",
.startup.single = rcutree_online_cpu,
.teardown.single = rcutree_offline_cpu,
},
#endif
/*
* The dynamically registered state space is here
*/
#ifdef CONFIG_SMP
/* Last state is scheduler control setting the cpu active */
[CPUHP_AP_ACTIVE] = {
.name = "sched:active",
.startup.single = sched_cpu_activate,
.teardown.single = sched_cpu_deactivate,
},
#endif
/* CPU is fully up and running. */
[CPUHP_ONLINE] = {
.name = "online",
.startup.single = NULL,
.teardown.single = NULL,
},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
return -EINVAL;
return 0;
}
/*
* Returns a free slot for dynamic state assignment in the requested range
* (CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN). The states are protected by
* the cpuhp_state_mutex and an empty slot is identified by having no name
* assigned.
*/
static int cpuhp_reserve_state(enum cpuhp_state state)
{
enum cpuhp_state i, end;
struct cpuhp_step *step;
switch (state) {
case CPUHP_AP_ONLINE_DYN:
step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
end = CPUHP_AP_ONLINE_DYN_END;
break;
case CPUHP_BP_PREPARE_DYN:
step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
end = CPUHP_BP_PREPARE_DYN_END;
break;
default:
return -EINVAL;
}
for (i = state; i <= end; i++, step++) {
if (!step->name)
return i;
}
WARN(1, "No more dynamic states available for CPU hotplug\n");
return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
/* (Un)Install the callbacks for further cpu hotplug operations */
struct cpuhp_step *sp;
int ret = 0;
/*
* If name is NULL, then the state gets removed.
*
* CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
* the first allocation from these dynamic ranges, so the removal
* would trigger a new allocation and clear the wrong (already
* empty) state, leaving the callbacks of the to be cleared state
* dangling, which causes wreckage on the next hotplug operation.
*/
if (name && (state == CPUHP_AP_ONLINE_DYN ||
state == CPUHP_BP_PREPARE_DYN)) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
return ret;
state = ret;
}
sp = cpuhp_get_step(state);
if (name && sp->name)
return -EBUSY;
sp->startup.single = startup;
sp->teardown.single = teardown;
sp->name = name;
sp->multi_instance = multi_instance;
INIT_HLIST_HEAD(&sp->list);
return ret;
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
return cpuhp_get_step(state)->teardown.single;
}
/*
* Call the startup/teardown function for a step either on the AP or
* on the current CPU.
*/
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int ret;
/*
* If there's nothing to do, we're done.
* Relies on the union for multi_instance.
*/
if (cpuhp_step_empty(bringup, sp))
return 0;
/*
* The non-AP-bound callbacks can fail on bringup. On teardown,
* e.g. module removal, we crash for now.
*/
#ifdef CONFIG_SMP
if (cpuhp_is_ap_state(state))
ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
BUG_ON(ret && !bringup);
return ret;
}
/*
* Called from __cpuhp_setup_state on a recoverable failure.
*
* Note: The teardown callbacks for rollback are not allowed to fail!
*/
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
struct hlist_node *node)
{
int cpu;
/* Roll back the already executed steps on the other cpus */
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpu >= failedcpu)
break;
/* Did we invoke the startup call on that cpu ? */
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, node);
}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
struct hlist_node *node,
bool invoke)
{
struct cpuhp_step *sp;
int cpu;
int ret;
lockdep_assert_cpus_held();
sp = cpuhp_get_step(state);
if (sp->multi_instance == false)
return -EINVAL;
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi)
goto add_node;
/*
* Try to call the startup callback for each present cpu
* depending on the hotplug state of the cpu.
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate < state)
continue;
ret = cpuhp_issue_call(cpu, state, true, node);
if (ret) {
if (sp->teardown.multi)
cpuhp_rollback_install(cpu, state, node);
goto unlock;
}
}
add_node:
ret = 0;
hlist_add_head(node, &sp->list);
unlock:
mutex_unlock(&cpuhp_state_mutex);
return ret;
}
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke)
{
int ret;
cpus_read_lock();
ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
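/*
 * Illustrative sketch only (struct foo_dev and the foo_* callbacks are made
 * up): multi-instance users register the state once with the
 * cpuhp_setup_state_multi() wrapper from <linux/cpuhotplug.h> and then add a
 * per-object instance via cpuhp_state_add_instance(), which ends up in the
 * function above:
 *
 *	struct foo_dev {
 *		struct hlist_node node;
 *	};
 *
 *	static int foo_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct foo_dev *foo = hlist_entry(node, struct foo_dev, node);
 *
 *		// bring this instance (@foo) up on @cpu
 *		return 0;
 *	}
 *
 *	// once, at subsystem init:
 *	//	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *	//					"foo:online", foo_online, NULL);
 *	// per device:
 *	//	cpuhp_state_add_instance(state, &foo->node);
 */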
/**
* __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
* @state: The state to setup
* @name: Name of the step
* @invoke: If true, the startup function is invoked for cpus where
* cpu state >= @state
* @startup: startup callback function
* @teardown: teardown callback function
* @multi_instance: State is set up for multiple instances which get
* added afterwards.
*
* The caller needs to hold cpus read locked while calling this function.
* Return:
* On success:
* Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
* 0 for all other states
* On failure: proper (negative) error code
*/
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
int cpu, ret = 0;
bool dynstate;
lockdep_assert_cpus_held();
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
if (ret > 0 && dynstate) {
state = ret;
ret = 0;
}
if (ret || !invoke || !startup)
goto out;
/*
* Try to call the startup callback for each present cpu
* depending on the hotplug state of the cpu.
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate < state)
continue;
ret = cpuhp_issue_call(cpu, state, true, NULL);
if (ret) {
if (teardown)
cpuhp_rollback_install(cpu, state, NULL);
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
goto out;
}
}
out:
mutex_unlock(&cpuhp_state_mutex);
/*
* If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
* return the dynamically allocated state in case of success.
*/
if (!ret && dynstate)
return state;
return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
int ret;
cpus_read_lock();
ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
teardown, multi_instance);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
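/*
 * Illustrative sketch only (the foo_* names are made up): most callers use
 * the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h> with
 * CPUHP_AP_ONLINE_DYN and remember the returned dynamic state so that it can
 * later be handed to cpuhp_remove_state():
 *
 *	static enum cpuhp_state foo_hp_state;
 *
 *	static int foo_cpu_online(unsigned int cpu) { return 0; }
 *	static int foo_cpu_offline(unsigned int cpu) { return 0; }
 *
 *	static int __init foo_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
 *					foo_cpu_online, foo_cpu_offline);
 *		if (ret < 0)
 *			return ret;
 *		foo_hp_state = ret;
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		cpuhp_remove_state(foo_hp_state);
 *	}
 */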
int __cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node, bool invoke)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int cpu;
BUG_ON(cpuhp_cb_check(state));
if (!sp->multi_instance)
return -EINVAL;
cpus_read_lock();
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
* Call the teardown callback for each present cpu depending
* on the hotplug state of the cpu. This function is not
* allowed to fail currently!
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, node);
}
remove:
hlist_del(node);
mutex_unlock(&cpuhp_state_mutex);
cpus_read_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
* __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
* @state: The state to remove
* @invoke: If true, the teardown function is invoked for cpus where
* cpu state >= @state
*
* The caller needs to hold cpus read locked while calling this function.
* The teardown callback is currently not allowed to fail. Think
* about module removal!
*/
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int cpu;
BUG_ON(cpuhp_cb_check(state));
lockdep_assert_cpus_held();
mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) {
WARN(!hlist_empty(&sp->list),
"Error: Removing state %d which has instances left.\n",
state);
goto remove;
}
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
* Call the teardown callback for each present cpu depending
* on the hotplug state of the cpu. This function is not
* allowed to fail currently!
*/
for_each_present_cpu(cpu) {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int cpustate = st->state;
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, NULL);
}
remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
cpus_read_lock();
__cpuhp_remove_state_cpuslocked(state, invoke);
cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
dev->offline = true;
/* Tell user space about the state change */
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}
static void cpuhp_online_cpu_device(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
dev->offline = false;
/* Tell user space about the state change */
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
cpu_maps_update_begin();
for_each_online_cpu(cpu) {
if (topology_is_primary_thread(cpu))
continue;
/*
* Disable can be called with CPU_SMT_ENABLED when changing
* from a higher to lower number of SMT threads per core.
*/
if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
continue;
ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
if (ret)
break;
/*
* As this needs to hold the cpu maps lock it's impossible
* to call device_offline() because that ends up calling
* cpu_down() which takes cpu maps lock. cpu maps lock
* needs to be held as this might race against in-kernel
* abusers of the hotplug machinery (thermal management).
*
* So nothing would update device:offline state. That would
* leave the sysfs entry stale and prevent onlining after
* smt control has been changed to 'off' again. This is
* called under the sysfs hotplug lock, so it is properly
* serialized against the regular offline usage.
*/
cpuhp_offline_cpu_device(cpu);
}
if (!ret)
cpu_smt_control = ctrlval;
cpu_maps_update_done();
return ret;
}
/* Check if the core a CPU belongs to is online */
#if !defined(topology_is_core_online)
static inline bool topology_is_core_online(unsigned int cpu)
{
return true;
}
#endif
int cpuhp_smt_enable(void)
{
int cpu, ret = 0;
cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED;
for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
continue;
if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
continue;
ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
if (ret)
break;
/* See comment in cpuhp_smt_disable() */
cpuhp_online_cpu_device(cpu);
}
cpu_maps_update_done();
return ret;
}
#endif
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR_RO(state);
static ssize_t target_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
struct cpuhp_step *sp;
int target, ret;
ret = kstrtoint(buf, 10, &target);
if (ret)
return ret;
#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
return -EINVAL;
#else
if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
return -EINVAL;
#endif
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
mutex_lock(&cpuhp_state_mutex);
sp = cpuhp_get_step(target);
ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
mutex_unlock(&cpuhp_state_mutex);
if (ret)
goto out;
if (st->state < target)
ret = cpu_up(dev->id, target);
else if (st->state > target)
ret = cpu_down(dev->id, target);
else if (WARN_ON(st->target != target))
st->target = target;
out:
unlock_device_hotplug();
return ret ? ret : count;
}
static ssize_t target_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR_RW(target);
static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
struct cpuhp_step *sp;
int fail, ret;
ret = kstrtoint(buf, 10, &fail);
if (ret)
return ret;
if (fail == CPUHP_INVALID) {
st->fail = fail;
return count;
}
if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
return -EINVAL;
/*
* Cannot fail STARTING/DYING callbacks.
*/
if (cpuhp_is_atomic_state(fail))
return -EINVAL;
/*
* DEAD callbacks cannot fail...
* ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
* triggers the STARTING callbacks, so a failure in this state would
* hinder rollback.
*/
if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
return -EINVAL;
/*
* Cannot fail anything that doesn't have callbacks.
*/
mutex_lock(&cpuhp_state_mutex);
sp = cpuhp_get_step(fail);
if (!sp->startup.single && !sp->teardown.single)
ret = -EINVAL;
mutex_unlock(&cpuhp_state_mutex);
if (ret)
return ret;
st->fail = fail;
return count;
}
static ssize_t fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
return sprintf(buf, "%d\n", st->fail);
}
static DEVICE_ATTR_RW(fail);
static struct attribute *cpuhp_cpu_attrs[] = {
&dev_attr_state.attr,
&dev_attr_target.attr,
&dev_attr_fail.attr,
NULL
};
static const struct attribute_group cpuhp_cpu_attr_group = {
.attrs = cpuhp_cpu_attrs,
.name = "hotplug",
};
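/*
 * The attribute group above is expected to show up per CPU under sysfs, e.g.
 * /sys/devices/system/cpu/cpuN/hotplug/{state,target,fail}; see the CPU
 * hotplug documentation for the authoritative ABI description.
 */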
static ssize_t states_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t cur, res = 0;
int i;
mutex_lock(&cpuhp_state_mutex);
for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
struct cpuhp_step *sp = cpuhp_get_step(i);
if (sp->name) {
cur = sprintf(buf, "%3d: %s\n", i, sp->name);
buf += cur;
res += cur;
}
}
mutex_unlock(&cpuhp_state_mutex);
return res;
}
static DEVICE_ATTR_RO(states);
static struct attribute *cpuhp_cpu_root_attrs[] = {
&dev_attr_states.attr,
NULL
};
static const struct attribute_group cpuhp_cpu_root_attr_group = {
.attrs = cpuhp_cpu_root_attrs,
.name = "hotplug",
};
#ifdef CONFIG_HOTPLUG_SMT
static bool cpu_smt_num_threads_valid(unsigned int threads)
{
if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
return threads >= 1 && threads <= cpu_smt_max_threads;
return threads == 1 || threads == cpu_smt_max_threads;
}
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ctrlval, ret, num_threads, orig_threads;
bool force_off;
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
return -EPERM;
if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return -ENODEV;
if (sysfs_streq(buf, "on")) {
ctrlval = CPU_SMT_ENABLED;
num_threads = cpu_smt_max_threads;
} else if (sysfs_streq(buf, "off")) {
ctrlval = CPU_SMT_DISABLED;
num_threads = 1;
} else if (sysfs_streq(buf, "forceoff")) {
ctrlval = CPU_SMT_FORCE_DISABLED;
num_threads = 1;
} else if (kstrtoint(buf, 10, &num_threads) == 0) {
if (num_threads == 1)
ctrlval = CPU_SMT_DISABLED;
else if (cpu_smt_num_threads_valid(num_threads))
ctrlval = CPU_SMT_ENABLED;
else
return -EINVAL;
} else {
return -EINVAL;
}
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
orig_threads = cpu_smt_num_threads;
cpu_smt_num_threads = num_threads;
force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
if (num_threads > orig_threads)
ret = cpuhp_smt_enable();
else if (num_threads < orig_threads || force_off)
ret = cpuhp_smt_disable(ctrlval);
unlock_device_hotplug();
return ret ? ret : count;
}
#else /* !CONFIG_HOTPLUG_SMT */
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
return -ENODEV;
}
#endif /* CONFIG_HOTPLUG_SMT */
static const char *smt_states[] = {
[CPU_SMT_ENABLED] = "on",
[CPU_SMT_DISABLED] = "off",
[CPU_SMT_FORCE_DISABLED] = "forceoff",
[CPU_SMT_NOT_SUPPORTED] = "notsupported",
[CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
};
static ssize_t control_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *state = smt_states[cpu_smt_control];
#ifdef CONFIG_HOTPLUG_SMT
/*
* If SMT is enabled but not all threads are enabled then show the
* number of threads. If all threads are enabled show "on". Otherwise
* show the state name.
*/
if (cpu_smt_control == CPU_SMT_ENABLED &&
cpu_smt_num_threads != cpu_smt_max_threads)
return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
#endif
return sysfs_emit(buf, "%s\n", state);
}
static ssize_t control_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
return __store_smt_control(dev, attr, buf, count);
}
static DEVICE_ATTR_RW(control);
static ssize_t active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", sched_smt_active());
}
static DEVICE_ATTR_RO(active);
static struct attribute *cpuhp_smt_attrs[] = {
&dev_attr_control.attr,
&dev_attr_active.attr,
NULL
};
static const struct attribute_group cpuhp_smt_attr_group = {
.attrs = cpuhp_smt_attrs,
.name = "smt",
};
static int __init cpu_smt_sysfs_init(void)
{
struct device *dev_root;
int ret = -ENODEV;
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
put_device(dev_root);
}
return ret;
}
static int __init cpuhp_sysfs_init(void)
{
struct device *dev_root;
int cpu, ret;
ret = cpu_smt_sysfs_init();
if (ret)
return ret;
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
put_device(dev_root);
if (ret)
return ret;
}
for_each_possible_cpu(cpu) {
struct device *dev = get_cpu_device(cpu);
if (!dev)
continue;
ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
if (ret)
return ret;
}
return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
* represents, for each nr in 0..NR_CPUS-1, the NR_CPUS-bit value 1<<nr.
*
* It is used by cpumask_of() to get a constant address to a CPU
* mask value that has a single bit set only.
*/
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
MASK_DECLARE_8(0), MASK_DECLARE_8(8),
MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
MASK_DECLARE_8(32), MASK_DECLARE_8(40),
MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
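/*
 * Illustrative use (cpumask_of() is defined elsewhere in terms of this
 * table): the mask it returns has exactly one bit set, e.g.:
 *
 *	const struct cpumask *mask = cpumask_of(3);
 *
 *	WARN_ON(!cpumask_test_cpu(3, mask));
 *	WARN_ON(cpumask_weight(mask) != 1);
 */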
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __ro_after_init
= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __ro_after_init;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);
struct cpumask __cpu_enabled_mask __read_mostly;
EXPORT_SYMBOL(__cpu_enabled_mask);
struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);
struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
struct cpumask __cpu_dying_mask __read_mostly;
EXPORT_SYMBOL(__cpu_dying_mask);
atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);
void init_cpu_present(const struct cpumask *src)
{
cpumask_copy(&__cpu_present_mask, src);
}
void init_cpu_possible(const struct cpumask *src)
{
cpumask_copy(&__cpu_possible_mask, src);
}
void set_cpu_online(unsigned int cpu, bool online)
{
/*
* atomic_inc/dec() is required to handle the horrid abuse of this
* function by the reboot and kexec code which invoke it from
* IPI/NMI broadcasts when shutting down CPUs. Invocation from
* regular CPU hotplug is properly serialized.
*
* Note, that the fact that __num_online_cpus is of type atomic_t
* does not protect readers which are not serialized against
* concurrent hotplug operations.
*/
if (online) {
if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
atomic_inc(&__num_online_cpus);
} else {
if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
atomic_dec(&__num_online_cpus);
}
}
/*
* Activate the first processor.
*/
void __init boot_cpu_init(void)
{
int cpu = smp_processor_id();
/* Mark the boot cpu "present", "online" etc for SMP and UP case */
set_cpu_online(cpu, true);
set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
#ifdef CONFIG_SMP
__boot_cpu_id = cpu;
#endif
}
/*
* Must be called _AFTER_ setting up the per_cpu areas
*/
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
#endif
this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
}
#ifdef CONFIG_CPU_MITIGATIONS
/*
* All except the cross-thread attack vector are mitigated by default.
* Cross-thread mitigation often requires disabling SMT, which is expensive,
* so cross-thread mitigations are only partially enabled by default.
*
* Guest-to-Host and Guest-to-Guest vectors are only needed if KVM support is
* present.
*/
static bool attack_vectors[NR_CPU_ATTACK_VECTORS] __ro_after_init = {
[CPU_MITIGATE_USER_KERNEL] = true,
[CPU_MITIGATE_USER_USER] = true,
[CPU_MITIGATE_GUEST_HOST] = IS_ENABLED(CONFIG_KVM),
[CPU_MITIGATE_GUEST_GUEST] = IS_ENABLED(CONFIG_KVM),
};
bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
{
if (v < NR_CPU_ATTACK_VECTORS)
return attack_vectors[v];
WARN_ONCE(1, "Invalid attack vector %d\n", v);
return false;
}
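/*
 * Illustrative sketch only (foo_select_mitigation() is made up): architecture
 * mitigation selection code is expected to consult the relevant vectors
 * before enabling a costly mitigation:
 *
 *	static void foo_select_mitigation(void)
 *	{
 *		if (!cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
 *		    !cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
 *			return;		// nothing to protect, skip it
 *		// ... enable the mitigation ...
 *	}
 */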
/*
* There are 3 global options, 'off', 'auto', 'auto,nosmt'. These may optionally
* be combined with attack-vector disables which follow them.
*
* Examples:
* mitigations=auto,no_user_kernel,no_user_user,no_cross_thread
* mitigations=auto,nosmt,no_guest_host,no_guest_guest
*
* mitigations=off is equivalent to disabling all attack vectors.
*/
enum cpu_mitigations {
CPU_MITIGATIONS_OFF,
CPU_MITIGATIONS_AUTO,
CPU_MITIGATIONS_AUTO_NOSMT,
};
enum {
NO_USER_KERNEL,
NO_USER_USER,
NO_GUEST_HOST,
NO_GUEST_GUEST,
NO_CROSS_THREAD,
NR_VECTOR_PARAMS,
};
enum smt_mitigations smt_mitigations __ro_after_init = SMT_MITIGATIONS_AUTO;
static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
static const match_table_t global_mitigations = {
{ CPU_MITIGATIONS_AUTO_NOSMT, "auto,nosmt"},
{ CPU_MITIGATIONS_AUTO, "auto"},
{ CPU_MITIGATIONS_OFF, "off"},
};
static const match_table_t vector_mitigations = {
{ NO_USER_KERNEL, "no_user_kernel"},
{ NO_USER_USER, "no_user_user"},
{ NO_GUEST_HOST, "no_guest_host"},
{ NO_GUEST_GUEST, "no_guest_guest"},
{ NO_CROSS_THREAD, "no_cross_thread"},
{ NR_VECTOR_PARAMS, NULL},
};
static int __init mitigations_parse_global_opt(char *arg)
{
int i;
for (i = 0; i < ARRAY_SIZE(global_mitigations); i++) {
const char *pattern = global_mitigations[i].pattern;
if (!strncmp(arg, pattern, strlen(pattern))) {
cpu_mitigations = global_mitigations[i].token;
return strlen(pattern);
}
}
return 0;
}
static int __init mitigations_parse_cmdline(char *arg)
{
char *s, *p;
int len;
len = mitigations_parse_global_opt(arg);
if (cpu_mitigations_off()) {
memset(attack_vectors, 0, sizeof(attack_vectors));
smt_mitigations = SMT_MITIGATIONS_OFF;
} else if (cpu_mitigations_auto_nosmt()) {
smt_mitigations = SMT_MITIGATIONS_ON;
}
p = arg + len;
if (!*p)
return 0;
/* Attack vector controls may come after the ',' */
if (*p++ != ',' || !IS_ENABLED(CONFIG_ARCH_HAS_CPU_ATTACK_VECTORS)) {
pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", arg);
return 0;
}
while ((s = strsep(&p, ",")) != NULL) {
switch (match_token(s, vector_mitigations, NULL)) {
case NO_USER_KERNEL:
attack_vectors[CPU_MITIGATE_USER_KERNEL] = false;
break;
case NO_USER_USER:
attack_vectors[CPU_MITIGATE_USER_USER] = false;
break;
case NO_GUEST_HOST:
attack_vectors[CPU_MITIGATE_GUEST_HOST] = false;
break;
case NO_GUEST_GUEST:
attack_vectors[CPU_MITIGATE_GUEST_GUEST] = false;
break;
case NO_CROSS_THREAD:
smt_mitigations = SMT_MITIGATIONS_OFF;
break;
default:
pr_crit("Unsupported mitigations options %s\n", s);
return 0;
}
}
return 0;
}
/* mitigations=off */
bool cpu_mitigations_off(void)
{
return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);
/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
#else
static int __init mitigations_parse_cmdline(char *arg)
{
pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
return 0;
}
#endif
early_param("mitigations", mitigations_parse_cmdline);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_COREDUMP_H
#define _LINUX_SCHED_COREDUMP_H
#include <linux/mm_types.h>
#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
#define SUID_DUMP_USER 1 /* Dump as user of process */
#define SUID_DUMP_ROOT 2 /* Dump as root */
static inline unsigned long __mm_flags_get_dumpable(struct mm_struct *mm)
{
/*
* By convention, the dumpable bits are contained in the first 32 bits of
* the bitmap, so we can simply access the first unsigned long directly.
*/
return __mm_flags_get_word(mm);
}
static inline void __mm_flags_set_mask_dumpable(struct mm_struct *mm, int value)
{
__mm_flags_set_mask_bits_word(mm, MMF_DUMPABLE_MASK, value);
}
extern void set_dumpable(struct mm_struct *mm, int value);
/*
* This returns the actual value of the suid_dumpable flag. Callers using
* it to check for privilege transitions must test against SUID_DUMP_USER
* rather than treating it as a boolean value.
*/
static inline int __get_dumpable(unsigned long mm_flags)
{
return mm_flags & MMF_DUMPABLE_MASK;
}
static inline int get_dumpable(struct mm_struct *mm)
{
unsigned long flags = __mm_flags_get_dumpable(mm);
return __get_dumpable(flags);
}
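/*
 * Illustrative note: a privilege-transition check should compare against
 * SUID_DUMP_USER explicitly, since SUID_DUMP_ROOT is also non-zero:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		// the mm went through a privilege transition; require extra privilege
 */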
#endif /* _LINUX_SCHED_COREDUMP_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/mmzone.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
struct resource;
struct device;
/**
* struct vmem_altmap - pre-allocated storage for vmemmap_populate
* @base_pfn: base of the entire dev_pagemap mapping
* @reserve: pages mapped, but reserved for driver use (relative to @base)
* @free: free pages set aside in the mapping for memmap storage
* @align: pages reserved to meet allocation alignments
* @alloc: track pages consumed, private to vmemmap_populate()
*/
struct vmem_altmap {
unsigned long base_pfn;
const unsigned long end_pfn;
const unsigned long reserve;
unsigned long free;
unsigned long align;
unsigned long alloc;
bool inaccessible;
};
/*
* Specialize ZONE_DEVICE memory into multiple types, each with a different
* usage.
*
* MEMORY_DEVICE_PRIVATE:
* Device memory that is not directly addressable by the CPU: CPU can neither
* read nor write private memory. In this case, we do still have struct pages
* backing the device memory. Doing so simplifies the implementation, but it is
* important to remember that there are certain points at which the struct page
* must be treated as an opaque object, rather than a "normal" struct page.
*
* A more complete discussion of unaddressable memory may be found in
* include/linux/hmm.h and Documentation/mm/hmm.rst.
*
* MEMORY_DEVICE_COHERENT:
* Device memory that is cache coherent from device and CPU point of view. This
* is used on platforms that have an advanced system bus (like CAPI or CXL). A
* driver can hotplug the device memory using ZONE_DEVICE and with that memory
* type. Any page of a process can be migrated to such memory. However no one
* should be allowed to pin such memory so that it can always be evicted.
*
* MEMORY_DEVICE_FS_DAX:
* Host memory that has access semantics similar to System RAM, i.e. DMA
* coherent and supports page pinning. In support of coordinating page
* pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a
* wakeup event whenever a page is unpinned and becomes idle. This
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
* MEMORY_DEVICE_GENERIC:
* Host memory that has access semantics similar to System RAM, i.e. DMA
* coherent and supports page pinning. This is for example used by DAX devices
* that expose memory using a character device.
*
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
* transactions.
*/
enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_COHERENT,
MEMORY_DEVICE_FS_DAX,
MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
};
struct dev_pagemap_ops {
/*
* Called once the page refcount reaches 0. The reference count will be
* reset to one by the core code after the method is called to prepare
* for handing out the page again.
*/
void (*page_free)(struct page *page);
/*
* Used for private (un-addressable) device memory only. Must migrate
* the page back to a CPU accessible page.
*/
vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
/*
* Handle a memory failure that happens on a range of pfns. Notify the
* processes that are using these pfns, and try to recover the data on
* them if necessary. The mf_flags are finally passed to the recovery
* function through the whole notify routine.
*
* When this is not implemented, or it returns -EOPNOTSUPP, the caller
* will fall back to a common handler called mf_generic_kill_procs().
*/
int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
unsigned long nr_pages, int mf_flags);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @ref: reference count that pins the devm_memremap_pages() mapping
* @done: completion for @ref
* @type: memory type: see MEMORY_* above in memremap.h
* @flags: PGMAP_* flags to specify detailed behavior
* @vmemmap_shift: structural definition of how the vmemmap page metadata
* is populated, specifically the metadata page order.
* A zero value (default) uses base pages as the vmemmap metadata
* representation. A bigger value will set up compound struct pages
* of the requested order value.
* @ops: method table
* @owner: an opaque pointer identifying the entity that manages this
* instance. Used by various helpers to make sure that no
* foreign ZONE_DEVICE memory is accessed.
* @nr_range: number of ranges to be mapped
* @range: range to be mapped when nr_range == 1
* @ranges: array of ranges to be mapped when nr_range > 1
*/
struct dev_pagemap {
struct vmem_altmap altmap;
struct percpu_ref ref;
struct completion done;
enum memory_type type;
unsigned int flags;
unsigned long vmemmap_shift;
const struct dev_pagemap_ops *ops;
void *owner;
int nr_range;
union {
struct range range;
DECLARE_FLEX_ARRAY(struct range, ranges);
};
};
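/*
 * Illustrative sketch of a minimal ZONE_DEVICE user (foo_probe(), the device
 * and the resource are assumptions; error handling is elided):
 *
 *	static int foo_probe(struct device *dev, struct resource *res)
 *	{
 *		struct dev_pagemap *pgmap;
 *		void *addr;
 *
 *		pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
 *		if (!pgmap)
 *			return -ENOMEM;
 *
 *		pgmap->type = MEMORY_DEVICE_GENERIC;
 *		pgmap->range.start = res->start;
 *		pgmap->range.end = res->end;
 *		pgmap->nr_range = 1;
 *
 *		addr = devm_memremap_pages(dev, pgmap);
 *		return PTR_ERR_OR_ZERO(addr);
 *	}
 */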
static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
{
return pgmap->ops && pgmap->ops->memory_failure;
}
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
return &pgmap->altmap;
return NULL;
}
static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
{
return 1 << pgmap->vmemmap_shift;
}
static inline bool folio_is_device_private(const struct folio *folio)
{
return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
folio_is_zone_device(folio) &&
folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
static inline bool is_device_private_page(const struct page *page)
{
return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
folio_is_device_private(page_folio(page));
}
static inline bool folio_is_pci_p2pdma(const struct folio *folio)
{
return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
folio_is_zone_device(folio) &&
folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
folio_is_pci_p2pdma(page_folio(page));
}
static inline bool folio_is_device_coherent(const struct folio *folio)
{
return folio_is_zone_device(folio) &&
folio->pgmap->type == MEMORY_DEVICE_COHERENT;
}
static inline bool is_device_coherent_page(const struct page *page)
{
return folio_is_device_coherent(page_folio(page));
}
static inline bool folio_is_fsdax(const struct folio *folio)
{
return folio_is_zone_device(folio) &&
folio->pgmap->type == MEMORY_DEVICE_FS_DAX;
}
static inline bool is_fsdax_page(const struct page *page)
{
return folio_is_fsdax(page_folio(page));
}
#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
{
/*
* Fail attempts to call devm_memremap_pages() without
* ZONE_DEVICE support enabled; this requires callers to fall
* back to plain devm_memremap() based on config.
*/
WARN_ON_ONCE(1);
return ERR_PTR(-ENXIO);
}
static inline void devm_memunmap_pages(struct device *dev,
struct dev_pagemap *pgmap)
{
}
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
{
return NULL;
}
static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
return false;
}
/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
return PAGE_SIZE;
}
#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
percpu_ref_put(&pgmap->ref);
}
#endif /* _LINUX_MEMREMAP_H_ */